author    Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 13:16:40 +0000
committer Daniel Baumann <daniel.baumann@progress-linux.org>    2024-04-28 13:16:40 +0000
commit    47ab3d4a42e9ab51c465c4322d2ec233f6324e6b (patch)
tree      a61a0ffd83f4a3def4b36e5c8e99630c559aa723 /src/cmd/compile
parent    Initial commit. (diff)
Adding upstream version 1.18.10.
Signed-off-by: Daniel Baumann <daniel.baumann@progress-linux.org>
Diffstat (limited to 'src/cmd/compile')
-rw-r--r--src/cmd/compile/README.md117
-rw-r--r--src/cmd/compile/abi-internal.md870
-rw-r--r--src/cmd/compile/doc.go261
-rw-r--r--src/cmd/compile/internal/abi/abiutils.go829
-rw-r--r--src/cmd/compile/internal/amd64/galign.go27
-rw-r--r--src/cmd/compile/internal/amd64/ggen.go154
-rw-r--r--src/cmd/compile/internal/amd64/ssa.go1411
-rw-r--r--src/cmd/compile/internal/amd64/versions_test.go393
-rw-r--r--src/cmd/compile/internal/arm/galign.go25
-rw-r--r--src/cmd/compile/internal/arm/ggen.go60
-rw-r--r--src/cmd/compile/internal/arm/ssa.go980
-rw-r--r--src/cmd/compile/internal/arm64/galign.go27
-rw-r--r--src/cmd/compile/internal/arm64/ggen.go76
-rw-r--r--src/cmd/compile/internal/arm64/ssa.go1346
-rw-r--r--src/cmd/compile/internal/base/base.go77
-rw-r--r--src/cmd/compile/internal/base/bootstrap_false.go12
-rw-r--r--src/cmd/compile/internal/base/bootstrap_true.go12
-rw-r--r--src/cmd/compile/internal/base/debug.go53
-rw-r--r--src/cmd/compile/internal/base/flag.go489
-rw-r--r--src/cmd/compile/internal/base/link.go36
-rw-r--r--src/cmd/compile/internal/base/mapfile_mmap.go49
-rw-r--r--src/cmd/compile/internal/base/mapfile_read.go22
-rw-r--r--src/cmd/compile/internal/base/print.go285
-rw-r--r--src/cmd/compile/internal/base/timings.go237
-rw-r--r--src/cmd/compile/internal/bitvec/bv.go201
-rw-r--r--src/cmd/compile/internal/deadcode/deadcode.go167
-rw-r--r--src/cmd/compile/internal/devirtualize/devirtualize.go85
-rw-r--r--src/cmd/compile/internal/dwarfgen/dwarf.go585
-rw-r--r--src/cmd/compile/internal/dwarfgen/dwinl.go454
-rw-r--r--src/cmd/compile/internal/dwarfgen/marker.go94
-rw-r--r--src/cmd/compile/internal/dwarfgen/scope.go136
-rw-r--r--src/cmd/compile/internal/dwarfgen/scope_test.go539
-rw-r--r--src/cmd/compile/internal/escape/assign.go120
-rw-r--r--src/cmd/compile/internal/escape/call.go458
-rw-r--r--src/cmd/compile/internal/escape/desugar.go37
-rw-r--r--src/cmd/compile/internal/escape/escape.go483
-rw-r--r--src/cmd/compile/internal/escape/expr.go335
-rw-r--r--src/cmd/compile/internal/escape/graph.go324
-rw-r--r--src/cmd/compile/internal/escape/leaks.go106
-rw-r--r--src/cmd/compile/internal/escape/solve.go289
-rw-r--r--src/cmd/compile/internal/escape/stmt.go208
-rw-r--r--src/cmd/compile/internal/escape/utils.go215
-rw-r--r--src/cmd/compile/internal/gc/bootstrap.go17
-rw-r--r--src/cmd/compile/internal/gc/compile.go169
-rw-r--r--src/cmd/compile/internal/gc/export.go51
-rw-r--r--src/cmd/compile/internal/gc/main.go380
-rw-r--r--src/cmd/compile/internal/gc/obj.go312
-rw-r--r--src/cmd/compile/internal/gc/pprof.go14
-rw-r--r--src/cmd/compile/internal/gc/trace.go30
-rw-r--r--src/cmd/compile/internal/gc/util.go76
-rw-r--r--src/cmd/compile/internal/importer/exportdata.go91
-rw-r--r--src/cmd/compile/internal/importer/gcimporter.go174
-rw-r--r--src/cmd/compile/internal/importer/gcimporter_test.go613
-rw-r--r--src/cmd/compile/internal/importer/iimport.go804
-rw-r--r--src/cmd/compile/internal/importer/support.go134
-rw-r--r--src/cmd/compile/internal/importer/testdata/a.go14
-rw-r--r--src/cmd/compile/internal/importer/testdata/b.go11
-rw-r--r--src/cmd/compile/internal/importer/testdata/exports.go91
-rw-r--r--src/cmd/compile/internal/importer/testdata/generics.go29
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue15920.go11
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue20046.go9
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue25301.go17
-rw-r--r--src/cmd/compile/internal/importer/testdata/issue25596.go13
-rw-r--r--src/cmd/compile/internal/importer/testdata/p.go13
-rw-r--r--src/cmd/compile/internal/importer/testdata/versions/test.go28
-rw-r--r--src/cmd/compile/internal/inline/inl.go1407
-rw-r--r--src/cmd/compile/internal/ir/bitset.go71
-rw-r--r--src/cmd/compile/internal/ir/cfg.go26
-rw-r--r--src/cmd/compile/internal/ir/class_string.go30
-rw-r--r--src/cmd/compile/internal/ir/const.go99
-rw-r--r--src/cmd/compile/internal/ir/copy.go102
-rw-r--r--src/cmd/compile/internal/ir/dump.go272
-rw-r--r--src/cmd/compile/internal/ir/expr.go1147
-rw-r--r--src/cmd/compile/internal/ir/fmt.go1359
-rw-r--r--src/cmd/compile/internal/ir/func.go438
-rw-r--r--src/cmd/compile/internal/ir/ir.go5
-rw-r--r--src/cmd/compile/internal/ir/mini.go92
-rw-r--r--src/cmd/compile/internal/ir/mknode.go229
-rw-r--r--src/cmd/compile/internal/ir/name.go557
-rw-r--r--src/cmd/compile/internal/ir/node.go620
-rw-r--r--src/cmd/compile/internal/ir/node_gen.go1524
-rw-r--r--src/cmd/compile/internal/ir/op_string.go184
-rw-r--r--src/cmd/compile/internal/ir/package.go35
-rw-r--r--src/cmd/compile/internal/ir/scc.go131
-rw-r--r--src/cmd/compile/internal/ir/sizeof_test.go37
-rw-r--r--src/cmd/compile/internal/ir/stmt.go411
-rw-r--r--src/cmd/compile/internal/ir/symtab.go75
-rw-r--r--src/cmd/compile/internal/ir/type.go335
-rw-r--r--src/cmd/compile/internal/ir/val.go171
-rw-r--r--src/cmd/compile/internal/ir/visit.go186
-rw-r--r--src/cmd/compile/internal/liveness/arg.go339
-rw-r--r--src/cmd/compile/internal/liveness/bvset.go98
-rw-r--r--src/cmd/compile/internal/liveness/plive.go1530
-rw-r--r--src/cmd/compile/internal/logopt/escape.go14
-rw-r--r--src/cmd/compile/internal/logopt/escape_bootstrap.go13
-rw-r--r--src/cmd/compile/internal/logopt/log_opts.go523
-rw-r--r--src/cmd/compile/internal/logopt/logopt_test.go258
-rw-r--r--src/cmd/compile/internal/mips/galign.go27
-rw-r--r--src/cmd/compile/internal/mips/ggen.go55
-rw-r--r--src/cmd/compile/internal/mips/ssa.go876
-rw-r--r--src/cmd/compile/internal/mips64/galign.go28
-rw-r--r--src/cmd/compile/internal/mips64/ggen.go59
-rw-r--r--src/cmd/compile/internal/mips64/ssa.go846
-rw-r--r--src/cmd/compile/internal/noder/codes.go124
-rw-r--r--src/cmd/compile/internal/noder/decl.go358
-rw-r--r--src/cmd/compile/internal/noder/decoder.go302
-rw-r--r--src/cmd/compile/internal/noder/encoder.go285
-rw-r--r--src/cmd/compile/internal/noder/export.go65
-rw-r--r--src/cmd/compile/internal/noder/expr.go493
-rw-r--r--src/cmd/compile/internal/noder/frames_go1.go21
-rw-r--r--src/cmd/compile/internal/noder/frames_go17.go25
-rw-r--r--src/cmd/compile/internal/noder/func.go73
-rw-r--r--src/cmd/compile/internal/noder/helpers.go226
-rw-r--r--src/cmd/compile/internal/noder/import.go601
-rw-r--r--src/cmd/compile/internal/noder/irgen.go357
-rw-r--r--src/cmd/compile/internal/noder/lex.go193
-rw-r--r--src/cmd/compile/internal/noder/lex_test.go122
-rw-r--r--src/cmd/compile/internal/noder/linker.go296
-rw-r--r--src/cmd/compile/internal/noder/noder.go1917
-rw-r--r--src/cmd/compile/internal/noder/object.go206
-rw-r--r--src/cmd/compile/internal/noder/posmap.go85
-rw-r--r--src/cmd/compile/internal/noder/quirks.go450
-rw-r--r--src/cmd/compile/internal/noder/reader.go2460
-rw-r--r--src/cmd/compile/internal/noder/reader2.go506
-rw-r--r--src/cmd/compile/internal/noder/reloc.go42
-rw-r--r--src/cmd/compile/internal/noder/scopes.go64
-rw-r--r--src/cmd/compile/internal/noder/sizes.go150
-rw-r--r--src/cmd/compile/internal/noder/stencil.go2235
-rw-r--r--src/cmd/compile/internal/noder/stmt.go353
-rw-r--r--src/cmd/compile/internal/noder/sync.go187
-rw-r--r--src/cmd/compile/internal/noder/syncmarker_string.go156
-rw-r--r--src/cmd/compile/internal/noder/transform.go1091
-rw-r--r--src/cmd/compile/internal/noder/types.go517
-rw-r--r--src/cmd/compile/internal/noder/unified.go334
-rw-r--r--src/cmd/compile/internal/noder/unified_test.go160
-rw-r--r--src/cmd/compile/internal/noder/validate.go132
-rw-r--r--src/cmd/compile/internal/noder/writer.go1862
-rw-r--r--src/cmd/compile/internal/objw/objw.go85
-rw-r--r--src/cmd/compile/internal/objw/prog.go226
-rw-r--r--src/cmd/compile/internal/pkginit/init.go144
-rw-r--r--src/cmd/compile/internal/pkginit/initorder.go368
-rw-r--r--src/cmd/compile/internal/ppc64/galign.go29
-rw-r--r--src/cmd/compile/internal/ppc64/ggen.go55
-rw-r--r--src/cmd/compile/internal/ppc64/opt.go12
-rw-r--r--src/cmd/compile/internal/ppc64/ssa.go2068
-rw-r--r--src/cmd/compile/internal/reflectdata/alg.go808
-rw-r--r--src/cmd/compile/internal/reflectdata/reflect.go2120
-rw-r--r--src/cmd/compile/internal/riscv64/galign.go24
-rw-r--r--src/cmd/compile/internal/riscv64/ggen.go59
-rw-r--r--src/cmd/compile/internal/riscv64/gsubr.go20
-rw-r--r--src/cmd/compile/internal/riscv64/ssa.go775
-rw-r--r--src/cmd/compile/internal/s390x/galign.go23
-rw-r--r--src/cmd/compile/internal/s390x/ggen.go89
-rw-r--r--src/cmd/compile/internal/s390x/ssa.go953
-rw-r--r--src/cmd/compile/internal/ssa/README.md222
-rw-r--r--src/cmd/compile/internal/ssa/TODO24
-rw-r--r--src/cmd/compile/internal/ssa/addressingmodes.go460
-rw-r--r--src/cmd/compile/internal/ssa/bench_test.go32
-rw-r--r--src/cmd/compile/internal/ssa/biasedsparsemap.go112
-rw-r--r--src/cmd/compile/internal/ssa/block.go410
-rw-r--r--src/cmd/compile/internal/ssa/branchelim.go449
-rw-r--r--src/cmd/compile/internal/ssa/branchelim_test.go172
-rw-r--r--src/cmd/compile/internal/ssa/cache.go81
-rw-r--r--src/cmd/compile/internal/ssa/check.go600
-rw-r--r--src/cmd/compile/internal/ssa/checkbce.go35
-rw-r--r--src/cmd/compile/internal/ssa/compile.go604
-rw-r--r--src/cmd/compile/internal/ssa/config.go369
-rw-r--r--src/cmd/compile/internal/ssa/copyelim.go84
-rw-r--r--src/cmd/compile/internal/ssa/copyelim_test.go41
-rw-r--r--src/cmd/compile/internal/ssa/critical.go115
-rw-r--r--src/cmd/compile/internal/ssa/cse.go373
-rw-r--r--src/cmd/compile/internal/ssa/cse_test.go131
-rw-r--r--src/cmd/compile/internal/ssa/deadcode.go389
-rw-r--r--src/cmd/compile/internal/ssa/deadcode_test.go161
-rw-r--r--src/cmd/compile/internal/ssa/deadstore.go350
-rw-r--r--src/cmd/compile/internal/ssa/deadstore_test.go129
-rw-r--r--src/cmd/compile/internal/ssa/debug.go1734
-rw-r--r--src/cmd/compile/internal/ssa/debug_lines_test.go258
-rw-r--r--src/cmd/compile/internal/ssa/debug_test.go1023
-rw-r--r--src/cmd/compile/internal/ssa/decompose.go479
-rw-r--r--src/cmd/compile/internal/ssa/dom.go302
-rw-r--r--src/cmd/compile/internal/ssa/dom_test.go608
-rw-r--r--src/cmd/compile/internal/ssa/expand_calls.go1795
-rw-r--r--src/cmd/compile/internal/ssa/export_test.go123
-rw-r--r--src/cmd/compile/internal/ssa/flagalloc.go269
-rw-r--r--src/cmd/compile/internal/ssa/flags_amd64_test.s29
-rw-r--r--src/cmd/compile/internal/ssa/flags_arm64_test.s30
-rw-r--r--src/cmd/compile/internal/ssa/flags_test.go109
-rw-r--r--src/cmd/compile/internal/ssa/func.go921
-rw-r--r--src/cmd/compile/internal/ssa/func_test.go484
-rw-r--r--src/cmd/compile/internal/ssa/fuse.go254
-rw-r--r--src/cmd/compile/internal/ssa/fuse_branchredirect.go110
-rw-r--r--src/cmd/compile/internal/ssa/fuse_comparisons.go157
-rw-r--r--src/cmd/compile/internal/ssa/fuse_test.go301
-rw-r--r--src/cmd/compile/internal/ssa/gen/386.rules1112
-rw-r--r--src/cmd/compile/internal/ssa/gen/386Ops.go588
-rw-r--r--src/cmd/compile/internal/ssa/gen/386splitload.rules11
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64.rules2247
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64Ops.go967
-rw-r--r--src/cmd/compile/internal/ssa/gen/AMD64splitload.rules45
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM.rules1482
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64.rules2944
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARM64Ops.go792
-rw-r--r--src/cmd/compile/internal/ssa/gen/ARMOps.go603
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS.rules703
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS64.rules691
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPS64Ops.go485
-rw-r--r--src/cmd/compile/internal/ssa/gen/MIPSOps.go442
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64.rules1485
-rw-r--r--src/cmd/compile/internal/ssa/gen/PPC64Ops.go726
-rw-r--r--src/cmd/compile/internal/ssa/gen/README7
-rw-r--r--src/cmd/compile/internal/ssa/gen/RISCV64.rules757
-rw-r--r--src/cmd/compile/internal/ssa/gen/RISCV64Ops.go481
-rw-r--r--src/cmd/compile/internal/ssa/gen/S390X.rules1708
-rw-r--r--src/cmd/compile/internal/ssa/gen/S390XOps.go820
-rw-r--r--src/cmd/compile/internal/ssa/gen/Wasm.rules411
-rw-r--r--src/cmd/compile/internal/ssa/gen/WasmOps.go280
-rwxr-xr-xsrc/cmd/compile/internal/ssa/gen/cover.bash26
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec.rules93
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec64.rules396
-rw-r--r--src/cmd/compile/internal/ssa/gen/dec64Ops.go21
-rw-r--r--src/cmd/compile/internal/ssa/gen/decOps.go21
-rw-r--r--src/cmd/compile/internal/ssa/gen/generic.rules2542
-rw-r--r--src/cmd/compile/internal/ssa/gen/genericOps.go662
-rw-r--r--src/cmd/compile/internal/ssa/gen/main.go573
-rw-r--r--src/cmd/compile/internal/ssa/gen/rulegen.go1886
-rw-r--r--src/cmd/compile/internal/ssa/html.go1316
-rw-r--r--src/cmd/compile/internal/ssa/id.go28
-rw-r--r--src/cmd/compile/internal/ssa/layout.go182
-rw-r--r--src/cmd/compile/internal/ssa/lca.go127
-rw-r--r--src/cmd/compile/internal/ssa/lca_test.go88
-rw-r--r--src/cmd/compile/internal/ssa/likelyadjust.go576
-rw-r--r--src/cmd/compile/internal/ssa/location.go109
-rw-r--r--src/cmd/compile/internal/ssa/loopbce.go353
-rw-r--r--src/cmd/compile/internal/ssa/loopreschedchecks.go500
-rw-r--r--src/cmd/compile/internal/ssa/looprotate.go111
-rw-r--r--src/cmd/compile/internal/ssa/lower.go44
-rw-r--r--src/cmd/compile/internal/ssa/magic.go424
-rw-r--r--src/cmd/compile/internal/ssa/magic_test.go410
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck.go337
-rw-r--r--src/cmd/compile/internal/ssa/nilcheck_test.go434
-rw-r--r--src/cmd/compile/internal/ssa/numberlines.go262
-rw-r--r--src/cmd/compile/internal/ssa/op.go531
-rw-r--r--src/cmd/compile/internal/ssa/opGen.go37469
-rw-r--r--src/cmd/compile/internal/ssa/opt.go10
-rw-r--r--src/cmd/compile/internal/ssa/passbm_test.go101
-rw-r--r--src/cmd/compile/internal/ssa/phielim.go69
-rw-r--r--src/cmd/compile/internal/ssa/phiopt.go323
-rw-r--r--src/cmd/compile/internal/ssa/poset.go1359
-rw-r--r--src/cmd/compile/internal/ssa/poset_test.go800
-rw-r--r--src/cmd/compile/internal/ssa/print.go191
-rw-r--r--src/cmd/compile/internal/ssa/prove.go1442
-rw-r--r--src/cmd/compile/internal/ssa/regalloc.go2829
-rw-r--r--src/cmd/compile/internal/ssa/regalloc_test.go230
-rw-r--r--src/cmd/compile/internal/ssa/rewrite.go1963
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386.go12557
-rw-r--r--src/cmd/compile/internal/ssa/rewrite386splitload.go160
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64.go35214
-rw-r--r--src/cmd/compile/internal/ssa/rewriteAMD64splitload.go851
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM.go21903
-rw-r--r--src/cmd/compile/internal/ssa/rewriteARM64.go30569
-rw-r--r--src/cmd/compile/internal/ssa/rewriteCond_test.go597
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS.go7558
-rw-r--r--src/cmd/compile/internal/ssa/rewriteMIPS64.go8141
-rw-r--r--src/cmd/compile/internal/ssa/rewritePPC64.go18471
-rw-r--r--src/cmd/compile/internal/ssa/rewriteRISCV64.go6764
-rw-r--r--src/cmd/compile/internal/ssa/rewriteS390X.go17915
-rw-r--r--src/cmd/compile/internal/ssa/rewriteWasm.go4909
-rw-r--r--src/cmd/compile/internal/ssa/rewrite_test.go220
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec.go429
-rw-r--r--src/cmd/compile/internal/ssa/rewritedec64.go2462
-rw-r--r--src/cmd/compile/internal/ssa/rewritegeneric.go25604
-rw-r--r--src/cmd/compile/internal/ssa/schedule.go524
-rw-r--r--src/cmd/compile/internal/ssa/schedule_test.go101
-rw-r--r--src/cmd/compile/internal/ssa/shift_test.go107
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit.go513
-rw-r--r--src/cmd/compile/internal/ssa/shortcircuit_test.go53
-rw-r--r--src/cmd/compile/internal/ssa/sizeof_test.go39
-rw-r--r--src/cmd/compile/internal/ssa/softfloat.go80
-rw-r--r--src/cmd/compile/internal/ssa/sparsemap.go93
-rw-r--r--src/cmd/compile/internal/ssa/sparseset.go79
-rw-r--r--src/cmd/compile/internal/ssa/sparsetree.go241
-rw-r--r--src/cmd/compile/internal/ssa/stackalloc.go472
-rw-r--r--src/cmd/compile/internal/ssa/stackframe.go10
-rw-r--r--src/cmd/compile/internal/ssa/stmtlines_test.go141
-rw-r--r--src/cmd/compile/internal/ssa/testdata/convertline.go16
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts99
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts94
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts123
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts143
-rw-r--r--src/cmd/compile/internal/ssa/testdata/hist.go106
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts11
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22558.go51
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts7
-rw-r--r--src/cmd/compile/internal/ssa/testdata/i22600.go27
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts12
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts4
-rw-r--r--src/cmd/compile/internal/ssa/testdata/infloop.go16
-rw-r--r--src/cmd/compile/internal/ssa/testdata/inline-dump.go17
-rw-r--r--src/cmd/compile/internal/ssa/testdata/pushback.go30
-rw-r--r--src/cmd/compile/internal/ssa/testdata/sayhi.go12
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts56
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts46
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts64
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts55
-rw-r--r--src/cmd/compile/internal/ssa/testdata/scopes.go107
-rw-r--r--src/cmd/compile/internal/ssa/tighten.go165
-rw-r--r--src/cmd/compile/internal/ssa/trim.go172
-rw-r--r--src/cmd/compile/internal/ssa/tuple.go71
-rw-r--r--src/cmd/compile/internal/ssa/value.go559
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier.go665
-rw-r--r--src/cmd/compile/internal/ssa/writebarrier_test.go56
-rw-r--r--src/cmd/compile/internal/ssa/xposmap.go116
-rw-r--r--src/cmd/compile/internal/ssa/zcse.go79
-rw-r--r--src/cmd/compile/internal/ssa/zeroextension_test.go34
-rw-r--r--src/cmd/compile/internal/ssagen/abi.go434
-rw-r--r--src/cmd/compile/internal/ssagen/arch.go51
-rw-r--r--src/cmd/compile/internal/ssagen/nowb.go206
-rw-r--r--src/cmd/compile/internal/ssagen/pgen.go291
-rw-r--r--src/cmd/compile/internal/ssagen/phi.go557
-rw-r--r--src/cmd/compile/internal/ssagen/ssa.go7943
-rw-r--r--src/cmd/compile/internal/staticdata/data.go376
-rw-r--r--src/cmd/compile/internal/staticdata/embed.go174
-rw-r--r--src/cmd/compile/internal/staticinit/sched.go609
-rw-r--r--src/cmd/compile/internal/syntax/branches.go311
-rw-r--r--src/cmd/compile/internal/syntax/dumper.go212
-rw-r--r--src/cmd/compile/internal/syntax/dumper_test.go21
-rw-r--r--src/cmd/compile/internal/syntax/error_test.go195
-rw-r--r--src/cmd/compile/internal/syntax/nodes.go479
-rw-r--r--src/cmd/compile/internal/syntax/nodes_test.go329
-rw-r--r--src/cmd/compile/internal/syntax/operator_string.go46
-rw-r--r--src/cmd/compile/internal/syntax/parser.go2862
-rw-r--r--src/cmd/compile/internal/syntax/parser_test.go374
-rw-r--r--src/cmd/compile/internal/syntax/pos.go209
-rw-r--r--src/cmd/compile/internal/syntax/positions.go364
-rw-r--r--src/cmd/compile/internal/syntax/printer.go996
-rw-r--r--src/cmd/compile/internal/syntax/printer_test.go250
-rw-r--r--src/cmd/compile/internal/syntax/scanner.go881
-rw-r--r--src/cmd/compile/internal/syntax/scanner_test.go767
-rw-r--r--src/cmd/compile/internal/syntax/source.go218
-rw-r--r--src/cmd/compile/internal/syntax/syntax.go97
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/chans.go262
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/linalg.go283
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/map.go2113
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/map2.go2146
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/slices.go268
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/smoketest.go283
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/typeinst.go260
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2232
-rw-r--r--src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2451
-rw-r--r--src/cmd/compile/internal/syntax/testdata/interface.go274
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue20789.src9
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23385.src17
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue23434.src31
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue31092.src16
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue43527.go223
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue43674.src13
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue46558.src14
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue47704.go218
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue47704.src18
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue48382.go215
-rw-r--r--src/cmd/compile/internal/syntax/testdata/issue49482.go231
-rw-r--r--src/cmd/compile/internal/syntax/testdata/sample.src33
-rw-r--r--src/cmd/compile/internal/syntax/testdata/tparams.go224
-rw-r--r--src/cmd/compile/internal/syntax/testdata/typeset.go289
-rw-r--r--src/cmd/compile/internal/syntax/testing.go72
-rw-r--r--src/cmd/compile/internal/syntax/testing_test.go45
-rw-r--r--src/cmd/compile/internal/syntax/token_string.go70
-rw-r--r--src/cmd/compile/internal/syntax/tokens.go157
-rw-r--r--src/cmd/compile/internal/syntax/walk.go362
-rw-r--r--src/cmd/compile/internal/test/README4
-rw-r--r--src/cmd/compile/internal/test/abiutils_test.go399
-rw-r--r--src/cmd/compile/internal/test/abiutilsaux_test.go132
-rw-r--r--src/cmd/compile/internal/test/align_test.go96
-rw-r--r--src/cmd/compile/internal/test/bench_test.go124
-rw-r--r--src/cmd/compile/internal/test/clobberdead_test.go55
-rw-r--r--src/cmd/compile/internal/test/constFold_test.go18111
-rw-r--r--src/cmd/compile/internal/test/dep_test.go30
-rw-r--r--src/cmd/compile/internal/test/divconst_test.go325
-rw-r--r--src/cmd/compile/internal/test/fixedbugs_test.go92
-rw-r--r--src/cmd/compile/internal/test/float_test.go544
-rw-r--r--src/cmd/compile/internal/test/global_test.go116
-rw-r--r--src/cmd/compile/internal/test/iface_test.go126
-rw-r--r--src/cmd/compile/internal/test/inl_test.go275
-rw-r--r--src/cmd/compile/internal/test/inst_test.go73
-rw-r--r--src/cmd/compile/internal/test/issue50182_test.go62
-rw-r--r--src/cmd/compile/internal/test/lang_test.go64
-rw-r--r--src/cmd/compile/internal/test/logic_test.go289
-rw-r--r--src/cmd/compile/internal/test/mulconst_test.go242
-rw-r--r--src/cmd/compile/internal/test/race.go65
-rw-r--r--src/cmd/compile/internal/test/reproduciblebuilds_test.go112
-rw-r--r--src/cmd/compile/internal/test/shift_test.go1031
-rw-r--r--src/cmd/compile/internal/test/ssa_test.go191
-rw-r--r--src/cmd/compile/internal/test/test.go1
-rw-r--r--src/cmd/compile/internal/test/testdata/addressed_test.go210
-rw-r--r--src/cmd/compile/internal/test/testdata/append_test.go61
-rw-r--r--src/cmd/compile/internal/test/testdata/arithBoundary_test.go694
-rw-r--r--src/cmd/compile/internal/test/testdata/arithConst_test.go9570
-rw-r--r--src/cmd/compile/internal/test/testdata/arith_test.go1497
-rw-r--r--src/cmd/compile/internal/test/testdata/array_test.go132
-rw-r--r--src/cmd/compile/internal/test/testdata/assert_test.go128
-rw-r--r--src/cmd/compile/internal/test/testdata/break_test.go250
-rw-r--r--src/cmd/compile/internal/test/testdata/chan_test.go63
-rw-r--r--src/cmd/compile/internal/test/testdata/closure_test.go32
-rw-r--r--src/cmd/compile/internal/test/testdata/cmpConst_test.go2209
-rw-r--r--src/cmd/compile/internal/test/testdata/cmp_test.go37
-rw-r--r--src/cmd/compile/internal/test/testdata/compound_test.go128
-rw-r--r--src/cmd/compile/internal/test/testdata/copy_test.go760
-rw-r--r--src/cmd/compile/internal/test/testdata/ctl_test.go149
-rw-r--r--src/cmd/compile/internal/test/testdata/deferNoReturn_test.go21
-rw-r--r--src/cmd/compile/internal/test/testdata/divbyzero_test.go48
-rw-r--r--src/cmd/compile/internal/test/testdata/dupLoad_test.go83
-rw-r--r--src/cmd/compile/internal/test/testdata/flowgraph_generator1.go315
-rw-r--r--src/cmd/compile/internal/test/testdata/fp_test.go1773
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go209
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/arithConstGen.go346
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go247
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/constFoldGen.go307
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/copyGen.go121
-rw-r--r--src/cmd/compile/internal/test/testdata/gen/zeroGen.go143
-rw-r--r--src/cmd/compile/internal/test/testdata/loadstore_test.go204
-rw-r--r--src/cmd/compile/internal/test/testdata/map_test.go37
-rw-r--r--src/cmd/compile/internal/test/testdata/mysort/mysort.go40
-rw-r--r--src/cmd/compile/internal/test/testdata/namedReturn_test.go93
-rw-r--r--src/cmd/compile/internal/test/testdata/phi_test.go99
-rw-r--r--src/cmd/compile/internal/test/testdata/ptrsort.go30
-rw-r--r--src/cmd/compile/internal/test/testdata/ptrsort.out3
-rw-r--r--src/cmd/compile/internal/test/testdata/regalloc_test.go50
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue20272.go34
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue27013.go15
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue30202.go17
-rw-r--r--src/cmd/compile/internal/test/testdata/reproducible/issue38068.go70
-rw-r--r--src/cmd/compile/internal/test/testdata/short_test.go57
-rw-r--r--src/cmd/compile/internal/test/testdata/slice_test.go46
-rw-r--r--src/cmd/compile/internal/test/testdata/sqrtConst_test.go50
-rw-r--r--src/cmd/compile/internal/test/testdata/string_test.go207
-rw-r--r--src/cmd/compile/internal/test/testdata/unsafe_test.go145
-rw-r--r--src/cmd/compile/internal/test/testdata/zero_test.go711
-rw-r--r--src/cmd/compile/internal/test/truncconst_test.go63
-rw-r--r--src/cmd/compile/internal/test/zerorange_test.go185
-rw-r--r--src/cmd/compile/internal/typebits/typebits.go87
-rw-r--r--src/cmd/compile/internal/typecheck/bexport.go108
-rw-r--r--src/cmd/compile/internal/typecheck/builtin.go380
-rw-r--r--src/cmd/compile/internal/typecheck/builtin/runtime.go274
-rw-r--r--src/cmd/compile/internal/typecheck/builtin_test.go32
-rw-r--r--src/cmd/compile/internal/typecheck/const.go955
-rw-r--r--src/cmd/compile/internal/typecheck/crawler.go399
-rw-r--r--src/cmd/compile/internal/typecheck/dcl.go504
-rw-r--r--src/cmd/compile/internal/typecheck/export.go74
-rw-r--r--src/cmd/compile/internal/typecheck/expr.go917
-rw-r--r--src/cmd/compile/internal/typecheck/func.go977
-rw-r--r--src/cmd/compile/internal/typecheck/iexport.go2325
-rw-r--r--src/cmd/compile/internal/typecheck/iimport.go2011
-rw-r--r--src/cmd/compile/internal/typecheck/mkbuiltin.go249
-rw-r--r--src/cmd/compile/internal/typecheck/stmt.go683
-rw-r--r--src/cmd/compile/internal/typecheck/subr.go1584
-rw-r--r--src/cmd/compile/internal/typecheck/syms.go103
-rw-r--r--src/cmd/compile/internal/typecheck/target.go12
-rw-r--r--src/cmd/compile/internal/typecheck/type.go188
-rw-r--r--src/cmd/compile/internal/typecheck/typecheck.go2249
-rw-r--r--src/cmd/compile/internal/typecheck/universe.go231
-rw-r--r--src/cmd/compile/internal/types/alg.go173
-rw-r--r--src/cmd/compile/internal/types/algkind_string.go48
-rw-r--r--src/cmd/compile/internal/types/fmt.go776
-rw-r--r--src/cmd/compile/internal/types/goversion.go94
-rw-r--r--src/cmd/compile/internal/types/identity.go157
-rw-r--r--src/cmd/compile/internal/types/kind_string.go62
-rw-r--r--src/cmd/compile/internal/types/pkg.go147
-rw-r--r--src/cmd/compile/internal/types/scope.go113
-rw-r--r--src/cmd/compile/internal/types/size.go709
-rw-r--r--src/cmd/compile/internal/types/sizeof_test.go48
-rw-r--r--src/cmd/compile/internal/types/sort.go19
-rw-r--r--src/cmd/compile/internal/types/structuraltype.go191
-rw-r--r--src/cmd/compile/internal/types/structuraltype_test.go135
-rw-r--r--src/cmd/compile/internal/types/sym.go150
-rw-r--r--src/cmd/compile/internal/types/sym_test.go59
-rw-r--r--src/cmd/compile/internal/types/type.go2235
-rw-r--r--src/cmd/compile/internal/types/type_test.go27
-rw-r--r--src/cmd/compile/internal/types/universe.go159
-rw-r--r--src/cmd/compile/internal/types/utils.go17
-rw-r--r--src/cmd/compile/internal/types2/api.go486
-rw-r--r--src/cmd/compile/internal/types2/api_test.go2612
-rw-r--r--src/cmd/compile/internal/types2/array.go25
-rw-r--r--src/cmd/compile/internal/types2/assignments.go535
-rw-r--r--src/cmd/compile/internal/types2/basic.go82
-rw-r--r--src/cmd/compile/internal/types2/builtins.go891
-rw-r--r--src/cmd/compile/internal/types2/builtins_test.go241
-rw-r--r--src/cmd/compile/internal/types2/call.go701
-rw-r--r--src/cmd/compile/internal/types2/chan.go35
-rw-r--r--src/cmd/compile/internal/types2/check.go566
-rw-r--r--src/cmd/compile/internal/types2/check_test.go345
-rw-r--r--src/cmd/compile/internal/types2/compilersupport.go30
-rw-r--r--src/cmd/compile/internal/types2/context.go124
-rw-r--r--src/cmd/compile/internal/types2/context_test.go69
-rw-r--r--src/cmd/compile/internal/types2/conversions.go299
-rw-r--r--src/cmd/compile/internal/types2/decl.go880
-rw-r--r--src/cmd/compile/internal/types2/errorcalls_test.go49
-rw-r--r--src/cmd/compile/internal/types2/errors.go308
-rw-r--r--src/cmd/compile/internal/types2/errors_test.go44
-rw-r--r--src/cmd/compile/internal/types2/example_test.go269
-rw-r--r--src/cmd/compile/internal/types2/expr.go1863
-rw-r--r--src/cmd/compile/internal/types2/gccgosizes.go40
-rw-r--r--src/cmd/compile/internal/types2/hilbert_test.go218
-rw-r--r--src/cmd/compile/internal/types2/importer_test.go35
-rw-r--r--src/cmd/compile/internal/types2/index.go466
-rw-r--r--src/cmd/compile/internal/types2/infer.go788
-rw-r--r--src/cmd/compile/internal/types2/initorder.go323
-rw-r--r--src/cmd/compile/internal/types2/instantiate.go255
-rw-r--r--src/cmd/compile/internal/types2/instantiate_test.go247
-rw-r--r--src/cmd/compile/internal/types2/interface.go185
-rw-r--r--src/cmd/compile/internal/types2/issues_test.go676
-rw-r--r--src/cmd/compile/internal/types2/labels.go263
-rw-r--r--src/cmd/compile/internal/types2/lookup.go521
-rw-r--r--src/cmd/compile/internal/types2/map.go24
-rw-r--r--src/cmd/compile/internal/types2/methodlist.go79
-rw-r--r--src/cmd/compile/internal/types2/methodlist_test.go40
-rw-r--r--src/cmd/compile/internal/types2/mono.go337
-rw-r--r--src/cmd/compile/internal/types2/mono_test.go89
-rw-r--r--src/cmd/compile/internal/types2/named.go413
-rw-r--r--src/cmd/compile/internal/types2/object.go597
-rw-r--r--src/cmd/compile/internal/types2/object_test.go167
-rw-r--r--src/cmd/compile/internal/types2/objset.go31
-rw-r--r--src/cmd/compile/internal/types2/operand.go389
-rw-r--r--src/cmd/compile/internal/types2/package.go80
-rw-r--r--src/cmd/compile/internal/types2/pointer.go19
-rw-r--r--src/cmd/compile/internal/types2/predicates.go467
-rw-r--r--src/cmd/compile/internal/types2/resolver.go756
-rw-r--r--src/cmd/compile/internal/types2/resolver_test.go222
-rw-r--r--src/cmd/compile/internal/types2/return.go184
-rw-r--r--src/cmd/compile/internal/types2/scope.go293
-rw-r--r--src/cmd/compile/internal/types2/selection.go143
-rw-r--r--src/cmd/compile/internal/types2/self_test.go113
-rw-r--r--src/cmd/compile/internal/types2/signature.go336
-rw-r--r--src/cmd/compile/internal/types2/sizeof_test.go64
-rw-r--r--src/cmd/compile/internal/types2/sizes.go273
-rw-r--r--src/cmd/compile/internal/types2/sizes_test.go107
-rw-r--r--src/cmd/compile/internal/types2/slice.go19
-rw-r--r--src/cmd/compile/internal/types2/stdlib_test.go332
-rw-r--r--src/cmd/compile/internal/types2/stmt.go962
-rw-r--r--src/cmd/compile/internal/types2/struct.go225
-rw-r--r--src/cmd/compile/internal/types2/subst.go422
-rw-r--r--src/cmd/compile/internal/types2/termlist.go158
-rw-r--r--src/cmd/compile/internal/types2/termlist_test.go284
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/blank.src5
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.go2274
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/builtins.src902
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/chans.go262
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/compliterals.go222
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/const0.src382
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/const1.src334
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/constdecl.src138
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/conversions.src93
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/conversions2.src313
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles.src175
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles1.src77
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles2.src98
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles3.src60
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles4.src121
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/cycles5.src200
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls0.src206
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls1.src144
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src111
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src75
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls3.src309
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls4.src199
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/decls5.src10
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/errors.src66
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr0.src180
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr1.src127
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr2.src260
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/expr3.src565
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/funcinference.go2104
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/go1_12.src34
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/go1_13.src21
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/go1_16.src13
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/go1_8.src10
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/gotos.src560
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/importC.src54
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src53
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src30
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src22
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src11
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/init0.src106
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/init1.src97
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/init2.src139
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src15
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src9
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.go2253
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/issues.src371
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/labels.src207
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/linalg.go282
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/literals.src111
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/main.go27
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/main.src9
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/map.go2113
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/map2.go2146
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/methodsets.src214
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/mtypeparams.go252
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/shifts.src398
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/slices.go268
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/stmt0.src992
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/stmt1.src259
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinference.go249
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinst.go262
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinst2.go2280
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeinstcycles.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/typeparams.go2531
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/unions.go266
-rw-r--r--src/cmd/compile/internal/types2/testdata/check/vardecl.src214
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/constraints.go280
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/functions.go2219
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/inference.go2116
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/methods.go2112
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/operations.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/types.go2315
-rw-r--r--src/cmd/compile/internal/types2/testdata/examples/typesets.go260
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue20583.src12
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203a.src14
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203b.src14
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue25838.go26
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue26390.src11
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue28251.src65
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go293
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39664.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go231
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go223
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39754.go221
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go223
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39768.go221
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go254
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue39982.go236
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40057.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go212
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go237
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go291
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue42695.src17
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go233
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue42987.src8
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43056.go231
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43087.src43
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43110.src43
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43124.src16
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43125.src8
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43190.src19
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go258
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go283
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue44799.go219
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45114.go8
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45550.go210
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go231
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45920.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46461.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go240
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go237
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47747.go271
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47796.go233
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47818.go257
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go228
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47968.go221
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48008.go260
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48018.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48048.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48082.src7
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48083.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48136.go236
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go210
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48312.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48472.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48529.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48582.go229
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go222
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48695.go214
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48703.go227
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48712.go241
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48819.src15
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48951.go221
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48962.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue48974.go222
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49003.go10
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49005.go31
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49043.go224
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49112.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49179.go237
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49242.go227
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49247.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49276.go46
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49296.go220
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49439.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go225
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49541.go245
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49579.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49592.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49602.go219
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49705.go214
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49739.go223
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue49864.go29
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50259.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50276.go239
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50281.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50321.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50372.go27
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go268
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50426.go244
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50450.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50516.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50646.go226
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go247
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50779.go223
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50782.go247
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50816.go223
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50833.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50912.go219
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50918.go21
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50929.go268
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue50965.go17
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51048.go211
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go18
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2164
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go230
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go227
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go246
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go216
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go218
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go13
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go224
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go17
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51472.go254
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51509.go7
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51578.go217
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51593.go213
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51607.go265
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue51658.go239
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue52529.go215
-rw-r--r--src/cmd/compile/internal/types2/testdata/fixedbugs/issue6977.src82
-rw-r--r--src/cmd/compile/internal/types2/testdata/manual.go28
-rw-r--r--src/cmd/compile/internal/types2/testdata/spec/assignability.go2264
-rw-r--r--src/cmd/compile/internal/types2/testdata/spec/comparisons.go2120
-rw-r--r--src/cmd/compile/internal/types2/testdata/spec/conversions.go2178
-rw-r--r--src/cmd/compile/internal/types2/tuple.go34
-rw-r--r--src/cmd/compile/internal/types2/type.go124
-rw-r--r--src/cmd/compile/internal/types2/typelists.go69
-rw-r--r--src/cmd/compile/internal/types2/typeparam.go156
-rw-r--r--src/cmd/compile/internal/types2/typeset.go433
-rw-r--r--src/cmd/compile/internal/types2/typeset_test.go80
-rw-r--r--src/cmd/compile/internal/types2/typestring.go483
-rw-r--r--src/cmd/compile/internal/types2/typestring_test.go176
-rw-r--r--src/cmd/compile/internal/types2/typeterm.go166
-rw-r--r--src/cmd/compile/internal/types2/typeterm_test.go239
-rw-r--r--src/cmd/compile/internal/types2/typexpr.go568
-rw-r--r--src/cmd/compile/internal/types2/unify.go582
-rw-r--r--src/cmd/compile/internal/types2/union.go192
-rw-r--r--src/cmd/compile/internal/types2/universe.go276
-rw-r--r--src/cmd/compile/internal/types2/validtype.go147
-rw-r--r--src/cmd/compile/internal/types2/version.go81
-rw-r--r--src/cmd/compile/internal/walk/assign.go719
-rw-r--r--src/cmd/compile/internal/walk/builtin.go708
-rw-r--r--src/cmd/compile/internal/walk/closure.go276
-rw-r--r--src/cmd/compile/internal/walk/compare.go491
-rw-r--r--src/cmd/compile/internal/walk/complit.go676
-rw-r--r--src/cmd/compile/internal/walk/convert.go474
-rw-r--r--src/cmd/compile/internal/walk/expr.go1024
-rw-r--r--src/cmd/compile/internal/walk/order.go1507
-rw-r--r--src/cmd/compile/internal/walk/race.go34
-rw-r--r--src/cmd/compile/internal/walk/range.go475
-rw-r--r--src/cmd/compile/internal/walk/select.go291
-rw-r--r--src/cmd/compile/internal/walk/stmt.go231
-rw-r--r--src/cmd/compile/internal/walk/switch.go597
-rw-r--r--src/cmd/compile/internal/walk/temp.go40
-rw-r--r--src/cmd/compile/internal/walk/walk.go402
-rw-r--r--src/cmd/compile/internal/wasm/ssa.go511
-rw-r--r--src/cmd/compile/internal/x86/galign.go39
-rw-r--r--src/cmd/compile/internal/x86/ggen.go50
-rw-r--r--src/cmd/compile/internal/x86/ssa.go931
-rw-r--r--src/cmd/compile/main.go57
799 files changed, 484668 insertions, 0 deletions
diff --git a/src/cmd/compile/README.md b/src/cmd/compile/README.md
new file mode 100644
index 0000000..25fa818
--- /dev/null
+++ b/src/cmd/compile/README.md
@@ -0,0 +1,117 @@
+<!---
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+-->
+
+## Introduction to the Go compiler
+
+`cmd/compile` contains the main packages that form the Go compiler. The compiler
+may be logically split into four phases, which we will briefly describe alongside
+the list of packages that contain their code.
+
+You may sometimes hear the terms "front-end" and "back-end" when referring to
+the compiler. Roughly speaking, these translate to the first two and last two
+phases we are going to list here. A third term, "middle-end", often refers to
+much of the work that happens in the second phase.
+
+Note that the `go/*` family of packages, such as `go/parser` and `go/types`,
+have no relation to the compiler. Since the compiler was initially written in C,
+the `go/*` packages were developed to enable writing tools working with Go code,
+such as `gofmt` and `vet`.
+
+It should be clarified that the name "gc" stands for "Go compiler", and has
+little to do with uppercase "GC", which stands for garbage collection.
+
+### 1. Parsing
+
+* `cmd/compile/internal/syntax` (lexer, parser, syntax tree)
+
+In the first phase of compilation, source code is tokenized (lexical analysis),
+parsed (syntax analysis), and a syntax tree is constructed for each source
+file.
+
+Each syntax tree is an exact representation of the respective source file, with
+nodes corresponding to the various elements of the source such as expressions,
+declarations, and statements. The syntax tree also includes position information
+which is used for error reporting and the creation of debugging information.
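+
+As an illustration only, the separate `go/parser` and `go/ast` packages (which, as
+noted in the introduction, are not the packages the compiler itself uses; the
+compiler uses its internal `syntax` package) expose the same idea of turning source
+text into a position-annotated syntax tree. A minimal sketch:
+
+```go
+package main
+
+import (
+	"go/ast"
+	"go/parser"
+	"go/token"
+)
+
+func main() {
+	fset := token.NewFileSet() // records position information for error reporting
+	src := "package p; func add(a, b int) int { return a + b }"
+	f, err := parser.ParseFile(fset, "p.go", src, 0)
+	if err != nil {
+		panic(err)
+	}
+	// Dump the tree: declarations, statements, expressions, and their positions.
+	ast.Print(fset, f)
+}
+```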
+
+### 2. Type-checking and AST transformations
+
+* `cmd/compile/internal/gc` (create compiler AST, type checking, AST transformations)
+
+The gc package includes its own AST definition carried over from when it was written
+in C. All of its code is written in terms of this AST, so the first thing that the gc
+package must do is convert the syntax package's syntax tree to the compiler's
+AST representation. This extra step may be refactored away in the future.
+
+The gc AST is then type-checked. The first steps are name resolution and type
+inference, which determine which object belongs to which identifier, and what
+type each expression has. Type-checking includes certain extra checks, such as
+"declared and not used" as well as determining whether or not a function
+terminates.
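+
+The standalone `go/types` package (again, separate from the compiler's internal
+type checker) can be used to illustrate this step. The hypothetical snippet below
+type-checks a tiny package and surfaces the "declared and not used" check
+mentioned above:
+
+```go
+package main
+
+import (
+	"fmt"
+	"go/ast"
+	"go/importer"
+	"go/parser"
+	"go/token"
+	"go/types"
+)
+
+func main() {
+	src := "package p; func f() { x := 1 }" // x is declared but never used
+	fset := token.NewFileSet()
+	f, err := parser.ParseFile(fset, "p.go", src, 0)
+	if err != nil {
+		panic(err)
+	}
+	conf := types.Config{Importer: importer.Default()}
+	_, err = conf.Check("p", fset, []*ast.File{f}, nil)
+	// Prints a "declared but not used" error for x (exact wording varies by Go version).
+	fmt.Println(err)
+}
+```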
+
+Certain transformations are also done on the AST. Some nodes are refined based
+on type information, such as string additions being split from the arithmetic
+addition node type. Some other examples are dead code elimination, function call
+inlining, and escape analysis.
+
+### 3. Generic SSA
+
+* `cmd/compile/internal/gc` (converting to SSA)
+* `cmd/compile/internal/ssa` (SSA passes and rules)
+
+
+In this phase, the AST is converted into Static Single Assignment (SSA) form, a
+lower-level intermediate representation with specific properties that make it
+easier to implement optimizations and to eventually generate machine code from
+it.
+
+During this conversion, function intrinsics are applied. These are special
+functions that the compiler has been taught to replace with heavily optimized
+code on a case-by-case basis.
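+
+A commonly cited example is the `math/bits` package: calls like the one in the
+sketch below are candidates for intrinsic replacement, so that on amd64, for
+instance, the compiler can typically emit a single population-count instruction
+instead of a function call (this is an illustration, not an exhaustive list of
+intrinsics):
+
+```go
+package main
+
+import (
+	"fmt"
+	"math/bits"
+)
+
+// popcount is ordinary Go, but the call to bits.OnesCount64 is recognized by the
+// compiler and is typically replaced with optimized machine code rather than a call.
+func popcount(x uint64) int {
+	return bits.OnesCount64(x)
+}
+
+func main() {
+	fmt.Println(popcount(0b1011)) // 3
+}
+```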
+
+Certain nodes are also lowered into simpler components during the AST to SSA
+conversion, so that the rest of the compiler can work with them. For instance,
+the copy builtin is replaced by memory moves, and range loops are rewritten into
+for loops. Some of these currently happen before the conversion to SSA due to
+historical reasons, but the long-term plan is to move all of them here.
+
+Then, a series of machine-independent passes and rules are applied. These do not
+concern any single computer architecture, and thus run on all `GOARCH` variants.
+These passes include dead code elimination, removal of
+unneeded nil checks, and removal of unused branches. The generic rewrite rules
+mainly concern expressions, such as replacing some expressions with constant
+values, and optimizing multiplications and float operations.
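+
+To see these passes applied to a real function, the compiler can dump the SSA form
+of a single function after every pass. Building a file like the sketch below with
+the `GOSSAFUNC` environment variable set (for example `GOSSAFUNC=add go build add.go`,
+where `add.go` is a hypothetical file name) writes an `ssa.html` report showing the
+function before and after each pass:
+
+```go
+package main
+
+// add is deliberately trivial so that the effect of individual SSA passes
+// (dead code elimination, constant folding, lowering) is easy to follow
+// in the generated ssa.html report.
+func add(a, b int) int {
+	return a + b
+}
+
+func main() {
+	println(add(1, 2))
+}
+```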
+
+### 4. Generating machine code
+
+* `cmd/compile/internal/ssa` (SSA lowering and arch-specific passes)
+* `cmd/internal/obj` (machine code generation)
+
+The machine-dependent phase of the compiler begins with the "lower" pass, which
+rewrites generic values into their machine-specific variants. For example, on
+amd64, memory operands are possible, so many load-store operations may be combined.
+
+Note that the lower pass runs all machine-specific rewrite rules, and thus it
+currently applies lots of optimizations too.
+
+Once the SSA has been "lowered" and is more specific to the target architecture,
+the final code optimization passes are run. This includes yet another dead code
+elimination pass, moving values closer to their uses, the removal of local
+variables that are never read from, and register allocation.
+
+Other important pieces of work done as part of this step include stack frame
+layout, which assigns stack offsets to local variables, and pointer liveness
+analysis, which computes which on-stack pointers are live at each GC safe point.
+
+At the end of the SSA generation phase, Go functions have been transformed into
+a series of obj.Prog instructions. These are passed to the assembler
+(`cmd/internal/obj`), which turns them into machine code and writes out the
+final object file. The object file will also contain reflect data, export data,
+and debugging information.
+
+### Further reading
+
+To dig deeper into how the SSA package works, including its passes and rules,
+head to [cmd/compile/internal/ssa/README.md](internal/ssa/README.md).
diff --git a/src/cmd/compile/abi-internal.md b/src/cmd/compile/abi-internal.md
new file mode 100644
index 0000000..53eaa84
--- /dev/null
+++ b/src/cmd/compile/abi-internal.md
@@ -0,0 +1,870 @@
+# Go internal ABI specification
+
+This document describes Go’s internal application binary interface
+(ABI), known as ABIInternal.
+Go's ABI defines the layout of data in memory and the conventions for
+calling between Go functions.
+This ABI is *unstable* and will change between Go versions.
+If you’re writing assembly code, please instead refer to Go’s
+[assembly documentation](/doc/asm.html), which describes Go’s stable
+ABI, known as ABI0.
+
+All functions defined in Go source follow ABIInternal.
+However, ABIInternal and ABI0 functions are able to call each other
+through transparent *ABI wrappers*, described in the [internal calling
+convention proposal](https://golang.org/design/27539-internal-abi).
+
+Go uses a common ABI design across all architectures.
+We first describe the common ABI, and then cover per-architecture
+specifics.
+
+*Rationale*: For the reasoning behind using a common ABI across
+architectures instead of the platform ABI, see the [register-based Go
+calling convention proposal](https://golang.org/design/40724-register-calling).
+
+## Memory layout
+
+Go's built-in types have the following sizes and alignments.
+Many, though not all, of these sizes are guaranteed by the [language
+specification](/doc/go_spec.html#Size_and_alignment_guarantees).
+Those that aren't guaranteed may change in future versions of Go (for
+example, we've considered changing the alignment of int64 on 32-bit).
+
+| Type | 64-bit | | 32-bit | |
+| --- | --- | --- | --- | --- |
+| | Size | Align | Size | Align |
+| bool, uint8, int8 | 1 | 1 | 1 | 1 |
+| uint16, int16 | 2 | 2 | 2 | 2 |
+| uint32, int32 | 4 | 4 | 4 | 4 |
+| uint64, int64 | 8 | 8 | 8 | 4 |
+| int, uint | 8 | 8 | 4 | 4 |
+| float32 | 4 | 4 | 4 | 4 |
+| float64 | 8 | 8 | 8 | 4 |
+| complex64 | 8 | 4 | 8 | 4 |
+| complex128 | 16 | 8 | 16 | 4 |
+| uintptr, *T, unsafe.Pointer | 8 | 8 | 4 | 4 |
+
+The types `byte` and `rune` are aliases for `uint8` and `int32`,
+respectively, and hence have the same size and alignment as these
+types.
+
+The layout of `map`, `chan`, and `func` types is equivalent to *T.
+
+To describe the layout of the remaining composite types, we first
+define the layout of a *sequence* S of N fields with types
+t<sub>1</sub>, t<sub>2</sub>, ..., t<sub>N</sub>.
+We define the byte offset at which each field begins relative to a
+base address of 0, as well as the size and alignment of the sequence
+as follows:
+
+```
+offset(S, i) = 0  if i = 1
+             = align(offset(S, i-1) + sizeof(t_(i-1)), alignof(t_i))
+alignof(S)   = 1  if N = 0
+             = max(alignof(t_i) | 1 <= i <= N)
+sizeof(S)    = 0  if N = 0
+             = align(offset(S, N) + sizeof(t_N), alignof(S))
+```
+
+Where sizeof(T) and alignof(T) are the size and alignment of type T,
+respectively, and align(x, y) rounds x up to a multiple of y.
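+
+As a worked example (this only applies the formulas above), the sequence
+int8, int64, int16 lays out as follows on a 64-bit platform:
+
+```
+offset(S, 1) = 0                        // int8
+offset(S, 2) = align(0+1, 8)   = 8      // int64: rounded up to alignment 8
+offset(S, 3) = align(8+8, 2)   = 16     // int16
+alignof(S)   = max(1, 8, 2)    = 8
+sizeof(S)    = align(16+2, 8)  = 24
+```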
+
+The `interface{}` type is a sequence of 1. a pointer to the runtime type
+description for the interface's dynamic type and 2. an `unsafe.Pointer`
+data field.
+Any other interface type (besides the empty interface) is a sequence
+of 1. a pointer to the runtime "itab" that gives the method pointers and
+the type of the data field and 2. an `unsafe.Pointer` data field.
+An interface can be "direct" or "indirect" depending on the dynamic
+type: a direct interface stores the value directly in the data field,
+and an indirect interface stores a pointer to the value in the data
+field.
+An interface can only be direct if the value consists of a single
+pointer word.
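+
+For example (illustrative, 64-bit): both interface values below occupy two
+words; what differs is whether the data field holds the value itself or a
+pointer to it.
+
+```
+var p = new(int)
+var e1 interface{} = p  // direct: the data field holds p itself
+var e2 interface{} = 42 // indirect: the data field points to a copy of 42
+```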
+
+An array type `[N]T` is a sequence of N fields of type T.
+
+The slice type `[]T` is a sequence of a `*[cap]T` pointer to the slice
+backing store, an `int` giving the `len` of the slice, and an `int`
+giving the `cap` of the slice.
+
+The `string` type is a sequence of a `*[len]byte` pointer to the
+string backing store, and an `int` giving the `len` of the string.
+
+A struct type `struct { f1 t1; ...; fM tM }` is laid out as the
+sequence t1, ..., tM, tP, where tP is either:
+
+- Type `byte` if sizeof(tM) = 0 and any of sizeof(t*i*) ≠ 0.
+- Empty (size 0 and align 1) otherwise.
+
+The padding byte prevents creating a past-the-end pointer by taking
+the address of the final, empty fM field.
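+
+For example, on a 64-bit platform the trailing padding byte (plus the usual
+rounding to the struct's alignment) is why these two structs, which differ
+only in field order, have different sizes:
+
+```
+import "unsafe"
+
+const (
+	s1 = unsafe.Sizeof(struct{ x int64; y struct{} }{}) // 16: padding byte after y, rounded up to alignment 8
+	s2 = unsafe.Sizeof(struct{ y struct{}; x int64 }{}) // 8: no trailing zero-sized field
+)
+```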
+
+Note that user-written assembly code should generally not depend on Go
+type layout and should instead use the constants defined in
+[`go_asm.h`](/doc/asm.html#data-offsets).
+
+## Function call argument and result passing
+
+Function calls pass arguments and results using a combination of the
+stack and machine registers.
+Each argument or result is passed either entirely in registers or
+entirely on the stack.
+Because access to registers is generally faster than access to the
+stack, arguments and results are preferentially passed in registers.
+However, any argument or result that contains a non-trivial array or
+does not fit entirely in the remaining available registers is passed
+on the stack.
+
+Each architecture defines a sequence of integer registers and a
+sequence of floating-point registers.
+At a high level, arguments and results are recursively broken down
+into values of base types and these base values are assigned to
+registers from these sequences.
+
+Arguments and results can share the same registers, but do not share
+the same stack space.
+Beyond the arguments and results passed on the stack, the caller also
+reserves spill space on the stack for all register-based arguments
+(but does not populate this space).
+
+The receiver, arguments, and results of function or method F are
+assigned to registers or the stack using the following algorithm:
+
+1. Let NI and NFP be the length of integer and floating-point register
+ sequences defined by the architecture.
+ Let I and FP be 0; these are the indexes of the next integer and
+ floating-pointer register.
+ Let S, the type sequence defining the stack frame, be empty.
+1. If F is a method, assign F’s receiver.
+1. For each argument A of F, assign A.
+1. Add a pointer-alignment field to S. This has size 0 and the same
+ alignment as `uintptr`.
+1. Reset I and FP to 0.
+1. For each result R of F, assign R.
+1. Add a pointer-alignment field to S.
+1. For each register-assigned receiver and argument of F, let T be its
+ type and add T to the stack sequence S.
+ This is the argument's (or receiver's) spill space and will be
+ uninitialized at the call.
+1. Add a pointer-alignment field to S.
+
+Assigning a receiver, argument, or result V of underlying type T works
+as follows:
+
+1. Remember I and FP.
+1. If T has zero size, add T to the stack sequence S and return.
+1. Try to register-assign V.
+1. If step 3 failed, reset I and FP to the values from step 1, add T
+ to the stack sequence S, and assign V to this field in S.
+
+Register-assignment of a value V of underlying type T works as follows:
+
+1. If T is a boolean or integral type that fits in an integer
+ register, assign V to register I and increment I.
+1. If T is an integral type that fits in two integer registers, assign
+ the least significant and most significant halves of V to registers
+ I and I+1, respectively, and increment I by 2
+1. If T is a floating-point type and can be represented without loss
+ of precision in a floating-point register, assign V to register FP
+ and increment FP.
+1. If T is a complex type, recursively register-assign its real and
+ imaginary parts.
+1. If T is a pointer type, map type, channel type, or function type,
+ assign V to register I and increment I.
+1. If T is a string type, interface type, or slice type, recursively
+ register-assign V’s components (2 for strings and interfaces, 3 for
+ slices).
+1. If T is a struct type, recursively register-assign each field of V.
+1. If T is an array type of length 0, do nothing.
+1. If T is an array type of length 1, recursively register-assign its
+ one element.
+1. If T is an array type of length > 1, fail.
+1. If I > NI or FP > NFP, fail.
+1. If any recursive assignment above fails, fail.
+
+The above algorithm produces an assignment of each receiver, argument,
+and result to registers or to a field in the stack sequence.
+The final stack sequence looks like: stack-assigned receiver,
+stack-assigned arguments, pointer-alignment, stack-assigned results,
+pointer-alignment, spill space for each register-assigned argument,
+pointer-alignment.
+The following diagram shows what this stack frame looks like on the
+stack, using the typical convention where address 0 is at the bottom:
+
+    +------------------------------+
+    |             . . .            |
+    | 2nd reg argument spill space |
+    | 1st reg argument spill space |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned result    |
+    | 1st stack-assigned result    |
+    | <pointer-sized alignment>    |
+    |             . . .            |
+    | 2nd stack-assigned argument  |
+    | 1st stack-assigned argument  |
+    | stack-assigned receiver      |
+    +------------------------------+ ↓ lower addresses
+
+To perform a call, the caller reserves space starting at the lowest
+address in its stack frame for the call stack frame, stores arguments
+in the registers and argument stack fields determined by the above
+algorithm, and performs the call.
+At the time of a call, spill space, result stack fields, and result
+registers are left uninitialized.
+Upon return, the callee must have stored results to all result
+registers and result stack fields determined by the above algorithm.
+
+There are no callee-save registers, so a call may overwrite any
+register that doesn’t have a fixed meaning, including argument
+registers.
+
+### Example
+
+Consider the function `func f(a1 uint8, a2 [2]uintptr, a3 uint8) (r1
+struct { x uintptr; y [2]uintptr }, r2 string)` on a 64-bit
+architecture with hypothetical integer registers R0–R9.
+
+On entry, `a1` is assigned to `R0`, `a3` is assigned to `R1` and the
+stack frame is laid out in the following sequence:
+
+    a2      [2]uintptr
+    r1.x    uintptr
+    r1.y    [2]uintptr
+    a1Spill uint8
+    a3Spill uint8
+    _       [6]uint8  // alignment padding
+
+In the stack frame, only the `a2` field is initialized on entry; the
+rest of the frame is left uninitialized.
+
+On exit, `r2.base` is assigned to `R0`, `r2.len` is assigned to `R1`,
+and `r1.x` and `r1.y` are initialized in the stack frame.
+
+There are several things to note in this example.
+First, `a2` and `r1` are stack-assigned because they contain arrays.
+The other arguments and results are register-assigned.
+Result `r2` is decomposed into its components, which are individually
+register-assigned.
+On the stack, the stack-assigned arguments appear at lower addresses
+than the stack-assigned results, which appear at lower addresses than
+the argument spill area.
+Only arguments, not results, are assigned a spill area on the stack.
+
+### Rationale
+
+Each base value is assigned to its own register to optimize
+construction and access.
+An alternative would be to pack multiple sub-word values into
+registers, or to simply map an argument's in-memory layout to
+registers (this is common in C ABIs), but this typically adds cost to
+pack and unpack these values.
+Modern architectures have more than enough registers to pass all
+arguments and results this way for nearly all functions (see the
+appendix), so there’s little downside to spreading base values across
+registers.
+
+Arguments that can’t be fully assigned to registers are passed
+entirely on the stack in case the callee takes the address of that
+argument.
+If an argument could be split across the stack and registers and the
+callee took its address, it would need to be reconstructed in memory,
+a process that would be proportional to the size of the argument.
+
+Non-trivial arrays are always passed on the stack because indexing
+into an array typically requires a computed offset, which generally
+isn’t possible with registers.
+Arrays in general are rare in function signatures (only 0.7% of
+functions in the Go 1.15 standard library and 0.2% in kubelet).
+We considered allowing array fields to be passed on the stack while
+the rest of an argument’s fields are passed in registers, but this
+creates the same problems as other large structs if the callee takes
+the address of an argument, and would benefit <0.1% of functions in
+kubelet (and even these very little).
+
+We make exceptions for 0 and 1-element arrays because these don’t
+require computed offsets, and 1-element arrays are already decomposed
+in the compiler’s SSA representation.
+
+The ABI assignment algorithm above is equivalent to Go’s stack-based
+ABI0 calling convention if there are zero architecture registers.
+This is intended to ease the transition to the register-based internal
+ABI and make it easy for the compiler to generate either calling
+convention.
+An architecture may still define register meanings that aren’t
+compatible with ABI0, but these differences should be easy to account
+for in the compiler.
+
+The assignment algorithm assigns zero-sized values to the stack
+(assignment step 2) in order to support ABI0-equivalence.
+While these values take no space themselves, they do result in
+alignment padding on the stack in ABI0.
+Without this step, the internal ABI would register-assign zero-sized
+values even on architectures that provide no argument registers
+because they don't consume any registers, and hence not add alignment
+padding to the stack.
+
+The algorithm reserves spill space for arguments in the caller’s frame
+so that the compiler can generate a stack growth path that spills into
+this reserved space.
+If the callee has to grow the stack, it may not be able to reserve
+enough additional stack space in its own frame to spill these, which
+is why it’s important that the caller do so.
+These slots also act as the home location if these arguments need to
+be spilled for any other reason, which simplifies traceback printing.
+
+There are several options for how to lay out the argument spill space.
+We chose to lay out each argument according to its type's usual memory
+layout but to separate the spill space from the regular argument
+space.
+Using the usual memory layout simplifies the compiler because it
+already understands this layout.
+Also, if a function takes the address of a register-assigned argument,
+the compiler must spill that argument to memory in its usual memory
+layout and it's more convenient to use the argument spill space for
+this purpose.
+
+Alternatively, the spill space could be structured around argument
+registers.
+In this approach, the stack growth spill path would spill each
+argument register to a register-sized stack word.
+However, if the function takes the address of a register-assigned
+argument, the compiler would have to reconstruct it in memory layout
+elsewhere on the stack.
+
+The spill space could also be interleaved with the stack-assigned
+arguments so the arguments appear in order whether they are register-
+or stack-assigned.
+This would be close to ABI0, except that register-assigned arguments
+would be uninitialized on the stack and there's no need to reserve
+stack space for register-assigned results.
+We expect separating the spill space to perform better because of
+memory locality.
+Separating the space is also potentially simpler for `reflect` calls
+because this allows `reflect` to summarize the spill space as a single
+number.
+Finally, the long-term intent is to remove reserved spill slots
+entirely – allowing most functions to be called without any stack
+setup and easing the introduction of callee-save registers – and
+separating the spill space makes that transition easier.
+
+## Closures
+
+A func value (e.g., `var x func()`) is a pointer to a closure object.
+A closure object begins with a pointer-sized program counter
+representing the entry point of the function, followed by zero or more
+bytes containing the closed-over environment.
+
+Closure calls follow the same conventions as static function and
+method calls, with one addition. Each architecture specifies a
+*closure context pointer* register and calls to closures store the
+address of the closure object in the closure context pointer register
+prior to the call.
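+
+As a conceptual sketch (not literal compiler output), the closure object for
+the function value returned below could be laid out like the struct in the
+comment, with the entry-point word first and the captured environment after it:
+
+```
+func makeAdder(n int) func(int) int {
+	return func(x int) int { return x + n }
+}
+
+// Conceptual layout of the closure object that the returned func value
+// points to:
+//
+//	struct {
+//		pc uintptr // entry point of the anonymous function
+//		n  int     // captured variable
+//	}
+```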
+
+## Software floating-point mode
+
+In "softfloat" mode, the ABI simply treats the hardware as having zero
+floating-point registers.
+As a result, any arguments containing floating-point values will be
+passed on the stack.
+
+*Rationale*: Softfloat mode is about compatibility over performance
+and is not commonly used.
+Hence, we keep the ABI as simple as possible in this case, rather than
+adding additional rules for passing floating-point values in integer
+registers.
+
+## Architecture specifics
+
+This section describes per-architecture register mappings, as well as
+other per-architecture special cases.
+
+### amd64 architecture
+
+The amd64 architecture uses the following sequence of 9 registers for
+integer arguments and results:
+
+ RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+
+It uses X0 – X14 for floating-point arguments and results.
+
+*Rationale*: These sequences are chosen from the available registers
+to be relatively easy to remember.
+
+Registers R12 and R13 are permanent scratch registers.
+R15 is a scratch register except in dynamically linked binaries.
+
+*Rationale*: Some operations such as stack growth and reflection calls
+need dedicated scratch registers in order to manipulate call frames
+without corrupting arguments or results.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| RBP | Frame pointer | Same | Same |
+| RDX | Closure context pointer | Scratch | Scratch |
+| R12 | Scratch | Scratch | Scratch |
+| R13 | Scratch | Scratch | Scratch |
+| R14 | Current goroutine | Same | Same |
+| R15 | GOT reference temporary if dynlink | Same | Same |
+| X15 | Zero value (*) | Same | Scratch |
+
+(*) Except on Plan 9, where X15 is a scratch register because SSE
+registers cannot be used in note handlers (so the compiler avoids
+using them except when absolutely necessary).
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention except for R14 and X15, which will have
+to be restored on transitions from ABI0 code to ABIInternal code.
+In ABI0, these are undefined, so transitions from ABIInternal to ABI0
+can ignore these registers.
+
+*Rationale*: For the current goroutine pointer, we chose a register
+that requires an additional REX byte.
+While this adds one byte to every function prologue, it is hardly ever
+accessed outside the function prologue and we expect making more
+single-byte registers available to be a net win.
+
+*Rationale*: We could allow R14 (the current goroutine pointer) to be
+a scratch register in function bodies because it can always be
+restored from TLS on amd64.
+However, we designate it as a fixed register for simplicity and for
+consistency with other architectures that may not have a copy of the
+current goroutine pointer in TLS.
+
+*Rationale*: We designate X15 as a fixed zero register because
+functions often have to bulk zero their stack frames, and this is more
+efficient with a designated zero register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 8 bytes.
+
+The amd64 architecture does not use a link register.
+
+A function's stack frame is laid out as follows:
+
+    +------------------------------+
+    | return PC                    |
+    | RBP on entry                 |
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is pushed as part of the standard amd64 `CALL`
+operation.
+On entry, a function subtracts from RSP to open its stack frame and
+saves the value of RBP directly below the return PC.
+A leaf function that does not require any stack space may omit the
+saved RBP.
+
+The Go ABI's use of RBP as a frame pointer register is compatible with
+amd64 platform conventions so that Go can inter-operate with platform
+debuggers and profilers.
+
+#### Flags
+
+The direction flag (D) is always cleared (set to the “forward”
+direction) at a call.
+The arithmetic status flags are treated like scratch registers and not
+preserved across calls.
+All other bits in RFLAGS are system flags.
+
+At function calls and returns, the CPU is in x87 mode (not MMX
+technology mode).
+
+*Rationale*: Go on amd64 does not use either the x87 registers or MMX
+registers. Hence, we follow the SysV platform conventions in order to
+simplify transitions to and from the C ABI.
+
+At calls, the MXCSR control bits are always set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| FZ | 15 | 0 | Do not flush to zero |
+| RC | 14/13 | 0 (RN) | Round to nearest |
+| PM | 12 | 1 | Precision masked |
+| UM | 11 | 1 | Underflow masked |
+| OM | 10 | 1 | Overflow masked |
+| ZM | 9 | 1 | Divide-by-zero masked |
+| DM | 8 | 1 | Denormal operations masked |
+| IM | 7 | 1 | Invalid operations masked |
+| DAZ | 6 | 0 | Do not zero de-normals |
+
+The MXCSR status bits are callee-save.
+
+*Rationale*: Having a fixed MXCSR control configuration allows Go
+functions to use SSE operations without modifying or saving the MXCSR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+The above fixed configuration matches the process initialization
+control bits specified by the ELF AMD64 ABI.
+
+The x87 floating-point control word is not used by Go on amd64.
+
+### arm64 architecture
+
+The arm64 architecture uses R0 – R15 for integer arguments and results.
+
+It uses F0 – F15 for floating-point arguments and results.
+
+*Rationale*: 16 integer registers and 16 floating-point registers are
+more than enough for passing arguments and results for practically all
+functions (see Appendix). While there are more registers available,
+using more registers provides little benefit. Additionally, it will add
+overhead on code paths where the number of arguments is not statically
+known (e.g. reflect call), and will consume more stack space when there
+is only limited stack space available to fit in the nosplit limit.
+
+Registers R16 and R17 are permanent scratch registers. They are also
+used as scratch registers by the linker (Go linker and external
+linker) in trampolines.
+
+Register R18 is reserved and never used. It is reserved for the OS
+on some platforms (e.g. macOS).
+
+Registers R19 – R25 are permanent scratch registers. In addition,
+R27 is a permanent scratch register used by the assembler when
+expanding instructions.
+
+Floating-point registers F16 – F31 are also permanent scratch
+registers.
+
+Special-purpose registers are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| RSP | Stack pointer | Same | Same |
+| R30 | Link register | Same | Scratch (non-leaf functions) |
+| R29 | Frame pointer | Same | Same |
+| R28 | Current goroutine | Same | Same |
+| R27 | Scratch | Scratch | Scratch |
+| R26 | Closure context pointer | Scratch | Scratch |
+| R18 | Reserved (not used) | Same | Same |
+| ZR | Zero value | Same | Same |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+*Rationale*: The link register, R30, holds the function return
+address at the function entry. For functions that have frames
+(including most non-leaf functions), R30 is saved to stack in the
+function prologue and restored in the epilogue. Within the function
+body, R30 can be used as a scratch register.
+
+*Implementation note*: Registers with fixed meaning at calls but not
+in function bodies must be initialized by "injected" calls such as
+signal-based panics.
+
+#### Stack layout
+
+The stack pointer, RSP, grows down and is always aligned to 16 bytes.
+
+*Rationale*: The arm64 architecture requires the stack pointer to be
+16-byte aligned.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+    +------------------------------+
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    | return PC                    | ← RSP points to
+    | frame pointer on entry       |
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, R30, as part of the
+arm64 `CALL` operation.
+
+On entry, a function subtracts from RSP to open its stack frame, and
+saves the values of R30 and R29 at the bottom of the frame.
+Specifically, R30 is saved at 0(RSP) and R29 is saved at -8(RSP),
+after RSP is updated.
+
+A leaf function that does not require any stack space may omit the
+saved R30 and R29.
+
+The Go ABI's use of R29 as a frame pointer register is compatible with
+arm64 platform conventions so that Go can inter-operate with platform
+debuggers and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The arithmetic status flags (NZCV) are treated like scratch registers
+and not preserved across calls.
+All other bits in PSTATE are system flags and are not modified by Go.
+
+The floating-point status register (FPSR) is treated like a scratch
+register and is not preserved across calls.
+
+At calls, the floating-point control register (FPCR) bits are always
+set as follows:
+
+| Flag | Bit | Value | Meaning |
+| --- | --- | --- | --- |
+| DN | 25 | 0 | Propagate NaN operands |
+| FZ | 24 | 0 | Do not flush to zero |
+| RC | 23/22 | 0 (RN) | Round to nearest, choose even if tied |
+| IDE | 15 | 0 | Denormal operations trap disabled |
+| IXE | 12 | 0 | Inexact trap disabled |
+| UFE | 11 | 0 | Underflow trap disabled |
+| OFE | 10 | 0 | Overflow trap disabled |
+| DZE | 9 | 0 | Divide-by-zero trap disabled |
+| IOE | 8 | 0 | Invalid operations trap disabled |
+| NEP | 2 | 0 | Scalar operations do not affect higher elements in vector registers |
+| AH | 1 | 0 | No alternate handling of de-normal inputs |
+| FIZ | 0 | 0 | Do not zero de-normals |
+
+*Rationale*: Having a fixed FPCR control configuration allows Go
+functions to use floating-point and vector (SIMD) operations without
+modifying or saving the FPCR.
+Functions are allowed to modify it between calls (as long as they
+restore it), but as of this writing Go code never does.
+
+### ppc64 architecture
+
+The ppc64 architecture uses R3 – R10 and R14 – R17 for integer arguments
+and results.
+
+It uses F1 – F12 for floating-point arguments and results.
+
+Register R31 is a permanent scratch register in Go.
+
+Special-purpose registers used within Go generated code and Go
+assembly code are as follows:
+
+| Register | Call meaning | Return meaning | Body meaning |
+| --- | --- | --- | --- |
+| R0 | Zero value | Same | Same |
+| R1 | Stack pointer | Same | Same |
+| R2 | TOC register | Same | Same |
+| R11 | Closure context pointer | Scratch | Scratch |
+| R12 | Function address on indirect calls | Scratch | Scratch |
+| R13 | TLS pointer | Same | Same |
+| R20,R21 | Scratch | Scratch | Used by duffcopy, duffzero |
+| R30 | Current goroutine | Same | Same |
+| R31 | Scratch | Scratch | Scratch |
+| LR | Link register | Link register | Scratch |
+
+*Rationale*: These register meanings are compatible with Go’s
+stack-based calling convention.
+
+The link register, LR, holds the function return
+address at the function entry and is set to the correct return
+address before exiting the function. It is also used
+in some cases as the function address when doing an indirect call.
+
+The register R2 contains the address of the TOC (table of contents) which
+contains data or code addresses used when generating position independent
+code. Non-Go code generated when using cgo contains TOC-relative addresses
+which depend on R2 holding a valid TOC. Go code compiled with -shared or
+-dynlink initializes and maintains R2 and uses it in some cases for
+function calls; Go code compiled without these options does not modify R2.
+
+When making a function call R12 contains the function address for use by the
+code to generate R2 at the beginning of the function. R12 can be used for
+other purposes within the body of the function, such as trampoline generation.
+
+R20 and R21 are used in duffcopy and duffzero which could be generated
+before arguments are saved so should not be used for register arguments.
+
+The Count register CTR can be used as the call target for some branch instructions.
+It holds the return address when preemption has occurred.
+
+On PPC64 when a float32 is loaded it becomes a float64 in the register, which is
+different from other platforms and that needs to be recognized by the internal
+implementation of reflection so that float32 arguments are passed correctly.
+
+Registers R18 - R29 and F13 - F31 are considered scratch registers.
+
+#### Stack layout
+
+The stack pointer, R1, grows down and is aligned to 8 bytes in Go, but changed
+to 16 bytes when calling cgo.
+
+A function's stack frame, after the frame is created, is laid out as
+follows:
+
+    +------------------------------+
+    | ... locals ...               |
+    | ... outgoing arguments ...   |
+    | 24  TOC register R2 save     | When compiled with -shared/-dynlink
+    | 16  Unused in Go             | Not used in Go
+    | 8   CR save                  | nonvolatile CR fields
+    | 0   return PC                | ← R1 points to
+    +------------------------------+ ↓ lower addresses
+
+The "return PC" is loaded to the link register, LR, as part of the
+ppc64 `BL` operations.
+
+On entry, a non-leaf function subtracts its stack frame size from R1 to
+create its stack frame and saves the value of LR at the bottom of the frame.
+
+A leaf function that does not require any stack space does not modify R1 and
+does not save LR.
+
+*NOTE*: We might need to save the frame pointer on the stack as
+in the PPC64 ELF v2 ABI so Go can inter-operate with platform debuggers
+and profilers.
+
+This stack layout is used by both register-based (ABIInternal) and
+stack-based (ABI0) calling conventions.
+
+#### Flags
+
+The condition register consists of 8 condition code register fields
+CR0-CR7. Go generated code only sets and uses CR0, commonly set by
+compare functions and used to determine the target of a conditional
+branch. The generated code does not set or use CR1-CR7.
+
+The floating point status and control register (FPSCR) is initialized
+to 0 by the kernel at startup of the Go program and not changed by
+the Go generated code.
+
+## Future directions
+
+### Spill path improvements
+
+The ABI currently reserves spill space for argument registers so the
+compiler can statically generate an argument spill path before calling
+into `runtime.morestack` to grow the stack.
+This ensures there will be sufficient spill space even when the stack
+is nearly exhausted and keeps stack growth and stack scanning
+essentially unchanged from ABI0.
+
+However, this wastes stack space (the median wastage is 16 bytes per
+call), resulting in larger stacks and increased cache footprint.
+A better approach would be to reserve stack space only when spilling.
+One way to ensure enough space is available to spill would be for
+every function to ensure there is enough space for the function's own
+frame *as well as* the spill space of all functions it calls.
+For most functions, this would change the threshold for the prologue
+stack growth check.
+For `nosplit` functions, this would change the threshold used in the
+linker's static stack size check.
+
+Allocating spill space in the callee rather than the caller may also
+allow for faster reflection calls in the common case where a function
+takes only register arguments, since it would allow reflection to make
+these calls directly without allocating any frame.
+
+The statically-generated spill path also increases code size.
+It is possible to instead have a generic spill path in the runtime, as
+part of `morestack`.
+However, this complicates reserving the spill space, since spilling
+all possible register arguments would, in most cases, take
+significantly more space than spilling only those used by a particular
+function.
+Some options are to spill to a temporary space and copy back only the
+registers used by the function, or to grow the stack if necessary
+before spilling to it (using a temporary space if necessary), or to
+use a heap-allocated space if insufficient stack space is available.
+These options all add enough complexity that we will have to make this
+decision based on the actual code size growth caused by the static
+spill paths.
+
+### Clobber sets
+
+As defined, the ABI does not use callee-save registers.
+This significantly simplifies the garbage collector and the compiler's
+register allocator, but at some performance cost.
+A potentially better balance for Go code would be to use *clobber
+sets*: for each function, the compiler records the set of registers it
+clobbers (including those clobbered by functions it calls) and any
+register not clobbered by function F can remain live across calls to
+F.
+
+This is generally a good fit for Go because Go's package DAG allows
+function metadata like the clobber set to flow up the call graph, even
+across package boundaries.
+Clobber sets would require relatively little change to the garbage
+collector, unlike general callee-save registers.
+One disadvantage of clobber sets over callee-save registers is that
+they don't help with indirect function calls or interface method
+calls, since static information isn't available in these cases.
+
+### Large aggregates
+
+Go encourages passing composite values by value, and this simplifies
+reasoning about mutation and races.
+However, this comes at a performance cost for large composite values.
+It may be possible to instead transparently pass large composite
+values by reference and delay copying until it is actually necessary.
+
+## Appendix: Register usage analysis
+
+In order to understand the impacts of the above design on register
+usage, we
+[analyzed](https://github.com/aclements/go-misc/tree/master/abi) the
+impact of the above ABI on a large code base: cmd/kubelet from
+[Kubernetes](https://github.com/kubernetes/kubernetes) at tag v1.18.8.
+
+The following table shows the impact of different numbers of available
+integer and floating-point registers on argument assignment:
+
+```
+|      |        |       |   stack args    |     spills      |   stack total   |
+| ints | floats | % fit | p50 | p95 | p99 | p50 | p95 | p99 | p50 | p95 | p99 |
+|    0 |      0 |  6.3% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    0 |      8 |  6.4% |  32 | 152 | 256 |   0 |   0 |   0 |  32 | 152 | 256 |
+|    1 |      8 | 21.3% |  24 | 144 | 248 |   8 |   8 |   8 |  32 | 152 | 256 |
+|    2 |      8 | 38.9% |  16 | 128 | 224 |   8 |  16 |  16 |  24 | 136 | 240 |
+|    3 |      8 | 57.0% |   0 | 120 | 224 |  16 |  24 |  24 |  24 | 136 | 240 |
+|    4 |      8 | 73.0% |   0 | 120 | 216 |  16 |  32 |  32 |  24 | 136 | 232 |
+|    5 |      8 | 83.3% |   0 | 112 | 216 |  16 |  40 |  40 |  24 | 136 | 232 |
+|    6 |      8 | 87.5% |   0 | 112 | 208 |  16 |  48 |  48 |  24 | 136 | 232 |
+|    7 |      8 | 89.8% |   0 | 112 | 208 |  16 |  48 |  56 |  24 | 136 | 232 |
+|    8 |      8 | 91.3% |   0 | 112 | 200 |  16 |  56 |  64 |  24 | 136 | 232 |
+|    9 |      8 | 92.1% |   0 | 112 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   10 |      8 | 92.6% |   0 | 104 | 192 |  16 |  56 |  72 |  24 | 136 | 232 |
+|   11 |      8 | 93.1% |   0 | 104 | 184 |  16 |  56 |  80 |  24 | 128 | 232 |
+|   12 |      8 | 93.4% |   0 | 104 | 176 |  16 |  56 |  88 |  24 | 128 | 232 |
+|   13 |      8 | 94.0% |   0 |  88 | 176 |  16 |  56 |  96 |  24 | 128 | 232 |
+|   14 |      8 | 94.4% |   0 |  80 | 152 |  16 |  64 | 104 |  24 | 128 | 232 |
+|   15 |      8 | 94.6% |   0 |  80 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|   16 |      8 | 94.9% |   0 |  16 | 152 |  16 |  64 | 112 |  24 | 128 | 232 |
+|    ∞ |      8 | 99.8% |   0 |   0 |   0 |  24 | 112 | 216 |  24 | 120 | 216 |
+```
+
+The first two columns show the number of available integer and
+floating-point registers.
+The first row shows the results for 0 integer and 0 floating-point
+registers, which is equivalent to ABI0.
+We found that any reasonable number of floating-point registers has
+the same effect, so we fixed it at 8 for all other rows.
+
+The “% fit” column gives the fraction of functions where all arguments
+and results are register-assigned and no arguments are passed on the
+stack.
+The three “stack args” columns give the median, 95th and 99th
+percentile number of bytes of stack arguments.
+The “spills” columns likewise summarize the number of bytes in
+on-stack spill space.
+And “stack total” summarizes the sum of stack arguments and on-stack
+spill slots.
+Note that these are three different distributions; for example,
+there’s no single function that takes 0 stack argument bytes, 16 spill
+bytes, and 24 total stack bytes.
+
+From this, we can see that the fraction of functions that fit entirely
+in registers grows very slowly once it reaches about 90%, though
+curiously there is a small minority of functions that could benefit
+from a huge number of registers.
+Making 9 integer registers available on amd64 puts it in this realm.
+We also see that the stack space required for most functions is fairly
+small.
+While the increasing space required for spills largely balances out
+the decreasing space required for stack arguments as the number of
+available registers increases, there is a general reduction in the
+total stack space required with more available registers.
+This does, however, suggest that eliminating spill slots in the future
+would noticeably reduce stack requirements.
diff --git a/src/cmd/compile/doc.go b/src/cmd/compile/doc.go
new file mode 100644
index 0000000..ef7fa86
--- /dev/null
+++ b/src/cmd/compile/doc.go
@@ -0,0 +1,261 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+/*
+Compile, typically invoked as ``go tool compile,'' compiles a single Go package
+comprising the files named on the command line. It then writes a single
+object file named for the basename of the first source file with a .o suffix.
+The object file can then be combined with other objects into a package archive
+or passed directly to the linker (``go tool link''). If invoked with -pack, the compiler
+writes an archive directly, bypassing the intermediate object file.
+
+The generated files contain type information about the symbols exported by
+the package and about types used by symbols imported by the package from
+other packages. It is therefore not necessary when compiling client C of
+package P to read the files of P's dependencies, only the compiled output of P.
+
+Command Line
+
+Usage:
+
+ go tool compile [flags] file...
+
+The specified files must be Go source files and all part of the same package.
+The same compiler is used for all target operating systems and architectures.
+The GOOS and GOARCH environment variables set the desired target.
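+
+For example, to compile a package's source files for linux/arm64 (the file
+and output names here are arbitrary):
+
+	GOOS=linux GOARCH=arm64 go tool compile -o mypkg.o a.go b.go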
+
+Flags:
+
+ -D path
+ Set relative path for local imports.
+ -I dir1 -I dir2
+ Search for imported packages in dir1, dir2, etc,
+ after consulting $GOROOT/pkg/$GOOS_$GOARCH.
+ -L
+ Show complete file path in error messages.
+ -N
+ Disable optimizations.
+ -S
+ Print assembly listing to standard output (code only).
+ -S -S
+ Print assembly listing to standard output (code and data).
+ -V
+ Print compiler version and exit.
+ -asmhdr file
+ Write assembly header to file.
+ -asan
+ Insert calls to C/C++ address sanitizer.
+ -buildid id
+ Record id as the build id in the export metadata.
+ -blockprofile file
+ Write block profile for the compilation to file.
+ -c int
+ Concurrency during compilation. Set 1 for no concurrency (default is 1).
+ -complete
+ Assume package has no non-Go components.
+ -cpuprofile file
+ Write a CPU profile for the compilation to file.
+ -dynlink
+ Allow references to Go symbols in shared libraries (experimental).
+ -e
+ Remove the limit on the number of errors reported (default limit is 10).
+ -goversion string
+ Specify required go tool version of the runtime.
+ Exits when the runtime go version does not match goversion.
+ -h
+ Halt with a stack trace at the first error detected.
+ -importcfg file
+ Read import configuration from file.
+ In the file, set importmap, packagefile to specify import resolution.
+ -importmap old=new
+ Interpret import "old" as import "new" during compilation.
+ The option may be repeated to add multiple mappings.
+ -installsuffix suffix
+ Look for packages in $GOROOT/pkg/$GOOS_$GOARCH_suffix
+ instead of $GOROOT/pkg/$GOOS_$GOARCH.
+ -l
+ Disable inlining.
+ -lang version
+ Set language version to compile, as in -lang=go1.12.
+ Default is current version.
+ -linkobj file
+ Write linker-specific object to file and compiler-specific
+ object to usual output file (as specified by -o).
+ Without this flag, the -o output is a combination of both
+ linker and compiler input.
+ -m
+ Print optimization decisions. Higher values or repetition
+ produce more detail.
+ -memprofile file
+ Write memory profile for the compilation to file.
+ -memprofilerate rate
+ Set runtime.MemProfileRate for the compilation to rate.
+ -msan
+ Insert calls to C/C++ memory sanitizer.
+ -mutexprofile file
+ Write mutex profile for the compilation to file.
+ -nolocalimports
+ Disallow local (relative) imports.
+ -o file
+ Write object to file (default file.o or, with -pack, file.a).
+ -p path
+ Set expected package import path for the code being compiled,
+ and diagnose imports that would cause a circular dependency.
+ -pack
+ Write a package (archive) file rather than an object file
+ -race
+ Compile with race detector enabled.
+ -s
+ Warn about composite literals that can be simplified.
+ -shared
+ Generate code that can be linked into a shared library.
+ -spectre list
+ Enable spectre mitigations in list (all, index, ret).
+ -traceprofile file
+ Write an execution trace to file.
+ -trimpath prefix
+ Remove prefix from recorded source file paths.
+
+Flags related to debugging information:
+
+ -dwarf
+ Generate DWARF symbols.
+ -dwarflocationlists
+ Add location lists to DWARF in optimized mode.
+ -gendwarfinl int
+ Generate DWARF inline info records (default 2).
+
+Flags to debug the compiler itself:
+
+ -E
+ Debug symbol export.
+ -K
+ Debug missing line numbers.
+ -d list
+ Print debug information about items in list. Try -d help for further information.
+ -live
+ Debug liveness analysis.
+ -v
+ Increase debug verbosity.
+ -%
+ Debug non-static initializers.
+ -W
+ Debug parse tree after type checking.
+ -f
+ Debug stack frames.
+ -i
+ Debug line number stack.
+ -j
+ Debug runtime-initialized variables.
+ -r
+ Debug generated wrappers.
+ -w
+ Debug type checking.
+
+Compiler Directives
+
+The compiler accepts directives in the form of comments.
+To distinguish them from non-directive comments, directives
+require no space between the comment opening and the name of the directive. However, since
+they are comments, tools unaware of the directive convention or of a particular
+directive can skip over a directive like any other comment.
+*/
+// Line directives come in several forms:
+//
+// //line :line
+// //line :line:col
+// //line filename:line
+// //line filename:line:col
+// /*line :line*/
+// /*line :line:col*/
+// /*line filename:line*/
+// /*line filename:line:col*/
+//
+// In order to be recognized as a line directive, the comment must start with
+// //line or /*line followed by a space, and must contain at least one colon.
+// The //line form must start at the beginning of a line.
+// A line directive specifies the source position for the character immediately following
+// the comment as having come from the specified file, line and column:
+// For a //line comment, this is the first character of the next line, and
+// for a /*line comment this is the character position immediately following the closing */.
+// If no filename is given, the recorded filename is empty if there is also no column number;
+// otherwise it is the most recently recorded filename (actual filename or filename specified
+// by previous line directive).
+// If a line directive doesn't specify a column number, the column is "unknown" until
+// the next directive and the compiler does not report column numbers for that range.
+// The line directive text is interpreted from the back: First the trailing :ddd is peeled
+// off from the directive text if ddd is a valid number > 0. Then the second :ddd
+// is peeled off the same way if it is valid. Anything before that is considered the filename
+// (possibly including blanks and colons). Invalid line or column values are reported as errors.
+//
+// Examples:
+//
+// //line foo.go:10 the filename is foo.go, and the line number is 10 for the next line
+// //line C:foo.go:10 colons are permitted in filenames, here the filename is C:foo.go, and the line is 10
+// //line a:100 :10 blanks are permitted in filenames, here the filename is " a:100 " (excluding quotes)
+// /*line :10:20*/x the position of x is in the current file with line number 10 and column number 20
+// /*line foo: 10 */ this comment is recognized as invalid line directive (extra blanks around line number)
+//
+// Line directives typically appear in machine-generated code, so that compilers and debuggers
+// will report positions in the original input to the generator.
+/*
+The line directive is a historical special case; all other directives are of the form
+//go:name, indicating that they are defined by the Go toolchain.
+Each directive must be placed on its own line, with only leading spaces and tabs
+allowed before the comment.
+Each directive applies to the Go code that immediately follows it,
+which typically must be a declaration.
+
+ //go:noescape
+
+The //go:noescape directive must be followed by a function declaration without
+a body (meaning that the function has an implementation not written in Go).
+It specifies that the function does not allow any of the pointers passed as
+arguments to escape into the heap or into the values returned from the function.
+This information can be used during the compiler's escape analysis of Go code
+calling the function.
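+
+For example (an illustrative declaration; the body would be supplied outside
+Go, typically in assembly):
+
+	//go:noescape
+	func memmove(to, from unsafe.Pointer, n uintptr)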
+
+ //go:uintptrescapes
+
+The //go:uintptrescapes directive must be followed by a function declaration.
+It specifies that the function's uintptr arguments may be pointer values
+that have been converted to uintptr and must be treated as such by the
+garbage collector. The conversion from pointer to uintptr must appear in
+the argument list of any call to this function. This directive is necessary
+for some low-level system call implementations and should be avoided otherwise.
+
+ //go:noinline
+
+The //go:noinline directive must be followed by a function declaration.
+It specifies that calls to the function should not be inlined, overriding
+the compiler's usual optimization rules. This is typically only needed
+for special runtime functions or when debugging the compiler.
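+
+For example (an arbitrary function kept out of inlining):
+
+	//go:noinline
+	func id(x int) int { return x }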
+
+ //go:norace
+
+The //go:norace directive must be followed by a function declaration.
+It specifies that the function's memory accesses must be ignored by the
+race detector. This is most commonly used in low-level code invoked
+at times when it is unsafe to call into the race detector runtime.
+
+ //go:nosplit
+
+The //go:nosplit directive must be followed by a function declaration.
+It specifies that the function must omit its usual stack overflow check.
+This is most commonly used by low-level runtime code invoked
+at times when it is unsafe for the calling goroutine to be preempted.
+
+ //go:linkname localname [importpath.name]
+
+This special directive does not apply to the Go code that follows it.
+Instead, the //go:linkname directive instructs the compiler to use ``importpath.name''
+as the object file symbol name for the variable or function declared as ``localname''
+in the source code.
+If the ``importpath.name'' argument is omitted, the directive uses the
+symbol's default object file symbol name and only has the effect of making
+the symbol accessible to other packages.
+Because this directive can subvert the type system and package
+modularity, it is only enabled in files that have imported "unsafe".
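+
+For example (an illustrative use; the local name, the referenced runtime
+symbol, and the signature are placeholders):
+
+	import _ "unsafe" // linkname is only permitted in files importing "unsafe"
+
+	//go:linkname fastrand runtime.fastrand
+	func fastrand() uint32 // body provided by the runtime definition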
+*/
+package main
diff --git a/src/cmd/compile/internal/abi/abiutils.go b/src/cmd/compile/internal/abi/abiutils.go
new file mode 100644
index 0000000..529150a
--- /dev/null
+++ b/src/cmd/compile/internal/abi/abiutils.go
@@ -0,0 +1,829 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package abi
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sync"
+)
+
+//......................................................................
+//
+// Public/exported bits of the ABI utilities.
+//
+
+// ABIParamResultInfo stores the results of processing a given
+// function type to compute stack layout and register assignments. For
+// each input and output parameter we capture whether the param was
+// register-assigned (and to which register(s)) or the stack offset
+// for the param if it is not going to be passed in registers according
+// to the rules in the Go internal ABI specification (1.17).
+type ABIParamResultInfo struct {
+ inparams []ABIParamAssignment // Includes receiver for method calls. Does NOT include hidden closure pointer.
+ outparams []ABIParamAssignment
+ offsetToSpillArea int64
+ spillAreaSize int64
+ inRegistersUsed int
+ outRegistersUsed int
+ config *ABIConfig // to enable String() method
+}
+
+func (a *ABIParamResultInfo) Config() *ABIConfig {
+ return a.config
+}
+
+func (a *ABIParamResultInfo) InParams() []ABIParamAssignment {
+ return a.inparams
+}
+
+func (a *ABIParamResultInfo) OutParams() []ABIParamAssignment {
+ return a.outparams
+}
+
+func (a *ABIParamResultInfo) InRegistersUsed() int {
+ return a.inRegistersUsed
+}
+
+func (a *ABIParamResultInfo) OutRegistersUsed() int {
+ return a.outRegistersUsed
+}
+
+func (a *ABIParamResultInfo) InParam(i int) *ABIParamAssignment {
+ return &a.inparams[i]
+}
+
+func (a *ABIParamResultInfo) OutParam(i int) *ABIParamAssignment {
+ return &a.outparams[i]
+}
+
+func (a *ABIParamResultInfo) SpillAreaOffset() int64 {
+ return a.offsetToSpillArea
+}
+
+func (a *ABIParamResultInfo) SpillAreaSize() int64 {
+ return a.spillAreaSize
+}
+
+// ArgWidth returns the amount of stack needed for all the inputs
+// and outputs of a function or method, including ABI-defined parameter
+// slots and ABI-defined spill slots for register-resident parameters.
+// The name is inherited from (*Type).ArgWidth(), which it replaces.
+func (a *ABIParamResultInfo) ArgWidth() int64 {
+ return a.spillAreaSize + a.offsetToSpillArea - a.config.LocalsOffset()
+}
+
+// RegIndex stores the index into the set of machine registers used by
+// the ABI on a specific architecture for parameter passing. RegIndex
+// values 0 through N-1 (where N is the number of integer registers
+// used for param passing according to the ABI rules) describe integer
+// registers; values N through M (where M is the number of floating
+// point registers used). Thus if the ABI says there are 5 integer
+// registers and 7 floating point registers, then RegIndex value of 4
+// indicates the 5th integer register, and a RegIndex value of 11
+// indicates the 7th floating point register.
+type RegIndex uint8
+
+// ABIParamAssignment holds information about how a specific param or
+// result will be passed: in registers (in which case 'Registers' is
+// populated) or on the stack (in which case 'Offset' is set to a
+// non-negative stack offset). The values in 'Registers' are indices
+// (as described above), not architected registers.
+type ABIParamAssignment struct {
+ Type *types.Type
+ Name types.Object // should always be *ir.Name, used to match with a particular ssa.OpArg.
+ Registers []RegIndex
+ offset int32
+}
+
+// Offset returns the stack offset for addressing the parameter that "a" describes.
+// This will panic if "a" describes a register-allocated parameter.
+func (a *ABIParamAssignment) Offset() int32 {
+ if len(a.Registers) > 0 {
+ base.Fatalf("register allocated parameters have no offset")
+ }
+ return a.offset
+}
+
+// RegisterTypes returns a slice of the types of the registers
+// corresponding to a slice of parameters. The returned slice
+// has capacity for one more, likely a memory type.
+func RegisterTypes(apa []ABIParamAssignment) []*types.Type {
+ rcount := 0
+ for _, pa := range apa {
+ rcount += len(pa.Registers)
+ }
+ if rcount == 0 {
+ // Note that this catches top-level struct{} and [0]Foo, which are stack allocated.
+ return make([]*types.Type, 0, 1)
+ }
+ rts := make([]*types.Type, 0, rcount+1)
+ for _, pa := range apa {
+ if len(pa.Registers) == 0 {
+ continue
+ }
+ rts = appendParamTypes(rts, pa.Type)
+ }
+ return rts
+}
+
+func (pa *ABIParamAssignment) RegisterTypesAndOffsets() ([]*types.Type, []int64) {
+ l := len(pa.Registers)
+ if l == 0 {
+ return nil, nil
+ }
+ typs := make([]*types.Type, 0, l)
+ offs := make([]int64, 0, l)
+ offs, _ = appendParamOffsets(offs, 0, pa.Type)
+ return appendParamTypes(typs, pa.Type), offs
+}
+
+func appendParamTypes(rts []*types.Type, t *types.Type) []*types.Type {
+ w := t.Size()
+ if w == 0 {
+ return rts
+ }
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() {
+ c := types.FloatForComplex(t)
+ return append(rts, c, c)
+ } else {
+ if int(t.Size()) <= types.RegSize {
+ return append(rts, t)
+ }
+ // assume 64bit int on 32-bit machine
+ // TODO endianness? Should high-order (sign bits) word come first?
+ if t.IsSigned() {
+ rts = append(rts, types.Types[types.TINT32])
+ } else {
+ rts = append(rts, types.Types[types.TUINT32])
+ }
+ return append(rts, types.Types[types.TUINT32])
+ }
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ { // 0 gets no registers, plus future-proofing.
+ rts = appendParamTypes(rts, t.Elem())
+ }
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if f.Type.Size() > 0 { // embedded zero-width types receive no registers
+ rts = appendParamTypes(rts, f.Type)
+ }
+ }
+ case types.TSLICE:
+ return appendParamTypes(rts, synthSlice)
+ case types.TSTRING:
+ return appendParamTypes(rts, synthString)
+ case types.TINTER:
+ return appendParamTypes(rts, synthIface)
+ }
+ }
+ return rts
+}
+
+// appendParamOffsets appends the offset(s) of type t, starting from "at",
+// to input offsets, and returns the longer slice and the next unused offset.
+func appendParamOffsets(offsets []int64, at int64, t *types.Type) ([]int64, int64) {
+ at = align(at, t)
+ w := t.Size()
+ if w == 0 {
+ return offsets, at
+ }
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() || int(t.Size()) > types.RegSize { // complex and *int64 on 32-bit
+ s := w / 2
+ return append(offsets, at, at+s), at + w
+ } else {
+ return append(offsets, at), at + w
+ }
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ offsets, at = appendParamOffsets(offsets, at, t.Elem())
+ }
+ case types.TSTRUCT:
+ for i, f := range t.FieldSlice() {
+ offsets, at = appendParamOffsets(offsets, at, f.Type)
+ if f.Type.Size() == 0 && i == t.NumFields()-1 {
+ at++ // last field has zero width
+ }
+ }
+ at = align(at, t) // type size is rounded up to its alignment
+ case types.TSLICE:
+ return appendParamOffsets(offsets, at, synthSlice)
+ case types.TSTRING:
+ return appendParamOffsets(offsets, at, synthString)
+ case types.TINTER:
+ return appendParamOffsets(offsets, at, synthIface)
+ }
+ }
+ return offsets, at
+}
+
+// FrameOffset returns the frame-pointer-relative location that a function
+// would spill its input or output parameter to, if such a spill slot exists.
+// If there is none defined (e.g., register-allocated outputs) it panics.
+// For register-allocated inputs that is their spill offset reserved for morestack;
+// for stack-allocated inputs and outputs, that is their location on the stack.
+// (In a future version of the ABI, register-resident inputs may lose their defined
+// spill area to help reduce stack sizes.)
+func (a *ABIParamAssignment) FrameOffset(i *ABIParamResultInfo) int64 {
+ if a.offset == -1 {
+ base.Fatalf("function parameter has no ABI-defined frame-pointer offset")
+ }
+ if len(a.Registers) == 0 { // passed on stack
+ return int64(a.offset) - i.config.LocalsOffset()
+ }
+ // spill area for registers
+ return int64(a.offset) + i.SpillAreaOffset() - i.config.LocalsOffset()
+}
+
+// RegAmounts holds a specified number of integer/float registers.
+type RegAmounts struct {
+ intRegs int
+ floatRegs int
+}
+
+// ABIConfig captures the number of registers made available
+// by the ABI rules for parameter passing and result returning.
+type ABIConfig struct {
+ // Do we need anything more than this?
+ offsetForLocals int64 // e.g., obj.(*Link).FixedFrameSize() -- extra linkage information on some architectures.
+ regAmounts RegAmounts
+ regsForTypeCache map[*types.Type]int
+}
+
+// NewABIConfig returns a new ABI configuration for an architecture with
+// iRegsCount integer/pointer registers and fRegsCount floating point registers.
+func NewABIConfig(iRegsCount, fRegsCount int, offsetForLocals int64) *ABIConfig {
+ return &ABIConfig{offsetForLocals: offsetForLocals, regAmounts: RegAmounts{iRegsCount, fRegsCount}, regsForTypeCache: make(map[*types.Type]int)}
+}
+
+// Copy returns a copy of an ABIConfig for use in a function's compilation so that access to the cache does not need to be protected with a mutex.
+func (a *ABIConfig) Copy() *ABIConfig {
+ b := *a
+ b.regsForTypeCache = make(map[*types.Type]int)
+ return &b
+}
+
+// LocalsOffset returns the architecture-dependent offset from SP for args and results.
+// In theory this is only used for debugging; it ought to already be incorporated into
+// results from the ABI-related methods.
+func (a *ABIConfig) LocalsOffset() int64 {
+ return a.offsetForLocals
+}
+
+// FloatIndexFor translates r into an index in the floating point parameter
+// registers. If the result is negative, the input index was actually for the
+// integer parameter registers.
+func (a *ABIConfig) FloatIndexFor(r RegIndex) int64 {
+ return int64(r) - int64(a.regAmounts.intRegs)
+}
+
+// NumParamRegs returns the number of parameter registers used for a given type,
+// without regard for the number available.
+func (a *ABIConfig) NumParamRegs(t *types.Type) int {
+ var n int
+ if n, ok := a.regsForTypeCache[t]; ok {
+ return n
+ }
+
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() {
+ n = 2
+ } else {
+ n = (int(t.Size()) + types.RegSize - 1) / types.RegSize
+ }
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ n = a.NumParamRegs(t.Elem()) * int(t.NumElem())
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ n += a.NumParamRegs(f.Type)
+ }
+ case types.TSLICE:
+ n = a.NumParamRegs(synthSlice)
+ case types.TSTRING:
+ n = a.NumParamRegs(synthString)
+ case types.TINTER:
+ n = a.NumParamRegs(synthIface)
+ }
+ }
+ a.regsForTypeCache[t] = n
+
+ return n
+}
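+
+// For example, on a 64-bit target NumParamRegs reports 2 registers for a
+// string or complex128, 3 for a slice, and 4 for a [4]int32 array.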
+
+// preAllocateParams gets the slice sizes right for inputs and outputs.
+func (a *ABIParamResultInfo) preAllocateParams(hasRcvr bool, nIns, nOuts int) {
+ if hasRcvr {
+ nIns++
+ }
+ a.inparams = make([]ABIParamAssignment, 0, nIns)
+ a.outparams = make([]ABIParamAssignment, 0, nOuts)
+}
+
+// ABIAnalyzeTypes takes an optional receiver type, arrays of ins and outs, and returns an ABIParamResultInfo,
+// based on the given configuration. This is the same result computed by config.ABIAnalyze applied to the
+// corresponding method/function type, except that all the embedded parameter names are nil.
+// This is intended for use by ssagen/ssa.go:(*state).rtcall, for runtime functions that lack a parsed function type.
+func (config *ABIConfig) ABIAnalyzeTypes(rcvr *types.Type, ins, outs []*types.Type) *ABIParamResultInfo {
+ setup()
+ s := assignState{
+ stackOffset: config.offsetForLocals,
+ rTotal: config.regAmounts,
+ }
+ result := &ABIParamResultInfo{config: config}
+ result.preAllocateParams(rcvr != nil, len(ins), len(outs))
+
+ // Receiver
+ if rcvr != nil {
+ result.inparams = append(result.inparams,
+ s.assignParamOrReturn(rcvr, nil, false))
+ }
+
+ // Inputs
+ for _, t := range ins {
+ result.inparams = append(result.inparams,
+ s.assignParamOrReturn(t, nil, false))
+ }
+ s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+ result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ // Outputs
+ s.rUsed = RegAmounts{}
+ for _, t := range outs {
+ result.outparams = append(result.outparams, s.assignParamOrReturn(t, nil, true))
+ }
+ // The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+ // TODO in theory could align offset only to minimum required by spilled data types.
+ result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ return result
+}
+
+// ABIAnalyzeFuncType takes a function type 'ft' and an ABI rules description
+// 'config' and analyzes the function to determine how its parameters
+// and results will be passed (in registers or on the stack), returning
+// an ABIParamResultInfo object that holds the results of the analysis.
+func (config *ABIConfig) ABIAnalyzeFuncType(ft *types.Func) *ABIParamResultInfo {
+ setup()
+ s := assignState{
+ stackOffset: config.offsetForLocals,
+ rTotal: config.regAmounts,
+ }
+ result := &ABIParamResultInfo{config: config}
+ result.preAllocateParams(ft.Receiver != nil, ft.Params.NumFields(), ft.Results.NumFields())
+
+ // Receiver
+	// TODO(register args): "struct" and "fields" no longer seem like the right terms for describing function parameters.
+ if ft.Receiver != nil && ft.Receiver.NumFields() != 0 {
+ r := ft.Receiver.FieldSlice()[0]
+ result.inparams = append(result.inparams,
+ s.assignParamOrReturn(r.Type, r.Nname, false))
+ }
+
+ // Inputs
+ ifsl := ft.Params.FieldSlice()
+ for _, f := range ifsl {
+ result.inparams = append(result.inparams,
+ s.assignParamOrReturn(f.Type, f.Nname, false))
+ }
+ s.stackOffset = types.Rnd(s.stackOffset, int64(types.RegSize))
+ result.inRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+
+ // Outputs
+ s.rUsed = RegAmounts{}
+ ofsl := ft.Results.FieldSlice()
+ for _, f := range ofsl {
+ result.outparams = append(result.outparams, s.assignParamOrReturn(f.Type, f.Nname, true))
+ }
+ // The spill area is at a register-aligned offset and its size is rounded up to a register alignment.
+ // TODO in theory could align offset only to minimum required by spilled data types.
+ result.offsetToSpillArea = alignTo(s.stackOffset, types.RegSize)
+ result.spillAreaSize = alignTo(s.spillOffset, types.RegSize)
+ result.outRegistersUsed = s.rUsed.intRegs + s.rUsed.floatRegs
+ return result
+}
+
+// ABIAnalyze returns the same result as ABIAnalyzeFuncType, but also
+// updates the offsets of all the receiver, input, and output fields.
+// If setNname is true, it also sets the FrameOffset of the Nname for
+// the field(s); this is for use when compiling a function and figuring out
+// spill locations. Doing this for callers can cause races for register
+// outputs because their frame location transitions from BOGUS_FUNARG_OFFSET
+// to zero to an as-if-AUTO offset that has no use for callers.
+func (config *ABIConfig) ABIAnalyze(t *types.Type, setNname bool) *ABIParamResultInfo {
+ ft := t.FuncType()
+ result := config.ABIAnalyzeFuncType(ft)
+
+ // Fill in the frame offsets for receiver, inputs, results
+ k := 0
+ if t.NumRecvs() != 0 {
+ config.updateOffset(result, ft.Receiver.FieldSlice()[0], result.inparams[0], false, setNname)
+ k++
+ }
+ for i, f := range ft.Params.FieldSlice() {
+ config.updateOffset(result, f, result.inparams[k+i], false, setNname)
+ }
+ for i, f := range ft.Results.FieldSlice() {
+ config.updateOffset(result, f, result.outparams[i], true, setNname)
+ }
+ return result
+}
+
+func (config *ABIConfig) updateOffset(result *ABIParamResultInfo, f *types.Field, a ABIParamAssignment, isReturn, setNname bool) {
+ // Everything except return values in registers has either a frame home (if not in a register) or a frame spill location.
+ if !isReturn || len(a.Registers) == 0 {
+ // The type frame offset DOES NOT show effects of minimum frame size.
+		// Getting this wrong breaks stackmaps; see liveness/plive.go:WriteFuncMap and typebits/typebits.go:Set.
+ off := a.FrameOffset(result)
+ fOffset := f.Offset
+ if fOffset == types.BOGUS_FUNARG_OFFSET {
+ if setNname && f.Nname != nil {
+ f.Nname.(*ir.Name).SetFrameOffset(off)
+ f.Nname.(*ir.Name).SetIsOutputParamInRegisters(false)
+ }
+ } else {
+ base.Fatalf("field offset for %s at %s has been set to %d", f.Sym.Name, base.FmtPos(f.Pos), fOffset)
+ }
+ } else {
+ if setNname && f.Nname != nil {
+ fname := f.Nname.(*ir.Name)
+ fname.SetIsOutputParamInRegisters(true)
+ fname.SetFrameOffset(0)
+ }
+ }
+}
+
+//......................................................................
+//
+// Non-public portions.
+
+// regString produces a human-readable version of a RegIndex.
+func (c *RegAmounts) regString(r RegIndex) string {
+ if int(r) < c.intRegs {
+ return fmt.Sprintf("I%d", int(r))
+ } else if int(r) < c.intRegs+c.floatRegs {
+ return fmt.Sprintf("F%d", int(r)-c.intRegs)
+ }
+ return fmt.Sprintf("<?>%d", r)
+}
+
+// ToString method renders an ABIParamAssignment in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamAssignment) ToString(config *ABIConfig, extra bool) string {
+ regs := "R{"
+ offname := "spilloffset" // offset is for spill for register(s)
+ if len(ri.Registers) == 0 {
+ offname = "offset" // offset is for memory arg
+ }
+ for _, r := range ri.Registers {
+ regs += " " + config.regAmounts.regString(r)
+ if extra {
+ regs += fmt.Sprintf("(%d)", r)
+ }
+ }
+ if extra {
+ regs += fmt.Sprintf(" | #I=%d, #F=%d", config.regAmounts.intRegs, config.regAmounts.floatRegs)
+ }
+ return fmt.Sprintf("%s } %s: %d typ: %v", regs, offname, ri.offset, ri.Type)
+}
+
+// String method renders an ABIParamResultInfo in human-readable
+// form, suitable for debugging or unit testing.
+func (ri *ABIParamResultInfo) String() string {
+ res := ""
+ for k, p := range ri.inparams {
+ res += fmt.Sprintf("IN %d: %s\n", k, p.ToString(ri.config, false))
+ }
+ for k, r := range ri.outparams {
+ res += fmt.Sprintf("OUT %d: %s\n", k, r.ToString(ri.config, false))
+ }
+ res += fmt.Sprintf("offsetToSpillArea: %d spillAreaSize: %d",
+ ri.offsetToSpillArea, ri.spillAreaSize)
+ return res
+}
+
+// assignState holds intermediate state during the register assigning process
+// for a given function signature.
+type assignState struct {
+ rTotal RegAmounts // total reg amounts from ABI rules
+ rUsed RegAmounts // regs used by params completely assigned so far
+ pUsed RegAmounts // regs used by the current param (or pieces therein)
+ stackOffset int64 // current stack offset
+ spillOffset int64 // current spill offset
+}
+
+// align returns a, rounded up to t's alignment.
+func align(a int64, t *types.Type) int64 {
+ return alignTo(a, int(uint8(t.Alignment())))
+}
+
+// alignTo returns a, rounded up to t, where t must be 0 or a power of 2.
+func alignTo(a int64, t int) int64 {
+ if t == 0 {
+ return a
+ }
+ return types.Rnd(a, int64(t))
+}
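+
+// For example, alignTo(9, 8) == 16 and alignTo(9, 0) == 9.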
+
+// stackSlot returns a stack offset for a param or result of the
+// specified type.
+func (state *assignState) stackSlot(t *types.Type) int64 {
+ rv := align(state.stackOffset, t)
+ state.stackOffset = rv + t.Size()
+ return rv
+}
+
+// allocateRegs returns an ordered list of register indices for a parameter or result
+// that we've just determined to be register-assignable. The number of registers
+// needed is assumed to be stored in state.pUsed.
+func (state *assignState) allocateRegs(regs []RegIndex, t *types.Type) []RegIndex {
+ if t.Size() == 0 {
+ return regs
+ }
+ ri := state.rUsed.intRegs
+ rf := state.rUsed.floatRegs
+ if t.IsScalar() || t.IsPtrShaped() {
+ if t.IsComplex() {
+ regs = append(regs, RegIndex(rf+state.rTotal.intRegs), RegIndex(rf+1+state.rTotal.intRegs))
+ rf += 2
+ } else if t.IsFloat() {
+ regs = append(regs, RegIndex(rf+state.rTotal.intRegs))
+ rf += 1
+ } else {
+ n := (int(t.Size()) + types.RegSize - 1) / types.RegSize
+ for i := 0; i < n; i++ { // looking ahead to really big integers
+ regs = append(regs, RegIndex(ri))
+ ri += 1
+ }
+ }
+ state.rUsed.intRegs = ri
+ state.rUsed.floatRegs = rf
+ return regs
+ } else {
+ typ := t.Kind()
+ switch typ {
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ regs = state.allocateRegs(regs, t.Elem())
+ }
+ return regs
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ regs = state.allocateRegs(regs, f.Type)
+ }
+ return regs
+ case types.TSLICE:
+ return state.allocateRegs(regs, synthSlice)
+ case types.TSTRING:
+ return state.allocateRegs(regs, synthString)
+ case types.TINTER:
+ return state.allocateRegs(regs, synthIface)
+ }
+ }
+ base.Fatalf("was not expecting type %s", t)
+ panic("unreachable")
+}
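+
+// Note that float register indices are offset by rTotal.intRegs: with, say,
+// 9 integer registers available, the first float register is RegIndex 9.
+// FloatIndexFor maps such an index back to a float register number.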
+
+// regAllocate creates a register ABIParamAssignment object for a param
+// or result with the specified type, as a final step (this assumes
+// that all of the safety/suitability analysis is complete).
+func (state *assignState) regAllocate(t *types.Type, name types.Object, isReturn bool) ABIParamAssignment {
+ spillLoc := int64(-1)
+ if !isReturn {
+ // Spill for register-resident t must be aligned for storage of a t.
+ spillLoc = align(state.spillOffset, t)
+ state.spillOffset = spillLoc + t.Size()
+ }
+ return ABIParamAssignment{
+ Type: t,
+ Name: name,
+ Registers: state.allocateRegs([]RegIndex{}, t),
+ offset: int32(spillLoc),
+ }
+}
+
+// stackAllocate creates a stack memory ABIParamAssignment object for
+// a param or result with the specified type, as a final step (this
+// assumes that all of the safety/suitability analysis is complete).
+func (state *assignState) stackAllocate(t *types.Type, name types.Object) ABIParamAssignment {
+ return ABIParamAssignment{
+ Type: t,
+ Name: name,
+ offset: int32(state.stackSlot(t)),
+ }
+}
+
+// intUsed returns the number of integer registers consumed
+// at a given point within an assignment stage.
+func (state *assignState) intUsed() int {
+ return state.rUsed.intRegs + state.pUsed.intRegs
+}
+
+// floatUsed returns the number of floating point registers consumed at
+// a given point within an assignment stage.
+func (state *assignState) floatUsed() int {
+ return state.rUsed.floatRegs + state.pUsed.floatRegs
+}
+
+// regassignIntegral examines a param/result of integral type 't' to
+// determine whether it can be register-assigned. Returns TRUE if we
+// can register allocate, FALSE otherwise (and updates state
+// accordingly).
+func (state *assignState) regassignIntegral(t *types.Type) bool {
+ regsNeeded := int(types.Rnd(t.Size(), int64(types.PtrSize)) / int64(types.PtrSize))
+ if t.IsComplex() {
+ regsNeeded = 2
+ }
+
+ // Floating point and complex.
+ if t.IsFloat() || t.IsComplex() {
+ if regsNeeded+state.floatUsed() > state.rTotal.floatRegs {
+ // not enough regs
+ return false
+ }
+ state.pUsed.floatRegs += regsNeeded
+ return true
+ }
+
+ // Non-floating point
+ if regsNeeded+state.intUsed() > state.rTotal.intRegs {
+ // not enough regs
+ return false
+ }
+ state.pUsed.intRegs += regsNeeded
+ return true
+}
+
+// regassignArray processes an array type (or array component within some
+// other enclosing type) to determine if it can be register assigned.
+// Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignArray(t *types.Type) bool {
+
+ nel := t.NumElem()
+ if nel == 0 {
+ return true
+ }
+ if nel > 1 {
+ // Not an array of length 1: stack assign
+ return false
+ }
+ // Visit element
+ return state.regassign(t.Elem())
+}
+
+// regassignStruct processes a struct type (or struct component within
+// some other enclosing type) to determine if it can be register
+// assigned. Returns TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassignStruct(t *types.Type) bool {
+ for _, field := range t.FieldSlice() {
+ if !state.regassign(field.Type) {
+ return false
+ }
+ }
+ return true
+}
+
+// synthOnce ensures that we only create the synth* fake types once.
+var synthOnce sync.Once
+
+// synthSlice, synthString, and synthIface are synthesized struct types
+// meant to capture the underlying implementations of string/slice/interface.
+var synthSlice *types.Type
+var synthString *types.Type
+var synthIface *types.Type
+
+// setup performs setup for the register assignment utilities, manufacturing
+// a small set of synthesized types that we'll need along the way.
+func setup() {
+ synthOnce.Do(func() {
+ fname := types.BuiltinPkg.Lookup
+ nxp := src.NoXPos
+ bp := types.NewPtr(types.Types[types.TUINT8])
+ it := types.Types[types.TINT]
+ synthSlice = types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(nxp, fname("ptr"), bp),
+ types.NewField(nxp, fname("len"), it),
+ types.NewField(nxp, fname("cap"), it),
+ })
+ types.CalcStructSize(synthSlice)
+ synthString = types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(nxp, fname("data"), bp),
+ types.NewField(nxp, fname("len"), it),
+ })
+ types.CalcStructSize(synthString)
+ unsp := types.Types[types.TUNSAFEPTR]
+ synthIface = types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(nxp, fname("f1"), unsp),
+ types.NewField(nxp, fname("f2"), unsp),
+ })
+ types.CalcStructSize(synthIface)
+ })
+}
+
+// regassign examines a given param type (or component within some
+// composite) to determine if it can be register assigned. Returns
+// TRUE if we can register allocate, FALSE otherwise.
+func (state *assignState) regassign(pt *types.Type) bool {
+ typ := pt.Kind()
+ if pt.IsScalar() || pt.IsPtrShaped() {
+ return state.regassignIntegral(pt)
+ }
+ switch typ {
+ case types.TARRAY:
+ return state.regassignArray(pt)
+ case types.TSTRUCT:
+ return state.regassignStruct(pt)
+ case types.TSLICE:
+ return state.regassignStruct(synthSlice)
+ case types.TSTRING:
+ return state.regassignStruct(synthString)
+ case types.TINTER:
+ return state.regassignStruct(synthIface)
+ default:
+ base.Fatalf("not expected")
+ panic("unreachable")
+ }
+}
+
+// assignParamOrReturn processes a given receiver, param, or result
+// of type 'pt' to determine whether it can be register assigned.
+// It returns the resulting ABIParamAssignment; the intermediate
+// register/stack/spill bookkeeping is tracked in 'state'.
+func (state *assignState) assignParamOrReturn(pt *types.Type, n types.Object, isReturn bool) ABIParamAssignment {
+ state.pUsed = RegAmounts{}
+ if pt.Size() == types.BADWIDTH {
+ base.Fatalf("should never happen")
+ panic("unreachable")
+ } else if pt.Size() == 0 {
+ return state.stackAllocate(pt, n)
+ } else if state.regassign(pt) {
+ return state.regAllocate(pt, n, isReturn)
+ } else {
+ return state.stackAllocate(pt, n)
+ }
+}
+
+// ComputePadding returns a list of "post element" padding values in
+// the case where we have a structure being passed in registers. Given
+// a param assignment corresponding to a struct, it returns a list
+// containing padding values for each field, e.g. the Kth element in
+// the list is the amount of padding between field K and the following
+// field. For things that are not structs (or structs without padding)
+// it returns a list of zeros. Example:
+//
+// type small struct {
+// x uint16
+// y uint8
+// z int32
+// w int32
+// }
+//
+// For this struct we would return a list [0, 1, 0, 0], meaning that
+// we have one byte of padding after the second field, and no bytes of
+// padding after any of the other fields. Input parameter "storage" is
+// a slice with enough capacity to accommodate padding elements for
+// the architected register set in question.
+func (pa *ABIParamAssignment) ComputePadding(storage []uint64) []uint64 {
+ nr := len(pa.Registers)
+ padding := storage[:nr]
+ for i := 0; i < nr; i++ {
+ padding[i] = 0
+ }
+ if pa.Type.Kind() != types.TSTRUCT || nr == 0 {
+ return padding
+ }
+ types := make([]*types.Type, 0, nr)
+ types = appendParamTypes(types, pa.Type)
+ if len(types) != nr {
+ panic("internal error")
+ }
+ off := int64(0)
+ for idx, t := range types {
+ ts := t.Size()
+ off += int64(ts)
+ if idx < len(types)-1 {
+ noff := align(off, types[idx+1])
+ if noff != off {
+ padding[idx] = uint64(noff - off)
+ }
+ }
+ }
+ return padding
+}
diff --git a/src/cmd/compile/internal/amd64/galign.go b/src/cmd/compile/internal/amd64/galign.go
new file mode 100644
index 0000000..ca44263
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/x86"
+)
+
+var leaptr = x86.ALEAQ
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &x86.Linkamd64
+ arch.REGSP = x86.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/amd64/ggen.go b/src/cmd/compile/internal/amd64/ggen.go
new file mode 100644
index 0000000..b8dce81
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ggen.go
@@ -0,0 +1,154 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "internal/buildcfg"
+)
+
+// no floating point in note handlers on Plan 9
+var isPlan9 = buildcfg.GOOS == "plan9"
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ;
+// see runtime/mkduff.go.
+const (
+ dzBlocks = 16 // number of MOV/ADD blocks
+ dzBlockLen = 4 // number of clears per block
+ dzBlockSize = 23 // size of instructions in a single block
+ dzMovSize = 5 // size of single MOV instruction w/ offset
+ dzLeaqSize = 4 // size of single LEAQ instruction
+ dzClearStep = 16 // number of bytes cleared by each MOV instruction
+
+ dzClearLen = dzClearStep * dzBlockLen // bytes cleared by one block
+ dzSize = dzBlocks * dzBlockSize
+)
+
+// dzOff returns the offset for a jump into DUFFZERO.
+// b is the number of bytes to zero.
+func dzOff(b int64) int64 {
+ off := int64(dzSize)
+ off -= b / dzClearLen * dzBlockSize
+ tailLen := b % dzClearLen
+ if tailLen >= dzClearStep {
+ off -= dzLeaqSize + dzMovSize*(tailLen/dzClearStep)
+ }
+ return off
+}
+
+// dzDI returns the pre-adjustment to DI for a call to DUFFZERO.
+// b is the number of bytes to zero.
+func dzDI(b int64) int64 {
+ tailLen := b % dzClearLen
+ if tailLen < dzClearStep {
+ return 0
+ }
+ tailSteps := tailLen / dzClearStep
+ return -dzClearStep * (dzBlockLen - tailSteps)
+}
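+
+// As a worked example of the two helpers above: for b = 96 (one full 64-byte
+// block plus a 32-byte tail), dzOff(96) = 368 - 23 - (4 + 2*5) = 331 and
+// dzDI(96) = -16*(4-2) = -32.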
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ const (
+ r13 = 1 << iota // if R13 is already zeroed.
+ )
+
+ if cnt == 0 {
+ return p
+ }
+
+ if cnt%int64(types.RegSize) != 0 {
+ // should only happen with nacl
+ if cnt%int64(types.PtrSize) != 0 {
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ }
+ if *state&r13 == 0 {
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_R13, 0)
+ *state |= r13
+ }
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ off += int64(types.PtrSize)
+ cnt -= int64(types.PtrSize)
+ }
+
+ if cnt == 8 {
+ if *state&r13 == 0 {
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_R13, 0)
+ *state |= r13
+ }
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_MEM, x86.REG_SP, off)
+ } else if !isPlan9 && cnt <= int64(8*types.RegSize) {
+ for i := int64(0); i < cnt/16; i++ {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+i*16)
+ }
+
+ if cnt%16 != 0 {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_SP, off+cnt-int64(16))
+ }
+ } else if !isPlan9 && (cnt <= int64(128*types.RegSize)) {
+		// Save DI to R12. With the amd64 Go register ABI, DI can contain
+ // an incoming parameter, whereas R12 is always scratch.
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
+ // Emit duffzero call
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off+dzDI(cnt), obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, dzOff(cnt))
+ p.To.Sym = ir.Syms.Duffzero
+ if cnt%16 != 0 {
+ p = pp.Append(p, x86.AMOVUPS, obj.TYPE_REG, x86.REG_X15, 0, obj.TYPE_MEM, x86.REG_DI, -int64(8))
+ }
+ // Restore DI from r12
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
+
+ } else {
+ // When the register ABI is in effect, at this point in the
+ // prolog we may have live values in all of RAX,RDI,RCX. Save
+ // them off to registers before the REPSTOSQ below, then
+ // restore. Note that R12 and R13 are always available as
+ // scratch regs; here we also use R15 (this is safe to do
+ // since there won't be any globals accessed in the prolog).
+ // See rewriteToUseGot() in obj6.go for more on r15 use.
+
+ // Save rax/rdi/rcx
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_DI, 0, obj.TYPE_REG, x86.REG_R12, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_REG, x86.REG_R13, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_CX, 0, obj.TYPE_REG, x86.REG_R15, 0)
+
+ // Set up the REPSTOSQ and kick it off.
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, leaptr, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSQ, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+
+ // Restore rax/rdi/rcx
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R12, 0, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R13, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ p = pp.Append(p, x86.AMOVQ, obj.TYPE_REG, x86.REG_R15, 0, obj.TYPE_REG, x86.REG_CX, 0)
+
+ // Record the fact that r13 is no longer zero.
+ *state &= ^uint32(r13)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // This is a hardware nop (1-byte 0x90) instruction,
+ // even though we describe it as an explicit XCHGL here.
+ // Particularly, this does not zero the high 32 bits
+ // like typical *L opcodes.
+ // (gas assembles "xchg %eax,%eax" to 0x87 0xc0, which
+ // does zero the high 32 bits.)
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/amd64/ssa.go b/src/cmd/compile/internal/amd64/ssa.go
new file mode 100644
index 0000000..b5db283
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/ssa.go
@@ -0,0 +1,1411 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && (v.Op == ssa.OpAMD64MOVLconst || v.Op == ssa.OpAMD64MOVQconst) {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS
+ }
+ }
+ panic(fmt.Sprintf("bad store type %v", t))
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ // Moving the whole sse2 register is faster
+ // than moving just the correct low portion of it.
+ // There is no xmm->xmm move with 1 byte opcode,
+ // so use movups, which has 2 byte opcode.
+ return x86.AMOVUPS
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ case 8:
+ return x86.AMOVQ
+ case 16:
+ return x86.AMOVUPS // int128s are in SSE registers
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%v", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// memIdx fills out a as an indexed memory reference for v.
+// It assumes that the base register and the index register
+// are v.Args[0].Reg() and v.Args[1].Reg(), respectively.
+// The caller must still use gc.AddAux/gc.AddAux2 to handle v.Aux as necessary.
+func memIdx(a *obj.Addr, v *ssa.Value) {
+ r, i := v.Args[0].Reg(), v.Args[1].Reg()
+ a.Type = obj.TYPE_MEM
+ a.Scale = v.Op.Scale()
+ if a.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ a.Reg = r
+ a.Index = i
+}
+
+// DUFFZERO consists of repeated blocks of 4 MOVUPSs + LEAQ;
+// see runtime/mkduff.go.
+func duffStart(size int64) int64 {
+ x, _ := duff(size)
+ return x
+}
+func duffAdj(size int64) int64 {
+ _, x := duff(size)
+ return x
+}
+
+// duff returns the offset (from duffzero, in bytes) and pointer adjust (in bytes)
+// required to use the duffzero mechanism for a block of the given size.
+func duff(size int64) (int64, int64) {
+ if size < 32 || size > 1024 || size%dzClearStep != 0 {
+ panic("bad duffzero size")
+ }
+ steps := size / dzClearStep
+ blocks := steps / dzBlockLen
+ steps %= dzBlockLen
+ off := dzBlockSize * (dzBlocks - blocks)
+ var adj int64
+ if steps != 0 {
+ off -= dzLeaqSize
+ off -= dzMovSize * steps
+ adj -= dzClearStep * (dzBlockLen - steps)
+ }
+ return off, adj
+}
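+
+// For example, duff(96) returns (331, -32), matching dzOff(96) and dzDI(96)
+// in ggen.go for the same block size.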
+
+func getgFromTLS(s *ssagen.State, r int16) {
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(base.Ctxt) {
+ // MOVQ (TLS), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVQ TLS, r
+ // MOVQ (r)(TLS*1), r
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVQ)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpAMD64VFMADD231SD:
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[2].Reg()}
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.SetFrom3Reg(v.Args[1].Reg())
+ case ssa.OpAMD64ADDQ, ssa.OpAMD64ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQ {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ // 2-address opcode arithmetic
+ case ssa.OpAMD64SUBQ, ssa.OpAMD64SUBL,
+ ssa.OpAMD64MULQ, ssa.OpAMD64MULL,
+ ssa.OpAMD64ANDQ, ssa.OpAMD64ANDL,
+ ssa.OpAMD64ORQ, ssa.OpAMD64ORL,
+ ssa.OpAMD64XORQ, ssa.OpAMD64XORL,
+ ssa.OpAMD64SHLQ, ssa.OpAMD64SHLL,
+ ssa.OpAMD64SHRQ, ssa.OpAMD64SHRL, ssa.OpAMD64SHRW, ssa.OpAMD64SHRB,
+ ssa.OpAMD64SARQ, ssa.OpAMD64SARL, ssa.OpAMD64SARW, ssa.OpAMD64SARB,
+ ssa.OpAMD64ROLQ, ssa.OpAMD64ROLL, ssa.OpAMD64ROLW, ssa.OpAMD64ROLB,
+ ssa.OpAMD64RORQ, ssa.OpAMD64RORL, ssa.OpAMD64RORW, ssa.OpAMD64RORB,
+ ssa.OpAMD64ADDSS, ssa.OpAMD64ADDSD, ssa.OpAMD64SUBSS, ssa.OpAMD64SUBSD,
+ ssa.OpAMD64MULSS, ssa.OpAMD64MULSD, ssa.OpAMD64DIVSS, ssa.OpAMD64DIVSD,
+ ssa.OpAMD64PXOR,
+ ssa.OpAMD64BTSL, ssa.OpAMD64BTSQ,
+ ssa.OpAMD64BTCL, ssa.OpAMD64BTCQ,
+ ssa.OpAMD64BTRL, ssa.OpAMD64BTRQ:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+
+ case ssa.OpAMD64SHRDQ, ssa.OpAMD64SHLDQ:
+ p := s.Prog(v.Op.Asm())
+ lo, hi, bits := v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = bits
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = lo
+ p.SetFrom3Reg(hi)
+
+ case ssa.OpAMD64BLSIQ, ssa.OpAMD64BLSIL,
+ ssa.OpAMD64BLSMSKQ, ssa.OpAMD64BLSMSKL,
+ ssa.OpAMD64BLSRQ, ssa.OpAMD64BLSRL,
+ ssa.OpAMD64TZCNTQ, ssa.OpAMD64TZCNTL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64ANDNQ, ssa.OpAMD64ANDNL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.SetFrom3Reg(v.Args[1].Reg())
+
+ case ssa.OpAMD64DIVQU, ssa.OpAMD64DIVLU, ssa.OpAMD64DIVWU:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+
+ // Zero extend dividend.
+ c := s.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+
+ // Issue divide.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ case ssa.OpAMD64DIVQ, ssa.OpAMD64DIVL, ssa.OpAMD64DIVW:
+ // Arg[0] (the dividend) is in AX.
+ // Arg[1] (the divisor) can be in any other register.
+ // Result[0] (the quotient) is in AX.
+ // Result[1] (the remainder) is in DX.
+ r := v.Args[1].Reg()
+ var j1 *obj.Prog
+
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1. Handle divide by -1 as a special case.
+ if ssa.DivisionNeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ c = s.Prog(x86.ACMPQ)
+ case ssa.OpAMD64DIVL:
+ c = s.Prog(x86.ACMPL)
+ case ssa.OpAMD64DIVW:
+ c = s.Prog(x86.ACMPW)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = r
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+ j1 = s.Prog(x86.AJEQ)
+ j1.To.Type = obj.TYPE_BRANCH
+ }
+
+ // Sign extend dividend.
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ s.Prog(x86.ACQO)
+ case ssa.OpAMD64DIVL:
+ s.Prog(x86.ACDQ)
+ case ssa.OpAMD64DIVW:
+ s.Prog(x86.ACWD)
+ }
+
+ // Issue divide.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+
+ if j1 != nil {
+ // Skip over -1 fixup code.
+ j2 := s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ // Issue -1 fixup code.
+ // n / -1 = -n
+ var n1 *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64DIVQ:
+ n1 = s.Prog(x86.ANEGQ)
+ case ssa.OpAMD64DIVL:
+ n1 = s.Prog(x86.ANEGL)
+ case ssa.OpAMD64DIVW:
+ n1 = s.Prog(x86.ANEGW)
+ }
+ n1.To.Type = obj.TYPE_REG
+ n1.To.Reg = x86.REG_AX
+
+ // n % -1 == 0
+ n2 := s.Prog(x86.AXORL)
+ n2.From.Type = obj.TYPE_REG
+ n2.From.Reg = x86.REG_DX
+ n2.To.Type = obj.TYPE_REG
+ n2.To.Reg = x86.REG_DX
+
+ // TODO(khr): issue only the -1 fixup code we need.
+ // For instance, if only the quotient is used, no point in zeroing the remainder.
+
+ j1.To.SetTarget(n1)
+ j2.To.SetTarget(s.Pc())
+ }
+
+ case ssa.OpAMD64HMULQ, ssa.OpAMD64HMULL, ssa.OpAMD64HMULQU, ssa.OpAMD64HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.OpAMD64MULQU, ssa.OpAMD64MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64MULQU2:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results hi in DX, lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpAMD64DIVQU2:
+ // Arg[0], Arg[1] are already in Dx, AX, as they're the only registers we allow
+ // results q in AX, r in DX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+
+ case ssa.OpAMD64AVGQU:
+ // compute (x+y)/2 unsigned.
+ // Do a 64-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 63rd bit.
+ p := s.Prog(x86.AADDQ)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64ADDQcarry, ssa.OpAMD64ADCQ:
+ r := v.Reg0()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ switch r {
+ case r0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ v.Fatalf("output not in same register as an input %s", v.LongString())
+ }
+
+ case ssa.OpAMD64SUBQborrow, ssa.OpAMD64SBBQ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconstcarry, ssa.OpAMD64ADCQconst, ssa.OpAMD64SUBQconstborrow, ssa.OpAMD64SBBQconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64ADDQconst, ssa.OpAMD64ADDLconst:
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ switch v.AuxInt {
+ case 1:
+ var asm obj.As
+ // Software optimization manual recommends add $1,reg.
+			// But inc/dec is 1 byte smaller. ICC always uses inc;
+			// Clang/GCC choose depending on flags, but prefer add.
+			// Experiments show that inc/dec is both a little faster
+			// and makes the binary a little smaller.
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.AINCL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case -1:
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ADECQ
+ } else {
+ asm = x86.ADECL
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ case 0x80:
+ // 'SUBQ $-0x80, r' is shorter to encode than
+ // and functionally equivalent to 'ADDQ $0x80, r'.
+ asm := x86.ASUBL
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ASUBQ
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -0x80
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconst {
+ asm = x86.ALEAQ
+ } else {
+ asm = x86.ALEAL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpAMD64CMOVQEQ, ssa.OpAMD64CMOVLEQ, ssa.OpAMD64CMOVWEQ,
+ ssa.OpAMD64CMOVQLT, ssa.OpAMD64CMOVLLT, ssa.OpAMD64CMOVWLT,
+ ssa.OpAMD64CMOVQNE, ssa.OpAMD64CMOVLNE, ssa.OpAMD64CMOVWNE,
+ ssa.OpAMD64CMOVQGT, ssa.OpAMD64CMOVLGT, ssa.OpAMD64CMOVWGT,
+ ssa.OpAMD64CMOVQLE, ssa.OpAMD64CMOVLLE, ssa.OpAMD64CMOVWLE,
+ ssa.OpAMD64CMOVQGE, ssa.OpAMD64CMOVLGE, ssa.OpAMD64CMOVWGE,
+ ssa.OpAMD64CMOVQHI, ssa.OpAMD64CMOVLHI, ssa.OpAMD64CMOVWHI,
+ ssa.OpAMD64CMOVQLS, ssa.OpAMD64CMOVLLS, ssa.OpAMD64CMOVWLS,
+ ssa.OpAMD64CMOVQCC, ssa.OpAMD64CMOVLCC, ssa.OpAMD64CMOVWCC,
+ ssa.OpAMD64CMOVQCS, ssa.OpAMD64CMOVLCS, ssa.OpAMD64CMOVWCS,
+ ssa.OpAMD64CMOVQGTF, ssa.OpAMD64CMOVLGTF, ssa.OpAMD64CMOVWGTF,
+ ssa.OpAMD64CMOVQGEF, ssa.OpAMD64CMOVLGEF, ssa.OpAMD64CMOVWGEF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64CMOVQNEF, ssa.OpAMD64CMOVLNEF, ssa.OpAMD64CMOVWNEF:
+ // Flag condition: ^ZERO || PARITY
+ // Generate:
+ // CMOV*NE SRC,DST
+ // CMOV*PS SRC,DST
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQNEF {
+ q = s.Prog(x86.ACMOVQPS)
+ } else if v.Op == ssa.OpAMD64CMOVLNEF {
+ q = s.Prog(x86.ACMOVLPS)
+ } else {
+ q = s.Prog(x86.ACMOVWPS)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = v.Args[1].Reg()
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = v.Reg()
+
+ case ssa.OpAMD64CMOVQEQF, ssa.OpAMD64CMOVLEQF, ssa.OpAMD64CMOVWEQF:
+ // Flag condition: ZERO && !PARITY
+ // Generate:
+ // MOV SRC,AX
+ // CMOV*NE DST,AX
+ // CMOV*PC AX,DST
+ //
+ // TODO(rasky): we could generate:
+ // CMOV*NE DST,SRC
+ // CMOV*PC SRC,DST
+ // But this requires a way for regalloc to know that SRC might be
+ // clobbered by this instruction.
+ if v.Args[1].Reg() != x86.REG_AX {
+ opregreg(s, moveByType(v.Type), x86.REG_AX, v.Args[1].Reg())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ var q *obj.Prog
+ if v.Op == ssa.OpAMD64CMOVQEQF {
+ q = s.Prog(x86.ACMOVQPC)
+ } else if v.Op == ssa.OpAMD64CMOVLEQF {
+ q = s.Prog(x86.ACMOVLPC)
+ } else {
+ q = s.Prog(x86.ACMOVWPC)
+ }
+ q.From.Type = obj.TYPE_REG
+ q.From.Reg = x86.REG_AX
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = v.Reg()
+
+ case ssa.OpAMD64MULQconst, ssa.OpAMD64MULLconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.SetFrom3Reg(v.Args[0].Reg())
+
+ case ssa.OpAMD64ANDQconst:
+ asm := v.Op.Asm()
+ // If the constant is positive and fits into 32 bits, use ANDL.
+ // This saves a few bytes of encoding.
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
+ asm = x86.AANDL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SUBQconst, ssa.OpAMD64SUBLconst,
+ ssa.OpAMD64ANDLconst,
+ ssa.OpAMD64ORQconst, ssa.OpAMD64ORLconst,
+ ssa.OpAMD64XORQconst, ssa.OpAMD64XORLconst,
+ ssa.OpAMD64SHLQconst, ssa.OpAMD64SHLLconst,
+ ssa.OpAMD64SHRQconst, ssa.OpAMD64SHRLconst, ssa.OpAMD64SHRWconst, ssa.OpAMD64SHRBconst,
+ ssa.OpAMD64SARQconst, ssa.OpAMD64SARLconst, ssa.OpAMD64SARWconst, ssa.OpAMD64SARBconst,
+ ssa.OpAMD64ROLQconst, ssa.OpAMD64ROLLconst, ssa.OpAMD64ROLWconst, ssa.OpAMD64ROLBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64SBBQcarrymask, ssa.OpAMD64SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8,
+ ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8,
+ ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ o := v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ if v.AuxInt != 0 && v.Aux == nil {
+ // Emit an additional LEA to add the displacement instead of creating a slow 3 operand LEA.
+ switch v.Op {
+ case ssa.OpAMD64LEAQ1, ssa.OpAMD64LEAQ2, ssa.OpAMD64LEAQ4, ssa.OpAMD64LEAQ8:
+ p = s.Prog(x86.ALEAQ)
+ case ssa.OpAMD64LEAL1, ssa.OpAMD64LEAL2, ssa.OpAMD64LEAL4, ssa.OpAMD64LEAL8:
+ p = s.Prog(x86.ALEAL)
+ case ssa.OpAMD64LEAW1, ssa.OpAMD64LEAW2, ssa.OpAMD64LEAW4, ssa.OpAMD64LEAW8:
+ p = s.Prog(x86.ALEAW)
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = o
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = o
+ }
+ ssagen.AddAux(&p.From, v)
+ case ssa.OpAMD64LEAQ, ssa.OpAMD64LEAL, ssa.OpAMD64LEAW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64CMPQ, ssa.OpAMD64CMPL, ssa.OpAMD64CMPW, ssa.OpAMD64CMPB,
+ ssa.OpAMD64TESTQ, ssa.OpAMD64TESTL, ssa.OpAMD64TESTW, ssa.OpAMD64TESTB,
+ ssa.OpAMD64BTL, ssa.OpAMD64BTQ:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64UCOMISS, ssa.OpAMD64UCOMISD:
+		// The Go assembler has swapped operands for UCOMISx relative to CMP,
+		// so we must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.OpAMD64CMPQconst, ssa.OpAMD64CMPLconst, ssa.OpAMD64CMPWconst, ssa.OpAMD64CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpAMD64BTLconst, ssa.OpAMD64BTQconst,
+ ssa.OpAMD64TESTQconst, ssa.OpAMD64TESTLconst, ssa.OpAMD64TESTWconst, ssa.OpAMD64TESTBconst,
+ ssa.OpAMD64BTSLconst, ssa.OpAMD64BTSQconst,
+ ssa.OpAMD64BTCLconst, ssa.OpAMD64BTCQconst,
+ ssa.OpAMD64BTRLconst, ssa.OpAMD64BTRQconst:
+ op := v.Op
+ if op == ssa.OpAMD64BTQconst && v.AuxInt < 32 {
+ // Emit 32-bit version because it's shorter
+ op = ssa.OpAMD64BTLconst
+ }
+ p := s.Prog(op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.OpAMD64CMPQload, ssa.OpAMD64CMPLload, ssa.OpAMD64CMPWload, ssa.OpAMD64CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.OpAMD64CMPQconstload, ssa.OpAMD64CMPLconstload, ssa.OpAMD64CMPWconstload, ssa.OpAMD64CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.OpAMD64CMPQloadidx8, ssa.OpAMD64CMPQloadidx1, ssa.OpAMD64CMPLloadidx4, ssa.OpAMD64CMPLloadidx1, ssa.OpAMD64CMPWloadidx2, ssa.OpAMD64CMPWloadidx1, ssa.OpAMD64CMPBloadidx1:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[2].Reg()
+ case ssa.OpAMD64CMPQconstloadidx8, ssa.OpAMD64CMPQconstloadidx1, ssa.OpAMD64CMPLconstloadidx4, ssa.OpAMD64CMPLconstloadidx1, ssa.OpAMD64CMPWconstloadidx2, ssa.OpAMD64CMPWconstloadidx1, ssa.OpAMD64CMPBconstloadidx1:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.OpAMD64MOVLconst, ssa.OpAMD64MOVQconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
+ asm := v.Op.Asm()
+ // Use MOVL to move a small constant into a register
+ // when the constant is positive and fits into 32 bits.
+ if 0 <= v.AuxInt && v.AuxInt <= (1<<32-1) {
+			// The upper 32 bits are zeroed automatically when using MOVL.
+ asm = x86.AMOVL
+ }
+ p := s.Prog(asm)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVSSconst, ssa.OpAMD64MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpAMD64MOVQload, ssa.OpAMD64MOVLload, ssa.OpAMD64MOVWload, ssa.OpAMD64MOVBload, ssa.OpAMD64MOVOload,
+ ssa.OpAMD64MOVSSload, ssa.OpAMD64MOVSDload, ssa.OpAMD64MOVBQSXload, ssa.OpAMD64MOVWQSXload, ssa.OpAMD64MOVLQSXload,
+ ssa.OpAMD64MOVBEQload, ssa.OpAMD64MOVBELload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVBloadidx1, ssa.OpAMD64MOVWloadidx1, ssa.OpAMD64MOVLloadidx1, ssa.OpAMD64MOVQloadidx1, ssa.OpAMD64MOVSSloadidx1, ssa.OpAMD64MOVSDloadidx1,
+ ssa.OpAMD64MOVQloadidx8, ssa.OpAMD64MOVSDloadidx8, ssa.OpAMD64MOVLloadidx8, ssa.OpAMD64MOVLloadidx4, ssa.OpAMD64MOVSSloadidx4, ssa.OpAMD64MOVWloadidx2:
+ p := s.Prog(v.Op.Asm())
+ memIdx(&p.From, v)
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64MOVQstore, ssa.OpAMD64MOVSSstore, ssa.OpAMD64MOVSDstore, ssa.OpAMD64MOVLstore, ssa.OpAMD64MOVWstore, ssa.OpAMD64MOVBstore, ssa.OpAMD64MOVOstore,
+ ssa.OpAMD64ADDQmodify, ssa.OpAMD64SUBQmodify, ssa.OpAMD64ANDQmodify, ssa.OpAMD64ORQmodify, ssa.OpAMD64XORQmodify,
+ ssa.OpAMD64ADDLmodify, ssa.OpAMD64SUBLmodify, ssa.OpAMD64ANDLmodify, ssa.OpAMD64ORLmodify, ssa.OpAMD64XORLmodify,
+ ssa.OpAMD64MOVBEQstore, ssa.OpAMD64MOVBELstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64MOVBstoreidx1, ssa.OpAMD64MOVWstoreidx1, ssa.OpAMD64MOVLstoreidx1, ssa.OpAMD64MOVQstoreidx1, ssa.OpAMD64MOVSSstoreidx1, ssa.OpAMD64MOVSDstoreidx1,
+ ssa.OpAMD64MOVQstoreidx8, ssa.OpAMD64MOVSDstoreidx8, ssa.OpAMD64MOVLstoreidx8, ssa.OpAMD64MOVSSstoreidx4, ssa.OpAMD64MOVLstoreidx4, ssa.OpAMD64MOVWstoreidx2,
+ ssa.OpAMD64ADDLmodifyidx1, ssa.OpAMD64ADDLmodifyidx4, ssa.OpAMD64ADDLmodifyidx8, ssa.OpAMD64ADDQmodifyidx1, ssa.OpAMD64ADDQmodifyidx8,
+ ssa.OpAMD64SUBLmodifyidx1, ssa.OpAMD64SUBLmodifyidx4, ssa.OpAMD64SUBLmodifyidx8, ssa.OpAMD64SUBQmodifyidx1, ssa.OpAMD64SUBQmodifyidx8,
+ ssa.OpAMD64ANDLmodifyidx1, ssa.OpAMD64ANDLmodifyidx4, ssa.OpAMD64ANDLmodifyidx8, ssa.OpAMD64ANDQmodifyidx1, ssa.OpAMD64ANDQmodifyidx8,
+ ssa.OpAMD64ORLmodifyidx1, ssa.OpAMD64ORLmodifyidx4, ssa.OpAMD64ORLmodifyidx8, ssa.OpAMD64ORQmodifyidx1, ssa.OpAMD64ORQmodifyidx8,
+ ssa.OpAMD64XORLmodifyidx1, ssa.OpAMD64XORLmodifyidx4, ssa.OpAMD64XORLmodifyidx8, ssa.OpAMD64XORQmodifyidx1, ssa.OpAMD64XORQmodifyidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ memIdx(&p.To, v)
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64ADDQconstmodify, ssa.OpAMD64ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var asm obj.As
+ if v.Op == ssa.OpAMD64ADDQconstmodify {
+ if val == 1 {
+ asm = x86.AINCQ
+ } else {
+ asm = x86.ADECQ
+ }
+ } else {
+ if val == 1 {
+ asm = x86.AINCL
+ } else {
+ asm = x86.ADECL
+ }
+ }
+ p := s.Prog(asm)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.OpAMD64ANDQconstmodify, ssa.OpAMD64ANDLconstmodify, ssa.OpAMD64ORQconstmodify, ssa.OpAMD64ORLconstmodify,
+ ssa.OpAMD64XORQconstmodify, ssa.OpAMD64XORLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val64()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+
+ case ssa.OpAMD64MOVQstoreconst, ssa.OpAMD64MOVLstoreconst, ssa.OpAMD64MOVWstoreconst, ssa.OpAMD64MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpAMD64MOVOstoreconst:
+ sc := v.AuxValAndOff()
+ if sc.Val() != 0 {
+ v.Fatalf("MOVO for non zero constants not implemented: %s", v.LongString())
+ }
+
+ if s.ABI != obj.ABIInternal {
+ // zero X15 manually
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_X15
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+
+ case ssa.OpAMD64MOVQstoreconstidx1, ssa.OpAMD64MOVQstoreconstidx8, ssa.OpAMD64MOVLstoreconstidx1, ssa.OpAMD64MOVLstoreconstidx4, ssa.OpAMD64MOVWstoreconstidx1, ssa.OpAMD64MOVWstoreconstidx2, ssa.OpAMD64MOVBstoreconstidx1,
+ ssa.OpAMD64ADDLconstmodifyidx1, ssa.OpAMD64ADDLconstmodifyidx4, ssa.OpAMD64ADDLconstmodifyidx8, ssa.OpAMD64ADDQconstmodifyidx1, ssa.OpAMD64ADDQconstmodifyidx8,
+ ssa.OpAMD64ANDLconstmodifyidx1, ssa.OpAMD64ANDLconstmodifyidx4, ssa.OpAMD64ANDLconstmodifyidx8, ssa.OpAMD64ANDQconstmodifyidx1, ssa.OpAMD64ANDQconstmodifyidx8,
+ ssa.OpAMD64ORLconstmodifyidx1, ssa.OpAMD64ORLconstmodifyidx4, ssa.OpAMD64ORLconstmodifyidx8, ssa.OpAMD64ORQconstmodifyidx1, ssa.OpAMD64ORQconstmodifyidx8,
+ ssa.OpAMD64XORLconstmodifyidx1, ssa.OpAMD64XORLconstmodifyidx4, ssa.OpAMD64XORLconstmodifyidx8, ssa.OpAMD64XORQconstmodifyidx1, ssa.OpAMD64XORQconstmodifyidx8:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ switch {
+ case p.As == x86.AADDQ && p.From.Offset == 1:
+ p.As = x86.AINCQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDQ && p.From.Offset == -1:
+ p.As = x86.ADECQ
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == 1:
+ p.As = x86.AINCL
+ p.From.Type = obj.TYPE_NONE
+ case p.As == x86.AADDL && p.From.Offset == -1:
+ p.As = x86.ADECL
+ p.From.Type = obj.TYPE_NONE
+ }
+ memIdx(&p.To, v)
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpAMD64MOVLQSX, ssa.OpAMD64MOVWQSX, ssa.OpAMD64MOVBQSX, ssa.OpAMD64MOVLQZX, ssa.OpAMD64MOVWQZX, ssa.OpAMD64MOVBQZX,
+ ssa.OpAMD64CVTTSS2SL, ssa.OpAMD64CVTTSD2SL, ssa.OpAMD64CVTTSS2SQ, ssa.OpAMD64CVTTSD2SQ,
+ ssa.OpAMD64CVTSS2SD, ssa.OpAMD64CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpAMD64CVTSL2SD, ssa.OpAMD64CVTSQ2SD, ssa.OpAMD64CVTSQ2SS, ssa.OpAMD64CVTSL2SS:
+ r := v.Reg()
+ // Break false dependency on destination register.
+ opregreg(s, x86.AXORPS, r, r)
+ opregreg(s, v.Op.Asm(), r, v.Args[0].Reg())
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i, ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ var p *obj.Prog
+ switch v.Op {
+ case ssa.OpAMD64MOVQi2f, ssa.OpAMD64MOVQf2i:
+ p = s.Prog(x86.AMOVQ)
+ case ssa.OpAMD64MOVLi2f, ssa.OpAMD64MOVLf2i:
+ p = s.Prog(x86.AMOVL)
+ }
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64ADDQload, ssa.OpAMD64ADDLload, ssa.OpAMD64SUBQload, ssa.OpAMD64SUBLload,
+ ssa.OpAMD64ANDQload, ssa.OpAMD64ANDLload, ssa.OpAMD64ORQload, ssa.OpAMD64ORLload,
+ ssa.OpAMD64XORQload, ssa.OpAMD64XORLload, ssa.OpAMD64ADDSDload, ssa.OpAMD64ADDSSload,
+ ssa.OpAMD64SUBSDload, ssa.OpAMD64SUBSSload, ssa.OpAMD64MULSDload, ssa.OpAMD64MULSSload,
+ ssa.OpAMD64DIVSDload, ssa.OpAMD64DIVSSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64ADDLloadidx1, ssa.OpAMD64ADDLloadidx4, ssa.OpAMD64ADDLloadidx8, ssa.OpAMD64ADDQloadidx1, ssa.OpAMD64ADDQloadidx8,
+ ssa.OpAMD64SUBLloadidx1, ssa.OpAMD64SUBLloadidx4, ssa.OpAMD64SUBLloadidx8, ssa.OpAMD64SUBQloadidx1, ssa.OpAMD64SUBQloadidx8,
+ ssa.OpAMD64ANDLloadidx1, ssa.OpAMD64ANDLloadidx4, ssa.OpAMD64ANDLloadidx8, ssa.OpAMD64ANDQloadidx1, ssa.OpAMD64ANDQloadidx8,
+ ssa.OpAMD64ORLloadidx1, ssa.OpAMD64ORLloadidx4, ssa.OpAMD64ORLloadidx8, ssa.OpAMD64ORQloadidx1, ssa.OpAMD64ORQloadidx8,
+ ssa.OpAMD64XORLloadidx1, ssa.OpAMD64XORLloadidx4, ssa.OpAMD64XORLloadidx8, ssa.OpAMD64XORQloadidx1, ssa.OpAMD64XORQloadidx8,
+ ssa.OpAMD64ADDSSloadidx1, ssa.OpAMD64ADDSSloadidx4, ssa.OpAMD64ADDSDloadidx1, ssa.OpAMD64ADDSDloadidx8,
+ ssa.OpAMD64SUBSSloadidx1, ssa.OpAMD64SUBSSloadidx4, ssa.OpAMD64SUBSDloadidx1, ssa.OpAMD64SUBSDloadidx8,
+ ssa.OpAMD64MULSSloadidx1, ssa.OpAMD64MULSSloadidx4, ssa.OpAMD64MULSDloadidx1, ssa.OpAMD64MULSDloadidx8,
+ ssa.OpAMD64DIVSSloadidx1, ssa.OpAMD64DIVSSloadidx4, ssa.OpAMD64DIVSDloadidx1, ssa.OpAMD64DIVSDloadidx8:
+ p := s.Prog(v.Op.Asm())
+
+ r, i := v.Args[1].Reg(), v.Args[2].Reg()
+ p.From.Type = obj.TYPE_MEM
+ p.From.Scale = v.Op.Scale()
+ if p.From.Scale == 1 && i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Reg = r
+ p.From.Index = i
+
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64DUFFZERO:
+ if s.ABI != obj.ABIInternal {
+ // zero X15 manually
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ off := duffStart(v.AuxInt)
+ adj := duffAdj(v.AuxInt)
+ var p *obj.Prog
+ if adj != 0 {
+ p = s.Prog(x86.ALEAQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = adj
+ p.From.Reg = x86.REG_DI
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_DI
+ }
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = off
+ case ssa.OpAMD64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffcopy
+ if v.AuxInt%16 != 0 {
+ v.Fatalf("bad DUFFCOPY AuxInt %v", v.AuxInt)
+ }
+ p.To.Offset = 14 * (64 - v.AuxInt/16)
+ // 14 and 64 are magic constants. 14 is the number of bytes to encode:
+ // MOVUPS (SI), X0
+ // ADDQ $16, SI
+ // MOVUPS X0, (DI)
+ // ADDQ $16, DI
+ // and 64 is the number of such blocks. See src/runtime/duff_amd64.s:duffcopy.
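+		// For example, copying 32 bytes (AuxInt=32) needs only the last 2 blocks,
+		// so the call enters duffcopy at offset 14*(64-2) = 868.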
+
+ case ssa.OpCopy: // TODO: use MOVQreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpAMD64LoweredHasCPUFeature:
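+		// The aux symbol points at a runtime flag byte recording whether the CPU
+		// feature is available; zero-extend that byte into the result register.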
+ p := s.Prog(x86.AMOVBQZX)
+ p.From.Type = obj.TYPE_MEM
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+		// of the register arguments. The loop below runs only once per function,
+		// because RegArgs is cleared after the first Arg op is processed.
+ for _, ap := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of return PC pushed on stack.
+ addr := ssagen.SpillSlotAddr(ap, x86.REG_SP, v.Block.Func.Config.PtrSize)
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: ap.Reg, Addr: addr, Unspill: loadByType(ap.Type), Spill: storeByType(ap.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
+ case ssa.OpAMD64LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpAMD64LoweredGetG:
+ if s.ABI == obj.ABIInternal {
+ v.Fatalf("LoweredGetG should not appear in ABIInternal")
+ }
+ r := v.Reg()
+ getgFromTLS(s, r)
+ case ssa.OpAMD64CALLstatic, ssa.OpAMD64CALLtail:
+ if s.ABI == obj.ABI0 && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABIInternal {
+ // zeroing X15 when entering ABIInternal from ABI0
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ // set G register from TLS
+ getgFromTLS(s, x86.REG_R14)
+ }
+ if v.Op == ssa.OpAMD64CALLtail {
+ s.TailCall(v)
+ break
+ }
+ s.Call(v)
+ if s.ABI == obj.ABIInternal && v.Aux.(*ssa.AuxCall).Fn.ABI() == obj.ABI0 {
+			// zero X15 again on return to ABIInternal code: the ABI0 callee may have clobbered it
+ if buildcfg.GOOS != "plan9" { // do not use SSE on Plan 9
+ opregreg(s, x86.AXORPS, x86.REG_X15, x86.REG_X15)
+ }
+ // set G register from TLS
+ getgFromTLS(s, x86.REG_R14)
+ }
+ case ssa.OpAMD64CALLclosure, ssa.OpAMD64CALLinter:
+ s.Call(v)
+
+ case ssa.OpAMD64LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -8 // PC is stored 8 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ mov := x86.AMOVQ
+ if types.PtrSize == 4 {
+ mov = x86.AMOVL
+ }
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on amd64, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ // arg0 is in DI. Set sym to match where regalloc put arg1.
+ p.To.Sym = ssagen.GCWriteBarrierReg[v.Args[1].Reg()]
+
+ case ssa.OpAMD64LoweredPanicBoundsA, ssa.OpAMD64LoweredPanicBoundsB, ssa.OpAMD64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(int64(2 * types.PtrSize)) // space used in callee args area by assembly stubs
+
+ case ssa.OpAMD64NEGQ, ssa.OpAMD64NEGL,
+ ssa.OpAMD64BSWAPQ, ssa.OpAMD64BSWAPL,
+ ssa.OpAMD64NOTQ, ssa.OpAMD64NOTL:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64NEGLflags:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ, ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ switch v.Op {
+ case ssa.OpAMD64BSFQ, ssa.OpAMD64BSRQ:
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64BSFL, ssa.OpAMD64BSRL, ssa.OpAMD64SQRTSD, ssa.OpAMD64SQRTSS:
+ p.To.Reg = v.Reg()
+ }
+ case ssa.OpAMD64ROUNDSD:
+ p := s.Prog(v.Op.Asm())
+ val := v.AuxInt
+ // 0 means math.RoundToEven, 1 Floor, 2 Ceil, 3 Trunc
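+		// The value is passed through unchanged as the ROUNDSD immediate (its rounding-control field).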
+ if val < 0 || val > 3 {
+ v.Fatalf("Invalid rounding mode")
+ }
+ p.From.Offset = val
+ p.From.Type = obj.TYPE_CONST
+ p.SetFrom3Reg(v.Args[0].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpAMD64POPCNTQ, ssa.OpAMD64POPCNTL:
+ if v.Args[0].Reg() != v.Reg() {
+ // POPCNT on Intel has a false dependency on the destination register.
+ // Xor register with itself to break the dependency.
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQ, ssa.OpAMD64SETNE,
+ ssa.OpAMD64SETL, ssa.OpAMD64SETLE,
+ ssa.OpAMD64SETG, ssa.OpAMD64SETGE,
+ ssa.OpAMD64SETGF, ssa.OpAMD64SETGEF,
+ ssa.OpAMD64SETB, ssa.OpAMD64SETBE,
+ ssa.OpAMD64SETORD, ssa.OpAMD64SETNAN,
+ ssa.OpAMD64SETA, ssa.OpAMD64SETAE,
+ ssa.OpAMD64SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpAMD64SETEQstore, ssa.OpAMD64SETNEstore,
+ ssa.OpAMD64SETLstore, ssa.OpAMD64SETLEstore,
+ ssa.OpAMD64SETGstore, ssa.OpAMD64SETGEstore,
+ ssa.OpAMD64SETBstore, ssa.OpAMD64SETBEstore,
+ ssa.OpAMD64SETAstore, ssa.OpAMD64SETAEstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpAMD64SETNEF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+		// ORL avoids a partial register write and is smaller than the ORQ the old compiler used.
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
+
+ case ssa.OpAMD64SETEQF:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+		// ANDL avoids a partial register write and is smaller than the ANDQ the old compiler used.
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
+
+ case ssa.OpAMD64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64FlagEQ, ssa.OpAMD64FlagLT_ULT, ssa.OpAMD64FlagLT_UGT, ssa.OpAMD64FlagGT_ULT, ssa.OpAMD64FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64AddTupleFirst32, ssa.OpAMD64AddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpAMD64REPSTOSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSQ)
+ case ssa.OpAMD64REPMOVSQ:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSQ)
+ case ssa.OpAMD64LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpAMD64MOVBatomicload, ssa.OpAMD64MOVLatomicload, ssa.OpAMD64MOVQatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64XCHGB, ssa.OpAMD64XCHGL, ssa.OpAMD64XCHGQ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg0()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64XADDLlock, ssa.OpAMD64XADDQlock:
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Reg0()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64CMPXCHGLlock, ssa.OpAMD64CMPXCHGQlock:
+ if v.Args[1].Reg() != x86.REG_AX {
+ v.Fatalf("input[1] not in AX %s", v.LongString())
+ }
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(x86.ASETEQ)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpAMD64ANDBlock, ssa.OpAMD64ANDLlock, ssa.OpAMD64ORBlock, ssa.OpAMD64ORLlock:
+ s.Prog(x86.ALOCK)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpAMD64PrefetchT0, ssa.OpAMD64PrefetchNTA:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ p.To.Offset += 4
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(x86.AMOVQ)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockAMD64EQ: {x86.AJEQ, x86.AJNE},
+ ssa.BlockAMD64NE: {x86.AJNE, x86.AJEQ},
+ ssa.BlockAMD64LT: {x86.AJLT, x86.AJGE},
+ ssa.BlockAMD64GE: {x86.AJGE, x86.AJLT},
+ ssa.BlockAMD64LE: {x86.AJLE, x86.AJGT},
+ ssa.BlockAMD64GT: {x86.AJGT, x86.AJLE},
+ ssa.BlockAMD64OS: {x86.AJOS, x86.AJOC},
+ ssa.BlockAMD64OC: {x86.AJOC, x86.AJOS},
+ ssa.BlockAMD64ULT: {x86.AJCS, x86.AJCC},
+ ssa.BlockAMD64UGE: {x86.AJCC, x86.AJCS},
+ ssa.BlockAMD64UGT: {x86.AJHI, x86.AJLS},
+ ssa.BlockAMD64ULE: {x86.AJLS, x86.AJHI},
+ ssa.BlockAMD64ORD: {x86.AJPC, x86.AJPS},
+ ssa.BlockAMD64NAN: {x86.AJPS, x86.AJPC},
+}
+
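+// eqfJumps and nefJumps model the floating-point EQF/NEF branches as pairs of jumps,
+// because x86 reports an unordered (NaN) comparison in the parity flag:
+// EQF is "equal and ordered", NEF is "not equal or unordered".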
+var eqfJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in rax:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockAMD64EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.BlockAMD64NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.BlockAMD64EQ, ssa.BlockAMD64NE,
+ ssa.BlockAMD64LT, ssa.BlockAMD64GE,
+ ssa.BlockAMD64LE, ssa.BlockAMD64GT,
+ ssa.BlockAMD64OS, ssa.BlockAMD64OC,
+ ssa.BlockAMD64ULT, ssa.BlockAMD64UGT,
+ ssa.BlockAMD64ULE, ssa.BlockAMD64UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/amd64/versions_test.go b/src/cmd/compile/internal/amd64/versions_test.go
new file mode 100644
index 0000000..7aa697b
--- /dev/null
+++ b/src/cmd/compile/internal/amd64/versions_test.go
@@ -0,0 +1,393 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package amd64_test
+
+import (
+ "bufio"
+ "debug/elf"
+ "debug/macho"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "math"
+ "math/bits"
+ "os"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+)
+
+// Test to make sure that when building for GOAMD64=v1, we don't
+// use any >v1 instructions.
+func TestGoAMD64v1(t *testing.T) {
+ if runtime.GOARCH != "amd64" {
+ t.Skip("amd64-only test")
+ }
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" {
+ t.Skip("test only works on elf or macho platforms")
+ }
+ if v := os.Getenv("GOAMD64"); v != "" && v != "v1" {
+ // Test runs only on v1 (which is the default).
+ // TODO: use build tags from #45454 instead.
+ t.Skip("GOAMD64 already set")
+ }
+ if os.Getenv("TESTGOAMD64V1") != "" {
+ t.Skip("recursive call")
+ }
+
+ // Make a binary which will be a modified version of the
+ // currently running binary.
+ dst, err := os.CreateTemp("", "TestGoAMD64v1")
+ if err != nil {
+ t.Fatalf("failed to create temp file: %v", err)
+ }
+ defer os.Remove(dst.Name())
+ dst.Chmod(0500) // make executable
+
+ // Clobber all the non-v1 opcodes.
+ opcodes := map[string]bool{}
+ var features []string
+ for feature, opcodeList := range featureToOpcodes {
+ if runtimeFeatures[feature] {
+ features = append(features, fmt.Sprintf("cpu.%s=off", feature))
+ }
+ for _, op := range opcodeList {
+ opcodes[op] = true
+ }
+ }
+ clobber(t, os.Args[0], dst, opcodes)
+ if err = dst.Close(); err != nil {
+ t.Fatalf("can't close binary: %v", err)
+ }
+
+ // Run the resulting binary.
+ cmd := exec.Command(dst.Name())
+ testenv.CleanCmdEnv(cmd)
+ cmd.Env = append(cmd.Env, "TESTGOAMD64V1=yes")
+ cmd.Env = append(cmd.Env, fmt.Sprintf("GODEBUG=%s", strings.Join(features, ",")))
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("couldn't execute test: %s", err)
+ }
+ // Expect to see output of the form "PASS\n", unless the test binary
+ // was compiled for coverage (in which case there will be an extra line).
+ success := false
+ lines := strings.Split(string(out), "\n")
+ if len(lines) == 2 {
+ success = lines[0] == "PASS" && lines[1] == ""
+ } else if len(lines) == 3 {
+ success = lines[0] == "PASS" &&
+ strings.HasPrefix(lines[1], "coverage") && lines[2] == ""
+ }
+ if !success {
+ t.Fatalf("test reported error: %s lines=%+v", string(out), lines)
+ }
+}
+
+// clobber copies the binary src to dst, replacing all the instructions in opcodes with
+// faulting instructions.
+func clobber(t *testing.T, src string, dst *os.File, opcodes map[string]bool) {
+ // Run objdump to get disassembly.
+ var re *regexp.Regexp
+ var disasm io.Reader
+ if false {
+ // TODO: go tool objdump doesn't disassemble the bmi1 instructions
+ // in question correctly. See issue 48584.
+ cmd := exec.Command("go", "tool", "objdump", src)
+ var err error
+ disasm, err = cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+ re = regexp.MustCompile(`^[^:]*:[-0-9]+\s+0x([0-9a-f]+)\s+([0-9a-f]+)\s+([A-Z]+)`)
+ } else {
+ // TODO: we're depending on platform-native objdump here. Hence the Skipf
+ // below if it doesn't run for some reason.
+ cmd := exec.Command("objdump", "-d", src)
+ var err error
+ disasm, err = cmd.StdoutPipe()
+ if err != nil {
+ t.Skipf("can't run test due to missing objdump: %s", err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+ re = regexp.MustCompile(`^\s*([0-9a-f]+):\s*((?:[0-9a-f][0-9a-f] )+)\s*([a-z0-9]+)`)
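+		// The regexp matches GNU objdump -d lines such as (illustrative):
+		//   4004d6:	55                   	push   %rbp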
+ }
+
+ // Find all the instruction addresses we need to edit.
+ virtualEdits := map[uint64]bool{}
+ scanner := bufio.NewScanner(disasm)
+ for scanner.Scan() {
+ line := scanner.Text()
+ parts := re.FindStringSubmatch(line)
+ if len(parts) == 0 {
+ continue
+ }
+ addr, err := strconv.ParseUint(parts[1], 16, 64)
+ if err != nil {
+ continue // not a hex address
+ }
+ opcode := strings.ToLower(parts[3])
+ if !opcodes[opcode] {
+ continue
+ }
+ t.Logf("clobbering instruction %s", line)
+ n := (len(parts[2]) - strings.Count(parts[2], " ")) / 2 // number of bytes in instruction encoding
+ for i := 0; i < n; i++ {
+ // Only really need to make the first byte faulting, but might
+ // as well make all the bytes faulting.
+ virtualEdits[addr+uint64(i)] = true
+ }
+ }
+
+ // Figure out where in the binary the edits must be done.
+ physicalEdits := map[uint64]bool{}
+ if e, err := elf.Open(src); err == nil {
+ for _, sec := range e.Sections {
+ vaddr := sec.Addr
+ paddr := sec.Offset
+ size := sec.Size
+ for a := range virtualEdits {
+ if a >= vaddr && a < vaddr+size {
+ physicalEdits[paddr+(a-vaddr)] = true
+ }
+ }
+ }
+ } else if m, err2 := macho.Open(src); err2 == nil {
+ for _, sec := range m.Sections {
+ vaddr := sec.Addr
+ paddr := uint64(sec.Offset)
+ size := sec.Size
+ for a := range virtualEdits {
+ if a >= vaddr && a < vaddr+size {
+ physicalEdits[paddr+(a-vaddr)] = true
+ }
+ }
+ }
+ } else {
+ t.Log(err)
+ t.Log(err2)
+ t.Fatal("executable format not elf or macho")
+ }
+ if len(virtualEdits) != len(physicalEdits) {
+ t.Fatal("couldn't find an instruction in text sections")
+ }
+
+ // Copy source to destination, making edits along the way.
+ f, err := os.Open(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ r := bufio.NewReader(f)
+ w := bufio.NewWriter(dst)
+ a := uint64(0)
+ done := 0
+ for {
+ b, err := r.ReadByte()
+ if err == io.EOF {
+ break
+ }
+ if err != nil {
+ t.Fatal("can't read")
+ }
+ if physicalEdits[a] {
+ b = 0xcc // INT3 opcode
+ done++
+ }
+ err = w.WriteByte(b)
+ if err != nil {
+ t.Fatal("can't write")
+ }
+ a++
+ }
+ if done != len(physicalEdits) {
+ t.Fatal("physical edits remaining")
+ }
+ w.Flush()
+ f.Close()
+}
+
+func setOf(keys ...string) map[string]bool {
+ m := make(map[string]bool, len(keys))
+ for _, key := range keys {
+ m[key] = true
+ }
+ return m
+}
+
+var runtimeFeatures = setOf(
+ "adx", "aes", "avx", "avx2", "bmi1", "bmi2", "erms", "fma",
+ "pclmulqdq", "popcnt", "rdtscp", "sse3", "sse41", "sse42", "ssse3",
+)
+
+var featureToOpcodes = map[string][]string{
+	// Note: we include *q, *l, and plain opcodes here.
+	// go tool objdump doesn't include a [QL] suffix on popcnt instructions until CL 351889;
+	// native objdump doesn't include [QL] on linux.
+ "popcnt": {"popcntq", "popcntl", "popcnt"},
+ "bmi1": {"andnq", "andnl", "andn", "blsiq", "blsil", "blsi", "blsmskq", "blsmskl", "blsmsk", "blsrq", "blsrl", "blsr", "tzcntq", "tzcntl", "tzcnt"},
+ "sse41": {"roundsd"},
+ "fma": {"vfmadd231sd"},
+ "movbe": {"movbeqq", "movbeq", "movbell", "movbel", "movbe"},
+}
+
+// TestPopCnt exercises the POPCNT instruction, if available.
+func TestPopCnt(t *testing.T) {
+ for _, tt := range []struct {
+ x uint64
+ want int
+ }{
+ {0b00001111, 4},
+ {0b00001110, 3},
+ {0b00001100, 2},
+ {0b00000000, 0},
+ } {
+ if got := bits.OnesCount64(tt.x); got != tt.want {
+ t.Errorf("OnesCount64(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ if got := bits.OnesCount32(uint32(tt.x)); got != tt.want {
+ t.Errorf("OnesCount32(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ }
+}
+
+// TestAndNot exercises ANDN, if available.
+func TestAndNot(t *testing.T) {
+ for _, tt := range []struct {
+ x, y, want uint64
+ }{
+ {0b00001111, 0b00000011, 0b1100},
+ {0b00001111, 0b00001100, 0b0011},
+ {0b00000000, 0b00000000, 0b0000},
+ } {
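+		// x &^ y is Go's AND NOT (bit clear) operator; with BMI1 it can compile to a single ANDN.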
+ if got := tt.x &^ tt.y; got != tt.want {
+ t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want)
+ }
+ if got := uint32(tt.x) &^ uint32(tt.y); got != uint32(tt.want) {
+ t.Errorf("%#x &^ %#x = %#x, want %#x", tt.x, tt.y, got, tt.want)
+ }
+ }
+}
+
+// TestBLSI exercises BLSI, if available.
+func TestBLSI(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b001},
+ {0b00001110, 0b010},
+ {0b00001100, 0b100},
+ {0b11000110, 0b010},
+ {0b00000000, 0b000},
+ } {
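+		// x & -x isolates the lowest set bit of x, which is exactly what BLSI computes.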
+ if got := tt.x & -tt.x; got != tt.want {
+ t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) & -uint32(tt.x); got != uint32(tt.want) {
+ t.Errorf("%#x & (-%#x) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ }
+}
+
+// TestBLSMSK exercises BLSMSK, if available.
+func TestBLSMSK(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b001},
+ {0b00001110, 0b011},
+ {0b00001100, 0b111},
+ {0b11000110, 0b011},
+ {0b00000000, 1<<64 - 1},
+ } {
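+		// x ^ (x-1) yields a mask of the lowest set bit and all bits below it, which is what BLSMSK computes.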
+ if got := tt.x ^ (tt.x - 1); got != tt.want {
+ t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) ^ (uint32(tt.x) - 1); got != uint32(tt.want) {
+ t.Errorf("%#x ^ (%#x-1) = %#x, want %#x", tt.x, tt.x, got, uint32(tt.want))
+ }
+ }
+}
+
+// TestBLSR exercises BLSR, if available.
+func TestBLSR(t *testing.T) {
+ for _, tt := range []struct {
+ x, want uint64
+ }{
+ {0b00001111, 0b00001110},
+ {0b00001110, 0b00001100},
+ {0b00001100, 0b00001000},
+ {0b11000110, 0b11000100},
+ {0b00000000, 0b00000000},
+ } {
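+		// x & (x-1) clears the lowest set bit of x, which is what BLSR computes.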
+ if got := tt.x & (tt.x - 1); got != tt.want {
+ t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ if got := uint32(tt.x) & (uint32(tt.x) - 1); got != uint32(tt.want) {
+ t.Errorf("%#x & (%#x-1) = %#x, want %#x", tt.x, tt.x, got, tt.want)
+ }
+ }
+}
+
+func TestTrailingZeros(t *testing.T) {
+ for _, tt := range []struct {
+ x uint64
+ want int
+ }{
+ {0b00001111, 0},
+ {0b00001110, 1},
+ {0b00001100, 2},
+ {0b00001000, 3},
+ {0b00000000, 64},
+ } {
+ if got := bits.TrailingZeros64(tt.x); got != tt.want {
+ t.Errorf("TrailingZeros64(%#x) = %d, want %d", tt.x, got, tt.want)
+ }
+ want := tt.want
+ if want == 64 {
+ want = 32
+ }
+ if got := bits.TrailingZeros32(uint32(tt.x)); got != want {
+			t.Errorf("TrailingZeros32(%#x) = %d, want %d", tt.x, got, want)
+ }
+ }
+}
+
+func TestRound(t *testing.T) {
+ for _, tt := range []struct {
+ x, want float64
+ }{
+ {1.4, 1},
+ {1.5, 2},
+ {1.6, 2},
+ {2.4, 2},
+ {2.5, 2},
+ {2.6, 3},
+ } {
+ if got := math.RoundToEven(tt.x); got != tt.want {
+ t.Errorf("RoundToEven(%f) = %f, want %f", tt.x, got, tt.want)
+ }
+ }
+}
+
+func TestFMA(t *testing.T) {
+ for _, tt := range []struct {
+ x, y, z, want float64
+ }{
+ {2, 3, 4, 10},
+ {3, 4, 5, 17},
+ } {
+ if got := math.FMA(tt.x, tt.y, tt.z); got != tt.want {
+ t.Errorf("FMA(%f,%f,%f) = %f, want %f", tt.x, tt.y, tt.z, got, tt.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/arm/galign.go b/src/cmd/compile/internal/arm/galign.go
new file mode 100644
index 0000000..23e52ba
--- /dev/null
+++ b/src/cmd/compile/internal/arm/galign.go
@@ -0,0 +1,25 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/arm"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &arm.Linkarm
+ arch.REGSP = arm.REGSP
+ arch.MAXWIDTH = (1 << 32) - 1
+ arch.SoftFloat = buildcfg.GOARM == 5
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/arm/ggen.go b/src/cmd/compile/internal/arm/ggen.go
new file mode 100644
index 0000000..f2c6763
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ggen.go
@@ -0,0 +1,60 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, r0 *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if *r0 == 0 {
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, arm.REG_R0, 0)
+ *r0 = 1
+ }
+
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REGSP, 4+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
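+		// For example, cnt=64 bytes (16 words) enters duffzero at offset 4*(128-16) = 448,
+		// skipping the stores that aren't needed.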
+ } else {
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, 4+off, obj.TYPE_REG, arm.REG_R1, 0)
+ p.Reg = arm.REGSP
+ p = pp.Append(p, arm.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, arm.REG_R2, 0)
+ p.Reg = arm.REG_R1
+ p = pp.Append(p, arm.AMOVW, obj.TYPE_REG, arm.REG_R0, 0, obj.TYPE_MEM, arm.REG_R1, 4)
+ p1 := p
+ p.Scond |= arm.C_PBIT
+ p = pp.Append(p, arm.ACMP, obj.TYPE_REG, arm.REG_R1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm.REG_R2
+ p = pp.Append(p, arm.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(arm.AAND)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REG_R0
+ p.Scond = arm.C_SCOND_EQ
+ return p
+}
diff --git a/src/cmd/compile/internal/arm/ssa.go b/src/cmd/compile/internal/arm/ssa.go
new file mode 100644
index 0000000..063fb65
--- /dev/null
+++ b/src/cmd/compile/internal/arm/ssa.go
@@ -0,0 +1,980 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "math"
+ "math/bits"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm.AMOVB
+ } else {
+ return arm.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm.AMOVH
+ } else {
+ return arm.AMOVHU
+ }
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm.AMOVF
+ case 8:
+ return arm.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm.AMOVB
+ case 2:
+ return arm.AMOVH
+ case 4:
+ return arm.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+// shift is used as the Offset in obj.TYPE_SHIFT operands to encode shifted register operands.
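+// Layout: bits 0-3 hold the register being shifted, bits 5-6 the shift kind
+// (<<, >>, ->, @>), and bit 4 selects a register-specified shift; the shift
+// amount is a constant in bits 7-11 or a register number in bits 8-11.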
+type shift int64
+
+// copied from ../../../internal/obj/util.go:/TYPE_SHIFT
+func (v shift) String() string {
+ op := "<<>>->@>"[((v>>5)&3)<<1:]
+ if v&(1<<4) != 0 {
+ // register shift
+ return fmt.Sprintf("R%d%c%cR%d", v&15, op[0], op[1], (v>>8)&15)
+ } else {
+ // constant shift
+ return fmt.Sprintf("R%d%c%c%d", v&15, op[0], op[1], (v>>7)&31)
+ }
+}
+
+// makeshift encodes a register shifted by a constant
+func makeshift(v *ssa.Value, reg int16, typ int64, s int64) shift {
+ if s < 0 || s >= 32 {
+ v.Fatalf("shift out of range: %d", s)
+ }
+ return shift(int64(reg&0xf) | typ | (s&31)<<7)
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeshift(v, r1, typ, n))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// makeregshift encodes a register shifted by a register
+func makeregshift(r1 int16, typ int64, r2 int16) shift {
+ return shift(int64(r1&0xf) | typ | int64(r2&0xf)<<8 | 1<<4)
+}
+
+// genregshift generates a Prog for r = r0 op (r1 shifted by r2)
+func genregshift(s *ssagen.State, as obj.As, r0, r1, r2, r int16, typ int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = int64(makeregshift(r1, typ, r2))
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// getBFC finds a (lsb, width) pair for BFC.
+// lsb must be in [0, 31], width must be in [1, 32 - lsb].
+// It returns (0xffffffff, 0) if v is not a bit pattern of the form 0...01...10...0.
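+// For example, getBFC(0x3ffffffc) returns (2, 28).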
+func getBFC(v uint32) (uint32, uint32) {
+ var m, l uint32
+ // BFC is not applicable with zero
+ if v == 0 {
+ return 0xffffffff, 0
+ }
+ // find the lowest set bit, for example l=2 for 0x3ffffffc
+ l = uint32(bits.TrailingZeros32(v))
+ // m-1 represents the highest set bit index, for example m=30 for 0x3ffffffc
+ m = 32 - uint32(bits.LeadingZeros32(v))
+ // check if v is a binary like 0...01...10...0
+ if (1<<m)-(1<<l) == v {
+ // it must be m > l for non-zero v
+ return l, m - l
+ }
+ // invalid
+ return 0xffffffff, 0
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARMMOVWreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm.AMOVW
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm.AMOVF
+ case 8:
+ as = arm.AMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARMMOVWnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpARMADD,
+ ssa.OpARMADC,
+ ssa.OpARMSUB,
+ ssa.OpARMSBC,
+ ssa.OpARMRSB,
+ ssa.OpARMAND,
+ ssa.OpARMOR,
+ ssa.OpARMXOR,
+ ssa.OpARMBIC,
+ ssa.OpARMMUL,
+ ssa.OpARMADDF,
+ ssa.OpARMADDD,
+ ssa.OpARMSUBF,
+ ssa.OpARMSUBD,
+ ssa.OpARMSLL,
+ ssa.OpARMSRL,
+ ssa.OpARMSRA,
+ ssa.OpARMMULF,
+ ssa.OpARMMULD,
+ ssa.OpARMNMULF,
+ ssa.OpARMNMULD,
+ ssa.OpARMDIVF,
+ ssa.OpARMDIVD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRR:
+ genregshift(s, arm.AMOVW, 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR)
+ case ssa.OpARMMULAF, ssa.OpARMMULAD, ssa.OpARMMULSF, ssa.OpARMMULSD, ssa.OpARMFMULAD:
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ if r != r0 {
+ v.Fatalf("result and addend are not in the same register: %v", v.LongString())
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMADDS,
+ ssa.OpARMSUBS:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMSRAcond:
+		// ARM shift instructions use only the low-order byte of the shift amount,
+		// so generate conditional instructions to deal with large shifts.
+ // flag is already set
+ // SRA.HS $31, Rarg0, Rdst // shift 31 bits to get the sign bit
+ // SRA.LO Rarg1, Rarg0, Rdst
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 31
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p = s.Prog(arm.ASRA)
+ p.Scond = arm.C_SCOND_LO
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARMBFX, ssa.OpARMBFXU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3Const(v.AuxInt & 0xff)
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMANDconst, ssa.OpARMBICconst:
+ // try to optimize ANDconst and BICconst to BFC, which saves bytes and ticks
+ // BFC is only available on ARMv7, and its result and source are in the same register
+ if buildcfg.GOARM == 7 && v.Reg() == v.Args[0].Reg() {
+ var val uint32
+ if v.Op == ssa.OpARMANDconst {
+ val = ^uint32(v.AuxInt)
+ } else { // BICconst
+ val = uint32(v.AuxInt)
+ }
+ lsb, width := getBFC(val)
+ // omit BFC for ARM's imm12
+ if 8 < width && width < 24 {
+ p := s.Prog(arm.ABFC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(width)
+ p.SetFrom3Const(int64(lsb))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+ }
+ }
+ // fall back to ordinary form
+ fallthrough
+ case ssa.OpARMADDconst,
+ ssa.OpARMADCconst,
+ ssa.OpARMSUBconst,
+ ssa.OpARMSBCconst,
+ ssa.OpARMRSBconst,
+ ssa.OpARMRSCconst,
+ ssa.OpARMORconst,
+ ssa.OpARMXORconst,
+ ssa.OpARMSLLconst,
+ ssa.OpARMSRLconst,
+ ssa.OpARMSRAconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMADDSconst,
+ ssa.OpARMSUBSconst,
+ ssa.OpARMRSBSconst:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_SBIT
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARMSRRconst:
+ genshift(s, v, arm.AMOVW, 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMADDshiftLL,
+ ssa.OpARMADCshiftLL,
+ ssa.OpARMSUBshiftLL,
+ ssa.OpARMSBCshiftLL,
+ ssa.OpARMRSBshiftLL,
+ ssa.OpARMRSCshiftLL,
+ ssa.OpARMANDshiftLL,
+ ssa.OpARMORshiftLL,
+ ssa.OpARMXORshiftLL,
+ ssa.OpARMBICshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMADDSshiftLL,
+ ssa.OpARMSUBSshiftLL,
+ ssa.OpARMRSBSshiftLL:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LL, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRL,
+ ssa.OpARMADCshiftRL,
+ ssa.OpARMSUBshiftRL,
+ ssa.OpARMSBCshiftRL,
+ ssa.OpARMRSBshiftRL,
+ ssa.OpARMRSCshiftRL,
+ ssa.OpARMANDshiftRL,
+ ssa.OpARMORshiftRL,
+ ssa.OpARMXORshiftRL,
+ ssa.OpARMBICshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMADDSshiftRL,
+ ssa.OpARMSUBSshiftRL,
+ ssa.OpARMRSBSshiftRL:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_LR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRA,
+ ssa.OpARMADCshiftRA,
+ ssa.OpARMSUBshiftRA,
+ ssa.OpARMSBCshiftRA,
+ ssa.OpARMRSBshiftRA,
+ ssa.OpARMRSCshiftRA,
+ ssa.OpARMANDshiftRA,
+ ssa.OpARMORshiftRA,
+ ssa.OpARMXORshiftRA,
+ ssa.OpARMBICshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMADDSshiftRA,
+ ssa.OpARMSUBSshiftRA,
+ ssa.OpARMRSBSshiftRA:
+ p := genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg0(), arm.SHIFT_AR, v.AuxInt)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMXORshiftRR:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_RR, v.AuxInt)
+ case ssa.OpARMMVNshiftLL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMMVNshiftRL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMMVNshiftRA:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMMVNshiftLLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMMVNshiftRLreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMMVNshiftRAreg:
+ genregshift(s, v.Op.Asm(), 0, v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDshiftLLreg,
+ ssa.OpARMADCshiftLLreg,
+ ssa.OpARMSUBshiftLLreg,
+ ssa.OpARMSBCshiftLLreg,
+ ssa.OpARMRSBshiftLLreg,
+ ssa.OpARMRSCshiftLLreg,
+ ssa.OpARMANDshiftLLreg,
+ ssa.OpARMORshiftLLreg,
+ ssa.OpARMXORshiftLLreg,
+ ssa.OpARMBICshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LL)
+ case ssa.OpARMADDSshiftLLreg,
+ ssa.OpARMSUBSshiftLLreg,
+ ssa.OpARMRSBSshiftLLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LL)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRLreg,
+ ssa.OpARMADCshiftRLreg,
+ ssa.OpARMSUBshiftRLreg,
+ ssa.OpARMSBCshiftRLreg,
+ ssa.OpARMRSBshiftRLreg,
+ ssa.OpARMRSCshiftRLreg,
+ ssa.OpARMANDshiftRLreg,
+ ssa.OpARMORshiftRLreg,
+ ssa.OpARMXORshiftRLreg,
+ ssa.OpARMBICshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_LR)
+ case ssa.OpARMADDSshiftRLreg,
+ ssa.OpARMSUBSshiftRLreg,
+ ssa.OpARMRSBSshiftRLreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_LR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMADDshiftRAreg,
+ ssa.OpARMADCshiftRAreg,
+ ssa.OpARMSUBshiftRAreg,
+ ssa.OpARMSBCshiftRAreg,
+ ssa.OpARMRSBshiftRAreg,
+ ssa.OpARMRSCshiftRAreg,
+ ssa.OpARMANDshiftRAreg,
+ ssa.OpARMORshiftRAreg,
+ ssa.OpARMXORshiftRAreg,
+ ssa.OpARMBICshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg(), arm.SHIFT_AR)
+ case ssa.OpARMADDSshiftRAreg,
+ ssa.OpARMSUBSshiftRAreg,
+ ssa.OpARMRSBSshiftRAreg:
+ p := genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), v.Reg0(), arm.SHIFT_AR)
+ p.Scond = arm.C_SBIT
+ case ssa.OpARMHMUL,
+ ssa.OpARMHMULU:
+ // 32-bit high multiplication
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg()
+		p.To.Offset = arm.REGTMP // throw away the low 32 bits into the tmp register
+ case ssa.OpARMMULLU:
+		// 32-bit multiplication with a 64-bit result: high 32 bits in out0, low 32 bits in out1
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG
+ p.To.Reg = v.Reg0() // high 32-bit
+ p.To.Offset = int64(v.Reg1()) // low 32-bit
+ case ssa.OpARMMULA, ssa.OpARMMULS:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REGREG2
+ p.To.Reg = v.Reg() // result
+ p.To.Offset = int64(v.Args[2].Reg()) // addend
+ case ssa.OpARMMOVWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVFconst,
+ ssa.OpARMMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMP,
+ ssa.OpARMCMN,
+ ssa.OpARMTST,
+ ssa.OpARMTEQ,
+ ssa.OpARMCMPF,
+ ssa.OpARMCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+		// Special layout in ARM assembly.
+		// Compared to x86, the operands of ARM's CMP are reversed.
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPconst,
+ ssa.OpARMCMNconst,
+ ssa.OpARMTSTconst,
+ ssa.OpARMTEQconst:
+ // Special layout in ARM assembly
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPF0,
+ ssa.OpARMCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMCMPshiftLL, ssa.OpARMCMNshiftLL, ssa.OpARMTSTshiftLL, ssa.OpARMTEQshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LL, v.AuxInt)
+ case ssa.OpARMCMPshiftRL, ssa.OpARMCMNshiftRL, ssa.OpARMTSTshiftRL, ssa.OpARMTEQshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_LR, v.AuxInt)
+ case ssa.OpARMCMPshiftRA, ssa.OpARMCMNshiftRA, ssa.OpARMTSTshiftRA, ssa.OpARMTEQshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm.SHIFT_AR, v.AuxInt)
+ case ssa.OpARMCMPshiftLLreg, ssa.OpARMCMNshiftLLreg, ssa.OpARMTSTshiftLLreg, ssa.OpARMTEQshiftLLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LL)
+ case ssa.OpARMCMPshiftRLreg, ssa.OpARMCMNshiftRLreg, ssa.OpARMTSTshiftRLreg, ssa.OpARMTEQshiftRLreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_LR)
+ case ssa.OpARMCMPshiftRAreg, ssa.OpARMCMNshiftRAreg, ssa.OpARMTSTshiftRAreg, ssa.OpARMTEQshiftRAreg:
+ genregshift(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Args[2].Reg(), 0, arm.SHIFT_AR)
+ case ssa.OpARMMOVWaddr:
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+		// the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R13)
+ // when constant is large, tmp register (R11) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+
+ case ssa.OpARMMOVBload,
+ ssa.OpARMMOVBUload,
+ ssa.OpARMMOVHload,
+ ssa.OpARMMOVHUload,
+ ssa.OpARMMOVWload,
+ ssa.OpARMMOVFload,
+ ssa.OpARMMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVBstore,
+ ssa.OpARMMOVHstore,
+ ssa.OpARMMOVWstore,
+ ssa.OpARMMOVFstore,
+ ssa.OpARMMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARMMOVWloadidx, ssa.OpARMMOVBUloadidx, ssa.OpARMMOVBloadidx, ssa.OpARMMOVHUloadidx, ssa.OpARMMOVHloadidx:
+		// this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWloadshiftLL:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LL, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRL:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_LR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWloadshiftRA:
+ p := genshift(s, v, v.Op.Asm(), 0, v.Args[1].Reg(), v.Reg(), arm.SHIFT_AR, v.AuxInt)
+ p.From.Reg = v.Args[0].Reg()
+ case ssa.OpARMMOVWstoreidx, ssa.OpARMMOVBstoreidx, ssa.OpARMMOVHstoreidx:
+		// this is just a shift by 0 bits
+ fallthrough
+ case ssa.OpARMMOVWstoreshiftLL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LL, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_LR, v.AuxInt))
+ case ssa.OpARMMOVWstoreshiftRA:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_SHIFT
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = int64(makeshift(v, v.Args[1].Reg(), arm.SHIFT_AR, v.AuxInt))
+ case ssa.OpARMMOVBreg,
+ ssa.OpARMMOVBUreg,
+ ssa.OpARMMOVHreg,
+ ssa.OpARMMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARMMOVWreg || a.Op == ssa.OpARMMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARMMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARMMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARMMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ if buildcfg.GOARM >= 6 {
+ // generate more efficient "MOVB/MOVBU/MOVH/MOVHU Reg@>0, Reg" on ARMv6 & ARMv7
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm.SHIFT_RR, 0)
+ return
+ }
+ fallthrough
+ case ssa.OpARMMVN,
+ ssa.OpARMCLZ,
+ ssa.OpARMREV,
+ ssa.OpARMREV16,
+ ssa.OpARMRBIT,
+ ssa.OpARMSQRTF,
+ ssa.OpARMSQRTD,
+ ssa.OpARMNEGF,
+ ssa.OpARMNEGD,
+ ssa.OpARMABSD,
+ ssa.OpARMMOVWF,
+ ssa.OpARMMOVWD,
+ ssa.OpARMMOVFW,
+ ssa.OpARMMOVDW,
+ ssa.OpARMMOVFD,
+ ssa.OpARMMOVDF:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMMOVWUF,
+ ssa.OpARMMOVWUD,
+ ssa.OpARMMOVFWU,
+ ssa.OpARMMOVDWU:
+ p := s.Prog(v.Op.Asm())
+ p.Scond = arm.C_UBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWHSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_HS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCMOVWLSconst:
+ p := s.Prog(arm.AMOVW)
+ p.Scond = arm.C_SCOND_LS
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMCALLstatic, ssa.OpARMCALLclosure, ssa.OpARMCALLinter:
+ s.Call(v)
+ case ssa.OpARMCALLtail:
+ s.TailCall(v)
+ case ssa.OpARMCALLudiv:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Udiv
+ case ssa.OpARMLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpARMLoweredPanicBoundsA, ssa.OpARMLoweredPanicBoundsB, ssa.OpARMLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpARMLoweredPanicExtendA, ssa.OpARMLoweredPanicExtendB, ssa.OpARMLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpARMDUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMDUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARMLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpARMLoweredZero:
+ // MOVW.P Rarg2, 4(R1)
+ // CMP Rarg1, R1
+ // BLE -2(PC)
+ // arg1 is the address of the last element to zero
+ // arg2 is known to be zero
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm.REG_R1
+ p.To.Offset = sz
+ p2 := s.Prog(arm.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm.REG_R1
+ p3 := s.Prog(arm.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARMLoweredMove:
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP Rarg2, R1
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ // auxint is alignment
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = arm.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = arm.AMOVH
+ default:
+ sz = 1
+ mov = arm.AMOVB
+ }
+ p := s.Prog(mov)
+ p.Scond = arm.C_PBIT
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm.REG_R1
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm.REGTMP
+ p2 := s.Prog(mov)
+ p2.Scond = arm.C_PBIT
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm.REG_R2
+ p2.To.Offset = sz
+ p3 := s.Prog(arm.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm.REG_R1
+ p4 := s.Prog(arm.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ case ssa.OpARMEqual,
+ ssa.OpARMNotEqual,
+ ssa.OpARMLessThan,
+ ssa.OpARMLessEqual,
+ ssa.OpARMGreaterThan,
+ ssa.OpARMGreaterEqual,
+ ssa.OpARMLessThanU,
+ ssa.OpARMLessEqualU,
+ ssa.OpARMGreaterThanU,
+ ssa.OpARMGreaterEqualU:
+		// generate boolean values using conditional moves
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p = s.Prog(arm.AMOVW)
+ p.Scond = condBits[v.Op]
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetClosurePtr:
+ // Closure pointer is R7 (arm.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARMLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARMFlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARMInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]uint8{
+ ssa.OpARMEqual: arm.C_SCOND_EQ,
+ ssa.OpARMNotEqual: arm.C_SCOND_NE,
+ ssa.OpARMLessThan: arm.C_SCOND_LT,
+ ssa.OpARMLessThanU: arm.C_SCOND_LO,
+ ssa.OpARMLessEqual: arm.C_SCOND_LE,
+ ssa.OpARMLessEqualU: arm.C_SCOND_LS,
+ ssa.OpARMGreaterThan: arm.C_SCOND_GT,
+ ssa.OpARMGreaterThanU: arm.C_SCOND_HI,
+ ssa.OpARMGreaterEqual: arm.C_SCOND_GE,
+ ssa.OpARMGreaterEqualU: arm.C_SCOND_HS,
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARMEQ: {arm.ABEQ, arm.ABNE},
+ ssa.BlockARMNE: {arm.ABNE, arm.ABEQ},
+ ssa.BlockARMLT: {arm.ABLT, arm.ABGE},
+ ssa.BlockARMGE: {arm.ABGE, arm.ABLT},
+ ssa.BlockARMLE: {arm.ABLE, arm.ABGT},
+ ssa.BlockARMGT: {arm.ABGT, arm.ABLE},
+ ssa.BlockARMULT: {arm.ABLO, arm.ABHS},
+ ssa.BlockARMUGE: {arm.ABHS, arm.ABLO},
+ ssa.BlockARMUGT: {arm.ABHI, arm.ABLS},
+ ssa.BlockARMULE: {arm.ABLS, arm.ABHI},
+ ssa.BlockARMLTnoov: {arm.ABMI, arm.ABPL},
+ ssa.BlockARMGEnoov: {arm.ABPL, arm.ABMI},
+}
+
+// leJumps models an 'LEnoov' ('<=' without overflow checking) branch as a pair of conditional jumps.
+var leJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm.ABEQ, Index: 0}, {Jump: arm.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABMI, Index: 0}, {Jump: arm.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// gtJumps models a 'GTnoov' ('>' without overflow checking) branch as a pair of conditional jumps.
+var gtJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm.ABMI, Index: 1}, {Jump: arm.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm.ABEQ, Index: 1}, {Jump: arm.ABPL, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm.REG_R0
+ p = s.Prog(arm.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockARMEQ, ssa.BlockARMNE,
+ ssa.BlockARMLT, ssa.BlockARMGE,
+ ssa.BlockARMLE, ssa.BlockARMGT,
+ ssa.BlockARMULT, ssa.BlockARMUGT,
+ ssa.BlockARMULE, ssa.BlockARMUGE,
+ ssa.BlockARMLTnoov, ssa.BlockARMGEnoov:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ case ssa.BlockARMLEnoov:
+ s.CombJump(b, next, &leJumps)
+
+ case ssa.BlockARMGTnoov:
+ s.CombJump(b, next, &gtJumps)
+
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/arm64/galign.go b/src/cmd/compile/internal/arm64/galign.go
new file mode 100644
index 0000000..3ebd860
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/arm64"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &arm64.Linkarm64
+ arch.REGSP = arm64.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.PadFrame = padframe
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/arm64/ggen.go b/src/cmd/compile/internal/arm64/ggen.go
new file mode 100644
index 0000000..89be496
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ggen.go
@@ -0,0 +1,76 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+ "internal/buildcfg"
+)
+
+var darwin = buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios"
+
+func padframe(frame int64) int64 {
+ // arm64 requires that the frame size (not counting saved FP&LR)
+	// be 16-byte aligned. If not, pad it.
+ if frame%16 != 0 {
+ frame += 16 - (frame % 16)
+ }
+ return frame
+}
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
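+	// The strategy depends on the size of the range: small ranges are cleared
+	// with individual MOVD stores of ZR, medium ranges (up to 128 pointer-sized
+	// words, except on darwin) branch into the duffzero routine, and larger
+	// ranges use an inline store loop bounded by REGRT1 and REGRT2.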
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) && !darwin { // darwin ld64 cannot handle BR26 reloc with non-zero addend
+ if cnt%(2*int64(types.PtrSize)) != 0 {
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGSP, 8+off)
+ off += int64(types.PtrSize)
+ cnt -= int64(types.PtrSize)
+ }
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REG_R20, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_CONST, 0, 8+off, obj.TYPE_REG, arm64.REG_R20, 0)
+ p.Reg = arm64.REG_R20
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (64 - cnt/(2*int64(types.PtrSize)))
+ } else {
+ // Not using REGTMP, so this is async preemptible (async preemption clobbers REGTMP).
+ // We are at the function entry, where no register is live, so it is okay to clobber
+		// other registers.
+ const rtmp = arm64.REG_R20
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGSP, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT1, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, rtmp, 0)
+ p = pp.Append(p, arm64.AADD, obj.TYPE_REG, rtmp, 0, obj.TYPE_REG, arm64.REGRT2, 0)
+ p.Reg = arm64.REGRT1
+ p = pp.Append(p, arm64.AMOVD, obj.TYPE_REG, arm64.REGZERO, 0, obj.TYPE_MEM, arm64.REGRT1, int64(types.PtrSize))
+ p.Scond = arm64.C_XPRE
+ p1 := p
+ p = pp.Append(p, arm64.ACMP, obj.TYPE_REG, arm64.REGRT1, 0, obj.TYPE_NONE, 0, 0)
+ p.Reg = arm64.REGRT2
+ p = pp.Append(p, arm64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
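+// ginsnop emits HINT $0, the architectural no-op on arm64.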
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(arm64.AHINT)
+ p.From.Type = obj.TYPE_CONST
+ return p
+}
diff --git a/src/cmd/compile/internal/arm64/ssa.go b/src/cmd/compile/internal/arm64/ssa.go
new file mode 100644
index 0000000..96a2922
--- /dev/null
+++ b/src/cmd/compile/internal/arm64/ssa.go
@@ -0,0 +1,1346 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package arm64
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+)
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return arm64.AMOVB
+ } else {
+ return arm64.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return arm64.AMOVH
+ } else {
+ return arm64.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return arm64.AMOVW
+ } else {
+ return arm64.AMOVWU
+ }
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return arm64.AFMOVS
+ case 8:
+ return arm64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return arm64.AMOVB
+ case 2:
+ return arm64.AMOVH
+ case 4:
+ return arm64.AMOVW
+ case 8:
+ return arm64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// makeshift encodes a register shifted by a constant, used as an Offset in Prog
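+// The encoded value places the register number (5 bits) at bit 16 and the
+// shift amount (6 bits) at bit 10, or'd with the shift type constant (arm64.SHIFT_*).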
+func makeshift(v *ssa.Value, reg int16, typ int64, s int64) int64 {
+ if s < 0 || s >= 64 {
+ v.Fatalf("shift out of range: %d", s)
+ }
+ return int64(reg&31)<<16 | typ | (s&63)<<10
+}
+
+// genshift generates a Prog for r = r0 op (r1 shifted by n)
+func genshift(s *ssagen.State, v *ssa.Value, as obj.As, r0, r1, r int16, typ int64, n int64) *obj.Prog {
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_SHIFT
+ p.From.Offset = makeshift(v, r1, typ, n)
+ p.Reg = r0
+ if r != 0 {
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ return p
+}
+
+// genIndexedOperand generates the memory operand for the indexed load/store instructions.
+func genIndexedOperand(v *ssa.Value) obj.Addr {
+ // Reg: base register, Index: (shifted) index register
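+	// For the scaled ops (idx2/idx4/idx8) the LSL amount encoded in Index
+	// matches the element size: 1 for 2-byte, 2 for 4-byte, and 3 for 8-byte accesses.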
+ mop := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ switch v.Op {
+ case ssa.OpARM64MOVDloadidx8, ssa.OpARM64MOVDstoreidx8, ssa.OpARM64MOVDstorezeroidx8,
+ ssa.OpARM64FMOVDloadidx8, ssa.OpARM64FMOVDstoreidx8:
+ mop.Index = arm64.REG_LSL | 3<<5 | v.Args[1].Reg()&31
+ case ssa.OpARM64MOVWloadidx4, ssa.OpARM64MOVWUloadidx4, ssa.OpARM64MOVWstoreidx4, ssa.OpARM64MOVWstorezeroidx4,
+ ssa.OpARM64FMOVSloadidx4, ssa.OpARM64FMOVSstoreidx4:
+ mop.Index = arm64.REG_LSL | 2<<5 | v.Args[1].Reg()&31
+ case ssa.OpARM64MOVHloadidx2, ssa.OpARM64MOVHUloadidx2, ssa.OpARM64MOVHstoreidx2, ssa.OpARM64MOVHstorezeroidx2:
+ mop.Index = arm64.REG_LSL | 1<<5 | v.Args[1].Reg()&31
+ default: // not shifted
+ mop.Index = v.Args[1].Reg()
+ }
+ return mop
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpARM64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := arm64.AMOVD
+ if v.Type.IsFloat() {
+ switch v.Type.Size() {
+ case 4:
+ as = arm64.AFMOVS
+ case 8:
+ as = arm64.AFMOVD
+ default:
+ panic("bad float size")
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ case ssa.OpARM64MOVDnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+		// The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+		// of the register arguments. The loop only runs once per function, because RegArgs is cleared below.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, arm64.REGSP, base.Ctxt.FixedFrameSize())
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+ ssagen.CheckArgReg(v)
+ case ssa.OpARM64ADD,
+ ssa.OpARM64SUB,
+ ssa.OpARM64AND,
+ ssa.OpARM64OR,
+ ssa.OpARM64XOR,
+ ssa.OpARM64BIC,
+ ssa.OpARM64EON,
+ ssa.OpARM64ORN,
+ ssa.OpARM64MUL,
+ ssa.OpARM64MULW,
+ ssa.OpARM64MNEG,
+ ssa.OpARM64MNEGW,
+ ssa.OpARM64MULH,
+ ssa.OpARM64UMULH,
+ ssa.OpARM64MULL,
+ ssa.OpARM64UMULL,
+ ssa.OpARM64DIV,
+ ssa.OpARM64UDIV,
+ ssa.OpARM64DIVW,
+ ssa.OpARM64UDIVW,
+ ssa.OpARM64MOD,
+ ssa.OpARM64UMOD,
+ ssa.OpARM64MODW,
+ ssa.OpARM64UMODW,
+ ssa.OpARM64SLL,
+ ssa.OpARM64SRL,
+ ssa.OpARM64SRA,
+ ssa.OpARM64FADDS,
+ ssa.OpARM64FADDD,
+ ssa.OpARM64FSUBS,
+ ssa.OpARM64FSUBD,
+ ssa.OpARM64FMULS,
+ ssa.OpARM64FMULD,
+ ssa.OpARM64FNMULS,
+ ssa.OpARM64FNMULD,
+ ssa.OpARM64FDIVS,
+ ssa.OpARM64FDIVD,
+ ssa.OpARM64ROR,
+ ssa.OpARM64RORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64FMADDS,
+ ssa.OpARM64FMADDD,
+ ssa.OpARM64FNMADDS,
+ ssa.OpARM64FNMADDD,
+ ssa.OpARM64FMSUBS,
+ ssa.OpARM64FMSUBD,
+ ssa.OpARM64FNMSUBS,
+ ssa.OpARM64FNMSUBD,
+ ssa.OpARM64MADD,
+ ssa.OpARM64MADDW,
+ ssa.OpARM64MSUB,
+ ssa.OpARM64MSUBW:
+ rt := v.Reg()
+ ra := v.Args[0].Reg()
+ rm := v.Args[1].Reg()
+ rn := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.Reg = ra
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rm
+ p.SetFrom3Reg(rn)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rt
+ case ssa.OpARM64ADDconst,
+ ssa.OpARM64SUBconst,
+ ssa.OpARM64ANDconst,
+ ssa.OpARM64ORconst,
+ ssa.OpARM64XORconst,
+ ssa.OpARM64SLLconst,
+ ssa.OpARM64SRLconst,
+ ssa.OpARM64SRAconst,
+ ssa.OpARM64RORconst,
+ ssa.OpARM64RORWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADDSconstflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64ADCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64ADCSflags,
+ ssa.OpARM64ADDSflags,
+ ssa.OpARM64SBCSflags,
+ ssa.OpARM64SUBSflags:
+ r := v.Reg0()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpARM64NEGSflags:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64NGCzerocarry:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64EXTRconst,
+ ssa.OpARM64EXTRWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.SetFrom3Reg(v.Args[0].Reg())
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64MVNshiftLL, ssa.OpARM64NEGshiftLL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64MVNshiftRL, ssa.OpARM64NEGshiftRL:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRA, ssa.OpARM64NEGshiftRA:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64MVNshiftRO:
+ genshift(s, v, v.Op.Asm(), 0, v.Args[0].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64ADDshiftLL,
+ ssa.OpARM64SUBshiftLL,
+ ssa.OpARM64ANDshiftLL,
+ ssa.OpARM64ORshiftLL,
+ ssa.OpARM64XORshiftLL,
+ ssa.OpARM64EONshiftLL,
+ ssa.OpARM64ORNshiftLL,
+ ssa.OpARM64BICshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64ADDshiftRL,
+ ssa.OpARM64SUBshiftRL,
+ ssa.OpARM64ANDshiftRL,
+ ssa.OpARM64ORshiftRL,
+ ssa.OpARM64XORshiftRL,
+ ssa.OpARM64EONshiftRL,
+ ssa.OpARM64ORNshiftRL,
+ ssa.OpARM64BICshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64ADDshiftRA,
+ ssa.OpARM64SUBshiftRA,
+ ssa.OpARM64ANDshiftRA,
+ ssa.OpARM64ORshiftRA,
+ ssa.OpARM64XORshiftRA,
+ ssa.OpARM64EONshiftRA,
+ ssa.OpARM64ORNshiftRA,
+ ssa.OpARM64BICshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64ANDshiftRO,
+ ssa.OpARM64ORshiftRO,
+ ssa.OpARM64XORshiftRO,
+ ssa.OpARM64EONshiftRO,
+ ssa.OpARM64ORNshiftRO,
+ ssa.OpARM64BICshiftRO:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), v.Reg(), arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FMOVSconst,
+ ssa.OpARM64FMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64FCMPS0,
+ ssa.OpARM64FCMPD0:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(0)
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMP,
+ ssa.OpARM64CMPW,
+ ssa.OpARM64CMN,
+ ssa.OpARM64CMNW,
+ ssa.OpARM64TST,
+ ssa.OpARM64TSTW,
+ ssa.OpARM64FCMPS,
+ ssa.OpARM64FCMPD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPconst,
+ ssa.OpARM64CMPWconst,
+ ssa.OpARM64CMNconst,
+ ssa.OpARM64CMNWconst,
+ ssa.OpARM64TSTconst,
+ ssa.OpARM64TSTWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpARM64CMPshiftLL, ssa.OpARM64CMNshiftLL, ssa.OpARM64TSTshiftLL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LL, v.AuxInt)
+ case ssa.OpARM64CMPshiftRL, ssa.OpARM64CMNshiftRL, ssa.OpARM64TSTshiftRL:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_LR, v.AuxInt)
+ case ssa.OpARM64CMPshiftRA, ssa.OpARM64CMNshiftRA, ssa.OpARM64TSTshiftRA:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_AR, v.AuxInt)
+ case ssa.OpARM64TSTshiftRO:
+ genshift(s, v, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg(), 0, arm64.SHIFT_ROR, v.AuxInt)
+ case ssa.OpARM64MOVDaddr:
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVD $sym+off(base), R
+ // the assembler expands it as the following:
+		// - base is SP: add constant offset to SP
+		//               when constant is large, the assembler's temporary register (REGTMP) may be used
+ // - base is SB: load external address from constant pool (use relocation)
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVD $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpARM64MOVBload,
+ ssa.OpARM64MOVBUload,
+ ssa.OpARM64MOVHload,
+ ssa.OpARM64MOVHUload,
+ ssa.OpARM64MOVWload,
+ ssa.OpARM64MOVWUload,
+ ssa.OpARM64MOVDload,
+ ssa.OpARM64FMOVSload,
+ ssa.OpARM64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64MOVBloadidx,
+ ssa.OpARM64MOVBUloadidx,
+ ssa.OpARM64MOVHloadidx,
+ ssa.OpARM64MOVHUloadidx,
+ ssa.OpARM64MOVWloadidx,
+ ssa.OpARM64MOVWUloadidx,
+ ssa.OpARM64MOVDloadidx,
+ ssa.OpARM64FMOVSloadidx,
+ ssa.OpARM64FMOVDloadidx,
+ ssa.OpARM64MOVHloadidx2,
+ ssa.OpARM64MOVHUloadidx2,
+ ssa.OpARM64MOVWloadidx4,
+ ssa.OpARM64MOVWUloadidx4,
+ ssa.OpARM64MOVDloadidx8,
+ ssa.OpARM64FMOVDloadidx8,
+ ssa.OpARM64FMOVSloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From = genIndexedOperand(v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LDAR,
+ ssa.OpARM64LDARB,
+ ssa.OpARM64LDARW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpARM64MOVBstore,
+ ssa.OpARM64MOVHstore,
+ ssa.OpARM64MOVWstore,
+ ssa.OpARM64MOVDstore,
+ ssa.OpARM64FMOVSstore,
+ ssa.OpARM64FMOVDstore,
+ ssa.OpARM64STLRB,
+ ssa.OpARM64STLR,
+ ssa.OpARM64STLRW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstoreidx,
+ ssa.OpARM64MOVHstoreidx,
+ ssa.OpARM64MOVWstoreidx,
+ ssa.OpARM64MOVDstoreidx,
+ ssa.OpARM64FMOVSstoreidx,
+ ssa.OpARM64FMOVDstoreidx,
+ ssa.OpARM64MOVHstoreidx2,
+ ssa.OpARM64MOVWstoreidx4,
+ ssa.OpARM64FMOVSstoreidx4,
+ ssa.OpARM64MOVDstoreidx8,
+ ssa.OpARM64FMOVDstoreidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ case ssa.OpARM64STP:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Offset = int64(v.Args[2].Reg())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezero,
+ ssa.OpARM64MOVHstorezero,
+ ssa.OpARM64MOVWstorezero,
+ ssa.OpARM64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64MOVBstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx,
+ ssa.OpARM64MOVWstorezeroidx,
+ ssa.OpARM64MOVDstorezeroidx,
+ ssa.OpARM64MOVHstorezeroidx2,
+ ssa.OpARM64MOVWstorezeroidx4,
+ ssa.OpARM64MOVDstorezeroidx8:
+ p := s.Prog(v.Op.Asm())
+ p.To = genIndexedOperand(v)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGZERO
+ case ssa.OpARM64MOVQstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpARM64BFI,
+ ssa.OpARM64BFXIL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3Const(v.AuxInt & 0xff)
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64SBFIZ,
+ ssa.OpARM64SBFX,
+ ssa.OpARM64UBFIZ,
+ ssa.OpARM64UBFX:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt >> 8
+ p.SetFrom3Const(v.AuxInt & 0xff)
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredMuluhilo:
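+		// Unsigned 64x64 -> 128 bit multiply: UMULH writes the high 64 bits
+		// of the product to Reg0 and MUL writes the low 64 bits to Reg1.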
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(arm64.AUMULH)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(arm64.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+ case ssa.OpARM64LoweredAtomicExchange64,
+ ssa.OpARM64LoweredAtomicExchange32:
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = arm64.REGTMP
+ p2 := s.Prog(arm64.ACBNZ)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p2.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicExchange64Variant,
+ ssa.OpARM64LoweredAtomicExchange32Variant:
+ swap := arm64.ASWPALD
+ if v.Op == ssa.OpARM64LoweredAtomicExchange32Variant {
+ swap = arm64.ASWPALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // SWPALD Rarg1, (Rarg0), Rout
+ p := s.Prog(swap)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ case ssa.OpARM64LoweredAtomicAdd64,
+ ssa.OpARM64LoweredAtomicAdd32:
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicAdd64Variant,
+ ssa.OpARM64LoweredAtomicAdd32Variant:
+ // LDADDAL Rarg1, (Rarg0), Rout
+ // ADD Rarg1, Rout
+ op := arm64.ALDADDALD
+ if v.Op == ssa.OpARM64LoweredAtomicAdd32Variant {
+ op = arm64.ALDADDALW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+ p1 := s.Prog(arm64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ case ssa.OpARM64LoweredAtomicCas64,
+ ssa.OpARM64LoweredAtomicCas32:
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ ld := arm64.ALDAXR
+ st := arm64.ASTLXR
+ cmp := arm64.ACMP
+ if v.Op == ssa.OpARM64LoweredAtomicCas32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ cmp = arm64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = arm64.REGTMP
+ p2 := s.Prog(arm64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = arm64.REGTMP
+ p4 := s.Prog(arm64.ACBNZ)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = arm64.REGTMP
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ p5 := s.Prog(arm64.ACSET)
+ p5.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p5.From.Reg = arm64.COND_EQ
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ p2.To.SetTarget(p5)
+ case ssa.OpARM64LoweredAtomicCas64Variant,
+ ssa.OpARM64LoweredAtomicCas32Variant:
+ // Rarg0: ptr
+ // Rarg1: old
+ // Rarg2: new
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ cas := arm64.ACASALD
+ cmp := arm64.ACMP
+ mov := arm64.AMOVD
+ if v.Op == ssa.OpARM64LoweredAtomicCas32Variant {
+ cas = arm64.ACASALW
+ cmp = arm64.ACMPW
+ mov = arm64.AMOVW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ // MOV Rarg1, Rtmp
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+ // CASAL Rtmp, (Rarg0), Rarg2
+ p1 := s.Prog(cas)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = r2
+
+ // CMP Rarg1, Rtmp
+ p2 := s.Prog(cmp)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = arm64.REGTMP
+
+ // CSET EQ, Rout
+ p3 := s.Prog(arm64.ACSET)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.COND_EQ
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicAnd8,
+ ssa.OpARM64LoweredAtomicAnd32,
+ ssa.OpARM64LoweredAtomicOr8,
+ ssa.OpARM64LoweredAtomicOr32:
+ // LDAXRB/LDAXRW (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+		// STLXRB/STLXRW Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ ld := arm64.ALDAXRB
+ st := arm64.ASTLXRB
+ if v.Op == ssa.OpARM64LoweredAtomicAnd32 || v.Op == ssa.OpARM64LoweredAtomicOr32 {
+ ld = arm64.ALDAXRW
+ st = arm64.ASTLXRW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = out
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = arm64.REGTMP
+ p3 := s.Prog(arm64.ACBNZ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = arm64.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64LoweredAtomicAnd8Variant,
+ ssa.OpARM64LoweredAtomicAnd32Variant:
+ atomic_clear := arm64.ALDCLRALW
+ if v.Op == ssa.OpARM64LoweredAtomicAnd8Variant {
+ atomic_clear = arm64.ALDCLRALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+		// MVN Rarg1, Rtmp
+ p := s.Prog(arm64.AMVN)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+
+		// LDCLRALW Rtmp, (Rarg0), Rout
+ p1 := s.Prog(atomic_clear)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = arm64.REGTMP
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ p1.RegTo2 = out
+
+ // AND Rarg1, Rout
+ p2 := s.Prog(arm64.AAND)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64LoweredAtomicOr8Variant,
+ ssa.OpARM64LoweredAtomicOr32Variant:
+ atomic_or := arm64.ALDORALW
+ if v.Op == ssa.OpARM64LoweredAtomicOr8Variant {
+ atomic_or = arm64.ALDORALB
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+
+ // LDORALW Rarg1, (Rarg0), Rout
+ p := s.Prog(atomic_or)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r0
+ p.RegTo2 = out
+
+ // ORR Rarg1, Rout
+ p2 := s.Prog(arm64.AORR)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = out
+
+ case ssa.OpARM64MOVBreg,
+ ssa.OpARM64MOVBUreg,
+ ssa.OpARM64MOVHreg,
+ ssa.OpARM64MOVHUreg,
+ ssa.OpARM64MOVWreg,
+ ssa.OpARM64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpARM64MOVDreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpARM64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpARM64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpARM64MVN,
+ ssa.OpARM64NEG,
+ ssa.OpARM64FABSD,
+ ssa.OpARM64FMOVDfpgp,
+ ssa.OpARM64FMOVDgpfp,
+ ssa.OpARM64FMOVSfpgp,
+ ssa.OpARM64FMOVSgpfp,
+ ssa.OpARM64FNEGS,
+ ssa.OpARM64FNEGD,
+ ssa.OpARM64FSQRTS,
+ ssa.OpARM64FSQRTD,
+ ssa.OpARM64FCVTZSSW,
+ ssa.OpARM64FCVTZSDW,
+ ssa.OpARM64FCVTZUSW,
+ ssa.OpARM64FCVTZUDW,
+ ssa.OpARM64FCVTZSS,
+ ssa.OpARM64FCVTZSD,
+ ssa.OpARM64FCVTZUS,
+ ssa.OpARM64FCVTZUD,
+ ssa.OpARM64SCVTFWS,
+ ssa.OpARM64SCVTFWD,
+ ssa.OpARM64SCVTFS,
+ ssa.OpARM64SCVTFD,
+ ssa.OpARM64UCVTFWS,
+ ssa.OpARM64UCVTFWD,
+ ssa.OpARM64UCVTFS,
+ ssa.OpARM64UCVTFD,
+ ssa.OpARM64FCVTSD,
+ ssa.OpARM64FCVTDS,
+ ssa.OpARM64REV,
+ ssa.OpARM64REVW,
+ ssa.OpARM64REV16,
+ ssa.OpARM64REV16W,
+ ssa.OpARM64RBIT,
+ ssa.OpARM64RBITW,
+ ssa.OpARM64CLZ,
+ ssa.OpARM64CLZW,
+ ssa.OpARM64FRINTAD,
+ ssa.OpARM64FRINTMD,
+ ssa.OpARM64FRINTND,
+ ssa.OpARM64FRINTPD,
+ ssa.OpARM64FRINTZD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredRound32F, ssa.OpARM64LoweredRound64F:
+ // input is already rounded
+ case ssa.OpARM64VCNT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = (v.Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ case ssa.OpARM64VUADDLV:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = (v.Args[0].Reg()-arm64.REG_F0)&31 + arm64.REG_ARNG + ((arm64.ARNG_8B & 15) << 5)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg() - arm64.REG_F0 + arm64.REG_V0
+ case ssa.OpARM64CSEL, ssa.OpARM64CSEL0:
+ r1 := int16(arm64.REGZERO)
+ if v.Op != ssa.OpARM64CSEL0 {
+ r1 = v.Args[1].Reg()
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[ssa.Op(v.AuxInt)]
+ p.Reg = v.Args[0].Reg()
+ p.SetFrom3Reg(r1)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64CSINC, ssa.OpARM64CSINV, ssa.OpARM64CSNEG:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[ssa.Op(v.AuxInt)]
+ p.Reg = v.Args[0].Reg()
+ p.SetFrom3Reg(v.Args[1].Reg())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64CSETM:
+ p := s.Prog(arm64.ACSETM)
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[ssa.Op(v.AuxInt)]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64DUFFZERO:
+ // runtime.duffzero expects start address in R20
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredZero:
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // arg1 is the address of the last 16-byte unit to zero
+ p := s.Prog(arm64.ASTP)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_REGREG
+ p.From.Reg = arm64.REGZERO
+ p.From.Offset = int64(arm64.REGZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REG_R16
+ p.To.Offset = 16
+ p2 := s.Prog(arm64.ACMP)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = arm64.REG_R16
+ p3 := s.Prog(arm64.ABLE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ case ssa.OpARM64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredMove:
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // arg2 is the address of the last element of src
+ p := s.Prog(arm64.AMOVD)
+ p.Scond = arm64.C_XPOST
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arm64.REG_R16
+ p.From.Offset = 8
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p2 := s.Prog(arm64.AMOVD)
+ p2.Scond = arm64.C_XPOST
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = arm64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = arm64.REG_R17
+ p2.To.Offset = 8
+ p3 := s.Prog(arm64.ACMP)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.Reg = arm64.REG_R16
+ p4 := s.Prog(arm64.ABLE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ case ssa.OpARM64CALLstatic, ssa.OpARM64CALLclosure, ssa.OpARM64CALLinter:
+ s.Call(v)
+ case ssa.OpARM64CALLtail:
+ s.TailCall(v)
+ case ssa.OpARM64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpARM64LoweredPanicBoundsA, ssa.OpARM64LoweredPanicBoundsB, ssa.OpARM64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpARM64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(arm64.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Line==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpARM64Equal,
+ ssa.OpARM64NotEqual,
+ ssa.OpARM64LessThan,
+ ssa.OpARM64LessEqual,
+ ssa.OpARM64GreaterThan,
+ ssa.OpARM64GreaterEqual,
+ ssa.OpARM64LessThanU,
+ ssa.OpARM64LessEqualU,
+ ssa.OpARM64GreaterThanU,
+ ssa.OpARM64GreaterEqualU,
+ ssa.OpARM64LessThanF,
+ ssa.OpARM64LessEqualF,
+ ssa.OpARM64GreaterThanF,
+ ssa.OpARM64GreaterEqualF,
+ ssa.OpARM64NotLessThanF,
+ ssa.OpARM64NotLessEqualF,
+ ssa.OpARM64NotGreaterThanF,
+ ssa.OpARM64NotGreaterEqualF:
+ // generate boolean values using CSET
+ p := s.Prog(arm64.ACSET)
+ p.From.Type = obj.TYPE_REG // assembler encodes conditional bits in Reg
+ p.From.Reg = condBits[v.Op]
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64PRFM:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpARM64LoweredGetClosurePtr:
+ // Closure pointer is R26 (arm64.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpARM64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpARM64DMB:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ case ssa.OpARM64FlagConstant:
+ v.Fatalf("FlagConstant op should never make it to codegen %v", v.LongString())
+ case ssa.OpARM64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber:
+ // MOVW $0xdeaddead, REGTMP
+ // MOVW REGTMP, (slot)
+ // MOVW REGTMP, 4(slot)
+ p := s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = arm64.REGTMP
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux(&p.To, v)
+ p = s.Prog(arm64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arm64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arm64.REGSP
+ ssagen.AddAux2(&p.To, v, v.AuxInt+4)
+ case ssa.OpClobberReg:
+ x := uint64(0xdeaddeaddeaddead)
+ p := s.Prog(arm64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(x)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var condBits = map[ssa.Op]int16{
+ ssa.OpARM64Equal: arm64.COND_EQ,
+ ssa.OpARM64NotEqual: arm64.COND_NE,
+ ssa.OpARM64LessThan: arm64.COND_LT,
+ ssa.OpARM64LessThanU: arm64.COND_LO,
+ ssa.OpARM64LessEqual: arm64.COND_LE,
+ ssa.OpARM64LessEqualU: arm64.COND_LS,
+ ssa.OpARM64GreaterThan: arm64.COND_GT,
+ ssa.OpARM64GreaterThanU: arm64.COND_HI,
+ ssa.OpARM64GreaterEqual: arm64.COND_GE,
+ ssa.OpARM64GreaterEqualU: arm64.COND_HS,
+ ssa.OpARM64LessThanF: arm64.COND_MI, // Less than
+ ssa.OpARM64LessEqualF: arm64.COND_LS, // Less than or equal to
+ ssa.OpARM64GreaterThanF: arm64.COND_GT, // Greater than
+ ssa.OpARM64GreaterEqualF: arm64.COND_GE, // Greater than or equal to
+
+	// The following condition codes include the unordered case, to handle comparisons involving NaN.
+ ssa.OpARM64NotLessThanF: arm64.COND_PL, // Greater than, equal to, or unordered
+ ssa.OpARM64NotLessEqualF: arm64.COND_HI, // Greater than or unordered
+ ssa.OpARM64NotGreaterThanF: arm64.COND_LE, // Less than, equal to or unordered
+ ssa.OpARM64NotGreaterEqualF: arm64.COND_LT, // Less than or unordered
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockARM64EQ: {arm64.ABEQ, arm64.ABNE},
+ ssa.BlockARM64NE: {arm64.ABNE, arm64.ABEQ},
+ ssa.BlockARM64LT: {arm64.ABLT, arm64.ABGE},
+ ssa.BlockARM64GE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64LE: {arm64.ABLE, arm64.ABGT},
+ ssa.BlockARM64GT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64ULT: {arm64.ABLO, arm64.ABHS},
+ ssa.BlockARM64UGE: {arm64.ABHS, arm64.ABLO},
+ ssa.BlockARM64UGT: {arm64.ABHI, arm64.ABLS},
+ ssa.BlockARM64ULE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64Z: {arm64.ACBZ, arm64.ACBNZ},
+ ssa.BlockARM64NZ: {arm64.ACBNZ, arm64.ACBZ},
+ ssa.BlockARM64ZW: {arm64.ACBZW, arm64.ACBNZW},
+ ssa.BlockARM64NZW: {arm64.ACBNZW, arm64.ACBZW},
+ ssa.BlockARM64TBZ: {arm64.ATBZ, arm64.ATBNZ},
+ ssa.BlockARM64TBNZ: {arm64.ATBNZ, arm64.ATBZ},
+ ssa.BlockARM64FLT: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64FGE: {arm64.ABGE, arm64.ABLT},
+ ssa.BlockARM64FLE: {arm64.ABLS, arm64.ABHI},
+ ssa.BlockARM64FGT: {arm64.ABGT, arm64.ABLE},
+ ssa.BlockARM64LTnoov: {arm64.ABMI, arm64.ABPL},
+ ssa.BlockARM64GEnoov: {arm64.ABPL, arm64.ABMI},
+}
+
+// leJumps models a 'LEnoov' ('<=' without overflow checking) branch as a pair of jumps.
+var leJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm64.ABEQ, Index: 0}, {Jump: arm64.ABPL, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABMI, Index: 0}, {Jump: arm64.ABEQ, Index: 0}}, // next == b.Succs[1]
+}
+
+// gtJumps models a 'GTnoov' ('>' without overflow checking) branch as a pair of jumps.
+var gtJumps = [2][2]ssagen.IndexJump{
+ {{Jump: arm64.ABMI, Index: 1}, {Jump: arm64.ABEQ, Index: 1}}, // next == b.Succs[0]
+ {{Jump: arm64.ABEQ, Index: 1}, {Jump: arm64.ABPL, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockDefer:
+ // defer returns in R0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(arm64.ACMP)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0
+ p.Reg = arm64.REG_R0
+ p = s.Prog(arm64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockARM64EQ, ssa.BlockARM64NE,
+ ssa.BlockARM64LT, ssa.BlockARM64GE,
+ ssa.BlockARM64LE, ssa.BlockARM64GT,
+ ssa.BlockARM64ULT, ssa.BlockARM64UGT,
+ ssa.BlockARM64ULE, ssa.BlockARM64UGE,
+ ssa.BlockARM64Z, ssa.BlockARM64NZ,
+ ssa.BlockARM64ZW, ssa.BlockARM64NZW,
+ ssa.BlockARM64FLT, ssa.BlockARM64FGE,
+ ssa.BlockARM64FLE, ssa.BlockARM64FGT,
+ ssa.BlockARM64LTnoov, ssa.BlockARM64GEnoov:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ case ssa.BlockARM64TBZ, ssa.BlockARM64TBNZ:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ p.From.Offset = b.AuxInt
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = b.Controls[0].Reg()
+
+ case ssa.BlockARM64LEnoov:
+ s.CombJump(b, next, &leJumps)
+ case ssa.BlockARM64GTnoov:
+ s.CombJump(b, next, &gtJumps)
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
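+// loadRegResult emits a load of the stack copy of result n (of type t, at
+// offset off within n's slot) into register reg and returns the Prog.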
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
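+// spillArgReg appends a store of argument register reg to parameter n's
+// stack slot at offset off; the spill is marked as not being a statement.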
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/base/base.go b/src/cmd/compile/internal/base/base.go
new file mode 100644
index 0000000..39ce8e6
--- /dev/null
+++ b/src/cmd/compile/internal/base/base.go
@@ -0,0 +1,77 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "os"
+)
+
+var atExitFuncs []func()
+
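+// AtExit registers f to be run when Exit is called.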
+func AtExit(f func()) {
+ atExitFuncs = append(atExitFuncs, f)
+}
+
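+// Exit runs the functions registered with AtExit in reverse registration
+// order, then terminates the process with the given code.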
+func Exit(code int) {
+ for i := len(atExitFuncs) - 1; i >= 0; i-- {
+ f := atExitFuncs[i]
+ atExitFuncs = atExitFuncs[:i]
+ f()
+ }
+ os.Exit(code)
+}
+
+// To enable tracing support (-t flag), set EnableTrace to true.
+const EnableTrace = false
+
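+// Compiling reports whether the package being compiled (Ctxt.Pkgpath) is
+// one of pkgs.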
+func Compiling(pkgs []string) bool {
+ if Ctxt.Pkgpath != "" {
+ for _, p := range pkgs {
+ if Ctxt.Pkgpath == p {
+ return true
+ }
+ }
+ }
+
+ return false
+}
+
+// The racewalk pass is currently handled in three parts.
+//
+// First, for flag_race, it inserts calls to racefuncenter and
+// racefuncexit at the start and end (respectively) of each
+// function. This is handled below.
+//
+// Second, during buildssa, it inserts appropriate instrumentation
+// calls immediately before each memory load or store. This is handled
+// by the (*state).instrument method in ssa.go, so here we just set
+// the Func.InstrumentBody flag as needed. For background on why this
+// is done during SSA construction rather than a separate SSA pass,
+// see issue #19054.
+//
+// Third, we remove calls to racefuncenter and racefuncexit for leaf
+// functions without instrumented operations. This is done as part of
+// the ssa opt pass via a special rule.
+
+// TODO(dvyukov): do not instrument initialization as writes:
+// a := make([]int, 10)
+
+// Do not instrument the following packages at all;
+// at best, instrumentation would cause infinite recursion.
+var NoInstrumentPkgs = []string{
+ "runtime/internal/atomic",
+ "runtime/internal/math",
+ "runtime/internal/sys",
+ "runtime/internal/syscall",
+ "runtime",
+ "runtime/race",
+ "runtime/msan",
+ "runtime/asan",
+ "internal/cpu",
+}
+
+// Don't insert racefuncenter/racefuncexit into the following packages.
+// Memory accesses in the packages are either uninteresting or will cause false positives.
+var NoRacePkgs = []string{"sync", "sync/atomic"}
diff --git a/src/cmd/compile/internal/base/bootstrap_false.go b/src/cmd/compile/internal/base/bootstrap_false.go
new file mode 100644
index 0000000..c77fcd7
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_false.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !compiler_bootstrap
+// +build !compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = false
diff --git a/src/cmd/compile/internal/base/bootstrap_true.go b/src/cmd/compile/internal/base/bootstrap_true.go
new file mode 100644
index 0000000..1eb58b2
--- /dev/null
+++ b/src/cmd/compile/internal/base/bootstrap_true.go
@@ -0,0 +1,12 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build compiler_bootstrap
+// +build compiler_bootstrap
+
+package base
+
+// CompilerBootstrap reports whether the current compiler binary was
+// built with -tags=compiler_bootstrap.
+const CompilerBootstrap = true
diff --git a/src/cmd/compile/internal/base/debug.go b/src/cmd/compile/internal/base/debug.go
new file mode 100644
index 0000000..b105e46
--- /dev/null
+++ b/src/cmd/compile/internal/base/debug.go
@@ -0,0 +1,53 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Debug arguments, set by -d flag.
+
+package base
+
+// Debug holds the parsed debugging configuration values.
+var Debug DebugFlags
+
+// DebugFlags defines the debugging configuration values (see var Debug).
+// Each struct field is a different value, named for the lower-case of the field name.
+// Each field must be an int or string and must have a `help` struct tag.
+//
+// The -d option takes a comma-separated list of settings.
+// Each setting is name=value; for ints, name is short for name=1.
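+// For example, -d=checkptr=2,nil sets Checkptr to 2 and Nil to 1.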
+type DebugFlags struct {
+ Append int `help:"print information about append compilation"`
+ Checkptr int `help:"instrument unsafe pointer conversions\n0: instrumentation disabled\n1: conversions involving unsafe.Pointer are instrumented\n2: conversions to unsafe.Pointer force heap allocation"`
+ Closure int `help:"print information about closure compilation"`
+ DclStack int `help:"run internal dclstack check"`
+ Defer int `help:"print information about defer compilation"`
+ DisableNil int `help:"disable nil checks"`
+ DumpPtrs int `help:"show Node pointers values in dump output"`
+ DwarfInl int `help:"print information about DWARF inlined function creation"`
+ Export int `help:"print export data"`
+ GCProg int `help:"print dump of GC programs"`
+ InlFuncsWithClosures int `help:"allow functions with closures to be inlined"`
+ Libfuzzer int `help:"enable coverage instrumentation for libfuzzer"`
+ LocationLists int `help:"print information about DWARF location list creation"`
+ Nil int `help:"print information about nil checks"`
+ NoOpenDefer int `help:"disable open-coded defers"`
+ PCTab string `help:"print named pc-value table\nOne of: pctospadj, pctofile, pctoline, pctoinline, pctopcdata"`
+ Panic int `help:"show all compiler panics"`
+ Slice int `help:"print information about slice compilation"`
+ SoftFloat int `help:"force compiler to emit soft-float code"`
+ SyncFrames int `help:"how many writer stack frames to include at sync points in unified export data"`
+ TypeAssert int `help:"print information about type assertion inlining"`
+ TypecheckInl int `help:"eager typechecking of inline function bodies"`
+ Unified int `help:"enable unified IR construction"`
+ UnifiedQuirks int `help:"enable unified IR construction's quirks mode"`
+ WB int `help:"print information about write barriers"`
+ ABIWrap int `help:"print information about ABI wrapper generation"`
+ MayMoreStack string `help:"call named function before all stack growth checks"`
+
+ Any bool // set when any of the debug flags have been set
+}
+
+// DebugSSA is called to set a -d ssa/... option.
+// If nil, those options are reported as invalid options.
+// If DebugSSA returns a non-empty string, that text is reported as a compiler error.
+var DebugSSA func(phase, flag string, val int, valString string) string
diff --git a/src/cmd/compile/internal/base/flag.go b/src/cmd/compile/internal/base/flag.go
new file mode 100644
index 0000000..d78f93b
--- /dev/null
+++ b/src/cmd/compile/internal/base/flag.go
@@ -0,0 +1,489 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "encoding/json"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "io/ioutil"
+ "log"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+
+ "cmd/internal/objabi"
+ "cmd/internal/sys"
+)
+
+func usage() {
+ fmt.Fprintf(os.Stderr, "usage: compile [options] file.go...\n")
+ objabi.Flagprint(os.Stderr)
+ Exit(2)
+}
+
+// Flag holds the parsed command-line flags.
+// See ParseFlag for non-zero defaults.
+var Flag CmdFlags
+
+// A CountFlag is a counting integer flag.
+// It accepts -name=value to set the value directly,
+// but it also accepts -name with no =value to increment the count.
+type CountFlag int
+
+// CmdFlags defines the command-line flags (see var Flag).
+// Each struct field is a different flag, by default named for the lower-case of the field name.
+// If the flag name is a single letter, the default flag name is left upper-case.
+// If the flag name is "Lower" followed by a single letter, the default flag name is the lower-case of the last letter.
+//
+// If this default flag name can't be made right, the `flag` struct tag can be used to replace it,
+// but this should be done only in exceptional circumstances: it helps everyone if the flag name
+// is obvious from the field name when the flag is used elsewhere in the compiler sources.
+// The `flag:"-"` struct tag makes a field invisible to the flag logic and should also be used sparingly.
+//
+// Each field must have a `help` struct tag giving the flag help message.
+//
+// The allowed field types are bool, int, string, pointers to those (for values stored elsewhere),
+// CountFlag (for a counting flag), and func(string) (for a flag that uses special code for parsing).
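+// For example, the field B registers as -B, LowerC registers as -c, and
+// Percent uses its flag:"%" tag to register as -%.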
+type CmdFlags struct {
+ // Single letters
+ B CountFlag "help:\"disable bounds checking\""
+ C CountFlag "help:\"disable printing of columns in error messages\""
+ D string "help:\"set relative `path` for local imports\""
+ E CountFlag "help:\"debug symbol export\""
+ G CountFlag "help:\"accept generic code\""
+ I func(string) "help:\"add `directory` to import search path\""
+ K CountFlag "help:\"debug missing line numbers\""
+ L CountFlag "help:\"show full file names in error messages\""
+ N CountFlag "help:\"disable optimizations\""
+ S CountFlag "help:\"print assembly listing\""
+ // V is added by objabi.AddVersionFlag
+ W CountFlag "help:\"debug parse tree after type checking\""
+
+ LowerC int "help:\"concurrency during compilation (1 means no concurrency)\""
+ LowerD flag.Value "help:\"enable debugging settings; try -d help\""
+ LowerE CountFlag "help:\"no limit on number of errors reported\""
+ LowerH CountFlag "help:\"halt on error\""
+ LowerJ CountFlag "help:\"debug runtime-initialized variables\""
+ LowerL CountFlag "help:\"disable inlining\""
+ LowerM CountFlag "help:\"print optimization decisions\""
+ LowerO string "help:\"write output to `file`\""
+ LowerP *string "help:\"set expected package import `path`\"" // &Ctxt.Pkgpath, set below
+ LowerR CountFlag "help:\"debug generated wrappers\""
+ LowerT bool "help:\"enable tracing for debugging the compiler\""
+ LowerW CountFlag "help:\"debug type checking\""
+ LowerV *bool "help:\"increase debug verbosity\""
+
+ // Special characters
+ Percent int "flag:\"%\" help:\"debug non-static initializers\""
+ CompilingRuntime bool "flag:\"+\" help:\"compiling runtime\""
+
+ // Longer names
+ AsmHdr string "help:\"write assembly header to `file`\""
+ ASan bool "help:\"build code compatible with C/C++ address sanitizer\""
+ Bench string "help:\"append benchmark times to `file`\""
+ BlockProfile string "help:\"write block profile to `file`\""
+ BuildID string "help:\"record `id` as the build id in the export metadata\""
+ CPUProfile string "help:\"write cpu profile to `file`\""
+ Complete bool "help:\"compiling complete package (no C or assembly)\""
+ ClobberDead bool "help:\"clobber dead stack slots (for debugging)\""
+ ClobberDeadReg bool "help:\"clobber dead registers (for debugging)\""
+ Dwarf bool "help:\"generate DWARF symbols\""
+ DwarfBASEntries *bool "help:\"use base address selection entries in DWARF\"" // &Ctxt.UseBASEntries, set below
+ DwarfLocationLists *bool "help:\"add location lists to DWARF in optimized mode\"" // &Ctxt.Flag_locationlists, set below
+ Dynlink *bool "help:\"support references to Go symbols defined in other shared libraries\"" // &Ctxt.Flag_dynlink, set below
+ EmbedCfg func(string) "help:\"read go:embed configuration from `file`\""
+ GenDwarfInl int "help:\"generate DWARF inline info records\"" // 0=disabled, 1=funcs, 2=funcs+formals/locals
+ GoVersion string "help:\"required version of the runtime\""
+ ImportCfg func(string) "help:\"read import configuration from `file`\""
+ ImportMap func(string) "help:\"add `definition` of the form source=actual to import map\""
+ InstallSuffix string "help:\"set pkg directory `suffix`\""
+ JSON string "help:\"version,file for JSON compiler/optimizer detail output\""
+ Lang string "help:\"Go language version source code expects\""
+ LinkObj string "help:\"write linker-specific object to `file`\""
+ LinkShared *bool "help:\"generate code that will be linked against Go shared libraries\"" // &Ctxt.Flag_linkshared, set below
+ Live CountFlag "help:\"debug liveness analysis\""
+ MSan bool "help:\"build code compatible with C/C++ memory sanitizer\""
+ MemProfile string "help:\"write memory profile to `file`\""
+ MemProfileRate int "help:\"set runtime.MemProfileRate to `rate`\""
+ MutexProfile string "help:\"write mutex profile to `file`\""
+ NoLocalImports bool "help:\"reject local (relative) imports\""
+ Pack bool "help:\"write to file.a instead of file.o\""
+ Race bool "help:\"enable race detector\""
+ Shared *bool "help:\"generate code that can be linked into a shared library\"" // &Ctxt.Flag_shared, set below
+ SmallFrames bool "help:\"reduce the size limit for stack allocated objects\"" // small stacks, to diagnose GC latency; see golang.org/issue/27732
+ Spectre string "help:\"enable spectre mitigations in `list` (all, index, ret)\""
+ Std bool "help:\"compiling standard library\""
+ SymABIs string "help:\"read symbol ABIs from `file`\""
+ TraceProfile string "help:\"write an execution trace to `file`\""
+ TrimPath string "help:\"remove `prefix` from recorded source file paths\""
+ WB bool "help:\"enable write barrier\"" // TODO: remove
+
+ // Configuration derived from flags; not a flag itself.
+ Cfg struct {
+ Embed struct { // set by -embedcfg
+ Patterns map[string][]string
+ Files map[string]string
+ }
+ ImportDirs []string // appended to by -I
+ ImportMap map[string]string // set by -importmap OR -importcfg
+ PackageFile map[string]string // set by -importcfg; nil means not in use
+ SpectreIndex bool // set by -spectre=index or -spectre=all
+ // Whether we are adding any sort of code instrumentation, such as
+ // when the race detector is enabled.
+ Instrumenting bool
+ }
+}
+
+// ParseFlags parses the command-line flags into Flag.
+func ParseFlags() {
+ Flag.G = 3
+ Flag.I = addImportDir
+
+ Flag.LowerC = 1
+ Flag.LowerD = objabi.NewDebugFlag(&Debug, DebugSSA)
+ Flag.LowerP = &Ctxt.Pkgpath
+ Flag.LowerV = &Ctxt.Debugvlog
+
+ Flag.Dwarf = buildcfg.GOARCH != "wasm"
+ Flag.DwarfBASEntries = &Ctxt.UseBASEntries
+ Flag.DwarfLocationLists = &Ctxt.Flag_locationlists
+ *Flag.DwarfLocationLists = true
+ Flag.Dynlink = &Ctxt.Flag_dynlink
+ Flag.EmbedCfg = readEmbedCfg
+ Flag.GenDwarfInl = 2
+ Flag.ImportCfg = readImportCfg
+ Flag.ImportMap = addImportMap
+ Flag.LinkShared = &Ctxt.Flag_linkshared
+ Flag.Shared = &Ctxt.Flag_shared
+ Flag.WB = true
+
+ Debug.InlFuncsWithClosures = 1
+ if buildcfg.Experiment.Unified {
+ Debug.Unified = 1
+ }
+
+ Debug.Checkptr = -1 // so we can tell whether it is set explicitly
+
+ Flag.Cfg.ImportMap = make(map[string]string)
+
+ objabi.AddVersionFlag() // -V
+ registerFlags()
+ objabi.Flagparse(usage)
+
+ if Flag.MSan && !sys.MSanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -msan", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if Flag.ASan && !sys.ASanSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -asan", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if Flag.Race && !sys.RaceDetectorSupported(buildcfg.GOOS, buildcfg.GOARCH) {
+ log.Fatalf("%s/%s does not support -race", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ if (*Flag.Shared || *Flag.Dynlink || *Flag.LinkShared) && !Ctxt.Arch.InFamily(sys.AMD64, sys.ARM, sys.ARM64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X) {
+ log.Fatalf("%s/%s does not support -shared", buildcfg.GOOS, buildcfg.GOARCH)
+ }
+ parseSpectre(Flag.Spectre) // left as string for RecordFlags
+
+ Ctxt.Flag_shared = Ctxt.Flag_dynlink || Ctxt.Flag_shared
+ Ctxt.Flag_optimize = Flag.N == 0
+ Ctxt.Debugasm = int(Flag.S)
+ Ctxt.Flag_maymorestack = Debug.MayMoreStack
+
+ if flag.NArg() < 1 {
+ usage()
+ }
+
+ if Flag.GoVersion != "" && Flag.GoVersion != runtime.Version() {
+ fmt.Printf("compile: version %q does not match go tool version %q\n", runtime.Version(), Flag.GoVersion)
+ Exit(2)
+ }
+
+ if Flag.LowerO == "" {
+ p := flag.Arg(0)
+ if i := strings.LastIndex(p, "/"); i >= 0 {
+ p = p[i+1:]
+ }
+ if runtime.GOOS == "windows" {
+ if i := strings.LastIndex(p, `\`); i >= 0 {
+ p = p[i+1:]
+ }
+ }
+ if i := strings.LastIndex(p, "."); i >= 0 {
+ p = p[:i]
+ }
+ suffix := ".o"
+ if Flag.Pack {
+ suffix = ".a"
+ }
+ Flag.LowerO = p + suffix
+ }
+ switch {
+ case Flag.Race && Flag.MSan:
+ log.Fatal("cannot use both -race and -msan")
+ case Flag.Race && Flag.ASan:
+ log.Fatal("cannot use both -race and -asan")
+ case Flag.MSan && Flag.ASan:
+ log.Fatal("cannot use both -msan and -asan")
+ }
+ if Flag.Race || Flag.MSan || Flag.ASan {
+ // -race, -msan and -asan imply -d=checkptr for now.
+ if Debug.Checkptr == -1 { // if not set explicitly
+ Debug.Checkptr = 1
+ }
+ }
+
+ if Flag.CompilingRuntime && Flag.N != 0 {
+ log.Fatal("cannot disable optimizations while compiling runtime")
+ }
+ if Flag.LowerC < 1 {
+ log.Fatalf("-c must be at least 1, got %d", Flag.LowerC)
+ }
+ if Flag.LowerC > 1 && !concurrentBackendAllowed() {
+ log.Fatalf("cannot use concurrent backend compilation with provided flags; invoked as %v", os.Args)
+ }
+
+ if Flag.CompilingRuntime {
+ // Runtime can't use -d=checkptr, at least not yet.
+ Debug.Checkptr = 0
+
+ // Fuzzing the runtime isn't interesting either.
+ Debug.Libfuzzer = 0
+ }
+
+ if Debug.Checkptr == -1 { // if not set explicitly
+ Debug.Checkptr = 0
+ }
+
+ // set via a -d flag
+ Ctxt.Debugpcln = Debug.PCTab
+}
+
+// registerFlags adds flag registrations for all the fields in Flag.
+// See the comment on type CmdFlags for the rules.
+func registerFlags() {
+ var (
+ boolType = reflect.TypeOf(bool(false))
+ intType = reflect.TypeOf(int(0))
+ stringType = reflect.TypeOf(string(""))
+ ptrBoolType = reflect.TypeOf(new(bool))
+ ptrIntType = reflect.TypeOf(new(int))
+ ptrStringType = reflect.TypeOf(new(string))
+ countType = reflect.TypeOf(CountFlag(0))
+ funcType = reflect.TypeOf((func(string))(nil))
+ )
+
+ v := reflect.ValueOf(&Flag).Elem()
+ t := v.Type()
+ for i := 0; i < t.NumField(); i++ {
+ f := t.Field(i)
+ if f.Name == "Cfg" {
+ continue
+ }
+
+ var name string
+ if len(f.Name) == 1 {
+ name = f.Name
+ } else if len(f.Name) == 6 && f.Name[:5] == "Lower" && 'A' <= f.Name[5] && f.Name[5] <= 'Z' {
+ name = string(rune(f.Name[5] + 'a' - 'A'))
+ } else {
+ name = strings.ToLower(f.Name)
+ }
+ if tag := f.Tag.Get("flag"); tag != "" {
+ name = tag
+ }
+
+ help := f.Tag.Get("help")
+ if help == "" {
+ panic(fmt.Sprintf("base.Flag.%s is missing help text", f.Name))
+ }
+
+ if k := f.Type.Kind(); (k == reflect.Ptr || k == reflect.Func) && v.Field(i).IsNil() {
+ panic(fmt.Sprintf("base.Flag.%s is uninitialized %v", f.Name, f.Type))
+ }
+
+ switch f.Type {
+ case boolType:
+ p := v.Field(i).Addr().Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case intType:
+ p := v.Field(i).Addr().Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case stringType:
+ p := v.Field(i).Addr().Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case ptrBoolType:
+ p := v.Field(i).Interface().(*bool)
+ flag.BoolVar(p, name, *p, help)
+ case ptrIntType:
+ p := v.Field(i).Interface().(*int)
+ flag.IntVar(p, name, *p, help)
+ case ptrStringType:
+ p := v.Field(i).Interface().(*string)
+ flag.StringVar(p, name, *p, help)
+ case countType:
+ p := (*int)(v.Field(i).Addr().Interface().(*CountFlag))
+ objabi.Flagcount(name, help, p)
+ case funcType:
+ f := v.Field(i).Interface().(func(string))
+ objabi.Flagfn1(name, help, f)
+ default:
+ if val, ok := v.Field(i).Interface().(flag.Value); ok {
+ flag.Var(val, name, help)
+ } else {
+ panic(fmt.Sprintf("base.Flag.%s has unexpected type %s", f.Name, f.Type))
+ }
+ }
+ }
+}
+
+// concurrentFlagOk reports whether the current compiler flags
+// are compatible with concurrent compilation.
+func concurrentFlagOk() bool {
+ // TODO(rsc): Many of these are fine. Remove them.
+ return Flag.Percent == 0 &&
+ Flag.E == 0 &&
+ Flag.K == 0 &&
+ Flag.L == 0 &&
+ Flag.LowerH == 0 &&
+ Flag.LowerJ == 0 &&
+ Flag.LowerM == 0 &&
+ Flag.LowerR == 0
+}
+
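+// concurrentBackendAllowed reports whether the compiler flags and build
+// configuration allow the backend to compile functions concurrently.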
+func concurrentBackendAllowed() bool {
+ if !concurrentFlagOk() {
+ return false
+ }
+
+ // Debug.S by itself is ok, because all printing occurs
+ // while writing the object file, and that is non-concurrent.
+ // Adding Debug_vlog, however, causes Debug.S to also print
+ // while flushing the plist, which happens concurrently.
+ if Ctxt.Debugvlog || Debug.Any || Flag.Live > 0 {
+ return false
+ }
+ // TODO: Test and delete this condition.
+ if buildcfg.Experiment.FieldTrack {
+ return false
+ }
+ // TODO: fix races and enable the following flags
+ if Ctxt.Flag_dynlink || Flag.Race {
+ return false
+ }
+ return true
+}
+
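+// addImportDir implements the -I flag, recording dir in Flag.Cfg.ImportDirs.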
+func addImportDir(dir string) {
+ if dir != "" {
+ Flag.Cfg.ImportDirs = append(Flag.Cfg.ImportDirs, dir)
+ }
+}
+
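+// addImportMap implements the -importmap flag, recording a single
+// source=actual mapping in Flag.Cfg.ImportMap.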
+func addImportMap(s string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ if strings.Count(s, "=") != 1 {
+ log.Fatal("-importmap argument must be of the form source=actual")
+ }
+ i := strings.Index(s, "=")
+ source, actual := s[:i], s[i+1:]
+ if source == "" || actual == "" {
+ log.Fatal("-importmap argument must be of the form source=actual; source and actual must be non-empty")
+ }
+ Flag.Cfg.ImportMap[source] = actual
+}
+
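+// readImportCfg implements the -importcfg flag, reading importmap and
+// packagefile directives from file into Flag.Cfg.ImportMap and
+// Flag.Cfg.PackageFile.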
+func readImportCfg(file string) {
+ if Flag.Cfg.ImportMap == nil {
+ Flag.Cfg.ImportMap = make(map[string]string)
+ }
+ Flag.Cfg.PackageFile = map[string]string{}
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-importcfg: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ var verb, args string
+ if i := strings.Index(line, " "); i < 0 {
+ verb = line
+ } else {
+ verb, args = line[:i], strings.TrimSpace(line[i+1:])
+ }
+ var before, after string
+ if i := strings.Index(args, "="); i >= 0 {
+ before, after = args[:i], args[i+1:]
+ }
+ switch verb {
+ default:
+ log.Fatalf("%s:%d: unknown directive %q", file, lineNum, verb)
+ case "importmap":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid importmap: syntax is "importmap old=new"`, file, lineNum)
+ }
+ Flag.Cfg.ImportMap[before] = after
+ case "packagefile":
+ if before == "" || after == "" {
+ log.Fatalf(`%s:%d: invalid packagefile: syntax is "packagefile path=filename"`, file, lineNum)
+ }
+ Flag.Cfg.PackageFile[before] = after
+ }
+ }
+}
+
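+// readEmbedCfg implements the -embedcfg flag, loading the //go:embed
+// patterns and files from the JSON configuration in file.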
+func readEmbedCfg(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-embedcfg: %v", err)
+ }
+ if err := json.Unmarshal(data, &Flag.Cfg.Embed); err != nil {
+ log.Fatalf("%s: %v", file, err)
+ }
+ if Flag.Cfg.Embed.Patterns == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Patterns", file)
+ }
+ if Flag.Cfg.Embed.Files == nil {
+ log.Fatalf("%s: invalid embedcfg: missing Files", file)
+ }
+}
+
+// parseSpectre parses the spectre configuration from the string s.
+func parseSpectre(s string) {
+ for _, f := range strings.Split(s, ",") {
+ f = strings.TrimSpace(f)
+ switch f {
+ default:
+ log.Fatalf("unknown setting -spectre=%s", f)
+ case "":
+ // nothing
+ case "all":
+ Flag.Cfg.SpectreIndex = true
+ Ctxt.Retpoline = true
+ case "index":
+ Flag.Cfg.SpectreIndex = true
+ case "ret":
+ Ctxt.Retpoline = true
+ }
+ }
+
+ if Flag.Cfg.SpectreIndex {
+ switch buildcfg.GOARCH {
+ case "amd64":
+ // ok
+ default:
+ log.Fatalf("GOARCH=%s does not support -spectre=index", buildcfg.GOARCH)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/base/link.go b/src/cmd/compile/internal/base/link.go
new file mode 100644
index 0000000..49fe435
--- /dev/null
+++ b/src/cmd/compile/internal/base/link.go
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "cmd/internal/obj"
+)
+
+var Ctxt *obj.Link
+
+// TODO(mdempsky): These should probably be obj.Link methods.
+
+// PkgLinksym returns the linker symbol for name within the given
+// package prefix. For user packages, prefix should be the package
+// path encoded with objabi.PathToPrefix.
+func PkgLinksym(prefix, name string, abi obj.ABI) *obj.LSym {
+ if name == "_" {
+ // TODO(mdempsky): Cleanup callers and Fatalf instead.
+ return linksym(prefix, "_", abi)
+ }
+ return linksym(prefix, prefix+"."+name, abi)
+}
+
+// Linkname returns the linker symbol for the given name as it might
+// appear within a //go:linkname directive.
+func Linkname(name string, abi obj.ABI) *obj.LSym {
+ return linksym("_", name, abi)
+}
+
+// linksym is an internal helper function for implementing the above
+// exported APIs.
+func linksym(pkg, name string, abi obj.ABI) *obj.LSym {
+ return Ctxt.LookupABIInit(name, abi, func(r *obj.LSym) { r.Pkg = pkg })
+}
diff --git a/src/cmd/compile/internal/base/mapfile_mmap.go b/src/cmd/compile/internal/base/mapfile_mmap.go
new file mode 100644
index 0000000..c1616db
--- /dev/null
+++ b/src/cmd/compile/internal/base/mapfile_mmap.go
@@ -0,0 +1,49 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build darwin || dragonfly || freebsd || linux || netbsd || openbsd
+// +build darwin dragonfly freebsd linux netbsd openbsd
+
+package base
+
+import (
+ "os"
+ "reflect"
+ "syscall"
+ "unsafe"
+)
+
+// TODO(mdempsky): Is there a higher-level abstraction that still
+// works well for iimport?
+
+// mapFile returns length bytes from the file starting at the
+// specified offset as a string.
+func MapFile(f *os.File, offset, length int64) (string, error) {
+ // POSIX mmap: "The implementation may require that off is a
+ // multiple of the page size."
+ x := offset & int64(os.Getpagesize()-1)
+ offset -= x
+ length += x
+
+ buf, err := syscall.Mmap(int(f.Fd()), offset, int(length), syscall.PROT_READ, syscall.MAP_SHARED)
+ keepAlive(f)
+ if err != nil {
+ return "", err
+ }
+
+ buf = buf[x:]
+ pSlice := (*reflect.SliceHeader)(unsafe.Pointer(&buf))
+
+ var res string
+ pString := (*reflect.StringHeader)(unsafe.Pointer(&res))
+
+ pString.Data = pSlice.Data
+ pString.Len = pSlice.Len
+
+ return res, nil
+}
+
+// keepAlive is a reimplementation of runtime.KeepAlive, which wasn't
+// added until Go 1.7, whereas we need to compile with Go 1.4.
+var keepAlive = func(interface{}) {}
diff --git a/src/cmd/compile/internal/base/mapfile_read.go b/src/cmd/compile/internal/base/mapfile_read.go
new file mode 100644
index 0000000..01796a9
--- /dev/null
+++ b/src/cmd/compile/internal/base/mapfile_read.go
@@ -0,0 +1,22 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !darwin && !dragonfly && !freebsd && !linux && !netbsd && !openbsd
+// +build !darwin,!dragonfly,!freebsd,!linux,!netbsd,!openbsd
+
+package base
+
+import (
+ "io"
+ "os"
+)
+
+func MapFile(f *os.File, offset, length int64) (string, error) {
+ buf := make([]byte, length)
+ _, err := io.ReadFull(io.NewSectionReader(f, offset, length), buf)
+ if err != nil {
+ return "", err
+ }
+ return string(buf), nil
+}
diff --git a/src/cmd/compile/internal/base/print.go b/src/cmd/compile/internal/base/print.go
new file mode 100644
index 0000000..955f9d2
--- /dev/null
+++ b/src/cmd/compile/internal/base/print.go
@@ -0,0 +1,285 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "os"
+ "runtime/debug"
+ "sort"
+ "strings"
+
+ "cmd/internal/src"
+)
+
+// An errorMsg is a queued error message, waiting to be printed.
+type errorMsg struct {
+ pos src.XPos
+ msg string
+}
+
+// Pos is the current source position being processed,
+// printed by Errorf, ErrorfLang, Fatalf, and Warnf.
+var Pos src.XPos
+
+var (
+ errorMsgs []errorMsg
+ numErrors int // number of entries in errorMsgs that are errors (as opposed to warnings)
+ numSyntaxErrors int
+)
+
+// Errors returns the number of errors reported.
+func Errors() int {
+ return numErrors
+}
+
+// SyntaxErrors returns the number of syntax errors reported
+func SyntaxErrors() int {
+ return numSyntaxErrors
+}
+
+// addErrorMsg adds a new errorMsg (which may be a warning) to errorMsgs.
+func addErrorMsg(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+	// Only add the position if we know the position.
+ // See issue golang.org/issue/11361.
+ if pos.IsKnown() {
+ msg = fmt.Sprintf("%v: %s", FmtPos(pos), msg)
+ }
+ errorMsgs = append(errorMsgs, errorMsg{
+ pos: pos,
+ msg: msg + "\n",
+ })
+}
+
+// FmtPos formats pos as a file:line string.
+func FmtPos(pos src.XPos) string {
+ if Ctxt == nil {
+ return "???"
+ }
+ return Ctxt.OutermostPos(pos).Format(Flag.C == 0, Flag.L == 1)
+}
+
+// byPos sorts errors by source position.
+type byPos []errorMsg
+
+func (x byPos) Len() int { return len(x) }
+func (x byPos) Less(i, j int) bool { return x[i].pos.Before(x[j].pos) }
+func (x byPos) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+// FlushErrors sorts errors seen so far by line number, prints them to stdout,
+// and empties the errors array.
+func FlushErrors() {
+ if Ctxt != nil && Ctxt.Bso != nil {
+ Ctxt.Bso.Flush()
+ }
+ if len(errorMsgs) == 0 {
+ return
+ }
+ sort.Stable(byPos(errorMsgs))
+ for i, err := range errorMsgs {
+ if i == 0 || err.msg != errorMsgs[i-1].msg {
+ fmt.Printf("%s", err.msg)
+ }
+ }
+ errorMsgs = errorMsgs[:0]
+}
+
+// lasterror keeps track of the most recently issued error,
+// to avoid printing multiple error messages on the same line.
+var lasterror struct {
+ syntax src.XPos // source position of last syntax error
+ other src.XPos // source position of last non-syntax error
+ msg string // error message of last non-syntax error
+}
+
+// sameline reports whether two positions a, b are on the same line.
+func sameline(a, b src.XPos) bool {
+ p := Ctxt.PosTable.Pos(a)
+ q := Ctxt.PosTable.Pos(b)
+ return p.Base() == q.Base() && p.Line() == q.Line()
+}
+
+// Errorf reports a formatted error at the current line.
+func Errorf(format string, args ...interface{}) {
+ ErrorfAt(Pos, format, args...)
+}
+
+// ErrorfAt reports a formatted error message at pos.
+func ErrorfAt(pos src.XPos, format string, args ...interface{}) {
+ msg := fmt.Sprintf(format, args...)
+
+ if strings.HasPrefix(msg, "syntax error") {
+ numSyntaxErrors++
+ // only one syntax error per line, no matter what error
+ if sameline(lasterror.syntax, pos) {
+ return
+ }
+ lasterror.syntax = pos
+ } else {
+		// Allow only one of multiple equal non-syntax errors per line.
+		// FlushErrors prints only one of them anyway, so filter the
+		// duplicates here as best we can (they may not appear in order);
+		// otherwise we would count them toward the error limit, exit
+		// early, and then have nothing extra to show for them.
+ if sameline(lasterror.other, pos) && lasterror.msg == msg {
+ return
+ }
+ lasterror.other = pos
+ lasterror.msg = msg
+ }
+
+ addErrorMsg(pos, "%s", msg)
+ numErrors++
+
+ hcrash()
+ if numErrors >= 10 && Flag.LowerE == 0 {
+ FlushErrors()
+ fmt.Printf("%v: too many errors\n", FmtPos(pos))
+ ErrorExit()
+ }
+}
+
+// ErrorfVers reports that a language feature (format, args) requires a later version of Go.
+func ErrorfVers(lang string, format string, args ...interface{}) {
+ Errorf("%s requires %s or later (-lang was set to %s; check go.mod)", fmt.Sprintf(format, args...), lang, Flag.Lang)
+}
+
+// UpdateErrorDot is a clumsy hack that rewrites the last error,
+// if it was "LINE: undefined: NAME", to be "LINE: undefined: NAME in EXPR".
+// It is used to give better error messages for dot (selector) expressions.
+func UpdateErrorDot(line string, name, expr string) {
+ if len(errorMsgs) == 0 {
+ return
+ }
+ e := &errorMsgs[len(errorMsgs)-1]
+ if strings.HasPrefix(e.msg, line) && e.msg == fmt.Sprintf("%v: undefined: %v\n", line, name) {
+ e.msg = fmt.Sprintf("%v: undefined: %v in %v\n", line, name, expr)
+ }
+}
+
+// Warn reports a formatted warning at the current line.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func Warn(format string, args ...interface{}) {
+ WarnfAt(Pos, format, args...)
+}
+
+// WarnfAt reports a formatted warning at pos.
+// In general the Go compiler does NOT generate warnings,
+// so this should be used only when the user has opted in
+// to additional output by setting a particular flag.
+func WarnfAt(pos src.XPos, format string, args ...interface{}) {
+ addErrorMsg(pos, format, args...)
+ if Flag.LowerM != 0 {
+ FlushErrors()
+ }
+}
+
+// Fatalf reports a fatal error - an internal problem - at the current line and exits.
+// If other errors have already been printed, then Fatalf just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// Fatalf prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, Fatalf panics to force the usual runtime info dump.
+func Fatalf(format string, args ...interface{}) {
+ FatalfAt(Pos, format, args...)
+}
+
+// FatalfAt reports a fatal error - an internal problem - at pos and exits.
+// If other errors have already been printed, then FatalfAt just quietly exits.
+// (The internal problem may have been caused by incomplete information
+// after the already-reported errors, so best to let users fix those and
+// try again without being bothered about a spurious internal error.)
+//
+// But if no errors have been printed, or if -d panic has been specified,
+// FatalfAt prints the error as an "internal compiler error". In a released build,
+// it prints an error asking to file a bug report. In development builds, it
+// prints a stack trace.
+//
+// If -h has been specified, FatalfAt panics to force the usual runtime info dump.
+func FatalfAt(pos src.XPos, format string, args ...interface{}) {
+ FlushErrors()
+
+ if Debug.Panic != 0 || numErrors == 0 {
+ fmt.Printf("%v: internal compiler error: ", FmtPos(pos))
+ fmt.Printf(format, args...)
+ fmt.Printf("\n")
+
+ // If this is a released compiler version, ask for a bug report.
+ if Debug.Panic == 0 && strings.HasPrefix(buildcfg.Version, "go") {
+ fmt.Printf("\n")
+ fmt.Printf("Please file a bug report including a short program that triggers the error.\n")
+ fmt.Printf("https://go.dev/issue/new\n")
+ } else {
+ // Not a release; dump a stack trace, too.
+ fmt.Println()
+ os.Stdout.Write(debug.Stack())
+ fmt.Println()
+ }
+ }
+
+ hcrash()
+ ErrorExit()
+}
+
+// Assert reports "assertion failed" with Fatalf, unless b is true.
+func Assert(b bool) {
+ if !b {
+ Fatalf("assertion failed")
+ }
+}
+
+// Assertf reports a fatal error with Fatalf, unless b is true.
+func Assertf(b bool, format string, args ...interface{}) {
+ if !b {
+ Fatalf(format, args...)
+ }
+}
+
+// AssertfAt reports a fatal error with FatalfAt, unless b is true.
+func AssertfAt(b bool, pos src.XPos, format string, args ...interface{}) {
+ if !b {
+ FatalfAt(pos, format, args...)
+ }
+}
+
+// hcrash crashes the compiler when -h is set, to find out where a message is generated.
+func hcrash() {
+ if Flag.LowerH != 0 {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ panic("-h")
+ }
+}
+
+// ErrorExit handles an error-status exit.
+// It flushes any pending errors, removes the output file, and exits.
+func ErrorExit() {
+ FlushErrors()
+ if Flag.LowerO != "" {
+ os.Remove(Flag.LowerO)
+ }
+ os.Exit(2)
+}
+
+// ExitIfErrors calls ErrorExit if any errors have been reported.
+func ExitIfErrors() {
+ if Errors() > 0 {
+ ErrorExit()
+ }
+}
+
+var AutogeneratedPos src.XPos
diff --git a/src/cmd/compile/internal/base/timings.go b/src/cmd/compile/internal/base/timings.go
new file mode 100644
index 0000000..f599f4e
--- /dev/null
+++ b/src/cmd/compile/internal/base/timings.go
@@ -0,0 +1,237 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package base
+
+import (
+ "fmt"
+ "io"
+ "strings"
+ "time"
+)
+
+var Timer Timings
+
+// Timings collects the execution times of labeled phases
+// which are added through a sequence of Start/Stop calls.
+// Events may be associated with each phase via AddEvent.
+type Timings struct {
+ list []timestamp
+ events map[int][]*event // lazily allocated
+}
+
+type timestamp struct {
+ time time.Time
+ label string
+ start bool
+}
+
+type event struct {
+ size int64 // count or amount of data processed (allocations, data size, lines, funcs, ...)
+ unit string // unit of size measure (count, MB, lines, funcs, ...)
+}
+
+func (t *Timings) append(labels []string, start bool) {
+ t.list = append(t.list, timestamp{time.Now(), strings.Join(labels, ":"), start})
+}
+
+// Start marks the beginning of a new phase and implicitly stops the previous phase.
+// The phase name is the colon-separated concatenation of the labels.
+func (t *Timings) Start(labels ...string) {
+ t.append(labels, true)
+}
+
+// Stop marks the end of a phase and implicitly starts a new phase.
+// The labels are added to the labels of the ended phase.
+func (t *Timings) Stop(labels ...string) {
+ t.append(labels, false)
+}
+
+// AddEvent associates an event, i.e., a count, or an amount of data,
+// with the most recently started or stopped phase; or the very first
+// phase if Start or Stop hasn't been called yet. The unit specifies
+// the unit of measurement (e.g., MB, lines, no. of funcs, etc.).
+func (t *Timings) AddEvent(size int64, unit string) {
+ m := t.events
+ if m == nil {
+ m = make(map[int][]*event)
+ t.events = m
+ }
+ i := len(t.list)
+ if i > 0 {
+ i--
+ }
+ m[i] = append(m[i], &event{size, unit})
+}
+
+// Write prints the phase times to w.
+// The prefix is printed at the start of each line.
+func (t *Timings) Write(w io.Writer, prefix string) {
+ if len(t.list) > 0 {
+ var lines lines
+
+ // group of phases with shared non-empty label prefix
+ var group struct {
+ label string // label prefix
+ tot time.Duration // accumulated phase time
+ size int // number of phases collected in group
+ }
+
+ // accumulated time between Stop/Start timestamps
+ var unaccounted time.Duration
+
+ // process Start/Stop timestamps
+ pt := &t.list[0] // previous timestamp
+ tot := t.list[len(t.list)-1].time.Sub(pt.time)
+ for i := 1; i < len(t.list); i++ {
+ qt := &t.list[i] // current timestamp
+ dt := qt.time.Sub(pt.time)
+
+ var label string
+ var events []*event
+ if pt.start {
+ // previous phase started
+ label = pt.label
+ events = t.events[i-1]
+ if qt.start {
+ // start implicitly ended previous phase; nothing to do
+ } else {
+ // stop ended previous phase; append stop labels, if any
+ if qt.label != "" {
+ label += ":" + qt.label
+ }
+ // events associated with stop replace prior events
+ if e := t.events[i]; e != nil {
+ events = e
+ }
+ }
+ } else {
+ // previous phase stopped
+ if qt.start {
+ // between a stopped and started phase; unaccounted time
+ unaccounted += dt
+ } else {
+ // previous stop implicitly started current phase
+ label = qt.label
+ events = t.events[i]
+ }
+ }
+ if label != "" {
+ // add phase to existing group, or start a new group
+ l := commonPrefix(group.label, label)
+ if group.size == 1 && l != "" || group.size > 1 && l == group.label {
+ // add to existing group
+ group.label = l
+ group.tot += dt
+ group.size++
+ } else {
+ // start a new group
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+ group.label = label
+ group.tot = dt
+ group.size = 1
+ }
+
+ // write phase
+ lines.add(prefix+label, 1, dt, tot, events)
+ }
+
+ pt = qt
+ }
+
+ if group.size > 1 {
+ lines.add(prefix+group.label+"subtotal", 1, group.tot, tot, nil)
+ }
+
+ if unaccounted != 0 {
+ lines.add(prefix+"unaccounted", 1, unaccounted, tot, nil)
+ }
+
+ lines.add(prefix+"total", 1, tot, tot, nil)
+
+ lines.write(w)
+ }
+}
+
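+// commonPrefix returns the longest common prefix of a and b.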
+func commonPrefix(a, b string) string {
+ i := 0
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return a[:i]
+}
+
+type lines [][]string
+
+func (lines *lines) add(label string, n int, dt, tot time.Duration, events []*event) {
+ var line []string
+ add := func(format string, args ...interface{}) {
+ line = append(line, fmt.Sprintf(format, args...))
+ }
+
+ add("%s", label)
+ add(" %d", n)
+ add(" %d ns/op", dt)
+ add(" %.2f %%", float64(dt)/float64(tot)*100)
+
+ for _, e := range events {
+ add(" %d", e.size)
+ add(" %s", e.unit)
+ add(" %d", int64(float64(e.size)/dt.Seconds()+0.5))
+ add(" %s/s", e.unit)
+ }
+
+ *lines = append(*lines, line)
+}
+
+func (lines lines) write(w io.Writer) {
+ // determine column widths and contents
+ var widths []int
+ var number []bool
+ for _, line := range lines {
+ for i, col := range line {
+ if i < len(widths) {
+ if len(col) > widths[i] {
+ widths[i] = len(col)
+ }
+ } else {
+ widths = append(widths, len(col))
+ number = append(number, isnumber(col)) // first line determines column contents
+ }
+ }
+ }
+
+ // make column widths a multiple of align for more stable output
+ const align = 1 // set to a value > 1 to enable
+ if align > 1 {
+ for i, w := range widths {
+ w += align - 1
+ widths[i] = w - w%align
+ }
+ }
+
+ // print lines taking column widths and contents into account
+ for _, line := range lines {
+ for i, col := range line {
+ format := "%-*s"
+ if number[i] {
+ format = "%*s" // numbers are right-aligned
+ }
+ fmt.Fprintf(w, format, widths[i], col)
+ }
+ fmt.Fprintln(w)
+ }
+}
+
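+// isnumber reports whether s looks numeric, judging by its first
+// non-blank character.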
+func isnumber(s string) bool {
+ for _, ch := range s {
+ if ch <= ' ' {
+ continue // ignore leading whitespace
+ }
+ return '0' <= ch && ch <= '9' || ch == '.' || ch == '-' || ch == '+'
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/bitvec/bv.go b/src/cmd/compile/internal/bitvec/bv.go
new file mode 100644
index 0000000..ad7ed0a
--- /dev/null
+++ b/src/cmd/compile/internal/bitvec/bv.go
@@ -0,0 +1,201 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package bitvec
+
+import (
+ "math/bits"
+
+ "cmd/compile/internal/base"
+)
+
+const (
+ wordBits = 32
+ wordMask = wordBits - 1
+ wordShift = 5
+)
+
+// A BitVec is a bit vector.
+type BitVec struct {
+ N int32 // number of bits in vector
+ B []uint32 // words holding bits
+}
+
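+// New returns a new bit vector capable of holding n bits, all initially zero.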
+func New(n int32) BitVec {
+ nword := (n + wordBits - 1) / wordBits
+ return BitVec{n, make([]uint32, nword)}
+}
+
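+// A Bulk is a bulk allocator of equally sized bit vectors.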
+type Bulk struct {
+ words []uint32
+ nbit int32
+ nword int32
+}
+
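+// NewBulk allocates backing storage for count bit vectors of nbit bits each.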
+func NewBulk(nbit int32, count int32) Bulk {
+ nword := (nbit + wordBits - 1) / wordBits
+ size := int64(nword) * int64(count)
+ if int64(int32(size*4)) != size*4 {
+ base.Fatalf("NewBulk too big: nbit=%d count=%d nword=%d size=%d", nbit, count, nword, size)
+ }
+ return Bulk{
+ words: make([]uint32, size),
+ nbit: nbit,
+ nword: nword,
+ }
+}
+
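+// Next returns the next unused bit vector from the bulk allocation.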
+func (b *Bulk) Next() BitVec {
+ out := BitVec{b.nbit, b.words[:b.nword]}
+ b.words = b.words[b.nword:]
+ return out
+}
+
+func (bv1 BitVec) Eq(bv2 BitVec) bool {
+ if bv1.N != bv2.N {
+ base.Fatalf("bvequal: lengths %d and %d are not equal", bv1.N, bv2.N)
+ }
+ for i, x := range bv1.B {
+ if x != bv2.B[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (dst BitVec) Copy(src BitVec) {
+ copy(dst.B, src.B)
+}
+
+func (bv BitVec) Get(i int32) bool {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvget: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ return bv.B[i>>wordShift]&mask != 0
+}
+
+func (bv BitVec) Set(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] |= mask
+}
+
+func (bv BitVec) Unset(i int32) {
+ if i < 0 || i >= bv.N {
+ base.Fatalf("bvunset: index %d is out of bounds with length %d\n", i, bv.N)
+ }
+ mask := uint32(1 << uint(i%wordBits))
+ bv.B[i/wordBits] &^= mask
+}
+
+// Next returns the smallest index >= i for which bv.Get(i) is true.
+// If there is no such index, Next returns -1.
+func (bv BitVec) Next(i int32) int32 {
+ if i >= bv.N {
+ return -1
+ }
+
+ // Jump i ahead to next word with bits.
+ if bv.B[i>>wordShift]>>uint(i&wordMask) == 0 {
+ i &^= wordMask
+ i += wordBits
+ for i < bv.N && bv.B[i>>wordShift] == 0 {
+ i += wordBits
+ }
+ }
+
+ if i >= bv.N {
+ return -1
+ }
+
+ // Find 1 bit.
+ w := bv.B[i>>wordShift] >> uint(i&wordMask)
+ i += int32(bits.TrailingZeros32(w))
+
+ return i
+}
+
+func (bv BitVec) IsEmpty() bool {
+ for _, x := range bv.B {
+ if x != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+func (bv BitVec) Count() int {
+ n := 0
+ for _, x := range bv.B {
+ n += bits.OnesCount32(x)
+ }
+ return n
+}
+
+func (bv BitVec) Not() {
+ for i, x := range bv.B {
+ bv.B[i] = ^x
+ }
+ if bv.N%wordBits != 0 {
+ bv.B[len(bv.B)-1] &= 1<<uint(bv.N%wordBits) - 1 // clear bits past N in the last word
+ }
+}
+
+// Or sets dst to the union (bitwise OR) of src1 and src2.
+func (dst BitVec) Or(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x | src2.B[i]
+ }
+}
+
+// And sets dst to the intersection (bitwise AND) of src1 and src2.
+func (dst BitVec) And(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x & src2.B[i]
+ }
+}
+
+// AndNot sets dst to the difference src1 &^ src2 (bits set in src1 but not in src2).
+func (dst BitVec) AndNot(src1, src2 BitVec) {
+ if len(src1.B) == 0 {
+ return
+ }
+ _, _ = dst.B[len(src1.B)-1], src2.B[len(src1.B)-1] // hoist bounds checks out of the loop
+
+ for i, x := range src1.B {
+ dst.B[i] = x &^ src2.B[i]
+ }
+}
+
+func (bv BitVec) String() string {
+ s := make([]byte, 2+bv.N)
+ copy(s, "#*")
+ for i := int32(0); i < bv.N; i++ {
+ ch := byte('0')
+ if bv.Get(i) {
+ ch = '1'
+ }
+ s[2+i] = ch
+ }
+ return string(s)
+}
+
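+// Clear zeroes all bits in bv.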
+func (bv BitVec) Clear() {
+ for i := range bv.B {
+ bv.B[i] = 0
+ }
+}
diff --git a/src/cmd/compile/internal/deadcode/deadcode.go b/src/cmd/compile/internal/deadcode/deadcode.go
new file mode 100644
index 0000000..c37a5a6
--- /dev/null
+++ b/src/cmd/compile/internal/deadcode/deadcode.go
@@ -0,0 +1,167 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package deadcode
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
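+// Func performs dead-code elimination on fn's body. If, after elimination,
+// every remaining top-level statement is a no-op (a constant if with empty
+// branches, or a for loop that can never execute), the body is replaced by
+// a single empty block statement.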
+func Func(fn *ir.Func) {
+ stmts(&fn.Body)
+
+ if len(fn.Body) == 0 {
+ return
+ }
+
+ for _, n := range fn.Body {
+ if len(n.Init()) > 0 {
+ return
+ }
+ switch n.Op() {
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ if !ir.IsConst(n.Cond, constant.Bool) || len(n.Body) > 0 || len(n.Else) > 0 {
+ return
+ }
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ if !ir.IsConst(n.Cond, constant.Bool) || ir.BoolVal(n.Cond) {
+ return
+ }
+ default:
+ return
+ }
+ }
+
+ ir.VisitList(fn.Body, markHiddenClosureDead)
+ fn.Body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
+}
+
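+// stmts performs dead-code elimination on the statement list *nn,
+// recursing into nested statement lists and truncating the list once
+// the remaining tail is known to be unreachable.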
+func stmts(nn *ir.Nodes) {
+ var lastLabel = -1
+ for i, n := range *nn {
+ if n != nil && n.Op() == ir.OLABEL {
+ lastLabel = i
+ }
+ }
+ for i, n := range *nn {
+		// cut is set to true when all nodes after the i'th position
+		// should be removed; in other words, it marks the whole
+		// "tail" of the slice as dead.
+ cut := false
+ if n == nil {
+ continue
+ }
+ if n.Op() == ir.OIF {
+ n := n.(*ir.IfStmt)
+ n.Cond = expr(n.Cond)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ var body ir.Nodes
+ if ir.BoolVal(n.Cond) {
+ ir.VisitList(n.Else, markHiddenClosureDead)
+ n.Else = ir.Nodes{}
+ body = n.Body
+ } else {
+ ir.VisitList(n.Body, markHiddenClosureDead)
+ n.Body = ir.Nodes{}
+ body = n.Else
+ }
+ // If "then" or "else" branch ends with panic or return statement,
+ // it is safe to remove all statements after this node.
+ // isterminating is not used to avoid goto-related complications.
+ // We must be careful not to deadcode-remove labels, as they
+ // might be the target of a goto. See issue 28616.
+				if len(body) != 0 {
+					switch body[len(body)-1].Op() {
+ case ir.ORETURN, ir.OTAILCALL, ir.OPANIC:
+ if i > lastLabel {
+ cut = true
+ }
+ }
+ }
+ }
+ }
+
+ if len(n.Init()) != 0 {
+ stmts(n.(ir.InitNode).PtrInit())
+ }
+ switch n.Op() {
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ stmts(&n.List)
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ stmts(&n.Body)
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ stmts(&n.Body)
+ stmts(&n.Else)
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ stmts(&n.Body)
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.Cases {
+ stmts(&cas.Body)
+ }
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ for _, cas := range n.Cases {
+ stmts(&cas.Body)
+ }
+ }
+
+ if cut {
+ ir.VisitList((*nn)[i+1:len(*nn)], markHiddenClosureDead)
+ *nn = (*nn)[:i+1]
+ break
+ }
+ }
+}
+
+func expr(n ir.Node) ir.Node {
+ // Perform dead-code elimination on short-circuited boolean
+ // expressions involving constants with the intent of
+ // producing a constant 'if' condition.
+ switch n.Op() {
+ case ir.OANDAND:
+ n := n.(*ir.LogicalExpr)
+ n.X = expr(n.X)
+ n.Y = expr(n.Y)
+ if ir.IsConst(n.X, constant.Bool) {
+ if ir.BoolVal(n.X) {
+ return n.Y // true && x => x
+ } else {
+ return n.X // false && x => false
+ }
+ }
+ case ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ n.X = expr(n.X)
+ n.Y = expr(n.Y)
+ if ir.IsConst(n.X, constant.Bool) {
+ if ir.BoolVal(n.X) {
+ return n.X // true || x => true
+ } else {
+ return n.Y // false || x => x
+ }
+ }
+ }
+ return n
+}
+
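+// markHiddenClosureDead marks n, if it is a hidden closure, as dead code
+// so that it will not be compiled, and recurses into the closure body.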
+func markHiddenClosureDead(n ir.Node) {
+ if n.Op() != ir.OCLOSURE {
+ return
+ }
+ clo := n.(*ir.ClosureExpr)
+ if clo.Func.IsHiddenClosure() {
+ clo.Func.SetIsDeadcodeClosure(true)
+ }
+ ir.VisitList(clo.Func.Body, markHiddenClosureDead)
+}
diff --git a/src/cmd/compile/internal/devirtualize/devirtualize.go b/src/cmd/compile/internal/devirtualize/devirtualize.go
new file mode 100644
index 0000000..60ba208
--- /dev/null
+++ b/src/cmd/compile/internal/devirtualize/devirtualize.go
@@ -0,0 +1,85 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package devirtualize implements a simple "devirtualization"
+// optimization pass, which replaces interface method calls with
+// direct concrete-type method calls where possible.
+package devirtualize
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// Func devirtualizes calls within fn where possible.
+func Func(fn *ir.Func) {
+ ir.CurFunc = fn
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if call, ok := n.(*ir.CallExpr); ok {
+ Call(call)
+ }
+ })
+}
+
+// Call devirtualizes the given call if possible.
+func Call(call *ir.CallExpr) {
+ if call.Op() != ir.OCALLINTER {
+ return
+ }
+ sel := call.X.(*ir.SelectorExpr)
+ r := ir.StaticValue(sel.X)
+ if r.Op() != ir.OCONVIFACE {
+ return
+ }
+ recv := r.(*ir.ConvExpr)
+
+ typ := recv.X.Type()
+ if typ.IsInterface() {
+ return
+ }
+
+ dt := ir.NewTypeAssertExpr(sel.Pos(), sel.X, nil)
+ dt.SetType(typ)
+ x := typecheck.Callee(ir.NewSelectorExpr(sel.Pos(), ir.OXDOT, dt, sel.Sel))
+ switch x.Op() {
+ case ir.ODOTMETH:
+ x := x.(*ir.SelectorExpr)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "devirtualizing %v to %v", sel, typ)
+ }
+ call.SetOp(ir.OCALLMETH)
+ call.X = x
+ case ir.ODOTINTER:
+ // Promoted method from embedded interface-typed field (#42279).
+ x := x.(*ir.SelectorExpr)
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "partially devirtualizing %v to %v", sel, typ)
+ }
+ call.SetOp(ir.OCALLINTER)
+ call.X = x
+ default:
+ // TODO(mdempsky): Turn back into Fatalf after more testing.
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(call.Pos(), "failed to devirtualize %v (%v)", x, x.Op())
+ }
+ return
+ }
+
+ // Duplicated logic from typecheck for function call return
+ // value types.
+ //
+ // Receiver parameter size may have changed; need to update
+ // call.Type to get correct stack offsets for result
+ // parameters.
+ types.CheckSize(x.Type())
+ switch ft := x.Type(); ft.NumResults() {
+ case 0:
+ case 1:
+ call.SetType(ft.Results().Field(0).Type)
+ default:
+ call.SetType(ft.Results())
+ }
+}
diff --git a/src/cmd/compile/internal/dwarfgen/dwarf.go b/src/cmd/compile/internal/dwarfgen/dwarf.go
new file mode 100644
index 0000000..8ae03d7
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwarf.go
@@ -0,0 +1,585 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+func Info(fnsym *obj.LSym, infosym *obj.LSym, curfn interface{}) ([]dwarf.Scope, dwarf.InlCalls) {
+ fn := curfn.(*ir.Func)
+
+ if fn.Nname != nil {
+ expect := fn.Linksym()
+ if fnsym.ABI() == obj.ABI0 {
+ expect = fn.LinksymABI(obj.ABI0)
+ }
+ if fnsym != expect {
+ base.Fatalf("unexpected fnsym: %v != %v", fnsym, expect)
+ }
+ }
+
+ // Back when there were two different *Funcs for a function, this code
+ // was not consistent about whether a particular *Node being processed
+ // was an ODCLFUNC or ONAME node. Partly this is because inlined function
+	// bodies have no ODCLFUNC node, which was its own inconsistency.
+ // In any event, the handling of the two different nodes for DWARF purposes
+ // was subtly different, likely in unintended ways. CL 272253 merged the
+ // two nodes' Func fields, so that code sees the same *Func whether it is
+ // holding the ODCLFUNC or the ONAME. This resulted in changes in the
+ // DWARF output. To preserve the existing DWARF output and leave an
+ // intentional change for a future CL, this code does the following when
+ // fn.Op == ONAME:
+ //
+ // 1. Disallow use of createComplexVars in createDwarfVars.
+ // It was not possible to reach that code for an ONAME before,
+ // because the DebugInfo was set only on the ODCLFUNC Func.
+ // Calling into it in the ONAME case causes an index out of bounds panic.
+ //
+ // 2. Do not populate apdecls. fn.Func.Dcl was in the ODCLFUNC Func,
+ // not the ONAME Func. Populating apdecls for the ONAME case results
+ // in selected being populated after createSimpleVars is called in
+ // createDwarfVars, and then that causes the loop to skip all the entries
+ // in dcl, meaning that the RecordAutoType calls don't happen.
+ //
+ // These two adjustments keep toolstash -cmp working for now.
+ // Deciding the right answer is, as they say, future work.
+ //
+ // We can tell the difference between the old ODCLFUNC and ONAME
+ // cases by looking at the infosym.Name. If it's empty, DebugInfo is
+ // being called from (*obj.Link).populateDWARF, which used to use
+ // the ODCLFUNC. If it's non-empty (the name will end in $abstract),
+ // DebugInfo is being called from (*obj.Link).DwarfAbstractFunc,
+ // which used to use the ONAME form.
+ isODCLFUNC := infosym.Name == ""
+
+ var apdecls []*ir.Name
+ // Populate decls for fn.
+ if isODCLFUNC {
+ for _, n := range fn.Dcl {
+ if n.Op() != ir.ONAME { // might be OTYPE or OLITERAL
+ continue
+ }
+ switch n.Class {
+ case ir.PAUTO:
+ if !n.Used() {
+ // Text == nil -> generating abstract function
+ if fnsym.Func().Text != nil {
+ base.Fatalf("debuginfo unused node (AllocFrame should truncate fn.Func.Dcl)")
+ }
+ continue
+ }
+ case ir.PPARAM, ir.PPARAMOUT:
+ default:
+ continue
+ }
+ apdecls = append(apdecls, n)
+ if n.Type().Kind() == types.TSSA {
+ // Can happen for TypeInt128 types. This only happens for
+ // spill locations, so not a huge deal.
+ continue
+ }
+ fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+ }
+ }
+
+ decls, dwarfVars := createDwarfVars(fnsym, isODCLFUNC, fn, apdecls)
+
+	// For each type referenced by the function's auto vars but not
+	// already referenced by a dwarf var, attach an R_USETYPE relocation to
+	// the function symbol to ensure that the type is included in DWARF
+	// processing during linking.
+ typesyms := []*obj.LSym{}
+	for t := range fnsym.Func().Autot {
+ typesyms = append(typesyms, t)
+ }
+ sort.Sort(obj.BySymName(typesyms))
+ for _, sym := range typesyms {
+ r := obj.Addrel(infosym)
+ r.Sym = sym
+ r.Type = objabi.R_USETYPE
+ }
+ fnsym.Func().Autot = nil
+
+ var varScopes []ir.ScopeID
+ for _, decl := range decls {
+ pos := declPos(decl)
+ varScopes = append(varScopes, findScope(fn.Marks, pos))
+ }
+
+ scopes := assembleScopes(fnsym, fn, dwarfVars, varScopes)
+ var inlcalls dwarf.InlCalls
+ if base.Flag.GenDwarfInl > 0 {
+ inlcalls = assembleInlines(fnsym, dwarfVars)
+ }
+ return scopes, inlcalls
+}
+
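+// declPos returns the source position of decl's canonical declaration.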
+func declPos(decl *ir.Name) src.XPos {
+ return decl.Canonical().Pos()
+}
+
+// createDwarfVars processes fn, returning a list of DWARF variables and the
+// Nodes they represent.
+func createDwarfVars(fnsym *obj.LSym, complexOK bool, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var) {
+ // Collect a raw list of DWARF vars.
+ var vars []*dwarf.Var
+ var decls []*ir.Name
+ var selected ir.NameSet
+
+ if base.Ctxt.Flag_locationlists && base.Ctxt.Flag_optimize && fn.DebugInfo != nil && complexOK {
+ decls, vars, selected = createComplexVars(fnsym, fn)
+ } else if fn.ABI == obj.ABIInternal && base.Flag.N != 0 && complexOK {
+ decls, vars, selected = createABIVars(fnsym, fn, apDecls)
+ } else {
+ decls, vars, selected = createSimpleVars(fnsym, apDecls)
+ }
+
+ dcl := apDecls
+ if fnsym.WasInlined() {
+ dcl = preInliningDcls(fnsym)
+ } else {
+ // The backend's stackframe pass prunes away entries from the
+ // fn's Dcl list, including PARAMOUT nodes that correspond to
+ // output params passed in registers. Add back in these
+ // entries here so that we can process them properly during
+ // DWARF-gen. See issue 48573 for more details.
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+ for _, n := range debugInfo.RegOutputParams {
+ if n.Class != ir.PPARAMOUT || !n.IsOutputParamInRegisters() {
+ panic("invalid ir.Name on debugInfo.RegOutputParams list")
+ }
+ dcl = append(dcl, n)
+ }
+ }
+
+ // If optimization is enabled, the list above will typically be
+ // missing some of the original pre-optimization variables in the
+ // function (they may have been promoted to registers, folded into
+ // constants, dead-coded away, etc). Input arguments not eligible
+ // for SSA optimization are also missing. Here we add back in entries
+ // for selected missing vars. Note that the recipe below creates a
+ // conservative location. The idea here is that we want to
+ // communicate to the user that "yes, there is a variable named X
+ // in this function, but no, I don't have enough information to
+ // reliably report its contents."
+ // For non-SSA-able arguments, however, the correct information
+ // is known -- they have a single home on the stack.
+ for _, n := range dcl {
+ if selected.Has(n) {
+ continue
+ }
+ c := n.Sym().Name[0]
+ if c == '.' || n.Type().IsUntyped() {
+ continue
+ }
+ if n.Class == ir.PPARAM && !ssagen.TypeOK(n.Type()) {
+ // SSA-able args get location lists, and may move in and
+ // out of registers, so those are handled elsewhere.
+ // Autos and named output params seem to get handled
+ // with VARDEF, which creates location lists.
+ // Args not of SSA-able type are treated here; they
+ // are homed on the stack in a single place for the
+ // entire call.
+ vars = append(vars, createSimpleVar(fnsym, n))
+ decls = append(decls, n)
+ continue
+ }
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+ decls = append(decls, n)
+ abbrev := dwarf.DW_ABRV_AUTO_LOCLIST
+ isReturnValue := (n.Class == ir.PPARAMOUT)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ if n.Esc() == ir.EscHeap {
+ // The variable in question has been promoted to the heap.
+ // Its address is in n.Heapaddr.
+ // TODO(thanm): generate a better location expression
+ }
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(n.Pos())
+ vars = append(vars, &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: isReturnValue,
+ Abbrev: abbrev,
+ StackOffset: int32(n.FrameOffset()),
+ Type: base.Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ })
+		// Record the Go type to ensure that it gets emitted by the linker.
+ fnsym.Func().RecordAutoType(reflectdata.TypeLinksym(n.Type()))
+ }
+
+ // Sort decls and vars.
+ sortDeclsAndVars(fn, decls, vars)
+
+ return decls, vars
+}
+
+// sortDeclsAndVars sorts the decl and dwarf var lists according to
+// parameter declaration order, so as to ensure that when a subprogram
+// DIE is emitted, its parameter children appear in declaration order.
+// Prior to the advent of the register ABI, sorting by frame offset
+// would achieve this; with the register ABI we now need to go back to the
+// original function signature.
+func sortDeclsAndVars(fn *ir.Func, decls []*ir.Name, vars []*dwarf.Var) {
+ paramOrder := make(map[*ir.Name]int)
+ idx := 1
+ for _, selfn := range types.RecvsParamsResults {
+ fsl := selfn(fn.Type()).FieldSlice()
+ for _, f := range fsl {
+ if n, ok := f.Nname.(*ir.Name); ok {
+ paramOrder[n] = idx
+ idx++
+ }
+ }
+ }
+ sort.Stable(varsAndDecls{decls, vars, paramOrder})
+}
+
+type varsAndDecls struct {
+ decls []*ir.Name
+ vars []*dwarf.Var
+ paramOrder map[*ir.Name]int
+}
+
+func (v varsAndDecls) Len() int {
+ return len(v.decls)
+}
+
+func (v varsAndDecls) Less(i, j int) bool {
+ nameLT := func(ni, nj *ir.Name) bool {
+ oi, foundi := v.paramOrder[ni]
+ oj, foundj := v.paramOrder[nj]
+ if foundi {
+ if foundj {
+ return oi < oj
+ } else {
+ return true
+ }
+ }
+ return false
+ }
+ return nameLT(v.decls[i], v.decls[j])
+}
+
+func (v varsAndDecls) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.decls[i], v.decls[j] = v.decls[j], v.decls[i]
+}
+
+// preInliningDcls, given a function that was inlined at some point during
+// the compilation, returns a sorted list of nodes corresponding to the
+// autos/locals in that function prior to inlining. If this is a
+// function that is not local to the package being compiled, then the
+// names of the variables may have been "versioned" to avoid conflicts
+// with local vars; disregard this versioning when sorting.
+func preInliningDcls(fnsym *obj.LSym) []*ir.Name {
+ fn := base.Ctxt.DwFixups.GetPrecursorFunc(fnsym).(*ir.Func)
+ var rdcl []*ir.Name
+ for _, n := range fn.Inl.Dcl {
+ c := n.Sym().Name[0]
+ // Avoid reporting "_" parameters, since if there are more than
+ // one, it can result in a collision later on, as in #23179.
+ if unversion(n.Sym().Name) == "_" || c == '.' || n.Type().IsUntyped() {
+ continue
+ }
+ rdcl = append(rdcl, n)
+ }
+ return rdcl
+}
+
+// createSimpleVars creates a DWARF entry for every variable declared in the
+// function, claiming that they are permanently on the stack.
+func createSimpleVars(fnsym *obj.LSym, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+ var vars []*dwarf.Var
+ var decls []*ir.Name
+ var selected ir.NameSet
+ for _, n := range apDecls {
+ if ir.IsAutoTmp(n) {
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected.Add(n)
+ }
+ return decls, vars, selected
+}
+
+func createSimpleVar(fnsym *obj.LSym, n *ir.Name) *dwarf.Var {
+ var abbrev int
+ var offs int64
+
+ localAutoOffset := func() int64 {
+ offs = n.FrameOffset()
+ if base.Ctxt.FixedFrameSize() == 0 {
+ offs -= int64(types.PtrSize)
+ }
+ if buildcfg.FramePointerEnabled {
+ offs -= int64(types.PtrSize)
+ }
+ return offs
+ }
+
+ switch n.Class {
+ case ir.PAUTO:
+ offs = localAutoOffset()
+ abbrev = dwarf.DW_ABRV_AUTO
+ case ir.PPARAM, ir.PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM
+ if n.IsOutputParamInRegisters() {
+ offs = localAutoOffset()
+ } else {
+ offs = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ }
+
+ default:
+ base.Fatalf("createSimpleVar unexpected class %v for node %v", n.Class, n)
+ }
+
+ typename := dwarf.InfoPrefix + types.TypeSymName(n.Type())
+ delete(fnsym.Func().Autot, reflectdata.TypeLinksym(n.Type()))
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(declPos(n))
+ return &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class == ir.PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ StackOffset: int32(offs),
+ Type: base.Ctxt.Lookup(typename),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ }
+}
+
+// createABIVars creates DWARF variables for functions in which the
+// register ABI is enabled but optimization is turned off. It uses a
+// hybrid approach in which register-resident input params are
+// captured with location lists, and all other vars use the "simple"
+// strategy.
+func createABIVars(fnsym *obj.LSym, fn *ir.Func, apDecls []*ir.Name) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+
+ // Invoke createComplexVars to generate dwarf vars for input parameters
+ // that are register-allocated according to the ABI rules.
+ decls, vars, selected := createComplexVars(fnsym, fn)
+
+ // Now fill in the remainder of the variables: input parameters
+ // that are not register-resident, output parameters, and local
+ // variables.
+ for _, n := range apDecls {
+ if ir.IsAutoTmp(n) {
+ continue
+ }
+ if _, ok := selected[n]; ok {
+ // already handled
+ continue
+ }
+
+ decls = append(decls, n)
+ vars = append(vars, createSimpleVar(fnsym, n))
+ selected.Add(n)
+ }
+
+ return decls, vars, selected
+}
+
+// createComplexVars creates recomposed DWARF vars with location lists,
+// suitable for describing optimized code.
+func createComplexVars(fnsym *obj.LSym, fn *ir.Func) ([]*ir.Name, []*dwarf.Var, ir.NameSet) {
+ debugInfo := fn.DebugInfo.(*ssa.FuncDebug)
+
+ // Produce a DWARF variable entry for each user variable.
+ var decls []*ir.Name
+ var vars []*dwarf.Var
+ var ssaVars ir.NameSet
+
+ for varID, dvar := range debugInfo.Vars {
+ n := dvar
+ ssaVars.Add(n)
+ for _, slot := range debugInfo.VarSlots[varID] {
+ ssaVars.Add(debugInfo.Slots[slot].N)
+ }
+
+ if dvar := createComplexVar(fnsym, fn, ssa.VarID(varID)); dvar != nil {
+ decls = append(decls, n)
+ vars = append(vars, dvar)
+ }
+ }
+
+ return decls, vars, ssaVars
+}
+
+// createComplexVar builds a single DWARF variable entry and location list.
+func createComplexVar(fnsym *obj.LSym, fn *ir.Func, varID ssa.VarID) *dwarf.Var {
+ debug := fn.DebugInfo.(*ssa.FuncDebug)
+ n := debug.Vars[varID]
+
+ var abbrev int
+ switch n.Class {
+ case ir.PAUTO:
+ abbrev = dwarf.DW_ABRV_AUTO_LOCLIST
+ case ir.PPARAM, ir.PPARAMOUT:
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ default:
+ return nil
+ }
+
+ gotype := reflectdata.TypeLinksym(n.Type())
+ delete(fnsym.Func().Autot, gotype)
+ typename := dwarf.InfoPrefix + gotype.Name[len("type."):]
+ inlIndex := 0
+ if base.Flag.GenDwarfInl > 1 {
+ if n.InlFormal() || n.InlLocal() {
+ inlIndex = posInlIndex(n.Pos()) + 1
+ if n.InlFormal() {
+ abbrev = dwarf.DW_ABRV_PARAM_LOCLIST
+ }
+ }
+ }
+ declpos := base.Ctxt.InnermostPos(n.Pos())
+ dvar := &dwarf.Var{
+ Name: n.Sym().Name,
+ IsReturnValue: n.Class == ir.PPARAMOUT,
+ IsInlFormal: n.InlFormal(),
+ Abbrev: abbrev,
+ Type: base.Ctxt.Lookup(typename),
+ // The stack offset is used as a sorting key, so for decomposed
+ // variables just give it the first one. It's not used otherwise.
+ // This won't work well if the first slot hasn't been assigned a stack
+ // location, but it's not obvious how to do better.
+ StackOffset: ssagen.StackOffset(debug.Slots[debug.VarSlots[varID][0]]),
+ DeclFile: declpos.RelFilename(),
+ DeclLine: declpos.RelLine(),
+ DeclCol: declpos.RelCol(),
+ InlIndex: int32(inlIndex),
+ ChildIndex: -1,
+ DictIndex: n.DictIndex,
+ }
+ list := debug.LocationLists[varID]
+ if len(list) != 0 {
+ dvar.PutLocationList = func(listSym, startPC dwarf.Sym) {
+ debug.PutLocationList(list, base.Ctxt, listSym.(*obj.LSym), startPC.(*obj.LSym))
+ }
+ }
+ return dvar
+}
+
+// RecordFlags records the specified command-line flags to be placed
+// in the DWARF info.
+func RecordFlags(flags ...string) {
+ if base.Ctxt.Pkgpath == "" {
+ // We can't record the flags if we don't know what the
+ // package name is.
+ return
+ }
+
+ type BoolFlag interface {
+ IsBoolFlag() bool
+ }
+ type CountFlag interface {
+ IsCountFlag() bool
+ }
+ var cmd bytes.Buffer
+ for _, name := range flags {
+ f := flag.Lookup(name)
+ if f == nil {
+ continue
+ }
+ getter := f.Value.(flag.Getter)
+ if getter.String() == f.DefValue {
+ // Flag has default value, so omit it.
+ continue
+ }
+ if bf, ok := f.Value.(BoolFlag); ok && bf.IsBoolFlag() {
+ val, ok := getter.Get().(bool)
+ if ok && val {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ if cf, ok := f.Value.(CountFlag); ok && cf.IsCountFlag() {
+ val, ok := getter.Get().(int)
+ if ok && val == 1 {
+ fmt.Fprintf(&cmd, " -%s", f.Name)
+ continue
+ }
+ }
+ fmt.Fprintf(&cmd, " -%s=%v", f.Name, getter.Get())
+ }
+
+	// Add a flag to the producer string signaling whether regabi is turned
+	// on or off.
+	// Once regabi is turned on across the board and the related GOEXPERIMENT
+	// knobs no longer exist, this code should be removed.
+ if buildcfg.Experiment.RegabiArgs {
+ cmd.Write([]byte(" regabi"))
+ }
+
+ if cmd.Len() == 0 {
+ return
+ }
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "producer." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = cmd.Bytes()[1:]
+}
+
+// RecordPackageName records the name of the package being
+// compiled, so that the linker can save it in the compile unit's DIE.
+func RecordPackageName() {
+ s := base.Ctxt.Lookup(dwarf.CUInfoPrefix + "packagename." + base.Ctxt.Pkgpath)
+ s.Type = objabi.SDWARFCUINFO
+ // Sometimes (for example when building tests) we can link
+ // together two package main archives. So allow dups.
+ s.Set(obj.AttrDuplicateOK, true)
+ base.Ctxt.Data = append(base.Ctxt.Data, s)
+ s.P = []byte(types.LocalPkg.Name)
+}
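RecordFlags above records only the flags that differ from their defaults, and prints boolean (and count) flags without an explicit value. A minimal standalone sketch of that filtering idea, using only the standard flag package, is shown below; producerString and the flag names are invented for illustration, and the count-flag case is omitted.

package main

import (
	"bytes"
	"flag"
	"fmt"
)

// producerString keeps only flags that differ from their defaults and
// prints boolean flags without an explicit value.
func producerString(names ...string) string {
	type boolFlag interface{ IsBoolFlag() bool }
	var buf bytes.Buffer
	for _, name := range names {
		f := flag.Lookup(name)
		if f == nil || f.Value.String() == f.DefValue {
			continue // unknown flag, or still at its default
		}
		if bf, ok := f.Value.(boolFlag); ok && bf.IsBoolFlag() {
			fmt.Fprintf(&buf, " -%s", f.Name)
			continue
		}
		fmt.Fprintf(&buf, " -%s=%s", f.Name, f.Value.String())
	}
	return buf.String()
}

func main() {
	flag.Int("n", 0, "count")
	flag.Bool("v", false, "verbose")
	flag.CommandLine.Parse([]string{"-n", "3", "-v"})
	fmt.Printf("%q\n", producerString("n", "v")) // " -n=3 -v"
}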
diff --git a/src/cmd/compile/internal/dwarfgen/dwinl.go b/src/cmd/compile/internal/dwarfgen/dwinl.go
new file mode 100644
index 0000000..c785e06
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/dwinl.go
@@ -0,0 +1,454 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "fmt"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// To identify variables by original source position.
+type varPos struct {
+ DeclName string
+ DeclFile string
+ DeclLine uint
+ DeclCol uint
+}
+
+// assembleInlines is the main entry point for collecting the raw material
+// needed to drive generation of DWARF "inlined subroutine" DIEs. See
+// proposal 22080 for more details and background info.
+func assembleInlines(fnsym *obj.LSym, dwVars []*dwarf.Var) dwarf.InlCalls {
+ var inlcalls dwarf.InlCalls
+
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("assembling DWARF inlined routine info for %v\n", fnsym.Name)
+ }
+
+ // This maps inline index (from Ctxt.InlTree) to index in inlcalls.Calls
+ imap := make(map[int]int)
+
+ // Walk progs to build up the InlCalls data structure
+ var prevpos src.XPos
+ for p := fnsym.Func().Text; p != nil; p = p.Link {
+ if p.Pos == prevpos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii >= 0 {
+ insertInlCall(&inlcalls, ii, imap)
+ }
+ prevpos = p.Pos
+ }
+
+ // This is used to partition DWARF vars by inline index. Vars not
+ // produced by the inliner will wind up in the vmap[0] entry.
+ vmap := make(map[int32][]*dwarf.Var)
+
+ // Now walk the dwarf vars and partition them based on whether they
+ // were produced by the inliner (dwv.InlIndex > 0) or were original
+ // vars/params from the function (dwv.InlIndex == 0).
+ for _, dwv := range dwVars {
+
+ vmap[dwv.InlIndex] = append(vmap[dwv.InlIndex], dwv)
+
+ // Zero index => var was not produced by an inline
+ if dwv.InlIndex == 0 {
+ continue
+ }
+
+ // Look up index in our map, then tack the var in question
+ // onto the vars list for the correct inlined call.
+ ii := int(dwv.InlIndex) - 1
+ idx, ok := imap[ii]
+ if !ok {
+ // We can occasionally encounter a var produced by the
+ // inliner for which there is no remaining prog; add a new
+ // entry to the call list in this scenario.
+ idx = insertInlCall(&inlcalls, ii, imap)
+ }
+ inlcalls.Calls[idx].InlVars =
+ append(inlcalls.Calls[idx].InlVars, dwv)
+ }
+
+ // Post process the map above to assign child indices to vars.
+ //
+ // A given variable is treated differently depending on whether it
+ // is part of the top-level function (ii == 0) or if it was
+ // produced as a result of an inline (ii != 0).
+ //
+ // If a variable was not produced by an inline and its containing
+	// function was not inlined, then we just assign an ordering
+	// based on variable name.
+ //
+ // If a variable was not produced by an inline and its containing
+ // function was inlined, then we need to assign a child index
+ // based on the order of vars in the abstract function (in
+ // addition, those vars that don't appear in the abstract
+ // function, such as "~r1", are flagged as such).
+ //
+ // If a variable was produced by an inline, then we locate it in
+ // the pre-inlining decls for the target function and assign child
+ // index accordingly.
+ for ii, sl := range vmap {
+ var m map[varPos]int
+ if ii == 0 {
+ if !fnsym.WasInlined() {
+ for j, v := range sl {
+ v.ChildIndex = int32(j)
+ }
+ continue
+ }
+ m = makePreinlineDclMap(fnsym)
+ } else {
+ ifnlsym := base.Ctxt.InlTree.InlinedFunction(int(ii - 1))
+ m = makePreinlineDclMap(ifnlsym)
+ }
+
+ // Here we assign child indices to variables based on
+ // pre-inlined decls, and set the "IsInAbstract" flag
+ // appropriately. In addition: parameter and local variable
+		// names are given "middle dot" version numbers as part of
+		// writing them out to export data (see issue 4326). If DWARF
+		// inlined routine generation is turned on, we want to undo
+		// this versioning, since the DWARF variables in question will be
+ // parented by the inlined routine and not the top-level
+ // caller.
+ synthCount := len(m)
+ for _, v := range sl {
+ canonName := unversion(v.Name)
+ vp := varPos{
+ DeclName: canonName,
+ DeclFile: v.DeclFile,
+ DeclLine: v.DeclLine,
+ DeclCol: v.DeclCol,
+ }
+ synthesized := strings.HasPrefix(v.Name, "~r") || canonName == "_" || strings.HasPrefix(v.Name, "~b")
+ if idx, found := m[vp]; found {
+ v.ChildIndex = int32(idx)
+ v.IsInAbstract = !synthesized
+ v.Name = canonName
+ } else {
+ // Variable can't be found in the pre-inline dcl list.
+ // In the top-level case (ii=0) this can happen
+ // because a composite variable was split into pieces,
+ // and we're looking at a piece. We can also see
+ // return temps (~r%d) that were created during
+ // lowering, or unnamed params ("_").
+ v.ChildIndex = int32(synthCount)
+ synthCount++
+ }
+ }
+ }
+
+ // Make a second pass through the progs to compute PC ranges for
+ // the various inlined calls.
+ start := int64(-1)
+ curii := -1
+ var prevp *obj.Prog
+ for p := fnsym.Func().Text; p != nil; prevp, p = p, p.Link {
+ if prevp != nil && p.Pos == prevp.Pos {
+ continue
+ }
+ ii := posInlIndex(p.Pos)
+ if ii == curii {
+ continue
+ }
+ // Close out the current range
+ if start != -1 {
+ addRange(inlcalls.Calls, start, p.Pc, curii, imap)
+ }
+ // Begin new range
+ start = p.Pc
+ curii = ii
+ }
+ if start != -1 {
+ addRange(inlcalls.Calls, start, fnsym.Size, curii, imap)
+ }
+
+ // Issue 33188: if II foo is a child of II bar, then ensure that
+ // bar's ranges include the ranges of foo (the loop above will produce
+ // disjoint ranges).
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ unifyCallRanges(inlcalls, k)
+ }
+ }
+
+ // Debugging
+ if base.Debug.DwarfInl != 0 {
+ dumpInlCalls(inlcalls)
+ dumpInlVars(dwVars)
+ }
+
+ // Perform a consistency check on inlined routine PC ranges
+ // produced by unifyCallRanges above. In particular, complain in
+ // cases where you have A -> B -> C (e.g. C is inlined into B, and
+ // B is inlined into A) and the ranges for B are not enclosed
+ // within the ranges for A, or C within B.
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ checkInlCall(fnsym.Name, inlcalls, fnsym.Size, k, -1)
+ }
+ }
+
+ return inlcalls
+}
+
+// Secondary hook for DWARF inlined subroutine generation. This is called
+// late in the compilation when it is determined that we need an
+// abstract function DIE for an inlined routine imported from a
+// previously compiled package.
+func AbstractFunc(fn *obj.LSym) {
+ ifn := base.Ctxt.DwFixups.GetPrecursorFunc(fn)
+ if ifn == nil {
+ base.Ctxt.Diag("failed to locate precursor fn for %v", fn)
+ return
+ }
+ _ = ifn.(*ir.Func)
+ if base.Debug.DwarfInl != 0 {
+ base.Ctxt.Logf("DwarfAbstractFunc(%v)\n", fn.Name)
+ }
+ base.Ctxt.DwarfAbstractFunc(ifn, fn, base.Ctxt.Pkgpath)
+}
+
+// Undo any versioning performed when a name was written
+// out as part of export data.
+func unversion(name string) string {
+ if i := strings.Index(name, "·"); i > 0 {
+ name = name[:i]
+ }
+ return name
+}
+
+// Given a function that was inlined as part of the compilation, dig
+// up the pre-inlining DCL list for the function and create a map that
+// supports lookup of pre-inline dcl index, based on variable
+// position/name. NB: the recipe for computing variable pos/file/line
+// needs to be kept in sync with the similar code in gc.createSimpleVars
+// and related functions.
+func makePreinlineDclMap(fnsym *obj.LSym) map[varPos]int {
+ dcl := preInliningDcls(fnsym)
+ m := make(map[varPos]int)
+ for i, n := range dcl {
+ pos := base.Ctxt.InnermostPos(n.Pos())
+ vp := varPos{
+ DeclName: unversion(n.Sym().Name),
+ DeclFile: pos.RelFilename(),
+ DeclLine: pos.RelLine(),
+ DeclCol: pos.RelCol(),
+ }
+ if _, found := m[vp]; found {
+			// We can see collisions (variables with the same
+			// name/file/line/col) in obfuscated or machine-generated
+			// code -- see issue 44378 for an example. Skip duplicates
+			// in such cases, since it is unlikely that a human will be
+			// debugging such code.
+ continue
+ }
+ m[vp] = i
+ }
+ return m
+}
+
+func insertInlCall(dwcalls *dwarf.InlCalls, inlIdx int, imap map[int]int) int {
+ callIdx, found := imap[inlIdx]
+ if found {
+ return callIdx
+ }
+
+ // Haven't seen this inline yet. Visit parent of inline if there
+ // is one. We do this first so that parents appear before their
+ // children in the resulting table.
+ parCallIdx := -1
+ parInlIdx := base.Ctxt.InlTree.Parent(inlIdx)
+ if parInlIdx >= 0 {
+ parCallIdx = insertInlCall(dwcalls, parInlIdx, imap)
+ }
+
+ // Create new entry for this inline
+ inlinedFn := base.Ctxt.InlTree.InlinedFunction(inlIdx)
+ callXPos := base.Ctxt.InlTree.CallPos(inlIdx)
+ absFnSym := base.Ctxt.DwFixups.AbsFuncDwarfSym(inlinedFn)
+ pb := base.Ctxt.PosTable.Pos(callXPos).Base()
+ callFileSym := base.Ctxt.Lookup(pb.SymFilename())
+ ic := dwarf.InlCall{
+ InlIndex: inlIdx,
+ CallFile: callFileSym,
+ CallLine: uint32(callXPos.Line()),
+ AbsFunSym: absFnSym,
+ Root: parCallIdx == -1,
+ }
+ dwcalls.Calls = append(dwcalls.Calls, ic)
+ callIdx = len(dwcalls.Calls) - 1
+ imap[inlIdx] = callIdx
+
+ if parCallIdx != -1 {
+ // Add this inline to parent's child list
+ dwcalls.Calls[parCallIdx].Children = append(dwcalls.Calls[parCallIdx].Children, callIdx)
+ }
+
+ return callIdx
+}
+
+// Given a src.XPos, return its associated inlining index if it
+// corresponds to something created as a result of an inline, or -1 if
+// there is no inline info. Note that the index returned will refer to
+// the deepest call in the inlined stack, e.g. if you have "A calls B
+// calls C calls D" and all three callees are inlined (B, C, and D),
+// the index for a node from the inlined body of D will refer to the
+// call to D from C. Whew.
+func posInlIndex(xpos src.XPos) int {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ if b := pos.Base(); b != nil {
+ ii := b.InliningIndex()
+ if ii >= 0 {
+ return ii
+ }
+ }
+ return -1
+}
+
+func addRange(calls []dwarf.InlCall, start, end int64, ii int, imap map[int]int) {
+ if start == -1 {
+ panic("bad range start")
+ }
+ if end == -1 {
+ panic("bad range end")
+ }
+ if ii == -1 {
+ return
+ }
+ if start == end {
+ return
+ }
+ // Append range to correct inlined call
+ callIdx, found := imap[ii]
+ if !found {
+ base.Fatalf("can't find inlIndex %d in imap for prog at %d\n", ii, start)
+ }
+ call := &calls[callIdx]
+ call.Ranges = append(call.Ranges, dwarf.Range{Start: start, End: end})
+}
+
+func dumpInlCall(inlcalls dwarf.InlCalls, idx, ilevel int) {
+ for i := 0; i < ilevel; i++ {
+ base.Ctxt.Logf(" ")
+ }
+ ic := inlcalls.Calls[idx]
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex)
+ base.Ctxt.Logf(" %d: II:%d (%s) V: (", idx, ic.InlIndex, callee.Name)
+ for _, f := range ic.InlVars {
+ base.Ctxt.Logf(" %v", f.Name)
+ }
+ base.Ctxt.Logf(" ) C: (")
+ for _, k := range ic.Children {
+ base.Ctxt.Logf(" %v", k)
+ }
+ base.Ctxt.Logf(" ) R:")
+ for _, r := range ic.Ranges {
+ base.Ctxt.Logf(" [%d,%d)", r.Start, r.End)
+ }
+ base.Ctxt.Logf("\n")
+ for _, k := range ic.Children {
+ dumpInlCall(inlcalls, k, ilevel+1)
+ }
+
+}
+
+func dumpInlCalls(inlcalls dwarf.InlCalls) {
+ for k, c := range inlcalls.Calls {
+ if c.Root {
+ dumpInlCall(inlcalls, k, 0)
+ }
+ }
+}
+
+func dumpInlVars(dwvars []*dwarf.Var) {
+ for i, dwv := range dwvars {
+ typ := "local"
+ if dwv.Abbrev == dwarf.DW_ABRV_PARAM_LOCLIST || dwv.Abbrev == dwarf.DW_ABRV_PARAM {
+ typ = "param"
+ }
+ ia := 0
+ if dwv.IsInAbstract {
+ ia = 1
+ }
+ base.Ctxt.Logf("V%d: %s CI:%d II:%d IA:%d %s\n", i, dwv.Name, dwv.ChildIndex, dwv.InlIndex-1, ia, typ)
+ }
+}
+
+func rangesContains(par []dwarf.Range, rng dwarf.Range) (bool, string) {
+ for _, r := range par {
+ if rng.Start >= r.Start && rng.End <= r.End {
+ return true, ""
+ }
+ }
+ msg := fmt.Sprintf("range [%d,%d) not contained in {", rng.Start, rng.End)
+ for _, r := range par {
+ msg += fmt.Sprintf(" [%d,%d)", r.Start, r.End)
+ }
+ msg += " }"
+ return false, msg
+}
+
+func rangesContainsAll(parent, child []dwarf.Range) (bool, string) {
+ for _, r := range child {
+ c, m := rangesContains(parent, r)
+ if !c {
+ return false, m
+ }
+ }
+ return true, ""
+}
+
+// checkInlCall verifies that the PC ranges for inline info 'idx' are
+// enclosed/contained within the ranges of its parent inline (or if
+// this is a root/toplevel inline, checks that the ranges fall within
+// the extent of the top level function). A panic is issued if a
+// malformed range is found.
+func checkInlCall(funcName string, inlCalls dwarf.InlCalls, funcSize int64, idx, parentIdx int) {
+
+ // Callee
+ ic := inlCalls.Calls[idx]
+ callee := base.Ctxt.InlTree.InlinedFunction(ic.InlIndex).Name
+ calleeRanges := ic.Ranges
+
+ // Caller
+ caller := funcName
+ parentRanges := []dwarf.Range{dwarf.Range{Start: int64(0), End: funcSize}}
+ if parentIdx != -1 {
+ pic := inlCalls.Calls[parentIdx]
+ caller = base.Ctxt.InlTree.InlinedFunction(pic.InlIndex).Name
+ parentRanges = pic.Ranges
+ }
+
+ // Callee ranges contained in caller ranges?
+ c, m := rangesContainsAll(parentRanges, calleeRanges)
+ if !c {
+ base.Fatalf("** malformed inlined routine range in %s: caller %s callee %s II=%d %s\n", funcName, caller, callee, idx, m)
+ }
+
+ // Now visit kids
+ for _, k := range ic.Children {
+ checkInlCall(funcName, inlCalls, funcSize, k, idx)
+ }
+}
+
+// unifyCallRanges ensures that the ranges for a given inline
+// transitively include all of the ranges for its child inlines.
+func unifyCallRanges(inlcalls dwarf.InlCalls, idx int) {
+ ic := &inlcalls.Calls[idx]
+ for _, childIdx := range ic.Children {
+ // First make sure child ranges are unified.
+ unifyCallRanges(inlcalls, childIdx)
+
+ // Then merge child ranges into ranges for this inline.
+ cic := inlcalls.Calls[childIdx]
+ ic.Ranges = dwarf.MergeRanges(ic.Ranges, cic.Ranges)
+ }
+}
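The second half of dwinl.go is about PC ranges: unifyCallRanges folds each child's ranges into its parent, and checkInlCall then verifies that every child range is enclosed by a parent range. A minimal standalone sketch of that containment check over half-open [start,end) ranges follows; all names are hypothetical.

package main

import "fmt"

type rng struct{ start, end int64 } // half-open [start, end)

// contains reports whether r lies entirely within one of the parent ranges.
func contains(parent []rng, r rng) bool {
	for _, p := range parent {
		if r.start >= p.start && r.end <= p.end {
			return true
		}
	}
	return false
}

// containsAll reports whether every child range is enclosed by the parent
// ranges, the property checkInlCall enforces after unifyCallRanges has run.
func containsAll(parent, child []rng) bool {
	for _, r := range child {
		if !contains(parent, r) {
			return false
		}
	}
	return true
}

func main() {
	parent := []rng{{0, 64}, {96, 128}}
	fmt.Println(containsAll(parent, []rng{{8, 16}, {100, 120}})) // true
	fmt.Println(containsAll(parent, []rng{{60, 100}}))           // false: spans the gap
}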
diff --git a/src/cmd/compile/internal/dwarfgen/marker.go b/src/cmd/compile/internal/dwarfgen/marker.go
new file mode 100644
index 0000000..ec6ce45
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/marker.go
@@ -0,0 +1,94 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+)
+
+// A ScopeMarker tracks scope nesting and boundaries for later use
+// during DWARF generation.
+type ScopeMarker struct {
+ parents []ir.ScopeID
+ marks []ir.Mark
+}
+
+// checkPos validates the given position and returns the current scope.
+func (m *ScopeMarker) checkPos(pos src.XPos) ir.ScopeID {
+ if !pos.IsKnown() {
+ base.Fatalf("unknown scope position")
+ }
+
+ if len(m.marks) == 0 {
+ return 0
+ }
+
+ last := &m.marks[len(m.marks)-1]
+ if xposBefore(pos, last.Pos) {
+ base.FatalfAt(pos, "non-monotonic scope positions\n\t%v: previous scope position", base.FmtPos(last.Pos))
+ }
+ return last.Scope
+}
+
+// Push records a transition to a new child scope of the current scope.
+func (m *ScopeMarker) Push(pos src.XPos) {
+ current := m.checkPos(pos)
+
+ m.parents = append(m.parents, current)
+ child := ir.ScopeID(len(m.parents))
+
+ m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: child})
+}
+
+// Pop records a transition back to the current scope's parent.
+func (m *ScopeMarker) Pop(pos src.XPos) {
+ current := m.checkPos(pos)
+
+ parent := m.parents[current-1]
+
+ m.marks = append(m.marks, ir.Mark{Pos: pos, Scope: parent})
+}
+
+// Unpush removes the current scope, which must be empty.
+func (m *ScopeMarker) Unpush() {
+ i := len(m.marks) - 1
+ current := m.marks[i].Scope
+
+ if current != ir.ScopeID(len(m.parents)) {
+ base.FatalfAt(m.marks[i].Pos, "current scope is not empty")
+ }
+
+ m.parents = m.parents[:current-1]
+ m.marks = m.marks[:i]
+}
+
+// WriteTo writes the recorded scope marks to the given function,
+// and resets the marker for reuse.
+func (m *ScopeMarker) WriteTo(fn *ir.Func) {
+ m.compactMarks()
+
+ fn.Parents = make([]ir.ScopeID, len(m.parents))
+ copy(fn.Parents, m.parents)
+ m.parents = m.parents[:0]
+
+ fn.Marks = make([]ir.Mark, len(m.marks))
+ copy(fn.Marks, m.marks)
+ m.marks = m.marks[:0]
+}
+
+func (m *ScopeMarker) compactMarks() {
+ n := 0
+ for _, next := range m.marks {
+ if n > 0 && next.Pos == m.marks[n-1].Pos {
+ m.marks[n-1].Scope = next.Scope
+ continue
+ }
+ m.marks[n] = next
+ n++
+ }
+ m.marks = m.marks[:n]
+}
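ScopeMarker records scope transitions as (position, scope) marks plus a parent table, with scope IDs assigned in the order scopes are opened. A simplified standalone analog, using plain ints for positions, is sketched below; the names and types are invented for illustration and omit the error checking done by checkPos.

package main

import "fmt"

// mark records that scope takes effect at pos.
type mark struct{ pos, scope int }

// marker is a stripped-down analog of ScopeMarker: parents[i] is the
// parent of scope i+1, and marks records transitions in source order.
type marker struct {
	parents []int
	marks   []mark
}

func (m *marker) current() int {
	if len(m.marks) == 0 {
		return 0 // the implicit root scope
	}
	return m.marks[len(m.marks)-1].scope
}

// push opens a new child of the current scope at pos.
func (m *marker) push(pos int) {
	m.parents = append(m.parents, m.current())
	m.marks = append(m.marks, mark{pos, len(m.parents)})
}

// pop returns to the parent of the current scope at pos.
func (m *marker) pop(pos int) {
	m.marks = append(m.marks, mark{pos, m.parents[m.current()-1]})
}

func main() {
	var m marker
	m.push(10) // enter scope 1
	m.push(20) // enter scope 2, child of 1
	m.pop(30)  // back to scope 1
	m.pop(40)  // back to the root scope
	fmt.Println(m.parents) // [0 1]
	fmt.Println(m.marks)   // [{10 1} {20 2} {30 1} {40 0}]
}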
diff --git a/src/cmd/compile/internal/dwarfgen/scope.go b/src/cmd/compile/internal/dwarfgen/scope.go
new file mode 100644
index 0000000..b4ae69e
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/scope.go
@@ -0,0 +1,136 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// See golang.org/issue/20390.
+func xposBefore(p, q src.XPos) bool {
+ return base.Ctxt.PosTable.Pos(p).Before(base.Ctxt.PosTable.Pos(q))
+}
+
+func findScope(marks []ir.Mark, pos src.XPos) ir.ScopeID {
+ i := sort.Search(len(marks), func(i int) bool {
+ return xposBefore(pos, marks[i].Pos)
+ })
+ if i == 0 {
+ return 0
+ }
+ return marks[i-1].Scope
+}
+
+func assembleScopes(fnsym *obj.LSym, fn *ir.Func, dwarfVars []*dwarf.Var, varScopes []ir.ScopeID) []dwarf.Scope {
+ // Initialize the DWARF scope tree based on lexical scopes.
+ dwarfScopes := make([]dwarf.Scope, 1+len(fn.Parents))
+ for i, parent := range fn.Parents {
+ dwarfScopes[i+1].Parent = int32(parent)
+ }
+
+ scopeVariables(dwarfVars, varScopes, dwarfScopes, fnsym.ABI() != obj.ABI0)
+ if fnsym.Func().Text != nil {
+ scopePCs(fnsym, fn.Marks, dwarfScopes)
+ }
+ return compactScopes(dwarfScopes)
+}
+
+// scopeVariables assigns DWARF variable records to their scopes.
+func scopeVariables(dwarfVars []*dwarf.Var, varScopes []ir.ScopeID, dwarfScopes []dwarf.Scope, regabi bool) {
+ if regabi {
+ sort.Stable(varsByScope{dwarfVars, varScopes})
+ } else {
+ sort.Stable(varsByScopeAndOffset{dwarfVars, varScopes})
+ }
+
+ i0 := 0
+ for i := range dwarfVars {
+ if varScopes[i] == varScopes[i0] {
+ continue
+ }
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:i]
+ i0 = i
+ }
+ if i0 < len(dwarfVars) {
+ dwarfScopes[varScopes[i0]].Vars = dwarfVars[i0:]
+ }
+}
+
+// scopePCs assigns PC ranges to their scopes.
+func scopePCs(fnsym *obj.LSym, marks []ir.Mark, dwarfScopes []dwarf.Scope) {
+ // If there aren't any child scopes (in particular, when scope
+ // tracking is disabled), we can skip a whole lot of work.
+ if len(marks) == 0 {
+ return
+ }
+ p0 := fnsym.Func().Text
+ scope := findScope(marks, p0.Pos)
+ for p := p0; p != nil; p = p.Link {
+ if p.Pos == p0.Pos {
+ continue
+ }
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: p.Pc})
+ p0 = p
+ scope = findScope(marks, p0.Pos)
+ }
+ if p0.Pc < fnsym.Size {
+ dwarfScopes[scope].AppendRange(dwarf.Range{Start: p0.Pc, End: fnsym.Size})
+ }
+}
+
+func compactScopes(dwarfScopes []dwarf.Scope) []dwarf.Scope {
+ // Reverse pass to propagate PC ranges to parent scopes.
+ for i := len(dwarfScopes) - 1; i > 0; i-- {
+ s := &dwarfScopes[i]
+ dwarfScopes[s.Parent].UnifyRanges(s)
+ }
+
+ return dwarfScopes
+}
+
+type varsByScopeAndOffset struct {
+ vars []*dwarf.Var
+ scopes []ir.ScopeID
+}
+
+func (v varsByScopeAndOffset) Len() int {
+ return len(v.vars)
+}
+
+func (v varsByScopeAndOffset) Less(i, j int) bool {
+ if v.scopes[i] != v.scopes[j] {
+ return v.scopes[i] < v.scopes[j]
+ }
+ return v.vars[i].StackOffset < v.vars[j].StackOffset
+}
+
+func (v varsByScopeAndOffset) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
+}
+
+type varsByScope struct {
+ vars []*dwarf.Var
+ scopes []ir.ScopeID
+}
+
+func (v varsByScope) Len() int {
+ return len(v.vars)
+}
+
+func (v varsByScope) Less(i, j int) bool {
+ return v.scopes[i] < v.scopes[j]
+}
+
+func (v varsByScope) Swap(i, j int) {
+ v.vars[i], v.vars[j] = v.vars[j], v.vars[i]
+ v.scopes[i], v.scopes[j] = v.scopes[j], v.scopes[i]
+}
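findScope above relies on the marks being ordered by position: a binary search finds the last mark at or before the query position, and that mark's scope is the one in effect. A standalone sketch of the same lookup with int positions (hypothetical names, continuing the example from the marker sketch above):

package main

import (
	"fmt"
	"sort"
)

type mark struct {
	pos   int // position where the scope takes effect
	scope int
}

// findScope returns the scope in effect at pos: the scope of the last
// mark whose position is not after pos, or 0 if pos precedes every mark.
func findScope(marks []mark, pos int) int {
	i := sort.Search(len(marks), func(i int) bool {
		return pos < marks[i].pos
	})
	if i == 0 {
		return 0
	}
	return marks[i-1].scope
}

func main() {
	marks := []mark{{10, 1}, {20, 2}, {30, 1}, {40, 0}}
	for _, pos := range []int{5, 15, 25, 35, 45} {
		fmt.Println(pos, "->", findScope(marks, pos))
	}
	// 5 -> 0, 15 -> 1, 25 -> 2, 35 -> 1, 45 -> 0
}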
diff --git a/src/cmd/compile/internal/dwarfgen/scope_test.go b/src/cmd/compile/internal/dwarfgen/scope_test.go
new file mode 100644
index 0000000..3df4c34
--- /dev/null
+++ b/src/cmd/compile/internal/dwarfgen/scope_test.go
@@ -0,0 +1,539 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package dwarfgen
+
+import (
+ "debug/dwarf"
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "testing"
+
+ "cmd/internal/objfile"
+)
+
+type testline struct {
+ // line is one line of go source
+ line string
+
+ // scopes is a list of scope IDs of all the lexical scopes that this line
+ // of code belongs to.
+ // Scope IDs are assigned by traversing the tree of lexical blocks of a
+ // function in pre-order
+ // Scope IDs are function specific, i.e. scope 0 is always the root scope
+ // of the function that this line belongs to. Empty scopes are not assigned
+ // an ID (because they are not saved in debug_info).
+ // Scope 0 is always omitted from this list since all lines always belong
+ // to it.
+ scopes []int
+
+ // vars is the list of variables that belong in scopes[len(scopes)-1].
+ // Local variables are prefixed with "var ", formal parameters with "arg ".
+ // Must be ordered alphabetically.
+ // Set to nil to skip the check.
+ vars []string
+
+ // decl is the list of variables declared at this line.
+ decl []string
+
+ // declBefore is the list of variables declared at or before this line.
+ declBefore []string
+}
+
+var testfile = []testline{
+ {line: "package main"},
+ {line: "func f1(x int) { }"},
+ {line: "func f2(x int) { }"},
+ {line: "func f3(x int) { }"},
+ {line: "func f4(x int) { }"},
+ {line: "func f5(x int) { }"},
+ {line: "func f6(x int) { }"},
+ {line: "func fi(x interface{}) { if a, ok := x.(error); ok { a.Error() } }"},
+ {line: "func gret1() int { return 2 }"},
+ {line: "func gretbool() bool { return true }"},
+ {line: "func gret3() (int, int, int) { return 0, 1, 2 }"},
+ {line: "var v = []int{ 0, 1, 2 }"},
+ {line: "var ch = make(chan int)"},
+ {line: "var floatch = make(chan float64)"},
+ {line: "var iface interface{}"},
+ {line: "func TestNestedFor() {", vars: []string{"var a int"}},
+ {line: " a := 0", decl: []string{"a"}},
+ {line: " f1(a)"},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f2(i)", scopes: []int{1}},
+ {line: " for i := 0; i < 5; i++ {", scopes: []int{1, 2}, vars: []string{"var i int"}, decl: []string{"i"}},
+ {line: " f3(i)", scopes: []int{1, 2}},
+ {line: " }"},
+ {line: " f4(i)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f5(a)"},
+ {line: "}"},
+ {line: "func TestOas2() {", vars: []string{}},
+ {line: " if a, b, c := gret3(); a != 1 {", scopes: []int{1}, vars: []string{"var a int", "var b int", "var c int"}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(c)", scopes: []int{1}},
+ {line: " }"},
+ {line: " for i, x := range v {", scopes: []int{2}, vars: []string{"var i int", "var x int"}},
+ {line: " f1(i)", scopes: []int{2}},
+ {line: " f1(x)", scopes: []int{2}},
+ {line: " }"},
+ {line: " if a, ok := <- ch; ok {", scopes: []int{3}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{3}},
+ {line: " }"},
+ {line: " if a, ok := iface.(int); ok {", scopes: []int{4}, vars: []string{"var a int", "var ok bool"}},
+ {line: " f1(a)", scopes: []int{4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestIfElse() {"},
+ {line: " if x := gret1(); x != 0 {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " a := 0", scopes: []int{1, 2}, vars: []string{"var a int"}},
+ {line: " f1(a); f1(x)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " b := 1", scopes: []int{1, 3}, vars: []string{"var b int"}},
+ {line: " f1(b); f1(x+1)", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSwitch() {", vars: []string{}},
+ {line: " switch x := gret1(); x {", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case 0:", scopes: []int{1, 2}},
+ {line: " i := x + 5", scopes: []int{1, 2}, vars: []string{"var i int"}},
+ {line: " f1(x); f1(i)", scopes: []int{1, 2}},
+ {line: " case 1:", scopes: []int{1, 3}},
+ {line: " j := x + 10", scopes: []int{1, 3}, vars: []string{"var j int"}},
+ {line: " f1(x); f1(j)", scopes: []int{1, 3}},
+ {line: " case 2:", scopes: []int{1, 4}},
+ {line: " k := x + 2", scopes: []int{1, 4}, vars: []string{"var k int"}},
+ {line: " f1(x); f1(k)", scopes: []int{1, 4}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestTypeSwitch() {", vars: []string{}},
+ {line: " switch x := iface.(type) {"},
+ {line: " case int:", scopes: []int{1}},
+ {line: " f1(x)", scopes: []int{1}, vars: []string{"var x int"}},
+ {line: " case uint8:", scopes: []int{2}},
+ {line: " f1(int(x))", scopes: []int{2}, vars: []string{"var x uint8"}},
+ {line: " case float64:", scopes: []int{3}},
+ {line: " f1(int(x)+1)", scopes: []int{3}, vars: []string{"var x float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestSelectScope() {"},
+ {line: " select {"},
+ {line: " case i := <- ch:", scopes: []int{1}},
+ {line: " f1(i)", scopes: []int{1}, vars: []string{"var i int"}},
+ {line: " case f := <- floatch:", scopes: []int{2}},
+ {line: " f1(int(f))", scopes: []int{2}, vars: []string{"var f float64"}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestBlock() {", vars: []string{"var a int"}},
+ {line: " a := 1"},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f1(b)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "func TestDiscontiguousRanges() {", vars: []string{"var a int"}},
+ {line: " a := 0"},
+ {line: " f1(a)"},
+ {line: " {"},
+ {line: " b := 0", scopes: []int{1}, vars: []string{"var b int"}},
+ {line: " f2(b)", scopes: []int{1}},
+ {line: " if gretbool() {", scopes: []int{1}},
+ {line: " c := 0", scopes: []int{1, 2}, vars: []string{"var c int"}},
+ {line: " f3(c)", scopes: []int{1, 2}},
+ {line: " } else {"},
+ {line: " c := 1.1", scopes: []int{1, 3}, vars: []string{"var c float64"}},
+ {line: " f4(int(c))", scopes: []int{1, 3}},
+ {line: " }"},
+ {line: " f5(b)", scopes: []int{1}},
+ {line: " }"},
+ {line: " f6(a)"},
+ {line: "}"},
+ {line: "func TestClosureScope() {", vars: []string{"var a int", "var b int", "var f func(int)"}},
+ {line: " a := 1; b := 1"},
+ {line: " f := func(c int) {", scopes: []int{0}, vars: []string{"arg c int", "var &b *int", "var a int", "var d int"}, declBefore: []string{"&b", "a"}},
+ {line: " d := 3"},
+ {line: " f1(c); f1(d)"},
+ {line: " if e := 3; e != 0 {", scopes: []int{1}, vars: []string{"var e int"}},
+ {line: " f1(e)", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " b = 2", scopes: []int{1}},
+ {line: " }"},
+ {line: " }"},
+ {line: " f(3); f1(b)"},
+ {line: "}"},
+ {line: "func TestEscape() {"},
+ {line: " a := 1", vars: []string{"var a int"}},
+ {line: " {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var &b *int", "var p *int"}},
+ {line: " p := &b", scopes: []int{1}},
+ {line: " f1(a)", scopes: []int{1}},
+ {line: " fi(p)", scopes: []int{1}},
+ {line: " }"},
+ {line: "}"},
+ {line: "var fglob func() int"},
+ {line: "func TestCaptureVar(flag bool) {"},
+ {line: " a := 1", vars: []string{"arg flag bool", "var a int"}}, // TODO(register args) restore "arg ~r1 func() int",
+ {line: " if flag {"},
+ {line: " b := 2", scopes: []int{1}, vars: []string{"var b int", "var f func() int"}},
+ {line: " f := func() int {", scopes: []int{1, 0}},
+ {line: " return b + 1"},
+ {line: " }"},
+ {line: " fglob = f", scopes: []int{1}},
+ {line: " }"},
+ {line: " f1(a)"},
+ {line: "}"},
+ {line: "func main() {"},
+ {line: " TestNestedFor()"},
+ {line: " TestOas2()"},
+ {line: " TestIfElse()"},
+ {line: " TestSwitch()"},
+ {line: " TestTypeSwitch()"},
+ {line: " TestSelectScope()"},
+ {line: " TestBlock()"},
+ {line: " TestDiscontiguousRanges()"},
+ {line: " TestClosureScope()"},
+ {line: " TestEscape()"},
+ {line: " TestCaptureVar(true)"},
+ {line: "}"},
+}
+
+const detailOutput = false
+
+// TestScopeRanges compiles testfile and checks that the description of
+// lexical blocks emitted by the linker in debug_info, for each function
+// in the main package, corresponds to what we expect it to be.
+func TestScopeRanges(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "TestScopeRanges")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src, f := gobuild(t, dir, false, testfile)
+ defer f.Close()
+
+ // the compiler uses forward slashes for paths even on windows
+ src = strings.Replace(src, "\\", "/", -1)
+
+ pcln, err := f.PCLineTable()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ lines := make(map[line][]*lexblock)
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ if entry.Tag != dwarf.TagSubprogram {
+ continue
+ }
+
+ name, ok := entry.Val(dwarf.AttrName).(string)
+ if !ok || !strings.HasPrefix(name, "main.Test") {
+ continue
+ }
+
+ var scope lexblock
+ ctxt := scopexplainContext{
+ dwarfData: dwarfData,
+ dwarfReader: dwarfReader,
+ scopegen: 1,
+ }
+
+ readScope(&ctxt, &scope, entry)
+
+ scope.markLines(pcln, lines)
+ }
+
+ anyerror := false
+ for i := range testfile {
+ tgt := testfile[i].scopes
+ out := lines[line{src, i + 1}]
+
+ if detailOutput {
+ t.Logf("%s // %v", testfile[i].line, out)
+ }
+
+ scopesok := checkScopes(tgt, out)
+ if !scopesok {
+ t.Logf("mismatch at line %d %q: expected: %v got: %v\n", i, testfile[i].line, tgt, scopesToString(out))
+ }
+
+ varsok := true
+ if testfile[i].vars != nil {
+ if len(out) > 0 {
+ varsok = checkVars(testfile[i].vars, out[len(out)-1].vars)
+ if !varsok {
+ t.Logf("variable mismatch at line %d %q for scope %d: expected: %v got: %v\n", i+1, testfile[i].line, out[len(out)-1].id, testfile[i].vars, out[len(out)-1].vars)
+ }
+ for j := range testfile[i].decl {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].decl[j]); line != i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d got: %d", testfile[i].decl[j], i+1, line)
+ }
+ }
+
+ for j := range testfile[i].declBefore {
+ if line := declLineForVar(out[len(out)-1].vars, testfile[i].declBefore[j]); line > i+1 {
+ t.Errorf("wrong declaration line for variable %s, expected %d (or less) got: %d", testfile[i].declBefore[j], i+1, line)
+ }
+ }
+ }
+ }
+
+ anyerror = anyerror || !scopesok || !varsok
+ }
+
+ if anyerror {
+ t.Fatalf("mismatched output")
+ }
+}
+
+func scopesToString(v []*lexblock) string {
+ r := make([]string, len(v))
+ for i, s := range v {
+ r[i] = strconv.Itoa(s.id)
+ }
+ return "[ " + strings.Join(r, ", ") + " ]"
+}
+
+func checkScopes(tgt []int, out []*lexblock) bool {
+ if len(out) > 0 {
+ // omit scope 0
+ out = out[1:]
+ }
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].id {
+ return false
+ }
+ }
+ return true
+}
+
+func checkVars(tgt []string, out []variable) bool {
+ if len(tgt) != len(out) {
+ return false
+ }
+ for i := range tgt {
+ if tgt[i] != out[i].expr {
+ return false
+ }
+ }
+ return true
+}
+
+func declLineForVar(scope []variable, name string) int {
+ for i := range scope {
+ if scope[i].name() == name {
+ return scope[i].declLine
+ }
+ }
+ return -1
+}
+
+type lexblock struct {
+ id int
+ ranges [][2]uint64
+ vars []variable
+ scopes []lexblock
+}
+
+type variable struct {
+ expr string
+ declLine int
+}
+
+func (v *variable) name() string {
+ return strings.Split(v.expr, " ")[1]
+}
+
+type line struct {
+ file string
+ lineno int
+}
+
+type scopexplainContext struct {
+ dwarfData *dwarf.Data
+ dwarfReader *dwarf.Reader
+ scopegen int
+}
+
+// readScope reads the DW_TAG_lexical_block or the DW_TAG_subprogram in
+// entry and writes a description in scope.
+// Nested DW_TAG_lexical_block entries are read recursively.
+func readScope(ctxt *scopexplainContext, scope *lexblock, entry *dwarf.Entry) {
+ var err error
+ scope.ranges, err = ctxt.dwarfData.Ranges(entry)
+ if err != nil {
+ panic(err)
+ }
+ for {
+ e, err := ctxt.dwarfReader.Next()
+ if err != nil {
+ panic(err)
+ }
+ switch e.Tag {
+ case 0:
+ sort.Slice(scope.vars, func(i, j int) bool {
+ return scope.vars[i].expr < scope.vars[j].expr
+ })
+ return
+ case dwarf.TagFormalParameter:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "arg", typ))
+ case dwarf.TagVariable:
+ typ, err := ctxt.dwarfData.Type(e.Val(dwarf.AttrType).(dwarf.Offset))
+ if err != nil {
+ panic(err)
+ }
+ scope.vars = append(scope.vars, entryToVar(e, "var", typ))
+ case dwarf.TagLexDwarfBlock:
+ scope.scopes = append(scope.scopes, lexblock{id: ctxt.scopegen})
+ ctxt.scopegen++
+ readScope(ctxt, &scope.scopes[len(scope.scopes)-1], e)
+ }
+ }
+}
+
+func entryToVar(e *dwarf.Entry, kind string, typ dwarf.Type) variable {
+ return variable{
+ fmt.Sprintf("%s %s %s", kind, e.Val(dwarf.AttrName).(string), typ.String()),
+ int(e.Val(dwarf.AttrDeclLine).(int64)),
+ }
+}
+
+// markLines marks all lines that belong to this scope with this scope.
+// It recursively calls markLines for all child scopes.
+func (scope *lexblock) markLines(pcln objfile.Liner, lines map[line][]*lexblock) {
+ for _, r := range scope.ranges {
+ for pc := r[0]; pc < r[1]; pc++ {
+ file, lineno, _ := pcln.PCToLine(pc)
+ l := line{file, lineno}
+ if len(lines[l]) == 0 || lines[l][len(lines[l])-1] != scope {
+ lines[l] = append(lines[l], scope)
+ }
+ }
+ }
+
+ for i := range scope.scopes {
+ scope.scopes[i].markLines(pcln, lines)
+ }
+}
+
+func gobuild(t *testing.T, dir string, optimized bool, testfile []testline) (string, *objfile.File) {
+ src := filepath.Join(dir, "test.go")
+ dst := filepath.Join(dir, "out.o")
+
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for i := range testfile {
+ f.Write([]byte(testfile[i].line))
+ f.Write([]byte{'\n'})
+ }
+ f.Close()
+
+ args := []string{"build"}
+ if !optimized {
+ args = append(args, "-gcflags=-N -l")
+ }
+ args = append(args, "-o", dst, src)
+
+ cmd := exec.Command(testenv.GoToolPath(t), args...)
+ if b, err := cmd.CombinedOutput(); err != nil {
+ t.Logf("build: %s\n", string(b))
+ t.Fatal(err)
+ }
+
+ pkg, err := objfile.Open(dst)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return src, pkg
+}
+
+// TestEmptyDwarfRanges tests that no list entry in debug_ranges has start == end.
+// See issue #23928.
+func TestEmptyDwarfRanges(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ dir, err := ioutil.TempDir("", "TestEmptyDwarfRanges")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ _, f := gobuild(t, dir, true, []testline{{line: "package main"}, {line: "func main(){ println(\"hello\") }"}})
+ defer f.Close()
+
+ dwarfData, err := f.DWARF()
+ if err != nil {
+ t.Fatal(err)
+ }
+ dwarfReader := dwarfData.Reader()
+
+ for {
+ entry, err := dwarfReader.Next()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if entry == nil {
+ break
+ }
+
+ ranges, err := dwarfData.Ranges(entry)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if ranges == nil {
+ continue
+ }
+
+ for _, rng := range ranges {
+ if rng[0] == rng[1] {
+ t.Errorf("range entry with start == end: %v", rng)
+ }
+ }
+ }
+}
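The TestEmptyDwarfRanges check can be reproduced against an arbitrary binary using only the standard library. A rough sketch follows, assuming a Linux/ELF executable whose path is passed as the first argument; it is only an illustration of the debug/dwarf range walk, not part of the test above.

package main

import (
	"debug/elf"
	"fmt"
	"log"
	"os"
)

func main() {
	f, err := elf.Open(os.Args[1])
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()

	data, err := f.DWARF()
	if err != nil {
		log.Fatal(err)
	}

	r := data.Reader()
	for {
		entry, err := r.Next()
		if err != nil {
			log.Fatal(err)
		}
		if entry == nil {
			break
		}
		ranges, err := data.Ranges(entry)
		if err != nil {
			log.Fatal(err)
		}
		for _, rng := range ranges {
			if rng[0] == rng[1] {
				fmt.Printf("empty range at entry offset %#x\n", entry.Offset)
			}
		}
	}
}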
diff --git a/src/cmd/compile/internal/escape/assign.go b/src/cmd/compile/internal/escape/assign.go
new file mode 100644
index 0000000..80697bf
--- /dev/null
+++ b/src/cmd/compile/internal/escape/assign.go
@@ -0,0 +1,120 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// addr evaluates an addressable expression n and returns a hole
+// that represents storing into the represented location.
+func (e *escape) addr(n ir.Node) hole {
+ if n == nil || ir.IsBlank(n) {
+ // Can happen in select case, range, maybe others.
+ return e.discardHole()
+ }
+
+ k := e.heapHole()
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected addr: %v", n)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PEXTERN {
+ break
+ }
+ k = e.oldLoc(n).asHole()
+ case ir.OLINKSYMOFFSET:
+ break
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ k = e.addr(n.X)
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.Index)
+ if n.X.Type().IsArray() {
+ k = e.addr(n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODEREF, ir.ODOTPTR:
+ e.discard(n)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.assignHeap(n.Index, "key of map put", n)
+ }
+
+ return k
+}
+
+func (e *escape) addrs(l ir.Nodes) []hole {
+ var ks []hole
+ for _, n := range l {
+ ks = append(ks, e.addr(n))
+ }
+ return ks
+}
+
+func (e *escape) assignHeap(src ir.Node, why string, where ir.Node) {
+ e.expr(e.heapHole().note(where, why), src)
+}
+
+// assignList evaluates the assignment dsts... = srcs....
+func (e *escape) assignList(dsts, srcs []ir.Node, why string, where ir.Node) {
+ ks := e.addrs(dsts)
+ for i, k := range ks {
+ var src ir.Node
+ if i < len(srcs) {
+ src = srcs[i]
+ }
+
+ if dst := dsts[i]; dst != nil {
+ // Detect implicit conversion of uintptr to unsafe.Pointer when
+ // storing into reflect.{Slice,String}Header.
+ if dst.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(dst) {
+ e.unsafeValue(e.heapHole().note(where, why), src)
+ continue
+ }
+
+ // Filter out some no-op assignments for escape analysis.
+ if src != nil && isSelfAssign(dst, src) {
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(where.Pos(), "%v ignoring self-assignment in %v", e.curfn, where)
+ }
+ k = e.discardHole()
+ }
+ }
+
+ e.expr(k.note(where, why), src)
+ }
+
+ e.reassigned(ks, where)
+}
+
+// reassigned marks the locations associated with the given holes as
+// reassigned, unless the location represents a variable declared and
+// assigned exactly once by where.
+func (e *escape) reassigned(ks []hole, where ir.Node) {
+ if as, ok := where.(*ir.AssignStmt); ok && as.Op() == ir.OAS && as.Y == nil {
+ if dst, ok := as.X.(*ir.Name); ok && dst.Op() == ir.ONAME && dst.Defn == nil {
+ // Zero-value assignment for variable declared without an
+ // explicit initial value. Assume this is its initialization
+ // statement.
+ return
+ }
+ }
+
+ for _, k := range ks {
+ loc := k.dst
+ // Variables declared by range statements are assigned on every iteration.
+ if n, ok := loc.n.(*ir.Name); ok && n.Defn == where && where.Op() != ir.ORANGE {
+ continue
+ }
+ loc.reassigned = true
+ }
+}
diff --git a/src/cmd/compile/internal/escape/call.go b/src/cmd/compile/internal/escape/call.go
new file mode 100644
index 0000000..ee76adb
--- /dev/null
+++ b/src/cmd/compile/internal/escape/call.go
@@ -0,0 +1,458 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// call evaluates a call expression, including builtin calls. ks
+// should contain the holes representing where the function callee's
+// results flow.
+func (e *escape) call(ks []hole, call ir.Node) {
+ var init ir.Nodes
+ e.callCommon(ks, call, &init, nil)
+ if len(init) != 0 {
+ call.(*ir.CallExpr).PtrInit().Append(init...)
+ }
+}
+
+func (e *escape) callCommon(ks []hole, call ir.Node, init *ir.Nodes, wrapper *ir.Func) {
+
+	// argumentFunc handles escape analysis of argument *argp to the
+	// given hole. fn is the function callee, if statically known;
+	// otherwise nil.
+ argumentFunc := func(fn *ir.Name, k hole, argp *ir.Node) {
+ e.rewriteArgument(argp, init, call, fn, wrapper)
+
+ e.expr(k.note(call, "call parameter"), *argp)
+ }
+
+ argument := func(k hole, argp *ir.Node) {
+ argumentFunc(nil, k, argp)
+ }
+
+ switch call.Op() {
+ default:
+ ir.Dump("esc", call)
+ base.Fatalf("unexpected call op: %v", call.Op())
+
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
+ call := call.(*ir.CallExpr)
+ typecheck.FixVariadicCall(call)
+ typecheck.FixMethodCall(call)
+
+ // Pick out the function callee, if statically known.
+ //
+ // TODO(mdempsky): Change fn from *ir.Name to *ir.Func, but some
+ // functions (e.g., runtime builtins, method wrappers, generated
+ // eq/hash functions) don't have it set. Investigate whether
+ // that's a concern.
+ var fn *ir.Name
+ switch call.Op() {
+ case ir.OCALLFUNC:
+ // If we have a direct call to a closure (not just one we were
+ // able to statically resolve with ir.StaticValue), mark it as
+ // such so batch.outlives can optimize the flow results.
+ if call.X.Op() == ir.OCLOSURE {
+ call.X.(*ir.ClosureExpr).Func.SetClosureCalled(true)
+ }
+
+ switch v := ir.StaticValue(call.X); v.Op() {
+ case ir.ONAME:
+ if v := v.(*ir.Name); v.Class == ir.PFUNC {
+ fn = v
+ }
+ case ir.OCLOSURE:
+ fn = v.(*ir.ClosureExpr).Func.Nname
+ case ir.OMETHEXPR:
+ fn = ir.MethodExprName(v)
+ }
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ fntype := call.X.Type()
+ if fn != nil {
+ fntype = fn.Type()
+ }
+
+ if ks != nil && fn != nil && e.inMutualBatch(fn) {
+ for i, result := range fn.Type().Results().FieldSlice() {
+ e.expr(ks[i], ir.AsNode(result.Nname))
+ }
+ }
+
+ var recvp *ir.Node
+ if call.Op() == ir.OCALLFUNC {
+ // Evaluate callee function expression.
+ //
+ // Note: We use argument and not argumentFunc, because while
+ // call.X here may be an argument to runtime.{new,defer}proc,
+ // it's not an argument to fn itself.
+ argument(e.discardHole(), &call.X)
+ } else {
+ recvp = &call.X.(*ir.SelectorExpr).X
+ }
+
+ args := call.Args
+ if recv := fntype.Recv(); recv != nil {
+ if recvp == nil {
+				// Function call using method expression. Receiver argument is
+ // at the front of the regular arguments list.
+ recvp = &args[0]
+ args = args[1:]
+ }
+
+ argumentFunc(fn, e.tagHole(ks, fn, recv), recvp)
+ }
+
+ for i, param := range fntype.Params().FieldSlice() {
+ argumentFunc(fn, e.tagHole(ks, fn, param), &args[i])
+ }
+
+ case ir.OINLCALL:
+ call := call.(*ir.InlinedCallExpr)
+ e.stmts(call.Body)
+ for i, result := range call.ReturnVars {
+ k := e.discardHole()
+ if ks != nil {
+ k = ks[i]
+ }
+ e.expr(k, result)
+ }
+
+ case ir.OAPPEND:
+ call := call.(*ir.CallExpr)
+ args := call.Args
+
+ // Appendee slice may flow directly to the result, if
+ // it has enough capacity. Alternatively, a new heap
+ // slice might be allocated, and all slice elements
+ // might flow to heap.
+ appendeeK := ks[0]
+ if args[0].Type().Elem().HasPointers() {
+ appendeeK = e.teeHole(appendeeK, e.heapHole().deref(call, "appendee slice"))
+ }
+ argument(appendeeK, &args[0])
+
+ if call.IsDDD {
+ appendedK := e.discardHole()
+ if args[1].Type().IsSlice() && args[1].Type().Elem().HasPointers() {
+ appendedK = e.heapHole().deref(call, "appended slice...")
+ }
+ argument(appendedK, &args[1])
+ } else {
+ for i := 1; i < len(args); i++ {
+ argument(e.heapHole(), &args[i])
+ }
+ }
+
+ case ir.OCOPY:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ copiedK := e.discardHole()
+ if call.Y.Type().IsSlice() && call.Y.Type().Elem().HasPointers() {
+ copiedK = e.heapHole().deref(call, "copied slice")
+ }
+ argument(copiedK, &call.Y)
+
+ case ir.OPANIC:
+ call := call.(*ir.UnaryExpr)
+ argument(e.heapHole(), &call.X)
+
+ case ir.OCOMPLEX:
+ call := call.(*ir.BinaryExpr)
+ argument(e.discardHole(), &call.X)
+ argument(e.discardHole(), &call.Y)
+
+ case ir.ODELETE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ call := call.(*ir.CallExpr)
+ fixRecoverCall(call)
+ for i := range call.Args {
+ argument(e.discardHole(), &call.Args[i])
+ }
+
+ case ir.OLEN, ir.OCAP, ir.OREAL, ir.OIMAG, ir.OCLOSE:
+ call := call.(*ir.UnaryExpr)
+ argument(e.discardHole(), &call.X)
+
+ case ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ call := call.(*ir.BinaryExpr)
+ argument(ks[0], &call.X)
+ argument(e.discardHole(), &call.Y)
+ }
+}
+
+// goDeferStmt analyzes a "go" or "defer" statement.
+//
+// In the process, it also normalizes the statement to always use a
+// simple function call with no arguments and no results. For example,
+// it rewrites:
+//
+// defer f(x, y)
+//
+// into:
+//
+// x1, y1 := x, y
+// defer func() { f(x1, y1) }()
+func (e *escape) goDeferStmt(n *ir.GoDeferStmt) {
+ k := e.heapHole()
+ if n.Op() == ir.ODEFER && e.loopDepth == 1 {
+ // Top-level defer arguments don't escape to the heap,
+ // but they do need to last until they're invoked.
+ k = e.later(e.discardHole())
+
+ // force stack allocation of defer record, unless
+ // open-coded defers are used (see ssa.go)
+ n.SetEsc(ir.EscNever)
+ }
+
+ call := n.Call
+
+ init := n.PtrInit()
+ init.Append(ir.TakeInit(call)...)
+ e.stmts(*init)
+
+ // If the function is already a zero argument/result function call,
+ // just escape analyze it normally.
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ if sig := call.X.Type(); sig.NumParams()+sig.NumResults() == 0 {
+ if clo, ok := call.X.(*ir.ClosureExpr); ok && n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+ e.expr(k, call.X)
+ return
+ }
+ }
+
+ // Create a new no-argument function that we'll hand off to defer.
+ fn := ir.NewClosureFunc(n.Pos(), true)
+ fn.SetWrapper(true)
+ fn.Nname.SetType(types.NewSignature(types.LocalPkg, nil, nil, nil, nil))
+ fn.Body = []ir.Node{call}
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC {
+ // If the callee is a named function, link to the original callee.
+ x := call.X
+ if x.Op() == ir.ONAME && x.(*ir.Name).Class == ir.PFUNC {
+ fn.WrappedFunc = call.X.(*ir.Name).Func
+ } else if x.Op() == ir.OMETHEXPR && ir.MethodExprFunc(x).Nname != nil {
+ fn.WrappedFunc = ir.MethodExprName(x).Func
+ }
+ }
+
+ clo := fn.OClosure
+ if n.Op() == ir.OGO {
+ clo.IsGoWrap = true
+ }
+
+ e.callCommon(nil, call, init, fn)
+ e.closures = append(e.closures, closure{e.spill(k, clo), clo})
+
+ // Create new top level call to closure.
+ n.Call = ir.NewCallExpr(call.Pos(), ir.OCALL, clo, nil)
+ ir.WithFunc(e.curfn, func() {
+ typecheck.Stmt(n.Call)
+ })
+}
+
+// rewriteArgument rewrites the argument *argp of the given call expression.
+// fn is the static callee function, if known.
+// wrapper is the go/defer wrapper function for call, if any.
+func (e *escape) rewriteArgument(argp *ir.Node, init *ir.Nodes, call ir.Node, fn *ir.Name, wrapper *ir.Func) {
+ var pragma ir.PragmaFlag
+ if fn != nil && fn.Func != nil {
+ pragma = fn.Func.Pragma
+ }
+
+ // unsafeUintptr rewrites "uintptr(ptr)" arguments to syscall-like
+ // functions, so that ptr is kept alive and/or escaped as
+ // appropriate. unsafeUintptr also reports whether it modified arg0.
+ unsafeUintptr := func(arg0 ir.Node) bool {
+ if pragma&(ir.UintptrKeepAlive|ir.UintptrEscapes) == 0 {
+ return false
+ }
+
+ // If the argument is really a pointer being converted to uintptr,
+ // arrange for the pointer to be kept alive until the call returns,
+ // by copying it into a temp and marking that temp
+ // still alive when we pop the temp stack.
+ if arg0.Op() != ir.OCONVNOP || !arg0.Type().IsUintptr() {
+ return false
+ }
+ arg := arg0.(*ir.ConvExpr)
+
+ if !arg.X.Type().IsUnsafePtr() {
+ return false
+ }
+
+ // Create and declare a new pointer-typed temp variable.
+ tmp := e.wrapExpr(arg.Pos(), &arg.X, init, call, wrapper)
+
+ if pragma&ir.UintptrEscapes != 0 {
+ e.flow(e.heapHole().note(arg, "//go:uintptrescapes"), e.oldLoc(tmp))
+ }
+
+ if pragma&ir.UintptrKeepAlive != 0 {
+ call := call.(*ir.CallExpr)
+
+ // SSA implements CallExpr.KeepAlive using OpVarLive, which
+ // doesn't support PAUTOHEAP variables. I tried changing it to
+ // use OpKeepAlive, but that ran into issues of its own.
+ // For now, the easy solution is to explicitly copy to (yet
+ // another) new temporary variable.
+ keep := tmp
+ if keep.Class == ir.PAUTOHEAP {
+ keep = e.copyExpr(arg.Pos(), tmp, call.PtrInit(), wrapper, false)
+ }
+
+ keep.SetAddrtaken(true) // ensure SSA keeps the tmp variable
+ call.KeepAlive = append(call.KeepAlive, keep)
+ }
+
+ return true
+ }
+
+ visit := func(pos src.XPos, argp *ir.Node) {
+ // Optimize a few common constant expressions. By leaving these
+ // untouched in the call expression, we let the wrapper handle
+ // evaluating them, rather than taking up closure context space.
+ switch arg := *argp; arg.Op() {
+ case ir.OLITERAL, ir.ONIL, ir.OMETHEXPR:
+ return
+ case ir.ONAME:
+ if arg.(*ir.Name).Class == ir.PFUNC {
+ return
+ }
+ }
+
+ if unsafeUintptr(*argp) {
+ return
+ }
+
+ if wrapper != nil {
+ e.wrapExpr(pos, argp, init, call, wrapper)
+ }
+ }
+
+	// Peel away any slice literals so we can better escape
+	// analyze their elements. For example:
+ //
+ // go F([]int{a, b})
+ //
+ // If F doesn't escape its arguments, then the slice can
+ // be allocated on the new goroutine's stack.
+ //
+ // For variadic functions, the compiler has already rewritten:
+ //
+ // f(a, b, c)
+ //
+ // to:
+ //
+ // f([]T{a, b, c}...)
+ //
+ // So we need to look into slice elements to handle uintptr(ptr)
+ // arguments to syscall-like functions correctly.
+ if arg := *argp; arg.Op() == ir.OSLICELIT {
+ list := arg.(*ir.CompLitExpr).List
+ for i := range list {
+ el := &list[i]
+ if list[i].Op() == ir.OKEY {
+ el = &list[i].(*ir.KeyExpr).Value
+ }
+ visit(arg.Pos(), el)
+ }
+ } else {
+ visit(call.Pos(), argp)
+ }
+}
+
+// wrapExpr replaces *exprp with a temporary variable copy. If wrapper
+// is non-nil, the variable will be captured for use within that
+// function.
+func (e *escape) wrapExpr(pos src.XPos, exprp *ir.Node, init *ir.Nodes, call ir.Node, wrapper *ir.Func) *ir.Name {
+ tmp := e.copyExpr(pos, *exprp, init, e.curfn, true)
+
+ if wrapper != nil {
+		// Currently, for "defer i.M()", if i is nil it panics at the point
+		// of the defer statement, not when the deferred function is called.
+		// We need to do the nil check outside of the wrapper.
+ if call.Op() == ir.OCALLINTER && exprp == &call.(*ir.CallExpr).X.(*ir.SelectorExpr).X {
+ check := ir.NewUnaryExpr(pos, ir.OCHECKNIL, ir.NewUnaryExpr(pos, ir.OITAB, tmp))
+ init.Append(typecheck.Stmt(check))
+ }
+
+ e.oldLoc(tmp).captured = true
+
+ tmp = ir.NewClosureVar(pos, wrapper, tmp)
+ }
+
+ *exprp = tmp
+ return tmp
+}
+
+// copyExpr creates and returns a new temporary variable within fn;
+// appends statements to init to declare and initialize it to expr;
+// and escape analyzes the data flow if analyze is true.
+func (e *escape) copyExpr(pos src.XPos, expr ir.Node, init *ir.Nodes, fn *ir.Func, analyze bool) *ir.Name {
+ if ir.HasUniquePos(expr) {
+ pos = expr.Pos()
+ }
+
+ tmp := typecheck.TempAt(pos, fn, expr.Type())
+
+ stmts := []ir.Node{
+ ir.NewDecl(pos, ir.ODCL, tmp),
+ ir.NewAssignStmt(pos, tmp, expr),
+ }
+ typecheck.Stmts(stmts)
+ init.Append(stmts...)
+
+ if analyze {
+ e.newLoc(tmp, false)
+ e.stmts(stmts)
+ }
+
+ return tmp
+}
+
+// tagHole returns a hole for evaluating an argument passed to param.
+// ks should contain the holes representing where the function
+// callee's results flow. fn is the statically-known callee function,
+// if any.
+func (e *escape) tagHole(ks []hole, fn *ir.Name, param *types.Field) hole {
+ // If this is a dynamic call, we can't rely on param.Note.
+ if fn == nil {
+ return e.heapHole()
+ }
+
+ if e.inMutualBatch(fn) {
+ return e.addr(ir.AsNode(param.Nname))
+ }
+
+ // Call to previously tagged function.
+
+ var tagKs []hole
+
+ esc := parseLeaks(param.Note)
+ if x := esc.Heap(); x >= 0 {
+ tagKs = append(tagKs, e.heapHole().shift(x))
+ }
+
+ if ks != nil {
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ tagKs = append(tagKs, ks[i].shift(x))
+ }
+ }
+ }
+
+ return e.teeHole(tagKs...)
+}
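goDeferStmt above rewrites "defer f(x, y)" into a zero-argument wrapper over copies of the arguments. The rewrite preserves Go's rule that deferred call arguments are evaluated at the defer statement itself; a small plain-Go illustration of that equivalence follows (the names are invented, and this is only a semantic sketch, not the compiler's transformation).

package main

import "fmt"

func f(x, y int) { fmt.Println(x, y) }

func main() {
	x, y := 1, 2

	// Original form: the arguments are evaluated here, at the defer.
	defer f(x, y)

	// Equivalent of the compiler's rewrite: copy the evaluated
	// arguments, then defer a zero-argument wrapper.
	x1, y1 := x, y
	defer func() { f(x1, y1) }()

	// Later modifications don't affect either deferred call:
	// both print "1 2" when main returns.
	x, y = 10, 20
}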
diff --git a/src/cmd/compile/internal/escape/desugar.go b/src/cmd/compile/internal/escape/desugar.go
new file mode 100644
index 0000000..8b3cc25
--- /dev/null
+++ b/src/cmd/compile/internal/escape/desugar.go
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// TODO(mdempsky): Desugaring doesn't belong during escape analysis,
+// but for now it's the most convenient place for some rewrites.
+
+// fixRecoverCall rewrites an ORECOVER call into ORECOVERFP,
+// adding an explicit frame pointer argument.
+// If call is not an ORECOVER call, it's left unmodified.
+func fixRecoverCall(call *ir.CallExpr) {
+ if call.Op() != ir.ORECOVER {
+ return
+ }
+
+ pos := call.Pos()
+
+ // FP is equal to caller's SP plus FixedFrameSize().
+ var fp ir.Node = ir.NewCallExpr(pos, ir.OGETCALLERSP, nil, nil)
+ if off := base.Ctxt.FixedFrameSize(); off != 0 {
+ fp = ir.NewBinaryExpr(fp.Pos(), ir.OADD, fp, ir.NewInt(off))
+ }
+ // TODO(mdempsky): Replace *int32 with unsafe.Pointer, without upsetting checkptr.
+ fp = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewPtr(types.Types[types.TINT32]), fp)
+
+ call.SetOp(ir.ORECOVERFP)
+ call.Args = []ir.Node{typecheck.Expr(fp)}
+}
diff --git a/src/cmd/compile/internal/escape/escape.go b/src/cmd/compile/internal/escape/escape.go
new file mode 100644
index 0000000..bc6f7c9
--- /dev/null
+++ b/src/cmd/compile/internal/escape/escape.go
@@ -0,0 +1,483 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// Escape analysis.
+//
+// Here we analyze functions to determine which Go variables
+// (including implicit allocations such as calls to "new" or "make",
+// composite literals, etc.) can be allocated on the stack. The two
+// key invariants we have to ensure are: (1) pointers to stack objects
+// cannot be stored in the heap, and (2) pointers to a stack object
+// cannot outlive that object (e.g., because the declaring function
+// returned and destroyed the object's stack frame, or its space is
+// reused across loop iterations for logically distinct variables).
+//
+// We implement this with a static data-flow analysis of the AST.
+// First, we construct a directed weighted graph where vertices
+// (termed "locations") represent variables allocated by statements
+// and expressions, and edges represent assignments between variables
+// (with weights representing addressing/dereference counts).
+//
+// Next we walk the graph looking for assignment paths that might
+// violate the invariants stated above. If a variable v's address is
+// stored in the heap or elsewhere that may outlive it, then v is
+// marked as requiring heap allocation.
+//
+// To support interprocedural analysis, we also record data-flow from
+// each function's parameters to the heap and to its result
+// parameters. This information is summarized as "parameter tags",
+// which are used at static call sites to improve escape analysis of
+// function arguments.
+
+// Constructing the location graph.
+//
+// Every allocating statement (e.g., variable declaration) or
+// expression (e.g., "new" or "make") is first mapped to a unique
+// "location."
+//
+// We also model every Go assignment as a directed edge between
+// locations. The number of dereference operations minus the number of
+// addressing operations is recorded as the edge's weight (termed
+// "derefs"). For example:
+//
+// p = &q // -1
+// p = q // 0
+// p = *q // 1
+// p = **q // 2
+//
+// p = **&**&q // 2
+//
+// Note that the & operator can only be applied to addressable
+// expressions, and the expression &x itself is not addressable, so
+// derefs cannot go below -1.
+//
+// Every Go language construct is lowered into this representation,
+// generally without sensitivity to flow, path, or context; and
+// without distinguishing elements within a compound variable. For
+// example:
+//
+// var x struct { f, g *int }
+// var u []*int
+//
+// x.f = u[0]
+//
+// is modeled simply as
+//
+// x = *u
+//
+// That is, we don't distinguish x.f from x.g, or u[0] from u[1],
+// u[2], etc. However, we do record the implicit dereference involved
+// in indexing a slice.
+
+// A batch holds escape analysis state that's shared across an entire
+// batch of functions being analyzed at once.
+type batch struct {
+ allLocs []*location
+ closures []closure
+
+ heapLoc location
+ blankLoc location
+}
+
+// A closure holds a closure expression and its spill hole (i.e.,
+// the hole representing storing into its closure record).
+type closure struct {
+ k hole
+ clo *ir.ClosureExpr
+}
+
+// An escape holds state specific to a single function being analyzed
+// within a batch.
+type escape struct {
+ *batch
+
+ curfn *ir.Func // function being analyzed
+
+ labels map[*types.Sym]labelState // known labels
+
+ // loopDepth counts the current loop nesting depth within
+ // curfn. It increments within each "for" loop and at each
+ // label with a corresponding backwards "goto" (i.e.,
+ // unstructured loop).
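+ // For example, loopDepth is 1 while walking the function body itself
+ // and 2 while walking the body of a single "for" loop within it.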
+ loopDepth int
+}
+
+func Funcs(all []ir.Node) {
+ ir.VisitFuncsBottomUp(all, Batch)
+}
+
+// Batch performs escape analysis on a minimal batch of
+// functions.
+func Batch(fns []*ir.Func, recursive bool) {
+ for _, fn := range fns {
+ if fn.Op() != ir.ODCLFUNC {
+ base.Fatalf("unexpected node: %v", fn)
+ }
+ }
+
+ var b batch
+ b.heapLoc.escapes = true
+
+ // Construct data-flow graph from syntax trees.
+ for _, fn := range fns {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore escape %v", fn)
+ ir.Dump(s, fn)
+ }
+ b.initFunc(fn)
+ }
+ for _, fn := range fns {
+ if !fn.IsHiddenClosure() {
+ b.walkFunc(fn)
+ }
+ }
+
+ // We've walked the function bodies, so we've seen everywhere a
+ // variable might be reassigned or have its address taken. Now we
+ // can decide whether closures should capture their free variables
+ // by value or reference.
+ for _, closure := range b.closures {
+ b.flowClosure(closure.k, closure.clo)
+ }
+ b.closures = nil
+
+ for _, loc := range b.allLocs {
+ if why := HeapAllocReason(loc.n); why != "" {
+ b.flow(b.heapHole().addr(loc.n, why), loc)
+ }
+ }
+
+ b.walkAll()
+ b.finish(fns)
+}
+
+func (b *batch) with(fn *ir.Func) *escape {
+ return &escape{
+ batch: b,
+ curfn: fn,
+ loopDepth: 1,
+ }
+}
+
+func (b *batch) initFunc(fn *ir.Func) {
+ e := b.with(fn)
+ if fn.Esc() != escFuncUnknown {
+ base.Fatalf("unexpected node: %v", fn)
+ }
+ fn.SetEsc(escFuncPlanned)
+ if base.Flag.LowerM > 3 {
+ ir.Dump("escAnalyze", fn)
+ }
+
+ // Allocate locations for local variables.
+ for _, n := range fn.Dcl {
+ e.newLoc(n, false)
+ }
+
+ // Also for hidden parameters (e.g., the ".this" parameter to a
+ // method value wrapper).
+ if fn.OClosure == nil {
+ for _, n := range fn.ClosureVars {
+ e.newLoc(n.Canonical(), false)
+ }
+ }
+
+ // Initialize resultIndex for result parameters.
+ for i, f := range fn.Type().Results().FieldSlice() {
+ e.oldLoc(f.Nname.(*ir.Name)).resultIndex = 1 + i
+ }
+}
+
+func (b *batch) walkFunc(fn *ir.Func) {
+ e := b.with(fn)
+ fn.SetEsc(escFuncStarted)
+
+ // Identify labels that mark the head of an unstructured loop.
+ ir.Visit(fn, func(n ir.Node) {
+ switch n.Op() {
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ if e.labels == nil {
+ e.labels = make(map[*types.Sym]labelState)
+ }
+ e.labels[n.Label] = nonlooping
+
+ case ir.OGOTO:
+ // If we visited the label before the goto,
+ // then this is a looping label.
+ n := n.(*ir.BranchStmt)
+ if e.labels[n.Label] == nonlooping {
+ e.labels[n.Label] = looping
+ }
+ }
+ })
+
+ e.block(fn.Body)
+
+ if len(e.labels) != 0 {
+ base.FatalfAt(fn.Pos(), "leftover labels after walkFunc")
+ }
+}
+
+func (b *batch) flowClosure(k hole, clo *ir.ClosureExpr) {
+ for _, cv := range clo.Func.ClosureVars {
+ n := cv.Canonical()
+ loc := b.oldLoc(cv)
+ if !loc.captured {
+ base.FatalfAt(cv.Pos(), "closure variable never captured: %v", cv)
+ }
+
+ // Capture by value for variables <= 128 bytes that are never reassigned.
+ n.SetByval(!loc.addrtaken && !loc.reassigned && n.Type().Size() <= 128)
+ if !n.Byval() {
+ n.SetAddrtaken(true)
+ if n.Sym().Name == typecheck.LocalDictName {
+ base.FatalfAt(n.Pos(), "dictionary variable not captured by value")
+ }
+ }
+
+ if base.Flag.LowerM > 1 {
+ how := "ref"
+ if n.Byval() {
+ how = "value"
+ }
+ base.WarnfAt(n.Pos(), "%v capturing by %s: %v (addr=%v assign=%v width=%d)", n.Curfn, how, n, loc.addrtaken, loc.reassigned, n.Type().Size())
+ }
+
+ // Flow captured variables to closure.
+ k := k
+ if !cv.Byval() {
+ k = k.addr(cv, "reference")
+ }
+ b.flow(k.note(cv, "captured by a closure"), loc)
+ }
+}
+
+func (b *batch) finish(fns []*ir.Func) {
+ // Record parameter tags for package export data.
+ for _, fn := range fns {
+ fn.SetEsc(escFuncTagged)
+
+ narg := 0
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(fn.Type()).Fields().Slice() {
+ narg++
+ f.Note = b.paramTag(fn, narg, f)
+ }
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ n := loc.n
+ if n == nil {
+ continue
+ }
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ n.Opt = nil
+ }
+
+ // Update n.Esc based on escape analysis results.
+
+ // Omit escape diagnostics for go/defer wrappers, at least for now.
+ // Historically, we haven't printed them, and test cases don't expect them.
+ // TODO(mdempsky): Update tests to expect this.
+ goDeferWrapper := n.Op() == ir.OCLOSURE && n.(*ir.ClosureExpr).Func.Wrapper()
+
+ if n.Op() == ir.OCONVIDATA && n.(*ir.ConvExpr).NonEscaping {
+ // The allocation for the data word of an interface is known to not escape.
+ // See issue 50182.
+ // (But we do still need to process that allocation, as pointers inside
+ // the data word may escape.)
+ loc.escapes = false
+ }
+
+ if loc.escapes {
+ if n.Op() == ir.ONAME {
+ if base.Flag.CompilingRuntime {
+ base.ErrorfAt(n.Pos(), "%v escapes to heap, not allowed in runtime", n)
+ }
+ if base.Flag.LowerM != 0 {
+ base.WarnfAt(n.Pos(), "moved to heap: %v", n)
+ }
+ } else {
+ if base.Flag.LowerM != 0 && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v escapes to heap", n)
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(n.Pos(), "escape", "escape", ir.FuncName(e_curfn))
+ }
+ }
+ n.SetEsc(ir.EscHeap)
+ } else {
+ if base.Flag.LowerM != 0 && n.Op() != ir.ONAME && !goDeferWrapper {
+ base.WarnfAt(n.Pos(), "%v does not escape", n)
+ }
+ n.SetEsc(ir.EscNone)
+ if loc.transient {
+ switch n.Op() {
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ n.SetTransient(true)
+ case ir.OMETHVALUE:
+ n := n.(*ir.SelectorExpr)
+ n.SetTransient(true)
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ n.SetTransient(true)
+ }
+ }
+ }
+ }
+}
+
+// inMutualBatch reports whether function fn is in the batch of
+// mutually recursive functions being analyzed. When this is true,
+// fn has not yet been analyzed, so its parameters and results
+// should be incorporated directly into the flow graph instead of
+// relying on its escape analysis tagging.
+func (e *escape) inMutualBatch(fn *ir.Name) bool {
+ if fn.Defn != nil && fn.Defn.Esc() < escFuncTagged {
+ if fn.Defn.Esc() == escFuncUnknown {
+ base.Fatalf("graph inconsistency: %v", fn)
+ }
+ return true
+ }
+ return false
+}
+
+const (
+ escFuncUnknown = 0 + iota
+ escFuncPlanned
+ escFuncStarted
+ escFuncTagged
+)
+
+// Mark labels that have no backjumps to them as not increasing e.loopDepth.
+type labelState int
+
+const (
+ looping labelState = 1 + iota
+ nonlooping
+)
+
+func (b *batch) paramTag(fn *ir.Func, narg int, f *types.Field) string {
+ name := func() string {
+ if f.Sym != nil {
+ return f.Sym.Name
+ }
+ return fmt.Sprintf("arg#%d", narg)
+ }
+
+ // Only report diagnostics for user code;
+ // not for wrappers generated around them.
+ // TODO(mdempsky): Generalize this.
+ diagnose := base.Flag.LowerM != 0 && !(fn.Wrapper() || fn.Dupok())
+
+ if len(fn.Body) == 0 {
+ // Assume that uintptr arguments must be held live across the call.
+ // This is most important for syscall.Syscall.
+ // See golang.org/issue/13372.
+ // This really doesn't have much to do with escape analysis per se,
+ // but we are reusing the ability to annotate an individual function
+ // argument and pass those annotations along to importing code.
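+ // For example, in a call like
+ //	syscall.Syscall(trap, uintptr(unsafe.Pointer(p)), 0, 0)
+ // the object p points to must stay live until the call returns.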
+ fn.Pragma |= ir.UintptrKeepAlive
+
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "assuming %v is unsafe uintptr", name())
+ }
+ return ""
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ var esc leaks
+
+ // External functions are assumed unsafe, unless
+ // //go:noescape is given before the declaration.
+ if fn.Pragma&ir.Noescape != 0 {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ } else {
+ if diagnose && f.Sym != nil {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ }
+ esc.AddHeap(0)
+ }
+
+ return esc.Encode()
+ }
+
+ if fn.Pragma&ir.UintptrEscapes != 0 {
+ fn.Pragma |= ir.UintptrKeepAlive
+
+ if f.Type.IsUintptr() {
+ if diagnose {
+ base.WarnfAt(f.Pos, "marking %v as escaping uintptr", name())
+ }
+ return ""
+ }
+ if f.IsDDD() && f.Type.Elem().IsUintptr() {
+ // final argument is ...uintptr.
+ if diagnose {
+ base.WarnfAt(f.Pos, "marking %v as escaping ...uintptr", name())
+ }
+ return ""
+ }
+ }
+
+ if !f.Type.HasPointers() { // don't bother tagging for scalars
+ return ""
+ }
+
+ // Unnamed parameters are unused and therefore do not escape.
+ if f.Sym == nil || f.Sym.IsBlank() {
+ var esc leaks
+ return esc.Encode()
+ }
+
+ n := f.Nname.(*ir.Name)
+ loc := b.oldLoc(n)
+ esc := loc.paramEsc
+ esc.Optimize()
+
+ if diagnose && !loc.escapes {
+ if esc.Empty() {
+ base.WarnfAt(f.Pos, "%v does not escape", name())
+ }
+ if x := esc.Heap(); x >= 0 {
+ if x == 0 {
+ base.WarnfAt(f.Pos, "leaking param: %v", name())
+ } else {
+ // TODO(mdempsky): Mention level=x like below?
+ base.WarnfAt(f.Pos, "leaking param content: %v", name())
+ }
+ }
+ for i := 0; i < numEscResults; i++ {
+ if x := esc.Result(i); x >= 0 {
+ res := fn.Type().Results().Field(i).Sym
+ base.WarnfAt(f.Pos, "leaking param: %v to result %v level=%d", name(), res, x)
+ }
+ }
+ }
+
+ return esc.Encode()
+}
diff --git a/src/cmd/compile/internal/escape/expr.go b/src/cmd/compile/internal/escape/expr.go
new file mode 100644
index 0000000..ced90a4
--- /dev/null
+++ b/src/cmd/compile/internal/escape/expr.go
@@ -0,0 +1,335 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// expr models evaluating an expression n and flowing the result into
+// hole k.
+func (e *escape) expr(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+ e.stmts(n.Init())
+ e.exprSkipInit(k, n)
+}
+
+func (e *escape) exprSkipInit(k hole, n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if k.derefs >= 0 && !n.Type().IsUntyped() && !n.Type().HasPointers() {
+ k.dst = &e.blankLoc
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected expr: %s %v", n.Op().String(), n)
+
+ case ir.OLITERAL, ir.ONIL, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OTYPE, ir.OMETHEXPR, ir.OLINKSYMOFFSET:
+ // nop
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC || n.Class == ir.PEXTERN {
+ return
+ }
+ e.flow(k, e.oldLoc(n))
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ e.discard(n.X)
+ e.discard(n.Y)
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ e.expr(k.addr(n, "address-of"), n.X) // "address-of"
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ e.expr(k.deref(n, "indirection"), n.X) // "indirection"
+ case ir.ODOT, ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.note(n, "dot"), n.X)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ e.expr(k.deref(n, "dot of pointer"), n.X) // "dot of pointer"
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ e.expr(k.dotType(n.Type(), n, "dot"), n.X)
+ // n.T doesn't need to be tracked; it always points to read-only storage.
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() {
+ e.expr(k.note(n, "fixed-array-index-of"), n.X)
+ } else {
+ // TODO(mdempsky): Fix the "why" reason text.
+ e.expr(k.deref(n, "dot of pointer"), n.X)
+ }
+ e.discard(n.Index)
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ e.discard(n.X)
+ e.discard(n.Index)
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ e.expr(k.note(n, "slice"), n.X)
+ e.discard(n.Low)
+ e.discard(n.High)
+ e.discard(n.Max)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if ir.ShouldCheckPtr(e.curfn, 2) && n.Type().IsUnsafePtr() && n.X.Type().IsPtr() {
+ // When -d=checkptr=2 is enabled, treat
+ // conversions to unsafe.Pointer as an
+ // escaping operation. This allows better
+ // runtime instrumentation, since we can more
+ // easily detect object boundaries on the heap
+ // than the stack.
+ e.assignHeap(n.X, "conversion to unsafe.Pointer", n)
+ } else if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() {
+ e.unsafeValue(k, n.X)
+ } else {
+ e.expr(k, n.X)
+ }
+ case ir.OCONVIFACE, ir.OCONVIDATA:
+ n := n.(*ir.ConvExpr)
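+ // Converting a value that isn't pointer-shaped to an interface
+ // allocates storage for the interface's data word, so treat the
+ // conversion as an allocation site.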
+ if !n.X.Type().IsInterface() && !types.IsDirectIface(n.X.Type()) {
+ k = e.spill(k, n)
+ }
+ e.expr(k.note(n, "interface-converted"), n.X)
+ case ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ // Note: n.X is not needed because it can never point to memory that might escape.
+ e.expr(k, n.Y)
+ case ir.OIDATA, ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ e.expr(k, n.X)
+ case ir.OSLICE2ARRPTR:
+ // the slice pointer flows directly to the result
+ n := n.(*ir.ConvExpr)
+ e.expr(k, n.X)
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ e.discard(n.X)
+
+ case ir.OCALLMETH, ir.OCALLFUNC, ir.OCALLINTER, ir.OINLCALL, ir.OLEN, ir.OCAP, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCOPY, ir.ORECOVER, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ e.call([]hole{k}, n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ e.spill(k, n)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+ e.discard(n.Cap)
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ e.discard(n.Len)
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ e.spill(k, n)
+ e.discard(n.Len)
+
+ case ir.OMETHVALUE:
+ // Flow the receiver argument to both the closure and
+ // to the receiver parameter.
+
+ n := n.(*ir.SelectorExpr)
+ closureK := e.spill(k, n)
+
+ m := n.Selection
+
+ // We don't know how the method value will be called
+ // later, so conservatively assume the result
+ // parameters all flow to the heap.
+ //
+ // TODO(mdempsky): Change ks into a callback, so that
+ // we don't have to create this slice?
+ var ks []hole
+ for i := m.Type.NumResults(); i > 0; i-- {
+ ks = append(ks, e.heapHole())
+ }
+ name, _ := m.Nname.(*ir.Name)
+ paramK := e.tagHole(ks, name, m.Type.Recv())
+
+ e.expr(e.teeHole(paramK, closureK), n.X)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ e.expr(e.spill(k, n), n.X)
+
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "array literal element"), elt)
+ }
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ k = e.spill(k, n)
+
+ for _, elt := range n.List {
+ if elt.Op() == ir.OKEY {
+ elt = elt.(*ir.KeyExpr).Value
+ }
+ e.expr(k.note(n, "slice-literal-element"), elt)
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elt := range n.List {
+ e.expr(k.note(n, "struct literal element"), elt.(*ir.StructKeyExpr).Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ e.spill(k, n)
+
+ // Map keys and values are always stored in the heap.
+ for _, elt := range n.List {
+ elt := elt.(*ir.KeyExpr)
+ e.assignHeap(elt.Key, "map literal key", n)
+ e.assignHeap(elt.Value, "map literal value", n)
+ }
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ k = e.spill(k, n)
+ e.closures = append(e.closures, closure{k, n})
+
+ if fn := n.Func; fn.IsHiddenClosure() {
+ for _, cv := range fn.ClosureVars {
+ if loc := e.oldLoc(cv); !loc.captured {
+ loc.captured = true
+
+ // Ignore reassignments to the variable in straightline code
+ // preceding the first capture by a closure.
+ if loc.loopDepth == e.loopDepth {
+ loc.reassigned = false
+ }
+ }
+ }
+
+ for _, n := range fn.Dcl {
+ // Add locations for local variables of the
+ // closure, if needed, in case we're not including
+ // the closure func in the batch for escape
+ // analysis (happens for escape analysis called
+ // from reflectdata.methodWrapper)
+ if n.Op() == ir.ONAME && n.Opt == nil {
+ e.with(fn).newLoc(n, false)
+ }
+ }
+ e.walkFunc(fn)
+ }
+
+ case ir.ORUNES2STR, ir.OBYTES2STR, ir.OSTR2RUNES, ir.OSTR2BYTES, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ e.spill(k, n)
+ e.discard(n.X)
+
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ e.spill(k, n)
+
+ // Arguments of OADDSTR never escape;
+ // runtime.concatstrings makes sure of that.
+ e.discards(n.List)
+
+ case ir.ODYNAMICTYPE:
+ // Nothing to do - argument is a *runtime._type (+ maybe a *runtime.itab) pointing to static data section
+ }
+}
+
+// unsafeValue evaluates a uintptr-typed arithmetic expression looking
+// for conversions from an unsafe.Pointer.
+func (e *escape) unsafeValue(k hole, n ir.Node) {
+ if n.Type().Kind() != types.TUINTPTR {
+ base.Fatalf("unexpected type %v for %v", n.Type(), n)
+ }
+ if k.addrtaken {
+ base.Fatalf("unexpected addrtaken")
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ e.expr(k, n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ if ir.IsReflectHeaderDataField(n) {
+ e.expr(k.deref(n, "reflect.Header.Data"), n.X)
+ } else {
+ e.discard(n.X)
+ }
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ e.unsafeValue(k, n.X)
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.ODIV, ir.OMOD, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ e.unsafeValue(k, n.Y)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ e.unsafeValue(k, n.X)
+ // RHS need not be uintptr-typed (#32959) and can't meaningfully
+ // flow pointers anyway.
+ e.discard(n.Y)
+ default:
+ e.exprSkipInit(e.discardHole(), n)
+ }
+}
+
+// discard evaluates an expression n for side-effects, but discards
+// its value.
+func (e *escape) discard(n ir.Node) {
+ e.expr(e.discardHole(), n)
+}
+
+func (e *escape) discards(l ir.Nodes) {
+ for _, n := range l {
+ e.discard(n)
+ }
+}
+
+// spill allocates a new location associated with expression n, flows
+// its address to k, and returns a hole that flows values to it. It's
+// intended for use with most expressions that allocate storage.
+func (e *escape) spill(k hole, n ir.Node) hole {
+ loc := e.newLoc(n, true)
+ e.flow(k.addr(n, "spill"), loc)
+ return loc.asHole()
+}
diff --git a/src/cmd/compile/internal/escape/graph.go b/src/cmd/compile/internal/escape/graph.go
new file mode 100644
index 0000000..cc3d078
--- /dev/null
+++ b/src/cmd/compile/internal/escape/graph.go
@@ -0,0 +1,324 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// Below we implement the methods for walking the AST and recording
+// data flow edges. Note that because a sub-expression might have
+// side-effects, it's important to always visit the entire AST.
+//
+// For example, write either:
+//
+// if x {
+// e.discard(n.Left)
+// } else {
+// e.value(k, n.Left)
+// }
+//
+// or
+//
+// if x {
+// k = e.discardHole()
+// }
+// e.value(k, n.Left)
+//
+// Do NOT write:
+//
+// // BAD: possibly loses side-effects within n.Left
+// if !x {
+// e.value(k, n.Left)
+// }
+
+// A location represents an abstract location that stores a Go
+// variable.
+type location struct {
+ n ir.Node // represented variable or expression, if any
+ curfn *ir.Func // enclosing function
+ edges []edge // incoming edges
+ loopDepth int // loopDepth at declaration
+
+ // resultIndex records the tuple index (starting at 1) for
+ // PPARAMOUT variables within their function's result type.
+ // For non-PPARAMOUT variables it's 0.
+ resultIndex int
+
+ // derefs and walkgen are used during walkOne to track the
+ // minimal dereferences from the walk root.
+ derefs int // >= -1
+ walkgen uint32
+
+ // dst and dstEdgeIdx track the next immediate assignment
+ // destination location during walkOne, along with the index
+ // of the edge pointing back to this location.
+ dst *location
+ dstEdgeIdx int
+
+ // queued is used by walkAll to track whether this location is
+ // in the walk queue.
+ queued bool
+
+ // escapes reports whether the represented variable's address
+ // escapes; that is, whether the variable must be heap
+ // allocated.
+ escapes bool
+
+ // transient reports whether the represented expression's
+ // address does not outlive the statement; that is, whether
+ // its storage can be immediately reused.
+ transient bool
+
+ // paramEsc records the represented parameter's leak set.
+ paramEsc leaks
+
+ captured bool // has a closure captured this variable?
+ reassigned bool // has this variable been reassigned?
+ addrtaken bool // has this variable's address been taken?
+}
+
+// An edge represents an assignment edge between two Go variables.
+type edge struct {
+ src *location
+ derefs int // >= -1
+ notes *note
+}
+
+func (l *location) asHole() hole {
+ return hole{dst: l}
+}
+
+// leakTo records that parameter l leaks to sink.
+func (l *location) leakTo(sink *location, derefs int) {
+ // If sink is a result parameter that doesn't escape (#44614)
+ // and we can fit return bits into the escape analysis tag,
+ // then record as a result leak.
+ if !sink.escapes && sink.isName(ir.PPARAMOUT) && sink.curfn == l.curfn {
+ ri := sink.resultIndex - 1
+ if ri < numEscResults {
+ // Leak to result parameter.
+ l.paramEsc.AddResult(ri, derefs)
+ return
+ }
+ }
+
+ // Otherwise, record as heap leak.
+ l.paramEsc.AddHeap(derefs)
+}
+
+func (l *location) isName(c ir.Class) bool {
+ return l.n != nil && l.n.Op() == ir.ONAME && l.n.(*ir.Name).Class == c
+}
+
+// A hole represents a context for evaluation of a Go
+// expression. E.g., when evaluating p in "x = **p", we'd have a hole
+// with dst==x and derefs==2.
+type hole struct {
+ dst *location
+ derefs int // >= -1
+ notes *note
+
+ // addrtaken indicates whether this context is taking the address of
+ // the expression, independent of whether the address will actually
+ // be stored into a variable.
+ addrtaken bool
+}
+
+type note struct {
+ next *note
+ where ir.Node
+ why string
+}
+
+func (k hole) note(where ir.Node, why string) hole {
+ if where == nil || why == "" {
+ base.Fatalf("note: missing where/why")
+ }
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ k.notes = &note{
+ next: k.notes,
+ where: where,
+ why: why,
+ }
+ }
+ return k
+}
+
+func (k hole) shift(delta int) hole {
+ k.derefs += delta
+ if k.derefs < -1 {
+ base.Fatalf("derefs underflow: %v", k.derefs)
+ }
+ k.addrtaken = delta < 0
+ return k
+}
+
+func (k hole) deref(where ir.Node, why string) hole { return k.shift(1).note(where, why) }
+func (k hole) addr(where ir.Node, why string) hole { return k.shift(-1).note(where, why) }
+
+func (k hole) dotType(t *types.Type, where ir.Node, why string) hole {
+ if !t.IsInterface() && !types.IsDirectIface(t) {
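+ // Asserting to a type that isn't pointer-shaped reads the value
+ // through the interface's data pointer, which counts as a dereference.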
+ k = k.shift(1)
+ }
+ return k.note(where, why)
+}
+
+func (b *batch) flow(k hole, src *location) {
+ if k.addrtaken {
+ src.addrtaken = true
+ }
+
+ dst := k.dst
+ if dst == &b.blankLoc {
+ return
+ }
+ if dst == src && k.derefs >= 0 { // dst = dst, dst = *dst, ...
+ return
+ }
+ if dst.escapes && k.derefs < 0 { // dst = &src
+ if base.Flag.LowerM >= 2 || logopt.Enabled() {
+ pos := base.FmtPos(src.n.Pos())
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", pos, src.n)
+ }
+ explanation := b.explainFlow(pos, dst, src, k.derefs, k.notes, []*logopt.LoggedOpt{})
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(src.n.Pos(), "escapes", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", src.n), explanation)
+ }
+
+ }
+ src.escapes = true
+ return
+ }
+
+ // TODO(mdempsky): Deduplicate edges?
+ dst.edges = append(dst.edges, edge{src: src, derefs: k.derefs, notes: k.notes})
+}
+
+func (b *batch) heapHole() hole { return b.heapLoc.asHole() }
+func (b *batch) discardHole() hole { return b.blankLoc.asHole() }
+
+func (b *batch) oldLoc(n *ir.Name) *location {
+ if n.Canonical().Opt == nil {
+ base.Fatalf("%v has no location", n)
+ }
+ return n.Canonical().Opt.(*location)
+}
+
+func (e *escape) newLoc(n ir.Node, transient bool) *location {
+ if e.curfn == nil {
+ base.Fatalf("e.curfn isn't set")
+ }
+ if n != nil && n.Type() != nil && n.Type().NotInHeap() {
+ base.ErrorfAt(n.Pos(), "%v is incomplete (or unallocatable); stack allocation disallowed", n.Type())
+ }
+
+ if n != nil && n.Op() == ir.ONAME {
+ if canon := n.(*ir.Name).Canonical(); n != canon {
+ base.Fatalf("newLoc on non-canonical %v (canonical is %v)", n, canon)
+ }
+ }
+ loc := &location{
+ n: n,
+ curfn: e.curfn,
+ loopDepth: e.loopDepth,
+ transient: transient,
+ }
+ e.allLocs = append(e.allLocs, loc)
+ if n != nil {
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM && n.Curfn == nil {
+ // ok; hidden parameter
+ } else if n.Curfn != e.curfn {
+ base.Fatalf("curfn mismatch: %v != %v for %v", n.Curfn, e.curfn, n)
+ }
+
+ if n.Opt != nil {
+ base.Fatalf("%v already has a location", n)
+ }
+ n.Opt = loc
+ }
+ }
+ return loc
+}
+
+// teeHole returns a new hole that flows into each hole of ks,
+// similar to the Unix tee(1) command.
+func (e *escape) teeHole(ks ...hole) hole {
+ if len(ks) == 0 {
+ return e.discardHole()
+ }
+ if len(ks) == 1 {
+ return ks[0]
+ }
+ // TODO(mdempsky): Optimize if there's only one non-discard hole?
+
+ // Given holes "l1 = _", "l2 = **_", "l3 = *_", ..., create a
+ // new temporary location ltmp, wire it into place, and return
+ // a hole for "ltmp = _".
+ loc := e.newLoc(nil, true)
+ for _, k := range ks {
+ // N.B., "p = &q" and "p = &tmp; tmp = q" are not
+ // semantically equivalent. To combine holes like "l1
+ // = _" and "l2 = &_", we'd need to wire them as "l1 =
+ // *ltmp" and "l2 = ltmp" and return "ltmp = &_"
+ // instead.
+ if k.derefs < 0 {
+ base.Fatalf("teeHole: negative derefs")
+ }
+
+ e.flow(k, loc)
+ }
+ return loc.asHole()
+}
+
+// later returns a new hole that flows into k, but some time later.
+// Its main effect is to prevent immediate reuse of temporary
+// variables introduced during Order.
+func (e *escape) later(k hole) hole {
+ loc := e.newLoc(nil, false)
+ e.flow(k, loc)
+ return loc.asHole()
+}
+
+// Fmt is called from node printing to print information about escape analysis results.
+func Fmt(n ir.Node) string {
+ text := ""
+ switch n.Esc() {
+ case ir.EscUnknown:
+ break
+
+ case ir.EscHeap:
+ text = "esc(h)"
+
+ case ir.EscNone:
+ text = "esc(no)"
+
+ case ir.EscNever:
+ text = "esc(N)"
+
+ default:
+ text = fmt.Sprintf("esc(%d)", n.Esc())
+ }
+
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if loc, ok := n.Opt.(*location); ok && loc.loopDepth != 0 {
+ if text != "" {
+ text += " "
+ }
+ text += fmt.Sprintf("ld(%d)", loc.loopDepth)
+ }
+ }
+
+ return text
+}
diff --git a/src/cmd/compile/internal/escape/leaks.go b/src/cmd/compile/internal/escape/leaks.go
new file mode 100644
index 0000000..4c848a5
--- /dev/null
+++ b/src/cmd/compile/internal/escape/leaks.go
@@ -0,0 +1,106 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "math"
+ "strings"
+)
+
+const numEscResults = 7
+
+// A leaks value represents a set of assignment flows from a parameter
+// to the heap or to any of its function's (first numEscResults)
+// result parameters.
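+//
+// Each element is a deref count biased by one: 0 means "no flow", and a
+// value v > 0 records a flow with v-1 dereferences. Index 0 tracks flow
+// to the heap; index 1+i tracks flow to the i'th result parameter.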
+type leaks [1 + numEscResults]uint8
+
+// Empty reports whether l is an empty set (i.e., no assignment flows).
+func (l leaks) Empty() bool { return l == leaks{} }
+
+// Heap returns the minimum deref count of any assignment flow from l
+// to the heap. If no such flows exist, Heap returns -1.
+func (l leaks) Heap() int { return l.get(0) }
+
+// Result returns the minimum deref count of any assignment flow from
+// l to its function's i'th result parameter. If no such flows exist,
+// Result returns -1.
+func (l leaks) Result(i int) int { return l.get(1 + i) }
+
+// AddHeap adds an assignment flow from l to the heap.
+func (l *leaks) AddHeap(derefs int) { l.add(0, derefs) }
+
+// AddResult adds an assignment flow from l to its function's i'th
+// result parameter.
+func (l *leaks) AddResult(i, derefs int) { l.add(1+i, derefs) }
+
+func (l *leaks) setResult(i, derefs int) { l.set(1+i, derefs) }
+
+func (l leaks) get(i int) int { return int(l[i]) - 1 }
+
+func (l *leaks) add(i, derefs int) {
+ if old := l.get(i); old < 0 || derefs < old {
+ l.set(i, derefs)
+ }
+}
+
+func (l *leaks) set(i, derefs int) {
+ v := derefs + 1
+ if v < 0 {
+ base.Fatalf("invalid derefs count: %v", derefs)
+ }
+ if v > math.MaxUint8 {
+ v = math.MaxUint8
+ }
+
+ l[i] = uint8(v)
+}
+
+// Optimize removes result flow paths that are equal in length or
+// longer than the shortest heap flow path.
+func (l *leaks) Optimize() {
+ // If we have a path to the heap, then there's no use in
+ // keeping equal or longer paths elsewhere.
+ if x := l.Heap(); x >= 0 {
+ for i := 0; i < numEscResults; i++ {
+ if l.Result(i) >= x {
+ l.setResult(i, -1)
+ }
+ }
+ }
+}
+
+var leakTagCache = map[leaks]string{}
+
+// Encode converts l into a binary string for export data.
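+// The encoding is "esc:" followed by the raw leak bytes with trailing
+// zero bytes trimmed; a parameter that leaks directly to the heap
+// (Heap() == 0) is encoded as the empty string.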
+func (l leaks) Encode() string {
+ if l.Heap() == 0 {
+ // Space optimization: empty string encodes more
+ // efficiently in export data.
+ return ""
+ }
+ if s, ok := leakTagCache[l]; ok {
+ return s
+ }
+
+ n := len(l)
+ for n > 0 && l[n-1] == 0 {
+ n--
+ }
+ s := "esc:" + string(l[:n])
+ leakTagCache[l] = s
+ return s
+}
+
+// parseLeaks parses a binary string representing a leaks value.
+func parseLeaks(s string) leaks {
+ var l leaks
+ if !strings.HasPrefix(s, "esc:") {
+ l.AddHeap(0)
+ return l
+ }
+ copy(l[:], s[4:])
+ return l
+}
diff --git a/src/cmd/compile/internal/escape/solve.go b/src/cmd/compile/internal/escape/solve.go
new file mode 100644
index 0000000..77d6b27
--- /dev/null
+++ b/src/cmd/compile/internal/escape/solve.go
@@ -0,0 +1,289 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+)
+
+// walkAll computes the minimal dereferences between all pairs of
+// locations.
+func (b *batch) walkAll() {
+ // We use a work queue to keep track of locations that we need
+ // to visit, and repeatedly walk until we reach a fixed point.
+ //
+ // We walk once from each location (including the heap), and
+ // then re-enqueue each location on its transition from
+ // transient->!transient and !escapes->escapes, which can each
+	// happen at most once. So we take Θ(len(b.allLocs)) walks.
+
+	// LIFO queue, has enough room for b.allLocs and b.heapLoc.
+ todo := make([]*location, 0, len(b.allLocs)+1)
+ enqueue := func(loc *location) {
+ if !loc.queued {
+ todo = append(todo, loc)
+ loc.queued = true
+ }
+ }
+
+ for _, loc := range b.allLocs {
+ enqueue(loc)
+ }
+ enqueue(&b.heapLoc)
+
+ var walkgen uint32
+ for len(todo) > 0 {
+ root := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+ root.queued = false
+
+ walkgen++
+ b.walkOne(root, walkgen, enqueue)
+ }
+}
+
+// walkOne computes the minimal number of dereferences from root to
+// all other locations.
+func (b *batch) walkOne(root *location, walkgen uint32, enqueue func(*location)) {
+ // The data flow graph has negative edges (from addressing
+ // operations), so we use the Bellman-Ford algorithm. However,
+ // we don't have to worry about infinite negative cycles since
+ // we bound intermediate dereference counts to 0.
+
+ root.walkgen = walkgen
+ root.derefs = 0
+ root.dst = nil
+
+ todo := []*location{root} // LIFO queue
+ for len(todo) > 0 {
+ l := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ derefs := l.derefs
+
+ // If l.derefs < 0, then l's address flows to root.
+ addressOf := derefs < 0
+ if addressOf {
+ // For a flow path like "root = &l; l = x",
+ // l's address flows to root, but x's does
+ // not. We recognize this by lower bounding
+ // derefs at 0.
+ derefs = 0
+
+ // If l's address flows to a non-transient
+ // location, then l can't be transiently
+ // allocated.
+ if !root.transient && l.transient {
+ l.transient = false
+ enqueue(l)
+ }
+ }
+
+ if b.outlives(root, l) {
+ // l's value flows to root. If l is a function
+ // parameter and root is the heap or a
+ // corresponding result parameter, then record
+ // that value flow for tagging the function
+ // later.
+ if l.isName(ir.PPARAM) {
+ if (logopt.Enabled() || base.Flag.LowerM >= 2) && !l.escapes {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: parameter %v leaks to %s with derefs=%d:\n", base.FmtPos(l.n.Pos()), l.n, b.explainLoc(root), derefs)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "leak", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf("parameter %v leaks to %s with derefs=%d", l.n, b.explainLoc(root), derefs), explanation)
+ }
+ }
+ l.leakTo(root, derefs)
+ }
+
+ // If l's address flows somewhere that
+ // outlives it, then l needs to be heap
+ // allocated.
+ if addressOf && !l.escapes {
+ if logopt.Enabled() || base.Flag.LowerM >= 2 {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: %v escapes to heap:\n", base.FmtPos(l.n.Pos()), l.n)
+ }
+ explanation := b.explainPath(root, l)
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ logopt.LogOpt(l.n.Pos(), "escape", "escape", ir.FuncName(e_curfn), fmt.Sprintf("%v escapes to heap", l.n), explanation)
+ }
+ }
+ l.escapes = true
+ enqueue(l)
+ continue
+ }
+ }
+
+ for i, edge := range l.edges {
+ if edge.src.escapes {
+ continue
+ }
+ d := derefs + edge.derefs
+ if edge.src.walkgen != walkgen || edge.src.derefs > d {
+ edge.src.walkgen = walkgen
+ edge.src.derefs = d
+ edge.src.dst = l
+ edge.src.dstEdgeIdx = i
+ todo = append(todo, edge.src)
+ }
+ }
+ }
+}
+
+// explainPath prints an explanation of how src flows to the walk root.
+func (b *batch) explainPath(root, src *location) []*logopt.LoggedOpt {
+ visited := make(map[*location]bool)
+ pos := base.FmtPos(src.n.Pos())
+ var explanation []*logopt.LoggedOpt
+ for {
+ // Prevent infinite loop.
+ if visited[src] {
+ if base.Flag.LowerM >= 2 {
+ fmt.Printf("%s: warning: truncated explanation due to assignment cycle; see golang.org/issue/35518\n", pos)
+ }
+ break
+ }
+ visited[src] = true
+ dst := src.dst
+ edge := &dst.edges[src.dstEdgeIdx]
+ if edge.src != src {
+ base.Fatalf("path inconsistency: %v != %v", edge.src, src)
+ }
+
+ explanation = b.explainFlow(pos, dst, src, edge.derefs, edge.notes, explanation)
+
+ if dst == root {
+ break
+ }
+ src = dst
+ }
+
+ return explanation
+}
+
+func (b *batch) explainFlow(pos string, dst, srcloc *location, derefs int, notes *note, explanation []*logopt.LoggedOpt) []*logopt.LoggedOpt {
+ ops := "&"
+ if derefs >= 0 {
+ ops = strings.Repeat("*", derefs)
+ }
+ print := base.Flag.LowerM >= 2
+
+ flow := fmt.Sprintf(" flow: %s = %s%v:", b.explainLoc(dst), ops, b.explainLoc(srcloc))
+ if print {
+ fmt.Printf("%s:%s\n", pos, flow)
+ }
+ if logopt.Enabled() {
+ var epos src.XPos
+ if notes != nil {
+ epos = notes.where.Pos()
+ } else if srcloc != nil && srcloc.n != nil {
+ epos = srcloc.n.Pos()
+ }
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(epos, "escflow", "escape", ir.FuncName(e_curfn), flow))
+ }
+
+ for note := notes; note != nil; note = note.next {
+ if print {
+ fmt.Printf("%s: from %v (%v) at %s\n", pos, note.where, note.why, base.FmtPos(note.where.Pos()))
+ }
+ if logopt.Enabled() {
+ var e_curfn *ir.Func // TODO(mdempsky): Fix.
+ explanation = append(explanation, logopt.NewLoggedOpt(note.where.Pos(), "escflow", "escape", ir.FuncName(e_curfn),
+ fmt.Sprintf(" from %v (%v)", note.where, note.why)))
+ }
+ }
+ return explanation
+}
+
+func (b *batch) explainLoc(l *location) string {
+ if l == &b.heapLoc {
+ return "{heap}"
+ }
+ if l.n == nil {
+ // TODO(mdempsky): Omit entirely.
+ return "{temp}"
+ }
+ if l.n.Op() == ir.ONAME {
+ return fmt.Sprintf("%v", l.n)
+ }
+ return fmt.Sprintf("{storage for %v}", l.n)
+}
+
+// outlives reports whether values stored in l may survive beyond
+// other's lifetime if stack allocated.
+func (b *batch) outlives(l, other *location) bool {
+ // The heap outlives everything.
+ if l.escapes {
+ return true
+ }
+
+ // We don't know what callers do with returned values, so
+ // pessimistically we need to assume they flow to the heap and
+ // outlive everything too.
+ if l.isName(ir.PPARAMOUT) {
+ // Exception: Directly called closures can return
+ // locations allocated outside of them without forcing
+ // them to the heap. For example:
+ //
+ // var u int // okay to stack allocate
+ // *(func() *int { return &u }()) = 42
+ if containsClosure(other.curfn, l.curfn) && l.curfn.ClosureCalled() {
+ return false
+ }
+
+ return true
+ }
+
+ // If l and other are within the same function, then l
+ // outlives other if it was declared outside other's loop
+ // scope. For example:
+ //
+ // var l *int
+ // for {
+ // l = new(int)
+ // }
+ if l.curfn == other.curfn && l.loopDepth < other.loopDepth {
+ return true
+ }
+
+ // If other is declared within a child closure of where l is
+ // declared, then l outlives it. For example:
+ //
+ // var l *int
+ // func() {
+ // l = new(int)
+ // }
+ if containsClosure(l.curfn, other.curfn) {
+ return true
+ }
+
+ return false
+}
+
+// containsClosure reports whether c is a closure contained within f.
+func containsClosure(f, c *ir.Func) bool {
+ // Common case.
+ if f == c {
+ return false
+ }
+
+ // Closures within function Foo are named like "Foo.funcN..."
+ // TODO(mdempsky): Better way to recognize this.
+ fn := f.Sym().Name
+ cn := c.Sym().Name
+ return len(cn) > len(fn) && cn[:len(fn)] == fn && cn[len(fn)] == '.'
+}
diff --git a/src/cmd/compile/internal/escape/stmt.go b/src/cmd/compile/internal/escape/stmt.go
new file mode 100644
index 0000000..0afb5d6
--- /dev/null
+++ b/src/cmd/compile/internal/escape/stmt.go
@@ -0,0 +1,208 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "fmt"
+)
+
+// stmt evaluates a single Go statement.
+func (e *escape) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:[%d] %v stmt: %v\n", base.FmtPos(base.Pos), e.loopDepth, e.curfn, n)
+ }
+
+ e.stmts(n.Init())
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected stmt: %v", n)
+
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL, ir.OINLMARK:
+ // nop
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ // TODO(mdempsky): Handle dead code?
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ e.stmts(n.List)
+
+ case ir.ODCL:
+ // Record loop depth at declaration.
+ n := n.(*ir.Decl)
+ if !ir.IsBlank(n.X) {
+ e.dcl(n.X)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ switch e.labels[n.Label] {
+ case nonlooping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v:%v non-looping label\n", base.FmtPos(base.Pos), n)
+ }
+ case looping:
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: %v looping label\n", base.FmtPos(base.Pos), n)
+ }
+ e.loopDepth++
+ default:
+ base.Fatalf("label missing tag")
+ }
+ delete(e.labels, n.Label)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ e.discard(n.Cond)
+ e.block(n.Body)
+ e.block(n.Else)
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ e.loopDepth++
+ e.discard(n.Cond)
+ e.stmt(n.Post)
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.ORANGE:
+ // for Key, Value = range X { Body }
+ n := n.(*ir.RangeStmt)
+
+ // X is evaluated outside the loop.
+ tmp := e.newLoc(nil, false)
+ e.expr(tmp.asHole(), n.X)
+
+ e.loopDepth++
+ ks := e.addrs([]ir.Node{n.Key, n.Value})
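+ // For an array, the element is part of X itself; for slices and
+ // other reference types, reaching it involves an implicit
+ // dereference of X's underlying storage.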
+ if n.X.Type().IsArray() {
+ e.flow(ks[1].note(n, "range"), tmp)
+ } else {
+ e.flow(ks[1].deref(n, "range-deref"), tmp)
+ }
+ e.reassigned(ks, n)
+
+ e.block(n.Body)
+ e.loopDepth--
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+
+ if guard, ok := n.Tag.(*ir.TypeSwitchGuard); ok {
+ var ks []hole
+ if guard.Tag != nil {
+ for _, cas := range n.Cases {
+ cv := cas.Var
+ k := e.dcl(cv) // type switch variables have no ODCL.
+ if cv.Type().HasPointers() {
+ ks = append(ks, k.dotType(cv.Type(), cas, "switch case"))
+ }
+ }
+ }
+ e.expr(e.teeHole(ks...), n.Tag.(*ir.TypeSwitchGuard).X)
+ } else {
+ e.discard(n.Tag)
+ }
+
+ for _, cas := range n.Cases {
+ e.discards(cas.List)
+ e.block(cas.Body)
+ }
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ for _, cas := range n.Cases {
+ e.stmt(cas.Comm)
+ e.block(cas.Body)
+ }
+ case ir.ORECV:
+ // TODO(mdempsky): Consider e.discard(n.Left).
+ n := n.(*ir.UnaryExpr)
+ e.exprSkipInit(e.discardHole(), n) // already visited n.Ninit
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ e.discard(n.Chan)
+ e.assignHeap(n.Value, "send", n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ // TODO(mdempsky): Worry about OLSH/ORSH?
+ e.assignList([]ir.Node{n.X}, []ir.Node{n.Y}, "assign", n)
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair", n)
+
+ case ir.OAS2DOTTYPE: // v, ok = x.(type)
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-dot-type", n)
+ case ir.OAS2MAPR: // v, ok = m[k]
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-mapr", n)
+ case ir.OAS2RECV, ir.OSELRECV2: // v, ok = <-ch
+ n := n.(*ir.AssignListStmt)
+ e.assignList(n.Lhs, n.Rhs, "assign-pair-receive", n)
+
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ e.stmts(n.Rhs[0].Init())
+ ks := e.addrs(n.Lhs)
+ e.call(ks, n.Rhs[0])
+ e.reassigned(ks, n)
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ results := e.curfn.Type().Results().FieldSlice()
+ dsts := make([]ir.Node, len(results))
+ for i, res := range results {
+ dsts[i] = res.Nname.(*ir.Name)
+ }
+ e.assignList(dsts, n.Results, "return", n)
+ case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OINLCALL, ir.OCLOSE, ir.OCOPY, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ e.call(nil, n)
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ e.goDeferStmt(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ e.call(nil, n.Call)
+ }
+}
+
+func (e *escape) stmts(l ir.Nodes) {
+ for _, n := range l {
+ e.stmt(n)
+ }
+}
+
+// block is like stmts, but preserves loopDepth.
+func (e *escape) block(l ir.Nodes) {
+ old := e.loopDepth
+ e.stmts(l)
+ e.loopDepth = old
+}
+
+func (e *escape) dcl(n *ir.Name) hole {
+ if n.Curfn != e.curfn || n.IsClosureVar() {
+ base.Fatalf("bad declaration of %v", n)
+ }
+ loc := e.oldLoc(n)
+ loc.loopDepth = e.loopDepth
+ return loc.asHole()
+}
diff --git a/src/cmd/compile/internal/escape/utils.go b/src/cmd/compile/internal/escape/utils.go
new file mode 100644
index 0000000..2c6e9bc
--- /dev/null
+++ b/src/cmd/compile/internal/escape/utils.go
@@ -0,0 +1,215 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package escape
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+)
+
+func isSliceSelfAssign(dst, src ir.Node) bool {
+ // Detect the following special case.
+ //
+ // func (b *Buffer) Foo() {
+ // n, m := ...
+ // b.buf = b.buf[n:m]
+ // }
+ //
+ // This assignment is a no-op for escape analysis;
+ // it does not store any new pointers into b that were not already there.
+ // However, without this special case b will escape, because we assign to OIND/ODOTPTR.
+ // Here we assume that the statement will not contain calls,
+ // that is, that the order pass will move any calls to init.
+ // Otherwise base ONAME value could change between the moments
+ // when we evaluate it for dst and for src.
+
+ // dst is ONAME dereference.
+ var dstX ir.Node
+ switch dst.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ dst := dst.(*ir.StarExpr)
+ dstX = dst.X
+ case ir.ODOTPTR:
+ dst := dst.(*ir.SelectorExpr)
+ dstX = dst.X
+ }
+ if dstX.Op() != ir.ONAME {
+ return false
+ }
+ // src is a slice operation.
+ switch src.Op() {
+ case ir.OSLICE, ir.OSLICE3, ir.OSLICESTR:
+ // OK.
+ case ir.OSLICEARR, ir.OSLICE3ARR:
+ // Since arrays are embedded into the containing object,
+ // slicing a non-pointer array will introduce a new pointer into b that was not already there
+ // (pointer to b itself). After such assignment, if b contents escape,
+ // b escapes as well. If we ignore such OSLICEARR, we will conclude
+ // that b does not escape when b contents do.
+ //
+ // Pointer to an array is OK since it's not stored inside b directly.
+ // For slicing an array (not pointer to array), there is an implicit OADDR.
+ // We check that to determine non-pointer array slicing.
+ src := src.(*ir.SliceExpr)
+ if src.X.Op() == ir.OADDR {
+ return false
+ }
+ default:
+ return false
+ }
+ // slice is applied to ONAME dereference.
+ var baseX ir.Node
+ switch base := src.(*ir.SliceExpr).X; base.Op() {
+ default:
+ return false
+ case ir.ODEREF:
+ base := base.(*ir.StarExpr)
+ baseX = base.X
+ case ir.ODOTPTR:
+ base := base.(*ir.SelectorExpr)
+ baseX = base.X
+ }
+ if baseX.Op() != ir.ONAME {
+ return false
+ }
+ // dst and src reference the same base ONAME.
+ return dstX.(*ir.Name) == baseX.(*ir.Name)
+}
+
+// isSelfAssign reports whether assignment from src to dst can
+// be ignored by the escape analysis as it's effectively a self-assignment.
+func isSelfAssign(dst, src ir.Node) bool {
+ if isSliceSelfAssign(dst, src) {
+ return true
+ }
+
+ // Detect trivial assignments that assign back to the same object.
+ //
+ // It covers these cases:
+ // val.x = val.y
+ // val.x[i] = val.y[j]
+ // val.x1.x2 = val.x1.y2
+ // ... etc
+ //
+ // These assignments do not change assigned object lifetime.
+
+ if dst == nil || src == nil || dst.Op() != src.Op() {
+ return false
+ }
+
+ // The expression prefix must be both "safe" and identical.
+ switch dst.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ // Safe trailing accessors that are permitted to differ.
+ dst := dst.(*ir.SelectorExpr)
+ src := src.(*ir.SelectorExpr)
+ return ir.SameSafeExpr(dst.X, src.X)
+ case ir.OINDEX:
+ dst := dst.(*ir.IndexExpr)
+ src := src.(*ir.IndexExpr)
+ if mayAffectMemory(dst.Index) || mayAffectMemory(src.Index) {
+ return false
+ }
+ return ir.SameSafeExpr(dst.X, src.X)
+ default:
+ return false
+ }
+}
+
+// mayAffectMemory reports whether evaluation of n may affect the program's
+// memory state. If the expression can't affect memory state, then it can be
+// safely ignored by the escape analysis.
+func mayAffectMemory(n ir.Node) bool {
+ // We may want to use a list of "memory safe" ops instead of generally
+ // "side-effect free", which would include all calls and other ops that can
+ // allocate or change global state. For now, it's safer to start with the latter.
+ //
+ // We're ignoring things like division by zero, index out of range,
+ // and nil pointer dereference here.
+
+ // TODO(rsc): It seems like it should be possible to replace this with
+ // an ir.Any looking for any op that's not the ones in the case statement.
+ // But that produces changes in the compiled output detected by buildall.
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return false
+
+ case ir.OADD, ir.OSUB, ir.OOR, ir.OXOR, ir.OMUL, ir.OLSH, ir.ORSH, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Y)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return mayAffectMemory(n.X) || mayAffectMemory(n.Index)
+
+ case ir.OCONVNOP, ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.OLEN, ir.OCAP, ir.ONOT, ir.OBITNOT, ir.OPLUS, ir.ONEG, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return mayAffectMemory(n.X)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return mayAffectMemory(n.X)
+
+ default:
+ return true
+ }
+}
+
+// HeapAllocReason returns the reason the given Node must be heap
+// allocated, or the empty string if it does not need to be.
+func HeapAllocReason(n ir.Node) string {
+ if n == nil || n.Type() == nil {
+ return ""
+ }
+
+ // Parameters are always passed via the stack.
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ if n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT {
+ return ""
+ }
+ }
+
+ if n.Type().Size() > ir.MaxStackVarSize {
+ return "too large for stack"
+ }
+
+ if (n.Op() == ir.ONEW || n.Op() == ir.OPTRLIT) && n.Type().Elem().Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OCLOSURE && typecheck.ClosureType(n.(*ir.ClosureExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+ if n.Op() == ir.OMETHVALUE && typecheck.MethodValueType(n.(*ir.SelectorExpr)).Size() > ir.MaxImplicitStackVarSize {
+ return "too large for stack"
+ }
+
+ if n.Op() == ir.OMAKESLICE {
+ n := n.(*ir.MakeExpr)
+ r := n.Cap
+ if r == nil {
+ r = n.Len
+ }
+ if !ir.IsSmallIntConst(r) {
+ return "non-constant size"
+ }
+ if t := n.Type(); t.Elem().Size() != 0 && ir.Int64Val(r) > ir.MaxImplicitStackVarSize/t.Elem().Size() {
+ return "too large for stack"
+ }
+ }
+
+ return ""
+}
diff --git a/src/cmd/compile/internal/gc/bootstrap.go b/src/cmd/compile/internal/gc/bootstrap.go
new file mode 100644
index 0000000..37b0d59
--- /dev/null
+++ b/src/cmd/compile/internal/gc/bootstrap.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.8
+// +build !go1.8
+
+package gc
+
+import (
+ "cmd/compile/internal/base"
+ "runtime"
+)
+
+func startMutexProfiling() {
+ base.Fatalf("mutex profiling unavailable in version %v", runtime.Version())
+}
diff --git a/src/cmd/compile/internal/gc/compile.go b/src/cmd/compile/internal/gc/compile.go
new file mode 100644
index 0000000..0050445
--- /dev/null
+++ b/src/cmd/compile/internal/gc/compile.go
@@ -0,0 +1,169 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/walk"
+ "cmd/internal/obj"
+)
+
+// "Portable" code generation.
+
+var (
+ compilequeue []*ir.Func // functions waiting to be compiled
+)
+
+func enqueueFunc(fn *ir.Func) {
+ if ir.CurFunc != nil {
+ base.FatalfAt(fn.Pos(), "enqueueFunc %v inside %v", fn, ir.CurFunc)
+ }
+
+ if ir.FuncName(fn) == "_" {
+ // Skip compiling blank functions.
+ // Frontend already reported any spec-mandated errors (#29870).
+ return
+ }
+
+ if clo := fn.OClosure; clo != nil && !ir.IsTrivialClosure(clo) {
+ return // we'll get this as part of its enclosing function
+ }
+
+ if len(fn.Body) == 0 {
+ // Initialize ABI wrappers if necessary.
+ ssagen.InitLSym(fn, false)
+ types.CalcSize(fn.Type())
+ a := ssagen.AbiForBodylessFuncStackMap(fn)
+ abiInfo := a.ABIAnalyzeFuncType(fn.Type().FuncType()) // abiInfo has spill/home locations for wrapper
+ liveness.WriteFuncMap(fn, abiInfo)
+ if fn.ABI == obj.ABI0 {
+ x := ssagen.EmitArgInfo(fn, abiInfo)
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.LOCAL)
+ }
+ return
+ }
+
+ errorsBefore := base.Errors()
+
+ todo := []*ir.Func{fn}
+ for len(todo) > 0 {
+ next := todo[len(todo)-1]
+ todo = todo[:len(todo)-1]
+
+ prepareFunc(next)
+ todo = append(todo, next.Closures...)
+ }
+
+ if base.Errors() > errorsBefore {
+ return
+ }
+
+ // Enqueue just fn itself. compileFunctions will handle
+ // scheduling compilation of its closures after it's done.
+ compilequeue = append(compilequeue, fn)
+}
+
+// prepareFunc handles any remaining frontend compilation tasks that
+// aren't yet safe to perform concurrently.
+func prepareFunc(fn *ir.Func) {
+ // Set up the function's LSym early to avoid data races with the assemblers.
+ // Do this before walk, as walk needs the LSym to set attributes/relocations
+ // (e.g. in MarkTypeUsedInInterface).
+ ssagen.InitLSym(fn, true)
+
+ // Calculate parameter offsets.
+ types.CalcSize(fn.Type())
+
+ typecheck.DeclContext = ir.PAUTO
+ ir.CurFunc = fn
+ walk.Walk(fn)
+ ir.CurFunc = nil // enforce no further uses of CurFunc
+ typecheck.DeclContext = ir.PEXTERN
+}
+
+// compileFunctions compiles all functions in compilequeue.
+// It fans out nBackendWorkers to do the work
+// and waits for them to complete.
+func compileFunctions() {
+ if len(compilequeue) == 0 {
+ return
+ }
+
+ if race.Enabled {
+ // Randomize compilation order to try to shake out races.
+ tmp := make([]*ir.Func, len(compilequeue))
+ perm := rand.Perm(len(compilequeue))
+ for i, v := range perm {
+ tmp[v] = compilequeue[i]
+ }
+ copy(compilequeue, tmp)
+ } else {
+ // Compile the longest functions first,
+ // since they're most likely to be the slowest.
+ // This helps avoid stragglers.
+ sort.Slice(compilequeue, func(i, j int) bool {
+ return len(compilequeue[i].Body) > len(compilequeue[j].Body)
+ })
+ }
+
+ // By default, we perform work right away on the current goroutine
+ // as the solo worker.
+ queue := func(work func(int)) {
+ work(0)
+ }
+
+ if nWorkers := base.Flag.LowerC; nWorkers > 1 {
+ // For concurrent builds, we create a goroutine per task, but
+ // require them to hold a unique worker ID while performing work
+ // to limit parallelism.
+ workerIDs := make(chan int, nWorkers)
+ for i := 0; i < nWorkers; i++ {
+ workerIDs <- i
+ }
+
+ queue = func(work func(int)) {
+ go func() {
+ worker := <-workerIDs
+ work(worker)
+ workerIDs <- worker
+ }()
+ }
+ }
+
+ var wg sync.WaitGroup
+ var compile func([]*ir.Func)
+ compile = func(fns []*ir.Func) {
+ wg.Add(len(fns))
+ for _, fn := range fns {
+ fn := fn
+ queue(func(worker int) {
+ ssagen.Compile(fn, worker)
+ compile(fn.Closures)
+ wg.Done()
+ })
+ }
+ }
+
+ types.CalcSizeDisabled = true // not safe to calculate sizes concurrently
+ base.Ctxt.InParallel = true
+
+ compile(compilequeue)
+ compilequeue = nil
+ wg.Wait()
+
+ base.Ctxt.InParallel = false
+ types.CalcSizeDisabled = false
+}
diff --git a/src/cmd/compile/internal/gc/export.go b/src/cmd/compile/internal/gc/export.go
new file mode 100644
index 0000000..c9acfc1
--- /dev/null
+++ b/src/cmd/compile/internal/gc/export.go
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/bio"
+)
+
+func dumpasmhdr() {
+ b, err := bio.Create(base.Flag.AsmHdr)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ fmt.Fprintf(b, "// generated by compile -asmhdr from package %s\n\n", types.LocalPkg.Name)
+ for _, n := range typecheck.Target.Asms {
+ if n.Sym().IsBlank() {
+ continue
+ }
+ switch n.Op() {
+ case ir.OLITERAL:
+ t := n.Val().Kind()
+ if t == constant.Float || t == constant.Complex {
+ break
+ }
+ fmt.Fprintf(b, "#define const_%s %v\n", n.Sym().Name, n.Val().ExactString())
+
+ case ir.OTYPE:
+ t := n.Type()
+ if !t.IsStruct() || t.StructType().Map != nil || t.IsFuncArgStruct() {
+ break
+ }
+ fmt.Fprintf(b, "#define %s__size %d\n", n.Sym().Name, int(t.Size()))
+ for _, f := range t.Fields().Slice() {
+ if !f.Sym.IsBlank() {
+ fmt.Fprintf(b, "#define %s_%s %d\n", n.Sym().Name, f.Sym.Name, int(f.Offset))
+ }
+ }
+ }
+ }
+
+ b.Close()
+}
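
For illustration only (hypothetical declarations, not output from this tree): with -asmhdr set, package-level declarations such as

	const bufSize = 1024

	type header struct {
		len int64
		cap int64
	}

would make dumpasmhdr emit roughly the following on a 64-bit target, which assembly files can then reference by name instead of hard-coding sizes and offsets:

	#define const_bufSize 1024
	#define header__size 16
	#define header_len 0
	#define header_cap 8
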
diff --git a/src/cmd/compile/internal/gc/main.go b/src/cmd/compile/internal/gc/main.go
new file mode 100644
index 0000000..4c4a724
--- /dev/null
+++ b/src/cmd/compile/internal/gc/main.go
@@ -0,0 +1,380 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/deadcode"
+ "cmd/compile/internal/devirtualize"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/pkginit"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "flag"
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "os"
+ "runtime"
+ "sort"
+)
+
+// handlePanic ensures that we print out an "internal compiler error" for any panic
+// or runtime exception during front-end compiler processing (unless there have
+// already been some compiler errors). It may also be invoked from the explicit panic in
+// hcrash(), in which case we pass the panic on through.
+func handlePanic() {
+ if err := recover(); err != nil {
+ if err == "-h" {
+ // Force real panic now with -h option (hcrash) - the error
+ // information will have already been printed.
+ panic(err)
+ }
+ base.Fatalf("panic: %v", err)
+ }
+}
+
+// Main parses flags and Go source files specified in the command-line
+// arguments, type-checks the parsed Go package, compiles functions to machine
+// code, and finally writes the compiled package definition to disk.
+func Main(archInit func(*ssagen.ArchInfo)) {
+ base.Timer.Start("fe", "init")
+
+ defer handlePanic()
+
+ archInit(&ssagen.Arch)
+
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+
+ // UseBASEntries is preferred because it shaves about 2% off build time, but LLDB, dsymutil, and dwarfdump
+ // on Darwin don't support it properly, especially since macOS 10.14 (Mojave). This is exposed as a flag
+ // to allow testing with LLVM tools on Linux, and to help with reporting this bug to the LLVM project.
+ // See bugs 31188 and 21945 (CLs 170638, 98075, 72371).
+ base.Ctxt.UseBASEntries = base.Ctxt.Headtype != objabi.Hdarwin
+
+ types.LocalPkg = types.NewPkg("", "")
+ types.LocalPkg.Prefix = "\"\""
+
+ // We won't know localpkg's height until after import
+	// processing. In the meantime, set it to MaxPkgHeight to ensure
+ // height comparisons at least work until then.
+ types.LocalPkg.Height = types.MaxPkgHeight
+
+ // pseudo-package, for scoping
+ types.BuiltinPkg = types.NewPkg("go.builtin", "") // TODO(gri) name this package go.builtin?
+ types.BuiltinPkg.Prefix = "go.builtin" // not go%2ebuiltin
+
+ // pseudo-package, accessed by import "unsafe"
+ types.UnsafePkg = types.NewPkg("unsafe", "unsafe")
+
+ // Pseudo-package that contains the compiler's builtin
+ // declarations for package runtime. These are declared in a
+ // separate package to avoid conflicts with package runtime's
+ // actual declarations, which may differ intentionally but
+ // insignificantly.
+ ir.Pkgs.Runtime = types.NewPkg("go.runtime", "runtime")
+ ir.Pkgs.Runtime.Prefix = "runtime"
+
+ // pseudo-packages used in symbol tables
+ ir.Pkgs.Itab = types.NewPkg("go.itab", "go.itab")
+ ir.Pkgs.Itab.Prefix = "go.itab" // not go%2eitab
+
+ // pseudo-package used for methods with anonymous receivers
+ ir.Pkgs.Go = types.NewPkg("go", "")
+
+ base.DebugSSA = ssa.PhaseOption
+ base.ParseFlags()
+
+ // Record flags that affect the build result. (And don't
+ // record flags that don't, since that would cause spurious
+ // changes in the binary.)
+ dwarfgen.RecordFlags("B", "N", "l", "msan", "race", "asan", "shared", "dynlink", "dwarf", "dwarflocationlists", "dwarfbasentries", "smallframes", "spectre")
+
+ if !base.EnableTrace && base.Flag.LowerT {
+ log.Fatalf("compiler not built with support for -t")
+ }
+
+ // Enable inlining (after RecordFlags, to avoid recording the rewritten -l). For now:
+ // default: inlining on. (Flag.LowerL == 1)
+ // -l: inlining off (Flag.LowerL == 0)
+ // -l=2, -l=3: inlining on again, with extra debugging (Flag.LowerL > 1)
+ if base.Flag.LowerL <= 1 {
+ base.Flag.LowerL = 1 - base.Flag.LowerL
+ }
+
+ if base.Flag.SmallFrames {
+ ir.MaxStackVarSize = 128 * 1024
+ ir.MaxImplicitStackVarSize = 16 * 1024
+ }
+
+ if base.Flag.Dwarf {
+ base.Ctxt.DebugInfo = dwarfgen.Info
+ base.Ctxt.GenAbstractFunc = dwarfgen.AbstractFunc
+ base.Ctxt.DwFixups = obj.NewDwarfFixupTable(base.Ctxt)
+ } else {
+ // turn off inline generation if no dwarf at all
+ base.Flag.GenDwarfInl = 0
+ base.Ctxt.Flag_locationlists = false
+ }
+ if base.Ctxt.Flag_locationlists && len(base.Ctxt.Arch.DWARFRegisters) == 0 {
+ log.Fatalf("location lists requested but register mapping not available on %v", base.Ctxt.Arch.Name)
+ }
+
+ types.ParseLangFlag()
+
+ symABIs := ssagen.NewSymABIs(base.Ctxt.Pkgpath)
+ if base.Flag.SymABIs != "" {
+ symABIs.ReadSymABIs(base.Flag.SymABIs)
+ }
+
+ if base.Compiling(base.NoInstrumentPkgs) {
+ base.Flag.Race = false
+ base.Flag.MSan = false
+ base.Flag.ASan = false
+ }
+
+ ssagen.Arch.LinkArch.Init(base.Ctxt)
+ startProfile()
+ if base.Flag.Race || base.Flag.MSan || base.Flag.ASan {
+ base.Flag.Cfg.Instrumenting = true
+ }
+ if base.Flag.Dwarf {
+ dwarf.EnableLogging(base.Debug.DwarfInl != 0)
+ }
+ if base.Debug.SoftFloat != 0 {
+ ssagen.Arch.SoftFloat = true
+ }
+
+	if base.Flag.JSON != "" { // parse version, destination from the JSON optimization-logging flag.
+ logopt.LogJsonOption(base.Flag.JSON)
+ }
+
+ ir.EscFmt = escape.Fmt
+ ir.IsIntrinsicCall = ssagen.IsIntrinsicCall
+ inline.SSADumpInline = ssagen.DumpInline
+ ssagen.InitEnv()
+ ssagen.InitTables()
+
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
+
+ typecheck.Target = new(ir.Package)
+
+ typecheck.NeedRuntimeType = reflectdata.NeedRuntimeType // TODO(rsc): TypeSym for lock?
+
+ base.AutogeneratedPos = makePos(src.NewFileBase("<autogenerated>", "<autogenerated>"), 1, 0)
+
+ typecheck.InitUniverse()
+ typecheck.InitRuntime()
+
+ // Parse and typecheck input.
+ noder.LoadPackage(flag.Args())
+
+ dwarfgen.RecordPackageName()
+
+ // Prepare for backend processing. This must happen before pkginit,
+ // because it generates itabs for initializing global variables.
+ ssagen.InitConfig()
+
+ // Create "init" function for package-scope variable initialization
+ // statements, if any.
+ //
+ // Note: This needs to happen early, before any optimizations. The
+	// Go spec defines a precise order in which initialization should be
+	// carried out, and even mundane optimizations like dead code
+ // removal can skew the results (e.g., #43444).
+ pkginit.MakeInit()
+
+ // Stability quirk: sort top-level declarations, so we're not
+ // sensitive to the order that functions are added. In particular,
+ // the order that noder+typecheck add function closures is very
+ // subtle, and not important to reproduce.
+ if base.Debug.UnifiedQuirks != 0 {
+ s := typecheck.Target.Decls
+ sort.SliceStable(s, func(i, j int) bool {
+ return s[i].Pos().Before(s[j].Pos())
+ })
+ }
+
+ // Eliminate some obviously dead code.
+ // Must happen after typechecking.
+ for _, n := range typecheck.Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ deadcode.Func(n.(*ir.Func))
+ }
+ }
+
+ // Compute Addrtaken for names.
+ // We need to wait until typechecking is done so that when we see &x[i]
+ // we know that x has its address taken if x is an array, but not if x is a slice.
+ // We compute Addrtaken in bulk here.
+ // After this phase, we maintain Addrtaken incrementally.
+ if typecheck.DirtyAddrtaken {
+ typecheck.ComputeAddrtaken(typecheck.Target.Decls)
+ typecheck.DirtyAddrtaken = false
+ }
+ typecheck.IncrementalAddrtaken = true
+
+ if base.Debug.TypecheckInl != 0 {
+ // Typecheck imported function bodies if Debug.l > 1,
+ // otherwise lazily when used or re-exported.
+ typecheck.AllImportedBodies()
+ }
+
+ // Inlining
+ base.Timer.Start("fe", "inlining")
+ if base.Flag.LowerL != 0 {
+ inline.InlinePackage()
+ }
+ noder.MakeWrappers(typecheck.Target) // must happen after inlining
+
+ // Devirtualize.
+ for _, n := range typecheck.Target.Decls {
+ if n.Op() == ir.ODCLFUNC {
+ devirtualize.Func(n.(*ir.Func))
+ }
+ }
+ ir.CurFunc = nil
+
+ // Build init task, if needed.
+ if initTask := pkginit.Task(); initTask != nil {
+ typecheck.Export(initTask)
+ }
+
+ // Generate ABI wrappers. Must happen before escape analysis
+	// and doesn't benefit from dead-code elimination or inlining.
+ symABIs.GenABIWrappers()
+
+ // Escape analysis.
+ // Required for moving heap allocations onto stack,
+ // which in turn is required by the closure implementation,
+ // which stores the addresses of stack variables into the closure.
+ // If the closure does not escape, it needs to be on the stack
+ // or else the stack copier will not update it.
+ // Large values are also moved off stack in escape analysis;
+ // because large values may contain pointers, it must happen early.
+ base.Timer.Start("fe", "escapes")
+ escape.Funcs(typecheck.Target.Decls)
+
+ // TODO(mdempsky): This is a hack. We need a proper, global work
+ // queue for scheduling function compilation so components don't
+ // need to adjust their behavior depending on when they're called.
+ reflectdata.AfterGlobalEscapeAnalysis = true
+
+ // Collect information for go:nowritebarrierrec
+	// checking. This must happen before transforming closures during Walk.
+ // We'll do the final check after write barriers are
+ // inserted.
+ if base.Flag.CompilingRuntime {
+ ssagen.EnableNoWriteBarrierRecCheck()
+ }
+
+ ir.CurFunc = nil
+
+ // Compile top level functions.
+ // Don't use range--walk can add functions to Target.Decls.
+ base.Timer.Start("be", "compilefuncs")
+ fcount := int64(0)
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
+ // Don't try compiling dead hidden closure.
+ if fn.IsDeadcodeClosure() {
+ continue
+ }
+ enqueueFunc(fn)
+ fcount++
+ }
+ }
+ base.Timer.AddEvent(fcount, "funcs")
+
+ compileFunctions()
+
+ if base.Flag.CompilingRuntime {
+ // Write barriers are now known. Check the call graph.
+ ssagen.NoWriteBarrierRecCheck()
+ }
+
+ // Finalize DWARF inline routine DIEs, then explicitly turn off
+ // DWARF inlining gen so as to avoid problems with generated
+ // method wrappers.
+ if base.Ctxt.DwFixups != nil {
+ base.Ctxt.DwFixups.Finalize(base.Ctxt.Pkgpath, base.Debug.DwarfInl != 0)
+ base.Ctxt.DwFixups = nil
+ base.Flag.GenDwarfInl = 0
+ }
+
+ // Write object data to disk.
+ base.Timer.Start("be", "dumpobj")
+ dumpdata()
+ base.Ctxt.NumberSyms()
+ dumpobj()
+ if base.Flag.AsmHdr != "" {
+ dumpasmhdr()
+ }
+
+ ssagen.CheckLargeStacks()
+ typecheck.CheckFuncStack()
+
+ if len(compilequeue) != 0 {
+ base.Fatalf("%d uncompiled functions", len(compilequeue))
+ }
+
+ logopt.FlushLoggedOpts(base.Ctxt, base.Ctxt.Pkgpath)
+ base.ExitIfErrors()
+
+ base.FlushErrors()
+ base.Timer.Stop()
+
+ if base.Flag.Bench != "" {
+ if err := writebench(base.Flag.Bench); err != nil {
+ log.Fatalf("cannot write benchmark data: %v", err)
+ }
+ }
+}
+
+func writebench(filename string) error {
+ f, err := os.OpenFile(filename, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ return err
+ }
+
+ var buf bytes.Buffer
+ fmt.Fprintln(&buf, "commit:", buildcfg.Version)
+ fmt.Fprintln(&buf, "goos:", runtime.GOOS)
+ fmt.Fprintln(&buf, "goarch:", runtime.GOARCH)
+ base.Timer.Write(&buf, "BenchmarkCompile:"+base.Ctxt.Pkgpath+":")
+
+ n, err := f.Write(buf.Bytes())
+ if err != nil {
+ return err
+ }
+ if n != buf.Len() {
+ panic("bad writer")
+ }
+
+ return f.Close()
+}
+
+func makePos(b *src.PosBase, line, col uint) src.XPos {
+ return base.Ctxt.PosTable.XPos(src.MakePos(b, line, col))
+}
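
As a side note on the -l handling in Main above: the raw flag value is remapped so that the flag's absence means "inlining on". A standalone sketch of just that mapping (hypothetical helper, not compiler code):

	// effectiveInlineLevel mirrors the remapping in Main: no -l means
	// inlining on (level 1), a single -l turns it off (level 0), and
	// -l=2 or -l=3 are kept as-is for extra inlining debug output.
	func effectiveInlineLevel(lowerL int) int {
		if lowerL <= 1 {
			return 1 - lowerL
		}
		return lowerL
	}
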
diff --git a/src/cmd/compile/internal/gc/obj.go b/src/cmd/compile/internal/gc/obj.go
new file mode 100644
index 0000000..74e4c0a
--- /dev/null
+++ b/src/cmd/compile/internal/gc/obj.go
@@ -0,0 +1,312 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/noder"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/archive"
+ "cmd/internal/bio"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "encoding/json"
+ "fmt"
+)
+
+// These modes say which kind of object file to generate.
+// The default use of the toolchain is to set both bits,
+// generating a combined compiler+linker object, one that
+// serves to describe the package to both the compiler and the linker.
+// In fact the compiler and linker read nearly disjoint sections of
+// that file, though, so in a distributed build setting it can be more
+// efficient to split the output into two files, supplying the compiler
+// object only to future compilations and the linker object only to
+// future links.
+//
+// By default a combined object is written, but if -linkobj is specified
+// on the command line then the default -o output is a compiler object
+// and the -linkobj output is a linker object.
+const (
+ modeCompilerObj = 1 << iota
+ modeLinkerObj
+)
+
+func dumpobj() {
+ if base.Flag.LinkObj == "" {
+ dumpobj1(base.Flag.LowerO, modeCompilerObj|modeLinkerObj)
+ return
+ }
+ dumpobj1(base.Flag.LowerO, modeCompilerObj)
+ dumpobj1(base.Flag.LinkObj, modeLinkerObj)
+}
+
+func dumpobj1(outfile string, mode int) {
+ bout, err := bio.Create(outfile)
+ if err != nil {
+ base.FlushErrors()
+ fmt.Printf("can't create %s: %v\n", outfile, err)
+ base.ErrorExit()
+ }
+ defer bout.Close()
+ bout.WriteString("!<arch>\n")
+
+ if mode&modeCompilerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpCompilerObj(bout)
+ finishArchiveEntry(bout, start, "__.PKGDEF")
+ }
+ if mode&modeLinkerObj != 0 {
+ start := startArchiveEntry(bout)
+ dumpLinkerObj(bout)
+ finishArchiveEntry(bout, start, "_go_.o")
+ }
+}
+
+func printObjHeader(bout *bio.Writer) {
+ bout.WriteString(objabi.HeaderString())
+ if base.Flag.BuildID != "" {
+ fmt.Fprintf(bout, "build id %q\n", base.Flag.BuildID)
+ }
+ if types.LocalPkg.Name == "main" {
+ fmt.Fprintf(bout, "main\n")
+ }
+ fmt.Fprintf(bout, "\n") // header ends with blank line
+}
+
+func startArchiveEntry(bout *bio.Writer) int64 {
+ var arhdr [archive.HeaderSize]byte
+ bout.Write(arhdr[:])
+ return bout.Offset()
+}
+
+func finishArchiveEntry(bout *bio.Writer, start int64, name string) {
+ bout.Flush()
+ size := bout.Offset() - start
+ if size&1 != 0 {
+ bout.WriteByte(0)
+ }
+ bout.MustSeek(start-archive.HeaderSize, 0)
+
+ var arhdr [archive.HeaderSize]byte
+ archive.FormatHeader(arhdr[:], name, size)
+ bout.Write(arhdr[:])
+ bout.Flush()
+ bout.MustSeek(start+size+(size&1), 0)
+}
+
+func dumpCompilerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+ noder.WriteExports(bout)
+}
+
+func dumpdata() {
+ numExterns := len(typecheck.Target.Externs)
+ numDecls := len(typecheck.Target.Decls)
+
+ dumpglobls(typecheck.Target.Externs)
+ reflectdata.CollectPTabs()
+ numExports := len(typecheck.Target.Exports)
+ addsignats(typecheck.Target.Externs)
+ reflectdata.WriteRuntimeTypes()
+ reflectdata.WriteTabs()
+ numPTabs := reflectdata.CountPTabs()
+ reflectdata.WriteImportStrings()
+ reflectdata.WriteBasicTypes()
+ dumpembeds()
+
+ // Calls to WriteRuntimeTypes can generate functions,
+ // like method wrappers and hash and equality routines.
+ // Compile any generated functions, process any new resulting types, repeat.
+ // This can't loop forever, because there is no way to generate an infinite
+ // number of types in a finite amount of code.
+ // In the typical case, we loop 0 or 1 times.
+ // It was not until issue 24761 that we found any code that required a loop at all.
+ for {
+ for i := numDecls; i < len(typecheck.Target.Decls); i++ {
+ if n, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
+ enqueueFunc(n)
+ }
+ }
+ numDecls = len(typecheck.Target.Decls)
+ compileFunctions()
+ reflectdata.WriteRuntimeTypes()
+ if numDecls == len(typecheck.Target.Decls) {
+ break
+ }
+ }
+
+ // Dump extra globals.
+ dumpglobls(typecheck.Target.Externs[numExterns:])
+
+ if reflectdata.ZeroSize > 0 {
+ zero := base.PkgLinksym("go.map", "zero", obj.ABI0)
+ objw.Global(zero, int32(reflectdata.ZeroSize), obj.DUPOK|obj.RODATA)
+ zero.Set(obj.AttrStatic, true)
+ }
+
+ staticdata.WriteFuncSyms()
+ addGCLocals()
+
+ if numExports != len(typecheck.Target.Exports) {
+ base.Fatalf("Target.Exports changed after compile functions loop")
+ }
+ newNumPTabs := reflectdata.CountPTabs()
+ if newNumPTabs != numPTabs {
+ base.Fatalf("ptabs changed after compile functions loop")
+ }
+}
+
+func dumpLinkerObj(bout *bio.Writer) {
+ printObjHeader(bout)
+
+ if len(typecheck.Target.CgoPragmas) != 0 {
+ // write empty export section; must be before cgo section
+ fmt.Fprintf(bout, "\n$$\n\n$$\n\n")
+ fmt.Fprintf(bout, "\n$$ // cgo\n")
+ if err := json.NewEncoder(bout).Encode(typecheck.Target.CgoPragmas); err != nil {
+ base.Fatalf("serializing pragcgobuf: %v", err)
+ }
+ fmt.Fprintf(bout, "\n$$\n\n")
+ }
+
+ fmt.Fprintf(bout, "\n!\n")
+
+ obj.WriteObjFile(base.Ctxt, bout)
+}
+
+func dumpGlobal(n *ir.Name) {
+ if n.Type() == nil {
+ base.Fatalf("external %v nil type\n", n)
+ }
+ if n.Class == ir.PFUNC {
+ return
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ types.CalcSize(n.Type())
+ ggloblnod(n)
+ base.Ctxt.DwarfGlobal(base.Ctxt.Pkgpath, types.TypeSymName(n.Type()), n.Linksym())
+}
+
+func dumpGlobalConst(n ir.Node) {
+ // only export typed constants
+ t := n.Type()
+ if t == nil {
+ return
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ // only export integer constants for now
+ if !t.IsInteger() {
+ return
+ }
+ v := n.Val()
+ if t.IsUntyped() {
+ // Export untyped integers as int (if they fit).
+ t = types.Types[types.TINT]
+ if ir.ConstOverflow(v, t) {
+ return
+ }
+ } else {
+ // If the type of the constant is an instantiated generic, we need to emit
+ // that type so the linker knows about it. See issue 51245.
+ _ = reflectdata.TypeLinksym(t)
+ }
+ base.Ctxt.DwarfIntConst(base.Ctxt.Pkgpath, n.Sym().Name, types.TypeSymName(t), ir.IntVal(t, v))
+}
+
+func dumpglobls(externs []ir.Node) {
+ // add globals
+ for _, n := range externs {
+ switch n.Op() {
+ case ir.ONAME:
+ dumpGlobal(n.(*ir.Name))
+ case ir.OLITERAL:
+ dumpGlobalConst(n)
+ }
+ }
+}
+
+// addGCLocals adds gcargs, gclocals, stack object, and other per-function metadata symbols to Ctxt.Data.
+//
+// This is done during the sequential phase after compilation, since
+// global symbols can't be declared during parallel compilation.
+func addGCLocals() {
+ for _, s := range base.Ctxt.Text {
+ fn := s.Func()
+ if fn == nil {
+ continue
+ }
+ for _, gcsym := range []*obj.LSym{fn.GCArgs, fn.GCLocals} {
+ if gcsym != nil && !gcsym.OnList() {
+ objw.Global(gcsym, int32(len(gcsym.P)), obj.RODATA|obj.DUPOK)
+ }
+ }
+ if x := fn.StackObjects; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.OpenCodedDeferInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ }
+ if x := fn.ArgInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.ArgLiveInfo; x != nil {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ if x := fn.WrapInfo; x != nil && !x.OnList() {
+ objw.Global(x, int32(len(x.P)), obj.RODATA|obj.DUPOK)
+ x.Set(obj.AttrStatic, true)
+ }
+ }
+}
+
+func ggloblnod(nam *ir.Name) {
+ s := nam.Linksym()
+ s.Gotype = reflectdata.TypeLinksym(nam.Type())
+ flags := 0
+ if nam.Readonly() {
+ flags = obj.RODATA
+ }
+ if nam.Type() != nil && !nam.Type().HasPointers() {
+ flags |= obj.NOPTR
+ }
+ base.Ctxt.Globl(s, nam.Type().Size(), flags)
+ if nam.LibfuzzerExtraCounter() {
+ s.Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+ }
+ if nam.Sym().Linkname != "" {
+ // Make sure linkname'd symbol is non-package. When a symbol is
+	// both imported and linkname'd, s.Pkg may not be set to "_" in
+ // types.Sym.Linksym because LSym already exists. Set it here.
+ s.Pkg = "_"
+ }
+}
+
+func dumpembeds() {
+ for _, v := range typecheck.Target.Embeds {
+ staticdata.WriteEmbed(v)
+ }
+}
+
+func addsignats(dcls []ir.Node) {
+ // copy types from dcl list to signatset
+ for _, n := range dcls {
+ if n.Op() == ir.OTYPE {
+ reflectdata.NeedRuntimeType(n.Type())
+ }
+ }
+}
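
startArchiveEntry and finishArchiveEntry above implement a write-then-backfill pattern for ar archive members: reserve space for the member header, stream the payload, then seek back and fill the header in once the size is known, padding to an even size as the ar format requires. A standalone sketch of the same idea using a plain os.File and a hand-formatted header (hypothetical; the real code goes through cmd/internal/archive and bio.Writer):

	package main

	import (
		"fmt"
		"io"
		"os"
	)

	func main() {
		const headerSize = 60 // size of an ar member header (cf. archive.HeaderSize)

		f, err := os.CreateTemp("", "ar-sketch-*")
		if err != nil {
			panic(err)
		}
		defer os.Remove(f.Name())
		defer f.Close()

		// Reserve room for the header, then stream the payload.
		f.Write(make([]byte, headerSize))
		start, _ := f.Seek(0, io.SeekCurrent)
		f.WriteString("payload bytes of unknown length")
		end, _ := f.Seek(0, io.SeekCurrent)
		size := end - start
		if size&1 != 0 {
			f.WriteString("\x00") // members are padded to an even size
		}

		// Seek back and fill in the header now that the size is known.
		f.Seek(start-headerSize, io.SeekStart)
		fmt.Fprintf(f, "%-16s%-12s%-6s%-6s%-8s%-10d`\n", "_go_.o", "0", "0", "0", "0644", size)

		// Reposition past the payload (and padding) for the next member.
		f.Seek(start+size+(size&1), io.SeekStart)
	}
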
diff --git a/src/cmd/compile/internal/gc/pprof.go b/src/cmd/compile/internal/gc/pprof.go
new file mode 100644
index 0000000..5f9b030
--- /dev/null
+++ b/src/cmd/compile/internal/gc/pprof.go
@@ -0,0 +1,14 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.8
+// +build go1.8
+
+package gc
+
+import "runtime"
+
+func startMutexProfiling() {
+ runtime.SetMutexProfileFraction(1)
+}
diff --git a/src/cmd/compile/internal/gc/trace.go b/src/cmd/compile/internal/gc/trace.go
new file mode 100644
index 0000000..8cdbd4b
--- /dev/null
+++ b/src/cmd/compile/internal/gc/trace.go
@@ -0,0 +1,30 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package gc
+
+import (
+ "os"
+ tracepkg "runtime/trace"
+
+ "cmd/compile/internal/base"
+)
+
+func init() {
+ traceHandler = traceHandlerGo17
+}
+
+func traceHandlerGo17(traceprofile string) {
+ f, err := os.Create(traceprofile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ if err := tracepkg.Start(f); err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(tracepkg.Stop)
+}
diff --git a/src/cmd/compile/internal/gc/util.go b/src/cmd/compile/internal/gc/util.go
new file mode 100644
index 0000000..56fd137
--- /dev/null
+++ b/src/cmd/compile/internal/gc/util.go
@@ -0,0 +1,76 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package gc
+
+import (
+ "os"
+ "runtime"
+ "runtime/pprof"
+
+ "cmd/compile/internal/base"
+)
+
+var traceHandler func(string)
+
+func startProfile() {
+ if base.Flag.CPUProfile != "" {
+ f, err := os.Create(base.Flag.CPUProfile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ if err := pprof.StartCPUProfile(f); err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(pprof.StopCPUProfile)
+ }
+ if base.Flag.MemProfile != "" {
+ if base.Flag.MemProfileRate != 0 {
+ runtime.MemProfileRate = base.Flag.MemProfileRate
+ }
+ f, err := os.Create(base.Flag.MemProfile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ base.AtExit(func() {
+ // Profile all outstanding allocations.
+ runtime.GC()
+ // compilebench parses the memory profile to extract memstats,
+ // which are only written in the legacy pprof format.
+ // See golang.org/issue/18641 and runtime/pprof/pprof.go:writeHeap.
+ const writeLegacyFormat = 1
+ if err := pprof.Lookup("heap").WriteTo(f, writeLegacyFormat); err != nil {
+ base.Fatalf("%v", err)
+ }
+ })
+ } else {
+ // Not doing memory profiling; disable it entirely.
+ runtime.MemProfileRate = 0
+ }
+ if base.Flag.BlockProfile != "" {
+ f, err := os.Create(base.Flag.BlockProfile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ runtime.SetBlockProfileRate(1)
+ base.AtExit(func() {
+ pprof.Lookup("block").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if base.Flag.MutexProfile != "" {
+ f, err := os.Create(base.Flag.MutexProfile)
+ if err != nil {
+ base.Fatalf("%v", err)
+ }
+ startMutexProfiling()
+ base.AtExit(func() {
+ pprof.Lookup("mutex").WriteTo(f, 0)
+ f.Close()
+ })
+ }
+ if base.Flag.TraceProfile != "" && traceHandler != nil {
+ traceHandler(base.Flag.TraceProfile)
+ }
+}
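
startProfile wires each profiling flag to an output file plus a base.AtExit hook that flushes the profile when the compiler exits. The same shape, reduced to a standalone CPU-profile example with a plain defer in place of base.AtExit (hypothetical file name, not compiler code):

	package main

	import (
		"log"
		"os"
		"runtime/pprof"
	)

	func main() {
		f, err := os.Create("cpu.prof") // stand-in for the -cpuprofile argument
		if err != nil {
			log.Fatal(err)
		}
		defer f.Close()

		if err := pprof.StartCPUProfile(f); err != nil {
			log.Fatal(err)
		}
		defer pprof.StopCPUProfile() // the compiler registers this via base.AtExit instead

		// ... work being profiled ...
	}
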
diff --git a/src/cmd/compile/internal/importer/exportdata.go b/src/cmd/compile/internal/importer/exportdata.go
new file mode 100644
index 0000000..6a672be
--- /dev/null
+++ b/src/cmd/compile/internal/importer/exportdata.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements FindExportData.
+
+package importer
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+func readGopackHeader(r *bufio.Reader) (name string, size int, err error) {
+ // See $GOROOT/include/ar.h.
+ hdr := make([]byte, 16+12+6+6+8+10+2)
+ _, err = io.ReadFull(r, hdr)
+ if err != nil {
+ return
+ }
+ // leave for debugging
+ if false {
+ fmt.Printf("header: %s", hdr)
+ }
+ s := strings.TrimSpace(string(hdr[16+12+6+6+8:][:10]))
+ size, err = strconv.Atoi(s)
+ if err != nil || hdr[len(hdr)-2] != '`' || hdr[len(hdr)-1] != '\n' {
+ err = fmt.Errorf("invalid archive header")
+ return
+ }
+ name = strings.TrimSpace(string(hdr[:16]))
+ return
+}
+
+// FindExportData positions the reader r at the beginning of the
+// export data section of an underlying GC-created object/archive
+// file by reading from it. The reader must be positioned at the
+// start of the file before calling this function. The hdr result
+// is the string before the export data, either "$$" or "$$B".
+//
+func FindExportData(r *bufio.Reader) (hdr string, err error) {
+ // Read first line to make sure this is an object file.
+ line, err := r.ReadSlice('\n')
+ if err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+
+ if string(line) == "!<arch>\n" {
+ // Archive file. Scan to __.PKGDEF.
+ var name string
+ if name, _, err = readGopackHeader(r); err != nil {
+ return
+ }
+
+ // First entry should be __.PKGDEF.
+ if name != "__.PKGDEF" {
+ err = fmt.Errorf("go archive is missing __.PKGDEF")
+ return
+ }
+
+ // Read first line of __.PKGDEF data, so that line
+ // is once again the first line of the input.
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+
+ // Now at __.PKGDEF in archive or still at beginning of file.
+ // Either way, line should begin with "go object ".
+ if !strings.HasPrefix(string(line), "go object ") {
+ err = fmt.Errorf("not a Go object file")
+ return
+ }
+
+ // Skip over object header to export data.
+ // Begins after first line starting with $$.
+ for line[0] != '$' {
+ if line, err = r.ReadSlice('\n'); err != nil {
+ err = fmt.Errorf("can't find export data (%v)", err)
+ return
+ }
+ }
+ hdr = string(line)
+
+ return
+}
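
A hedged usage sketch for FindExportData: a hypothetical helper (not part of this file, and assuming an extra "os" import) that opens a compiled package archive and reports which export-data header it carries.

	// exportHeader returns the export-data header of the package archive at
	// path: "$$\n" for the old textual format, "$$B\n" for the binary/indexed
	// format handled by the importer.
	func exportHeader(path string) (string, error) {
		f, err := os.Open(path)
		if err != nil {
			return "", err
		}
		defer f.Close()
		return FindExportData(bufio.NewReader(f))
	}
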
diff --git a/src/cmd/compile/internal/importer/gcimporter.go b/src/cmd/compile/internal/importer/gcimporter.go
new file mode 100644
index 0000000..ff40be6
--- /dev/null
+++ b/src/cmd/compile/internal/importer/gcimporter.go
@@ -0,0 +1,174 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package importer implements Import for gc-generated object files.
+package importer
+
+import (
+ "bufio"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/build"
+ "io"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "strings"
+)
+
+// debugging/development support
+const debug = false
+
+var pkgExts = [...]string{".a", ".o"}
+
+// FindPkg returns the filename and unique package id for an import
+// path based on package information provided by build.Import (using
+// the build.Default build.Context). A relative srcDir is interpreted
+// relative to the current working directory.
+// If no file was found, an empty filename is returned.
+//
+func FindPkg(path, srcDir string) (filename, id string) {
+ if path == "" {
+ return
+ }
+
+ var noext string
+ switch {
+ default:
+ // "x" -> "$GOPATH/pkg/$GOOS_$GOARCH/x.ext", "x"
+ // Don't require the source files to be present.
+ if abs, err := filepath.Abs(srcDir); err == nil { // see issue 14282
+ srcDir = abs
+ }
+ bp, _ := build.Import(path, srcDir, build.FindOnly|build.AllowBinary)
+ if bp.PkgObj == "" {
+ id = path // make sure we have an id to print in error message
+ return
+ }
+ noext = strings.TrimSuffix(bp.PkgObj, ".a")
+ id = bp.ImportPath
+
+ case build.IsLocalImport(path):
+ // "./x" -> "/this/directory/x.ext", "/this/directory/x"
+ noext = filepath.Join(srcDir, path)
+ id = noext
+
+ case filepath.IsAbs(path):
+ // for completeness only - go/build.Import
+ // does not support absolute imports
+ // "/x" -> "/x.ext", "/x"
+ noext = path
+ id = path
+ }
+
+ if false { // for debugging
+ if path != id {
+ fmt.Printf("%s -> %s\n", path, id)
+ }
+ }
+
+ // try extensions
+ for _, ext := range pkgExts {
+ filename = noext + ext
+ if f, err := os.Stat(filename); err == nil && !f.IsDir() {
+ return
+ }
+ }
+
+ filename = "" // not found
+ return
+}
+
+// Import imports a gc-generated package given its import path and srcDir, adds
+// the corresponding package object to the packages map, and returns the object.
+// The packages map must contain all packages already imported.
+//
+func Import(packages map[string]*types2.Package, path, srcDir string, lookup func(path string) (io.ReadCloser, error)) (pkg *types2.Package, err error) {
+ var rc io.ReadCloser
+ var id string
+ if lookup != nil {
+ // With custom lookup specified, assume that caller has
+ // converted path to a canonical import path for use in the map.
+ if path == "unsafe" {
+ return types2.Unsafe, nil
+ }
+ id = path
+
+ // No need to re-import if the package was imported completely before.
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+ f, err := lookup(path)
+ if err != nil {
+ return nil, err
+ }
+ rc = f
+ } else {
+ var filename string
+ filename, id = FindPkg(path, srcDir)
+ if filename == "" {
+ if path == "unsafe" {
+ return types2.Unsafe, nil
+ }
+ return nil, fmt.Errorf("can't find import: %q", id)
+ }
+
+ // no need to re-import if the package was imported completely before
+ if pkg = packages[id]; pkg != nil && pkg.Complete() {
+ return
+ }
+
+ // open file
+ f, err := os.Open(filename)
+ if err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err != nil {
+ // add file name to error
+ err = fmt.Errorf("%s: %v", filename, err)
+ }
+ }()
+ rc = f
+ }
+ defer rc.Close()
+
+ var hdr string
+ buf := bufio.NewReader(rc)
+ if hdr, err = FindExportData(buf); err != nil {
+ return
+ }
+
+ switch hdr {
+ case "$$\n":
+ err = fmt.Errorf("import %q: old textual export format no longer supported (recompile library)", path)
+
+ case "$$B\n":
+ var data []byte
+ data, err = ioutil.ReadAll(buf)
+ if err != nil {
+ break
+ }
+
+ // The indexed export format starts with an 'i'; the older
+ // binary export format starts with a 'c', 'd', or 'v'
+ // (from "version"). Select appropriate importer.
+ if len(data) > 0 && data[0] == 'i' {
+ pkg, err = ImportData(packages, string(data[1:]), id)
+ } else {
+ err = fmt.Errorf("import %q: old binary export format no longer supported (recompile library)", path)
+ }
+
+ default:
+ err = fmt.Errorf("import %q: unknown export data header: %q", path, hdr)
+ }
+
+ return
+}
+
+type byPath []*types2.Package
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byPath) Less(i, j int) bool { return a[i].Path() < a[j].Path() }
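
A small usage sketch for FindPkg (hypothetical helper; the actual file found depends on the local pkg directory layout):

	// findCompiled reports where the importer would look for a compiled
	// package, or an error if no package file is installed.
	func findCompiled(path, srcDir string) (string, error) {
		filename, id := FindPkg(path, srcDir)
		if filename == "" {
			return "", fmt.Errorf("no compiled package file for %q", id)
		}
		return filename, nil
	}
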
diff --git a/src/cmd/compile/internal/importer/gcimporter_test.go b/src/cmd/compile/internal/importer/gcimporter_test.go
new file mode 100644
index 0000000..5d80db2
--- /dev/null
+++ b/src/cmd/compile/internal/importer/gcimporter_test.go
@@ -0,0 +1,613 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importer
+
+import (
+ "bytes"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "internal/goexperiment"
+ "internal/testenv"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+)
+
+// skipSpecialPlatforms causes the test to be skipped for platforms where
+// builders (build.golang.org) don't have access to compiled packages for
+// import.
+func skipSpecialPlatforms(t *testing.T) {
+ switch platform := runtime.GOOS + "-" + runtime.GOARCH; platform {
+ case "darwin-arm64":
+ t.Skipf("no compiled packages available for import on %s", platform)
+ }
+}
+
+// compile runs the compiler on filename, with dirname as the working directory,
+// and writes the output file to outdirname.
+func compile(t *testing.T, dirname, filename, outdirname string) string {
+ // filename must end with ".go"
+ if !strings.HasSuffix(filename, ".go") {
+ t.Fatalf("filename doesn't end in .go: %s", filename)
+ }
+ basename := filepath.Base(filename)
+ outname := filepath.Join(outdirname, basename[:len(basename)-2]+"o")
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-o", outname, filename)
+ cmd.Dir = dirname
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Logf("%s", out)
+ t.Fatalf("go tool compile %s failed: %s", filename, err)
+ }
+ return outname
+}
+
+func testPath(t *testing.T, path, srcDir string) *types2.Package {
+ t0 := time.Now()
+ pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
+ if err != nil {
+ t.Errorf("testPath(%s): %s", path, err)
+ return nil
+ }
+ t.Logf("testPath(%s): %v", path, time.Since(t0))
+ return pkg
+}
+
+const maxTime = 30 * time.Second
+
+func testDir(t *testing.T, dir string, endTime time.Time) (nimports int) {
+ dirname := filepath.Join(runtime.GOROOT(), "pkg", runtime.GOOS+"_"+runtime.GOARCH, dir)
+ list, err := os.ReadDir(dirname)
+ if err != nil {
+ t.Fatalf("testDir(%s): %s", dirname, err)
+ }
+ for _, f := range list {
+ if time.Now().After(endTime) {
+ t.Log("testing time used up")
+ return
+ }
+ switch {
+ case !f.IsDir():
+ // try extensions
+ for _, ext := range pkgExts {
+ if strings.HasSuffix(f.Name(), ext) {
+ name := f.Name()[0 : len(f.Name())-len(ext)] // remove extension
+ if testPath(t, filepath.Join(dir, name), dir) != nil {
+ nimports++
+ }
+ }
+ }
+ case f.IsDir():
+ nimports += testDir(t, filepath.Join(dir, f.Name()), endTime)
+ }
+ }
+ return
+}
+
+func mktmpdir(t *testing.T) string {
+ tmpdir, err := os.MkdirTemp("", "gcimporter_test")
+ if err != nil {
+ t.Fatal("mktmpdir:", err)
+ }
+ if err := os.Mkdir(filepath.Join(tmpdir, "testdata"), 0700); err != nil {
+ os.RemoveAll(tmpdir)
+ t.Fatal("mktmpdir:", err)
+ }
+ return tmpdir
+}
+
+func TestImportTestdata(t *testing.T) {
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ testfiles := map[string][]string{
+ "exports.go": {"go/ast", "go/token"},
+ }
+ if !goexperiment.Unified {
+ testfiles["generics.go"] = nil
+ }
+
+ for testfile, wantImports := range testfiles {
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ compile(t, "testdata", testfile, filepath.Join(tmpdir, "testdata"))
+ path := "./testdata/" + strings.TrimSuffix(testfile, ".go")
+
+ if pkg := testPath(t, path, tmpdir); pkg != nil {
+ // The package's Imports list must include all packages
+ // explicitly imported by testfile, plus all packages
+ // referenced indirectly via exported objects in testfile.
+ got := fmt.Sprint(pkg.Imports())
+ for _, want := range wantImports {
+ if !strings.Contains(got, want) {
+ t.Errorf(`Package("exports").Imports() = %s, does not contain %s`, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestVersionHandling(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ const dir = "./testdata/versions"
+ list, err := os.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ corruptdir := filepath.Join(tmpdir, "testdata", "versions")
+ if err := os.Mkdir(corruptdir, 0700); err != nil {
+ t.Fatal(err)
+ }
+
+ for _, f := range list {
+ name := f.Name()
+ if !strings.HasSuffix(name, ".a") {
+ continue // not a package file
+ }
+ if strings.Contains(name, "corrupted") {
+ continue // don't process a leftover corrupted file
+ }
+ pkgpath := "./" + name[:len(name)-2]
+
+ if testing.Verbose() {
+ t.Logf("importing %s", name)
+ }
+
+ // test that export data can be imported
+ _, err := Import(make(map[string]*types2.Package), pkgpath, dir, nil)
+ if err != nil {
+ // ok to fail if it fails with a no longer supported error for select files
+ if strings.Contains(err.Error(), "no longer supported") {
+ switch name {
+ case "test_go1.7_0.a", "test_go1.7_1.a",
+ "test_go1.8_4.a", "test_go1.8_5.a",
+ "test_go1.11_6b.a", "test_go1.11_999b.a":
+ continue
+ }
+ // fall through
+ }
+ // ok to fail if it fails with a newer version error for select files
+ if strings.Contains(err.Error(), "newer version") {
+ switch name {
+ case "test_go1.11_999i.a":
+ continue
+ }
+ // fall through
+ }
+ t.Errorf("import %q failed: %v", pkgpath, err)
+ continue
+ }
+
+ // create file with corrupted export data
+ // 1) read file
+ data, err := os.ReadFile(filepath.Join(dir, name))
+ if err != nil {
+ t.Fatal(err)
+ }
+ // 2) find export data
+ i := bytes.Index(data, []byte("\n$$B\n")) + 5
+ j := bytes.Index(data[i:], []byte("\n$$\n")) + i
+ if i < 0 || j < 0 || i > j {
+ t.Fatalf("export data section not found (i = %d, j = %d)", i, j)
+ }
+ // 3) corrupt the data (increment every 7th byte)
+ for k := j - 13; k >= i; k -= 7 {
+ data[k]++
+ }
+ // 4) write the file
+ pkgpath += "_corrupted"
+ filename := filepath.Join(corruptdir, pkgpath) + ".a"
+ os.WriteFile(filename, data, 0666)
+
+ // test that importing the corrupted file results in an error
+ _, err = Import(make(map[string]*types2.Package), pkgpath, corruptdir, nil)
+ if err == nil {
+ t.Errorf("import corrupted %q succeeded", pkgpath)
+ } else if msg := err.Error(); !strings.Contains(msg, "version skew") {
+ t.Errorf("import %q error incorrect (%s)", pkgpath, msg)
+ }
+ }
+}
+
+func TestImportStdLib(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ dt := maxTime
+ if testing.Short() && testenv.Builder() == "" {
+ dt = 10 * time.Millisecond
+ }
+ nimports := testDir(t, "", time.Now().Add(dt)) // installed packages
+ t.Logf("tested %d imports", nimports)
+}
+
+var importedObjectTests = []struct {
+ name string
+ want string
+}{
+ // non-interfaces
+ {"crypto.Hash", "type Hash uint"},
+ {"go/ast.ObjKind", "type ObjKind int"},
+ {"go/types.Qualifier", "type Qualifier func(*Package) string"},
+ {"go/types.Comparable", "func Comparable(T Type) bool"},
+ {"math.Pi", "const Pi untyped float"},
+ {"math.Sin", "func Sin(x float64) float64"},
+ {"go/ast.NotNilFilter", "func NotNilFilter(_ string, v reflect.Value) bool"},
+ {"go/internal/gcimporter.FindPkg", "func FindPkg(path string, srcDir string) (filename string, id string)"},
+
+ // interfaces
+ {"context.Context", "type Context interface{Deadline() (deadline time.Time, ok bool); Done() <-chan struct{}; Err() error; Value(key any) any}"},
+ {"crypto.Decrypter", "type Decrypter interface{Decrypt(rand io.Reader, msg []byte, opts DecrypterOpts) (plaintext []byte, err error); Public() PublicKey}"},
+ {"encoding.BinaryMarshaler", "type BinaryMarshaler interface{MarshalBinary() (data []byte, err error)}"},
+ {"io.Reader", "type Reader interface{Read(p []byte) (n int, err error)}"},
+ {"io.ReadWriter", "type ReadWriter interface{Reader; Writer}"},
+ {"go/ast.Node", "type Node interface{End() go/token.Pos; Pos() go/token.Pos}"},
+ {"go/types.Type", "type Type interface{String() string; Underlying() Type}"},
+}
+
+func TestImportedTypes(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ for _, test := range importedObjectTests {
+ s := strings.Split(test.name, ".")
+ if len(s) != 2 {
+ t.Fatal("inconsistent test data")
+ }
+ importPath := s[0]
+ objName := s[1]
+
+ pkg, err := Import(make(map[string]*types2.Package), importPath, ".", nil)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup(objName)
+ if obj == nil {
+ t.Errorf("%s: object not found", test.name)
+ continue
+ }
+
+ got := types2.ObjectString(obj, types2.RelativeTo(pkg))
+ if got != test.want {
+ t.Errorf("%s: got %q; want %q", test.name, got, test.want)
+ }
+
+ if named, _ := obj.Type().(*types2.Named); named != nil {
+ verifyInterfaceMethodRecvs(t, named, 0)
+ }
+ }
+}
+
+// verifyInterfaceMethodRecvs verifies that method receiver types
+// are named if the methods belong to a named interface type.
+func verifyInterfaceMethodRecvs(t *testing.T, named *types2.Named, level int) {
+	// avoid endless recursion in case of an embedding bug that leads to a cycle
+ if level > 10 {
+ t.Errorf("%s: embeds itself", named)
+ return
+ }
+
+ iface, _ := named.Underlying().(*types2.Interface)
+ if iface == nil {
+ return // not an interface
+ }
+
+ // check explicitly declared methods
+ for i := 0; i < iface.NumExplicitMethods(); i++ {
+ m := iface.ExplicitMethod(i)
+ recv := m.Type().(*types2.Signature).Recv()
+ if recv == nil {
+ t.Errorf("%s: missing receiver type", m)
+ continue
+ }
+ if recv.Type() != named {
+ t.Errorf("%s: got recv type %s; want %s", m, recv.Type(), named)
+ }
+ }
+
+ // check embedded interfaces (if they are named, too)
+ for i := 0; i < iface.NumEmbeddeds(); i++ {
+ // embedding of interfaces cannot have cycles; recursion will terminate
+ if etype, _ := iface.EmbeddedType(i).(*types2.Named); etype != nil {
+ verifyInterfaceMethodRecvs(t, etype, level+1)
+ }
+ }
+}
+
+func TestIssue5815(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ pkg := importPkg(t, "strings", ".")
+
+ scope := pkg.Scope()
+ for _, name := range scope.Names() {
+ obj := scope.Lookup(name)
+ if obj.Pkg() == nil {
+ t.Errorf("no pkg for %s", obj)
+ }
+ if tname, _ := obj.(*types2.TypeName); tname != nil {
+ named := tname.Type().(*types2.Named)
+ for i := 0; i < named.NumMethods(); i++ {
+ m := named.Method(i)
+ if m.Pkg() == nil {
+ t.Errorf("no pkg for %s", m)
+ }
+ }
+ }
+ }
+}
+
+// Smoke test to ensure that imported methods get the correct package.
+func TestCorrectMethodPackage(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ imports := make(map[string]*types2.Package)
+ _, err := Import(imports, "net/http", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ mutex := imports["sync"].Scope().Lookup("Mutex").(*types2.TypeName).Type()
+ obj, _, _ := types2.LookupFieldOrMethod(types2.NewPointer(mutex), false, nil, "Lock")
+ lock := obj.(*types2.Func)
+ if got, want := lock.Pkg().Path(), "sync"; got != want {
+ t.Errorf("got package path %q; want %q", got, want)
+ }
+}
+
+func TestIssue13566(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ testoutdir := filepath.Join(tmpdir, "testdata")
+
+ // b.go needs to be compiled from the output directory so that the compiler can
+ // find the compiled package a. We pass the full path to compile() so that we
+ // don't have to copy the file to that directory.
+ bpath, err := filepath.Abs(filepath.Join("testdata", "b.go"))
+ if err != nil {
+ t.Fatal(err)
+ }
+ compile(t, "testdata", "a.go", testoutdir)
+ compile(t, testoutdir, bpath, testoutdir)
+
+ // import must succeed (test for issue at hand)
+ pkg := importPkg(t, "./testdata/b", tmpdir)
+
+ // make sure all indirectly imported packages have names
+ for _, imp := range pkg.Imports() {
+ if imp.Name() == "" {
+ t.Errorf("no name for %s package", imp.Path())
+ }
+ }
+}
+
+func TestIssue13898(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // import go/internal/gcimporter which imports go/types partially
+ imports := make(map[string]*types2.Package)
+ _, err := Import(imports, "go/internal/gcimporter", ".", nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for go/types package
+ var goTypesPkg *types2.Package
+ for path, pkg := range imports {
+ if path == "go/types" {
+ goTypesPkg = pkg
+ break
+ }
+ }
+ if goTypesPkg == nil {
+ t.Fatal("go/types not found")
+ }
+
+ // look for go/types.Object type
+ obj := lookupObj(t, goTypesPkg.Scope(), "Object")
+ typ, ok := obj.Type().(*types2.Named)
+ if !ok {
+ t.Fatalf("go/types.Object type is %v; wanted named type", typ)
+ }
+
+ // lookup go/types.Object.Pkg method
+ m, index, indirect := types2.LookupFieldOrMethod(typ, false, nil, "Pkg")
+ if m == nil {
+ t.Fatalf("go/types.Object.Pkg not found (index = %v, indirect = %v)", index, indirect)
+ }
+
+ // the method must belong to go/types
+ if m.Pkg().Path() != "go/types" {
+ t.Fatalf("found %v; want go/types", m.Pkg())
+ }
+}
+
+func TestIssue15517(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+
+ compile(t, "testdata", "p.go", filepath.Join(tmpdir, "testdata"))
+
+ // Multiple imports of p must succeed without redeclaration errors.
+ // We use an import path that's not cleaned up so that the eventual
+ // file path for the package is different from the package path; this
+ // will expose the error if it is present.
+ //
+ // (Issue: Both the textual and the binary importer used the file path
+ // of the package to be imported as key into the shared packages map.
+ // However, the binary importer then used the package path to identify
+ // the imported package to mark it as complete; effectively marking the
+ // wrong package as complete. By using an "unclean" package path, the
+ // file and package path are different, exposing the problem if present.
+ // The same issue occurs with vendoring.)
+ imports := make(map[string]*types2.Package)
+ for i := 0; i < 3; i++ {
+ if _, err := Import(imports, "./././testdata/p", tmpdir, nil); err != nil {
+ t.Fatal(err)
+ }
+ }
+}
+
+func TestIssue15920(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue15920")
+}
+
+func TestIssue20046(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ // "./issue20046".V.M must exist
+ pkg := compileAndImportPkg(t, "issue20046")
+ obj := lookupObj(t, pkg.Scope(), "V")
+ if m, index, indirect := types2.LookupFieldOrMethod(obj.Type(), false, nil, "M"); m == nil {
+ t.Fatalf("V.M not found (index = %v, indirect = %v)", index, indirect)
+ }
+}
+func TestIssue25301(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue25301")
+}
+
+func TestIssue25596(t *testing.T) {
+ skipSpecialPlatforms(t)
+
+ // This package only handles gc export data.
+ if runtime.Compiler != "gc" {
+ t.Skipf("gc-built packages not available (compiler = %s)", runtime.Compiler)
+ }
+
+ // On windows, we have to set the -D option for the compiler to avoid having a drive
+ // letter and an illegal ':' in the import path - just skip it (see also issue #3483).
+ if runtime.GOOS == "windows" {
+ t.Skip("avoid dealing with relative paths/drive letters on windows")
+ }
+
+ compileAndImportPkg(t, "issue25596")
+}
+
+func importPkg(t *testing.T, path, srcDir string) *types2.Package {
+ pkg, err := Import(make(map[string]*types2.Package), path, srcDir, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return pkg
+}
+
+func compileAndImportPkg(t *testing.T, name string) *types2.Package {
+ tmpdir := mktmpdir(t)
+ defer os.RemoveAll(tmpdir)
+ compile(t, "testdata", name+".go", filepath.Join(tmpdir, "testdata"))
+ return importPkg(t, "./testdata/"+name, tmpdir)
+}
+
+func lookupObj(t *testing.T, scope *types2.Scope, name string) types2.Object {
+ if obj := scope.Lookup(name); obj != nil {
+ return obj
+ }
+ t.Fatalf("%s not found", name)
+ return nil
+}
diff --git a/src/cmd/compile/internal/importer/iimport.go b/src/cmd/compile/internal/importer/iimport.go
new file mode 100644
index 0000000..576036b
--- /dev/null
+++ b/src/cmd/compile/internal/importer/iimport.go
@@ -0,0 +1,804 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See cmd/compile/internal/typecheck/iexport.go for the export data format.
+
+package importer
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types2"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "io"
+ "math/big"
+ "sort"
+ "strings"
+)
+
+type intReader struct {
+ *strings.Reader
+ path string
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ errorf("import %q: read varint error: %v", r.path, err)
+ }
+ return i
+}
+
+// Keep this in sync with constants in iexport.go.
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+ iexportVersionGo1_18 = 2
+
+ iexportVersionCurrent = 2
+)
+
+type ident struct {
+ pkg *types2.Package
+ name string
+}
+
+const predeclReserved = 32
+
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType
+ unionType
+)
+
+const io_SeekCurrent = 1 // io.SeekCurrent (not defined in Go 1.4)
+
+// ImportData imports a package from the serialized package data
+// and returns a reference to the package.
+// If the export data version is not recognized or the format is otherwise
+// compromised, an error is returned.
+func ImportData(imports map[string]*types2.Package, data, path string) (pkg *types2.Package, err error) {
+ const currentVersion = iexportVersionCurrent
+ version := int64(-1)
+ defer func() {
+ if e := recover(); e != nil {
+ if version > currentVersion {
+ err = fmt.Errorf("cannot import %q (%v), export data is newer version - update tool", path, e)
+ } else {
+ err = fmt.Errorf("cannot import %q (%v), possibly version skew - reinstall package", path, e)
+ }
+ }
+ }()
+
+ r := &intReader{strings.NewReader(data), path}
+
+ version = int64(r.uint64())
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ errorf("unknown iexport format version %d", version)
+ }
+
+ sLen := int64(r.uint64())
+ dLen := int64(r.uint64())
+
+ whence, _ := r.Seek(0, io_SeekCurrent)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ r.Seek(sLen+dLen, io_SeekCurrent)
+
+ p := iimporter{
+ exportVersion: version,
+ ipath: path,
+ version: int(version),
+
+ stringData: stringData,
+ pkgCache: make(map[uint64]*types2.Package),
+ posBaseCache: make(map[uint64]*syntax.PosBase),
+
+ declData: declData,
+ pkgIndex: make(map[*types2.Package]map[string]uint64),
+ typCache: make(map[uint64]types2.Type),
+ // Separate map for typeparams, keyed by their package and unique
+ // name (name with subscript).
+ tparamIndex: make(map[ident]*types2.TypeParam),
+ }
+
+ for i, pt := range predeclared {
+ p.typCache[uint64(i)] = pt
+ }
+
+ pkgList := make([]*types2.Package, r.uint64())
+ for i := range pkgList {
+ pkgPathOff := r.uint64()
+ pkgPath := p.stringAt(pkgPathOff)
+ pkgName := p.stringAt(r.uint64())
+ pkgHeight := int(r.uint64())
+
+ if pkgPath == "" {
+ pkgPath = path
+ }
+ pkg := imports[pkgPath]
+ if pkg == nil {
+ pkg = types2.NewPackageHeight(pkgPath, pkgName, pkgHeight)
+ imports[pkgPath] = pkg
+ } else {
+ if pkg.Name() != pkgName {
+ errorf("conflicting names %s and %s for package %q", pkg.Name(), pkgName, path)
+ }
+ if pkg.Height() != pkgHeight {
+ errorf("conflicting heights %v and %v for package %q", pkg.Height(), pkgHeight, path)
+ }
+ }
+
+ p.pkgCache[pkgPathOff] = pkg
+
+ nameIndex := make(map[string]uint64)
+ for nSyms := r.uint64(); nSyms > 0; nSyms-- {
+ name := p.stringAt(r.uint64())
+ nameIndex[name] = r.uint64()
+ }
+
+ p.pkgIndex[pkg] = nameIndex
+ pkgList[i] = pkg
+ }
+
+ localpkg := pkgList[0]
+
+ names := make([]string, 0, len(p.pkgIndex[localpkg]))
+ for name := range p.pkgIndex[localpkg] {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+ for _, name := range names {
+ p.doDecl(localpkg, name)
+ }
+
+ // SetConstraint can't be called if the constraint type is not yet complete.
+ // When type params are created in the 'P' case of (*importReader).obj(),
+ // the associated constraint type may not be complete due to recursion.
+ // Therefore, we defer calling SetConstraint there, and call it here instead
+ // after all types are complete.
+ for _, d := range p.later {
+ d.t.SetConstraint(d.constraint)
+ }
+ // record all referenced packages as imports
+ list := append(([]*types2.Package)(nil), pkgList[1:]...)
+ sort.Sort(byPath(list))
+ localpkg.SetImports(list)
+
+ // package was imported completely and without errors
+ localpkg.MarkComplete()
+
+ return localpkg, nil
+}
+
+type setConstraintArgs struct {
+ t *types2.TypeParam
+ constraint types2.Type
+}
+
+type iimporter struct {
+ exportVersion int64
+ ipath string
+ version int
+
+ stringData string
+ pkgCache map[uint64]*types2.Package
+ posBaseCache map[uint64]*syntax.PosBase
+
+ declData string
+ pkgIndex map[*types2.Package]map[string]uint64
+ typCache map[uint64]types2.Type
+ tparamIndex map[ident]*types2.TypeParam
+
+ interfaceList []*types2.Interface
+
+ // Arguments for calls to SetConstraint that are deferred due to recursive types
+ later []setConstraintArgs
+}
+
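+// doDecl reads and declares the object name from pkg's declaration data,
+// unless it is already present in pkg's scope.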
+func (p *iimporter) doDecl(pkg *types2.Package, name string) {
+ // See if we've already imported this declaration.
+ if obj := pkg.Scope().Lookup(name); obj != nil {
+ return
+ }
+
+ off, ok := p.pkgIndex[pkg][name]
+ if !ok {
+ errorf("%v.%v not in index", pkg, name)
+ }
+
+ r := &importReader{p: p, currPkg: pkg}
+ // Reader.Reset is not available in Go 1.4.
+ // Use strings.NewReader for now.
+ // r.declReader.Reset(p.declData[off:])
+ r.declReader = *strings.NewReader(p.declData[off:])
+
+ r.obj(name)
+}
+
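+// stringAt returns the string stored at off in the string section:
+// a uvarint length followed by that many bytes.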
+func (p *iimporter) stringAt(off uint64) string {
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
+
+ slen, n := binary.Uvarint(x[:n])
+ if n <= 0 {
+ errorf("varint failed")
+ }
+ spos := off + uint64(n)
+ return p.stringData[spos : spos+slen]
+}
+
+func (p *iimporter) pkgAt(off uint64) *types2.Package {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+ path := p.stringAt(off)
+ errorf("missing package %q in %q", path, p.ipath)
+ return nil
+}
+
+func (p *iimporter) posBaseAt(off uint64) *syntax.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+ filename := p.stringAt(off)
+ posBase := syntax.NewTrimmedFileBase(filename, true)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
+func (p *iimporter) typAt(off uint64, base *types2.Named) types2.Type {
+ if t, ok := p.typCache[off]; ok && canReuse(base, t) {
+ return t
+ }
+
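+ // Offsets below predeclReserved refer to predeclared types, which were
+ // seeded into typCache in ImportData; larger offsets index into the
+ // declaration data, shifted by predeclReserved.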
+ if off < predeclReserved {
+ errorf("predeclared type missing from cache: %v", off)
+ }
+
+ r := &importReader{p: p}
+ // Reader.Reset is not available in Go 1.4.
+ // Use strings.NewReader for now.
+ // r.declReader.Reset(p.declData[off-predeclReserved:])
+ r.declReader = *strings.NewReader(p.declData[off-predeclReserved:])
+ t := r.doType(base)
+
+ if canReuse(base, t) {
+ p.typCache[off] = t
+ }
+ return t
+}
+
+// canReuse reports whether the type rhs on the RHS of the declaration for def
+// may be re-used.
+//
+// Specifically, if def is non-nil and rhs is an interface type with methods, it
+// may not be re-used because we have a convention of setting the receiver type
+// for interface methods to def.
+func canReuse(def *types2.Named, rhs types2.Type) bool {
+ if def == nil {
+ return true
+ }
+ iface, _ := rhs.(*types2.Interface)
+ if iface == nil {
+ return true
+ }
+ // Don't use iface.Empty() here as iface may not be complete.
+ return iface.NumEmbeddeds() == 0 && iface.NumExplicitMethods() == 0
+}
+
+type importReader struct {
+ p *iimporter
+ declReader strings.Reader
+ currPkg *types2.Package
+ prevPosBase *syntax.PosBase
+ prevLine int64
+ prevColumn int64
+}
+
+func (r *importReader) obj(name string) {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ r.declare(types2.NewTypeName(pos, r.currPkg, name, typ))
+
+ case 'C':
+ typ, val := r.value()
+
+ r.declare(types2.NewConst(pos, r.currPkg, name, typ, val))
+
+ case 'F', 'G':
+ var tparams []*types2.TypeParam
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ sig := r.signature(nil, nil, tparams)
+ r.declare(types2.NewFunc(pos, r.currPkg, name, sig))
+
+ case 'T', 'U':
+ // Types can be recursive. We need to set up a stub
+ // declaration before recursing.
+ obj := types2.NewTypeName(pos, r.currPkg, name, nil)
+ named := types2.NewNamed(obj, nil, nil)
+ // Declare obj before calling r.tparamList, so the new type name is recognized
+ // if used in the constraint of one of its own typeparams (see #48280).
+ r.declare(obj)
+ if tag == 'U' {
+ tparams := r.tparamList()
+ named.SetTypeParams(tparams)
+ }
+
+ underlying := r.p.typAt(r.uint64(), named).Underlying()
+ named.SetUnderlying(underlying)
+
+ if !isInterface(underlying) {
+ for n := r.uint64(); n > 0; n-- {
+ mpos := r.pos()
+ mname := r.ident()
+ recv := r.param()
+
+ // If the receiver has any targs, set those as the
+ // rparams of the method (since those are the
+ // typeparams being used in the method sig/body).
+ targs := baseType(recv.Type()).TypeArgs()
+ var rparams []*types2.TypeParam
+ if targs.Len() > 0 {
+ rparams = make([]*types2.TypeParam, targs.Len())
+ for i := range rparams {
+ rparams[i], _ = targs.At(i).(*types2.TypeParam)
+ }
+ }
+ msig := r.signature(recv, rparams, nil)
+
+ named.AddMethod(types2.NewFunc(mpos, r.currPkg, mname, msig))
+ }
+ }
+
+ case 'P':
+ // We need to "declare" a typeparam in order to have a name that
+ // can be referenced recursively (if needed) in the type param's
+ // bound.
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ name0 := typecheck.TparamName(name)
+ if name0 == "" {
+ errorf("malformed type parameter export name %s: missing prefix", name)
+ }
+
+ tn := types2.NewTypeName(pos, r.currPkg, name0, nil)
+ t := types2.NewTypeParam(tn, nil)
+ // To handle recursive references to the typeparam within its
+ // bound, save the partial type in tparamIndex before reading the bounds.
+ id := ident{r.currPkg, name}
+ r.p.tparamIndex[id] = t
+
+ var implicit bool
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ constraint := r.typ()
+ if implicit {
+ iface, _ := constraint.(*types2.Interface)
+ if iface == nil {
+ errorf("non-interface constraint marked implicit")
+ }
+ iface.MarkImplicit()
+ }
+ // The constraint type may not be complete, if we
+ // are in the middle of a type recursion involving type
+ // constraints. So, we defer SetConstraint until we have
+ // completely set up all types in ImportData.
+ r.p.later = append(r.p.later, setConstraintArgs{t: t, constraint: constraint})
+
+ case 'V':
+ typ := r.typ()
+
+ r.declare(types2.NewVar(pos, r.currPkg, name, typ))
+
+ default:
+ errorf("unexpected tag: %v", tag)
+ }
+}
+
+func (r *importReader) declare(obj types2.Object) {
+ obj.Pkg().Scope().Insert(obj)
+}
+
+func (r *importReader) value() (typ types2.Type, val constant.Value) {
+ typ = r.typ()
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind
+ _ = constant.Kind(r.int64())
+ }
+
+ switch b := typ.Underlying().(*types2.Basic); b.Info() & types2.IsConstType {
+ case types2.IsBoolean:
+ val = constant.MakeBool(r.bool())
+
+ case types2.IsString:
+ val = constant.MakeString(r.string())
+
+ case types2.IsInteger:
+ var x big.Int
+ r.mpint(&x, b)
+ val = constant.Make(&x)
+
+ case types2.IsFloat:
+ val = r.mpfloat(b)
+
+ case types2.IsComplex:
+ re := r.mpfloat(b)
+ im := r.mpfloat(b)
+ val = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+
+ default:
+ errorf("unexpected type %v", typ) // panics
+ panic("unreachable")
+ }
+
+ return
+}
+
+func intSize(b *types2.Basic) (signed bool, maxBytes uint) {
+ if (b.Info() & types2.IsUntyped) != 0 {
+ return true, 64
+ }
+
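+ // Floating-point mantissas are written as integers (see mpfloat):
+ // 3 bytes hold a float32 mantissa (24 bits), 7 bytes a float64 mantissa (53 bits).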
+ switch b.Kind() {
+ case types2.Float32, types2.Complex64:
+ return true, 3
+ case types2.Float64, types2.Complex128:
+ return true, 7
+ }
+
+ signed = (b.Info() & types2.IsUnsigned) == 0
+ switch b.Kind() {
+ case types2.Int8, types2.Uint8:
+ maxBytes = 1
+ case types2.Int16, types2.Uint16:
+ maxBytes = 2
+ case types2.Int32, types2.Uint32:
+ maxBytes = 4
+ default:
+ maxBytes = 8
+ }
+
+ return
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types2.Basic) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
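+ // Values below maxSmall are packed into the first byte itself; for signed
+ // types the low bit carries the sign and the remaining bits the magnitude.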
+ n, _ := r.declReader.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
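+ // Otherwise the first byte encodes (negated) how many value bytes follow,
+ // again with the low bit carrying the sign for signed types.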
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ errorf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ io.ReadFull(&r.declReader, b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
+
+func (r *importReader) mpfloat(typ *types2.Basic) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) ident() string {
+ return r.string()
+}
+
+func (r *importReader) qualifiedIdent() (*types2.Package, string) {
+ name := r.string()
+ pkg := r.pkg()
+ return pkg, name
+}
+
+func (r *importReader) pos() syntax.Pos {
+ if r.p.version >= 1 {
+ r.posv1()
+ } else {
+ r.posv0()
+ }
+
+ if (r.prevPosBase == nil || r.prevPosBase.Filename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
+ return syntax.Pos{}
+ }
+
+ return syntax.MakePos(r.prevPosBase, uint(r.prevLine), uint(r.prevColumn))
+}
+
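+// posv0 reads a version-0 position delta: either a plain line delta, an
+// escaped literal delta of deltaNewFile, or a new position base followed by
+// an absolute line number.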
+func (r *importReader) posv0() {
+ delta := r.int64()
+ if delta != deltaNewFile {
+ r.prevLine += delta
+ } else if l := r.int64(); l == -1 {
+ r.prevLine += deltaNewFile
+ } else {
+ r.prevPosBase = r.posBase()
+ r.prevLine = l
+ }
+}
+
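+// posv1 reads a version-1 position delta: a column delta whose low bit, when
+// set, indicates that a line delta follows, whose low bit in turn indicates
+// that a new position base follows.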
+func (r *importReader) posv1() {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevPosBase = r.posBase()
+ }
+ }
+}
+
+func (r *importReader) typ() types2.Type {
+ return r.p.typAt(r.uint64(), nil)
+}
+
+func isInterface(t types2.Type) bool {
+ _, ok := t.(*types2.Interface)
+ return ok
+}
+
+func (r *importReader) pkg() *types2.Package { return r.p.pkgAt(r.uint64()) }
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) posBase() *syntax.PosBase { return r.p.posBaseAt(r.uint64()) }
+
+func (r *importReader) doType(base *types2.Named) types2.Type {
+ switch k := r.kind(); k {
+ default:
+ errorf("unexpected kind tag in %q: %v", r.p.ipath, k)
+ return nil
+
+ case definedType:
+ pkg, name := r.qualifiedIdent()
+ r.p.doDecl(pkg, name)
+ return pkg.Scope().Lookup(name).(*types2.TypeName).Type()
+ case pointerType:
+ return types2.NewPointer(r.typ())
+ case sliceType:
+ return types2.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types2.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := chanDir(int(r.uint64()))
+ return types2.NewChan(dir, r.typ())
+ case mapType:
+ return types2.NewMap(r.typ(), r.typ())
+ case signatureType:
+ r.currPkg = r.pkg()
+ return r.signature(nil, nil, nil)
+
+ case structType:
+ r.currPkg = r.pkg()
+
+ fields := make([]*types2.Var, r.uint64())
+ tags := make([]string, len(fields))
+ for i := range fields {
+ fpos := r.pos()
+ fname := r.ident()
+ ftyp := r.typ()
+ emb := r.bool()
+ tag := r.string()
+
+ fields[i] = types2.NewField(fpos, r.currPkg, fname, ftyp, emb)
+ tags[i] = tag
+ }
+ return types2.NewStruct(fields, tags)
+
+ case interfaceType:
+ r.currPkg = r.pkg()
+
+ embeddeds := make([]types2.Type, r.uint64())
+ for i := range embeddeds {
+ _ = r.pos()
+ embeddeds[i] = r.typ()
+ }
+
+ methods := make([]*types2.Func, r.uint64())
+ for i := range methods {
+ mpos := r.pos()
+ mname := r.ident()
+
+ // TODO(mdempsky): Matches bimport.go, but I
+ // don't agree with this.
+ var recv *types2.Var
+ if base != nil {
+ recv = types2.NewVar(syntax.Pos{}, r.currPkg, "", base)
+ }
+
+ msig := r.signature(recv, nil, nil)
+ methods[i] = types2.NewFunc(mpos, r.currPkg, mname, msig)
+ }
+
+ typ := types2.NewInterfaceType(methods, embeddeds)
+ r.p.interfaceList = append(r.p.interfaceList, typ)
+ return typ
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected type param type")
+ }
+ pkg, name := r.qualifiedIdent()
+ id := ident{pkg, name}
+ if t, ok := r.p.tparamIndex[id]; ok {
+ // We're already in the process of importing this typeparam.
+ return t
+ }
+ // Otherwise, import the definition of the typeparam now.
+ r.p.doDecl(pkg, name)
+ return r.p.tparamIndex[id]
+
+ case instanceType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ // pos does not matter for instances: they are positioned on the original
+ // type.
+ _ = r.pos()
+ len := r.uint64()
+ targs := make([]types2.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ // The imported instantiated type doesn't include any methods, so
+ // we must always use the methods of the base (orig) type.
+ // TODO provide a non-nil *Context
+ t, _ := types2.Instantiate(nil, baseType, targs, false)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ errorf("unexpected instantiation type")
+ }
+ terms := make([]*types2.Term, r.uint64())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
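+// signature reads a function signature: the parameter and result tuples
+// followed by a variadic flag, which is only present when there is at least
+// one parameter.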
+func (r *importReader) signature(recv *types2.Var, rparams, tparams []*types2.TypeParam) *types2.Signature {
+ params := r.paramList()
+ results := r.paramList()
+ variadic := params.Len() > 0 && r.bool()
+ return types2.NewSignatureType(recv, rparams, tparams, params, results, variadic)
+}
+
+func (r *importReader) tparamList() []*types2.TypeParam {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ xs := make([]*types2.TypeParam, n)
+ for i := range xs {
+ xs[i] = r.typ().(*types2.TypeParam)
+ }
+ return xs
+}
+
+func (r *importReader) paramList() *types2.Tuple {
+ xs := make([]*types2.Var, r.uint64())
+ for i := range xs {
+ xs[i] = r.param()
+ }
+ return types2.NewTuple(xs...)
+}
+
+func (r *importReader) param() *types2.Var {
+ pos := r.pos()
+ name := r.ident()
+ typ := r.typ()
+ return types2.NewParam(pos, r.currPkg, name, typ)
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(&r.declReader)
+ if err != nil {
+ errorf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(&r.declReader)
+ if err != nil {
+ errorf("readUvarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.declReader.ReadByte()
+ if err != nil {
+ errorf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+func baseType(typ types2.Type) *types2.Named {
+ // pointer receivers are never types2.Named types
+ if p, _ := typ.(*types2.Pointer); p != nil {
+ typ = p.Elem()
+ }
+ // receiver base types are always (possibly generic) types2.Named types
+ n, _ := typ.(*types2.Named)
+ return n
+}
diff --git a/src/cmd/compile/internal/importer/support.go b/src/cmd/compile/internal/importer/support.go
new file mode 100644
index 0000000..9377d99
--- /dev/null
+++ b/src/cmd/compile/internal/importer/support.go
@@ -0,0 +1,134 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements support functionality for iimport.go.
+
+package importer
+
+import (
+ "cmd/compile/internal/types2"
+ "fmt"
+ "go/token"
+ "sync"
+)
+
+func errorf(format string, args ...interface{}) {
+ panic(fmt.Sprintf(format, args...))
+}
+
+const deltaNewFile = -64 // see cmd/compile/internal/gc/bexport.go
+
+// Synthesize a token.Pos
+type fakeFileSet struct {
+ fset *token.FileSet
+ files map[string]*token.File
+}
+
+func (s *fakeFileSet) pos(file string, line, column int) token.Pos {
+ // TODO(mdempsky): Make use of column.
+
+ // Since we don't know the set of needed file positions, we
+ // reserve maxlines positions per file.
+ const maxlines = 64 * 1024
+ f := s.files[file]
+ if f == nil {
+ f = s.fset.AddFile(file, -1, maxlines)
+ s.files[file] = f
+ // Allocate the fake linebreak indices on first use.
+ // TODO(adonovan): opt: save ~512KB using a more complex scheme?
+ fakeLinesOnce.Do(func() {
+ fakeLines = make([]int, maxlines)
+ for i := range fakeLines {
+ fakeLines[i] = i
+ }
+ })
+ f.SetLines(fakeLines)
+ }
+
+ if line > maxlines {
+ line = 1
+ }
+
+ // Treat the file as if it contained only newlines
+ // and column=1: use the line number as the offset.
+ return f.Pos(line - 1)
+}
+
+var (
+ fakeLines []int
+ fakeLinesOnce sync.Once
+)
+
+func chanDir(d int) types2.ChanDir {
+ // tag values must match the constants in cmd/compile/internal/gc/go.go
+ switch d {
+ case 1 /* Crecv */ :
+ return types2.RecvOnly
+ case 2 /* Csend */ :
+ return types2.SendOnly
+ case 3 /* Cboth */ :
+ return types2.SendRecv
+ default:
+ errorf("unexpected channel dir %d", d)
+ return 0
+ }
+}
+
+var predeclared = []types2.Type{
+ // basic types
+ types2.Typ[types2.Bool],
+ types2.Typ[types2.Int],
+ types2.Typ[types2.Int8],
+ types2.Typ[types2.Int16],
+ types2.Typ[types2.Int32],
+ types2.Typ[types2.Int64],
+ types2.Typ[types2.Uint],
+ types2.Typ[types2.Uint8],
+ types2.Typ[types2.Uint16],
+ types2.Typ[types2.Uint32],
+ types2.Typ[types2.Uint64],
+ types2.Typ[types2.Uintptr],
+ types2.Typ[types2.Float32],
+ types2.Typ[types2.Float64],
+ types2.Typ[types2.Complex64],
+ types2.Typ[types2.Complex128],
+ types2.Typ[types2.String],
+
+ // basic type aliases
+ types2.Universe.Lookup("byte").Type(),
+ types2.Universe.Lookup("rune").Type(),
+
+ // error
+ types2.Universe.Lookup("error").Type(),
+
+ // untyped types
+ types2.Typ[types2.UntypedBool],
+ types2.Typ[types2.UntypedInt],
+ types2.Typ[types2.UntypedRune],
+ types2.Typ[types2.UntypedFloat],
+ types2.Typ[types2.UntypedComplex],
+ types2.Typ[types2.UntypedString],
+ types2.Typ[types2.UntypedNil],
+
+ // package unsafe
+ types2.Typ[types2.UnsafePointer],
+
+ // invalid type
+ types2.Typ[types2.Invalid], // only appears in packages with errors
+
+ // used internally by gc; never used by this package or in .a files
+ // not to be confused with the universe any
+ anyType{},
+
+ // comparable
+ types2.Universe.Lookup("comparable").Type(),
+
+ // any
+ types2.Universe.Lookup("any").Type(),
+}
+
+type anyType struct{}
+
+func (t anyType) Underlying() types2.Type { return t }
+func (t anyType) String() string { return "any" }
diff --git a/src/cmd/compile/internal/importer/testdata/a.go b/src/cmd/compile/internal/importer/testdata/a.go
new file mode 100644
index 0000000..56e4292
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/a.go
@@ -0,0 +1,14 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue13566
+
+package a
+
+import "encoding/json"
+
+type A struct {
+ a *A
+ json json.RawMessage
+}
diff --git a/src/cmd/compile/internal/importer/testdata/b.go b/src/cmd/compile/internal/importer/testdata/b.go
new file mode 100644
index 0000000..4196678
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/b.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue13566
+
+package b
+
+import "./a"
+
+type A a.A
diff --git a/src/cmd/compile/internal/importer/testdata/exports.go b/src/cmd/compile/internal/importer/testdata/exports.go
new file mode 100644
index 0000000..91598c0
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/exports.go
@@ -0,0 +1,91 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package exports
+
+import "go/ast"
+
+// Issue 3682: Correctly read dotted identifiers from export data.
+const init1 = 0
+
+func init() {}
+
+const (
+ C0 int = 0
+ C1 = 3.14159265
+ C2 = 2.718281828i
+ C3 = -123.456e-789
+ C4 = +123.456e+789
+ C5 = 1234i
+ C6 = "foo\n"
+ C7 = `bar\n`
+ C8 = 42
+ C9 int = 42
+ C10 float64 = 42
+)
+
+type (
+ T1 int
+ T2 [10]int
+ T3 []int
+ T4 *int
+ T5 chan int
+ T6a chan<- int
+ T6b chan (<-chan int)
+ T6c chan<- (chan int)
+ T7 <-chan *ast.File
+ T8 struct{}
+ T9 struct {
+ a int
+ b, c float32
+ d []string `go:"tag"`
+ }
+ T10 struct {
+ T8
+ T9
+ _ *T10
+ }
+ T11 map[int]string
+ T12 interface{}
+ T13 interface {
+ m1()
+ m2(int) float32
+ }
+ T14 interface {
+ T12
+ T13
+ m3(x ...struct{}) []T9
+ }
+ T15 func()
+ T16 func(int)
+ T17 func(x int)
+ T18 func() float32
+ T19 func() (x float32)
+ T20 func(...interface{})
+ T21 struct{ next *T21 }
+ T22 struct{ link *T23 }
+ T23 struct{ link *T22 }
+ T24 *T24
+ T25 *T26
+ T26 *T27
+ T27 *T25
+ T28 func(T28) T28
+)
+
+var (
+ V0 int
+ V1 = -991.0
+ V2 float32 = 1.2
+)
+
+func F1() {}
+func F2(x int) {}
+func F3() int { return 0 }
+func F4() float32 { return 0 }
+func F5(a, b, c int, u, v, w struct{ x, y T1 }, more ...interface{}) (p, q, r chan<- T10)
+
+func (p *T1) M1()
diff --git a/src/cmd/compile/internal/importer/testdata/generics.go b/src/cmd/compile/internal/importer/testdata/generics.go
new file mode 100644
index 0000000..00bf040
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/generics.go
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is used to generate an object file which
+// serves as test file for gcimporter_test.go.
+
+package generics
+
+type Any any
+
+var x any
+
+type T[A, B any] struct {
+ Left A
+ Right B
+}
+
+var X T[int, string] = T[int, string]{1, "hi"}
+
+func ToInt[P interface{ ~int }](p P) int { return int(p) }
+
+var IntID = ToInt[int]
+
+type G[C comparable] int
+
+func ImplicitFunc[T ~int]() {}
+
+type ImplicitType[T ~int] int
diff --git a/src/cmd/compile/internal/importer/testdata/issue15920.go b/src/cmd/compile/internal/importer/testdata/issue15920.go
new file mode 100644
index 0000000..c70f7d8
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue15920.go
@@ -0,0 +1,11 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The underlying type of Error is the underlying type of error.
+// Make sure we can import this again without problems.
+type Error error
+
+func F() Error { return nil }
diff --git a/src/cmd/compile/internal/importer/testdata/issue20046.go b/src/cmd/compile/internal/importer/testdata/issue20046.go
new file mode 100644
index 0000000..c63ee82
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue20046.go
@@ -0,0 +1,9 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var V interface {
+ M()
+}
diff --git a/src/cmd/compile/internal/importer/testdata/issue25301.go b/src/cmd/compile/internal/importer/testdata/issue25301.go
new file mode 100644
index 0000000..e3dc98b
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue25301.go
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue25301
+
+type (
+ A = interface {
+ M()
+ }
+ T interface {
+ A
+ }
+ S struct{}
+)
+
+func (S) M() { println("m") }
diff --git a/src/cmd/compile/internal/importer/testdata/issue25596.go b/src/cmd/compile/internal/importer/testdata/issue25596.go
new file mode 100644
index 0000000..8923373
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/issue25596.go
@@ -0,0 +1,13 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue25596
+
+type E interface {
+ M() T
+}
+
+type T interface {
+ E
+}
diff --git a/src/cmd/compile/internal/importer/testdata/p.go b/src/cmd/compile/internal/importer/testdata/p.go
new file mode 100644
index 0000000..9e2e705
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/p.go
@@ -0,0 +1,13 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Input for TestIssue15517
+
+package p
+
+const C = 0
+
+var V int
+
+func F() {}
diff --git a/src/cmd/compile/internal/importer/testdata/versions/test.go b/src/cmd/compile/internal/importer/testdata/versions/test.go
new file mode 100644
index 0000000..227fc09
--- /dev/null
+++ b/src/cmd/compile/internal/importer/testdata/versions/test.go
@@ -0,0 +1,28 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// To create a test case for a new export format version,
+// build this package with the latest compiler and store
+// the resulting .a file appropriately named in the versions
+// directory. The VersionHandling test will pick it up.
+//
+// In the testdata/versions directory:
+//
+// go build -o test_go1.$X_$Y.a test.go
+//
+// with $X = Go version and $Y = export format version
+// (add 'b' or 'i' to distinguish between binary and
+// indexed format starting with 1.11 as long as both
+// formats are supported).
+//
+// Make sure this source is extended such that it exercises
+// whatever export format change has taken place.
+
+package test
+
+// Any release before and including Go 1.7 didn't encode
+// the package for a blank struct field.
+type BlankField struct {
+ _ int
+}
diff --git a/src/cmd/compile/internal/inline/inl.go b/src/cmd/compile/internal/inline/inl.go
new file mode 100644
index 0000000..bc7ec5c
--- /dev/null
+++ b/src/cmd/compile/internal/inline/inl.go
@@ -0,0 +1,1407 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+//
+// The inlining facility makes 2 passes: first caninl determines which
+// functions are suitable for inlining, and for those that are it
+// saves a copy of the body. Then InlineCalls walks each function body to
+// expand calls to inlinable functions.
+//
+// The Debug.l flag controls the aggressiveness. Note that main() swaps level 0 and 1,
+// so 1 is the default and -l disables inlining. Additional levels (beyond -l) may be
+// buggy and are not supported.
+// 0: disabled
+// 1: 80-nodes leaf functions, oneliners, panic, lazy typechecking (default)
+// 2: (unassigned)
+// 3: (unassigned)
+// 4: allow non-leaf functions
+//
+// At some point this may get another default and become switch-offable with -N.
+//
+// The -d typcheckinl flag enables early typechecking of all imported bodies,
+// which is useful to flush out bugs.
+//
+// The Debug.m flag enables diagnostic output. A single -m is useful for verifying
+// which calls get inlined or not; higher levels are for debugging and may go away at any point.
+
+package inline
+
+import (
+ "fmt"
+ "go/constant"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// Inlining budget parameters, gathered in one place
+const (
+ inlineMaxBudget = 80
+ inlineExtraAppendCost = 0
+ // default is to inline if there's at most one call. -l=4 overrides this by using 1 instead.
+ inlineExtraCallCost = 57 // 57 was benchmarked to provide the most benefit with no bad surprises; see https://github.com/golang/go/issues/19348#issuecomment-439370742
+ inlineExtraPanicCost = 1 // do not penalize inlining panics.
+ inlineExtraThrowCost = inlineMaxBudget // with current (2018-05/1.11) code, inlining runtime.throw does not help.
+
+ inlineBigFunctionNodes = 5000 // Functions with this many nodes are considered "big".
+ inlineBigFunctionMaxCost = 20 // Max cost of inlinee when inlining into a "big" function.
+)
+
+// InlinePackage finds functions that can be inlined and clones them before walk expands them.
+func InlinePackage() {
+ ir.VisitFuncsBottomUp(typecheck.Target.Decls, func(list []*ir.Func, recursive bool) {
+ numfns := numNonClosures(list)
+ for _, n := range list {
+ if !recursive || numfns > 1 {
+ // We allow inlining if there is no
+ // recursion, or the recursion cycle is
+ // across more than one function.
+ CanInline(n)
+ } else {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: recursive\n", ir.Line(n), n.Nname)
+ }
+ }
+ InlineCalls(n)
+ }
+ })
+}
+
+// CanInline determines whether fn is inlineable.
+// If so, CanInline saves copies of fn.Body and fn.Dcl in fn.Inl.
+// fn and fn.Body will already have been typechecked.
+func CanInline(fn *ir.Func) {
+ if fn.Nname == nil {
+ base.Fatalf("CanInline no nname %+v", fn)
+ }
+
+ var reason string // reason, if any, that the function was not inlined
+ if base.Flag.LowerM > 1 || logopt.Enabled() {
+ defer func() {
+ if reason != "" {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v: %s\n", ir.Line(fn), fn.Nname, reason)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos(), "cannotInlineFunction", "inline", ir.FuncName(fn), reason)
+ }
+ }
+ }()
+ }
+
+ // If marked "go:noinline", don't inline
+ if fn.Pragma&ir.Noinline != 0 {
+ reason = "marked go:noinline"
+ return
+ }
+
+ // If marked "go:norace" and -race compilation, don't inline.
+ if base.Flag.Race && fn.Pragma&ir.Norace != 0 {
+ reason = "marked go:norace with -race compilation"
+ return
+ }
+
+ // If marked "go:nocheckptr" and -d checkptr compilation, don't inline.
+ if base.Debug.Checkptr != 0 && fn.Pragma&ir.NoCheckPtr != 0 {
+ reason = "marked go:nocheckptr"
+ return
+ }
+
+ // If marked "go:cgo_unsafe_args", don't inline, since the
+ // function makes assumptions about its argument frame layout.
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ reason = "marked go:cgo_unsafe_args"
+ return
+ }
+
+ // If marked as "go:uintptrescapes", don't inline, since the
+ // escape information is lost during inlining.
+ if fn.Pragma&ir.UintptrEscapes != 0 {
+ reason = "marked as having an escaping uintptr argument"
+ return
+ }
+
+ // The nowritebarrierrec checker currently works at function
+ // granularity, so inlining yeswritebarrierrec functions can
+ // confuse it (#22342). As a workaround, disallow inlining
+ // them for now.
+ if fn.Pragma&ir.Yeswritebarrierrec != 0 {
+ reason = "marked go:yeswritebarrierrec"
+ return
+ }
+
+ // If fn has no body (is defined outside of Go), cannot inline it.
+ if len(fn.Body) == 0 {
+ reason = "no function body"
+ return
+ }
+
+ if fn.Typecheck() == 0 {
+ base.Fatalf("CanInline on non-typechecked function %v", fn)
+ }
+
+ n := fn.Nname
+ if n.Func.InlinabilityChecked() {
+ return
+ }
+ defer n.Func.SetInlinabilityChecked(true)
+
+ cc := int32(inlineExtraCallCost)
+ if base.Flag.LowerL == 4 {
+ cc = 1 // this appears to yield better performance than 0.
+ }
+
+ // At this point in the game the function we're looking at may
+ // have "stale" autos, vars that still appear in the Dcl list, but
+ // which no longer have any uses in the function body (due to
+ // elimination by deadcode). We'd like to exclude these dead vars
+ // when creating the "Inline.Dcl" field below; to accomplish this,
+ // the hairyVisitor below builds up a map of used/referenced
+ // locals, and we use this map to produce a pruned Inline.Dcl
+ // list. See issue 25249 for more context.
+
+ visitor := hairyVisitor{
+ budget: inlineMaxBudget,
+ extraCallCost: cc,
+ }
+ if visitor.tooHairy(fn) {
+ reason = visitor.reason
+ return
+ }
+
+ n.Func.Inl = &ir.Inline{
+ Cost: inlineMaxBudget - visitor.budget,
+ Dcl: pruneUnusedAutos(n.Defn.(*ir.Func).Dcl, &visitor),
+ Body: inlcopylist(fn.Body),
+
+ CanDelayResults: canDelayResults(fn),
+ }
+
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: can inline %v with cost %d as: %v { %v }\n", ir.Line(fn), n, inlineMaxBudget-visitor.budget, fn.Type(), ir.Nodes(n.Func.Inl.Body))
+ } else if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: can inline %v\n", ir.Line(fn), n)
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(fn.Pos(), "canInlineFunction", "inline", ir.FuncName(fn), fmt.Sprintf("cost: %d", inlineMaxBudget-visitor.budget))
+ }
+}
+
+// canDelayResults reports whether inlined calls to fn can delay
+// declaring the result parameter until the "return" statement.
+func canDelayResults(fn *ir.Func) bool {
+ // We can delay declaring+initializing result parameters if:
+ // (1) there's exactly one "return" statement in the inlined function;
+ // (2) it's not an empty return statement (#44355); and
+ // (3) the result parameters aren't named.
+
+ nreturns := 0
+ ir.VisitList(fn.Body, func(n ir.Node) {
+ if n, ok := n.(*ir.ReturnStmt); ok {
+ nreturns++
+ if len(n.Results) == 0 {
+ nreturns++ // empty return statement (case 2)
+ }
+ }
+ })
+
+ if nreturns != 1 {
+ return false // not exactly one return statement (case 1)
+ }
+
+ // Check that all result parameters are unnamed (case 3).
+ for _, param := range fn.Type().Results().FieldSlice() {
+ if sym := types.OrigSym(param.Sym); sym != nil && !sym.IsBlank() {
+ return false // found a named result parameter (case 3)
+ }
+ }
+
+ return true
+}
+
+// hairyVisitor visits a function body to determine its inlining
+// hairiness and whether or not it can be inlined.
+type hairyVisitor struct {
+ budget int32
+ reason string
+ extraCallCost int32
+ usedLocals ir.NameSet
+ do func(ir.Node) bool
+}
+
+func (v *hairyVisitor) tooHairy(fn *ir.Func) bool {
+ v.do = v.doNode // cache closure
+ if ir.DoChildren(fn, v.do) {
+ return true
+ }
+ if v.budget < 0 {
+ v.reason = fmt.Sprintf("function too complex: cost %d exceeds budget %d", inlineMaxBudget-v.budget, inlineMaxBudget)
+ return true
+ }
+ return false
+}
+
+func (v *hairyVisitor) doNode(n ir.Node) bool {
+ if n == nil {
+ return false
+ }
+ switch n.Op() {
+ // Call is okay if inlinable and we have the budget for the body.
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ // Functions that call runtime.getcaller{pc,sp} can not be inlined
+ // because getcaller{pc,sp} expect a pointer to the caller's first argument.
+ //
+ // runtime.throw is a "cheap call" like panic in normal code.
+ if n.X.Op() == ir.ONAME {
+ name := n.X.(*ir.Name)
+ if name.Class == ir.PFUNC && types.IsRuntimePkg(name.Sym().Pkg) {
+ fn := name.Sym().Name
+ if fn == "getcallerpc" || fn == "getcallersp" {
+ v.reason = "call to " + fn
+ return true
+ }
+ if fn == "throw" {
+ v.budget -= inlineExtraThrowCost
+ break
+ }
+ }
+ }
+ if n.X.Op() == ir.OMETHEXPR {
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ if fn := meth.Func; fn != nil {
+ s := fn.Sym()
+ var cheap bool
+ if types.IsRuntimePkg(s.Pkg) && s.Name == "heapBits.nextArena" {
+ // Special case: explicitly allow mid-stack inlining of
+ // runtime.heapBits.next even though it calls slow-path
+ // runtime.heapBits.nextArena.
+ cheap = true
+ }
+ // Special case: on architectures that can do unaligned loads,
+ // explicitly mark encoding/binary methods as cheap,
+ // because in practice they are, even though our inlining
+ // budgeting system does not see that. See issue 42958.
+ if base.Ctxt.Arch.CanMergeLoads && s.Pkg.Path == "encoding/binary" {
+ switch s.Name {
+ case "littleEndian.Uint64", "littleEndian.Uint32", "littleEndian.Uint16",
+ "bigEndian.Uint64", "bigEndian.Uint32", "bigEndian.Uint16",
+ "littleEndian.PutUint64", "littleEndian.PutUint32", "littleEndian.PutUint16",
+ "bigEndian.PutUint64", "bigEndian.PutUint32", "bigEndian.PutUint16":
+ cheap = true
+ }
+ }
+ if cheap {
+ break // treat like any other node, that is, cost of 1
+ }
+ }
+ }
+ }
+
+ if ir.IsIntrinsicCall(n) {
+ // Treat like any other node.
+ break
+ }
+
+ if fn := inlCallee(n.X); fn != nil && typecheck.HaveInlineBody(fn) {
+ v.budget -= fn.Inl.Cost
+ break
+ }
+
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+
+ // Things that are too hairy, irrespective of the budget
+ case ir.OCALL, ir.OCALLINTER:
+ // Call cost for non-leaf inlining.
+ v.budget -= v.extraCallCost
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ if n.X.Op() == ir.OCONVIFACE && n.X.(*ir.ConvExpr).Implicit() {
+ // Hack to keep reflect.flag.mustBe inlinable for TestIntendedInlining.
+ // Before CL 284412, these conversions were introduced later in the
+ // compiler, so they didn't count against inlining budget.
+ v.budget++
+ }
+ v.budget -= inlineExtraPanicCost
+
+ case ir.ORECOVER:
+ // recover matches the argument frame pointer to find
+ // the right panic value, so it needs an argument frame.
+ v.reason = "call to recover"
+ return true
+
+ case ir.OCLOSURE:
+ if base.Debug.InlFuncsWithClosures == 0 {
+ v.reason = "not inlining functions with closures"
+ return true
+ }
+
+ // TODO(danscales): Maybe make budget proportional to number of closure
+ // variables, e.g.:
+ //v.budget -= int32(len(n.(*ir.ClosureExpr).Func.ClosureVars) * 3)
+ v.budget -= 15
+ // Scan body of closure (which DoChildren doesn't automatically
+ // do) to check for disallowed ops in the body and include the
+ // body in the budget.
+ if doList(n.(*ir.ClosureExpr).Func.Body, v.do) {
+ return true
+ }
+
+ case ir.OSELECT,
+ ir.OGO,
+ ir.ODEFER,
+ ir.ODCLTYPE, // can't print yet
+ ir.OTAILCALL:
+ v.reason = "unhandled op " + n.Op().String()
+ return true
+
+ case ir.OAPPEND:
+ v.budget -= inlineExtraAppendCost
+
+ case ir.ODEREF:
+ // *(*X)(unsafe.Pointer(&x)) is low-cost
+ n := n.(*ir.StarExpr)
+
+ ptr := n.X
+ for ptr.Op() == ir.OCONVNOP {
+ ptr = ptr.(*ir.ConvExpr).X
+ }
+ if ptr.Op() == ir.OADDR {
+ v.budget += 1 // undo half of default cost of ir.ODEREF+ir.OADDR
+ }
+
+ case ir.OCONVNOP:
+ // This doesn't produce code, but the children might.
+ v.budget++ // undo default cost
+
+ case ir.ODCLCONST, ir.OFALL:
+ // These nodes don't produce code; omit from inlining budget.
+ return false
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ // This if and the condition cost nothing.
+ if doList(n.Init(), v.do) {
+ return true
+ }
+ if ir.BoolVal(n.Cond) {
+ return doList(n.Body, v.do)
+ } else {
+ return doList(n.Else, v.do)
+ }
+ }
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PAUTO {
+ v.usedLocals.Add(n)
+ }
+
+ case ir.OBLOCK:
+ // The only OBLOCK we should see at this point is an empty one.
+ // In any event, let the visitList(n.List()) below take care of the statements,
+ // and don't charge for the OBLOCK itself. The ++ undoes the -- below.
+ v.budget++
+
+ case ir.OMETHVALUE, ir.OSLICELIT:
+ v.budget-- // Hack for toolstash -cmp.
+
+ case ir.OMETHEXPR:
+ v.budget++ // Hack for toolstash -cmp.
+ }
+
+ v.budget--
+
+ // When debugging, don't stop early, to get full cost of inlining this function
+ if v.budget < 0 && base.Flag.LowerM < 2 && !logopt.Enabled() {
+ v.reason = "too expensive"
+ return true
+ }
+
+ return ir.DoChildren(n, v.do)
+}
+
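+// isBigFunc reports whether fn has at least inlineBigFunctionNodes nodes.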
+func isBigFunc(fn *ir.Func) bool {
+ budget := inlineBigFunctionNodes
+ return ir.Any(fn, func(n ir.Node) bool {
+ budget--
+ return budget <= 0
+ })
+}
+
+// inlcopylist (together with inlcopy) recursively copies a list of nodes, except
+// that it keeps the same ONAME, OTYPE, and OLITERAL nodes. It is used for copying
+// the body and dcls of an inlineable function.
+func inlcopylist(ll []ir.Node) []ir.Node {
+ s := make([]ir.Node, len(ll))
+ for i, n := range ll {
+ s[i] = inlcopy(n)
+ }
+ return s
+}
+
+// inlcopy is like DeepCopy(), but does extra work to copy closures.
+func inlcopy(n ir.Node) ir.Node {
+ var edit func(ir.Node) ir.Node
+ edit = func(x ir.Node) ir.Node {
+ switch x.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.ONIL:
+ return x
+ }
+ m := ir.Copy(x)
+ ir.EditChildren(m, edit)
+ if x.Op() == ir.OCLOSURE {
+ x := x.(*ir.ClosureExpr)
+ // Need to save/duplicate x.Func.Nname,
+ // x.Func.Nname.Ntype, x.Func.Dcl, x.Func.ClosureVars, and
+ // x.Func.Body for iexport and local inlining.
+ oldfn := x.Func
+ newfn := ir.NewFunc(oldfn.Pos())
+ m.(*ir.ClosureExpr).Func = newfn
+ newfn.Nname = ir.NewNameAt(oldfn.Nname.Pos(), oldfn.Nname.Sym())
+ // XXX OK to share fn.Type() ??
+ newfn.Nname.SetType(oldfn.Nname.Type())
+ // Ntype can be nil for -G=3 mode.
+ if oldfn.Nname.Ntype != nil {
+ newfn.Nname.Ntype = inlcopy(oldfn.Nname.Ntype).(ir.Ntype)
+ }
+ newfn.Body = inlcopylist(oldfn.Body)
+ // Make shallow copy of the Dcl and ClosureVar slices
+ newfn.Dcl = append([]*ir.Name(nil), oldfn.Dcl...)
+ newfn.ClosureVars = append([]*ir.Name(nil), oldfn.ClosureVars...)
+ }
+ return m
+ }
+ return edit(n)
+}
+
+// InlineCalls/inlnode walks fn's statements and expressions and substitutes any
+// calls made to inlineable functions. This is the external entry point.
+func InlineCalls(fn *ir.Func) {
+ savefn := ir.CurFunc
+ ir.CurFunc = fn
+ maxCost := int32(inlineMaxBudget)
+ if isBigFunc(fn) {
+ maxCost = inlineBigFunctionMaxCost
+ }
+ // Map to keep track of functions that have been inlined at a particular
+ // call site, in order to stop inlining when we reach the beginning of a
+ // recursion cycle again. We don't inline immediately recursive functions,
+ // but allow inlining if there is a recursion cycle of many functions.
+ // Most likely, the inlining will stop before we even hit the beginning of
+ // the cycle again, but the map catches the unusual case.
+ inlMap := make(map[*ir.Func]bool)
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ return inlnode(n, maxCost, inlMap, edit)
+ }
+ ir.EditChildren(fn, edit)
+ ir.CurFunc = savefn
+}
+
+// inlnode recurses over the tree to find inlineable calls, which will
+// be turned into OINLCALLs by mkinlcall. When the recursion comes
+// back up, it will examine left, right, list, rlist, ninit, ntest, nincr,
+// nbody and nelse and use one of the 4 inlconv/glue functions above
+// to turn the OINLCALL into an expression, a statement, or patch it
+// into this node's list or rlist as appropriate.
+// NOTE it makes no sense to pass the glue functions down the
+// recursion to the level where the OINLCALL gets created because they
+// have to edit /this/ n, so you'd have to push that one down as well,
+// but then you may as well do it here. So this is cleaner and
+// shorter and less complicated.
+// The result of inlnode MUST be assigned back to n, e.g.
+// n.Left = inlnode(n.Left)
+func inlnode(n ir.Node, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ switch n.Op() {
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ switch call := n.Call; call.Op() {
+ case ir.OCALLMETH:
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
+ call := call.(*ir.CallExpr)
+ call.NoInline = true
+ }
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ n.Call.NoInline = true // Don't inline a tail call for now. Maybe we could inline it just like RETURN fn(arg)?
+
+ // TODO do them here (or earlier),
+ // so escape analysis can avoid more heapmoves.
+ case ir.OCLOSURE:
+ return n
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if n.X.Op() == ir.OMETHEXPR {
+ // Prevent inlining some reflect.Value methods when using checkptr,
+ // even when package reflect was compiled without it (#35073).
+ if meth := ir.MethodExprName(n.X); meth != nil {
+ s := meth.Sym()
+ if base.Debug.Checkptr != 0 && types.IsReflectPkg(s.Pkg) && (s.Name == "Value.UnsafeAddr" || s.Name == "Value.Pointer") {
+ return n
+ }
+ }
+ }
+ }
+
+ lno := ir.SetPos(n)
+
+ ir.EditChildren(n, edit)
+
+ // with all the branches out of the way, it is now time to
+ // transmogrify this node itself unless inhibited by the
+ // switch at the top of this function.
+ switch n.Op() {
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+
+ case ir.OCALLFUNC:
+ call := n.(*ir.CallExpr)
+ if call.NoInline {
+ break
+ }
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("%v:call to func %+v\n", ir.Line(n), call.X)
+ }
+ if ir.IsIntrinsicCall(call) {
+ break
+ }
+ if fn := inlCallee(call.X); fn != nil && typecheck.HaveInlineBody(fn) {
+ n = mkinlcall(call, fn, maxCost, inlMap, edit)
+ }
+ }
+
+ base.Pos = lno
+
+ return n
+}
+
+// inlCallee takes a function-typed expression and returns the underlying function ONAME
+// that it refers to if statically known. Otherwise, it returns nil.
+func inlCallee(fn ir.Node) *ir.Func {
+ fn = ir.StaticValue(fn)
+ switch fn.Op() {
+ case ir.OMETHEXPR:
+ fn := fn.(*ir.SelectorExpr)
+ n := ir.MethodExprName(fn)
+ // Check that receiver type matches fn.X.
+ // TODO(mdempsky): Handle implicit dereference
+ // of pointer receiver argument?
+ if n == nil || !types.Identical(n.Type().Recv().Type, fn.X.Type()) {
+ return nil
+ }
+ return n.Func
+ case ir.ONAME:
+ fn := fn.(*ir.Name)
+ if fn.Class == ir.PFUNC {
+ return fn.Func
+ }
+ case ir.OCLOSURE:
+ fn := fn.(*ir.ClosureExpr)
+ c := fn.Func
+ CanInline(c)
+ return c
+ }
+ return nil
+}
+
+func inlParam(t *types.Field, as ir.InitNode, inlvars map[*ir.Name]*ir.Name) ir.Node {
+ if t.Nname == nil {
+ return ir.BlankNode
+ }
+ n := t.Nname.(*ir.Name)
+ if ir.IsBlank(n) {
+ return ir.BlankNode
+ }
+ inlvar := inlvars[n]
+ if inlvar == nil {
+ base.Fatalf("missing inlvar for %v", n)
+ }
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, inlvar))
+ inlvar.Name().Defn = as
+ return inlvar
+}
+
+var inlgen int
+
+// SSADumpInline gives the SSA back end a chance to dump the function
+// when producing output for debugging the compiler itself.
+var SSADumpInline = func(*ir.Func) {}
+
+// NewInline allows the inliner implementation to be overridden.
+// If it returns nil, the legacy inliner will handle this call
+// instead.
+var NewInline = func(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr { return nil }
+
+// If n is an OCALLFUNC node, and fn is an ONAME node for a
+// function with an inlinable body, return an OINLCALL node that can replace n.
+// The returned node's Ninit has the parameter assignments, the Nbody is the
+// inlined function body, and (List, Rlist) contain the (input, output)
+// parameters.
+// The result of mkinlcall MUST be assigned back to n, e.g.
+// n.Left = mkinlcall(n.Left, fn, isddd)
+func mkinlcall(n *ir.CallExpr, fn *ir.Func, maxCost int32, inlMap map[*ir.Func]bool, edit func(ir.Node) ir.Node) ir.Node {
+ if fn.Inl == nil {
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
+ fmt.Sprintf("%s cannot be inlined", ir.PkgFuncName(fn)))
+ }
+ return n
+ }
+ if fn.Inl.Cost > maxCost {
+ // The inlined function body is too big. Typically we use this check to restrict
+ // inlining into very big functions. See issue 26546 and 17566.
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
+ fmt.Sprintf("cost %d of %s exceeds max large caller cost %d", fn.Inl.Cost, ir.PkgFuncName(fn), maxCost))
+ }
+ return n
+ }
+
+ if fn == ir.CurFunc {
+ // Can't recursively inline a function into itself.
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", fmt.Sprintf("recursive call to %s", ir.FuncName(ir.CurFunc)))
+ }
+ return n
+ }
+
+ // Don't inline a function fn that has no shape parameters, but is passed at
+ // least one shape arg. This means we must be inlining a non-generic function
+ // fn that was passed into a generic function, and can be called with a shape
+ // arg because it matches an appropriate type parameter. But fn may include
+ // an interface conversion (that may be applied to a shape arg) that was not
+ // apparent when we first created the instantiation of the generic function.
+ // We can't handle this if we actually do the inlining, since we want to know
+ // all interface conversions immediately after stenciling. So, we avoid
+ // inlining in this case. See #49309.
+ if !fn.Type().HasShape() {
+ for _, arg := range n.Args {
+ if arg.Type().HasShape() {
+ if logopt.Enabled() {
+ logopt.LogOpt(n.Pos(), "cannotInlineCall", "inline", ir.FuncName(ir.CurFunc),
+ fmt.Sprintf("inlining non-shape function %v with shape args", ir.FuncName(fn)))
+ }
+ return n
+ }
+ }
+ }
+
+ if base.Flag.Cfg.Instrumenting && types.IsRuntimePkg(fn.Sym().Pkg) {
+ // Runtime package must not be instrumented.
+ // Instrument skips runtime package. However, some runtime code can be
+ // inlined into other packages and instrumented there. To avoid this,
+ // we disable inlining of runtime functions when instrumenting.
+ // The example that we observed is inlining of LockOSThread,
+ // which led to false race reports on m contents.
+ return n
+ }
+
+ if inlMap[fn] {
+ if base.Flag.LowerM > 1 {
+ fmt.Printf("%v: cannot inline %v into %v: repeated recursive cycle\n", ir.Line(n), fn, ir.FuncName(ir.CurFunc))
+ }
+ return n
+ }
+ inlMap[fn] = true
+ defer func() {
+ inlMap[fn] = false
+ }()
+
+ typecheck.FixVariadicCall(n)
+
+ parent := base.Ctxt.PosTable.Pos(n.Pos()).Base().InliningIndex()
+
+ sym := fn.Linksym()
+ inlIndex := base.Ctxt.InlTree.Add(parent, n.Pos(), sym)
+
+ if base.Flag.GenDwarfInl > 0 {
+ if !sym.WasInlined() {
+ base.Ctxt.DwFixups.SetPrecursorFunc(sym, fn)
+ sym.Set(obj.AttrWasInlined, true)
+ }
+ }
+
+ if base.Flag.LowerM != 0 {
+ fmt.Printf("%v: inlining call to %v\n", ir.Line(n), fn)
+ }
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: Before inlining: %+v\n", ir.Line(n), n)
+ }
+
+ res := NewInline(n, fn, inlIndex)
+ if res == nil {
+ res = oldInline(n, fn, inlIndex)
+ }
+
+ // transitive inlining
+ // might be nice to do this before exporting the body,
+ // but can't emit the body with inlining expanded.
+ // instead we emit the things that the body needs
+ // and each use must redo the inlining.
+ // luckily these are small.
+ ir.EditChildren(res, edit)
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("%v: After inlining %+v\n\n", ir.Line(res), res)
+ }
+
+ return res
+}
+
+// CalleeEffects appends any side effects from evaluating callee to init.
+func CalleeEffects(init *ir.Nodes, callee ir.Node) {
+ for {
+ init.Append(ir.TakeInit(callee)...)
+
+ switch callee.Op() {
+ case ir.ONAME, ir.OCLOSURE, ir.OMETHEXPR:
+ return // done
+
+ case ir.OCONVNOP:
+ conv := callee.(*ir.ConvExpr)
+ callee = conv.X
+
+ case ir.OINLCALL:
+ ic := callee.(*ir.InlinedCallExpr)
+ init.Append(ic.Body.Take()...)
+ callee = ic.SingleResult()
+
+ default:
+ base.FatalfAt(callee.Pos(), "unexpected callee expression: %v", callee)
+ }
+ }
+}
+
+// oldInline creates an InlinedCallExpr to replace the given call
+// expression. fn is the callee function to be inlined. inlIndex is
+// the inlining tree position index, for use with src.NewInliningBase
+// when rewriting positions.
+func oldInline(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ if base.Debug.TypecheckInl == 0 {
+ typecheck.ImportedBody(fn)
+ }
+
+ SSADumpInline(fn)
+
+ ninit := call.Init()
+
+ // For normal function calls, the function callee expression
+ // may contain side effects. Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ CalleeEffects(&ninit, call.X)
+ }
+
+ // Make temp names to use instead of the originals.
+ inlvars := make(map[*ir.Name]*ir.Name)
+
+ // record formals/locals for later post-processing
+ var inlfvars []*ir.Name
+
+ for _, ln := range fn.Inl.Dcl {
+ if ln.Op() != ir.ONAME {
+ continue
+ }
+ if ln.Class == ir.PPARAMOUT { // return values handled below.
+ continue
+ }
+ inlf := typecheck.Expr(inlvar(ln)).(*ir.Name)
+ inlvars[ln] = inlf
+ if base.Flag.GenDwarfInl > 0 {
+ if ln.Class == ir.PPARAM {
+ inlf.Name().SetInlFormal(true)
+ } else {
+ inlf.Name().SetInlLocal(true)
+ }
+ inlf.SetPos(ln.Pos())
+ inlfvars = append(inlfvars, inlf)
+ }
+ }
+
+ // Temporaries for return values.
+ var retvars []ir.Node
+ for i, t := range fn.Type().Results().Fields().Slice() {
+ var m *ir.Name
+ if nn := t.Nname; nn != nil && !ir.IsBlank(nn.(*ir.Name)) && !strings.HasPrefix(nn.Sym().Name, "~r") {
+ n := nn.(*ir.Name)
+ m = inlvar(n)
+ m = typecheck.Expr(m).(*ir.Name)
+ inlvars[n] = m
+ } else {
+ // anonymous return values, synthesize names for use in assignment that replaces return
+ m = retvar(t, i)
+ }
+
+ if base.Flag.GenDwarfInl > 0 {
+ // Don't update the src.Pos on a return variable if it
+ // was manufactured by the inliner (e.g. "~R2"); such vars
+ // were not part of the original callee.
+ if !strings.HasPrefix(m.Sym().Name, "~R") {
+ m.Name().SetInlFormal(true)
+ m.SetPos(t.Pos)
+ inlfvars = append(inlfvars, m)
+ }
+ }
+
+ retvars = append(retvars, m)
+ }
+
+ // Assign arguments to the parameters' temp names.
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ as.Def = true
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+ as.Rhs.Append(call.Args...)
+
+ if recv := fn.Type().Recv(); recv != nil {
+ as.Lhs.Append(inlParam(recv, as, inlvars))
+ }
+ for _, param := range fn.Type().Params().Fields().Slice() {
+ as.Lhs.Append(inlParam(param, as, inlvars))
+ }
+
+ if len(as.Rhs) != 0 {
+ ninit.Append(typecheck.Stmt(as))
+ }
+
+ if !fn.Inl.CanDelayResults {
+ // Zero the return parameters.
+ for _, n := range retvars {
+ ninit.Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+ ras := ir.NewAssignStmt(base.Pos, n, nil)
+ ninit.Append(typecheck.Stmt(ras))
+ }
+ }
+
+ retlabel := typecheck.AutoLabel(".i")
+
+ inlgen++
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ ninit.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(inlIndex)))
+
+ subst := inlsubst{
+ retlabel: retlabel,
+ retvars: retvars,
+ inlvars: inlvars,
+ defnMarker: ir.NilExpr{},
+ bases: make(map[*src.PosBase]*src.PosBase),
+ newInlIndex: inlIndex,
+ fn: fn,
+ }
+ subst.edit = subst.node
+
+ body := subst.list(ir.Nodes(fn.Inl.Body))
+
+ lab := ir.NewLabelStmt(base.Pos, retlabel)
+ body = append(body, lab)
+
+ if !typecheck.Go117ExportTypes {
+ typecheck.Stmts(body)
+ }
+
+ if base.Flag.GenDwarfInl > 0 {
+ for _, v := range inlfvars {
+ v.SetPos(subst.updatedPos(v.Pos()))
+ }
+ }
+
+ //dumplist("ninit post", ninit);
+
+ res := ir.NewInlinedCallExpr(base.Pos, body, retvars)
+ res.SetInit(ninit)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+ return res
+}
+
+// Every time we expand a function we generate a new set of tmpnames,
+// PAUTO's in the calling functions, and link them off of the
+// PPARAM's, PAUTOS and PPARAMOUTs of the called function.
+func inlvar(var_ *ir.Name) *ir.Name {
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("inlvar %+v\n", var_)
+ }
+
+ n := typecheck.NewName(var_.Sym())
+ n.SetType(var_.Type())
+ n.SetTypecheck(1)
+ n.Class = ir.PAUTO
+ n.SetUsed(true)
+ n.SetAutoTemp(var_.AutoTemp())
+ n.Curfn = ir.CurFunc // the calling function, not the called one
+ n.SetAddrtaken(var_.Addrtaken())
+
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+ return n
+}
+
+// Synthesize a variable to store the inlined function's results in.
+func retvar(t *types.Field, i int) *ir.Name {
+ n := typecheck.NewName(typecheck.LookupNum("~R", i))
+ n.SetType(t.Type)
+ n.SetTypecheck(1)
+ n.Class = ir.PAUTO
+ n.SetUsed(true)
+ n.Curfn = ir.CurFunc // the calling function, not the called one
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+ return n
+}
+
+// The inlsubst type implements the actual inlining of a single
+// function call.
+type inlsubst struct {
+ // Target of the goto substituted in place of a return.
+ retlabel *types.Sym
+
+ // Temporary result variables.
+ retvars []ir.Node
+
+ inlvars map[*ir.Name]*ir.Name
+ // defnMarker is used to mark a Node for reassignment.
+ // inlsubst.clovar sets this when creating a new ONAME.
+ // inlsubst.node will set the correct Defn for inlvar.
+ defnMarker ir.NilExpr
+
+ // bases maps from original PosBase to PosBase with an extra
+ // inlined call frame.
+ bases map[*src.PosBase]*src.PosBase
+
+ // newInlIndex is the index of the inlined call frame to
+ // insert for inlined nodes.
+ newInlIndex int
+
+ edit func(ir.Node) ir.Node // cached copy of subst.node method value closure
+
+ // If non-nil, we are inside a closure inside the inlined function, and
+ // newclofn is the Func of the new inlined closure.
+ newclofn *ir.Func
+
+ fn *ir.Func // For debug -- the func that is being inlined
+
+ // If true, then don't update source positions during substitution
+ // (retain old source positions).
+ noPosUpdate bool
+}
+
+// list inlines a list of nodes.
+func (subst *inlsubst) list(ll ir.Nodes) []ir.Node {
+ s := make([]ir.Node, 0, len(ll))
+ for _, n := range ll {
+ s = append(s, subst.node(n))
+ }
+ return s
+}
+
+// fields returns a list of the fields of a struct type representing receiver,
+// params, or results, after duplicating the field nodes and substituting the
+// Nname nodes inside the field nodes.
+func (subst *inlsubst) fields(oldt *types.Type) []*types.Field {
+ oldfields := oldt.FieldSlice()
+ newfields := make([]*types.Field, len(oldfields))
+ for i := range oldfields {
+ newfields[i] = oldfields[i].Copy()
+ if oldfields[i].Nname != nil {
+ newfields[i].Nname = subst.node(oldfields[i].Nname.(*ir.Name))
+ }
+ }
+ return newfields
+}
+
+// clovar creates a new ONAME node for a local variable or param of a closure
+// inside a function being inlined.
+func (subst *inlsubst) clovar(n *ir.Name) *ir.Name {
+ m := ir.NewNameAt(n.Pos(), n.Sym())
+ m.Class = n.Class
+ m.SetType(n.Type())
+ m.SetTypecheck(1)
+ if n.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ if n.Addrtaken() {
+ m.SetAddrtaken(true)
+ }
+ if n.Used() {
+ m.SetUsed(true)
+ }
+ m.Defn = n.Defn
+
+ m.Curfn = subst.newclofn
+
+ switch defn := n.Defn.(type) {
+ case nil:
+ // ok
+ case *ir.Name:
+ if !n.IsClosureVar() {
+ base.FatalfAt(n.Pos(), "want closure variable, got: %+v", n)
+ }
+ if n.Sym().Pkg != types.LocalPkg {
+			// If the closure came from inlining a function from
+			// another package, we must change the package of the
+			// captured variable to localpkg, so that the fields of
+			// the closure struct are in the local package and can be
+			// accessed even if the name is not exported. If you
+			// disable this code, you can reproduce the problem by
+			// running 'go test go/internal/srcimporter'.
+			// TODO(mdempsky) - maybe change how we create closure structs?
+ m.SetSym(types.LocalPkg.Lookup(n.Sym().Name))
+ }
+ // Make sure any inlvar which is the Defn
+ // of an ONAME closure var is rewritten
+ // during inlining. Don't substitute
+ // if Defn node is outside inlined function.
+ if subst.inlvars[n.Defn.(*ir.Name)] != nil {
+ m.Defn = subst.node(n.Defn)
+ }
+ case *ir.AssignStmt, *ir.AssignListStmt:
+ // Mark node for reassignment at the end of inlsubst.node.
+ m.Defn = &subst.defnMarker
+ case *ir.TypeSwitchGuard:
+ // TODO(mdempsky): Set m.Defn properly. See discussion on #45743.
+ case *ir.RangeStmt:
+ // TODO: Set m.Defn properly if we support inlining range statement in the future.
+ default:
+ base.FatalfAt(n.Pos(), "unexpected Defn: %+v", defn)
+ }
+
+ if n.Outer != nil {
+		// Either the outer variable is defined in the function being inlined,
+ // and we will replace it with the substituted variable, or it is
+ // defined outside the function being inlined, and we should just
+ // skip the outer variable (the closure variable of the function
+ // being inlined).
+ s := subst.node(n.Outer).(*ir.Name)
+ if s == n.Outer {
+ s = n.Outer.Outer
+ }
+ m.Outer = s
+ }
+ return m
+}
+
+// closure does the necessary substitutions for a ClosureExpr n and returns the
+// new closure node.
+func (subst *inlsubst) closure(n *ir.ClosureExpr) ir.Node {
+ // Prior to the subst edit, set a flag in the inlsubst to indicate
+ // that we don't want to update the source positions in the new
+ // closure function. If we do this, it will appear that the
+ // closure itself has things inlined into it, which is not the
+ // case. See issue #46234 for more details. At the same time, we
+ // do want to update the position in the new ClosureExpr (which is
+ // part of the function we're working on). See #49171 for an
+ // example of what happens if we miss that update.
+ newClosurePos := subst.updatedPos(n.Pos())
+ defer func(prev bool) { subst.noPosUpdate = prev }(subst.noPosUpdate)
+ subst.noPosUpdate = true
+
+ //fmt.Printf("Inlining func %v with closure into %v\n", subst.fn, ir.FuncName(ir.CurFunc))
+
+ oldfn := n.Func
+ newfn := ir.NewClosureFunc(oldfn.Pos(), true)
+
+ // Ntype can be nil for -G=3 mode.
+ if oldfn.Nname.Ntype != nil {
+ newfn.Nname.Ntype = subst.node(oldfn.Nname.Ntype).(ir.Ntype)
+ }
+
+ if subst.newclofn != nil {
+ //fmt.Printf("Inlining a closure with a nested closure\n")
+ }
+ prevxfunc := subst.newclofn
+
+ // Mark that we are now substituting within a closure (within the
+ // inlined function), and create new nodes for all the local
+ // vars/params inside this closure.
+ subst.newclofn = newfn
+ newfn.Dcl = nil
+ newfn.ClosureVars = nil
+ for _, oldv := range oldfn.Dcl {
+ newv := subst.clovar(oldv)
+ subst.inlvars[oldv] = newv
+ newfn.Dcl = append(newfn.Dcl, newv)
+ }
+ for _, oldv := range oldfn.ClosureVars {
+ newv := subst.clovar(oldv)
+ subst.inlvars[oldv] = newv
+ newfn.ClosureVars = append(newfn.ClosureVars, newv)
+ }
+
+ // Need to replace ONAME nodes in
+ // newfn.Type().FuncType().Receiver/Params/Results.FieldSlice().Nname
+ oldt := oldfn.Type()
+ newrecvs := subst.fields(oldt.Recvs())
+ var newrecv *types.Field
+ if len(newrecvs) > 0 {
+ newrecv = newrecvs[0]
+ }
+ newt := types.NewSignature(oldt.Pkg(), newrecv,
+ nil, subst.fields(oldt.Params()), subst.fields(oldt.Results()))
+
+ newfn.Nname.SetType(newt)
+ newfn.Body = subst.list(oldfn.Body)
+
+ // Remove the nodes for the current closure from subst.inlvars
+ for _, oldv := range oldfn.Dcl {
+ delete(subst.inlvars, oldv)
+ }
+ for _, oldv := range oldfn.ClosureVars {
+ delete(subst.inlvars, oldv)
+ }
+ // Go back to previous closure func
+ subst.newclofn = prevxfunc
+
+ // Actually create the named function for the closure, now that
+ // the closure is inlined in a specific function.
+ newclo := newfn.OClosure
+ newclo.SetPos(newClosurePos)
+ newclo.SetInit(subst.list(n.Init()))
+ return typecheck.Expr(newclo)
+}
+
+// node recursively copies a node from the saved pristine body of the
+// inlined function, substituting references to input/output
+// parameters with ones to the tmpnames, and substituting returns with
+// assignments to the output.
+func (subst *inlsubst) node(n ir.Node) ir.Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+
+ // Handle captured variables when inlining closures.
+ if n.IsClosureVar() && subst.newclofn == nil {
+ o := n.Outer
+
+		// Deal with the case where a sequence of closures is inlined.
+ // TODO(danscales) - write test case to see if we need to
+ // go up multiple levels.
+ if o.Curfn != ir.CurFunc {
+ o = o.Outer
+ }
+
+ // make sure the outer param matches the inlining location
+ if o == nil || o.Curfn != ir.CurFunc {
+ base.Fatalf("%v: unresolvable capture %v\n", ir.Line(n), n)
+ }
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("substituting captured name %+v -> %+v\n", n, o)
+ }
+ return o
+ }
+
+ if inlvar := subst.inlvars[n]; inlvar != nil { // These will be set during inlnode
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("substituting name %+v -> %+v\n", n, inlvar)
+ }
+ return inlvar
+ }
+
+ if base.Flag.LowerM > 2 {
+ fmt.Printf("not substituting name %+v\n", n)
+ }
+ return n
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ return n
+
+ case ir.OLITERAL, ir.ONIL, ir.OTYPE:
+ // If n is a named constant or type, we can continue
+ // using it in the inline copy. Otherwise, make a copy
+ // so we can update the line number.
+ if n.Sym() != nil {
+ return n
+ }
+
+ case ir.ORETURN:
+ if subst.newclofn != nil {
+ // Don't do special substitutions if inside a closure
+ break
+ }
+ // Because of the above test for subst.newclofn,
+ // this return is guaranteed to belong to the current inlined function.
+ n := n.(*ir.ReturnStmt)
+ init := subst.list(n.Init())
+ if len(subst.retvars) != 0 && len(n.Results) != 0 {
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+
+ // Make a shallow copy of retvars.
+ // Otherwise OINLCALL.Rlist will be the same list,
+ // and later walk and typecheck may clobber it.
+ for _, n := range subst.retvars {
+ as.Lhs.Append(n)
+ }
+ as.Rhs = subst.list(n.Results)
+
+ if subst.fn.Inl.CanDelayResults {
+ for _, n := range as.Lhs {
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+ n.Name().Defn = as
+ }
+ }
+
+ init = append(init, typecheck.Stmt(as))
+ }
+ init = append(init, ir.NewBranchStmt(base.Pos, ir.OGOTO, subst.retlabel))
+ typecheck.Stmts(init)
+ return ir.NewBlockStmt(base.Pos, init)
+
+ case ir.OGOTO, ir.OBREAK, ir.OCONTINUE:
+ if subst.newclofn != nil {
+ // Don't do special substitutions if inside a closure
+ break
+ }
+ n := n.(*ir.BranchStmt)
+ m := ir.Copy(n).(*ir.BranchStmt)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.SetInit(nil)
+ m.Label = translateLabel(n.Label)
+ return m
+
+ case ir.OLABEL:
+ if subst.newclofn != nil {
+ // Don't do special substitutions if inside a closure
+ break
+ }
+ n := n.(*ir.LabelStmt)
+ m := ir.Copy(n).(*ir.LabelStmt)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ m.SetInit(nil)
+ m.Label = translateLabel(n.Label)
+ return m
+
+ case ir.OCLOSURE:
+ return subst.closure(n.(*ir.ClosureExpr))
+
+ }
+
+ m := ir.Copy(n)
+ m.SetPos(subst.updatedPos(m.Pos()))
+ ir.EditChildren(m, subst.edit)
+
+ if subst.newclofn == nil {
+ // Translate any label on FOR, RANGE loops or SWITCH
+ switch m.Op() {
+ case ir.OFOR:
+ m := m.(*ir.ForStmt)
+ m.Label = translateLabel(m.Label)
+ return m
+
+ case ir.ORANGE:
+ m := m.(*ir.RangeStmt)
+ m.Label = translateLabel(m.Label)
+ return m
+
+ case ir.OSWITCH:
+ m := m.(*ir.SwitchStmt)
+ m.Label = translateLabel(m.Label)
+ return m
+ }
+
+ }
+
+ switch m := m.(type) {
+ case *ir.AssignStmt:
+ if lhs, ok := m.X.(*ir.Name); ok && lhs.Defn == &subst.defnMarker {
+ lhs.Defn = m
+ }
+ case *ir.AssignListStmt:
+ for _, lhs := range m.Lhs {
+ if lhs, ok := lhs.(*ir.Name); ok && lhs.Defn == &subst.defnMarker {
+ lhs.Defn = m
+ }
+ }
+ }
+
+ return m
+}
+
+// translateLabel makes a label from an inlined function (if non-nil) unique by
+// appending "·" and the current inlgen value.
+func translateLabel(l *types.Sym) *types.Sym {
+ if l == nil {
+ return nil
+ }
+ p := fmt.Sprintf("%s·%d", l.Name, inlgen)
+ return typecheck.Lookup(p)
+}
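+
+// For illustration (not part of the upstream file): with inlgen == 3, a label
+// "loop" in the inlined body becomes "loop·3", so two inlined copies of the
+// same function in one caller cannot collide on label names.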
+
+func (subst *inlsubst) updatedPos(xpos src.XPos) src.XPos {
+ if subst.noPosUpdate {
+ return xpos
+ }
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ oldbase := pos.Base() // can be nil
+ newbase := subst.bases[oldbase]
+ if newbase == nil {
+ newbase = src.NewInliningBase(oldbase, subst.newInlIndex)
+ subst.bases[oldbase] = newbase
+ }
+ pos.SetBase(newbase)
+ return base.Ctxt.PosTable.XPos(pos)
+}
+
+func pruneUnusedAutos(ll []*ir.Name, vis *hairyVisitor) []*ir.Name {
+ s := make([]*ir.Name, 0, len(ll))
+ for _, n := range ll {
+ if n.Class == ir.PAUTO {
+ if !vis.usedLocals.Has(n) {
+ continue
+ }
+ }
+ s = append(s, n)
+ }
+ return s
+}
+
+// numNonClosures returns the number of functions in list which are not closures.
+func numNonClosures(list []*ir.Func) int {
+ count := 0
+ for _, fn := range list {
+ if fn.OClosure == nil {
+ count++
+ }
+ }
+ return count
+}
+
+func doList(list []ir.Node, do func(ir.Node) bool) bool {
+ for _, x := range list {
+ if x != nil {
+ if do(x) {
+ return true
+ }
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ir/bitset.go b/src/cmd/compile/internal/ir/bitset.go
new file mode 100644
index 0000000..0c7bd54
--- /dev/null
+++ b/src/cmd/compile/internal/ir/bitset.go
@@ -0,0 +1,71 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
+
+func (f bitset8) get2(shift uint8) uint8 {
+ return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset8) set2(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint8)(f) &^= 3 << shift
+ // Set new bits.
+ *(*uint8)(f) |= uint8(b&3) << shift
+}
+
+type bitset16 uint16
+
+func (f *bitset16) set(mask uint16, b bool) {
+ if b {
+ *(*uint16)(f) |= mask
+ } else {
+ *(*uint16)(f) &^= mask
+ }
+}
+
+type bitset32 uint32
+
+func (f *bitset32) set(mask uint32, b bool) {
+ if b {
+ *(*uint32)(f) |= mask
+ } else {
+ *(*uint32)(f) &^= mask
+ }
+}
+
+func (f bitset32) get2(shift uint8) uint8 {
+ return uint8(f>>shift) & 3
+}
+
+// set2 sets two bits in f using the bottom two bits of b.
+func (f *bitset32) set2(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint32)(f) &^= 3 << shift
+ // Set new bits.
+ *(*uint32)(f) |= uint32(b&3) << shift
+}
+
+func (f bitset32) get3(shift uint8) uint8 {
+ return uint8(f>>shift) & 7
+}
+
+// set3 sets three bits in f using the bottom three bits of b.
+func (f *bitset32) set3(shift uint8, b uint8) {
+ // Clear old bits.
+ *(*uint32)(f) &^= 7 << shift
+ // Set new bits.
+ *(*uint32)(f) |= uint32(b&7) << shift
+}
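+
+// Illustrative sketch (not part of the upstream file): how the paired
+// accessors above are meant to be used. set/get2 pack a bool and a small
+// two-bit value into a single flag byte; the function name is hypothetical.
+func exampleBitset8() uint8 {
+	var flags bitset8
+	flags.set(1<<0, true) // turn on a boolean flag in bit 0
+	flags.set2(2, 3)      // store the value 3 in bits 2 and 3
+	return flags.get2(2)  // reads back 3
+}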
diff --git a/src/cmd/compile/internal/ir/cfg.go b/src/cmd/compile/internal/ir/cfg.go
new file mode 100644
index 0000000..d986ac3
--- /dev/null
+++ b/src/cmd/compile/internal/ir/cfg.go
@@ -0,0 +1,26 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+var (
+ // maximum size variable which we will allocate on the stack.
+	// maximum size of a variable which we will allocate on the stack.
+ // Note: the flag smallframes can update this value.
+ MaxStackVarSize = int64(10 * 1024 * 1024)
+
+ // maximum size of implicit variables that we will allocate on the stack.
+ // p := new(T) allocating T on the stack
+ // p := &T{} allocating T on the stack
+ // s := make([]T, n) allocating [n]T on the stack
+ // s := []byte("...") allocating [n]byte on the stack
+ // Note: the flag smallframes can update this value.
+ MaxImplicitStackVarSize = int64(64 * 1024)
+
+ // MaxSmallArraySize is the maximum size of an array which is considered small.
+ // Small arrays will be initialized directly with a sequence of constant stores.
+ // Large arrays will be initialized by copying from a static temp.
+ // 256 bytes was chosen to minimize generated code + statictmp size.
+ MaxSmallArraySize = int64(256)
+)
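+
+// For illustration (not part of the upstream file): with the defaults above,
+// an explicit "var a [1 << 20]byte" may still live on the stack (1 MiB is
+// under MaxStackVarSize), while "p := new([128 << 10]byte)" is forced to the
+// heap because 128 KiB exceeds MaxImplicitStackVarSize.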
diff --git a/src/cmd/compile/internal/ir/class_string.go b/src/cmd/compile/internal/ir/class_string.go
new file mode 100644
index 0000000..11a94c0
--- /dev/null
+++ b/src/cmd/compile/internal/ir/class_string.go
@@ -0,0 +1,30 @@
+// Code generated by "stringer -type=Class name.go"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Pxxx-0]
+ _ = x[PEXTERN-1]
+ _ = x[PAUTO-2]
+ _ = x[PAUTOHEAP-3]
+ _ = x[PPARAM-4]
+ _ = x[PPARAMOUT-5]
+ _ = x[PTYPEPARAM-6]
+ _ = x[PFUNC-7]
+}
+
+const _Class_name = "PxxxPEXTERNPAUTOPAUTOHEAPPPARAMPPARAMOUTPTYPEPARAMPFUNC"
+
+var _Class_index = [...]uint8{0, 4, 11, 16, 25, 31, 40, 50, 55}
+
+func (i Class) String() string {
+ if i >= Class(len(_Class_index)-1) {
+ return "Class(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Class_name[_Class_index[i]:_Class_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/ir/const.go b/src/cmd/compile/internal/ir/const.go
new file mode 100644
index 0000000..eaa4d5b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/const.go
@@ -0,0 +1,99 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+ "math/big"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+func NewBool(b bool) Node {
+ return NewLiteral(constant.MakeBool(b))
+}
+
+func NewInt(v int64) Node {
+ return NewLiteral(constant.MakeInt64(v))
+}
+
+func NewString(s string) Node {
+ return NewLiteral(constant.MakeString(s))
+}
+
+const (
+ // Maximum size in bits for big.Ints before signalling
+ // overflow and also mantissa precision for big.Floats.
+ ConstPrec = 512
+)
+
+func BigFloat(v constant.Value) *big.Float {
+ f := new(big.Float)
+ f.SetPrec(ConstPrec)
+ switch u := constant.Val(v).(type) {
+ case int64:
+ f.SetInt64(u)
+ case *big.Int:
+ f.SetInt(u)
+ case *big.Float:
+ f.Set(u)
+ case *big.Rat:
+ f.SetRat(u)
+ default:
+ base.Fatalf("unexpected: %v", u)
+ }
+ return f
+}
+
+// ConstOverflow reports whether constant value v is too large
+// to represent with type t.
+func ConstOverflow(v constant.Value, t *types.Type) bool {
+ switch {
+ case t.IsInteger():
+ bits := uint(8 * t.Size())
+ if t.IsUnsigned() {
+ x, ok := constant.Uint64Val(v)
+ return !ok || x>>bits != 0
+ }
+ x, ok := constant.Int64Val(v)
+ if x < 0 {
+ x = ^x
+ }
+ return !ok || x>>(bits-1) != 0
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return math.IsInf(float64(f), 0)
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return math.IsInf(f, 0)
+ }
+ case t.IsComplex():
+ ft := types.FloatForComplex(t)
+ return ConstOverflow(constant.Real(v), ft) || ConstOverflow(constant.Imag(v), ft)
+ }
+ base.Fatalf("ConstOverflow: %v, %v", v, t)
+ panic("unreachable")
+}
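+
+// Illustrative usage (not part of the upstream file):
+//
+//	ConstOverflow(constant.MakeInt64(300), types.Types[types.TINT8]) // true: 300 does not fit in int8
+//	ConstOverflow(constant.MakeInt64(127), types.Types[types.TINT8]) // false: 127 fits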
+
+// IsConstNode reports whether n is a Go language constant (as opposed to a
+// compile-time constant).
+//
+// Expressions derived from nil, like string([]byte(nil)), while they
+// may be known at compile time, are not Go language constants.
+func IsConstNode(n Node) bool {
+ return n.Op() == OLITERAL
+}
+
+func IsSmallIntConst(n Node) bool {
+ if n.Op() == OLITERAL {
+ v, ok := constant.Int64Val(n.Val())
+ return ok && int64(int32(v)) == v
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ir/copy.go b/src/cmd/compile/internal/ir/copy.go
new file mode 100644
index 0000000..7da9b24
--- /dev/null
+++ b/src/cmd/compile/internal/ir/copy.go
@@ -0,0 +1,102 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+// A Node may implement the Orig and SetOrig methods to
+// maintain a pointer to the "unrewritten" form of a Node.
+// If a Node does not implement OrigNode, it is its own Orig.
+//
+// Note that both SepCopy and Copy have definitions compatible
+// with a Node that does not implement OrigNode: such a Node
+// is its own Orig, and in that case, that's what both want to return
+// anyway (SepCopy unconditionally, and Copy only when the input
+// is its own Orig as well, but if the output does not implement
+// OrigNode, then neither does the input, making the condition true).
+type OrigNode interface {
+ Node
+ Orig() Node
+ SetOrig(Node)
+}
+
+// origNode may be embedded into a Node to make it implement OrigNode.
+type origNode struct {
+ orig Node `mknode:"-"`
+}
+
+func (n *origNode) Orig() Node { return n.orig }
+func (n *origNode) SetOrig(o Node) { n.orig = o }
+
+// Orig returns the “original” node for n.
+// If n implements OrigNode, Orig returns n.Orig().
+// Otherwise Orig returns n itself.
+func Orig(n Node) Node {
+ if n, ok := n.(OrigNode); ok {
+ o := n.Orig()
+ if o == nil {
+ Dump("Orig nil", n)
+ base.Fatalf("Orig returned nil")
+ }
+ return o
+ }
+ return n
+}
+
+// SepCopy returns a separate shallow copy of n,
+// breaking any Orig link to any other nodes.
+func SepCopy(n Node) Node {
+ n = n.copy()
+ if n, ok := n.(OrigNode); ok {
+ n.SetOrig(n)
+ }
+ return n
+}
+
+// Copy returns a shallow copy of n.
+// If Orig(n) == n, then Orig(Copy(n)) == the copy.
+// Otherwise the Orig link is preserved as well.
+//
+// The specific semantics surrounding Orig are subtle but right for most uses.
+// See issues #26855 and #27765 for pitfalls.
+func Copy(n Node) Node {
+ c := n.copy()
+ if n, ok := n.(OrigNode); ok && n.Orig() == n {
+ c.(OrigNode).SetOrig(c)
+ }
+ return c
+}
+
+// DeepCopy returns a “deep” copy of n, with its entire structure copied
+// (except for shared nodes like ONAME, ONONAME, OLITERAL, and OTYPE).
+// If pos.IsKnown(), it sets the source position of newly allocated Nodes to pos.
+func DeepCopy(pos src.XPos, n Node) Node {
+ var edit func(Node) Node
+ edit = func(x Node) Node {
+ switch x.Op() {
+ case OPACK, ONAME, ONONAME, OLITERAL, ONIL, OTYPE:
+ return x
+ }
+ x = Copy(x)
+ if pos.IsKnown() {
+ x.SetPos(pos)
+ }
+ EditChildren(x, edit)
+ return x
+ }
+ return edit(n)
+}
+
+// DeepCopyList returns a list of deep copies (using DeepCopy) of the nodes in list.
+func DeepCopyList(pos src.XPos, list []Node) []Node {
+ var out []Node
+ for _, n := range list {
+ out = append(out, DeepCopy(pos, n))
+ }
+ return out
+}
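+
+// Illustrative sketch (not part of the upstream file): the difference between
+// the copy helpers above. Copy duplicates only the node itself (its children
+// are shared), while DeepCopy clones the whole tree apart from shared leaves
+// such as ONAME and OLITERAL nodes. The function name is hypothetical.
+func exampleCopies(call *CallExpr) (shallow, deep Node) {
+	shallow = Copy(call)              // one new CallExpr; its children are the originals
+	deep = DeepCopy(src.NoXPos, call) // recursively copied, positions left unchanged
+	return shallow, deep
+}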
diff --git a/src/cmd/compile/internal/ir/dump.go b/src/cmd/compile/internal/ir/dump.go
new file mode 100644
index 0000000..59914ba
--- /dev/null
+++ b/src/cmd/compile/internal/ir/dump.go
@@ -0,0 +1,272 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements textual dumping of arbitrary data structures
+// for debugging purposes. The code is customized for Node graphs
+// and may be used for an alternative view of the node structure.
+
+package ir
+
+import (
+ "fmt"
+ "io"
+ "os"
+ "reflect"
+ "regexp"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// DumpAny is like FDumpAny but prints to stderr.
+func DumpAny(root interface{}, filter string, depth int) {
+ FDumpAny(os.Stderr, root, filter, depth)
+}
+
+// FDumpAny prints the structure of a rooted data structure
+// to w by depth-first traversal of the data structure.
+//
+// The filter parameter is a regular expression. If it is
+// non-empty, only struct fields whose names match filter
+// are printed.
+//
+// The depth parameter controls how deep traversal recurses
+// before it returns (higher value means greater depth).
+// If an empty field filter is given, a good depth default value
+// is 4. A negative depth means no depth limit, which may be fine
+// for small data structures or if there is a non-empty filter.
+//
+// In the output, Node structs are identified by their Op name
+// rather than their type; struct fields with zero values or
+// non-matching field names are omitted, and "…" means recursion
+// depth has been reached or struct fields have been omitted.
+func FDumpAny(w io.Writer, root interface{}, filter string, depth int) {
+ if root == nil {
+ fmt.Fprintln(w, "nil")
+ return
+ }
+
+ if filter == "" {
+ filter = ".*" // default
+ }
+
+ p := dumper{
+ output: w,
+ fieldrx: regexp.MustCompile(filter),
+ ptrmap: make(map[uintptr]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ p.dump(reflect.ValueOf(root), depth)
+ p.printf("\n")
+}
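+
+// Illustrative usage (not part of the upstream file): a typical debugging call
+// is
+//
+//	DumpAny(fn, "Body|Dcl", 4)
+//
+// which prints only struct fields named Body or Dcl (at any nesting level) and
+// stops recursing four levels deep.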
+
+type dumper struct {
+ output io.Writer
+ fieldrx *regexp.Regexp // field name filter
+ ptrmap map[uintptr]int // ptr -> dump line number
+ lastadr string // last address string printed (for shortening)
+
+ // output
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// printf is a convenience wrapper.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(err)
+ }
+}
+
+// addr returns the (hexadecimal) address string of the object
+// represented by x (or "?" if x is not addressable), with the
+// common prefix between this and the prior address replaced by
+// "0x…" to make it easier to visually match addresses.
+func (p *dumper) addr(x reflect.Value) string {
+ if !x.CanAddr() {
+ return "?"
+ }
+ adr := fmt.Sprintf("%p", x.Addr().Interface())
+ s := adr
+ if i := commonPrefixLen(p.lastadr, adr); i > 0 {
+ s = "0x…" + adr[i:]
+ }
+ p.lastadr = adr
+ return s
+}
+
+// dump prints the contents of x.
+func (p *dumper) dump(x reflect.Value, depth int) {
+ if depth == 0 {
+ p.printf("…")
+ return
+ }
+
+ if pos, ok := x.Interface().(src.XPos); ok {
+ p.printf("%s", base.FmtPos(pos))
+ return
+ }
+
+ switch x.Kind() {
+ case reflect.String:
+ p.printf("%q", x.Interface()) // print strings in quotes
+
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), depth-1)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ p.printf("*")
+ ptr := x.Pointer()
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(@%d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ p.dump(x.Elem(), depth) // don't count pointer indirection towards depth
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), depth-1)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ isNode := false
+ if n, ok := x.Interface().(Node); ok {
+ isNode = true
+ p.printf("%s %s {", n.Op().String(), p.addr(x))
+ } else {
+ p.printf("%s {", typ)
+ }
+ p.indent++
+
+ first := true
+ omitted := false
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; types.IsExported(name) {
+ if !p.fieldrx.MatchString(name) {
+ omitted = true
+ continue // field name not selected by filter
+ }
+
+ // special cases
+ if isNode && name == "Op" {
+ omitted = true
+ continue // Op field already printed for Nodes
+ }
+ x := x.Field(i)
+ if isZeroVal(x) {
+ omitted = true
+ continue // exclude zero-valued fields
+ }
+ if n, ok := x.Interface().(Nodes); ok && len(n) == 0 {
+ omitted = true
+ continue // exclude empty Nodes slices
+ }
+
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x, depth-1)
+ p.printf("\n")
+ }
+ }
+ if omitted {
+ p.printf("…\n")
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ p.printf("%v", x.Interface())
+ }
+}
+
+func isZeroVal(x reflect.Value) bool {
+ switch x.Kind() {
+ case reflect.Bool:
+ return !x.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return x.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return x.Uint() == 0
+ case reflect.String:
+ return x.String() == ""
+ case reflect.Interface, reflect.Ptr, reflect.Slice:
+ return x.IsNil()
+ }
+ return false
+}
+
+func commonPrefixLen(a, b string) (i int) {
+ for i < len(a) && i < len(b) && a[i] == b[i] {
+ i++
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/ir/expr.go b/src/cmd/compile/internal/ir/expr.go
new file mode 100644
index 0000000..68303c0
--- /dev/null
+++ b/src/cmd/compile/internal/ir/expr.go
@@ -0,0 +1,1147 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "bytes"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// An Expr is a Node that can appear as an expression.
+type Expr interface {
+ Node
+ isExpr()
+}
+
+// A miniExpr is a miniNode with extra fields common to expressions.
+// TODO(rsc): Once we are sure about the contents, compact the bools
+// into a bit field and leave extra bits available for implementations
+// embedding miniExpr. Right now there are ~60 unused bits sitting here.
+type miniExpr struct {
+ miniNode
+ typ *types.Type
+ init Nodes // TODO(rsc): Don't require every Node to have an init
+ flags bitset8
+}
+
+const (
+ miniExprNonNil = 1 << iota
+ miniExprTransient
+ miniExprBounded
+ miniExprImplicit // for use by implementations; not supported by every Expr
+ miniExprCheckPtr
+)
+
+func (*miniExpr) isExpr() {}
+
+func (n *miniExpr) Type() *types.Type { return n.typ }
+func (n *miniExpr) SetType(x *types.Type) { n.typ = x }
+func (n *miniExpr) NonNil() bool { return n.flags&miniExprNonNil != 0 }
+func (n *miniExpr) MarkNonNil() { n.flags |= miniExprNonNil }
+func (n *miniExpr) Transient() bool { return n.flags&miniExprTransient != 0 }
+func (n *miniExpr) SetTransient(b bool) { n.flags.set(miniExprTransient, b) }
+func (n *miniExpr) Bounded() bool { return n.flags&miniExprBounded != 0 }
+func (n *miniExpr) SetBounded(b bool) { n.flags.set(miniExprBounded, b) }
+func (n *miniExpr) Init() Nodes { return n.init }
+func (n *miniExpr) PtrInit() *Nodes { return &n.init }
+func (n *miniExpr) SetInit(x Nodes) { n.init = x }
+
+// An AddStringExpr is a string concatenation List[0] + List[1] + ... + List[len(List)-1].
+type AddStringExpr struct {
+ miniExpr
+ List Nodes
+ Prealloc *Name
+}
+
+func NewAddStringExpr(pos src.XPos, list []Node) *AddStringExpr {
+ n := &AddStringExpr{}
+ n.pos = pos
+ n.op = OADDSTR
+ n.List = list
+ return n
+}
+
+// An AddrExpr is an address-of expression &X.
+// It may end up being a normal address-of or an allocation of a composite literal.
+type AddrExpr struct {
+ miniExpr
+ X Node
+ Prealloc *Name // preallocated storage if any
+}
+
+func NewAddrExpr(pos src.XPos, x Node) *AddrExpr {
+ n := &AddrExpr{X: x}
+ n.op = OADDR
+ n.pos = pos
+ return n
+}
+
+func (n *AddrExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *AddrExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *AddrExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADDR, OPTRLIT:
+ n.op = op
+ }
+}
+
+// A BasicLit is a literal of basic type.
+type BasicLit struct {
+ miniExpr
+ val constant.Value
+}
+
+func NewBasicLit(pos src.XPos, val constant.Value) Node {
+ n := &BasicLit{val: val}
+ n.op = OLITERAL
+ n.pos = pos
+ if k := val.Kind(); k != constant.Unknown {
+ n.SetType(idealType(k))
+ }
+ return n
+}
+
+func (n *BasicLit) Val() constant.Value { return n.val }
+func (n *BasicLit) SetVal(val constant.Value) { n.val = val }
+
+// A BinaryExpr is a binary expression X Op Y,
+// or Op(X, Y) for builtin functions that do not become calls.
+type BinaryExpr struct {
+ miniExpr
+ X Node
+ Y Node
+}
+
+func NewBinaryExpr(pos src.XPos, op Op, x, y Node) *BinaryExpr {
+ n := &BinaryExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *BinaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OADD, OADDSTR, OAND, OANDNOT, ODIV, OEQ, OGE, OGT, OLE,
+ OLSH, OLT, OMOD, OMUL, ONE, OOR, ORSH, OSUB, OXOR,
+ OCOPY, OCOMPLEX, OUNSAFEADD, OUNSAFESLICE,
+ OEFACE:
+ n.op = op
+ }
+}
+
+// A CallExpr is a function call X(Args).
+type CallExpr struct {
+ miniExpr
+ origNode
+ X Node
+ Args Nodes
+ KeepAlive []*Name // vars to be kept alive until call returns
+ IsDDD bool
+ NoInline bool
+}
+
+func NewCallExpr(pos src.XPos, op Op, fun Node, args []Node) *CallExpr {
+ n := &CallExpr{X: fun}
+ n.pos = pos
+ n.orig = n
+ n.SetOp(op)
+ n.Args = args
+ return n
+}
+
+func (*CallExpr) isStmt() {}
+
+func (n *CallExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAPPEND,
+ OCALL, OCALLFUNC, OCALLINTER, OCALLMETH,
+ ODELETE,
+ OGETG, OGETCALLERPC, OGETCALLERSP,
+ OMAKE, OPRINT, OPRINTN,
+ ORECOVER, ORECOVERFP:
+ n.op = op
+ }
+}
+
+// A ClosureExpr is a function literal expression.
+type ClosureExpr struct {
+ miniExpr
+ Func *Func `mknode:"-"`
+ Prealloc *Name
+ IsGoWrap bool // whether this is wrapper closure of a go statement
+}
+
+// Deprecated: Use NewClosureFunc instead.
+func NewClosureExpr(pos src.XPos, fn *Func) *ClosureExpr {
+ n := &ClosureExpr{Func: fn}
+ n.op = OCLOSURE
+ n.pos = pos
+ return n
+}
+
+// A CompLitExpr is a composite literal Type{Vals}.
+// Before type-checking, the type is Ntype.
+type CompLitExpr struct {
+ miniExpr
+ origNode
+ Ntype Ntype
+ List Nodes // initialized values
+ Prealloc *Name
+ Len int64 // backing array length for OSLICELIT
+}
+
+func NewCompLitExpr(pos src.XPos, op Op, typ Ntype, list []Node) *CompLitExpr {
+ n := &CompLitExpr{Ntype: typ}
+ n.pos = pos
+ n.SetOp(op)
+ n.List = list
+ n.orig = n
+ return n
+}
+
+func (n *CompLitExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *CompLitExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (n *CompLitExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OARRAYLIT, OCOMPLIT, OMAPLIT, OSTRUCTLIT, OSLICELIT:
+ n.op = op
+ }
+}
+
+type ConstExpr struct {
+ miniExpr
+ origNode
+ val constant.Value
+}
+
+func NewConstExpr(val constant.Value, orig Node) Node {
+ n := &ConstExpr{val: val}
+ n.op = OLITERAL
+ n.pos = orig.Pos()
+ n.orig = orig
+ n.SetType(orig.Type())
+ n.SetTypecheck(orig.Typecheck())
+ n.SetDiag(orig.Diag())
+ return n
+}
+
+func (n *ConstExpr) Sym() *types.Sym { return n.orig.Sym() }
+func (n *ConstExpr) Val() constant.Value { return n.val }
+
+// A ConvExpr is a conversion Type(X).
+// It may end up being a value or a type.
+type ConvExpr struct {
+ miniExpr
+ X Node
+ NonEscaping bool // The allocation needed for the conversion to interface is known not to escape
+}
+
+func NewConvExpr(pos src.XPos, op Op, typ *types.Type, x Node) *ConvExpr {
+ n := &ConvExpr{X: x}
+ n.pos = pos
+ n.typ = typ
+ n.SetOp(op)
+ return n
+}
+
+func (n *ConvExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ConvExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *ConvExpr) CheckPtr() bool { return n.flags&miniExprCheckPtr != 0 }
+func (n *ConvExpr) SetCheckPtr(b bool) { n.flags.set(miniExprCheckPtr, b) }
+
+func (n *ConvExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OCONV, OCONVIFACE, OCONVIDATA, OCONVNOP, OBYTES2STR, OBYTES2STRTMP, ORUNES2STR, OSTR2BYTES, OSTR2BYTESTMP, OSTR2RUNES, ORUNESTR, OSLICE2ARRPTR:
+ n.op = op
+ }
+}
+
+// An IndexExpr is an index expression X[Index].
+type IndexExpr struct {
+ miniExpr
+ X Node
+ Index Node
+ Assigned bool
+}
+
+func NewIndexExpr(pos src.XPos, x, index Node) *IndexExpr {
+ n := &IndexExpr{X: x, Index: index}
+ n.pos = pos
+ n.op = OINDEX
+ return n
+}
+
+func (n *IndexExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OINDEX, OINDEXMAP:
+ n.op = op
+ }
+}
+
+// A KeyExpr is a Key: Value composite literal key.
+type KeyExpr struct {
+ miniExpr
+ Key Node
+ Value Node
+}
+
+func NewKeyExpr(pos src.XPos, key, value Node) *KeyExpr {
+ n := &KeyExpr{Key: key, Value: value}
+ n.pos = pos
+ n.op = OKEY
+ return n
+}
+
+// A StructKeyExpr is a Field: Value composite literal key.
+type StructKeyExpr struct {
+ miniExpr
+ Field *types.Field
+ Value Node
+}
+
+func NewStructKeyExpr(pos src.XPos, field *types.Field, value Node) *StructKeyExpr {
+ n := &StructKeyExpr{Field: field, Value: value}
+ n.pos = pos
+ n.op = OSTRUCTKEY
+ return n
+}
+
+func (n *StructKeyExpr) Sym() *types.Sym { return n.Field.Sym }
+
+// An InlinedCallExpr is an inlined function call.
+type InlinedCallExpr struct {
+ miniExpr
+ Body Nodes
+ ReturnVars Nodes // must be side-effect free
+}
+
+func NewInlinedCallExpr(pos src.XPos, body, retvars []Node) *InlinedCallExpr {
+ n := &InlinedCallExpr{}
+ n.pos = pos
+ n.op = OINLCALL
+ n.Body = body
+ n.ReturnVars = retvars
+ return n
+}
+
+func (n *InlinedCallExpr) SingleResult() Node {
+ if have := len(n.ReturnVars); have != 1 {
+ base.FatalfAt(n.Pos(), "inlined call has %v results, expected 1", have)
+ }
+ if !n.Type().HasShape() && n.ReturnVars[0].Type().HasShape() {
+ // If the type of the call is not a shape, but the type of the return value
+ // is a shape, we need to do an implicit conversion, so the real type
+ // of n is maintained.
+ r := NewConvExpr(n.Pos(), OCONVNOP, n.Type(), n.ReturnVars[0])
+ r.SetTypecheck(1)
+ return r
+ }
+ return n.ReturnVars[0]
+}
+
+// A LogicalExpr is an expression X Op Y where Op is && or ||.
+// It is separate from BinaryExpr to make room for statements
+// that must be executed before Y but after X.
+type LogicalExpr struct {
+ miniExpr
+ X Node
+ Y Node
+}
+
+func NewLogicalExpr(pos src.XPos, op Op, x, y Node) *LogicalExpr {
+ n := &LogicalExpr{X: x, Y: y}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *LogicalExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OANDAND, OOROR:
+ n.op = op
+ }
+}
+
+// A MakeExpr is a make expression: make(Type[, Len[, Cap]]).
+// Op is OMAKECHAN, OMAKEMAP, OMAKESLICE, or OMAKESLICECOPY,
+// but *not* OMAKE (that's a pre-typechecking CallExpr).
+type MakeExpr struct {
+ miniExpr
+ Len Node
+ Cap Node
+}
+
+func NewMakeExpr(pos src.XPos, op Op, len, cap Node) *MakeExpr {
+ n := &MakeExpr{Len: len, Cap: cap}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *MakeExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OMAKECHAN, OMAKEMAP, OMAKESLICE, OMAKESLICECOPY:
+ n.op = op
+ }
+}
+
+// A NilExpr represents the predefined untyped constant nil.
+// (It may be copied and assigned a type, though.)
+type NilExpr struct {
+ miniExpr
+ Sym_ *types.Sym // TODO: Remove
+}
+
+func NewNilExpr(pos src.XPos) *NilExpr {
+ n := &NilExpr{}
+ n.pos = pos
+ n.op = ONIL
+ return n
+}
+
+func (n *NilExpr) Sym() *types.Sym { return n.Sym_ }
+func (n *NilExpr) SetSym(x *types.Sym) { n.Sym_ = x }
+
+// A ParenExpr is a parenthesized expression (X).
+// It may end up being a value or a type.
+type ParenExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewParenExpr(pos src.XPos, x Node) *ParenExpr {
+ n := &ParenExpr{X: x}
+ n.op = OPAREN
+ n.pos = pos
+ return n
+}
+
+func (n *ParenExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *ParenExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (*ParenExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *ParenExpr) SetOTYPE(t *types.Type) {
+ n.op = OTYPE
+ n.typ = t
+ t.SetNod(n)
+}
+
+// A RawOrigExpr represents an arbitrary Go expression as a string value.
+// When printed in diagnostics, the string value is written out exactly as-is.
+type RawOrigExpr struct {
+ miniExpr
+ Raw string
+}
+
+func NewRawOrigExpr(pos src.XPos, op Op, raw string) *RawOrigExpr {
+ n := &RawOrigExpr{Raw: raw}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+// A ResultExpr represents a direct access to a result.
+type ResultExpr struct {
+ miniExpr
+ Index int64 // index of the result expr.
+}
+
+func NewResultExpr(pos src.XPos, typ *types.Type, index int64) *ResultExpr {
+ n := &ResultExpr{Index: index}
+ n.pos = pos
+ n.op = ORESULT
+ n.typ = typ
+ return n
+}
+
+// A LinksymOffsetExpr refers to an offset within a global variable.
+// It is like a SelectorExpr but without the field name.
+type LinksymOffsetExpr struct {
+ miniExpr
+ Linksym *obj.LSym
+ Offset_ int64
+}
+
+func NewLinksymOffsetExpr(pos src.XPos, lsym *obj.LSym, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ n := &LinksymOffsetExpr{Linksym: lsym, Offset_: offset}
+ n.typ = typ
+ n.op = OLINKSYMOFFSET
+ return n
+}
+
+// NewLinksymExpr is NewLinksymOffsetExpr, but with offset fixed at 0.
+func NewLinksymExpr(pos src.XPos, lsym *obj.LSym, typ *types.Type) *LinksymOffsetExpr {
+ return NewLinksymOffsetExpr(pos, lsym, 0, typ)
+}
+
+// NewNameOffsetExpr is NewLinksymOffsetExpr, but taking a *Name
+// representing a global variable instead of an *obj.LSym directly.
+func NewNameOffsetExpr(pos src.XPos, name *Name, offset int64, typ *types.Type) *LinksymOffsetExpr {
+ if name == nil || IsBlank(name) || !(name.Op() == ONAME && name.Class == PEXTERN) {
+ base.FatalfAt(pos, "cannot take offset of nil, blank name or non-global variable: %v", name)
+ }
+ return NewLinksymOffsetExpr(pos, name.Linksym(), offset, typ)
+}
+
+// A SelectorExpr is a selector expression X.Sel.
+type SelectorExpr struct {
+ miniExpr
+ X Node
+ // Sel is the name of the field or method being selected, without (in the
+ // case of methods) any preceding type specifier. If the field/method is
+	// exported, then the Sym uses the local package regardless of the package
+ // of the containing type.
+ Sel *types.Sym
+ // The actual selected field - may not be filled in until typechecking.
+ Selection *types.Field
+ Prealloc *Name // preallocated storage for OMETHVALUE, if any
+}
+
+func NewSelectorExpr(pos src.XPos, op Op, x Node, sel *types.Sym) *SelectorExpr {
+ n := &SelectorExpr{X: x, Sel: sel}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *SelectorExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OXDOT, ODOT, ODOTPTR, ODOTMETH, ODOTINTER, OMETHVALUE, OMETHEXPR:
+ n.op = op
+ }
+}
+
+func (n *SelectorExpr) Sym() *types.Sym { return n.Sel }
+func (n *SelectorExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *SelectorExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+func (n *SelectorExpr) Offset() int64 { return n.Selection.Offset }
+
+func (n *SelectorExpr) FuncName() *Name {
+ if n.Op() != OMETHEXPR {
+ panic(n.no("FuncName"))
+ }
+ fn := NewNameAt(n.Selection.Pos, MethodSym(n.X.Type(), n.Sel))
+ fn.Class = PFUNC
+ fn.SetType(n.Type())
+ if n.Selection.Nname != nil {
+ // TODO(austin): Nname is nil for interface method
+ // expressions (I.M), so we can't attach a Func to
+ // those here. reflectdata.methodWrapper generates the
+ // Func.
+ fn.Func = n.Selection.Nname.(*Name).Func
+ }
+ return fn
+}
+
+// Before type-checking, bytes.Buffer is a SelectorExpr.
+// After type-checking it becomes a Name.
+func (*SelectorExpr) CanBeNtype() {}
+
+// A SliceExpr is a slice expression X[Low:High] or X[Low:High:Max].
+type SliceExpr struct {
+ miniExpr
+ X Node
+ Low Node
+ High Node
+ Max Node
+}
+
+func NewSliceExpr(pos src.XPos, op Op, x, low, high, max Node) *SliceExpr {
+ n := &SliceExpr{X: x, Low: low, High: high, Max: max}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *SliceExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OSLICE, OSLICEARR, OSLICESTR, OSLICE3, OSLICE3ARR:
+ n.op = op
+ }
+}
+
+// IsSlice3 reports whether o is a slice3 op (OSLICE3, OSLICE3ARR).
+// o must be a slicing op.
+func (o Op) IsSlice3() bool {
+ switch o {
+ case OSLICE, OSLICEARR, OSLICESTR:
+ return false
+ case OSLICE3, OSLICE3ARR:
+ return true
+ }
+ base.Fatalf("IsSlice3 op %v", o)
+ return false
+}
+
+// A SliceHeaderExpr constructs a slice header from its parts.
+type SliceHeaderExpr struct {
+ miniExpr
+ Ptr Node
+ Len Node
+ Cap Node
+}
+
+func NewSliceHeaderExpr(pos src.XPos, typ *types.Type, ptr, len, cap Node) *SliceHeaderExpr {
+ n := &SliceHeaderExpr{Ptr: ptr, Len: len, Cap: cap}
+ n.pos = pos
+ n.op = OSLICEHEADER
+ n.typ = typ
+ return n
+}
+
+// A StarExpr is a dereference expression *X.
+// It may end up being a value or a type.
+type StarExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewStarExpr(pos src.XPos, x Node) *StarExpr {
+ n := &StarExpr{X: x}
+ n.op = ODEREF
+ n.pos = pos
+ return n
+}
+
+func (n *StarExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *StarExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+func (*StarExpr) CanBeNtype() {}
+
+// SetOTYPE changes n to be an OTYPE node returning t,
+// like all the type nodes in type.go.
+func (n *StarExpr) SetOTYPE(t *types.Type) {
+ n.op = OTYPE
+ n.X = nil
+ n.typ = t
+ t.SetNod(n)
+}
+
+// A TypeAssertExpr is a selector expression X.(Type).
+// Before type-checking, the type is Ntype.
+type TypeAssertExpr struct {
+ miniExpr
+ X Node
+ Ntype Ntype
+
+ // Runtime type information provided by walkDotType for
+ // assertions from non-empty interface to concrete type.
+ Itab *AddrExpr `mknode:"-"` // *runtime.itab for Type implementing X's type
+}
+
+func NewTypeAssertExpr(pos src.XPos, x Node, typ Ntype) *TypeAssertExpr {
+ n := &TypeAssertExpr{X: x, Ntype: typ}
+ n.pos = pos
+ n.op = ODOTTYPE
+ return n
+}
+
+func (n *TypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODOTTYPE, ODOTTYPE2:
+ n.op = op
+ }
+}
+
+// A DynamicTypeAssertExpr asserts that X is of dynamic type T.
+type DynamicTypeAssertExpr struct {
+ miniExpr
+ X Node
+ // N = not an interface
+ // E = empty interface
+ // I = nonempty interface
+ // For E->N, T is a *runtime.type for N
+ // For I->N, T is a *runtime.itab for N+I
+ // For E->I, T is a *runtime.type for I
+ // For I->I, ditto
+ // For I->E, T is a *runtime.type for interface{} (unnecessary, but just to fill in the slot)
+ // For E->E, ditto
+ T Node
+}
+
+func NewDynamicTypeAssertExpr(pos src.XPos, op Op, x, t Node) *DynamicTypeAssertExpr {
+ n := &DynamicTypeAssertExpr{X: x, T: t}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *DynamicTypeAssertExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case ODYNAMICDOTTYPE, ODYNAMICDOTTYPE2:
+ n.op = op
+ }
+}
+
+// A UnaryExpr is a unary expression Op X,
+// or Op(X) for a builtin function that does not end up being a call.
+type UnaryExpr struct {
+ miniExpr
+ X Node
+}
+
+func NewUnaryExpr(pos src.XPos, op Op, x Node) *UnaryExpr {
+ n := &UnaryExpr{X: x}
+ n.pos = pos
+ n.SetOp(op)
+ return n
+}
+
+func (n *UnaryExpr) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OBITNOT, ONEG, ONOT, OPLUS, ORECV,
+ OALIGNOF, OCAP, OCLOSE, OIMAG, OLEN, ONEW,
+ OOFFSETOF, OPANIC, OREAL, OSIZEOF,
+ OCHECKNIL, OCFUNC, OIDATA, OITAB, OSPTR, OVARDEF, OVARKILL, OVARLIVE:
+ n.op = op
+ }
+}
+
+// Probably temporary: using Implicit() flag to mark generic function nodes that
+// are called to make getGfInfo analysis easier in one pre-order pass.
+func (n *InstExpr) Implicit() bool { return n.flags&miniExprImplicit != 0 }
+func (n *InstExpr) SetImplicit(b bool) { n.flags.set(miniExprImplicit, b) }
+
+// An InstExpr is a generic function or type instantiation.
+type InstExpr struct {
+ miniExpr
+ X Node
+ Targs []Node
+}
+
+func NewInstExpr(pos src.XPos, op Op, x Node, targs []Node) *InstExpr {
+ n := &InstExpr{X: x, Targs: targs}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func IsZero(n Node) bool {
+ switch n.Op() {
+ case ONIL:
+ return true
+
+ case OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.String:
+ return constant.StringVal(u) == ""
+ case constant.Bool:
+ return !constant.BoolVal(u)
+ default:
+ return constant.Sign(u) == 0
+ }
+
+ case OARRAYLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ if n1.Op() == OKEY {
+ n1 = n1.(*KeyExpr).Value
+ }
+ if !IsZero(n1) {
+ return false
+ }
+ }
+ return true
+
+ case OSTRUCTLIT:
+ n := n.(*CompLitExpr)
+ for _, n1 := range n.List {
+ n1 := n1.(*StructKeyExpr)
+ if !IsZero(n1.Value) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
+
+// lvalue etc
+func IsAddressable(n Node) bool {
+ switch n.Op() {
+ case OINDEX:
+ n := n.(*IndexExpr)
+ if n.X.Type() != nil && n.X.Type().IsArray() {
+ return IsAddressable(n.X)
+ }
+ if n.X.Type() != nil && n.X.Type().IsString() {
+ return false
+ }
+ fallthrough
+ case ODEREF, ODOTPTR:
+ return true
+
+ case ODOT:
+ n := n.(*SelectorExpr)
+ return IsAddressable(n.X)
+
+ case ONAME:
+ n := n.(*Name)
+ if n.Class == PFUNC {
+ return false
+ }
+ return true
+
+ case OLINKSYMOFFSET:
+ return true
+ }
+
+ return false
+}
+
+func StaticValue(n Node) Node {
+ for {
+ if n.Op() == OCONVNOP {
+ n = n.(*ConvExpr).X
+ continue
+ }
+
+ if n.Op() == OINLCALL {
+ n = n.(*InlinedCallExpr).SingleResult()
+ continue
+ }
+
+ n1 := staticValue1(n)
+ if n1 == nil {
+ return n
+ }
+ n = n1
+ }
+}
+
+// staticValue1 implements a simple SSA-like optimization. If n is a local variable
+// that is initialized and never reassigned, staticValue1 returns the initializer
+// expression. Otherwise, it returns nil.
+func staticValue1(nn Node) Node {
+ if nn.Op() != ONAME {
+ return nil
+ }
+ n := nn.(*Name)
+ if n.Class != PAUTO {
+ return nil
+ }
+
+ defn := n.Defn
+ if defn == nil {
+ return nil
+ }
+
+ var rhs Node
+FindRHS:
+ switch defn.Op() {
+ case OAS:
+ defn := defn.(*AssignStmt)
+ rhs = defn.Y
+ case OAS2:
+ defn := defn.(*AssignListStmt)
+ for i, lhs := range defn.Lhs {
+ if lhs == n {
+ rhs = defn.Rhs[i]
+ break FindRHS
+ }
+ }
+ base.Fatalf("%v missing from LHS of %v", n, defn)
+ default:
+ return nil
+ }
+ if rhs == nil {
+ base.Fatalf("RHS is nil: %v", defn)
+ }
+
+ if reassigned(n) {
+ return nil
+ }
+
+ return rhs
+}
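+
+// For illustration (not part of the upstream file): given
+//
+//	x := 7
+//	y := x
+//
+// where neither variable is ever reassigned, StaticValue(y) follows the Defn
+// chain and returns the OLITERAL node for 7.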
+
+// reassigned takes an ONAME node, walks the function in which it is defined,
+// and reports whether the name has any assignments other than its declaration.
+// NB: global variables are always considered to be re-assigned.
+// TODO: handle initial declaration not including an assignment and followed by a single assignment?
+func reassigned(name *Name) bool {
+ if name.Op() != ONAME {
+ base.Fatalf("reassigned %v", name)
+ }
+ // no way to reliably check for no-reassignment of globals, assume it can be
+ if name.Curfn == nil {
+ return true
+ }
+
+ // TODO(mdempsky): This is inefficient and becoming increasingly
+ // unwieldy. Figure out a way to generalize escape analysis's
+ // reassignment detection for use by inlining and devirtualization.
+
+ // isName reports whether n is a reference to name.
+ isName := func(x Node) bool {
+ n, ok := x.(*Name)
+ return ok && n.Canonical() == name
+ }
+
+ var do func(n Node) bool
+ do = func(n Node) bool {
+ switch n.Op() {
+ case OAS:
+ n := n.(*AssignStmt)
+ if isName(n.X) && n != name.Defn {
+ return true
+ }
+ case OAS2, OAS2FUNC, OAS2MAPR, OAS2DOTTYPE, OAS2RECV, OSELRECV2:
+ n := n.(*AssignListStmt)
+ for _, p := range n.Lhs {
+ if isName(p) && n != name.Defn {
+ return true
+ }
+ }
+ case OADDR:
+ n := n.(*AddrExpr)
+ if isName(OuterValue(n.X)) {
+ return true
+ }
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ if Any(n.Func, do) {
+ return true
+ }
+ }
+ return false
+ }
+ return Any(name.Curfn, do)
+}
+
+// IsIntrinsicCall reports whether the compiler back end will treat the call as an intrinsic operation.
+var IsIntrinsicCall = func(*CallExpr) bool { return false }
+
+// SameSafeExpr checks whether it is safe to reuse one of l and r
+// instead of computing both. SameSafeExpr assumes that l and r are
+// used in the same statement or expression. In order for it to be
+// safe to reuse l or r, they must:
+// * be the same expression
+// * not have side-effects (no function calls, no channel ops);
+// however, panics are ok
+// * not cause inappropriate aliasing; e.g. two string-to-[]byte
+//   conversions must result in two distinct slices
+//
+// The handling of OINDEXMAP is subtle. OINDEXMAP can occur both
+// as an lvalue (map assignment) and an rvalue (map access). This is
+// currently OK, since the only place SameSafeExpr gets used on an
+// lvalue expression is for OSLICE and OAPPEND optimizations, and it
+// is correct in those settings.
+func SameSafeExpr(l Node, r Node) bool {
+ if l.Op() != r.Op() || !types.Identical(l.Type(), r.Type()) {
+ return false
+ }
+
+ switch l.Op() {
+ case ONAME:
+ return l == r
+
+ case ODOT, ODOTPTR:
+ l := l.(*SelectorExpr)
+ r := r.(*SelectorExpr)
+ return l.Sel != nil && r.Sel != nil && l.Sel == r.Sel && SameSafeExpr(l.X, r.X)
+
+ case ODEREF:
+ l := l.(*StarExpr)
+ r := r.(*StarExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case ONOT, OBITNOT, OPLUS, ONEG:
+ l := l.(*UnaryExpr)
+ r := r.(*UnaryExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case OCONVNOP:
+ l := l.(*ConvExpr)
+ r := r.(*ConvExpr)
+ return SameSafeExpr(l.X, r.X)
+
+ case OCONV:
+ l := l.(*ConvExpr)
+ r := r.(*ConvExpr)
+ // Some conversions can't be reused, such as []byte(str).
+ // Allow only numeric-ish types. This is a bit conservative.
+ return types.IsSimple[l.Type().Kind()] && SameSafeExpr(l.X, r.X)
+
+ case OINDEX, OINDEXMAP:
+ l := l.(*IndexExpr)
+ r := r.(*IndexExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Index, r.Index)
+
+ case OADD, OSUB, OOR, OXOR, OMUL, OLSH, ORSH, OAND, OANDNOT, ODIV, OMOD:
+ l := l.(*BinaryExpr)
+ r := r.(*BinaryExpr)
+ return SameSafeExpr(l.X, r.X) && SameSafeExpr(l.Y, r.Y)
+
+ case OLITERAL:
+ return constant.Compare(l.Val(), token.EQL, r.Val())
+
+ case ONIL:
+ return true
+ }
+
+ return false
+}
+
+// ShouldCheckPtr reports whether pointer checking should be enabled for
+// function fn at a given level. See debugHelpFooter for defined
+// levels.
+func ShouldCheckPtr(fn *Func, level int) bool {
+ return base.Debug.Checkptr >= level && fn.Pragma&NoCheckPtr == 0
+}
+
+// IsReflectHeaderDataField reports whether l is an expression p.Data
+// where p has type reflect.SliceHeader or reflect.StringHeader.
+func IsReflectHeaderDataField(l Node) bool {
+ if l.Type() != types.Types[types.TUINTPTR] {
+ return false
+ }
+
+ var tsym *types.Sym
+ switch l.Op() {
+ case ODOT:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Sym()
+ case ODOTPTR:
+ l := l.(*SelectorExpr)
+ tsym = l.X.Type().Elem().Sym()
+ default:
+ return false
+ }
+
+ if tsym == nil || l.Sym().Name != "Data" || tsym.Pkg.Path != "reflect" {
+ return false
+ }
+ return tsym.Name == "SliceHeader" || tsym.Name == "StringHeader"
+}
+
+func ParamNames(ft *types.Type) []Node {
+ args := make([]Node, ft.NumParams())
+ for i, f := range ft.Params().FieldSlice() {
+ args[i] = AsNode(f.Nname)
+ }
+ return args
+}
+
+// MethodSym returns the method symbol representing a method name
+// associated with a specific receiver type.
+//
+// Method symbols can be used to distinguish the same method appearing
+// in different method sets. For example, T.M and (*T).M have distinct
+// method symbols.
+//
+// The returned symbol will be marked as a function.
+func MethodSym(recv *types.Type, msym *types.Sym) *types.Sym {
+ sym := MethodSymSuffix(recv, msym, "")
+ sym.SetFunc(true)
+ return sym
+}
+
+// MethodSymSuffix is like MethodSym, but allows attaching a
+// distinguisher suffix. To avoid collisions, the suffix must not
+// start with a letter, number, or period.
+func MethodSymSuffix(recv *types.Type, msym *types.Sym, suffix string) *types.Sym {
+ if msym.IsBlank() {
+ base.Fatalf("blank method name")
+ }
+
+ rsym := recv.Sym()
+ if recv.IsPtr() {
+ if rsym != nil {
+ base.Fatalf("declared pointer receiver type: %v", recv)
+ }
+ rsym = recv.Elem().Sym()
+ }
+
+ // Find the package the receiver type appeared in. For
+ // anonymous receiver types (i.e., anonymous structs with
+ // embedded fields), use the "go" pseudo-package instead.
+ rpkg := Pkgs.Go
+ if rsym != nil {
+ rpkg = rsym.Pkg
+ }
+
+ var b bytes.Buffer
+ if recv.IsPtr() {
+ // The parentheses aren't really necessary, but
+ // they're pretty traditional at this point.
+ fmt.Fprintf(&b, "(%-S)", recv)
+ } else {
+ fmt.Fprintf(&b, "%-S", recv)
+ }
+
+ // A particular receiver type may have multiple non-exported
+ // methods with the same name. To disambiguate them, include a
+ // package qualifier for names that came from a different
+ // package than the receiver type.
+ if !types.IsExported(msym.Name) && msym.Pkg != rpkg {
+ b.WriteString(".")
+ b.WriteString(msym.Pkg.Prefix)
+ }
+
+ b.WriteString(".")
+ b.WriteString(msym.Name)
+ b.WriteString(suffix)
+
+ return rpkg.LookupBytes(b.Bytes())
+}
+
+// MethodExprName returns the ONAME representing the method
+// referenced by expression n, which must be a method selector,
+// method expression, or method value.
+func MethodExprName(n Node) *Name {
+ name, _ := MethodExprFunc(n).Nname.(*Name)
+ return name
+}
+
+// MethodExprFunc is like MethodExprName, but returns the types.Field instead.
+func MethodExprFunc(n Node) *types.Field {
+ switch n.Op() {
+ case ODOTMETH, OMETHEXPR, OMETHVALUE:
+ return n.(*SelectorExpr).Selection
+ }
+ base.Fatalf("unexpected node: %v (%v)", n, n.Op())
+ panic("unreachable")
+}
diff --git a/src/cmd/compile/internal/ir/fmt.go b/src/cmd/compile/internal/ir/fmt.go
new file mode 100644
index 0000000..0331885
--- /dev/null
+++ b/src/cmd/compile/internal/ir/fmt.go
@@ -0,0 +1,1359 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "io"
+ "math"
+ "os"
+ "path/filepath"
+ "reflect"
+ "strings"
+
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Op
+
+var OpNames = []string{
+ OADDR: "&",
+ OADD: "+",
+ OADDSTR: "+",
+ OALIGNOF: "unsafe.Alignof",
+ OANDAND: "&&",
+ OANDNOT: "&^",
+ OAND: "&",
+ OAPPEND: "append",
+ OAS: "=",
+ OAS2: "=",
+ OBREAK: "break",
+ OCALL: "function call", // not actual syntax
+ OCAP: "cap",
+ OCASE: "case",
+ OCLOSE: "close",
+ OCOMPLEX: "complex",
+ OBITNOT: "^",
+ OCONTINUE: "continue",
+ OCOPY: "copy",
+ ODELETE: "delete",
+ ODEFER: "defer",
+ ODIV: "/",
+ OEQ: "==",
+ OFALL: "fallthrough",
+ OFOR: "for",
+	OFORUNTIL: "foruntil", // not actual syntax; used to avoid off-end pointer live on backedge.
+ OGE: ">=",
+ OGOTO: "goto",
+ OGT: ">",
+ OIF: "if",
+ OIMAG: "imag",
+ OINLMARK: "inlmark",
+ ODEREF: "*",
+ OLEN: "len",
+ OLE: "<=",
+ OLSH: "<<",
+ OLT: "<",
+ OMAKE: "make",
+ ONEG: "-",
+ OMOD: "%",
+ OMUL: "*",
+ ONEW: "new",
+ ONE: "!=",
+ ONOT: "!",
+ OOFFSETOF: "unsafe.Offsetof",
+ OOROR: "||",
+ OOR: "|",
+ OPANIC: "panic",
+ OPLUS: "+",
+ OPRINTN: "println",
+ OPRINT: "print",
+ ORANGE: "range",
+ OREAL: "real",
+ ORECV: "<-",
+ ORECOVER: "recover",
+ ORETURN: "return",
+ ORSH: ">>",
+ OSELECT: "select",
+ OSEND: "<-",
+ OSIZEOF: "unsafe.Sizeof",
+ OSUB: "-",
+ OSWITCH: "switch",
+ OUNSAFEADD: "unsafe.Add",
+ OUNSAFESLICE: "unsafe.Slice",
+ OXOR: "^",
+}
+
+// GoString returns the Go syntax for the Op, or else its name.
+func (o Op) GoString() string {
+ if int(o) < len(OpNames) && OpNames[o] != "" {
+ return OpNames[o]
+ }
+ return o.String()
+}
+
+// Format implements formatting for an Op.
+// The valid formats are:
+//
+// %v Go syntax ("+", "<-", "print")
+// %+v Debug syntax ("ADD", "RECV", "PRINT")
+//
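+// For example (a sketch):
+//
+//	fmt.Sprintf("%v", OADD)  // "+"
+//	fmt.Sprintf("%+v", OADD) // "ADD"
+//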
+func (o Op) Format(s fmt.State, verb rune) {
+ switch verb {
+ default:
+ fmt.Fprintf(s, "%%!%c(Op=%d)", verb, int(o))
+ case 'v':
+ if s.Flag('+') {
+ // %+v is OMUL instead of "*"
+ io.WriteString(s, o.String())
+ return
+ }
+ io.WriteString(s, o.GoString())
+ }
+}
+
+// Node
+
+// FmtNode implements formatting for a Node n.
+// Every Node implementation must define a Format method that calls FmtNode.
+// The valid formats are:
+//
+// %v Go syntax
+// %L Go syntax followed by " (type T)" if type is known.
+// %+v Debug syntax, as in Dump.
+//
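+// For example, for a node n representing the expression x + 1 of type
+// int (a sketch):
+//
+//	fmt.Sprintf("%v", n) // "x + 1"
+//	fmt.Sprintf("%L", n) // "x + 1 (type int)"
+//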
+func fmtNode(n Node, s fmt.State, verb rune) {
+ // %+v prints Dump.
+ // Otherwise we print Go syntax.
+ if s.Flag('+') && verb == 'v' {
+ dumpNode(s, n, 1)
+ return
+ }
+
+ if verb != 'v' && verb != 'S' && verb != 'L' {
+ fmt.Fprintf(s, "%%!%c(*Node=%p)", verb, n)
+ return
+ }
+
+ if n == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ t := n.Type()
+ if verb == 'L' && t != nil {
+ if t.Kind() == types.TNIL {
+ fmt.Fprint(s, "nil")
+ } else if n.Op() == ONAME && n.Name().AutoTemp() {
+ fmt.Fprintf(s, "%v value", t)
+ } else {
+ fmt.Fprintf(s, "%v (type %v)", n, t)
+ }
+ return
+ }
+
+	// TODO: inlining produces expressions with ninits. We can't print these yet.
+
+ if OpPrec[n.Op()] < 0 {
+ stmtFmt(n, s)
+ return
+ }
+
+ exprFmt(n, s, 0)
+}
+
+var OpPrec = []int{
+ OALIGNOF: 8,
+ OAPPEND: 8,
+ OBYTES2STR: 8,
+ OARRAYLIT: 8,
+ OSLICELIT: 8,
+ ORUNES2STR: 8,
+ OCALLFUNC: 8,
+ OCALLINTER: 8,
+ OCALLMETH: 8,
+ OCALL: 8,
+ OCAP: 8,
+ OCLOSE: 8,
+ OCOMPLIT: 8,
+ OCONVIFACE: 8,
+ OCONVIDATA: 8,
+ OCONVNOP: 8,
+ OCONV: 8,
+ OCOPY: 8,
+ ODELETE: 8,
+ OGETG: 8,
+ OLEN: 8,
+ OLITERAL: 8,
+ OMAKESLICE: 8,
+ OMAKESLICECOPY: 8,
+ OMAKE: 8,
+ OMAPLIT: 8,
+ ONAME: 8,
+ ONEW: 8,
+ ONIL: 8,
+ ONONAME: 8,
+ OOFFSETOF: 8,
+ OPACK: 8,
+ OPANIC: 8,
+ OPAREN: 8,
+ OPRINTN: 8,
+ OPRINT: 8,
+ ORUNESTR: 8,
+ OSIZEOF: 8,
+ OSLICE2ARRPTR: 8,
+ OSTR2BYTES: 8,
+ OSTR2RUNES: 8,
+ OSTRUCTLIT: 8,
+ OTARRAY: 8,
+ OTSLICE: 8,
+ OTCHAN: 8,
+ OTFUNC: 8,
+ OTINTER: 8,
+ OTMAP: 8,
+ OTSTRUCT: 8,
+ OTYPE: 8,
+ OUNSAFEADD: 8,
+ OUNSAFESLICE: 8,
+ OINDEXMAP: 8,
+ OINDEX: 8,
+ OSLICE: 8,
+ OSLICESTR: 8,
+ OSLICEARR: 8,
+ OSLICE3: 8,
+ OSLICE3ARR: 8,
+ OSLICEHEADER: 8,
+ ODOTINTER: 8,
+ ODOTMETH: 8,
+ ODOTPTR: 8,
+ ODOTTYPE2: 8,
+ ODOTTYPE: 8,
+ ODOT: 8,
+ OXDOT: 8,
+ OMETHVALUE: 8,
+ OMETHEXPR: 8,
+ OPLUS: 7,
+ ONOT: 7,
+ OBITNOT: 7,
+ ONEG: 7,
+ OADDR: 7,
+ ODEREF: 7,
+ ORECV: 7,
+ OMUL: 6,
+ ODIV: 6,
+ OMOD: 6,
+ OLSH: 6,
+ ORSH: 6,
+ OAND: 6,
+ OANDNOT: 6,
+ OADD: 5,
+ OSUB: 5,
+ OOR: 5,
+ OXOR: 5,
+ OEQ: 4,
+ OLT: 4,
+ OLE: 4,
+ OGE: 4,
+ OGT: 4,
+ ONE: 4,
+ OSEND: 3,
+ OANDAND: 2,
+ OOROR: 1,
+
+ // Statements handled by stmtfmt
+ OAS: -1,
+ OAS2: -1,
+ OAS2DOTTYPE: -1,
+ OAS2FUNC: -1,
+ OAS2MAPR: -1,
+ OAS2RECV: -1,
+ OASOP: -1,
+ OBLOCK: -1,
+ OBREAK: -1,
+ OCASE: -1,
+ OCONTINUE: -1,
+ ODCL: -1,
+ ODEFER: -1,
+ OFALL: -1,
+ OFOR: -1,
+ OFORUNTIL: -1,
+ OGOTO: -1,
+ OIF: -1,
+ OLABEL: -1,
+ OGO: -1,
+ ORANGE: -1,
+ ORETURN: -1,
+ OSELECT: -1,
+ OSWITCH: -1,
+
+ OEND: 0,
+}
+
+// StmtWithInit reports whether op is a statement with an explicit init list.
+func StmtWithInit(op Op) bool {
+ switch op {
+ case OIF, OFOR, OFORUNTIL, OSWITCH:
+ return true
+ }
+ return false
+}
+
+func stmtFmt(n Node, s fmt.State) {
+	// NOTE(rsc): This code used to support the text-based export format,
+	// which was more aggressive about printing full Go syntax
+ // (for example, an actual loop instead of "for loop").
+ // The code is preserved for now in case we want to expand
+ // any of those shortenings later. Or maybe we will delete
+ // the code. But for now, keep it.
+ const exportFormat = false
+
+	// Some statements allow for an init, but at most one;
+	// we may have an arbitrary number added, e.g. by typecheck
+	// and inlining. If it doesn't fit the syntax, emit an enclosing
+	// block starting with the init statements.
+
+ // if we can just say "for" n->ninit; ... then do so
+ simpleinit := len(n.Init()) == 1 && len(n.Init()[0].Init()) == 0 && StmtWithInit(n.Op())
+
+ // otherwise, print the inits as separate statements
+ complexinit := len(n.Init()) != 0 && !simpleinit && exportFormat
+
+ // but if it was for if/for/switch, put in an extra surrounding block to limit the scope
+ extrablock := complexinit && StmtWithInit(n.Op())
+
+ if extrablock {
+ fmt.Fprint(s, "{")
+ }
+
+ if complexinit {
+ fmt.Fprintf(s, " %v; ", n.Init())
+ }
+
+ switch n.Op() {
+ case ODCL:
+ n := n.(*Decl)
+ fmt.Fprintf(s, "var %v %v", n.X.Sym(), n.X.Type())
+
+	// Don't export "v = <N>" initializing statements, in the hope that they're
+	// always preceded by the DCL, which will be re-parsed and typechecked to
+	// reproduce the "v = <N>" again.
+ case OAS:
+ n := n.(*AssignStmt)
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%v := %v", n.X, n.Y)
+ } else {
+ fmt.Fprintf(s, "%v = %v", n.X, n.Y)
+ }
+
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ if n.IncDec {
+ if n.AsOp == OADD {
+ fmt.Fprintf(s, "%v++", n.X)
+ } else {
+ fmt.Fprintf(s, "%v--", n.X)
+ }
+ break
+ }
+
+ fmt.Fprintf(s, "%v %v= %v", n.X, n.AsOp, n.Y)
+
+ case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV:
+ n := n.(*AssignListStmt)
+ if n.Def && !complexinit {
+ fmt.Fprintf(s, "%.v := %.v", n.Lhs, n.Rhs)
+ } else {
+ fmt.Fprintf(s, "%.v = %.v", n.Lhs, n.Rhs)
+ }
+
+ case OBLOCK:
+ n := n.(*BlockStmt)
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "%v", n.List)
+ }
+
+ case ORETURN:
+ n := n.(*ReturnStmt)
+ fmt.Fprintf(s, "return %.v", n.Results)
+
+ case OTAILCALL:
+ n := n.(*TailCallStmt)
+ fmt.Fprintf(s, "tailcall %v", n.Call)
+
+ case OINLMARK:
+ n := n.(*InlineMarkStmt)
+ fmt.Fprintf(s, "inlmark %d", n.Index)
+
+ case OGO:
+ n := n.(*GoDeferStmt)
+ fmt.Fprintf(s, "go %v", n.Call)
+
+ case ODEFER:
+ n := n.(*GoDeferStmt)
+ fmt.Fprintf(s, "defer %v", n.Call)
+
+ case OIF:
+ n := n.(*IfStmt)
+ if simpleinit {
+ fmt.Fprintf(s, "if %v; %v { %v }", n.Init()[0], n.Cond, n.Body)
+ } else {
+ fmt.Fprintf(s, "if %v { %v }", n.Cond, n.Body)
+ }
+ if len(n.Else) != 0 {
+ fmt.Fprintf(s, " else { %v }", n.Else)
+ }
+
+ case OFOR, OFORUNTIL:
+ n := n.(*ForStmt)
+ opname := "for"
+ if n.Op() == OFORUNTIL {
+ opname = "foruntil"
+ }
+ if !exportFormat { // TODO maybe only if FmtShort, same below
+ fmt.Fprintf(s, "%s loop", opname)
+ break
+ }
+
+ fmt.Fprint(s, opname)
+ if simpleinit {
+ fmt.Fprintf(s, " %v;", n.Init()[0])
+ } else if n.Post != nil {
+ fmt.Fprint(s, " ;")
+ }
+
+ if n.Cond != nil {
+ fmt.Fprintf(s, " %v", n.Cond)
+ }
+
+ if n.Post != nil {
+ fmt.Fprintf(s, "; %v", n.Post)
+ } else if simpleinit {
+ fmt.Fprint(s, ";")
+ }
+
+ if n.Op() == OFORUNTIL && len(n.Late) != 0 {
+ fmt.Fprintf(s, "; %v", n.Late)
+ }
+
+ fmt.Fprintf(s, " { %v }", n.Body)
+
+ case ORANGE:
+ n := n.(*RangeStmt)
+ if !exportFormat {
+ fmt.Fprint(s, "for loop")
+ break
+ }
+
+ fmt.Fprint(s, "for")
+ if n.Key != nil {
+ fmt.Fprintf(s, " %v", n.Key)
+ if n.Value != nil {
+ fmt.Fprintf(s, ", %v", n.Value)
+ }
+ fmt.Fprint(s, " =")
+ }
+ fmt.Fprintf(s, " range %v { %v }", n.X, n.Body)
+
+ case OSELECT:
+ n := n.(*SelectStmt)
+ if !exportFormat {
+ fmt.Fprintf(s, "%v statement", n.Op())
+ break
+ }
+ fmt.Fprintf(s, "select { %v }", n.Cases)
+
+ case OSWITCH:
+ n := n.(*SwitchStmt)
+ if !exportFormat {
+ fmt.Fprintf(s, "%v statement", n.Op())
+ break
+ }
+ fmt.Fprintf(s, "switch")
+ if simpleinit {
+ fmt.Fprintf(s, " %v;", n.Init()[0])
+ }
+ if n.Tag != nil {
+ fmt.Fprintf(s, " %v ", n.Tag)
+ }
+ fmt.Fprintf(s, " { %v }", n.Cases)
+
+ case OCASE:
+ n := n.(*CaseClause)
+ if len(n.List) != 0 {
+ fmt.Fprintf(s, "case %.v", n.List)
+ } else {
+ fmt.Fprint(s, "default")
+ }
+ fmt.Fprintf(s, ": %v", n.Body)
+
+ case OBREAK, OCONTINUE, OGOTO, OFALL:
+ n := n.(*BranchStmt)
+ if n.Label != nil {
+ fmt.Fprintf(s, "%v %v", n.Op(), n.Label)
+ } else {
+ fmt.Fprintf(s, "%v", n.Op())
+ }
+
+ case OLABEL:
+ n := n.(*LabelStmt)
+ fmt.Fprintf(s, "%v: ", n.Label)
+ }
+
+ if extrablock {
+ fmt.Fprint(s, "}")
+ }
+}
+
+func exprFmt(n Node, s fmt.State, prec int) {
+	// NOTE(rsc): This code used to support the text-based export format,
+	// which was more aggressive about printing full Go syntax
+ // (for example, an actual loop instead of "for loop").
+ // The code is preserved for now in case we want to expand
+ // any of those shortenings later. Or maybe we will delete
+ // the code. But for now, keep it.
+ const exportFormat = false
+
+ for {
+ if n == nil {
+ fmt.Fprint(s, "<nil>")
+ return
+ }
+
+ // We always want the original, if any.
+ if o := Orig(n); o != n {
+ n = o
+ continue
+ }
+
+ // Skip implicit operations introduced during typechecking.
+ switch nn := n; nn.Op() {
+ case OADDR:
+ nn := nn.(*AddrExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ case ODEREF:
+ nn := nn.(*StarExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ case OCONV, OCONVNOP, OCONVIFACE, OCONVIDATA:
+ nn := nn.(*ConvExpr)
+ if nn.Implicit() {
+ n = nn.X
+ continue
+ }
+ }
+
+ break
+ }
+
+ nprec := OpPrec[n.Op()]
+ if n.Op() == OTYPE && n.Type() != nil && n.Type().IsPtr() {
+ nprec = OpPrec[ODEREF]
+ }
+
+ if prec > nprec {
+ fmt.Fprintf(s, "(%v)", n)
+ return
+ }
+
+ if n, ok := n.(*RawOrigExpr); ok {
+ fmt.Fprint(s, n.Raw)
+ return
+ }
+
+ switch n.Op() {
+ case OPAREN:
+ n := n.(*ParenExpr)
+ fmt.Fprintf(s, "(%v)", n.X)
+
+ case ONIL:
+ fmt.Fprint(s, "nil")
+
+ case OLITERAL: // this is a bit of a mess
+ if !exportFormat && n.Sym() != nil {
+ fmt.Fprint(s, n.Sym())
+ return
+ }
+
+ needUnparen := false
+ if n.Type() != nil && !n.Type().IsUntyped() {
+ // Need parens when type begins with what might
+ // be misinterpreted as a unary operator: * or <-.
+ if n.Type().IsPtr() || (n.Type().IsChan() && n.Type().ChanDir() == types.Crecv) {
+ fmt.Fprintf(s, "(%v)(", n.Type())
+ } else {
+ fmt.Fprintf(s, "%v(", n.Type())
+ }
+ needUnparen = true
+ }
+
+ if n.Type() == types.UntypedRune {
+ switch x, ok := constant.Uint64Val(n.Val()); {
+ case !ok:
+ fallthrough
+ default:
+ fmt.Fprintf(s, "('\\x00' + %v)", n.Val())
+
+ case x < utf8.RuneSelf:
+ fmt.Fprintf(s, "%q", x)
+
+ case x < 1<<16:
+ fmt.Fprintf(s, "'\\u%04x'", x)
+
+ case x <= utf8.MaxRune:
+ fmt.Fprintf(s, "'\\U%08x'", x)
+ }
+ } else {
+ fmt.Fprint(s, types.FmtConst(n.Val(), s.Flag('#')))
+ }
+
+ if needUnparen {
+ fmt.Fprintf(s, ")")
+ }
+
+ case ODCLFUNC:
+ n := n.(*Func)
+ if sym := n.Sym(); sym != nil {
+ fmt.Fprint(s, sym)
+ return
+ }
+ fmt.Fprintf(s, "<unnamed Func>")
+
+ case ONAME:
+ n := n.(*Name)
+ // Special case: name used as local variable in export.
+ // _ becomes ~b%d internally; print as _ for export
+ if !exportFormat && n.Sym() != nil && n.Sym().Name[0] == '~' && n.Sym().Name[1] == 'b' {
+ fmt.Fprint(s, "_")
+ return
+ }
+ fallthrough
+ case OPACK, ONONAME:
+ fmt.Fprint(s, n.Sym())
+
+ case OLINKSYMOFFSET:
+ n := n.(*LinksymOffsetExpr)
+ fmt.Fprintf(s, "(%v)(%s@%d)", n.Type(), n.Linksym.Name, n.Offset_)
+
+ case OTYPE:
+ if n.Type() == nil && n.Sym() != nil {
+ fmt.Fprint(s, n.Sym())
+ return
+ }
+ fmt.Fprintf(s, "%v", n.Type())
+
+ case OTSLICE:
+ n := n.(*SliceType)
+ if n.DDD {
+ fmt.Fprintf(s, "...%v", n.Elem)
+ } else {
+ fmt.Fprintf(s, "[]%v", n.Elem) // happens before typecheck
+ }
+
+ case OTARRAY:
+ n := n.(*ArrayType)
+ if n.Len == nil {
+ fmt.Fprintf(s, "[...]%v", n.Elem)
+ } else {
+ fmt.Fprintf(s, "[%v]%v", n.Len, n.Elem)
+ }
+
+ case OTMAP:
+ n := n.(*MapType)
+ fmt.Fprintf(s, "map[%v]%v", n.Key, n.Elem)
+
+ case OTCHAN:
+ n := n.(*ChanType)
+ switch n.Dir {
+ case types.Crecv:
+ fmt.Fprintf(s, "<-chan %v", n.Elem)
+
+ case types.Csend:
+ fmt.Fprintf(s, "chan<- %v", n.Elem)
+
+ default:
+ if n.Elem != nil && n.Elem.Op() == OTCHAN && n.Elem.(*ChanType).Dir == types.Crecv {
+ fmt.Fprintf(s, "chan (%v)", n.Elem)
+ } else {
+ fmt.Fprintf(s, "chan %v", n.Elem)
+ }
+ }
+
+ case OTSTRUCT:
+ fmt.Fprint(s, "<struct>")
+
+ case OTINTER:
+ fmt.Fprint(s, "<inter>")
+
+ case OTFUNC:
+ fmt.Fprint(s, "<func>")
+
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ if !exportFormat {
+ fmt.Fprint(s, "func literal")
+ return
+ }
+ fmt.Fprintf(s, "%v { %v }", n.Type(), n.Func.Body)
+
+ case OCOMPLIT:
+ n := n.(*CompLitExpr)
+ if !exportFormat {
+ if n.Implicit() {
+ fmt.Fprintf(s, "... argument")
+ return
+ }
+ if typ := n.Type(); typ != nil {
+ fmt.Fprintf(s, "%v{%s}", typ, ellipsisIf(len(n.List) != 0))
+ return
+ }
+ if n.Ntype != nil {
+ fmt.Fprintf(s, "%v{%s}", n.Ntype, ellipsisIf(len(n.List) != 0))
+ return
+ }
+
+ fmt.Fprint(s, "composite literal")
+ return
+ }
+ fmt.Fprintf(s, "(%v{ %.v })", n.Ntype, n.List)
+
+ case OPTRLIT:
+ n := n.(*AddrExpr)
+ fmt.Fprintf(s, "&%v", n.X)
+
+ case OSTRUCTLIT, OARRAYLIT, OSLICELIT, OMAPLIT:
+ n := n.(*CompLitExpr)
+ if !exportFormat {
+ fmt.Fprintf(s, "%v{%s}", n.Type(), ellipsisIf(len(n.List) != 0))
+ return
+ }
+ fmt.Fprintf(s, "(%v{ %.v })", n.Type(), n.List)
+
+ case OKEY:
+ n := n.(*KeyExpr)
+ if n.Key != nil && n.Value != nil {
+ fmt.Fprintf(s, "%v:%v", n.Key, n.Value)
+ return
+ }
+
+ if n.Key == nil && n.Value != nil {
+ fmt.Fprintf(s, ":%v", n.Value)
+ return
+ }
+ if n.Key != nil && n.Value == nil {
+ fmt.Fprintf(s, "%v:", n.Key)
+ return
+ }
+ fmt.Fprint(s, ":")
+
+ case OSTRUCTKEY:
+ n := n.(*StructKeyExpr)
+ fmt.Fprintf(s, "%v:%v", n.Field, n.Value)
+
+ case OXDOT, ODOT, ODOTPTR, ODOTINTER, ODOTMETH, OMETHVALUE, OMETHEXPR:
+ n := n.(*SelectorExpr)
+ exprFmt(n.X, s, nprec)
+ if n.Sel == nil {
+ fmt.Fprint(s, ".<nil>")
+ return
+ }
+ fmt.Fprintf(s, ".%s", n.Sel.Name)
+
+ case ODOTTYPE, ODOTTYPE2:
+ n := n.(*TypeAssertExpr)
+ exprFmt(n.X, s, nprec)
+ if n.Ntype != nil {
+ fmt.Fprintf(s, ".(%v)", n.Ntype)
+ return
+ }
+ fmt.Fprintf(s, ".(%v)", n.Type())
+
+ case OINDEX, OINDEXMAP:
+ n := n.(*IndexExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, "[%v]", n.Index)
+
+ case OSLICE, OSLICESTR, OSLICEARR, OSLICE3, OSLICE3ARR:
+ n := n.(*SliceExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprint(s, "[")
+ if n.Low != nil {
+ fmt.Fprint(s, n.Low)
+ }
+ fmt.Fprint(s, ":")
+ if n.High != nil {
+ fmt.Fprint(s, n.High)
+ }
+ if n.Op().IsSlice3() {
+ fmt.Fprint(s, ":")
+ if n.Max != nil {
+ fmt.Fprint(s, n.Max)
+ }
+ }
+ fmt.Fprint(s, "]")
+
+ case OSLICEHEADER:
+ n := n.(*SliceHeaderExpr)
+ fmt.Fprintf(s, "sliceheader{%v,%v,%v}", n.Ptr, n.Len, n.Cap)
+
+ case OCOMPLEX, OCOPY, OUNSAFEADD, OUNSAFESLICE:
+ n := n.(*BinaryExpr)
+ fmt.Fprintf(s, "%v(%v, %v)", n.Op(), n.X, n.Y)
+
+ case OCONV,
+ OCONVIFACE,
+ OCONVIDATA,
+ OCONVNOP,
+ OBYTES2STR,
+ ORUNES2STR,
+ OSTR2BYTES,
+ OSTR2RUNES,
+ ORUNESTR,
+ OSLICE2ARRPTR:
+ n := n.(*ConvExpr)
+ if n.Type() == nil || n.Type().Sym() == nil {
+ fmt.Fprintf(s, "(%v)", n.Type())
+ } else {
+ fmt.Fprintf(s, "%v", n.Type())
+ }
+ fmt.Fprintf(s, "(%v)", n.X)
+
+ case OREAL,
+ OIMAG,
+ OCAP,
+ OCLOSE,
+ OLEN,
+ ONEW,
+ OPANIC,
+ OALIGNOF,
+ OOFFSETOF,
+ OSIZEOF:
+ n := n.(*UnaryExpr)
+ fmt.Fprintf(s, "%v(%v)", n.Op(), n.X)
+
+ case OAPPEND,
+ ODELETE,
+ OMAKE,
+ ORECOVER,
+ OPRINT,
+ OPRINTN:
+ n := n.(*CallExpr)
+ if n.IsDDD {
+ fmt.Fprintf(s, "%v(%.v...)", n.Op(), n.Args)
+ return
+ }
+ fmt.Fprintf(s, "%v(%.v)", n.Op(), n.Args)
+
+ case OCALL, OCALLFUNC, OCALLINTER, OCALLMETH, OGETG:
+ n := n.(*CallExpr)
+ exprFmt(n.X, s, nprec)
+ if n.IsDDD {
+ fmt.Fprintf(s, "(%.v...)", n.Args)
+ return
+ }
+ fmt.Fprintf(s, "(%.v)", n.Args)
+
+ case OINLCALL:
+ n := n.(*InlinedCallExpr)
+ // TODO(mdempsky): Print Init and/or Body?
+ if len(n.ReturnVars) == 1 {
+ fmt.Fprintf(s, "%v", n.ReturnVars[0])
+ return
+ }
+ fmt.Fprintf(s, "(.%v)", n.ReturnVars)
+
+ case OMAKEMAP, OMAKECHAN, OMAKESLICE:
+ n := n.(*MakeExpr)
+ if n.Cap != nil {
+ fmt.Fprintf(s, "make(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+ return
+ }
+ if n.Len != nil && (n.Op() == OMAKESLICE || !n.Len.Type().IsUntyped()) {
+ fmt.Fprintf(s, "make(%v, %v)", n.Type(), n.Len)
+ return
+ }
+ fmt.Fprintf(s, "make(%v)", n.Type())
+
+ case OMAKESLICECOPY:
+ n := n.(*MakeExpr)
+ fmt.Fprintf(s, "makeslicecopy(%v, %v, %v)", n.Type(), n.Len, n.Cap)
+
+ case OPLUS, ONEG, OBITNOT, ONOT, ORECV:
+ // Unary
+ n := n.(*UnaryExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ if n.X != nil && n.X.Op() == n.Op() {
+ fmt.Fprint(s, " ")
+ }
+ exprFmt(n.X, s, nprec+1)
+
+ case OADDR:
+ n := n.(*AddrExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ if n.X != nil && n.X.Op() == n.Op() {
+ fmt.Fprint(s, " ")
+ }
+ exprFmt(n.X, s, nprec+1)
+
+ case ODEREF:
+ n := n.(*StarExpr)
+ fmt.Fprintf(s, "%v", n.Op())
+ exprFmt(n.X, s, nprec+1)
+
+ // Binary
+ case OADD,
+ OAND,
+ OANDNOT,
+ ODIV,
+ OEQ,
+ OGE,
+ OGT,
+ OLE,
+ OLT,
+ OLSH,
+ OMOD,
+ OMUL,
+ ONE,
+ OOR,
+ ORSH,
+ OSUB,
+ OXOR:
+ n := n.(*BinaryExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, " %v ", n.Op())
+ exprFmt(n.Y, s, nprec+1)
+
+ case OANDAND,
+ OOROR:
+ n := n.(*LogicalExpr)
+ exprFmt(n.X, s, nprec)
+ fmt.Fprintf(s, " %v ", n.Op())
+ exprFmt(n.Y, s, nprec+1)
+
+ case OSEND:
+ n := n.(*SendStmt)
+ exprFmt(n.Chan, s, nprec)
+ fmt.Fprintf(s, " <- ")
+ exprFmt(n.Value, s, nprec+1)
+
+ case OADDSTR:
+ n := n.(*AddStringExpr)
+ for i, n1 := range n.List {
+ if i != 0 {
+ fmt.Fprint(s, " + ")
+ }
+ exprFmt(n1, s, nprec)
+ }
+ default:
+ fmt.Fprintf(s, "<node %v>", n.Op())
+ }
+}
+
+func ellipsisIf(b bool) string {
+ if b {
+ return "..."
+ }
+ return ""
+}
+
+// Nodes
+
+// Format implements formatting for a Nodes.
+// The valid formats are:
+//
+// %v Go syntax, semicolon-separated
+// %.v Go syntax, comma-separated
+// %+v Debug syntax, as in DumpList.
+//
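+// For example, for a Nodes value l holding the statements x = 1 and
+// y = 2 (a sketch):
+//
+//	fmt.Sprintf("%v", l)  // "x = 1; y = 2"
+//	fmt.Sprintf("%.v", l) // "x = 1, y = 2"
+//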
+func (l Nodes) Format(s fmt.State, verb rune) {
+ if s.Flag('+') && verb == 'v' {
+ // %+v is DumpList output
+ dumpNodes(s, l, 1)
+ return
+ }
+
+ if verb != 'v' {
+ fmt.Fprintf(s, "%%!%c(Nodes)", verb)
+ return
+ }
+
+ sep := "; "
+ if _, ok := s.Precision(); ok { // %.v is expr list
+ sep = ", "
+ }
+
+ for i, n := range l {
+ fmt.Fprint(s, n)
+ if i+1 < len(l) {
+ fmt.Fprint(s, sep)
+ }
+ }
+}
+
+// Dump
+
+// Dump prints the message s followed by a debug dump of n.
+func Dump(s string, n Node) {
+ fmt.Printf("%s%+v\n", s, n)
+}
+
+// DumpList prints the message s followed by a debug dump of each node in the list.
+func DumpList(s string, list Nodes) {
+ var buf bytes.Buffer
+ FDumpList(&buf, s, list)
+ os.Stdout.Write(buf.Bytes())
+}
+
+// FDumpList prints to w the message s followed by a debug dump of each node in the list.
+func FDumpList(w io.Writer, s string, list Nodes) {
+ io.WriteString(w, s)
+ dumpNodes(w, list, 1)
+ io.WriteString(w, "\n")
+}
+
+// indent prints indentation to w.
+func indent(w io.Writer, depth int) {
+ fmt.Fprint(w, "\n")
+ for i := 0; i < depth; i++ {
+ fmt.Fprint(w, ". ")
+ }
+}
+
+// EscFmt is set by the escape analysis code to add escape analysis details to the node print.
+var EscFmt func(n Node) string
+
+// dumpNodeHeader prints the debug-format node header line to w.
+func dumpNodeHeader(w io.Writer, n Node) {
+ // Useful to see which nodes in an AST printout are actually identical
+ if base.Debug.DumpPtrs != 0 {
+ fmt.Fprintf(w, " p(%p)", n)
+ }
+
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Defn != nil {
+ // Useful to see where Defn is set and what node it points to
+ fmt.Fprintf(w, " defn(%p)", n.Name().Defn)
+ }
+
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Curfn != nil {
+		// Useful to see where Curfn is set and what node it points to
+ fmt.Fprintf(w, " curfn(%p)", n.Name().Curfn)
+ }
+ if base.Debug.DumpPtrs != 0 && n.Name() != nil && n.Name().Outer != nil {
+		// Useful to see where Outer is set and what node it points to
+ fmt.Fprintf(w, " outer(%p)", n.Name().Outer)
+ }
+
+ if EscFmt != nil {
+ if esc := EscFmt(n); esc != "" {
+ fmt.Fprintf(w, " %s", esc)
+ }
+ }
+
+ if n.Sym() != nil && n.Op() != ONAME && n.Op() != ONONAME && n.Op() != OTYPE {
+ fmt.Fprintf(w, " %+v", n.Sym())
+ }
+
+ // Print Node-specific fields of basic type in header line.
+ v := reflect.ValueOf(n).Elem()
+ t := v.Type()
+ nf := t.NumField()
+ for i := 0; i < nf; i++ {
+ tf := t.Field(i)
+ if tf.PkgPath != "" {
+ // skip unexported field - Interface will fail
+ continue
+ }
+ k := tf.Type.Kind()
+ if reflect.Bool <= k && k <= reflect.Complex128 {
+ name := strings.TrimSuffix(tf.Name, "_")
+ vf := v.Field(i)
+ vfi := vf.Interface()
+ if name == "Offset" && vfi == types.BADWIDTH || name != "Offset" && isZero(vf) {
+ continue
+ }
+ if vfi == true {
+ fmt.Fprintf(w, " %s", name)
+ } else {
+ fmt.Fprintf(w, " %s:%+v", name, vf.Interface())
+ }
+ }
+ }
+
+ // Print Node-specific booleans by looking for methods.
+ // Different v, t from above - want *Struct not Struct, for methods.
+ v = reflect.ValueOf(n)
+ t = v.Type()
+ nm := t.NumMethod()
+ for i := 0; i < nm; i++ {
+ tm := t.Method(i)
+ if tm.PkgPath != "" {
+ // skip unexported method - call will fail
+ continue
+ }
+ m := v.Method(i)
+ mt := m.Type()
+ if mt.NumIn() == 0 && mt.NumOut() == 1 && mt.Out(0).Kind() == reflect.Bool {
+ // TODO(rsc): Remove the func/defer/recover wrapping,
+ // which is guarding against panics in miniExpr,
+ // once we get down to the simpler state in which
+ // nodes have no getter methods that aren't allowed to be called.
+ func() {
+ defer func() { recover() }()
+ if m.Call(nil)[0].Bool() {
+ name := strings.TrimSuffix(tm.Name, "_")
+ fmt.Fprintf(w, " %s", name)
+ }
+ }()
+ }
+ }
+
+ if n.Op() == OCLOSURE {
+ n := n.(*ClosureExpr)
+ if fn := n.Func; fn != nil && fn.Nname.Sym() != nil {
+ fmt.Fprintf(w, " fnName(%+v)", fn.Nname.Sym())
+ }
+ }
+
+ if n.Type() != nil {
+ if n.Op() == OTYPE {
+ fmt.Fprintf(w, " type")
+ }
+ fmt.Fprintf(w, " %+v", n.Type())
+ }
+ if n.Typecheck() != 0 {
+ fmt.Fprintf(w, " tc(%d)", n.Typecheck())
+ }
+
+ if n.Pos().IsKnown() {
+ fmt.Fprint(w, " # ")
+ switch n.Pos().IsStmt() {
+ case src.PosNotStmt:
+ fmt.Fprint(w, "_") // "-" would be confusing
+ case src.PosIsStmt:
+ fmt.Fprint(w, "+")
+ }
+ for i, pos := range base.Ctxt.AllPos(n.Pos(), nil) {
+ if i > 0 {
+ fmt.Fprint(w, ",")
+ }
+ // TODO(mdempsky): Print line pragma details too.
+ file := filepath.Base(pos.Filename())
+ // Note: this output will be parsed by ssa/html.go:(*HTMLWriter).WriteAST. Keep in sync.
+ fmt.Fprintf(w, "%s:%d:%d", file, pos.Line(), pos.Col())
+ }
+ }
+}
+
+func dumpNode(w io.Writer, n Node, depth int) {
+ indent(w, depth)
+ if depth > 40 {
+ fmt.Fprint(w, "...")
+ return
+ }
+
+ if n == nil {
+ fmt.Fprint(w, "NilIrNode")
+ return
+ }
+
+ if len(n.Init()) != 0 {
+ fmt.Fprintf(w, "%+v-init", n.Op())
+ dumpNodes(w, n.Init(), depth+1)
+ indent(w, depth)
+ }
+
+ switch n.Op() {
+ default:
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+
+ case OLITERAL:
+ fmt.Fprintf(w, "%+v-%v", n.Op(), n.Val())
+ dumpNodeHeader(w, n)
+ return
+
+ case ONAME, ONONAME:
+ if n.Sym() != nil {
+ fmt.Fprintf(w, "%+v-%+v", n.Op(), n.Sym())
+ } else {
+ fmt.Fprintf(w, "%+v", n.Op())
+ }
+ dumpNodeHeader(w, n)
+ if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-ntype", n.Op())
+ dumpNode(w, n.Name().Ntype, depth+1)
+ }
+ return
+
+ case OASOP:
+ n := n.(*AssignOpStmt)
+ fmt.Fprintf(w, "%+v-%+v", n.Op(), n.AsOp)
+ dumpNodeHeader(w, n)
+
+ case OTYPE:
+ fmt.Fprintf(w, "%+v %+v", n.Op(), n.Sym())
+ dumpNodeHeader(w, n)
+ if n.Type() == nil && n.Name() != nil && n.Name().Ntype != nil {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-ntype", n.Op())
+ dumpNode(w, n.Name().Ntype, depth+1)
+ }
+ return
+
+ case OCLOSURE:
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+
+ case ODCLFUNC:
+ // Func has many fields we don't want to print.
+ // Bypass reflection and just print what we want.
+ n := n.(*Func)
+ fmt.Fprintf(w, "%+v", n.Op())
+ dumpNodeHeader(w, n)
+ fn := n
+ if len(fn.Dcl) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-Dcl", n.Op())
+ for _, dcl := range n.Dcl {
+ dumpNode(w, dcl, depth+1)
+ }
+ }
+ if len(fn.ClosureVars) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-ClosureVars", n.Op())
+ for _, cv := range fn.ClosureVars {
+ dumpNode(w, cv, depth+1)
+ }
+ }
+ if len(fn.Enter) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-Enter", n.Op())
+ dumpNodes(w, fn.Enter, depth+1)
+ }
+ if len(fn.Body) > 0 {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-body", n.Op())
+ dumpNodes(w, fn.Body, depth+1)
+ }
+ return
+ }
+
+ v := reflect.ValueOf(n).Elem()
+ t := reflect.TypeOf(n).Elem()
+ nf := t.NumField()
+ for i := 0; i < nf; i++ {
+ tf := t.Field(i)
+ vf := v.Field(i)
+ if tf.PkgPath != "" {
+ // skip unexported field - Interface will fail
+ continue
+ }
+ switch tf.Type.Kind() {
+ case reflect.Interface, reflect.Ptr, reflect.Slice:
+ if vf.IsNil() {
+ continue
+ }
+ }
+ name := strings.TrimSuffix(tf.Name, "_")
+ // Do not bother with field name header lines for the
+ // most common positional arguments: unary, binary expr,
+ // index expr, send stmt, go and defer call expression.
+ switch name {
+ case "X", "Y", "Index", "Chan", "Value", "Call":
+ name = ""
+ }
+ switch val := vf.Interface().(type) {
+ case Node:
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ dumpNode(w, val, depth+1)
+ case Nodes:
+ if len(val) == 0 {
+ continue
+ }
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ dumpNodes(w, val, depth+1)
+ default:
+ if vf.Kind() == reflect.Slice && vf.Type().Elem().Implements(nodeType) {
+ if vf.Len() == 0 {
+ continue
+ }
+ if name != "" {
+ indent(w, depth)
+ fmt.Fprintf(w, "%+v-%s", n.Op(), name)
+ }
+ for i, n := 0, vf.Len(); i < n; i++ {
+ dumpNode(w, vf.Index(i).Interface().(Node), depth+1)
+ }
+ }
+ }
+ }
+}
+
+var nodeType = reflect.TypeOf((*Node)(nil)).Elem()
+
+func dumpNodes(w io.Writer, list Nodes, depth int) {
+ if len(list) == 0 {
+ fmt.Fprintf(w, " <nil>")
+ return
+ }
+
+ for _, n := range list {
+ dumpNode(w, n, depth)
+ }
+}
+
+// reflect.IsZero is not available in Go 1.4 (added in Go 1.13), so we use this copy instead.
+func isZero(v reflect.Value) bool {
+ switch v.Kind() {
+ case reflect.Bool:
+ return !v.Bool()
+ case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
+ return v.Int() == 0
+ case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
+ return v.Uint() == 0
+ case reflect.Float32, reflect.Float64:
+ return math.Float64bits(v.Float()) == 0
+ case reflect.Complex64, reflect.Complex128:
+ c := v.Complex()
+ return math.Float64bits(real(c)) == 0 && math.Float64bits(imag(c)) == 0
+ case reflect.Array:
+ for i := 0; i < v.Len(); i++ {
+ if !isZero(v.Index(i)) {
+ return false
+ }
+ }
+ return true
+ case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice, reflect.UnsafePointer:
+ return v.IsNil()
+ case reflect.String:
+ return v.Len() == 0
+ case reflect.Struct:
+ for i := 0; i < v.NumField(); i++ {
+ if !isZero(v.Field(i)) {
+ return false
+ }
+ }
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/ir/func.go b/src/cmd/compile/internal/ir/func.go
new file mode 100644
index 0000000..23d56f7
--- /dev/null
+++ b/src/cmd/compile/internal/ir/func.go
@@ -0,0 +1,438 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// A Func corresponds to a single function in a Go program
+// (and vice versa: each function is denoted by exactly one *Func).
+//
+// There are multiple nodes that represent a Func in the IR.
+//
+// The ONAME node (Func.Nname) is used for plain references to it.
+// The ODCLFUNC node (the Func itself) is used for its declaration code.
+// The OCLOSURE node (Func.OClosure) is used for a reference to a
+// function literal.
+//
+// An imported function will have an ONAME node which points to a Func
+// with an empty body.
+// A declared function or method has an ODCLFUNC (the Func itself) and an ONAME.
+// A function literal is represented directly by an OCLOSURE, but it also
+// has an ODCLFUNC (and a matching ONAME) representing the compiled
+// underlying form of the closure, which accesses the captured variables
+// using a special data structure passed in a register.
+//
+// A method declaration is represented like functions, except f.Sym
+// will be the qualified method name (e.g., "T.m") and
+// f.Func.Shortname is the bare method name (e.g., "m").
+//
+// A method expression (T.M) is represented as an OMETHEXPR node,
+// in which n.Left and n.Right point to the type and method, respectively.
+// Each distinct mention of a method expression in the source code
+// constructs a fresh node.
+//
+// A method value (t.M) is represented by ODOTMETH/ODOTINTER
+// when it is called directly and by OMETHVALUE otherwise.
+// These are like method expressions, except that for ODOTMETH/ODOTINTER,
+// the method name is stored in Sym instead of Right.
+// Each OMETHVALUE ends up being implemented as a new
+// function, a bit like a closure, with its own ODCLFUNC.
+// The OMETHVALUE uses n.Func to record the linkage to
+// the generated ODCLFUNC, but there is no
+// pointer from the Func back to the OMETHVALUE.
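+//
+// For example (a sketch; T, M, t, f, and g are illustrative):
+//
+//	func (t T) M() {}
+//
+//	t.M()    // the selector is an ODOTMETH node when called directly
+//	f := T.M // method expression: an OMETHEXPR node
+//	g := t.M // method value: an OMETHVALUE node with a generated ODCLFUNC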
+type Func struct {
+ miniNode
+ Body Nodes
+ Iota int64
+
+ Nname *Name // ONAME node
+ OClosure *ClosureExpr // OCLOSURE node
+
+ Shortname *types.Sym
+
+ // Extra entry code for the function. For example, allocate and initialize
+ // memory for escaping parameters.
+ Enter Nodes
+ Exit Nodes
+
+	// ONAME nodes for all params/locals for this func/closure; does NOT
+	// include ClosureVars until closures are transformed during walk.
+ // Names must be listed PPARAMs, PPARAMOUTs, then PAUTOs,
+ // with PPARAMs and PPARAMOUTs in order corresponding to the function signature.
+ // However, as anonymous or blank PPARAMs are not actually declared,
+ // they are omitted from Dcl.
+ // Anonymous and blank PPARAMOUTs are declared as ~rNN and ~bNN Names, respectively.
+ Dcl []*Name
+
+ // ClosureVars lists the free variables that are used within a
+ // function literal, but formally declared in an enclosing
+ // function. The variables in this slice are the closure function's
+ // own copy of the variables, which are used within its function
+ // body. They will also each have IsClosureVar set, and will have
+ // Byval set if they're captured by value.
+ ClosureVars []*Name
+
+ // Enclosed functions that need to be compiled.
+ // Populated during walk.
+ Closures []*Func
+
+ // Parents records the parent scope of each scope within a
+ // function. The root scope (0) has no parent, so the i'th
+ // scope's parent is stored at Parents[i-1].
+ Parents []ScopeID
+
+ // Marks records scope boundary changes.
+ Marks []Mark
+
+ FieldTrack map[*obj.LSym]struct{}
+ DebugInfo interface{}
+ LSym *obj.LSym // Linker object in this function's native ABI (Func.ABI)
+
+ Inl *Inline
+
+ // Closgen tracks how many closures have been generated within
+ // this function. Used by closurename for creating unique
+ // function names.
+ Closgen int32
+
+ Label int32 // largest auto-generated label in this function
+
+ Endlineno src.XPos
+ WBPos src.XPos // position of first write barrier; see SetWBPos
+
+ Pragma PragmaFlag // go:xxx function annotations
+
+ flags bitset16
+
+ // ABI is a function's "definition" ABI. This is the ABI that
+ // this function's generated code is expecting to be called by.
+ //
+ // For most functions, this will be obj.ABIInternal. It may be
+ // a different ABI for functions defined in assembly or ABI wrappers.
+ //
+ // This is included in the export data and tracked across packages.
+ ABI obj.ABI
+ // ABIRefs is the set of ABIs by which this function is referenced.
+ // For ABIs other than this function's definition ABI, the
+ // compiler generates ABI wrapper functions. This is only tracked
+ // within a package.
+ ABIRefs obj.ABISet
+
+ NumDefers int32 // number of defer calls in the function
+ NumReturns int32 // number of explicit returns in the function
+
+ // nwbrCalls records the LSyms of functions called by this
+ // function for go:nowritebarrierrec analysis. Only filled in
+ // if nowritebarrierrecCheck != nil.
+ NWBRCalls *[]SymAndPos
+
+	// For wrapper functions, WrappedFunc points to the original Func.
+ // Currently only used for go/defer wrappers.
+ WrappedFunc *Func
+}
+
+func NewFunc(pos src.XPos) *Func {
+ f := new(Func)
+ f.pos = pos
+ f.op = ODCLFUNC
+ f.Iota = -1
+ // Most functions are ABIInternal. The importer or symabis
+ // pass may override this.
+ f.ABI = obj.ABIInternal
+ return f
+}
+
+func (f *Func) isStmt() {}
+
+func (n *Func) copy() Node { panic(n.no("copy")) }
+func (n *Func) doChildren(do func(Node) bool) bool { return doNodes(n.Body, do) }
+func (n *Func) editChildren(edit func(Node) Node) { editNodes(n.Body, edit) }
+
+func (f *Func) Type() *types.Type { return f.Nname.Type() }
+func (f *Func) Sym() *types.Sym { return f.Nname.Sym() }
+func (f *Func) Linksym() *obj.LSym { return f.Nname.Linksym() }
+func (f *Func) LinksymABI(abi obj.ABI) *obj.LSym { return f.Nname.LinksymABI(abi) }
+
+// An Inline holds fields used for function bodies that can be inlined.
+type Inline struct {
+ Cost int32 // heuristic cost of inlining this function
+
+ // Copies of Func.Dcl and Func.Body for use during inlining. Copies are
+ // needed because the function's dcl/body may be changed by later compiler
+ // transformations. These fields are also populated when a function from
+ // another package is imported.
+ Dcl []*Name
+ Body []Node
+
+ // CanDelayResults reports whether it's safe for the inliner to delay
+ // initializing the result parameters until immediately before the
+ // "return" statement.
+ CanDelayResults bool
+}
+
+// A Mark represents a scope boundary.
+type Mark struct {
+ // Pos is the position of the token that marks the scope
+ // change.
+ Pos src.XPos
+
+ // Scope identifies the innermost scope to the right of Pos.
+ Scope ScopeID
+}
+
+// A ScopeID represents a lexical scope within a function.
+type ScopeID int32
+
+const (
+ funcDupok = 1 << iota // duplicate definitions ok
+ funcWrapper // hide frame from users (elide in tracebacks, don't count as a frame for recover())
+	funcABIWrapper // is an ABI wrapper (also sets funcWrapper)
+ funcNeedctxt // function uses context register (has closure variables)
+ funcReflectMethod // function calls reflect.Type.Method or MethodByName
+ // true if closure inside a function; false if a simple function or a
+ // closure in a global variable initialization
+ funcIsHiddenClosure
+ funcIsDeadcodeClosure // true if closure is deadcode
+ funcHasDefer // contains a defer statement
+ funcNilCheckDisabled // disable nil checks when compiling this function
+ funcInlinabilityChecked // inliner has already determined whether the function is inlinable
+ funcExportInline // include inline body in export data
+ funcInstrumentBody // add race/msan/asan instrumentation during SSA construction
+ funcOpenCodedDeferDisallowed // can't do open-coded defers
+ funcClosureCalled // closure is only immediately called; used by escape analysis
+)
+
+type SymAndPos struct {
+ Sym *obj.LSym // LSym of callee
+ Pos src.XPos // line of call
+}
+
+func (f *Func) Dupok() bool { return f.flags&funcDupok != 0 }
+func (f *Func) Wrapper() bool { return f.flags&funcWrapper != 0 }
+func (f *Func) ABIWrapper() bool { return f.flags&funcABIWrapper != 0 }
+func (f *Func) Needctxt() bool { return f.flags&funcNeedctxt != 0 }
+func (f *Func) ReflectMethod() bool { return f.flags&funcReflectMethod != 0 }
+func (f *Func) IsHiddenClosure() bool { return f.flags&funcIsHiddenClosure != 0 }
+func (f *Func) IsDeadcodeClosure() bool { return f.flags&funcIsDeadcodeClosure != 0 }
+func (f *Func) HasDefer() bool { return f.flags&funcHasDefer != 0 }
+func (f *Func) NilCheckDisabled() bool { return f.flags&funcNilCheckDisabled != 0 }
+func (f *Func) InlinabilityChecked() bool { return f.flags&funcInlinabilityChecked != 0 }
+func (f *Func) ExportInline() bool { return f.flags&funcExportInline != 0 }
+func (f *Func) InstrumentBody() bool { return f.flags&funcInstrumentBody != 0 }
+func (f *Func) OpenCodedDeferDisallowed() bool { return f.flags&funcOpenCodedDeferDisallowed != 0 }
+func (f *Func) ClosureCalled() bool { return f.flags&funcClosureCalled != 0 }
+
+func (f *Func) SetDupok(b bool) { f.flags.set(funcDupok, b) }
+func (f *Func) SetWrapper(b bool) { f.flags.set(funcWrapper, b) }
+func (f *Func) SetABIWrapper(b bool) { f.flags.set(funcABIWrapper, b) }
+func (f *Func) SetNeedctxt(b bool) { f.flags.set(funcNeedctxt, b) }
+func (f *Func) SetReflectMethod(b bool) { f.flags.set(funcReflectMethod, b) }
+func (f *Func) SetIsHiddenClosure(b bool) { f.flags.set(funcIsHiddenClosure, b) }
+func (f *Func) SetIsDeadcodeClosure(b bool) { f.flags.set(funcIsDeadcodeClosure, b) }
+func (f *Func) SetHasDefer(b bool) { f.flags.set(funcHasDefer, b) }
+func (f *Func) SetNilCheckDisabled(b bool) { f.flags.set(funcNilCheckDisabled, b) }
+func (f *Func) SetInlinabilityChecked(b bool) { f.flags.set(funcInlinabilityChecked, b) }
+func (f *Func) SetExportInline(b bool) { f.flags.set(funcExportInline, b) }
+func (f *Func) SetInstrumentBody(b bool) { f.flags.set(funcInstrumentBody, b) }
+func (f *Func) SetOpenCodedDeferDisallowed(b bool) { f.flags.set(funcOpenCodedDeferDisallowed, b) }
+func (f *Func) SetClosureCalled(b bool) { f.flags.set(funcClosureCalled, b) }
+
+func (f *Func) SetWBPos(pos src.XPos) {
+ if base.Debug.WB != 0 {
+ base.WarnfAt(pos, "write barrier")
+ }
+ if !f.WBPos.IsKnown() {
+ f.WBPos = pos
+ }
+}
+
+// FuncName returns the name (without the package) of the function f.
+func FuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ return f.Sym().Name
+}
+
+// PkgFuncName returns the name of the function f, with its package prepended.
+// This differs from the compiler's internal convention, where local functions
+// lack a package, because the ultimate consumer of this output is a human
+// looking at an IDE; the package is only empty if the compilation package is
+// actually the empty string.
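+//
+// For example, a function F compiled in a package with import path
+// "example.com/p" is reported as "example.com/p.F" (illustrative path).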
+func PkgFuncName(f *Func) string {
+ if f == nil || f.Nname == nil {
+ return "<nil>"
+ }
+ s := f.Sym()
+ pkg := s.Pkg
+
+ p := base.Ctxt.Pkgpath
+ if pkg != nil && pkg.Path != "" {
+ p = pkg.Path
+ }
+ if p == "" {
+ return s.Name
+ }
+ return p + "." + s.Name
+}
+
+var CurFunc *Func
+
+// WithFunc invokes do with CurFunc and base.Pos set to curfn and
+// curfn.Pos(), respectively, and then restores their previous values
+// before returning.
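+//
+// For example (a sketch):
+//
+//	WithFunc(fn, func() {
+//		// Here CurFunc == fn and base.Pos == fn.Pos().
+//	})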
+func WithFunc(curfn *Func, do func()) {
+ oldfn, oldpos := CurFunc, base.Pos
+ defer func() { CurFunc, base.Pos = oldfn, oldpos }()
+
+ CurFunc, base.Pos = curfn, curfn.Pos()
+ do()
+}
+
+func FuncSymName(s *types.Sym) string {
+ return s.Name + "·f"
+}
+
+// MarkFunc marks a node as a function.
+func MarkFunc(n *Name) {
+ if n.Op() != ONAME || n.Class != Pxxx {
+ base.FatalfAt(n.Pos(), "expected ONAME/Pxxx node, got %v (%v/%v)", n, n.Op(), n.Class)
+ }
+
+ n.Class = PFUNC
+ n.Sym().SetFunc(true)
+}
+
+// ClosureDebugRuntimeCheck applies boilerplate checks for debug flags
+// and for compiling the runtime.
+func ClosureDebugRuntimeCheck(clo *ClosureExpr) {
+ if base.Debug.Closure > 0 {
+ if clo.Esc() == EscHeap {
+ base.WarnfAt(clo.Pos(), "heap closure, captured vars = %v", clo.Func.ClosureVars)
+ } else {
+ base.WarnfAt(clo.Pos(), "stack closure, captured vars = %v", clo.Func.ClosureVars)
+ }
+ }
+ if base.Flag.CompilingRuntime && clo.Esc() == EscHeap && !clo.IsGoWrap {
+ base.ErrorfAt(clo.Pos(), "heap-allocated closure %s, not allowed in runtime", FuncName(clo.Func))
+ }
+}
+
+// IsTrivialClosure reports whether closure clo has an
+// empty list of captured vars.
+func IsTrivialClosure(clo *ClosureExpr) bool {
+ return len(clo.Func.ClosureVars) == 0
+}
+
+// globClosgen is like Func.Closgen, but for the global scope.
+var globClosgen int32
+
+// closureName generates a new unique name for a closure within outerfn.
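+// For example, the first function literal inside a function F is
+// typically named "F.func1", and one at package scope "glob..func1"
+// (a sketch of the naming scheme implemented below).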
+func closureName(outerfn *Func) *types.Sym {
+ pkg := types.LocalPkg
+ outer := "glob."
+ prefix := "func"
+ gen := &globClosgen
+
+ if outerfn != nil {
+ if outerfn.OClosure != nil {
+ prefix = ""
+ }
+
+ pkg = outerfn.Sym().Pkg
+ outer = FuncName(outerfn)
+
+ // There may be multiple functions named "_". In those
+ // cases, we can't use their individual Closgens as it
+ // would lead to name clashes.
+ if !IsBlank(outerfn.Nname) {
+ gen = &outerfn.Closgen
+ }
+ }
+
+ *gen++
+ return pkg.Lookup(fmt.Sprintf("%s.%s%d", outer, prefix, *gen))
+}
+
+// NewClosureFunc creates a new Func to represent a function literal.
+// If hidden is true, then the closure is marked hidden (i.e., as a
+// function literal contained within another function, rather than a
+// package-scope variable initialization expression).
+func NewClosureFunc(pos src.XPos, hidden bool) *Func {
+ fn := NewFunc(pos)
+ fn.SetIsHiddenClosure(hidden)
+
+ fn.Nname = NewNameAt(pos, BlankNode.Sym())
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+
+ fn.OClosure = NewClosureExpr(pos, fn)
+
+ return fn
+}
+
+// NameClosure generates a unique name for the given function literal,
+// which must have appeared within outerfn.
+func NameClosure(clo *ClosureExpr, outerfn *Func) {
+ fn := clo.Func
+ if fn.IsHiddenClosure() != (outerfn != nil) {
+ base.FatalfAt(clo.Pos(), "closure naming inconsistency: hidden %v, but outer %v", fn.IsHiddenClosure(), outerfn)
+ }
+
+ name := fn.Nname
+ if !IsBlank(name) {
+ base.FatalfAt(clo.Pos(), "closure already named: %v", name)
+ }
+
+ name.SetSym(closureName(outerfn))
+ MarkFunc(name)
+}
+
+// UseClosure checks that the given function literal has been set up
+// correctly, and then returns it as an expression.
+// It must be called after clo.Func.ClosureVars has been set.
+func UseClosure(clo *ClosureExpr, pkg *Package) Node {
+ fn := clo.Func
+ name := fn.Nname
+
+ if IsBlank(name) {
+ base.FatalfAt(fn.Pos(), "unnamed closure func: %v", fn)
+ }
+ // Caution: clo.Typecheck() is still 0 when UseClosure is called by
+ // tcClosure.
+ if fn.Typecheck() != 1 || name.Typecheck() != 1 {
+ base.FatalfAt(fn.Pos(), "missed typecheck: %v", fn)
+ }
+ if clo.Type() == nil || name.Type() == nil {
+ base.FatalfAt(fn.Pos(), "missing types: %v", fn)
+ }
+ if !types.Identical(clo.Type(), name.Type()) {
+ base.FatalfAt(fn.Pos(), "mismatched types: %v", fn)
+ }
+
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("new closure func: %v", fn)
+ Dump(s, fn)
+ }
+
+ if pkg != nil {
+ pkg.Decls = append(pkg.Decls, fn)
+ }
+
+ if false && IsTrivialClosure(clo) {
+ // TODO(mdempsky): Investigate if we can/should optimize this
+ // case. walkClosure already handles it later, but it could be
+ // useful to recognize earlier (e.g., it might allow multiple
+ // inlined calls to a function to share a common trivial closure
+ // func, rather than cloning it for each inlined call).
+ }
+
+ return clo
+}
diff --git a/src/cmd/compile/internal/ir/ir.go b/src/cmd/compile/internal/ir/ir.go
new file mode 100644
index 0000000..82224ca
--- /dev/null
+++ b/src/cmd/compile/internal/ir/ir.go
@@ -0,0 +1,5 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
diff --git a/src/cmd/compile/internal/ir/mini.go b/src/cmd/compile/internal/ir/mini.go
new file mode 100644
index 0000000..eeb7408
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mini.go
@@ -0,0 +1,92 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run -mod=mod mknode.go
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+)
+
+// A miniNode is a minimal node implementation,
+// meant to be embedded as the first field in a larger node implementation,
+// at a cost of 8 bytes.
+//
+// A miniNode is NOT a valid Node by itself: the embedding struct
+// must at the least provide:
+//
+// func (n *MyNode) String() string { return fmt.Sprint(n) }
+// func (n *MyNode) rawCopy() Node { c := *n; return &c }
+// func (n *MyNode) Format(s fmt.State, verb rune) { FmtNode(n, s, verb) }
+//
+// The embedding struct should also fill in n.op in its constructor,
+// for more useful panic messages when invalid methods are called,
+// instead of implementing Op itself.
+//
+type miniNode struct {
+ pos src.XPos // uint32
+ op Op // uint8
+ bits bitset8
+ esc uint16
+}
+
+// posOr returns pos if known, or else n.pos.
+// For use in DeepCopy.
+func (n *miniNode) posOr(pos src.XPos) src.XPos {
+ if pos.IsKnown() {
+ return pos
+ }
+ return n.pos
+}
+
+// op can be read, but not written.
+// An embedding implementation can provide a SetOp if desired.
+// (The panicking SetOp is with the other panics below.)
+func (n *miniNode) Op() Op { return n.op }
+func (n *miniNode) Pos() src.XPos { return n.pos }
+func (n *miniNode) SetPos(x src.XPos) { n.pos = x }
+func (n *miniNode) Esc() uint16 { return n.esc }
+func (n *miniNode) SetEsc(x uint16) { n.esc = x }
+
+const (
+ miniWalkdefShift = 0 // TODO(mdempsky): Move to Name.flags.
+ miniTypecheckShift = 2
+ miniDiag = 1 << 4
+ miniWalked = 1 << 5 // to prevent/catch re-walking
+)
+
+func (n *miniNode) Typecheck() uint8 { return n.bits.get2(miniTypecheckShift) }
+func (n *miniNode) SetTypecheck(x uint8) {
+ if x > 2 {
+ panic(fmt.Sprintf("cannot SetTypecheck %d", x))
+ }
+ n.bits.set2(miniTypecheckShift, x)
+}
+
+func (n *miniNode) Diag() bool { return n.bits&miniDiag != 0 }
+func (n *miniNode) SetDiag(x bool) { n.bits.set(miniDiag, x) }
+
+func (n *miniNode) Walked() bool { return n.bits&miniWalked != 0 }
+func (n *miniNode) SetWalked(x bool) { n.bits.set(miniWalked, x) }
+
+// Empty, immutable graph structure.
+
+func (n *miniNode) Init() Nodes { return Nodes{} }
+
+// Additional functionality unavailable.
+
+func (n *miniNode) no(name string) string { return "cannot " + name + " on " + n.op.String() }
+
+func (n *miniNode) Type() *types.Type { return nil }
+func (n *miniNode) SetType(*types.Type) { panic(n.no("SetType")) }
+func (n *miniNode) Name() *Name { return nil }
+func (n *miniNode) Sym() *types.Sym { return nil }
+func (n *miniNode) Val() constant.Value { panic(n.no("Val")) }
+func (n *miniNode) SetVal(v constant.Value) { panic(n.no("SetVal")) }
+func (n *miniNode) NonNil() bool { return false }
+func (n *miniNode) MarkNonNil() { panic(n.no("MarkNonNil")) }
diff --git a/src/cmd/compile/internal/ir/mknode.go b/src/cmd/compile/internal/ir/mknode.go
new file mode 100644
index 0000000..5a0aaad
--- /dev/null
+++ b/src/cmd/compile/internal/ir/mknode.go
@@ -0,0 +1,229 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "go/types"
+ "io/ioutil"
+ "log"
+ "reflect"
+ "sort"
+ "strings"
+
+ "golang.org/x/tools/go/packages"
+)
+
+var irPkg *types.Package
+var buf bytes.Buffer
+
+func main() {
+ cfg := &packages.Config{
+ Mode: packages.NeedSyntax | packages.NeedTypes,
+ }
+ pkgs, err := packages.Load(cfg, "cmd/compile/internal/ir")
+ if err != nil {
+ log.Fatal(err)
+ }
+ irPkg = pkgs[0].Types
+
+ fmt.Fprintln(&buf, "// Code generated by mknode.go. DO NOT EDIT.")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, "package ir")
+ fmt.Fprintln(&buf)
+ fmt.Fprintln(&buf, `import "fmt"`)
+
+ scope := irPkg.Scope()
+ for _, name := range scope.Names() {
+ if strings.HasPrefix(name, "mini") {
+ continue
+ }
+
+ obj, ok := scope.Lookup(name).(*types.TypeName)
+ if !ok {
+ continue
+ }
+ typ := obj.Type().(*types.Named)
+ if !implementsNode(types.NewPointer(typ)) {
+ continue
+ }
+
+ fmt.Fprintf(&buf, "\n")
+ fmt.Fprintf(&buf, "func (n *%s) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }\n", name)
+
+ switch name {
+ case "Name", "Func":
+ // Too specialized to automate.
+ continue
+ }
+
+ forNodeFields(typ,
+ "func (n *%[1]s) copy() Node { c := *n\n",
+ "",
+ "c.%[1]s = copy%[2]s(c.%[1]s)",
+ "return &c }\n")
+
+ forNodeFields(typ,
+ "func (n *%[1]s) doChildren(do func(Node) bool) bool {\n",
+ "if n.%[1]s != nil && do(n.%[1]s) { return true }",
+ "if do%[2]s(n.%[1]s, do) { return true }",
+ "return false }\n")
+
+ forNodeFields(typ,
+ "func (n *%[1]s) editChildren(edit func(Node) Node) {\n",
+ "if n.%[1]s != nil { n.%[1]s = edit(n.%[1]s).(%[2]s) }",
+ "edit%[2]s(n.%[1]s, edit)",
+ "}\n")
+ }
+
+ makeHelpers()
+
+ out, err := format.Source(buf.Bytes())
+ if err != nil {
+ // write out mangled source so we can see the bug.
+ out = buf.Bytes()
+ }
+
+ err = ioutil.WriteFile("node_gen.go", out, 0666)
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+// needHelper maps needed slice helpers from their base name to their
+// respective slice-element type.
+var needHelper = map[string]string{}
+
+func makeHelpers() {
+ var names []string
+ for name := range needHelper {
+ names = append(names, name)
+ }
+ sort.Strings(names)
+
+ for _, name := range names {
+ fmt.Fprintf(&buf, sliceHelperTmpl, name, needHelper[name])
+ }
+}
+
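+// sliceHelperTmpl is the template for the copy/do/edit helpers emitted for
+// each needed slice-element type. For example, instantiated with name
+// "CaseClauses" and element type "*CaseClause", it yields copyCaseClauses,
+// doCaseClauses, and editCaseClauses operating on []*CaseClause (a sketch
+// of the generated output).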
+const sliceHelperTmpl = `
+func copy%[1]s(list []%[2]s) []%[2]s {
+ if list == nil {
+ return nil
+ }
+ c := make([]%[2]s, len(list))
+ copy(c, list)
+ return c
+}
+func do%[1]s(list []%[2]s, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func edit%[1]s(list []%[2]s, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(%[2]s)
+ }
+ }
+}
+`
+
+func forNodeFields(named *types.Named, prologue, singleTmpl, sliceTmpl, epilogue string) {
+ fmt.Fprintf(&buf, prologue, named.Obj().Name())
+
+ anyField(named.Underlying().(*types.Struct), func(f *types.Var) bool {
+ if f.Embedded() {
+ return false
+ }
+ name, typ := f.Name(), f.Type()
+
+ slice, _ := typ.Underlying().(*types.Slice)
+ if slice != nil {
+ typ = slice.Elem()
+ }
+
+ tmpl, what := singleTmpl, types.TypeString(typ, types.RelativeTo(irPkg))
+ if implementsNode(typ) {
+ if slice != nil {
+ helper := strings.TrimPrefix(what, "*") + "s"
+ needHelper[helper] = what
+ tmpl, what = sliceTmpl, helper
+ }
+ } else if what == "*Field" {
+ // Special case for *Field.
+ tmpl = sliceTmpl
+ if slice != nil {
+ what = "Fields"
+ } else {
+ what = "Field"
+ }
+ } else {
+ return false
+ }
+
+ if tmpl == "" {
+ return false
+ }
+
+ // Allow template to not use all arguments without
+ // upsetting fmt.Printf.
+ s := fmt.Sprintf(tmpl+"\x00 %[1]s %[2]s", name, what)
+ fmt.Fprintln(&buf, s[:strings.LastIndex(s, "\x00")])
+ return false
+ })
+
+ fmt.Fprintf(&buf, epilogue)
+}
+
+func implementsNode(typ types.Type) bool {
+ if _, ok := typ.Underlying().(*types.Interface); ok {
+ // TODO(mdempsky): Check the interface implements Node.
+ // Worst case, node_gen.go will fail to compile if we're wrong.
+ return true
+ }
+
+ if ptr, ok := typ.(*types.Pointer); ok {
+ if str, ok := ptr.Elem().Underlying().(*types.Struct); ok {
+ return anyField(str, func(f *types.Var) bool {
+ return f.Embedded() && f.Name() == "miniNode"
+ })
+ }
+ }
+
+ return false
+}
+
+func anyField(typ *types.Struct, pred func(f *types.Var) bool) bool {
+ for i, n := 0, typ.NumFields(); i < n; i++ {
+ if value, ok := reflect.StructTag(typ.Tag(i)).Lookup("mknode"); ok {
+ if value != "-" {
+ panic(fmt.Sprintf("unexpected tag value: %q", value))
+ }
+ continue
+ }
+
+ f := typ.Field(i)
+ if pred(f) {
+ return true
+ }
+ if f.Embedded() {
+ if typ, ok := f.Type().Underlying().(*types.Struct); ok {
+ if anyField(typ, pred) {
+ return true
+ }
+ }
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ir/name.go b/src/cmd/compile/internal/ir/name.go
new file mode 100644
index 0000000..1d4110c
--- /dev/null
+++ b/src/cmd/compile/internal/ir/name.go
@@ -0,0 +1,557 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+
+ "go/constant"
+)
+
+// An Ident is an identifier, possibly qualified.
+type Ident struct {
+ miniExpr
+ sym *types.Sym
+}
+
+func NewIdent(pos src.XPos, sym *types.Sym) *Ident {
+ n := new(Ident)
+ n.op = ONONAME
+ n.pos = pos
+ n.sym = sym
+ return n
+}
+
+func (n *Ident) Sym() *types.Sym { return n.sym }
+
+func (*Ident) CanBeNtype() {}
+
+// Name holds Node fields used only by named nodes (ONAME, OTYPE, some OLITERAL).
+type Name struct {
+ miniExpr
+ BuiltinOp Op // uint8
+ Class Class // uint8
+ pragma PragmaFlag // int16
+ flags bitset16
+ DictIndex uint16 // index of the dictionary entry describing the type of this variable declaration plus 1
+ sym *types.Sym
+ Func *Func // TODO(austin): nil for I.M, eqFor, hashfor, and hashmem
+ Offset_ int64
+ val constant.Value
+ Opt interface{} // for use by escape analysis
+ Embed *[]Embed // list of embedded files, for ONAME var
+
+ PkgName *PkgName // real package for import . names
+ // For a local variable (not param) or extern, the initializing assignment (OAS or OAS2).
+ // For a closure var, the ONAME node of the outer captured variable.
+ // For the case-local variables of a type switch, the type switch guard (OTYPESW).
+	// For a range variable, the range statement (ORANGE).
+	// For a recv variable in a case of a select statement, the receive assignment (OSELRECV2).
+ // For the name of a function, points to corresponding Func node.
+ Defn Node
+
+ // The function, method, or closure in which local variable or param is declared.
+ Curfn *Func
+
+ Ntype Ntype
+ Heapaddr *Name // temp holding heap address of param
+
+ // ONAME closure linkage
+ // Consider:
+ //
+ // func f() {
+ // x := 1 // x1
+ // func() {
+ // use(x) // x2
+ // func() {
+ // use(x) // x3
+ // --- parser is here ---
+ // }()
+ // }()
+ // }
+ //
+ // There is an original declaration of x and then a chain of mentions of x
+ // leading into the current function. Each time x is mentioned in a new closure,
+ // we create a variable representing x for use in that specific closure,
+ // since the way you get to x is different in each closure.
+ //
+ // Let's number the specific variables as shown in the code:
+ // x1 is the original x, x2 is when mentioned in the closure,
+ // and x3 is when mentioned in the closure in the closure.
+ //
+ // We keep these linked (assume N > 1):
+ //
+ // - x1.Defn = original declaration statement for x (like most variables)
+ // - x1.Innermost = current innermost closure x (in this case x3), or nil for none
+ // - x1.IsClosureVar() = false
+ //
+ // - xN.Defn = x1, N > 1
+ // - xN.IsClosureVar() = true, N > 1
+ // - x2.Outer = nil
+ // - xN.Outer = x(N-1), N > 2
+ //
+ //
+ // When we look up x in the symbol table, we always get x1.
+ // Then we can use x1.Innermost (if not nil) to get the x
+ // for the innermost known closure function,
+ // but the first reference in a closure will find either no x1.Innermost
+ // or an x1.Innermost with .Funcdepth < Funcdepth.
+ // In that case, a new xN must be created, linked in with:
+ //
+ // xN.Defn = x1
+ // xN.Outer = x1.Innermost
+ // x1.Innermost = xN
+ //
+ // When we finish the function, we'll process its closure variables
+ // and find xN and pop it off the list using:
+ //
+ // x1 := xN.Defn
+ // x1.Innermost = xN.Outer
+ //
+ // We leave x1.Innermost set so that we can still get to the original
+ // variable quickly. Not shown here, but once we're
+ // done parsing a function and no longer need xN.Outer for the
+ // lexical x reference links as described above, funcLit
+ // recomputes xN.Outer as the semantic x reference link tree,
+ // even filling in x in intermediate closures that might not
+ // have mentioned it along the way to inner closures that did.
+ // See funcLit for details.
+ //
+ // During the eventual compilation, then, for closure variables we have:
+ //
+ // xN.Defn = original variable
+ // xN.Outer = variable captured in next outward scope
+ // to make closure where xN appears
+ //
+ // Because of the sharding of pieces of the node, x.Defn means x.Name.Defn
+ // and x.Innermost/Outer means x.Name.Param.Innermost/Outer.
+ Innermost *Name
+ Outer *Name
+}
+
+func (n *Name) isExpr() {}
+
+func (n *Name) copy() Node { panic(n.no("copy")) }
+func (n *Name) doChildren(do func(Node) bool) bool { return false }
+func (n *Name) editChildren(edit func(Node) Node) {}
+
+// TypeDefn returns the type definition for a named OTYPE.
+// That is, given "type T Defn", it returns Defn.
+// It is used by package types.
+func (n *Name) TypeDefn() *types.Type {
+ if n.Ntype != nil {
+ return n.Ntype.Type()
+ }
+ return n.Type()
+}
+
+// RecordFrameOffset records the frame offset for the name.
+// It is used by package types when laying out function arguments.
+func (n *Name) RecordFrameOffset(offset int64) {
+ n.SetFrameOffset(offset)
+}
+
+// NewNameAt returns a new ONAME Node associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewNameAt(pos src.XPos, sym *types.Sym) *Name {
+ if sym == nil {
+ base.Fatalf("NewNameAt nil")
+ }
+ return newNameAt(pos, ONAME, sym)
+}
+
+// NewIota returns a new OIOTA Node.
+func NewIota(pos src.XPos, sym *types.Sym) *Name {
+ if sym == nil {
+ base.Fatalf("NewIota nil")
+ }
+ return newNameAt(pos, OIOTA, sym)
+}
+
+// NewDeclNameAt returns a new Name associated with symbol s at position pos.
+// The caller is responsible for setting Curfn.
+func NewDeclNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+ if sym == nil {
+ base.Fatalf("NewDeclNameAt nil")
+ }
+ switch op {
+ case ONAME, OTYPE, OLITERAL:
+ // ok
+ default:
+ base.Fatalf("NewDeclNameAt op %v", op)
+ }
+ return newNameAt(pos, op, sym)
+}
+
+// NewConstAt returns a new OLITERAL Node associated with symbol s at position pos.
+func NewConstAt(pos src.XPos, sym *types.Sym, typ *types.Type, val constant.Value) *Name {
+ if sym == nil {
+ base.Fatalf("NewConstAt nil")
+ }
+ n := newNameAt(pos, OLITERAL, sym)
+ n.SetType(typ)
+ n.SetVal(val)
+ return n
+}
+
+// newNameAt is like NewNameAt but allows sym == nil.
+func newNameAt(pos src.XPos, op Op, sym *types.Sym) *Name {
+ n := new(Name)
+ n.op = op
+ n.pos = pos
+ n.sym = sym
+ return n
+}
+
+func (n *Name) Name() *Name { return n }
+func (n *Name) Sym() *types.Sym { return n.sym }
+func (n *Name) SetSym(x *types.Sym) { n.sym = x }
+func (n *Name) SubOp() Op { return n.BuiltinOp }
+func (n *Name) SetSubOp(x Op) { n.BuiltinOp = x }
+func (n *Name) SetFunc(x *Func) { n.Func = x }
+func (n *Name) Offset() int64 { panic("Name.Offset") }
+func (n *Name) SetOffset(x int64) {
+ if x != 0 {
+ panic("Name.SetOffset")
+ }
+}
+func (n *Name) FrameOffset() int64 { return n.Offset_ }
+func (n *Name) SetFrameOffset(x int64) { n.Offset_ = x }
+func (n *Name) Iota() int64 { return n.Offset_ }
+func (n *Name) SetIota(x int64) { n.Offset_ = x }
+func (n *Name) Walkdef() uint8 { return n.bits.get2(miniWalkdefShift) }
+func (n *Name) SetWalkdef(x uint8) {
+ if x > 3 {
+ panic(fmt.Sprintf("cannot SetWalkdef %d", x))
+ }
+ n.bits.set2(miniWalkdefShift, x)
+}
+
+func (n *Name) Linksym() *obj.LSym { return n.sym.Linksym() }
+func (n *Name) LinksymABI(abi obj.ABI) *obj.LSym { return n.sym.LinksymABI(abi) }
+
+func (*Name) CanBeNtype() {}
+func (*Name) CanBeAnSSASym() {}
+func (*Name) CanBeAnSSAAux() {}
+
+// Pragma returns the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) Pragma() PragmaFlag { return n.pragma }
+
+// SetPragma sets the PragmaFlag for n, which must be for an OTYPE.
+func (n *Name) SetPragma(flag PragmaFlag) { n.pragma = flag }
+
+// Alias reports whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) Alias() bool { return n.flags&nameAlias != 0 }
+
+// SetAlias sets whether n, which must be for an OTYPE, is a type alias.
+func (n *Name) SetAlias(alias bool) { n.flags.set(nameAlias, alias) }
+
+const (
+ nameReadonly = 1 << iota
+ nameByval // is the variable captured by value or by reference
+ nameNeedzero // if it contains pointers, needs to be zeroed on function entry
+ nameAutoTemp // is the variable a temporary (implies no dwarf info. reset if escapes to heap)
+ nameUsed // for variable declared and not used error
+ nameIsClosureVar // PAUTOHEAP closure pseudo-variable; original (if any) at n.Defn
+ nameIsOutputParamHeapAddr // pointer to a result parameter's heap copy
+ nameIsOutputParamInRegisters // output parameter in registers spills as an auto
+ nameAddrtaken // address taken, even if not moved to heap
+ nameInlFormal // PAUTO created by inliner, derived from callee formal
+ nameInlLocal // PAUTO created by inliner, derived from callee local
+ nameOpenDeferSlot // if temporary var storing info for open-coded defers
+ nameLibfuzzerExtraCounter // if PEXTERN should be assigned to __libfuzzer_extra_counters section
+ nameAlias // is type name an alias
+)
+
+func (n *Name) Readonly() bool { return n.flags&nameReadonly != 0 }
+func (n *Name) Needzero() bool { return n.flags&nameNeedzero != 0 }
+func (n *Name) AutoTemp() bool { return n.flags&nameAutoTemp != 0 }
+func (n *Name) Used() bool { return n.flags&nameUsed != 0 }
+func (n *Name) IsClosureVar() bool { return n.flags&nameIsClosureVar != 0 }
+func (n *Name) IsOutputParamHeapAddr() bool { return n.flags&nameIsOutputParamHeapAddr != 0 }
+func (n *Name) IsOutputParamInRegisters() bool { return n.flags&nameIsOutputParamInRegisters != 0 }
+func (n *Name) Addrtaken() bool { return n.flags&nameAddrtaken != 0 }
+func (n *Name) InlFormal() bool { return n.flags&nameInlFormal != 0 }
+func (n *Name) InlLocal() bool { return n.flags&nameInlLocal != 0 }
+func (n *Name) OpenDeferSlot() bool { return n.flags&nameOpenDeferSlot != 0 }
+func (n *Name) LibfuzzerExtraCounter() bool { return n.flags&nameLibfuzzerExtraCounter != 0 }
+
+func (n *Name) setReadonly(b bool) { n.flags.set(nameReadonly, b) }
+func (n *Name) SetNeedzero(b bool) { n.flags.set(nameNeedzero, b) }
+func (n *Name) SetAutoTemp(b bool) { n.flags.set(nameAutoTemp, b) }
+func (n *Name) SetUsed(b bool) { n.flags.set(nameUsed, b) }
+func (n *Name) SetIsClosureVar(b bool) { n.flags.set(nameIsClosureVar, b) }
+func (n *Name) SetIsOutputParamHeapAddr(b bool) { n.flags.set(nameIsOutputParamHeapAddr, b) }
+func (n *Name) SetIsOutputParamInRegisters(b bool) { n.flags.set(nameIsOutputParamInRegisters, b) }
+func (n *Name) SetAddrtaken(b bool) { n.flags.set(nameAddrtaken, b) }
+func (n *Name) SetInlFormal(b bool) { n.flags.set(nameInlFormal, b) }
+func (n *Name) SetInlLocal(b bool) { n.flags.set(nameInlLocal, b) }
+func (n *Name) SetOpenDeferSlot(b bool) { n.flags.set(nameOpenDeferSlot, b) }
+func (n *Name) SetLibfuzzerExtraCounter(b bool) { n.flags.set(nameLibfuzzerExtraCounter, b) }
+
+// OnStack reports whether variable n may reside on the stack.
+func (n *Name) OnStack() bool {
+ if n.Op() == ONAME {
+ switch n.Class {
+ case PPARAM, PPARAMOUT, PAUTO:
+ return n.Esc() != EscHeap
+ case PEXTERN, PAUTOHEAP:
+ return false
+ }
+ }
+ // Note: fmt.go:dumpNodeHeader calls all "func() bool"-typed
+ // methods, but it can only recover from panics, not Fatalf.
+ panic(fmt.Sprintf("%v: not a variable: %v", base.FmtPos(n.Pos()), n))
+}
+
+// MarkReadonly indicates that n is an ONAME with readonly contents.
+func (n *Name) MarkReadonly() {
+ if n.Op() != ONAME {
+ base.Fatalf("Node.MarkReadonly %v", n.Op())
+ }
+ n.setReadonly(true)
+ // Mark the linksym as readonly immediately
+ // so that the SSA backend can use this information.
+ // It will be overridden later during dumpglobls.
+ n.Linksym().Type = objabi.SRODATA
+}
+
+// Val returns the constant.Value for the node.
+func (n *Name) Val() constant.Value {
+ if n.val == nil {
+ return constant.MakeUnknown()
+ }
+ return n.val
+}
+
+// SetVal sets the constant.Value for the node.
+func (n *Name) SetVal(v constant.Value) {
+ if n.op != OLITERAL {
+ panic(n.no("SetVal"))
+ }
+ AssertValidTypeForConst(n.Type(), v)
+ n.val = v
+}
+
+// Canonical returns the logical declaration that n represents. If n
+// is a closure variable, then Canonical returns the original Name as
+// it appears in the function that immediately contains the
+// declaration. Otherwise, Canonical simply returns n itself.
+func (n *Name) Canonical() *Name {
+ if n.IsClosureVar() && n.Defn != nil {
+ n = n.Defn.(*Name)
+ }
+ return n
+}
+
+func (n *Name) SetByval(b bool) {
+ if n.Canonical() != n {
+ base.Fatalf("SetByval called on non-canonical variable: %v", n)
+ }
+ n.flags.set(nameByval, b)
+}
+
+func (n *Name) Byval() bool {
+ // We require byval to be set on the canonical variable, but we
+ // allow it to be accessed from any instance.
+ return n.Canonical().flags&nameByval != 0
+}
+
+// NewClosureVar returns a new closure variable for fn to refer to
+// outer variable n.
+func NewClosureVar(pos src.XPos, fn *Func, n *Name) *Name {
+ c := NewNameAt(pos, n.Sym())
+ c.Curfn = fn
+ c.Class = PAUTOHEAP
+ c.SetIsClosureVar(true)
+ c.Defn = n.Canonical()
+ c.Outer = n
+
+ c.SetType(n.Type())
+ c.SetTypecheck(n.Typecheck())
+
+ fn.ClosureVars = append(fn.ClosureVars, c)
+
+ return c
+}
+
+// NewHiddenParam returns a new hidden parameter for fn with the given
+// name and type.
+func NewHiddenParam(pos src.XPos, fn *Func, sym *types.Sym, typ *types.Type) *Name {
+ if fn.OClosure != nil {
+ base.FatalfAt(fn.Pos(), "cannot add hidden parameters to closures")
+ }
+
+ fn.SetNeedctxt(true)
+
+ // Create a fake parameter, disassociated from any real function, to
+ // pretend to capture.
+ fake := NewNameAt(pos, sym)
+ fake.Class = PPARAM
+ fake.SetType(typ)
+ fake.SetByval(true)
+
+ return NewClosureVar(pos, fn, fake)
+}
+
+// CaptureName returns a Name suitable for referring to n from within function
+// fn or from the package block if fn is nil. If n is a free variable declared
+// within a function that encloses fn, then CaptureName returns the closure
+// variable that refers to n within fn, creating it if necessary.
+// Otherwise, it simply returns n.
+func CaptureName(pos src.XPos, fn *Func, n *Name) *Name {
+ if n.Op() != ONAME || n.Curfn == nil {
+ return n // okay to use directly
+ }
+ if n.IsClosureVar() {
+ base.FatalfAt(pos, "misuse of CaptureName on closure variable: %v", n)
+ }
+
+ c := n.Innermost
+ if c == nil {
+ c = n
+ }
+ if c.Curfn == fn {
+ return c
+ }
+
+ if fn == nil {
+ base.FatalfAt(pos, "package-block reference to %v, declared in %v", n, n.Curfn)
+ }
+
+ // Do not have a closure var for the active closure yet; make one.
+ c = NewClosureVar(pos, fn, c)
+
+ // Link into list of active closure variables.
+ // Popped from list in FinishCaptureNames.
+ n.Innermost = c
+
+ return c
+}
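+
+// A minimal usage sketch (illustrative only, not part of the compiler):
+// while building the body of closure fn, a frontend that encounters a
+// reference to an outer variable n might write
+//
+//	ref := CaptureName(pos, fn, n) // fn's closure variable for n
+//
+// and use ref in place of n. Once fn's body is complete, it calls
+// FinishCaptureNames(pos, outerfn, fn) to unhook the Innermost links
+// and recapture any variables needed in the enclosing function.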
+
+// FinishCaptureNames handles any work leftover from calling CaptureName
+// earlier. outerfn should be the function that immediately encloses fn.
+func FinishCaptureNames(pos src.XPos, outerfn, fn *Func) {
+ // closure-specific variables are hanging off the
+ // ordinary ones; see CaptureName above.
+ // unhook them.
+ // make the list of pointers for the closure call.
+ for _, cv := range fn.ClosureVars {
+ // Unlink from n; see comment above on type Name for these fields.
+ n := cv.Defn.(*Name)
+ n.Innermost = cv.Outer
+
+ // If the closure usage of n is not dense, we need to make it
+ // dense by recapturing n within the enclosing function.
+ //
+ // That is, suppose we just finished parsing the innermost
+ // closure f4 in this code:
+ //
+ // func f() {
+ // n := 1
+ // func() { // f2
+ // use(n)
+ // func() { // f3
+ // func() { // f4
+ // use(n)
+ // }()
+ // }()
+ // }()
+ // }
+ //
+ // At this point cv.Outer is f2's n; there is no n for f3. To
+ // construct the closure f4 from within f3, we need to use f3's
+ // n and in this case we need to create f3's n with CaptureName.
+ //
+ // We'll decide later in walk whether to use v directly or &v.
+ cv.Outer = CaptureName(pos, outerfn, n)
+ }
+}
+
+// SameSource reports whether two nodes refer to the same source
+// element.
+//
+// It exists to help incrementally migrate the compiler towards
+// allowing the introduction of IdentExpr (#42990). Once we have
+// IdentExpr, it will no longer be safe to directly compare Node
+// values to tell if they refer to the same Name. Instead, code will
+// need to explicitly get references to the underlying Name object(s),
+// and compare those instead.
+//
+// It will still be safe to compare Nodes directly for checking if two
+// nodes are syntactically the same. The SameSource function exists to
+// indicate code that intentionally compares Nodes for syntactic
+// equality as opposed to code that has yet to be updated in
+// preparation for IdentExpr.
+func SameSource(n1, n2 Node) bool {
+ return n1 == n2
+}
+
+// Uses reports whether expression x is a (direct) use of the given
+// variable.
+func Uses(x Node, v *Name) bool {
+ if v == nil || v.Op() != ONAME {
+ base.Fatalf("RefersTo bad Name: %v", v)
+ }
+ return x.Op() == ONAME && x.Name() == v
+}
+
+// DeclaredBy reports whether expression x refers (directly) to a
+// variable that was declared by the given statement.
+func DeclaredBy(x, stmt Node) bool {
+ if stmt == nil {
+ base.Fatalf("DeclaredBy nil")
+ }
+ return x.Op() == ONAME && SameSource(x.Name().Defn, stmt)
+}
+
+// The Class of a variable/function describes the "storage class"
+// of a variable or function. During parsing, storage classes are
+// called declaration contexts.
+type Class uint8
+
+//go:generate stringer -type=Class name.go
+const (
+ Pxxx Class = iota // no class; used during ssa conversion to indicate pseudo-variables
+ PEXTERN // global variables
+ PAUTO // local variables
+ PAUTOHEAP // local variables or parameters moved to heap
+ PPARAM // input arguments
+ PPARAMOUT // output results
+ PTYPEPARAM // type params
+ PFUNC // global functions
+
+ // Careful: Class is stored in three bits in Node.flags.
+ _ = uint((1 << 3) - iota) // static assert for iota <= (1 << 3)
+)
+
+type Embed struct {
+ Pos src.XPos
+ Patterns []string
+}
+
+// A PkgName is an identifier referring to an imported package.
+type PkgName struct {
+ miniNode
+ sym *types.Sym
+ Pkg *types.Pkg
+ Used bool
+}
+
+func (p *PkgName) Sym() *types.Sym { return p.sym }
+
+func (*PkgName) CanBeNtype() {}
+
+func NewPkgName(pos src.XPos, sym *types.Sym, pkg *types.Pkg) *PkgName {
+ p := &PkgName{sym: sym, Pkg: pkg}
+ p.op = OPACK
+ p.pos = pos
+ return p
+}
diff --git a/src/cmd/compile/internal/ir/node.go b/src/cmd/compile/internal/ir/node.go
new file mode 100644
index 0000000..5fdccf8
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node.go
@@ -0,0 +1,620 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// “Abstract” syntax representation.
+
+package ir
+
+import (
+ "fmt"
+ "go/constant"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// A Node is the abstract interface to an IR node.
+type Node interface {
+ // Formatting
+ Format(s fmt.State, verb rune)
+
+ // Source position.
+ Pos() src.XPos
+ SetPos(x src.XPos)
+
+ // For making copies. For Copy and SepCopy.
+ copy() Node
+
+ doChildren(func(Node) bool) bool
+ editChildren(func(Node) Node)
+
+ // Abstract graph structure, for generic traversals.
+ Op() Op
+ Init() Nodes
+
+ // Fields specific to certain Ops only.
+ Type() *types.Type
+ SetType(t *types.Type)
+ Name() *Name
+ Sym() *types.Sym
+ Val() constant.Value
+ SetVal(v constant.Value)
+
+ // Storage for analysis passes.
+ Esc() uint16
+ SetEsc(x uint16)
+ Diag() bool
+ SetDiag(x bool)
+
+ // Typecheck values:
+ // 0 means the node is not typechecked
+ // 1 means the node is completely typechecked
+ // 2 means typechecking of the node is in progress
+ // 3 means the node has its type from types2, but may need transformation
+ Typecheck() uint8
+ SetTypecheck(x uint8)
+ NonNil() bool
+ MarkNonNil()
+}
+
+// Line returns n's position as a string. If n has been inlined,
+// it uses the outermost position where n has been inlined.
+func Line(n Node) string {
+ return base.FmtPos(n.Pos())
+}
+
+func IsSynthetic(n Node) bool {
+ name := n.Sym().Name
+ return name[0] == '.' || name[0] == '~'
+}
+
+// IsAutoTmp indicates if n was created by the compiler as a temporary,
+// based on the setting of the .AutoTemp flag in n's Name.
+func IsAutoTmp(n Node) bool {
+ if n == nil || n.Op() != ONAME {
+ return false
+ }
+ return n.Name().AutoTemp()
+}
+
+// MayBeShared reports whether n may occur in multiple places in the AST.
+// Extra care must be taken when mutating such a node.
+func MayBeShared(n Node) bool {
+ switch n.Op() {
+ case ONAME, OLITERAL, ONIL, OTYPE:
+ return true
+ }
+ return false
+}
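+
+// For example (illustrative): the ONAME node for a variable x is the same
+// Node value at every use of x, so mutating it in place would affect every
+// use. Callers that need to modify such a node make a copy first (see the
+// Copy and SepCopy notes on the Node interface above).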
+
+type InitNode interface {
+ Node
+ PtrInit() *Nodes
+ SetInit(x Nodes)
+}
+
+func TakeInit(n Node) Nodes {
+ init := n.Init()
+ if len(init) != 0 {
+ n.(InitNode).SetInit(nil)
+ }
+ return init
+}
+
+//go:generate stringer -type=Op -trimprefix=O node.go
+
+type Op uint8
+
+// Node ops.
+const (
+ OXXX Op = iota
+
+ // names
+ ONAME // var or func name
+ // Unnamed arg or return value: f(int, string) (int, error) { etc }
+ // Also used for a qualified package identifier that hasn't been resolved yet.
+ ONONAME
+ OTYPE // type name
+ OPACK // import
+ OLITERAL // literal
+ ONIL // nil
+
+ // expressions
+ OADD // X + Y
+ OSUB // X - Y
+ OOR // X | Y
+ OXOR // X ^ Y
+ OADDSTR // +{List} (string addition, list elements are strings)
+ OADDR // &X
+ OANDAND // X && Y
+ OAPPEND // append(Args); after walk, X may contain elem type descriptor
+ OBYTES2STR // Type(X) (Type is string, X is a []byte)
+ OBYTES2STRTMP // Type(X) (Type is string, X is a []byte, ephemeral)
+ ORUNES2STR // Type(X) (Type is string, X is a []rune)
+ OSTR2BYTES // Type(X) (Type is []byte, X is a string)
+ OSTR2BYTESTMP // Type(X) (Type is []byte, X is a string, ephemeral)
+ OSTR2RUNES // Type(X) (Type is []rune, X is a string)
+ OSLICE2ARRPTR // Type(X) (Type is *[N]T, X is a []T)
+ // X = Y or (if Def=true) X := Y
+ // If Def, then Init includes a DCL node for X.
+ OAS
+ // Lhs = Rhs (x, y, z = a, b, c) or (if Def=true) Lhs := Rhs
+ // If Def, then Init includes DCL nodes for Lhs
+ OAS2
+ OAS2DOTTYPE // Lhs = Rhs (x, ok = I.(int))
+ OAS2FUNC // Lhs = Rhs (x, y = f())
+ OAS2MAPR // Lhs = Rhs (x, ok = m["foo"])
+ OAS2RECV // Lhs = Rhs (x, ok = <-c)
+ OASOP // X AsOp= Y (x += y)
+ OCALL // X(Args) (function call, method call or type conversion)
+
+ // OCALLFUNC, OCALLMETH, and OCALLINTER have the same structure.
+ // Prior to walk, they are: X(Args), where Args is all regular arguments.
+ // After walk, if the evaluation of any argument might require a temporary variable,
+ // that temporary variable will be pushed to Init, and Args will contain an updated
+ // set of arguments. KeepAlive is all OVARLIVE nodes that are attached to OCALLxxx.
+ OCALLFUNC // X(Args) (function call f(args))
+ OCALLMETH // X(Args) (direct method call x.Method(args))
+ OCALLINTER // X(Args) (interface method call x.Method(args))
+ OCAP // cap(X)
+ OCLOSE // close(X)
+ OCLOSURE // func Type { Func.Closure.Body } (func literal)
+ OCOMPLIT // Type{List} (composite literal, not yet lowered to specific form)
+ OMAPLIT // Type{List} (composite literal, Type is map)
+ OSTRUCTLIT // Type{List} (composite literal, Type is struct)
+ OARRAYLIT // Type{List} (composite literal, Type is array)
+ OSLICELIT // Type{List} (composite literal, Type is slice), Len is slice length.
+ OPTRLIT // &X (X is composite literal)
+ OCONV // Type(X) (type conversion)
+ OCONVIFACE // Type(X) (type conversion, to interface)
+ OCONVIDATA // Builds a data word to store X in an interface. Equivalent to IDATA(CONVIFACE(X)). Is an ir.ConvExpr.
+ OCONVNOP // Type(X) (type conversion, no effect)
+ OCOPY // copy(X, Y)
+ ODCL // var X (declares X of type X.Type)
+
+ // Used during parsing but don't last.
+ ODCLFUNC // func f() or func (r) f()
+ ODCLCONST // const pi = 3.14
+ ODCLTYPE // type Int int or type Int = int
+
+ ODELETE // delete(Args)
+ ODOT // X.Sel (X is of struct type)
+ ODOTPTR // X.Sel (X is of pointer to struct type)
+ ODOTMETH // X.Sel (X is non-interface, Sel is method name)
+ ODOTINTER // X.Sel (X is interface, Sel is method name)
+ OXDOT // X.Sel (before rewrite to one of the preceding)
+ ODOTTYPE // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved); after walk, Itab contains address of interface type descriptor and Itab.X contains address of concrete type descriptor
+ ODOTTYPE2 // X.Ntype or X.Type (.Ntype during parsing, .Type once resolved; on rhs of OAS2DOTTYPE); after walk, Itab contains address of interface type descriptor
+ OEQ // X == Y
+ ONE // X != Y
+ OLT // X < Y
+ OLE // X <= Y
+ OGE // X >= Y
+ OGT // X > Y
+ ODEREF // *X
+ OINDEX // X[Index] (index of array or slice)
+ OINDEXMAP // X[Index] (index of map)
+ OKEY // Key:Value (key:value in struct/array/map literal)
+ OSTRUCTKEY // Field:Value (key:value in struct literal, after type checking)
+ OLEN // len(X)
+ OMAKE // make(Args) (before type checking converts to one of the following)
+ OMAKECHAN // make(Type[, Len]) (type is chan)
+ OMAKEMAP // make(Type[, Len]) (type is map)
+ OMAKESLICE // make(Type[, Len[, Cap]]) (type is slice)
+ OMAKESLICECOPY // makeslicecopy(Type, Len, Cap) (type is slice; Len is length and Cap is the copied from slice)
+ // OMAKESLICECOPY is created by the order pass and corresponds to:
+ // s = make(Type, Len); copy(s, Cap)
+ //
+ // Bounded can be set on the node when Len == len(Cap) is known at compile time.
+ //
+ // This node is created so the walk pass can optimize this pattern which would
+ // otherwise be hard to detect after the order pass.
+ OMUL // X * Y
+ ODIV // X / Y
+ OMOD // X % Y
+ OLSH // X << Y
+ ORSH // X >> Y
+ OAND // X & Y
+ OANDNOT // X &^ Y
+ ONEW // new(X); corresponds to calls to new in source code
+ ONOT // !X
+ OBITNOT // ^X
+ OPLUS // +X
+ ONEG // -X
+ OOROR // X || Y
+ OPANIC // panic(X)
+ OPRINT // print(List)
+ OPRINTN // println(List)
+ OPAREN // (X)
+ OSEND // Chan <- Value
+ OSLICE // X[Low : High] (X is untypechecked or slice)
+ OSLICEARR // X[Low : High] (X is pointer to array)
+ OSLICESTR // X[Low : High] (X is string)
+ OSLICE3 // X[Low : High : Max] (X is untypechecked or slice)
+ OSLICE3ARR // X[Low : High : Max] (X is pointer to array)
+ OSLICEHEADER // sliceheader{Ptr, Len, Cap} (Ptr is unsafe.Pointer, Len is length, Cap is capacity)
+ ORECOVER // recover()
+ ORECOVERFP // recover(Args) w/ explicit FP argument
+ ORECV // <-X
+ ORUNESTR // Type(X) (Type is string, X is rune)
+ OSELRECV2 // like OAS2: Lhs = Rhs where len(Lhs)=2, len(Rhs)=1, Rhs[0].Op = ORECV (appears as .Var of OCASE)
+ OIOTA // iota
+ OREAL // real(X)
+ OIMAG // imag(X)
+ OCOMPLEX // complex(X, Y)
+ OALIGNOF // unsafe.Alignof(X)
+ OOFFSETOF // unsafe.Offsetof(X)
+ OSIZEOF // unsafe.Sizeof(X)
+ OUNSAFEADD // unsafe.Add(X, Y)
+ OUNSAFESLICE // unsafe.Slice(X, Y)
+ OMETHEXPR // X(Args) (method expression T.Method(args), first argument is the method receiver)
+ OMETHVALUE // X.Sel (method expression t.Method, not called)
+
+ // statements
+ OBLOCK // { List } (block of code)
+ OBREAK // break [Label]
+ // OCASE: case List: Body (List==nil means default)
+ // For OTYPESW, List is a OTYPE node for the specified type (or OLITERAL
+ // for nil) or an ODYNAMICTYPE indicating a runtime type for generics.
+ // If a type-switch variable is specified, Var is an
+ // ONAME for the version of the type-switch variable with the specified
+ // type.
+ OCASE
+ OCONTINUE // continue [Label]
+ ODEFER // defer Call
+ OFALL // fallthrough
+ OFOR // for Init; Cond; Post { Body }
+ // OFORUNTIL is like OFOR, but the test (Cond) is applied after the body:
+ // Init
+ // top: { Body } // Execute the body at least once
+ // cont: Post
+ // if Cond { // And then test the loop condition
+ // List // Before looping to top, execute List
+ // goto top
+ // }
+ // OFORUNTIL is created by walk. There's no way to write this in Go code.
+ OFORUNTIL
+ OGOTO // goto Label
+ OIF // if Init; Cond { Then } else { Else }
+ OLABEL // Label:
+ OGO // go Call
+ ORANGE // for Key, Value = range X { Body }
+ ORETURN // return Results
+ OSELECT // select { Cases }
+ OSWITCH // switch Init; Expr { Cases }
+ // OTYPESW: X := Y.(type) (appears as .Tag of OSWITCH)
+ // X is nil if there is no type-switch variable
+ OTYPESW
+ OFUNCINST // instantiation of a generic function
+
+ // types
+ OTCHAN // chan int
+ OTMAP // map[string]int
+ OTSTRUCT // struct{}
+ OTINTER // interface{}
+ // OTFUNC: func() - Recv is receiver field, Params is list of param fields, Results is
+ // list of result fields.
+ OTFUNC
+ OTARRAY // [8]int or [...]int
+ OTSLICE // []int
+
+ // misc
+ // intermediate representation of an inlined call. Uses Init (assignments
+ // for the captured variables, parameters, retvars, & INLMARK op),
+ // Body (body of the inlined function), and ReturnVars (list of
+ // return values)
+ OINLCALL // intermediary representation of an inlined call.
+ OEFACE // itable and data words of an empty-interface value.
+ OITAB // itable word of an interface value.
+ OIDATA // data word of an interface value in X
+ OSPTR // base pointer of a slice or string.
+ OCFUNC // reference to c function pointer (not go func value)
+ OCHECKNIL // emit code to ensure pointer/interface not nil
+ OVARDEF // variable is about to be fully initialized
+ OVARKILL // variable is dead
+ OVARLIVE // variable is alive
+ ORESULT // result of a function call; Xoffset is stack offset
+ OINLMARK // start of an inlined body, with file/line of caller. Xoffset is an index into the inline tree.
+ OLINKSYMOFFSET // offset within a name
+
+ // opcodes for generics
+ ODYNAMICDOTTYPE // x = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICDOTTYPE2 // x, ok = i.(T) where T is a type parameter (or derived from a type parameter)
+ ODYNAMICTYPE // a type node for type switches (represents a dynamic target type for a type switch)
+
+ // arch-specific opcodes
+ OTAILCALL // tail call to another function
+ OGETG // runtime.getg() (read g pointer)
+ OGETCALLERPC // runtime.getcallerpc() (continuation PC in caller frame)
+ OGETCALLERSP // runtime.getcallersp() (stack pointer in caller frame)
+
+ OEND
+)
+
+// IsCmp reports whether op is a comparison operation (==, !=, <, <=,
+// >, or >=).
+func (op Op) IsCmp() bool {
+ switch op {
+ case OEQ, ONE, OLT, OLE, OGT, OGE:
+ return true
+ }
+ return false
+}
+
+// Nodes is a slice of Node.
+type Nodes []Node
+
+// Append appends entries to Nodes.
+func (n *Nodes) Append(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ *n = append(*n, a...)
+}
+
+// Prepend prepends entries to Nodes.
+// If a slice is passed in, this will take ownership of it.
+func (n *Nodes) Prepend(a ...Node) {
+ if len(a) == 0 {
+ return
+ }
+ *n = append(a, *n...)
+}
+
+// Take clears n, returning its former contents.
+func (n *Nodes) Take() []Node {
+ ret := *n
+ *n = nil
+ return ret
+}
+
+// Copy returns a copy of the content of the slice.
+func (n Nodes) Copy() Nodes {
+ if n == nil {
+ return nil
+ }
+ c := make(Nodes, len(n))
+ copy(c, n)
+ return c
+}
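+
+// A minimal usage sketch of Nodes (illustrative only; a, b, and c are
+// assumed to be existing Node values):
+//
+//	var list Nodes
+//	list.Append(a, b)    // list is now [a, b]
+//	list.Prepend(c)      // list is now [c, a, b]
+//	saved := list.Copy() // an independent copy of the contents
+//	all := list.Take()   // all == [c, a, b]; list is now nil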
+
+// NameQueue is a FIFO queue of *Name. The zero value of NameQueue is
+// a ready-to-use empty queue.
+type NameQueue struct {
+ ring []*Name
+ head, tail int
+}
+
+// Empty reports whether q contains no Names.
+func (q *NameQueue) Empty() bool {
+ return q.head == q.tail
+}
+
+// PushRight appends n to the right of the queue.
+func (q *NameQueue) PushRight(n *Name) {
+ if len(q.ring) == 0 {
+ q.ring = make([]*Name, 16)
+ } else if q.head+len(q.ring) == q.tail {
+ // Grow the ring.
+ nring := make([]*Name, len(q.ring)*2)
+ // Copy the old elements.
+ part := q.ring[q.head%len(q.ring):]
+ if q.tail-q.head <= len(part) {
+ part = part[:q.tail-q.head]
+ copy(nring, part)
+ } else {
+ pos := copy(nring, part)
+ copy(nring[pos:], q.ring[:q.tail%len(q.ring)])
+ }
+ q.ring, q.head, q.tail = nring, 0, q.tail-q.head
+ }
+
+ q.ring[q.tail%len(q.ring)] = n
+ q.tail++
+}
+
+// PopLeft pops a Name from the left of the queue. It panics if q is
+// empty.
+func (q *NameQueue) PopLeft() *Name {
+ if q.Empty() {
+ panic("dequeue empty")
+ }
+ n := q.ring[q.head%len(q.ring)]
+ q.head++
+ return n
+}
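+
+// A minimal usage sketch of NameQueue (illustrative only; a and b are
+// assumed to be existing *Name values):
+//
+//	var q NameQueue // the zero value is a ready-to-use empty queue
+//	q.PushRight(a)
+//	q.PushRight(b)
+//	for !q.Empty() {
+//		n := q.PopLeft() // yields a, then b (FIFO order)
+//		use(n)           // hypothetical consumer
+//	}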
+
+// NameSet is a set of Names.
+type NameSet map[*Name]struct{}
+
+// Has reports whether s contains n.
+func (s NameSet) Has(n *Name) bool {
+ _, isPresent := s[n]
+ return isPresent
+}
+
+// Add adds n to s.
+func (s *NameSet) Add(n *Name) {
+ if *s == nil {
+ *s = make(map[*Name]struct{})
+ }
+ (*s)[n] = struct{}{}
+}
+
+// Sorted returns s sorted according to less.
+func (s NameSet) Sorted(less func(*Name, *Name) bool) []*Name {
+ var res []*Name
+ for n := range s {
+ res = append(res, n)
+ }
+ sort.Slice(res, func(i, j int) bool { return less(res[i], res[j]) })
+ return res
+}
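+
+// A minimal usage sketch of NameSet (illustrative only; a and b are
+// assumed to be existing *Name values):
+//
+//	var s NameSet
+//	s.Add(a) // Add lazily allocates the underlying map
+//	s.Add(b)
+//	if s.Has(a) {
+//		byName := s.Sorted(func(x, y *Name) bool { return x.Sym().Name < y.Sym().Name })
+//		use(byName) // hypothetical consumer
+//	}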
+
+type PragmaFlag uint16
+
+const (
+ // Func pragmas.
+ Nointerface PragmaFlag = 1 << iota
+ Noescape // func parameters don't escape
+ Norace // func must not have race detector annotations
+ Nosplit // func should not execute on separate stack
+ Noinline // func should not be inlined
+ NoCheckPtr // func should not be instrumented by checkptr
+ CgoUnsafeArgs // treat a pointer to one arg as a pointer to them all
+ UintptrKeepAlive // pointers converted to uintptr must be kept alive (compiler internal only)
+ UintptrEscapes // pointers converted to uintptr escape
+
+ // Runtime-only func pragmas.
+ // See ../../../../runtime/HACKING.md for detailed descriptions.
+ Systemstack // func must run on system stack
+ Nowritebarrier // emit compiler error instead of write barrier
+ Nowritebarrierrec // error on write barrier in this or recursive callees
+ Yeswritebarrierrec // cancels Nowritebarrierrec in this function and callees
+
+ // Runtime and cgo type pragmas
+ NotInHeap // values of this type must not be heap allocated
+
+ // Go command pragmas
+ GoBuildPragma
+
+ RegisterParams // TODO(register args) remove after register abi is working
+
+)
+
+func AsNode(n types.Object) Node {
+ if n == nil {
+ return nil
+ }
+ return n.(Node)
+}
+
+var BlankNode Node
+
+func IsConst(n Node, ct constant.Kind) bool {
+ return ConstType(n) == ct
+}
+
+// IsNil reports whether n represents the universal untyped zero value "nil".
+func IsNil(n Node) bool {
+ // Check n.Orig because constant propagation may produce typed nil constants,
+ // which don't exist in the Go spec.
+ return n != nil && Orig(n).Op() == ONIL
+}
+
+func IsBlank(n Node) bool {
+ if n == nil {
+ return false
+ }
+ return n.Sym().IsBlank()
+}
+
+// IsMethod reports whether n is a method.
+// n must be a function or a method.
+func IsMethod(n Node) bool {
+ return n.Type().Recv() != nil
+}
+
+func HasNamedResults(fn *Func) bool {
+ typ := fn.Type()
+ return typ.NumResults() > 0 && types.OrigSym(typ.Results().Field(0).Sym) != nil
+}
+
+// HasUniquePos reports whether n has a unique position that can be
+// used for reporting error messages.
+//
+// It's primarily used to distinguish references to named objects,
+// whose Pos will point back to their declaration position rather than
+// their usage position.
+func HasUniquePos(n Node) bool {
+ switch n.Op() {
+ case ONAME, OPACK:
+ return false
+ case OLITERAL, ONIL, OTYPE:
+ if n.Sym() != nil {
+ return false
+ }
+ }
+
+ if !n.Pos().IsKnown() {
+ if base.Flag.K != 0 {
+ base.Warn("setlineno: unknown position (line 0)")
+ }
+ return false
+ }
+
+ return true
+}
+
+func SetPos(n Node) src.XPos {
+ lno := base.Pos
+ if n != nil && HasUniquePos(n) {
+ base.Pos = n.Pos()
+ }
+ return lno
+}
+
+// The result of InitExpr MUST be assigned back to n, e.g.
+// n.X = InitExpr(init, n.X)
+func InitExpr(init []Node, expr Node) Node {
+ if len(init) == 0 {
+ return expr
+ }
+
+ n, ok := expr.(InitNode)
+ if !ok || MayBeShared(n) {
+ // Introduce OCONVNOP to hold init list.
+ n = NewConvExpr(base.Pos, OCONVNOP, nil, expr)
+ n.SetType(expr.Type())
+ n.SetTypecheck(1)
+ }
+
+ n.PtrInit().Prepend(init...)
+ return n
+}
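+
+// For example (illustrative sketch): to hoist a subexpression's init list
+// onto the rewritten expression, one might write
+//
+//	init := TakeInit(n.X)
+//	n.X = rewrite(n.X)        // rewrite is a hypothetical transformation
+//	n.X = InitExpr(init, n.X) // the result must be assigned back
+//
+// If the rewritten expression cannot hold an init list (or may be shared),
+// InitExpr wraps it in an OCONVNOP node to carry the statements.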
+
+// OuterValue returns the outer value that a write to n affects:
+// the containing struct or array.
+func OuterValue(n Node) Node {
+ for {
+ switch nn := n; nn.Op() {
+ case OXDOT:
+ base.FatalfAt(n.Pos(), "OXDOT in OuterValue: %v", n)
+ case ODOT:
+ nn := nn.(*SelectorExpr)
+ n = nn.X
+ continue
+ case OPAREN:
+ nn := nn.(*ParenExpr)
+ n = nn.X
+ continue
+ case OCONVNOP:
+ nn := nn.(*ConvExpr)
+ n = nn.X
+ continue
+ case OINDEX:
+ nn := nn.(*IndexExpr)
+ if nn.X.Type() == nil {
+ base.Fatalf("OuterValue needs type for %v", nn.X)
+ }
+ if nn.X.Type().IsArray() {
+ n = nn.X
+ continue
+ }
+ }
+
+ return n
+ }
+}
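+
+// A worked example (illustrative only): for an assignment x.f[2] = v,
+// where x is a local variable of struct type whose field f is an array,
+// OuterValue of the left-hand side walks through the OINDEX and ODOT
+// nodes and returns the ONAME for x, since the write stores into x's
+// own storage. If f were a slice instead of an array, the walk would
+// stop at the OINDEX node, because the element lives outside x.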
+
+const (
+ EscUnknown = iota
+ EscNone // Does not escape to heap, result, or parameters.
+ EscHeap // Reachable from the heap
+ EscNever // By construction will not escape.
+)
diff --git a/src/cmd/compile/internal/ir/node_gen.go b/src/cmd/compile/internal/ir/node_gen.go
new file mode 100644
index 0000000..4498888
--- /dev/null
+++ b/src/cmd/compile/internal/ir/node_gen.go
@@ -0,0 +1,1524 @@
+// Code generated by mknode.go. DO NOT EDIT.
+
+package ir
+
+import "fmt"
+
+func (n *AddStringExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddStringExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *AddStringExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *AddStringExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *AddrExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AddrExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AddrExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *AddrExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *ArrayType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ArrayType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *ArrayType) doChildren(do func(Node) bool) bool {
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ if n.Elem != nil && do(n.Elem) {
+ return true
+ }
+ return false
+}
+func (n *ArrayType) editChildren(edit func(Node) Node) {
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Elem != nil {
+ n.Elem = edit(n.Elem).(Ntype)
+ }
+}
+
+func (n *AssignListStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignListStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Lhs = copyNodes(c.Lhs)
+ c.Rhs = copyNodes(c.Rhs)
+ return &c
+}
+func (n *AssignListStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Lhs, do) {
+ return true
+ }
+ if doNodes(n.Rhs, do) {
+ return true
+ }
+ return false
+}
+func (n *AssignListStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Lhs, edit)
+ editNodes(n.Rhs, edit)
+}
+
+func (n *AssignOpStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignOpStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AssignOpStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *AssignOpStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *AssignStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *AssignStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *AssignStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *AssignStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *BasicLit) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BasicLit) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BasicLit) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *BasicLit) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *BinaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BinaryExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BinaryExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *BinaryExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *BlockStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BlockStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *BlockStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ return false
+}
+func (n *BlockStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.List, edit)
+}
+
+func (n *BranchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *BranchStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *BranchStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *BranchStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *CallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CallExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Args = copyNodes(c.Args)
+ c.KeepAlive = copyNames(c.KeepAlive)
+ return &c
+}
+func (n *CallExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if doNodes(n.Args, do) {
+ return true
+ }
+ if doNames(n.KeepAlive, do) {
+ return true
+ }
+ return false
+}
+func (n *CallExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ editNodes(n.Args, edit)
+ editNames(n.KeepAlive, edit)
+}
+
+func (n *CaseClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CaseClause) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *CaseClause) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Var != nil && do(n.Var) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *CaseClause) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Var != nil {
+ n.Var = edit(n.Var).(*Name)
+ }
+ editNodes(n.List, edit)
+ editNodes(n.Body, edit)
+}
+
+func (n *ChanType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ChanType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *ChanType) doChildren(do func(Node) bool) bool {
+ if n.Elem != nil && do(n.Elem) {
+ return true
+ }
+ return false
+}
+func (n *ChanType) editChildren(edit func(Node) Node) {
+ if n.Elem != nil {
+ n.Elem = edit(n.Elem).(Ntype)
+ }
+}
+
+func (n *ClosureExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ClosureExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ClosureExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *ClosureExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *CommClause) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CommClause) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *CommClause) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Comm != nil && do(n.Comm) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *CommClause) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Comm != nil {
+ n.Comm = edit(n.Comm).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+
+func (n *CompLitExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *CompLitExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.List = copyNodes(c.List)
+ return &c
+}
+func (n *CompLitExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Ntype != nil && do(n.Ntype) {
+ return true
+ }
+ if doNodes(n.List, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *CompLitExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ntype != nil {
+ n.Ntype = edit(n.Ntype).(Ntype)
+ }
+ editNodes(n.List, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *ConstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ConstExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ConstExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *ConstExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ConvExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ConvExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ConvExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *ConvExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *Decl) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Decl) copy() Node {
+ c := *n
+ return &c
+}
+func (n *Decl) doChildren(do func(Node) bool) bool {
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *Decl) editChildren(edit func(Node) Node) {
+ if n.X != nil {
+ n.X = edit(n.X).(*Name)
+ }
+}
+
+func (n *DynamicType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicType) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicType) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.ITab != nil && do(n.ITab) {
+ return true
+ }
+ return false
+}
+func (n *DynamicType) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.ITab != nil {
+ n.ITab = edit(n.ITab).(Node)
+ }
+}
+
+func (n *DynamicTypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *DynamicTypeAssertExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *DynamicTypeAssertExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.T != nil && do(n.T) {
+ return true
+ }
+ return false
+}
+func (n *DynamicTypeAssertExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.T != nil {
+ n.T = edit(n.T).(Node)
+ }
+}
+
+func (n *ForStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ForStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Late = copyNodes(c.Late)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *ForStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Cond != nil && do(n.Cond) {
+ return true
+ }
+ if doNodes(n.Late, do) {
+ return true
+ }
+ if n.Post != nil && do(n.Post) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ return false
+}
+func (n *ForStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ editNodes(n.Late, edit)
+ if n.Post != nil {
+ n.Post = edit(n.Post).(Node)
+ }
+ editNodes(n.Body, edit)
+}
+
+func (n *Func) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *FuncType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *FuncType) copy() Node {
+ c := *n
+ c.Recv = copyField(c.Recv)
+ c.Params = copyFields(c.Params)
+ c.Results = copyFields(c.Results)
+ return &c
+}
+func (n *FuncType) doChildren(do func(Node) bool) bool {
+ if doField(n.Recv, do) {
+ return true
+ }
+ if doFields(n.Params, do) {
+ return true
+ }
+ if doFields(n.Results, do) {
+ return true
+ }
+ return false
+}
+func (n *FuncType) editChildren(edit func(Node) Node) {
+ editField(n.Recv, edit)
+ editFields(n.Params, edit)
+ editFields(n.Results, edit)
+}
+
+func (n *GoDeferStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *GoDeferStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *GoDeferStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Call != nil && do(n.Call) {
+ return true
+ }
+ return false
+}
+func (n *GoDeferStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(Node)
+ }
+}
+
+func (n *Ident) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *Ident) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *Ident) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *Ident) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *IfStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IfStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ c.Else = copyNodes(c.Else)
+ return &c
+}
+func (n *IfStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Cond != nil && do(n.Cond) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if doNodes(n.Else, do) {
+ return true
+ }
+ return false
+}
+func (n *IfStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Cond != nil {
+ n.Cond = edit(n.Cond).(Node)
+ }
+ editNodes(n.Body, edit)
+ editNodes(n.Else, edit)
+}
+
+func (n *IndexExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *IndexExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *IndexExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Index != nil && do(n.Index) {
+ return true
+ }
+ return false
+}
+func (n *IndexExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Index != nil {
+ n.Index = edit(n.Index).(Node)
+ }
+}
+
+func (n *InlineMarkStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlineMarkStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *InlineMarkStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *InlineMarkStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *InlinedCallExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InlinedCallExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ c.ReturnVars = copyNodes(c.ReturnVars)
+ return &c
+}
+func (n *InlinedCallExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if doNodes(n.ReturnVars, do) {
+ return true
+ }
+ return false
+}
+func (n *InlinedCallExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Body, edit)
+ editNodes(n.ReturnVars, edit)
+}
+
+func (n *InstExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InstExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Targs = copyNodes(c.Targs)
+ return &c
+}
+func (n *InstExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if doNodes(n.Targs, do) {
+ return true
+ }
+ return false
+}
+func (n *InstExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ editNodes(n.Targs, edit)
+}
+
+func (n *InterfaceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *InterfaceType) copy() Node {
+ c := *n
+ c.Methods = copyFields(c.Methods)
+ return &c
+}
+func (n *InterfaceType) doChildren(do func(Node) bool) bool {
+ if doFields(n.Methods, do) {
+ return true
+ }
+ return false
+}
+func (n *InterfaceType) editChildren(edit func(Node) Node) {
+ editFields(n.Methods, edit)
+}
+
+func (n *KeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *KeyExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *KeyExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Key != nil && do(n.Key) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *KeyExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *LabelStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LabelStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LabelStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *LabelStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *LinksymOffsetExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LinksymOffsetExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LinksymOffsetExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *LinksymOffsetExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *LogicalExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *LogicalExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *LogicalExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Y != nil && do(n.Y) {
+ return true
+ }
+ return false
+}
+func (n *LogicalExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Y != nil {
+ n.Y = edit(n.Y).(Node)
+ }
+}
+
+func (n *MakeExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *MakeExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *MakeExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ if n.Cap != nil && do(n.Cap) {
+ return true
+ }
+ return false
+}
+func (n *MakeExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+
+func (n *MapType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *MapType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *MapType) doChildren(do func(Node) bool) bool {
+ if n.Key != nil && do(n.Key) {
+ return true
+ }
+ if n.Elem != nil && do(n.Elem) {
+ return true
+ }
+ return false
+}
+func (n *MapType) editChildren(edit func(Node) Node) {
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Ntype)
+ }
+ if n.Elem != nil {
+ n.Elem = edit(n.Elem).(Ntype)
+ }
+}
+
+func (n *Name) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+
+func (n *NilExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *NilExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *NilExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *NilExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ParenExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ParenExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ParenExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *ParenExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *PkgName) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *PkgName) copy() Node {
+ c := *n
+ return &c
+}
+func (n *PkgName) doChildren(do func(Node) bool) bool {
+ return false
+}
+func (n *PkgName) editChildren(edit func(Node) Node) {
+}
+
+func (n *RangeStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RangeStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Body = copyNodes(c.Body)
+ return &c
+}
+func (n *RangeStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Key != nil && do(n.Key) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ if doNodes(n.Body, do) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *RangeStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Key != nil {
+ n.Key = edit(n.Key).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+ editNodes(n.Body, edit)
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *RawOrigExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *RawOrigExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *RawOrigExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *RawOrigExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ResultExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ResultExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *ResultExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ return false
+}
+func (n *ResultExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+}
+
+func (n *ReturnStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *ReturnStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Results = copyNodes(c.Results)
+ return &c
+}
+func (n *ReturnStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doNodes(n.Results, do) {
+ return true
+ }
+ return false
+}
+func (n *ReturnStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editNodes(n.Results, edit)
+}
+
+func (n *SelectStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Cases = copyCommClauses(c.Cases)
+ c.Compiled = copyNodes(c.Compiled)
+ return &c
+}
+func (n *SelectStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if doCommClauses(n.Cases, do) {
+ return true
+ }
+ if doNodes(n.Compiled, do) {
+ return true
+ }
+ return false
+}
+func (n *SelectStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ editCommClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+
+func (n *SelectorExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SelectorExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SelectorExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Prealloc != nil && do(n.Prealloc) {
+ return true
+ }
+ return false
+}
+func (n *SelectorExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Prealloc != nil {
+ n.Prealloc = edit(n.Prealloc).(*Name)
+ }
+}
+
+func (n *SendStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SendStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SendStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Chan != nil && do(n.Chan) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *SendStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Chan != nil {
+ n.Chan = edit(n.Chan).(Node)
+ }
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *SliceExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SliceExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Low != nil && do(n.Low) {
+ return true
+ }
+ if n.High != nil && do(n.High) {
+ return true
+ }
+ if n.Max != nil && do(n.Max) {
+ return true
+ }
+ return false
+}
+func (n *SliceExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Low != nil {
+ n.Low = edit(n.Low).(Node)
+ }
+ if n.High != nil {
+ n.High = edit(n.High).(Node)
+ }
+ if n.Max != nil {
+ n.Max = edit(n.Max).(Node)
+ }
+}
+
+func (n *SliceHeaderExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceHeaderExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *SliceHeaderExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Ptr != nil && do(n.Ptr) {
+ return true
+ }
+ if n.Len != nil && do(n.Len) {
+ return true
+ }
+ if n.Cap != nil && do(n.Cap) {
+ return true
+ }
+ return false
+}
+func (n *SliceHeaderExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Ptr != nil {
+ n.Ptr = edit(n.Ptr).(Node)
+ }
+ if n.Len != nil {
+ n.Len = edit(n.Len).(Node)
+ }
+ if n.Cap != nil {
+ n.Cap = edit(n.Cap).(Node)
+ }
+}
+
+func (n *SliceType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SliceType) copy() Node {
+ c := *n
+ return &c
+}
+func (n *SliceType) doChildren(do func(Node) bool) bool {
+ if n.Elem != nil && do(n.Elem) {
+ return true
+ }
+ return false
+}
+func (n *SliceType) editChildren(edit func(Node) Node) {
+ if n.Elem != nil {
+ n.Elem = edit(n.Elem).(Ntype)
+ }
+}
+
+func (n *StarExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StarExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *StarExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *StarExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *StructKeyExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StructKeyExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *StructKeyExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Value != nil && do(n.Value) {
+ return true
+ }
+ return false
+}
+func (n *StructKeyExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Value != nil {
+ n.Value = edit(n.Value).(Node)
+ }
+}
+
+func (n *StructType) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *StructType) copy() Node {
+ c := *n
+ c.Fields = copyFields(c.Fields)
+ return &c
+}
+func (n *StructType) doChildren(do func(Node) bool) bool {
+ if doFields(n.Fields, do) {
+ return true
+ }
+ return false
+}
+func (n *StructType) editChildren(edit func(Node) Node) {
+ editFields(n.Fields, edit)
+}
+
+func (n *SwitchStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *SwitchStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ c.Cases = copyCaseClauses(c.Cases)
+ c.Compiled = copyNodes(c.Compiled)
+ return &c
+}
+func (n *SwitchStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Tag != nil && do(n.Tag) {
+ return true
+ }
+ if doCaseClauses(n.Cases, do) {
+ return true
+ }
+ if doNodes(n.Compiled, do) {
+ return true
+ }
+ return false
+}
+func (n *SwitchStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(Node)
+ }
+ editCaseClauses(n.Cases, edit)
+ editNodes(n.Compiled, edit)
+}
+
+func (n *TailCallStmt) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TailCallStmt) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *TailCallStmt) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.Call != nil && do(n.Call) {
+ return true
+ }
+ return false
+}
+func (n *TailCallStmt) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.Call != nil {
+ n.Call = edit(n.Call).(*CallExpr)
+ }
+}
+
+func (n *TypeAssertExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeAssertExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *TypeAssertExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ if n.Ntype != nil && do(n.Ntype) {
+ return true
+ }
+ return false
+}
+func (n *TypeAssertExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+ if n.Ntype != nil {
+ n.Ntype = edit(n.Ntype).(Ntype)
+ }
+}
+
+func (n *TypeSwitchGuard) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *TypeSwitchGuard) copy() Node {
+ c := *n
+ return &c
+}
+func (n *TypeSwitchGuard) doChildren(do func(Node) bool) bool {
+ if n.Tag != nil && do(n.Tag) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *TypeSwitchGuard) editChildren(edit func(Node) Node) {
+ if n.Tag != nil {
+ n.Tag = edit(n.Tag).(*Ident)
+ }
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *UnaryExpr) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *UnaryExpr) copy() Node {
+ c := *n
+ c.init = copyNodes(c.init)
+ return &c
+}
+func (n *UnaryExpr) doChildren(do func(Node) bool) bool {
+ if doNodes(n.init, do) {
+ return true
+ }
+ if n.X != nil && do(n.X) {
+ return true
+ }
+ return false
+}
+func (n *UnaryExpr) editChildren(edit func(Node) Node) {
+ editNodes(n.init, edit)
+ if n.X != nil {
+ n.X = edit(n.X).(Node)
+ }
+}
+
+func (n *typeNode) Format(s fmt.State, verb rune) { fmtNode(n, s, verb) }
+func (n *typeNode) copy() Node {
+ c := *n
+ return &c
+}
+func (n *typeNode) doChildren(do func(Node) bool) bool {
+ return false
+}
+func (n *typeNode) editChildren(edit func(Node) Node) {
+}
+
+func copyCaseClauses(list []*CaseClause) []*CaseClause {
+ if list == nil {
+ return nil
+ }
+ c := make([]*CaseClause, len(list))
+ copy(c, list)
+ return c
+}
+func doCaseClauses(list []*CaseClause, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editCaseClauses(list []*CaseClause, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*CaseClause)
+ }
+ }
+}
+
+func copyCommClauses(list []*CommClause) []*CommClause {
+ if list == nil {
+ return nil
+ }
+ c := make([]*CommClause, len(list))
+ copy(c, list)
+ return c
+}
+func doCommClauses(list []*CommClause, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editCommClauses(list []*CommClause, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*CommClause)
+ }
+ }
+}
+
+func copyNames(list []*Name) []*Name {
+ if list == nil {
+ return nil
+ }
+ c := make([]*Name, len(list))
+ copy(c, list)
+ return c
+}
+func doNames(list []*Name, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editNames(list []*Name, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(*Name)
+ }
+ }
+}
+
+func copyNodes(list []Node) []Node {
+ if list == nil {
+ return nil
+ }
+ c := make([]Node, len(list))
+ copy(c, list)
+ return c
+}
+func doNodes(list []Node, do func(Node) bool) bool {
+ for _, x := range list {
+ if x != nil && do(x) {
+ return true
+ }
+ }
+ return false
+}
+func editNodes(list []Node, edit func(Node) Node) {
+ for i, x := range list {
+ if x != nil {
+ list[i] = edit(x).(Node)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ir/op_string.go b/src/cmd/compile/internal/ir/op_string.go
new file mode 100644
index 0000000..b8cee71
--- /dev/null
+++ b/src/cmd/compile/internal/ir/op_string.go
@@ -0,0 +1,184 @@
+// Code generated by "stringer -type=Op -trimprefix=O node.go"; DO NOT EDIT.
+
+package ir
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[OXXX-0]
+ _ = x[ONAME-1]
+ _ = x[ONONAME-2]
+ _ = x[OTYPE-3]
+ _ = x[OPACK-4]
+ _ = x[OLITERAL-5]
+ _ = x[ONIL-6]
+ _ = x[OADD-7]
+ _ = x[OSUB-8]
+ _ = x[OOR-9]
+ _ = x[OXOR-10]
+ _ = x[OADDSTR-11]
+ _ = x[OADDR-12]
+ _ = x[OANDAND-13]
+ _ = x[OAPPEND-14]
+ _ = x[OBYTES2STR-15]
+ _ = x[OBYTES2STRTMP-16]
+ _ = x[ORUNES2STR-17]
+ _ = x[OSTR2BYTES-18]
+ _ = x[OSTR2BYTESTMP-19]
+ _ = x[OSTR2RUNES-20]
+ _ = x[OSLICE2ARRPTR-21]
+ _ = x[OAS-22]
+ _ = x[OAS2-23]
+ _ = x[OAS2DOTTYPE-24]
+ _ = x[OAS2FUNC-25]
+ _ = x[OAS2MAPR-26]
+ _ = x[OAS2RECV-27]
+ _ = x[OASOP-28]
+ _ = x[OCALL-29]
+ _ = x[OCALLFUNC-30]
+ _ = x[OCALLMETH-31]
+ _ = x[OCALLINTER-32]
+ _ = x[OCAP-33]
+ _ = x[OCLOSE-34]
+ _ = x[OCLOSURE-35]
+ _ = x[OCOMPLIT-36]
+ _ = x[OMAPLIT-37]
+ _ = x[OSTRUCTLIT-38]
+ _ = x[OARRAYLIT-39]
+ _ = x[OSLICELIT-40]
+ _ = x[OPTRLIT-41]
+ _ = x[OCONV-42]
+ _ = x[OCONVIFACE-43]
+ _ = x[OCONVIDATA-44]
+ _ = x[OCONVNOP-45]
+ _ = x[OCOPY-46]
+ _ = x[ODCL-47]
+ _ = x[ODCLFUNC-48]
+ _ = x[ODCLCONST-49]
+ _ = x[ODCLTYPE-50]
+ _ = x[ODELETE-51]
+ _ = x[ODOT-52]
+ _ = x[ODOTPTR-53]
+ _ = x[ODOTMETH-54]
+ _ = x[ODOTINTER-55]
+ _ = x[OXDOT-56]
+ _ = x[ODOTTYPE-57]
+ _ = x[ODOTTYPE2-58]
+ _ = x[OEQ-59]
+ _ = x[ONE-60]
+ _ = x[OLT-61]
+ _ = x[OLE-62]
+ _ = x[OGE-63]
+ _ = x[OGT-64]
+ _ = x[ODEREF-65]
+ _ = x[OINDEX-66]
+ _ = x[OINDEXMAP-67]
+ _ = x[OKEY-68]
+ _ = x[OSTRUCTKEY-69]
+ _ = x[OLEN-70]
+ _ = x[OMAKE-71]
+ _ = x[OMAKECHAN-72]
+ _ = x[OMAKEMAP-73]
+ _ = x[OMAKESLICE-74]
+ _ = x[OMAKESLICECOPY-75]
+ _ = x[OMUL-76]
+ _ = x[ODIV-77]
+ _ = x[OMOD-78]
+ _ = x[OLSH-79]
+ _ = x[ORSH-80]
+ _ = x[OAND-81]
+ _ = x[OANDNOT-82]
+ _ = x[ONEW-83]
+ _ = x[ONOT-84]
+ _ = x[OBITNOT-85]
+ _ = x[OPLUS-86]
+ _ = x[ONEG-87]
+ _ = x[OOROR-88]
+ _ = x[OPANIC-89]
+ _ = x[OPRINT-90]
+ _ = x[OPRINTN-91]
+ _ = x[OPAREN-92]
+ _ = x[OSEND-93]
+ _ = x[OSLICE-94]
+ _ = x[OSLICEARR-95]
+ _ = x[OSLICESTR-96]
+ _ = x[OSLICE3-97]
+ _ = x[OSLICE3ARR-98]
+ _ = x[OSLICEHEADER-99]
+ _ = x[ORECOVER-100]
+ _ = x[ORECOVERFP-101]
+ _ = x[ORECV-102]
+ _ = x[ORUNESTR-103]
+ _ = x[OSELRECV2-104]
+ _ = x[OIOTA-105]
+ _ = x[OREAL-106]
+ _ = x[OIMAG-107]
+ _ = x[OCOMPLEX-108]
+ _ = x[OALIGNOF-109]
+ _ = x[OOFFSETOF-110]
+ _ = x[OSIZEOF-111]
+ _ = x[OUNSAFEADD-112]
+ _ = x[OUNSAFESLICE-113]
+ _ = x[OMETHEXPR-114]
+ _ = x[OMETHVALUE-115]
+ _ = x[OBLOCK-116]
+ _ = x[OBREAK-117]
+ _ = x[OCASE-118]
+ _ = x[OCONTINUE-119]
+ _ = x[ODEFER-120]
+ _ = x[OFALL-121]
+ _ = x[OFOR-122]
+ _ = x[OFORUNTIL-123]
+ _ = x[OGOTO-124]
+ _ = x[OIF-125]
+ _ = x[OLABEL-126]
+ _ = x[OGO-127]
+ _ = x[ORANGE-128]
+ _ = x[ORETURN-129]
+ _ = x[OSELECT-130]
+ _ = x[OSWITCH-131]
+ _ = x[OTYPESW-132]
+ _ = x[OFUNCINST-133]
+ _ = x[OTCHAN-134]
+ _ = x[OTMAP-135]
+ _ = x[OTSTRUCT-136]
+ _ = x[OTINTER-137]
+ _ = x[OTFUNC-138]
+ _ = x[OTARRAY-139]
+ _ = x[OTSLICE-140]
+ _ = x[OINLCALL-141]
+ _ = x[OEFACE-142]
+ _ = x[OITAB-143]
+ _ = x[OIDATA-144]
+ _ = x[OSPTR-145]
+ _ = x[OCFUNC-146]
+ _ = x[OCHECKNIL-147]
+ _ = x[OVARDEF-148]
+ _ = x[OVARKILL-149]
+ _ = x[OVARLIVE-150]
+ _ = x[ORESULT-151]
+ _ = x[OINLMARK-152]
+ _ = x[OLINKSYMOFFSET-153]
+ _ = x[ODYNAMICDOTTYPE-154]
+ _ = x[ODYNAMICDOTTYPE2-155]
+ _ = x[ODYNAMICTYPE-156]
+ _ = x[OTAILCALL-157]
+ _ = x[OGETG-158]
+ _ = x[OGETCALLERPC-159]
+ _ = x[OGETCALLERSP-160]
+ _ = x[OEND-161]
+}
+
+const _Op_name = "XXXNAMENONAMETYPEPACKLITERALNILADDSUBORXORADDSTRADDRANDANDAPPENDBYTES2STRBYTES2STRTMPRUNES2STRSTR2BYTESSTR2BYTESTMPSTR2RUNESSLICE2ARRPTRASAS2AS2DOTTYPEAS2FUNCAS2MAPRAS2RECVASOPCALLCALLFUNCCALLMETHCALLINTERCAPCLOSECLOSURECOMPLITMAPLITSTRUCTLITARRAYLITSLICELITPTRLITCONVCONVIFACECONVIDATACONVNOPCOPYDCLDCLFUNCDCLCONSTDCLTYPEDELETEDOTDOTPTRDOTMETHDOTINTERXDOTDOTTYPEDOTTYPE2EQNELTLEGEGTDEREFINDEXINDEXMAPKEYSTRUCTKEYLENMAKEMAKECHANMAKEMAPMAKESLICEMAKESLICECOPYMULDIVMODLSHRSHANDANDNOTNEWNOTBITNOTPLUSNEGORORPANICPRINTPRINTNPARENSENDSLICESLICEARRSLICESTRSLICE3SLICE3ARRSLICEHEADERRECOVERRECOVERFPRECVRUNESTRSELRECV2IOTAREALIMAGCOMPLEXALIGNOFOFFSETOFSIZEOFUNSAFEADDUNSAFESLICEMETHEXPRMETHVALUEBLOCKBREAKCASECONTINUEDEFERFALLFORFORUNTILGOTOIFLABELGORANGERETURNSELECTSWITCHTYPESWFUNCINSTTCHANTMAPTSTRUCTTINTERTFUNCTARRAYTSLICEINLCALLEFACEITABIDATASPTRCFUNCCHECKNILVARDEFVARKILLVARLIVERESULTINLMARKLINKSYMOFFSETDYNAMICDOTTYPEDYNAMICDOTTYPE2DYNAMICTYPETAILCALLGETGGETCALLERPCGETCALLERSPEND"
+
+var _Op_index = [...]uint16{0, 3, 7, 13, 17, 21, 28, 31, 34, 37, 39, 42, 48, 52, 58, 64, 73, 85, 94, 103, 115, 124, 136, 138, 141, 151, 158, 165, 172, 176, 180, 188, 196, 205, 208, 213, 220, 227, 233, 242, 250, 258, 264, 268, 277, 286, 293, 297, 300, 307, 315, 322, 328, 331, 337, 344, 352, 356, 363, 371, 373, 375, 377, 379, 381, 383, 388, 393, 401, 404, 413, 416, 420, 428, 435, 444, 457, 460, 463, 466, 469, 472, 475, 481, 484, 487, 493, 497, 500, 504, 509, 514, 520, 525, 529, 534, 542, 550, 556, 565, 576, 583, 592, 596, 603, 611, 615, 619, 623, 630, 637, 645, 651, 660, 671, 679, 688, 693, 698, 702, 710, 715, 719, 722, 730, 734, 736, 741, 743, 748, 754, 760, 766, 772, 780, 785, 789, 796, 802, 807, 813, 819, 826, 831, 835, 840, 844, 849, 857, 863, 870, 877, 883, 890, 903, 917, 932, 943, 951, 955, 966, 977, 980}
+
+func (i Op) String() string {
+ if i >= Op(len(_Op_index)-1) {
+ return "Op(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Op_name[_Op_index[i]:_Op_index[i+1]]
+}
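The generated String method above does a two-array lookup: _Op_index holds the start offset of each name inside the packed _Op_name string, and the slice between two consecutive offsets is the name. A minimal sketch of the observable behaviour, not part of the diff and only buildable inside the cmd/compile tree because the ir package is internal:

package main // hypothetical demo living under cmd/compile

import (
	"fmt"

	"cmd/compile/internal/ir"
)

func main() {
	fmt.Println(ir.OADD)      // "ADD", i.e. _Op_name[_Op_index[7]:_Op_index[8]]
	fmt.Println(ir.Op(10000)) // "Op(10000)": out-of-range values fall back to the numeric form
}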
diff --git a/src/cmd/compile/internal/ir/package.go b/src/cmd/compile/internal/ir/package.go
new file mode 100644
index 0000000..3896e2b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/package.go
@@ -0,0 +1,35 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import "cmd/compile/internal/types"
+
+// A Package holds information about the package being compiled.
+type Package struct {
+ // Imports, listed in source order.
+ // See golang.org/issue/31636.
+ Imports []*types.Pkg
+
+ // Init functions, listed in source order.
+ Inits []*Func
+
+ // Top-level declarations.
+ Decls []Node
+
+ // Extern (package global) declarations.
+ Externs []Node
+
+ // Assembly function declarations.
+ Asms []*Name
+
+ // Cgo directives.
+ CgoPragmas [][]string
+
+ // Variables with //go:embed lines.
+ Embeds []*Name
+
+ // Exported (or re-exported) symbols.
+ Exports []*Name
+}
diff --git a/src/cmd/compile/internal/ir/scc.go b/src/cmd/compile/internal/ir/scc.go
new file mode 100644
index 0000000..a42951c
--- /dev/null
+++ b/src/cmd/compile/internal/ir/scc.go
@@ -0,0 +1,131 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+// Strongly connected components.
+//
+// Run analysis on minimal sets of mutually recursive functions
+// or single non-recursive functions, bottom up.
+//
+// Finding these sets is finding strongly connected components
+// by reverse topological order in the static call graph.
+// The algorithm (known as Tarjan's algorithm) for doing that is taken from
+// Sedgewick, Algorithms, Second Edition, p. 482, with two adaptations.
+//
+// First, a hidden closure function (n.Func.IsHiddenClosure()) cannot be the
+// root of a connected component. Refusing to use it as a root
+// forces it into the component of the function in which it appears.
+// This is more convenient for escape analysis.
+//
+// Second, each function becomes two virtual nodes in the graph,
+// with numbers n and n+1. We record the function's node number as n
+// but search from node n+1. If the search tells us that the component
+// number (min) is n+1, we know that this is a trivial component: one function
+// plus its closures. If the search tells us that the component number is
+// n, then there was a path from node n+1 back to node n, meaning that
+// the function set is mutually recursive. The escape analysis can be
+// more precise when analyzing a single non-recursive function than
+// when analyzing a set of mutually recursive functions.
+
+type bottomUpVisitor struct {
+ analyze func([]*Func, bool)
+ visitgen uint32
+ nodeID map[*Func]uint32
+ stack []*Func
+}
+
+// VisitFuncsBottomUp invokes analyze on the ODCLFUNC nodes listed in list.
+// It calls analyze with successive groups of functions, working from
+// the bottom of the call graph upward. Each time analyze is called with
+// a list of functions, every function on that list only calls other functions
+// on the list or functions that have been passed in previous invocations of
+// analyze. Closures appear in the same list as their outer functions.
+// The lists are as short as possible while preserving those requirements.
+// (In a typical program, many invocations of analyze will be passed just
+// a single function.) The boolean argument 'recursive' passed to analyze
+// specifies whether the functions on the list are mutually recursive.
+// If recursive is false, the list consists of only a single function and its closures.
+// If recursive is true, the list may still contain only a single function,
+// if that function is itself recursive.
+func VisitFuncsBottomUp(list []Node, analyze func(list []*Func, recursive bool)) {
+ var v bottomUpVisitor
+ v.analyze = analyze
+ v.nodeID = make(map[*Func]uint32)
+ for _, n := range list {
+ if n.Op() == ODCLFUNC {
+ n := n.(*Func)
+ if !n.IsHiddenClosure() {
+ v.visit(n)
+ }
+ }
+ }
+}
+
+func (v *bottomUpVisitor) visit(n *Func) uint32 {
+ if id := v.nodeID[n]; id > 0 {
+ // already visited
+ return id
+ }
+
+ v.visitgen++
+ id := v.visitgen
+ v.nodeID[n] = id
+ v.visitgen++
+ min := v.visitgen
+ v.stack = append(v.stack, n)
+
+ do := func(defn Node) {
+ if defn != nil {
+ if m := v.visit(defn.(*Func)); m < min {
+ min = m
+ }
+ }
+ }
+
+ Visit(n, func(n Node) {
+ switch n.Op() {
+ case ONAME:
+ if n := n.(*Name); n.Class == PFUNC {
+ do(n.Defn)
+ }
+ case ODOTMETH, OMETHVALUE, OMETHEXPR:
+ if fn := MethodExprName(n); fn != nil {
+ do(fn.Defn)
+ }
+ case OCLOSURE:
+ n := n.(*ClosureExpr)
+ do(n.Func)
+ }
+ })
+
+ if (min == id || min == id+1) && !n.IsHiddenClosure() {
+ // This node is the root of a strongly connected component.
+
+ // The original min passed to visit was v.nodeID[n]+1.
+ // If visit found its way back to v.nodeID[n], then this
+ // block is a set of mutually recursive functions.
+ // Otherwise it's just a lone function that does not recurse.
+ recursive := min == id
+
+ // Remove connected component from stack.
+ // Mark the node IDs so that future visits return a large number
+ // so as not to affect the caller's min.
+
+ var i int
+ for i = len(v.stack) - 1; i >= 0; i-- {
+ x := v.stack[i]
+ v.nodeID[x] = ^uint32(0)
+ if x == n {
+ break
+ }
+ }
+ block := v.stack[i:]
+ // Run escape analysis on this set of functions.
+ v.stack = v.stack[:i]
+ v.analyze(block, recursive)
+ }
+
+ return min
+}
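To complement the VisitFuncsBottomUp doc comment above, here is a hedged sketch (not part of the diff) of how an analysis pass might drive it; countGroups and its decls argument are hypothetical names, and the usual import of cmd/compile/internal/ir is assumed:

// countGroups reports how many bottom-up groups analyze would see,
// and how many of those groups are mutually recursive.
func countGroups(decls []ir.Node) (groups, recursiveGroups int) {
	ir.VisitFuncsBottomUp(decls, func(list []*ir.Func, recursive bool) {
		// Every function in list calls only functions in list or in
		// groups handed to earlier invocations of this callback.
		groups++
		if recursive {
			recursiveGroups++
		}
	})
	return groups, recursiveGroups
}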
diff --git a/src/cmd/compile/internal/ir/sizeof_test.go b/src/cmd/compile/internal/ir/sizeof_test.go
new file mode 100644
index 0000000..72b6320
--- /dev/null
+++ b/src/cmd/compile/internal/ir/sizeof_test.go
@@ -0,0 +1,37 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Func{}, 196, 336},
+ {Name{}, 112, 200},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ir/stmt.go b/src/cmd/compile/internal/ir/stmt.go
new file mode 100644
index 0000000..80bd205
--- /dev/null
+++ b/src/cmd/compile/internal/ir/stmt.go
@@ -0,0 +1,411 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// A Decl is a declaration of a const, type, or var. (A declared func is a Func.)
+type Decl struct {
+ miniNode
+ X *Name // the thing being declared
+}
+
+func NewDecl(pos src.XPos, op Op, x *Name) *Decl {
+ n := &Decl{X: x}
+ n.pos = pos
+ switch op {
+ default:
+ panic("invalid Decl op " + op.String())
+ case ODCL, ODCLCONST, ODCLTYPE:
+ n.op = op
+ }
+ return n
+}
+
+func (*Decl) isStmt() {}
+
+// A Stmt is a Node that can appear as a statement.
+// This includes statement-like expressions such as f().
+//
+// (It's possible it should include <-c, but that would require
+// splitting ORECV out of UnaryExpr, which hasn't yet been
+// necessary. Maybe instead we will introduce ExprStmt at
+// some point.)
+type Stmt interface {
+ Node
+ isStmt()
+}
+
+// A miniStmt is a miniNode with extra fields common to statements.
+type miniStmt struct {
+ miniNode
+ init Nodes
+}
+
+func (*miniStmt) isStmt() {}
+
+func (n *miniStmt) Init() Nodes { return n.init }
+func (n *miniStmt) SetInit(x Nodes) { n.init = x }
+func (n *miniStmt) PtrInit() *Nodes { return &n.init }
+
+// An AssignListStmt is an assignment statement with
+// more than one item on at least one side: Lhs = Rhs.
+// If Def is true, the assignment is a :=.
+type AssignListStmt struct {
+ miniStmt
+ Lhs Nodes
+ Def bool
+ Rhs Nodes
+}
+
+func NewAssignListStmt(pos src.XPos, op Op, lhs, rhs []Node) *AssignListStmt {
+ n := &AssignListStmt{}
+ n.pos = pos
+ n.SetOp(op)
+ n.Lhs = lhs
+ n.Rhs = rhs
+ return n
+}
+
+func (n *AssignListStmt) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAS2, OAS2DOTTYPE, OAS2FUNC, OAS2MAPR, OAS2RECV, OSELRECV2:
+ n.op = op
+ }
+}
+
+// An AssignStmt is a simple assignment statement: X = Y.
+// If Def is true, the assignment is a :=.
+type AssignStmt struct {
+ miniStmt
+ X Node
+ Def bool
+ Y Node
+}
+
+func NewAssignStmt(pos src.XPos, x, y Node) *AssignStmt {
+ n := &AssignStmt{X: x, Y: y}
+ n.pos = pos
+ n.op = OAS
+ return n
+}
+
+func (n *AssignStmt) SetOp(op Op) {
+ switch op {
+ default:
+ panic(n.no("SetOp " + op.String()))
+ case OAS:
+ n.op = op
+ }
+}
+
+// An AssignOpStmt is an AsOp= assignment statement: X AsOp= Y.
+type AssignOpStmt struct {
+ miniStmt
+ X Node
+ AsOp Op // OADD etc
+ Y Node
+ IncDec bool // actually ++ or --
+}
+
+func NewAssignOpStmt(pos src.XPos, asOp Op, x, y Node) *AssignOpStmt {
+ n := &AssignOpStmt{AsOp: asOp, X: x, Y: y}
+ n.pos = pos
+ n.op = OASOP
+ return n
+}
+
+// A BlockStmt is a block: { List }.
+type BlockStmt struct {
+ miniStmt
+ List Nodes
+}
+
+func NewBlockStmt(pos src.XPos, list []Node) *BlockStmt {
+ n := &BlockStmt{}
+ n.pos = pos
+ if !pos.IsKnown() {
+ n.pos = base.Pos
+ if len(list) > 0 {
+ n.pos = list[0].Pos()
+ }
+ }
+ n.op = OBLOCK
+ n.List = list
+ return n
+}
+
+// A BranchStmt is a break, continue, fallthrough, or goto statement.
+type BranchStmt struct {
+ miniStmt
+ Label *types.Sym // label if present
+}
+
+func NewBranchStmt(pos src.XPos, op Op, label *types.Sym) *BranchStmt {
+ switch op {
+ case OBREAK, OCONTINUE, OFALL, OGOTO:
+ // ok
+ default:
+ panic("NewBranch " + op.String())
+ }
+ n := &BranchStmt{Label: label}
+ n.pos = pos
+ n.op = op
+ return n
+}
+
+func (n *BranchStmt) Sym() *types.Sym { return n.Label }
+
+// A CaseClause is a case statement in a switch or select: case List: Body.
+type CaseClause struct {
+ miniStmt
+ Var *Name // declared variable for this case in type switch
+ List Nodes // list of expressions for switch, early select
+ Body Nodes
+}
+
+func NewCaseStmt(pos src.XPos, list, body []Node) *CaseClause {
+ n := &CaseClause{List: list, Body: body}
+ n.pos = pos
+ n.op = OCASE
+ return n
+}
+
+type CommClause struct {
+ miniStmt
+ Comm Node // communication case
+ Body Nodes
+}
+
+func NewCommStmt(pos src.XPos, comm Node, body []Node) *CommClause {
+ n := &CommClause{Comm: comm, Body: body}
+ n.pos = pos
+ n.op = OCASE
+ return n
+}
+
+// A ForStmt is a non-range for loop: for Init; Cond; Post { Body }
+// Op can be OFOR or OFORUNTIL (!Cond).
+type ForStmt struct {
+ miniStmt
+ Label *types.Sym
+ Cond Node
+ Late Nodes
+ Post Node
+ Body Nodes
+ HasBreak bool
+}
+
+func NewForStmt(pos src.XPos, init Node, cond, post Node, body []Node) *ForStmt {
+ n := &ForStmt{Cond: cond, Post: post}
+ n.pos = pos
+ n.op = OFOR
+ if init != nil {
+ n.init = []Node{init}
+ }
+ n.Body = body
+ return n
+}
+
+func (n *ForStmt) SetOp(op Op) {
+ if op != OFOR && op != OFORUNTIL {
+ panic(n.no("SetOp " + op.String()))
+ }
+ n.op = op
+}
+
+// A GoDeferStmt is a go or defer statement: go Call / defer Call.
+//
+// The two opcodes use a single syntax because the implementations
+// are very similar: both are concerned with saving Call and running it
+// in a different context (a separate goroutine or a later time).
+type GoDeferStmt struct {
+ miniStmt
+ Call Node
+}
+
+func NewGoDeferStmt(pos src.XPos, op Op, call Node) *GoDeferStmt {
+ n := &GoDeferStmt{Call: call}
+ n.pos = pos
+ switch op {
+ case ODEFER, OGO:
+ n.op = op
+ default:
+ panic("NewGoDeferStmt " + op.String())
+ }
+ return n
+}
+
+// An IfStmt is an if statement: if Init; Cond { Body } else { Else }.
+type IfStmt struct {
+ miniStmt
+ Cond Node
+ Body Nodes
+ Else Nodes
+ Likely bool // code layout hint
+}
+
+func NewIfStmt(pos src.XPos, cond Node, body, els []Node) *IfStmt {
+ n := &IfStmt{Cond: cond}
+ n.pos = pos
+ n.op = OIF
+ n.Body = body
+ n.Else = els
+ return n
+}
+
+// An InlineMarkStmt is a marker placed just before an inlined body.
+type InlineMarkStmt struct {
+ miniStmt
+ Index int64
+}
+
+func NewInlineMarkStmt(pos src.XPos, index int64) *InlineMarkStmt {
+ n := &InlineMarkStmt{Index: index}
+ n.pos = pos
+ n.op = OINLMARK
+ return n
+}
+
+func (n *InlineMarkStmt) Offset() int64 { return n.Index }
+func (n *InlineMarkStmt) SetOffset(x int64) { n.Index = x }
+
+// A LabelStmt is a label statement (just the label, not including the statement it labels).
+type LabelStmt struct {
+ miniStmt
+ Label *types.Sym // "Label:"
+}
+
+func NewLabelStmt(pos src.XPos, label *types.Sym) *LabelStmt {
+ n := &LabelStmt{Label: label}
+ n.pos = pos
+ n.op = OLABEL
+ return n
+}
+
+func (n *LabelStmt) Sym() *types.Sym { return n.Label }
+
+// A RangeStmt is a range loop: for Key, Value = range X { Body }
+type RangeStmt struct {
+ miniStmt
+ Label *types.Sym
+ Def bool
+ X Node
+ Key Node
+ Value Node
+ Body Nodes
+ HasBreak bool
+ Prealloc *Name
+}
+
+func NewRangeStmt(pos src.XPos, key, value, x Node, body []Node) *RangeStmt {
+ n := &RangeStmt{X: x, Key: key, Value: value}
+ n.pos = pos
+ n.op = ORANGE
+ n.Body = body
+ return n
+}
+
+// A ReturnStmt is a return statement.
+type ReturnStmt struct {
+ miniStmt
+ origNode // for typecheckargs rewrite
+ Results Nodes // return list
+}
+
+func NewReturnStmt(pos src.XPos, results []Node) *ReturnStmt {
+ n := &ReturnStmt{}
+ n.pos = pos
+ n.op = ORETURN
+ n.orig = n
+ n.Results = results
+ return n
+}
+
+// A SelectStmt is a select statement: select { Cases }.
+type SelectStmt struct {
+ miniStmt
+ Label *types.Sym
+ Cases []*CommClause
+ HasBreak bool
+
+ // TODO(rsc): Instead of recording here, replace with a block?
+ Compiled Nodes // compiled form, after walkSelect
+}
+
+func NewSelectStmt(pos src.XPos, cases []*CommClause) *SelectStmt {
+ n := &SelectStmt{Cases: cases}
+ n.pos = pos
+ n.op = OSELECT
+ return n
+}
+
+// A SendStmt is a send statement: X <- Y.
+type SendStmt struct {
+ miniStmt
+ Chan Node
+ Value Node
+}
+
+func NewSendStmt(pos src.XPos, ch, value Node) *SendStmt {
+ n := &SendStmt{Chan: ch, Value: value}
+ n.pos = pos
+ n.op = OSEND
+ return n
+}
+
+// A SwitchStmt is a switch statement: switch Init; Tag { Cases }.
+type SwitchStmt struct {
+ miniStmt
+ Tag Node
+ Cases []*CaseClause
+ Label *types.Sym
+ HasBreak bool
+
+ // TODO(rsc): Instead of recording here, replace with a block?
+ Compiled Nodes // compiled form, after walkSwitch
+}
+
+func NewSwitchStmt(pos src.XPos, tag Node, cases []*CaseClause) *SwitchStmt {
+ n := &SwitchStmt{Tag: tag, Cases: cases}
+ n.pos = pos
+ n.op = OSWITCH
+ return n
+}
+
+// A TailCallStmt is a tail call statement, which is used for back-end
+// code generation to jump directly to another function entirely.
+type TailCallStmt struct {
+ miniStmt
+ Call *CallExpr // the underlying call
+}
+
+func NewTailCallStmt(pos src.XPos, call *CallExpr) *TailCallStmt {
+ n := &TailCallStmt{Call: call}
+ n.pos = pos
+ n.op = OTAILCALL
+ return n
+}
+
+// A TypeSwitchGuard is the [Name :=] X.(type) in a type switch.
+type TypeSwitchGuard struct {
+ miniNode
+ Tag *Ident
+ X Node
+ Used bool
+}
+
+func NewTypeSwitchGuard(pos src.XPos, tag *Ident, x Node) *TypeSwitchGuard {
+ n := &TypeSwitchGuard{Tag: tag, X: x}
+ n.pos = pos
+ n.op = OTYPESW
+ return n
+}
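The constructors above all follow the same shape: fill in the fields, record the position, and set (or validate) the Op. As a hedged illustration, not part of the diff, here is how `if x { return x }` could be assembled from them, assuming x is an existing *ir.Name and that cmd/compile/internal/ir and cmd/internal/src are imported:

// buildIfReturn is a hypothetical helper: it builds the IR for "if x { return x }".
func buildIfReturn(pos src.XPos, x *ir.Name) *ir.IfStmt {
	ret := ir.NewReturnStmt(pos, []ir.Node{x})
	return ir.NewIfStmt(pos, x, []ir.Node{ret}, nil)
}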
diff --git a/src/cmd/compile/internal/ir/symtab.go b/src/cmd/compile/internal/ir/symtab.go
new file mode 100644
index 0000000..148edb2
--- /dev/null
+++ b/src/cmd/compile/internal/ir/symtab.go
@@ -0,0 +1,75 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Syms holds known symbols.
+var Syms struct {
+ AssertE2I *obj.LSym
+ AssertE2I2 *obj.LSym
+ AssertI2I *obj.LSym
+ AssertI2I2 *obj.LSym
+ Asanread *obj.LSym
+ Asanwrite *obj.LSym
+ CheckPtrAlignment *obj.LSym
+ Deferproc *obj.LSym
+ DeferprocStack *obj.LSym
+ Deferreturn *obj.LSym
+ Duffcopy *obj.LSym
+ Duffzero *obj.LSym
+ GCWriteBarrier *obj.LSym
+ Goschedguarded *obj.LSym
+ Growslice *obj.LSym
+ Memmove *obj.LSym
+ Msanread *obj.LSym
+ Msanwrite *obj.LSym
+ Msanmove *obj.LSym
+ Newobject *obj.LSym
+ Newproc *obj.LSym
+ Panicdivide *obj.LSym
+ Panicshift *obj.LSym
+ PanicdottypeE *obj.LSym
+ PanicdottypeI *obj.LSym
+ Panicnildottype *obj.LSym
+ Panicoverflow *obj.LSym
+ Raceread *obj.LSym
+ Racereadrange *obj.LSym
+ Racewrite *obj.LSym
+ Racewriterange *obj.LSym
+ // Wasm
+ SigPanic *obj.LSym
+ Staticuint64s *obj.LSym
+ Typedmemclr *obj.LSym
+ Typedmemmove *obj.LSym
+ Udiv *obj.LSym
+ WriteBarrier *obj.LSym
+ Zerobase *obj.LSym
+ ARM64HasATOMICS *obj.LSym
+ ARMHasVFPv4 *obj.LSym
+ X86HasFMA *obj.LSym
+ X86HasPOPCNT *obj.LSym
+ X86HasSSE41 *obj.LSym
+ // Wasm
+ WasmDiv *obj.LSym
+ // Wasm
+ WasmMove *obj.LSym
+ // Wasm
+ WasmZero *obj.LSym
+ // Wasm
+ WasmTruncS *obj.LSym
+ // Wasm
+ WasmTruncU *obj.LSym
+}
+
+// Pkgs holds known packages.
+var Pkgs struct {
+ Go *types.Pkg
+ Itab *types.Pkg
+ Runtime *types.Pkg
+}
diff --git a/src/cmd/compile/internal/ir/type.go b/src/cmd/compile/internal/ir/type.go
new file mode 100644
index 0000000..63dd673
--- /dev/null
+++ b/src/cmd/compile/internal/ir/type.go
@@ -0,0 +1,335 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Nodes that represent the syntax of a type before type-checking.
+// After type-checking, they serve only as shells around a *types.Type.
+// Calling TypeNode converts a *types.Type to a Node shell.
+
+// An Ntype is a Node that syntactically looks like a type.
+// It can be the raw syntax for a type before typechecking,
+// or it can be an OTYPE with Type() set to a *types.Type.
+// Note that syntax doesn't guarantee it's a type: an expression
+// like *fmt is an Ntype (we don't know whether names are types yet),
+// but at least 1+1 is not an Ntype.
+type Ntype interface {
+ Node
+ CanBeNtype()
+}
+
+// A miniType is a minimal type syntax Node implementation,
+// to be embedded as the first field in a larger node implementation.
+type miniType struct {
+ miniNode
+ typ *types.Type
+}
+
+func (*miniType) CanBeNtype() {}
+
+func (n *miniType) Type() *types.Type { return n.typ }
+
+// setOTYPE changes n to be an OTYPE node returning t.
+// Rewriting the node in place this way should not be strictly
+// necessary (we should be able to update the uses with
+// proper OTYPE nodes), but it's mostly harmless and easy
+// to keep doing for now.
+//
+// setOTYPE also records t.Nod = self if t.Nod is not already set.
+// (Some types are shared by multiple OTYPE nodes, so only
+// the first such node is used as t.Nod.)
+func (n *miniType) setOTYPE(t *types.Type, self Ntype) {
+ if n.typ != nil {
+ panic(n.op.String() + " SetType: type already set")
+ }
+ n.op = OTYPE
+ n.typ = t
+ t.SetNod(self)
+}
+
+func (n *miniType) Sym() *types.Sym { return nil } // for Format OTYPE
+func (n *miniType) Implicit() bool { return false } // for Format OTYPE
+
+// A ChanType represents a chan Elem syntax with the direction Dir.
+type ChanType struct {
+ miniType
+ Elem Ntype
+ Dir types.ChanDir
+}
+
+func NewChanType(pos src.XPos, elem Ntype, dir types.ChanDir) *ChanType {
+ n := &ChanType{Elem: elem, Dir: dir}
+ n.op = OTCHAN
+ n.pos = pos
+ return n
+}
+
+func (n *ChanType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Elem = nil
+}
+
+// A MapType represents a map[Key]Value type syntax.
+type MapType struct {
+ miniType
+ Key Ntype
+ Elem Ntype
+}
+
+func NewMapType(pos src.XPos, key, elem Ntype) *MapType {
+ n := &MapType{Key: key, Elem: elem}
+ n.op = OTMAP
+ n.pos = pos
+ return n
+}
+
+func (n *MapType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Key = nil
+ n.Elem = nil
+}
+
+// A StructType represents a struct { ... } type syntax.
+type StructType struct {
+ miniType
+ Fields []*Field
+}
+
+func NewStructType(pos src.XPos, fields []*Field) *StructType {
+ n := &StructType{Fields: fields}
+ n.op = OTSTRUCT
+ n.pos = pos
+ return n
+}
+
+func (n *StructType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Fields = nil
+}
+
+// An InterfaceType represents an interface { ... } type syntax.
+type InterfaceType struct {
+ miniType
+ Methods []*Field
+}
+
+func NewInterfaceType(pos src.XPos, methods []*Field) *InterfaceType {
+ n := &InterfaceType{Methods: methods}
+ n.op = OTINTER
+ n.pos = pos
+ return n
+}
+
+func (n *InterfaceType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Methods = nil
+}
+
+// A FuncType represents a func(Args) Results type syntax.
+type FuncType struct {
+ miniType
+ Recv *Field
+ Params []*Field
+ Results []*Field
+}
+
+func NewFuncType(pos src.XPos, rcvr *Field, args, results []*Field) *FuncType {
+ n := &FuncType{Recv: rcvr, Params: args, Results: results}
+ n.op = OTFUNC
+ n.pos = pos
+ return n
+}
+
+func (n *FuncType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Recv = nil
+ n.Params = nil
+ n.Results = nil
+}
+
+// A Field is a declared struct field, interface method, or function argument.
+// It is not a Node.
+type Field struct {
+ Pos src.XPos
+ Sym *types.Sym
+ Ntype Ntype
+ Type *types.Type
+ Embedded bool
+ IsDDD bool
+ Note string
+ Decl *Name
+}
+
+func NewField(pos src.XPos, sym *types.Sym, ntyp Ntype, typ *types.Type) *Field {
+ return &Field{Pos: pos, Sym: sym, Ntype: ntyp, Type: typ}
+}
+
+func (f *Field) String() string {
+ var typ string
+ if f.Type != nil {
+ typ = fmt.Sprint(f.Type)
+ } else {
+ typ = fmt.Sprint(f.Ntype)
+ }
+ if f.Sym != nil {
+ return fmt.Sprintf("%v %v", f.Sym, typ)
+ }
+ return typ
+}
+
+// TODO(mdempsky): Make Field a Node again so these can be generated?
+// Fields are Nodes in go/ast and cmd/compile/internal/syntax.
+
+func copyField(f *Field) *Field {
+ if f == nil {
+ return nil
+ }
+ c := *f
+ return &c
+}
+func doField(f *Field, do func(Node) bool) bool {
+ if f == nil {
+ return false
+ }
+ if f.Decl != nil && do(f.Decl) {
+ return true
+ }
+ if f.Ntype != nil && do(f.Ntype) {
+ return true
+ }
+ return false
+}
+func editField(f *Field, edit func(Node) Node) {
+ if f == nil {
+ return
+ }
+ if f.Decl != nil {
+ f.Decl = edit(f.Decl).(*Name)
+ }
+ if f.Ntype != nil {
+ f.Ntype = edit(f.Ntype).(Ntype)
+ }
+}
+
+func copyFields(list []*Field) []*Field {
+ out := make([]*Field, len(list))
+ for i, f := range list {
+ out[i] = copyField(f)
+ }
+ return out
+}
+func doFields(list []*Field, do func(Node) bool) bool {
+ for _, x := range list {
+ if doField(x, do) {
+ return true
+ }
+ }
+ return false
+}
+func editFields(list []*Field, edit func(Node) Node) {
+ for _, f := range list {
+ editField(f, edit)
+ }
+}
+
+// A SliceType represents a []Elem type syntax.
+// If DDD is true, it's the ...Elem at the end of a function argument list.
+type SliceType struct {
+ miniType
+ Elem Ntype
+ DDD bool
+}
+
+func NewSliceType(pos src.XPos, elem Ntype) *SliceType {
+ n := &SliceType{Elem: elem}
+ n.op = OTSLICE
+ n.pos = pos
+ return n
+}
+
+func (n *SliceType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Elem = nil
+}
+
+// An ArrayType represents a [Len]Elem type syntax.
+// If Len is nil, the type is a [...]Elem in an array literal.
+type ArrayType struct {
+ miniType
+ Len Node
+ Elem Ntype
+}
+
+func NewArrayType(pos src.XPos, len Node, elem Ntype) *ArrayType {
+ n := &ArrayType{Len: len, Elem: elem}
+ n.op = OTARRAY
+ n.pos = pos
+ return n
+}
+
+func (n *ArrayType) SetOTYPE(t *types.Type) {
+ n.setOTYPE(t, n)
+ n.Len = nil
+ n.Elem = nil
+}
+
+// A typeNode is a Node wrapper for type t.
+type typeNode struct {
+ miniNode
+ typ *types.Type
+}
+
+func newTypeNode(pos src.XPos, typ *types.Type) *typeNode {
+ n := &typeNode{typ: typ}
+ n.pos = pos
+ n.op = OTYPE
+ return n
+}
+
+func (n *typeNode) Type() *types.Type { return n.typ }
+func (n *typeNode) Sym() *types.Sym { return n.typ.Sym() }
+func (n *typeNode) CanBeNtype() {}
+
+// TypeNode returns the Node representing the type t.
+func TypeNode(t *types.Type) Ntype {
+ return TypeNodeAt(src.NoXPos, t)
+}
+
+// TypeNodeAt is like TypeNode, but allows specifying the position
+// information if a new OTYPE needs to be constructed.
+//
+// Deprecated: Use TypeNode instead. For typical use, the position for
+// an anonymous OTYPE node should not matter. However, TypeNodeAt is
+// available for use with toolstash -cmp to refactor existing code
+// that is sensitive to OTYPE position.
+func TypeNodeAt(pos src.XPos, t *types.Type) Ntype {
+ if n := t.Obj(); n != nil {
+ if n.Type() != t {
+ base.Fatalf("type skew: %v has type %v, but expected %v", n, n.Type(), t)
+ }
+ return n.(Ntype)
+ }
+ return newTypeNode(pos, t)
+}
+
+// A DynamicType represents the target type in a type switch.
+type DynamicType struct {
+ miniExpr
+ X Node // a *runtime._type for the targeted type
+ ITab Node // for type switches from nonempty interfaces to non-interfaces, this is the itab for that pair.
+}
+
+func NewDynamicType(pos src.XPos, x Node) *DynamicType {
+ n := &DynamicType{X: x}
+ n.pos = pos
+ n.op = ODYNAMICTYPE
+ return n
+}
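TypeNode is the usual way to obtain the OTYPE shell for an already-computed *types.Type. A minimal hedged sketch, not part of the diff, assuming the predeclared types in cmd/compile/internal/types have been initialized:

// intTypeNode is a hypothetical helper wrapping the predeclared int type
// in an OTYPE node shell.
func intTypeNode() ir.Ntype {
	return ir.TypeNode(types.Types[types.TINT])
}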
diff --git a/src/cmd/compile/internal/ir/val.go b/src/cmd/compile/internal/ir/val.go
new file mode 100644
index 0000000..bfe7d2b
--- /dev/null
+++ b/src/cmd/compile/internal/ir/val.go
@@ -0,0 +1,171 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ir
+
+import (
+ "go/constant"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+)
+
+func ConstType(n Node) constant.Kind {
+ if n == nil || n.Op() != OLITERAL {
+ return constant.Unknown
+ }
+ return n.Val().Kind()
+}
+
+// ConstValue returns the constant value stored in n as an interface{}.
+// It returns int64s for ints and runes, float64s for floats,
+// and complex128s for complex values.
+func ConstValue(n Node) interface{} {
+ switch v := n.Val(); v.Kind() {
+ default:
+ base.Fatalf("unexpected constant: %v", v)
+ panic("unreachable")
+ case constant.Bool:
+ return constant.BoolVal(v)
+ case constant.String:
+ return constant.StringVal(v)
+ case constant.Int:
+ return IntVal(n.Type(), v)
+ case constant.Float:
+ return Float64Val(v)
+ case constant.Complex:
+ return complex(Float64Val(constant.Real(v)), Float64Val(constant.Imag(v)))
+ }
+}
+
+// IntVal returns v converted to int64.
+// Note: if t is uint64, very large values will be converted to negative int64.
+func IntVal(t *types.Type, v constant.Value) int64 {
+ if t.IsUnsigned() {
+ if x, ok := constant.Uint64Val(v); ok {
+ return int64(x)
+ }
+ } else {
+ if x, ok := constant.Int64Val(v); ok {
+ return x
+ }
+ }
+ base.Fatalf("%v out of range for %v", v, t)
+ panic("unreachable")
+}
+
+func Float64Val(v constant.Value) float64 {
+ if x, _ := constant.Float64Val(v); !math.IsInf(x, 0) {
+ return x + 0 // avoid -0 (should not be needed, but be conservative)
+ }
+ base.Fatalf("bad float64 value: %v", v)
+ panic("unreachable")
+}
+
+func AssertValidTypeForConst(t *types.Type, v constant.Value) {
+ if !ValidTypeForConst(t, v) {
+ base.Fatalf("%v (%v) does not represent %v (%v)", t, t.Kind(), v, v.Kind())
+ }
+}
+
+func ValidTypeForConst(t *types.Type, v constant.Value) bool {
+ switch v.Kind() {
+ case constant.Unknown:
+ return OKForConst[t.Kind()]
+ case constant.Bool:
+ return t.IsBoolean()
+ case constant.String:
+ return t.IsString()
+ case constant.Int:
+ return t.IsInteger()
+ case constant.Float:
+ return t.IsFloat()
+ case constant.Complex:
+ return t.IsComplex()
+ }
+
+ base.Fatalf("unexpected constant kind: %v", v)
+ panic("unreachable")
+}
+
+// NewLiteral returns a new untyped constant with value v.
+func NewLiteral(v constant.Value) Node {
+ return NewBasicLit(base.Pos, v)
+}
+
+func idealType(ct constant.Kind) *types.Type {
+ switch ct {
+ case constant.String:
+ return types.UntypedString
+ case constant.Bool:
+ return types.UntypedBool
+ case constant.Int:
+ return types.UntypedInt
+ case constant.Float:
+ return types.UntypedFloat
+ case constant.Complex:
+ return types.UntypedComplex
+ }
+ base.Fatalf("unexpected Ctype: %v", ct)
+ return nil
+}
+
+var OKForConst [types.NTYPE]bool
+
+// CanInt64 reports whether it is safe to call Int64Val() on n.
+func CanInt64(n Node) bool {
+ if !IsConst(n, constant.Int) {
+ return false
+ }
+
+ // If the value inside n cannot be represented as an int64, the
+ // return value of Int64Val is undefined.
+ _, ok := constant.Int64Val(n.Val())
+ return ok
+}
+
+// Int64Val returns n as an int64.
+// n must be an integer or rune constant.
+func Int64Val(n Node) int64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ x, ok := constant.Int64Val(n.Val())
+ if !ok {
+ base.Fatalf("Int64Val(%v)", n)
+ }
+ return x
+}
+
+// Uint64Val returns n as a uint64.
+// n must be an integer or rune constant.
+func Uint64Val(n Node) uint64 {
+ if !IsConst(n, constant.Int) {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ x, ok := constant.Uint64Val(n.Val())
+ if !ok {
+ base.Fatalf("Uint64Val(%v)", n)
+ }
+ return x
+}
+
+// BoolVal returns n as a bool.
+// n must be a boolean constant.
+func BoolVal(n Node) bool {
+ if !IsConst(n, constant.Bool) {
+ base.Fatalf("BoolVal(%v)", n)
+ }
+ return constant.BoolVal(n.Val())
+}
+
+// StringVal returns the value of a literal string Node as a string.
+// n must be a string constant.
+func StringVal(n Node) string {
+ if !IsConst(n, constant.String) {
+ base.Fatalf("StringVal(%v)", n)
+ }
+ return constant.StringVal(n.Val())
+}
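The helpers above layer typed accessors over go/constant values. A hedged sketch of a round trip through them, not part of the diff; literalRoundTrip is a hypothetical function, and imports of go/constant and the ir package are assumed:

// literalRoundTrip builds an untyped integer literal and reads it back.
func literalRoundTrip() int64 {
	n := ir.NewLiteral(constant.MakeInt64(42)) // OLITERAL node holding 42
	if !ir.CanInt64(n) {
		panic("42 should be representable as int64")
	}
	return ir.Int64Val(n) // 42
}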
diff --git a/src/cmd/compile/internal/ir/visit.go b/src/cmd/compile/internal/ir/visit.go
new file mode 100644
index 0000000..e4aeae3
--- /dev/null
+++ b/src/cmd/compile/internal/ir/visit.go
@@ -0,0 +1,186 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// IR visitors for walking the IR tree.
+//
+// The lowest level helpers are DoChildren and EditChildren, which
+// nodes help implement and provide control over whether and when
+// recursion happens during the walk of the IR.
+//
+// Although these are both useful directly, two simpler patterns
+// are fairly common and also provided: Visit and Any.
+
+package ir
+
+// DoChildren calls do(x) on each of n's non-nil child nodes x.
+// If any call returns true, DoChildren stops and returns true.
+// Otherwise, DoChildren returns false.
+//
+// Note that DoChildren(n, do) only calls do(x) for n's immediate children.
+// If x's children should be processed, then do(x) must call DoChildren(x, do).
+//
+// DoChildren allows constructing general traversals of the IR graph
+// that can stop early if needed. The most general usage is:
+//
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// ... processing AFTER visiting children ...
+// }
+// if ... should stop parent DoChildren call from visiting siblings ... {
+// return true
+// }
+// return false
+// }
+// do(root)
+//
+// Since DoChildren does not return true itself, if the do function
+// never wants to stop the traversal, it can assume that DoChildren
+// itself will always return false, simplifying to:
+//
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// ... processing BEFORE visiting children ...
+// if ... should visit children ... {
+// ir.DoChildren(x, do)
+// }
+// ... processing AFTER visiting children ...
+// return false
+// }
+// do(root)
+//
+// The Visit function illustrates a further simplification of the pattern,
+// only processing before visiting children and never stopping:
+//
+// func Visit(n ir.Node, visit func(ir.Node)) {
+// if n == nil {
+// return
+// }
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// visit(x)
+// return ir.DoChildren(x, do)
+// }
+// do(n)
+// }
+//
+// The Any function illustrates a different simplification of the pattern,
+// visiting each node and then its children, recursively, until finding
+// a node x for which cond(x) returns true, at which point the entire
+// traversal stops and returns true.
+//
+// func Any(n ir.Node, cond func(ir.Node) bool) bool {
+// if n == nil {
+// return false
+// }
+// var do func(ir.Node) bool
+// do = func(x ir.Node) bool {
+// return cond(x) || ir.DoChildren(x, do)
+// }
+// return do(n)
+// }
+//
+// Visit and Any are presented above as examples of how to use
+// DoChildren effectively, but of course, usage that fits within the
+// simplifications captured by Visit or Any will be best served
+// by directly calling the ones provided by this package.
+func DoChildren(n Node, do func(Node) bool) bool {
+ if n == nil {
+ return false
+ }
+ return n.doChildren(do)
+}
+
+// Visit visits each non-nil node x in the IR tree rooted at n
+// in a depth-first preorder traversal, calling visit on each node visited.
+func Visit(n Node, visit func(Node)) {
+ if n == nil {
+ return
+ }
+ var do func(Node) bool
+ do = func(x Node) bool {
+ visit(x)
+ return DoChildren(x, do)
+ }
+ do(n)
+}
+
+// VisitList calls Visit(x, visit) for each node x in the list.
+func VisitList(list Nodes, visit func(Node)) {
+ for _, x := range list {
+ Visit(x, visit)
+ }
+}
+
+// Any looks for a non-nil node x in the IR tree rooted at n
+// for which cond(x) returns true.
+// Any considers nodes in a depth-first, preorder traversal.
+// When Any finds a node x such that cond(x) is true,
+// Any ends the traversal and returns true immediately.
+// Otherwise Any returns false after completing the entire traversal.
+func Any(n Node, cond func(Node) bool) bool {
+ if n == nil {
+ return false
+ }
+ var do func(Node) bool
+ do = func(x Node) bool {
+ return cond(x) || DoChildren(x, do)
+ }
+ return do(n)
+}
+
+// AnyList calls Any(x, cond) for each node x in the list, in order.
+// If any call returns true, AnyList stops and returns true.
+// Otherwise, AnyList returns false after calling Any(x, cond)
+// for every x in the list.
+func AnyList(list Nodes, cond func(Node) bool) bool {
+ for _, x := range list {
+ if Any(x, cond) {
+ return true
+ }
+ }
+ return false
+}
+
+// EditChildren edits the child nodes of n, replacing each child x with edit(x).
+//
+// Note that EditChildren(n, edit) only calls edit(x) for n's immediate children.
+// If x's children should be processed, then edit(x) must call EditChildren(x, edit).
+//
+// EditChildren allows constructing general editing passes of the IR graph.
+// The most general usage is:
+//
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// ... processing BEFORE editing children ...
+// if ... should edit children ... {
+// EditChildren(x, edit)
+// ... processing AFTER editing children ...
+// }
+// ... return x ...
+// }
+// n = edit(n)
+//
+// EditChildren edits the node in place. To edit a copy, call Copy first.
+// As an example, a simple deep copy implementation would be:
+//
+// func deepCopy(n ir.Node) ir.Node {
+// var edit func(ir.Node) ir.Node
+// edit = func(x ir.Node) ir.Node {
+// x = ir.Copy(x)
+// ir.EditChildren(x, edit)
+// return x
+// }
+// return edit(n)
+// }
+//
+// Of course, in this case it is better to call ir.DeepCopy than to build one anew.
+func EditChildren(n Node, edit func(Node) Node) {
+ if n == nil {
+ return
+ }
+ n.editChildren(edit)
+}
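To complement the Visit and Any examples embedded in the doc comments above, here is a hedged sketch, not part of the diff, of a typical query over a function body; hasCall is a hypothetical helper and the ir import is assumed:

// hasCall reports whether any node under fn's body is a direct call.
func hasCall(fn *ir.Func) bool {
	return ir.AnyList(fn.Body, func(n ir.Node) bool {
		switch n.Op() {
		case ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER:
			return true
		}
		return false
	})
}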
diff --git a/src/cmd/compile/internal/liveness/arg.go b/src/cmd/compile/internal/liveness/arg.go
new file mode 100644
index 0000000..2ca5d09
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/arg.go
@@ -0,0 +1,339 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+// Argument liveness tracking.
+//
+// For arguments passed in registers, this file tracks whether their spill slots
+// are live for runtime traceback. An argument spill slot is live at a PC
+// if we know that an actual value has been stored into it at or before this point.
+//
+// Stack args are always live and not tracked in this code. Stack args are
+// laid out before register spill slots, so we emit the smallest offset that
+// needs tracking. Slots before that offset are always live. That offset is
+// usually the offset of the first spill slot. But if the first spill slot is
+// always live (e.g. if it is address-taken), it will be the offset of a later
+// one.
+//
+// The liveness information is emitted as a FUNCDATA and a PCDATA.
+//
+// FUNCDATA format:
+// - start (smallest) offset that needs tracking (1 byte)
+// - a list of bitmaps.
+// In a bitmap bit i is set if the i-th spill slot is live.
+//
+// At a PC where the liveness info changes, a PCDATA indicates the
+// byte offset of the liveness map in the FUNCDATA. PCDATA -1 is a
+// special case indicating all slots are live (for binary size
+// saving).
+
+const allLiveIdx = -1
+
+// name and offset
+type nameOff struct {
+ n *ir.Name
+ off int64
+}
+
+func (a nameOff) FrameOffset() int64 { return a.n.FrameOffset() + a.off }
+func (a nameOff) String() string { return fmt.Sprintf("%v+%d", a.n, a.off) }
+
+type blockArgEffects struct {
+ livein bitvec.BitVec // variables live at block entry
+ liveout bitvec.BitVec // variables live at block exit
+}
+
+type argLiveness struct {
+ fn *ir.Func
+ f *ssa.Func
+ args []nameOff // name and offset of spill slots
+ idx map[nameOff]int32 // index in args
+
+ be []blockArgEffects // indexed by block ID
+
+ bvset bvecSet // Set of liveness bitmaps, used for uniquifying.
+
+ // Liveness map indices at each Value (where it changes) and Block entry.
+ // During the computation the indices are temporarily indices into bvset.
+ // At the end they will be indices (offsets) into the output funcdata (changed
+ // in (*argLiveness).emit).
+ blockIdx map[ssa.ID]int
+ valueIdx map[ssa.ID]int
+}
+
+// ArgLiveness computes the liveness information of register argument spill slots.
+// An argument's spill slot is "live" if we know it contains a meaningful value,
+// that is, we have stored the register value to it.
+// Returns the liveness map indices at each Block entry and at each Value (where
+// it changes).
+func ArgLiveness(fn *ir.Func, f *ssa.Func, pp *objw.Progs) (blockIdx, valueIdx map[ssa.ID]int) {
+ if f.OwnAux.ABIInfo().InRegistersUsed() == 0 || base.Flag.N != 0 {
+ // No register args. Nothing to emit.
+ // Or if -N is used we spill everything upfront so it is always live.
+ return nil, nil
+ }
+
+ lv := &argLiveness{
+ fn: fn,
+ f: f,
+ idx: make(map[nameOff]int32),
+ be: make([]blockArgEffects, f.NumBlocks()),
+ blockIdx: make(map[ssa.ID]int),
+ valueIdx: make(map[ssa.ID]int),
+ }
+ // Gather all register arg spill slots.
+ for _, a := range f.OwnAux.ABIInfo().InParams() {
+ n, ok := a.Name.(*ir.Name)
+ if !ok || len(a.Registers) == 0 {
+ continue
+ }
+ _, offs := a.RegisterTypesAndOffsets()
+ for _, off := range offs {
+ if n.FrameOffset()+off > 0xff {
+ // We only print a limited number of args, with stack
+ // offsets no larger than 255.
+ continue
+ }
+ lv.args = append(lv.args, nameOff{n, off})
+ }
+ }
+ if len(lv.args) > 10 {
+ lv.args = lv.args[:10] // We print no more than 10 args.
+ }
+
+ // We spill address-taken or non-SSA-able values upfront, so they are always live.
+ alwaysLive := func(n *ir.Name) bool { return n.Addrtaken() || !f.Frontend().CanSSA(n.Type()) }
+
+ // We'll emit the smallest offset for the slots that need liveness info.
+ // No need to include a slot with a lower offset if it is always live.
+ for len(lv.args) > 0 && alwaysLive(lv.args[0].n) {
+ lv.args = lv.args[1:]
+ }
+ if len(lv.args) == 0 {
+ return // everything is always live
+ }
+
+ for i, a := range lv.args {
+ lv.idx[a] = int32(i)
+ }
+
+ nargs := int32(len(lv.args))
+ bulk := bitvec.NewBulk(nargs, int32(len(f.Blocks)*2))
+ for _, b := range f.Blocks {
+ be := &lv.be[b.ID]
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
+
+ // initialize to all 1s, so we can AND them
+ be.livein.Not()
+ be.liveout.Not()
+ }
+
+ entrybe := &lv.be[f.Entry.ID]
+ entrybe.livein.Clear()
+ for i, a := range lv.args {
+ if alwaysLive(a.n) {
+ entrybe.livein.Set(int32(i))
+ }
+ }
+
+ // Visit blocks in reverse-postorder, compute block effects.
+ po := f.Postorder()
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ be := &lv.be[b.ID]
+
+ // A slot is live at block entry if it is live in all predecessors.
+ for _, pred := range b.Preds {
+ pb := pred.Block()
+ be.livein.And(be.livein, lv.be[pb.ID].liveout)
+ }
+
+ be.liveout.Copy(be.livein)
+ for _, v := range b.Values {
+ lv.valueEffect(v, be.liveout)
+ }
+ }
+
+ // Coalesce identical live vectors. Compute liveness indices at each PC
+ // where it changes.
+ live := bitvec.New(nargs)
+ addToSet := func(bv bitvec.BitVec) (int, bool) {
+ if bv.Count() == int(nargs) { // special case for all live
+ return allLiveIdx, false
+ }
+ return lv.bvset.add(bv)
+ }
+ for _, b := range lv.f.Blocks {
+ be := &lv.be[b.ID]
+ lv.blockIdx[b.ID], _ = addToSet(be.livein)
+
+ live.Copy(be.livein)
+ var lastv *ssa.Value
+ for i, v := range b.Values {
+ if lv.valueEffect(v, live) {
+				// Record that the liveness changed, but don't emit a map now.
+				// For a sequence of StoreRegs we only need to emit one map
+				// at the end.
+ lastv = v
+ }
+ if lastv != nil && (mayFault(v) || i == len(b.Values)-1) {
+				// Emit the liveness map if the instruction may fault, or at
+				// the end of the block. We may need a traceback if the
+				// instruction can cause a panic.
+ var added bool
+ lv.valueIdx[lastv.ID], added = addToSet(live)
+ if added {
+ // live is added to bvset and we cannot modify it now.
+ // Make a copy.
+ t := live
+ live = bitvec.New(nargs)
+ live.Copy(t)
+ }
+ lastv = nil
+ }
+ }
+
+ // Sanity check.
+ if !live.Eq(be.liveout) {
+ panic("wrong arg liveness map at block end")
+ }
+ }
+
+ // Emit funcdata symbol, update indices to offsets in the symbol data.
+ lsym := lv.emit()
+ fn.LSym.Func().ArgLiveInfo = lsym
+
+ //lv.print()
+
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_ArgLiveInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = lsym
+
+ return lv.blockIdx, lv.valueIdx
+}
+
+// valueEffect applies the effect of v to live, returning whether it changed.
+func (lv *argLiveness) valueEffect(v *ssa.Value, live bitvec.BitVec) bool {
+ if v.Op != ssa.OpStoreReg { // TODO: include other store instructions?
+ return false
+ }
+ n, off := ssa.AutoVar(v)
+ if n.Class != ir.PPARAM {
+ return false
+ }
+ i, ok := lv.idx[nameOff{n, off}]
+ if !ok || live.Get(i) {
+ return false
+ }
+ live.Set(i)
+ return true
+}
+
+func mayFault(v *ssa.Value) bool {
+ switch v.Op {
+ case ssa.OpLoadReg, ssa.OpStoreReg, ssa.OpCopy, ssa.OpPhi,
+ ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive,
+ ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult,
+ ssa.OpConvert, ssa.OpInlMark, ssa.OpGetG:
+ return false
+ }
+ if len(v.Args) == 0 {
+ return false // assume constant op cannot fault
+ }
+ return true // conservatively assume all other ops could fault
+}
+
+func (lv *argLiveness) print() {
+ fmt.Println("argument liveness:", lv.f.Name)
+ live := bitvec.New(int32(len(lv.args)))
+ for _, b := range lv.f.Blocks {
+ be := &lv.be[b.ID]
+
+ fmt.Printf("%v: live in: ", b)
+ lv.printLivenessVec(be.livein)
+ if idx, ok := lv.blockIdx[b.ID]; ok {
+ fmt.Printf(" #%d", idx)
+ }
+ fmt.Println()
+
+ for _, v := range b.Values {
+ if lv.valueEffect(v, live) {
+ fmt.Printf(" %v: ", v)
+ lv.printLivenessVec(live)
+ if idx, ok := lv.valueIdx[v.ID]; ok {
+ fmt.Printf(" #%d", idx)
+ }
+ fmt.Println()
+ }
+ }
+
+ fmt.Printf("%v: live out: ", b)
+ lv.printLivenessVec(be.liveout)
+ fmt.Println()
+ }
+ fmt.Println("liveness maps data:", lv.fn.LSym.Func().ArgLiveInfo.P)
+}
+
+func (lv *argLiveness) printLivenessVec(bv bitvec.BitVec) {
+ for i, a := range lv.args {
+ if bv.Get(int32(i)) {
+ fmt.Printf("%v ", a)
+ }
+ }
+}
+
+func (lv *argLiveness) emit() *obj.LSym {
+ livenessMaps := lv.bvset.extractUnique()
+
+ // stack offsets of register arg spill slots
+ argOffsets := make([]uint8, len(lv.args))
+ for i, a := range lv.args {
+ off := a.FrameOffset()
+ if off > 0xff {
+ panic("offset too large")
+ }
+ argOffsets[i] = uint8(off)
+ }
+
+ idx2off := make([]int, len(livenessMaps))
+
+ lsym := base.Ctxt.Lookup(lv.fn.LSym.Name + ".argliveinfo")
+ lsym.Set(obj.AttrContentAddressable, true)
+
+ off := objw.Uint8(lsym, 0, argOffsets[0]) // smallest offset that needs liveness info.
+ for idx, live := range livenessMaps {
+ idx2off[idx] = off
+ off = objw.BitVec(lsym, off, live)
+ }
+
+ // Update liveness indices to offsets.
+ for i, x := range lv.blockIdx {
+ if x != allLiveIdx {
+ lv.blockIdx[i] = idx2off[x]
+ }
+ }
+ for i, x := range lv.valueIdx {
+ if x != allLiveIdx {
+ lv.valueIdx[i] = idx2off[x]
+ }
+ }
+
+ return lsym
+}
diff --git a/src/cmd/compile/internal/liveness/bvset.go b/src/cmd/compile/internal/liveness/bvset.go
new file mode 100644
index 0000000..60b2593
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/bvset.go
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package liveness
+
+import "cmd/compile/internal/bitvec"
+
+// FNV-1 hash function constants.
+const (
+ h0 = 2166136261
+ hp = 16777619
+)
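+
+// hashbitmap (below) applies FNV-1 byte by byte, starting from the offset
+// basis h0: for each byte b of the bit vector's words, h = (h * hp) ^ b.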
+
+// bvecSet is a set of bvecs, in initial insertion order.
+type bvecSet struct {
+ index []int // hash -> uniq index. -1 indicates empty slot.
+ uniq []bitvec.BitVec // unique bvecs, in insertion order
+}
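+
+// A rough usage sketch (illustrative only; bvA and bvB are hypothetical bit
+// vectors of equal length):
+//
+//	var s bvecSet
+//	i0, _ := s.add(bvA)       // new vector -> index 0
+//	i1, _ := s.add(bvB)       // new vector -> index 1
+//	i2, _ := s.add(bvA)       // duplicate  -> i2 == i0
+//	maps := s.extractUnique() // [bvA, bvB], indexed by i0, i1, i2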
+
+func (m *bvecSet) grow() {
+ // Allocate new index.
+ n := len(m.index) * 2
+ if n == 0 {
+ n = 32
+ }
+ newIndex := make([]int, n)
+ for i := range newIndex {
+ newIndex[i] = -1
+ }
+
+ // Rehash into newIndex.
+ for i, bv := range m.uniq {
+ h := hashbitmap(h0, bv) % uint32(len(newIndex))
+ for {
+ j := newIndex[h]
+ if j < 0 {
+ newIndex[h] = i
+ break
+ }
+ h++
+ if h == uint32(len(newIndex)) {
+ h = 0
+ }
+ }
+ }
+ m.index = newIndex
+}
+
+// add adds bv to the set and returns its index into the slice returned by
+// m.extractUnique, and whether it was newly added.
+// If it is newly added, the caller must not modify bv after this.
+func (m *bvecSet) add(bv bitvec.BitVec) (int, bool) {
+ if len(m.uniq)*4 >= len(m.index) {
+ m.grow()
+ }
+
+ index := m.index
+ h := hashbitmap(h0, bv) % uint32(len(index))
+ for {
+ j := index[h]
+ if j < 0 {
+ // New bvec.
+ index[h] = len(m.uniq)
+ m.uniq = append(m.uniq, bv)
+ return len(m.uniq) - 1, true
+ }
+ jlive := m.uniq[j]
+ if bv.Eq(jlive) {
+ // Existing bvec.
+ return j, false
+ }
+
+ h++
+ if h == uint32(len(index)) {
+ h = 0
+ }
+ }
+}
+
+// extractUnique returns the slice of unique bit vectors in m, as
+// indexed by the result of bvecSet.add.
+func (m *bvecSet) extractUnique() []bitvec.BitVec {
+ return m.uniq
+}
+
+func hashbitmap(h uint32, bv bitvec.BitVec) uint32 {
+ n := int((bv.N + 31) / 32)
+ for i := 0; i < n; i++ {
+ w := bv.B[i]
+ h = (h * hp) ^ (w & 0xff)
+ h = (h * hp) ^ ((w >> 8) & 0xff)
+ h = (h * hp) ^ ((w >> 16) & 0xff)
+ h = (h * hp) ^ ((w >> 24) & 0xff)
+ }
+
+ return h
+}
diff --git a/src/cmd/compile/internal/liveness/plive.go b/src/cmd/compile/internal/liveness/plive.go
new file mode 100644
index 0000000..3202e50
--- /dev/null
+++ b/src/cmd/compile/internal/liveness/plive.go
@@ -0,0 +1,1530 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Garbage collector liveness bitmap generation.
+
+// The command line flag -live causes this code to print debug information.
+// The levels are:
+//
+// -live (aka -live=1): print liveness lists as code warnings at safe points
+// -live=2: print an assembly listing with liveness annotations
+//
+// Each level includes the earlier output as well.
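+//
+// For example (illustrative invocations):
+//
+//	go tool compile -live=1 x.go   # print liveness lists at safe points
+//	go tool compile -live=2 x.go   # also print the annotated listing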
+
+package liveness
+
+import (
+ "crypto/sha1"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/typebits"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// OpVarDef is an annotation for the liveness analysis, marking a place
+// where a complete initialization (definition) of a variable begins.
+// Since the liveness analysis can see initialization of single-word
+// variables quite easily, OpVarDef is only needed for multi-word
+// variables satisfying isfat(n.Type). For simplicity though, buildssa
+// emits OpVarDef regardless of variable width.
+//
+// An 'OpVarDef x' annotation in the instruction stream tells the liveness
+// analysis to behave as though the variable x is being initialized at that
+// point in the instruction stream. The OpVarDef must appear before the
+// actual (multi-instruction) initialization, and it must also appear after
+// any uses of the previous value, if any. For example, if compiling:
+//
+// x = x[1:]
+//
+// it is important to generate code like:
+//
+// base, len, cap = pieces of x[1:]
+// OpVarDef x
+// x = {base, len, cap}
+//
+// If instead the generated code looked like:
+//
+// OpVarDef x
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+//
+// then the liveness analysis would decide the previous value of x was
+// unnecessary even though it is about to be used by the x[1:] computation.
+// Similarly, if the generated code looked like:
+//
+// base, len, cap = pieces of x[1:]
+// x = {base, len, cap}
+// OpVarDef x
+//
+// then the liveness analysis will not preserve the new value of x, because
+// the OpVarDef appears to have "overwritten" it.
+//
+// OpVarDef is a bit of a kludge to work around the fact that the instruction
+// stream is working on single-word values but the liveness analysis
+// wants to work on individual variables, which might be multi-word
+// aggregates. It might make sense at some point to look into letting
+// the liveness analysis work on single-word values as well, although
+// there are complications around interface values, slices, and strings,
+// all of which cannot be treated as individual words.
+//
+// OpVarKill is the opposite of OpVarDef: it marks a value as no longer needed,
+// even if its address has been taken. That is, an OpVarKill annotation asserts
+// that its argument is certainly dead, for use when the liveness analysis
+// would not otherwise be able to deduce that fact.
+
+// TODO: get rid of OpVarKill here. It's useful for stack frame allocation
+// so the compiler can allocate two temps to the same location. Here it's now
+// useless, since stack objects were implemented.
+
+// blockEffects summarizes the liveness effects on an SSA block.
+type blockEffects struct {
+ // Computed during Liveness.prologue using only the content of
+ // individual blocks:
+ //
+ // uevar: upward exposed variables (used before set in block)
+ // varkill: killed variables (set in block)
+ uevar bitvec.BitVec
+ varkill bitvec.BitVec
+
+ // Computed during Liveness.solve using control flow information:
+ //
+ // livein: variables live at block entry
+ // liveout: variables live at block exit
+ livein bitvec.BitVec
+ liveout bitvec.BitVec
+}
+
+// A collection of global state used by liveness analysis.
+type liveness struct {
+ fn *ir.Func
+ f *ssa.Func
+ vars []*ir.Name
+ idx map[*ir.Name]int32
+ stkptrsize int64
+
+ be []blockEffects
+
+ // allUnsafe indicates that all points in this function are
+ // unsafe-points.
+ allUnsafe bool
+ // unsafePoints bit i is set if Value ID i is an unsafe-point
+ // (preemption is not allowed). Only valid if !allUnsafe.
+ unsafePoints bitvec.BitVec
+
+ // An array with a bit vector for each safe point in the
+ // current Block during liveness.epilogue. Indexed in Value
+ // order for that block. Additionally, for the entry block
+ // livevars[0] is the entry bitmap. liveness.compact moves
+ // these to stackMaps.
+ livevars []bitvec.BitVec
+
+ // livenessMap maps from safe points (i.e., CALLs) to their
+ // liveness map indexes.
+ livenessMap Map
+ stackMapSet bvecSet
+ stackMaps []bitvec.BitVec
+
+ cache progeffectscache
+
+ // partLiveArgs includes input arguments (PPARAM) that may
+ // be partially live. That is, it is considered live because
+ // a part of it is used, but we may not initialize all parts.
+ partLiveArgs map[*ir.Name]bool
+
+ doClobber bool // Whether to clobber dead stack slots in this function.
+ noClobberArgs bool // Do not clobber function arguments
+}
+
+// Map maps from *ssa.Value to LivenessIndex.
+type Map struct {
+ Vals map[ssa.ID]objw.LivenessIndex
+ // The set of live, pointer-containing variables at the DeferReturn
+ // call (only set when open-coded defers are used).
+ DeferReturn objw.LivenessIndex
+}
+
+func (m *Map) reset() {
+ if m.Vals == nil {
+ m.Vals = make(map[ssa.ID]objw.LivenessIndex)
+ } else {
+ for k := range m.Vals {
+ delete(m.Vals, k)
+ }
+ }
+ m.DeferReturn = objw.LivenessDontCare
+}
+
+func (m *Map) set(v *ssa.Value, i objw.LivenessIndex) {
+ m.Vals[v.ID] = i
+}
+
+func (m Map) Get(v *ssa.Value) objw.LivenessIndex {
+ // If v isn't in the map, then it's a "don't care" and not an
+ // unsafe-point.
+ if idx, ok := m.Vals[v.ID]; ok {
+ return idx
+ }
+ return objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: false}
+}
+
+type progeffectscache struct {
+ retuevar []int32
+ tailuevar []int32
+ initialized bool
+}
+
+// shouldTrack reports whether the liveness analysis
+// should track the variable n.
+// We don't care about variables that have no pointers,
+// nor do we care about non-local variables,
+// nor do we care about empty structs (handled by the pointer check),
+// nor do we care about the fake PAUTOHEAP variables.
+func shouldTrack(n *ir.Name) bool {
+ return (n.Class == ir.PAUTO && n.Esc() != ir.EscHeap || n.Class == ir.PPARAM || n.Class == ir.PPARAMOUT) && n.Type().HasPointers()
+}
+
+// getvariables returns the list of on-stack variables that we need to track
+// and a map for looking up indices by *Node.
+func getvariables(fn *ir.Func) ([]*ir.Name, map[*ir.Name]int32) {
+ var vars []*ir.Name
+ for _, n := range fn.Dcl {
+ if shouldTrack(n) {
+ vars = append(vars, n)
+ }
+ }
+ idx := make(map[*ir.Name]int32, len(vars))
+ for i, n := range vars {
+ idx[n] = int32(i)
+ }
+ return vars, idx
+}
+
+func (lv *liveness) initcache() {
+ if lv.cache.initialized {
+ base.Fatalf("liveness cache initialized twice")
+ return
+ }
+ lv.cache.initialized = true
+
+ for i, node := range lv.vars {
+ switch node.Class {
+ case ir.PPARAM:
+ // A return instruction with a p.to is a tail return, which brings
+ // the stack pointer back up (if it ever went down) and then jumps
+ // to a new function entirely. That form of instruction must read
+ // all the parameters for correctness, and similarly it must not
+ // read the out arguments - they won't be set until the new
+ // function runs.
+ lv.cache.tailuevar = append(lv.cache.tailuevar, int32(i))
+
+ case ir.PPARAMOUT:
+ // All results are live at every return point.
+ // Note that this point is after escaping return values
+ // are copied back to the stack using their PAUTOHEAP references.
+ lv.cache.retuevar = append(lv.cache.retuevar, int32(i))
+ }
+ }
+}
+
+// A liveEffect is a set of flags that describe an instruction's
+// liveness effects on a variable.
+//
+// The possible flags are:
+// uevar - used by the instruction
+// varkill - killed by the instruction (set)
+// A kill happens after the use (for an instruction that updates a value, for example).
+type liveEffect int
+
+const (
+ uevar liveEffect = 1 << iota
+ varkill
+)
+
+// valueEffects returns the index of a variable in lv.vars and the
+// liveness effects v has on that variable.
+// If v does not affect any tracked variables, it returns -1, 0.
+func (lv *liveness) valueEffects(v *ssa.Value) (int32, liveEffect) {
+ n, e := affectedVar(v)
+ if e == 0 || n == nil { // cheapest checks first
+ return -1, 0
+ }
+ // AllocFrame has dropped unused variables from
+ // lv.fn.Func.Dcl, but they might still be referenced by
+ // OpVarFoo pseudo-ops. Ignore them to prevent "lost track of
+ // variable" ICEs (issue 19632).
+ switch v.Op {
+ case ssa.OpVarDef, ssa.OpVarKill, ssa.OpVarLive, ssa.OpKeepAlive:
+ if !n.Used() {
+ return -1, 0
+ }
+ }
+
+ if n.Class == ir.PPARAM && !n.Addrtaken() && n.Type().Size() > int64(types.PtrSize) {
+ // Only aggregate-typed arguments that are not address-taken can be
+ // partially live.
+ lv.partLiveArgs[n] = true
+ }
+
+ var effect liveEffect
+ // Read is a read, obviously.
+ //
+ // Addr is a read also, as any subsequent holder of the pointer must be able
+ // to see all the values (including initialization) written so far.
+ // This also prevents a variable from "coming back from the dead" and presenting
+ // stale pointers to the garbage collector. See issue 28445.
+ if e&(ssa.SymRead|ssa.SymAddr) != 0 {
+ effect |= uevar
+ }
+ if e&ssa.SymWrite != 0 && (!isfat(n.Type()) || v.Op == ssa.OpVarDef) {
+ effect |= varkill
+ }
+
+ if effect == 0 {
+ return -1, 0
+ }
+
+ if pos, ok := lv.idx[n]; ok {
+ return pos, effect
+ }
+ return -1, 0
+}
+
+// affectedVar returns the *ir.Name node affected by v
+func affectedVar(v *ssa.Value) (*ir.Name, ssa.SymEffect) {
+ // Special cases.
+ switch v.Op {
+ case ssa.OpLoadReg:
+ n, _ := ssa.AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ case ssa.OpStoreReg:
+ n, _ := ssa.AutoVar(v)
+ return n, ssa.SymWrite
+
+ case ssa.OpArgIntReg:
+ // This forces the spill slot for the register to be live at function entry.
+		// One of the following holds for a function F with pointer-valued register arg X:
+ // 0. No GC (so an uninitialized spill slot is okay)
+ // 1. GC at entry of F. GC is precise, but the spills around morestack initialize X's spill slot
+ // 2. Stack growth at entry of F. Same as GC.
+ // 3. GC occurs within F itself. This has to be from preemption, and thus GC is conservative.
+ // a. X is in a register -- then X is seen, and the spill slot is also scanned conservatively.
+ // b. X is spilled -- the spill slot is initialized, and scanned conservatively
+ // c. X is not live -- the spill slot is scanned conservatively, and it may contain X from an earlier spill.
+ // 4. GC within G, transitively called from F
+ // a. X is live at call site, therefore is spilled, to its spill slot (which is live because of subsequent LoadReg).
+ // b. X is not live at call site -- but neither is its spill slot.
+ n, _ := ssa.AutoVar(v)
+ return n, ssa.SymRead
+
+ case ssa.OpVarLive:
+ return v.Aux.(*ir.Name), ssa.SymRead
+ case ssa.OpVarDef, ssa.OpVarKill:
+ return v.Aux.(*ir.Name), ssa.SymWrite
+ case ssa.OpKeepAlive:
+ n, _ := ssa.AutoVar(v.Args[0])
+ return n, ssa.SymRead
+ }
+
+ e := v.Op.SymEffect()
+ if e == 0 {
+ return nil, 0
+ }
+
+ switch a := v.Aux.(type) {
+ case nil, *obj.LSym:
+ // ok, but no node
+ return nil, e
+ case *ir.Name:
+ return a, e
+ default:
+ base.Fatalf("weird aux: %s", v.LongString())
+ return nil, e
+ }
+}
+
+type livenessFuncCache struct {
+ be []blockEffects
+ livenessMap Map
+}
+
+// Constructs a new liveness structure used to hold the global state of the
+// liveness computation. The vars argument is the slice of tracked variables
+// and idx maps each variable to its index in vars.
+func newliveness(fn *ir.Func, f *ssa.Func, vars []*ir.Name, idx map[*ir.Name]int32, stkptrsize int64) *liveness {
+ lv := &liveness{
+ fn: fn,
+ f: f,
+ vars: vars,
+ idx: idx,
+ stkptrsize: stkptrsize,
+ }
+
+ // Significant sources of allocation are kept in the ssa.Cache
+ // and reused. Surprisingly, the bit vectors themselves aren't
+ // a major source of allocation, but the liveness maps are.
+ if lc, _ := f.Cache.Liveness.(*livenessFuncCache); lc == nil {
+ // Prep the cache so liveness can fill it later.
+ f.Cache.Liveness = new(livenessFuncCache)
+ } else {
+ if cap(lc.be) >= f.NumBlocks() {
+ lv.be = lc.be[:f.NumBlocks()]
+ }
+ lv.livenessMap = Map{Vals: lc.livenessMap.Vals, DeferReturn: objw.LivenessDontCare}
+ lc.livenessMap.Vals = nil
+ }
+ if lv.be == nil {
+ lv.be = make([]blockEffects, f.NumBlocks())
+ }
+
+ nblocks := int32(len(f.Blocks))
+ nvars := int32(len(vars))
+ bulk := bitvec.NewBulk(nvars, nblocks*7)
+ for _, b := range f.Blocks {
+ be := lv.blockEffects(b)
+
+ be.uevar = bulk.Next()
+ be.varkill = bulk.Next()
+ be.livein = bulk.Next()
+ be.liveout = bulk.Next()
+ }
+ lv.livenessMap.reset()
+
+ lv.markUnsafePoints()
+
+ lv.partLiveArgs = make(map[*ir.Name]bool)
+
+ lv.enableClobber()
+
+ return lv
+}
+
+func (lv *liveness) blockEffects(b *ssa.Block) *blockEffects {
+ return &lv.be[b.ID]
+}
+
+// Generates live pointer value maps for arguments and local variables. The
+// receiver and the in arguments are always assumed live. The vars
+// argument is a slice of *ir.Name.
+func (lv *liveness) pointerMap(liveout bitvec.BitVec, vars []*ir.Name, args, locals bitvec.BitVec) {
+ for i := int32(0); ; i++ {
+ i = liveout.Next(i)
+ if i < 0 {
+ break
+ }
+ node := vars[i]
+ switch node.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !node.IsOutputParamInRegisters() {
+ if node.FrameOffset() < 0 {
+ lv.f.Fatalf("Node %v has frameoffset %d\n", node.Sym().Name, node.FrameOffset())
+ }
+ typebits.Set(node.Type(), node.FrameOffset(), args)
+ break
+ }
+			fallthrough // a PPARAMOUT in registers is memory-allocated like an AUTO
+ case ir.PAUTO:
+ typebits.Set(node.Type(), node.FrameOffset()+lv.stkptrsize, locals)
+ }
+ }
+}
+
+// IsUnsafe indicates that all points in this function are
+// unsafe-points.
+func IsUnsafe(f *ssa.Func) bool {
+ // The runtime assumes the only safe-points are function
+ // prologues (because that's how it used to be). We could and
+	// should improve that, but for now keep considering all points
+ // in the runtime unsafe. obj will add prologues and their
+ // safe-points.
+ //
+ // go:nosplit functions are similar. Since safe points used to
+ // be coupled with stack checks, go:nosplit often actually
+ // means "no safe points in this function".
+ return base.Flag.CompilingRuntime || f.NoSplit
+}
+
+// markUnsafePoints finds unsafe points and computes lv.unsafePoints.
+func (lv *liveness) markUnsafePoints() {
+ if IsUnsafe(lv.f) {
+ // No complex analysis necessary.
+ lv.allUnsafe = true
+ return
+ }
+
+ lv.unsafePoints = bitvec.New(int32(lv.f.NumValues()))
+
+ // Mark architecture-specific unsafe points.
+ for _, b := range lv.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.UnsafePoint() {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+
+ // Mark write barrier unsafe points.
+ for _, wbBlock := range lv.f.WBLoads {
+ if wbBlock.Kind == ssa.BlockPlain && len(wbBlock.Values) == 0 {
+ // The write barrier block was optimized away
+ // but we haven't done dead block elimination.
+ // (This can happen in -N mode.)
+ continue
+ }
+ // Check that we have the expected diamond shape.
+ if len(wbBlock.Succs) != 2 {
+ lv.f.Fatalf("expected branch at write barrier block %v", wbBlock)
+ }
+ s0, s1 := wbBlock.Succs[0].Block(), wbBlock.Succs[1].Block()
+ if s0 == s1 {
+ // There's no difference between write barrier on and off.
+ // Thus there's no unsafe locations. See issue 26024.
+ continue
+ }
+ if s0.Kind != ssa.BlockPlain || s1.Kind != ssa.BlockPlain {
+ lv.f.Fatalf("expected successors of write barrier block %v to be plain", wbBlock)
+ }
+ if s0.Succs[0].Block() != s1.Succs[0].Block() {
+ lv.f.Fatalf("expected successors of write barrier block %v to converge", wbBlock)
+ }
+
+ // Flow backwards from the control value to find the
+ // flag load. We don't know what lowered ops we're
+ // looking for, but all current arches produce a
+ // single op that does the memory load from the flag
+ // address, so we look for that.
+ var load *ssa.Value
+ v := wbBlock.Controls[0]
+ for {
+ if sym, ok := v.Aux.(*obj.LSym); ok && sym == ir.Syms.WriteBarrier {
+ load = v
+ break
+ }
+ switch v.Op {
+ case ssa.Op386TESTL:
+ // 386 lowers Neq32 to (TESTL cond cond),
+ if v.Args[0] == v.Args[1] {
+ v = v.Args[0]
+ continue
+ }
+ case ssa.Op386MOVLload, ssa.OpARM64MOVWUload, ssa.OpPPC64MOVWZload, ssa.OpWasmI64Load32U:
+ // Args[0] is the address of the write
+ // barrier control. Ignore Args[1],
+ // which is the mem operand.
+ // TODO: Just ignore mem operands?
+ v = v.Args[0]
+ continue
+ }
+ // Common case: just flow backwards.
+ if len(v.Args) != 1 {
+ v.Fatalf("write barrier control value has more than one argument: %s", v.LongString())
+ }
+ v = v.Args[0]
+ }
+
+ // Mark everything after the load unsafe.
+ found := false
+ for _, v := range wbBlock.Values {
+ found = found || v == load
+ if found {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+
+ // Mark the two successor blocks unsafe. These come
+ // back together immediately after the direct write in
+ // one successor and the last write barrier call in
+ // the other, so there's no need to be more precise.
+ for _, succ := range wbBlock.Succs {
+ for _, v := range succ.Block().Values {
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ }
+ }
+
+ // Find uintptr -> unsafe.Pointer conversions and flood
+ // unsafeness back to a call (which is always a safe point).
+ //
+ // Looking for the uintptr -> unsafe.Pointer conversion has a
+ // few advantages over looking for unsafe.Pointer -> uintptr
+ // conversions:
+ //
+ // 1. We avoid needlessly blocking safe-points for
+ // unsafe.Pointer -> uintptr conversions that never go back to
+ // a Pointer.
+ //
+ // 2. We don't have to detect calls to reflect.Value.Pointer,
+ // reflect.Value.UnsafeAddr, and reflect.Value.InterfaceData,
+ // which are implicit unsafe.Pointer -> uintptr conversions.
+ // We can't even reliably detect this if there's an indirect
+ // call to one of these methods.
+ //
+ // TODO: For trivial unsafe.Pointer arithmetic, it would be
+ // nice to only flood as far as the unsafe.Pointer -> uintptr
+ // conversion, but it's hard to know which argument of an Add
+ // or Sub to follow.
+ var flooded bitvec.BitVec
+ var flood func(b *ssa.Block, vi int)
+ flood = func(b *ssa.Block, vi int) {
+ if flooded.N == 0 {
+ flooded = bitvec.New(int32(lv.f.NumBlocks()))
+ }
+ if flooded.Get(int32(b.ID)) {
+ return
+ }
+ for i := vi - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if v.Op.IsCall() {
+ // Uintptrs must not contain live
+ // pointers across calls, so stop
+ // flooding.
+ return
+ }
+ lv.unsafePoints.Set(int32(v.ID))
+ }
+ if vi == len(b.Values) {
+ // We marked all values in this block, so no
+ // need to flood this block again.
+ flooded.Set(int32(b.ID))
+ }
+ for _, pred := range b.Preds {
+ flood(pred.Block(), len(pred.Block().Values))
+ }
+ }
+ for _, b := range lv.f.Blocks {
+ for i, v := range b.Values {
+ if !(v.Op == ssa.OpConvert && v.Type.IsPtrShaped()) {
+ continue
+ }
+ // Flood the unsafe-ness of this backwards
+ // until we hit a call.
+ flood(b, i+1)
+ }
+ }
+}
+
+// Returns true for instructions that must have a stack map.
+//
+// This does not necessarily mean the instruction is a safe-point. In
+// particular, call Values can have a stack map in case the callee
+// grows the stack, without themselves being a safe-point.
+func (lv *liveness) hasStackMap(v *ssa.Value) bool {
+ if !v.Op.IsCall() {
+ return false
+ }
+ // typedmemclr and typedmemmove are write barriers and
+ // deeply non-preemptible. They are unsafe points and
+ // hence should not have liveness maps.
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && (sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
+ return false
+ }
+ return true
+}
+
+// Initializes the sets for solving the live variables. Visits all the
+// instructions in each basic block to summarize the information at each basic
+// block.
+func (lv *liveness) prologue() {
+ lv.initcache()
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk the block instructions backward and update the block
+		// effects with each instruction's effects.
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ pos, e := lv.valueEffects(b.Values[j])
+ if e&varkill != 0 {
+ be.varkill.Set(pos)
+ be.uevar.Unset(pos)
+ }
+ if e&uevar != 0 {
+ be.uevar.Set(pos)
+ }
+ }
+ }
+}
+
+// Solve the liveness dataflow equations.
+func (lv *liveness) solve() {
+ // These temporary bitvectors exist to avoid successive allocations and
+ // frees within the loop.
+ nvars := int32(len(lv.vars))
+ newlivein := bitvec.New(nvars)
+ newliveout := bitvec.New(nvars)
+
+ // Walk blocks in postorder ordering. This improves convergence.
+ po := lv.f.Postorder()
+
+ // Iterate through the blocks in reverse round-robin fashion. A work
+ // queue might be slightly faster. As is, the number of iterations is
+ // so low that it hardly seems to be worth the complexity.
+
+ for change := true; change; {
+ change = false
+ for _, b := range po {
+ be := lv.blockEffects(b)
+
+ newliveout.Clear()
+ switch b.Kind {
+ case ssa.BlockRet:
+ for _, pos := range lv.cache.retuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockRetJmp:
+ for _, pos := range lv.cache.tailuevar {
+ newliveout.Set(pos)
+ }
+ case ssa.BlockExit:
+ // panic exit - nothing to do
+ default:
+ // A variable is live on output from this block
+ // if it is live on input to some successor.
+ //
+ // out[b] = \bigcup_{s \in succ[b]} in[s]
+ newliveout.Copy(lv.blockEffects(b.Succs[0].Block()).livein)
+ for _, succ := range b.Succs[1:] {
+ newliveout.Or(newliveout, lv.blockEffects(succ.Block()).livein)
+ }
+ }
+
+ if !be.liveout.Eq(newliveout) {
+ change = true
+ be.liveout.Copy(newliveout)
+ }
+
+ // A variable is live on input to this block
+ // if it is used by this block, or live on output from this block and
+ // not set by the code in this block.
+ //
+ // in[b] = uevar[b] \cup (out[b] \setminus varkill[b])
+ newlivein.AndNot(be.liveout, be.varkill)
+ be.livein.Or(newlivein, be.uevar)
+ }
+ }
+}
+
+// Visits all instructions in a basic block and computes a bit vector of live
+// variables at each safe point location.
+func (lv *liveness) epilogue() {
+ nvars := int32(len(lv.vars))
+ liveout := bitvec.New(nvars)
+ livedefer := bitvec.New(nvars) // always-live variables
+
+ // If there is a defer (that could recover), then all output
+ // parameters are live all the time. In addition, any locals
+ // that are pointers to heap-allocated output parameters are
+ // also always live (post-deferreturn code needs these
+ // pointers to copy values back to the stack).
+ // TODO: if the output parameter is heap-allocated, then we
+ // don't need to keep the stack copy live?
+ if lv.fn.HasDefer() {
+ for i, n := range lv.vars {
+ if n.Class == ir.PPARAMOUT {
+ if n.IsOutputParamHeapAddr() {
+ // Just to be paranoid. Heap addresses are PAUTOs.
+ base.Fatalf("variable %v both output param and heap output param", n)
+ }
+ if n.Heapaddr != nil {
+ // If this variable moved to the heap, then
+ // its stack copy is not live.
+ continue
+ }
+ // Note: zeroing is handled by zeroResults in walk.go.
+ livedefer.Set(int32(i))
+ }
+ if n.IsOutputParamHeapAddr() {
+ // This variable will be overwritten early in the function
+ // prologue (from the result of a mallocgc) but we need to
+ // zero it in case that malloc causes a stack scan.
+ n.SetNeedzero(true)
+ livedefer.Set(int32(i))
+ }
+ if n.OpenDeferSlot() {
+ // Open-coded defer args slots must be live
+ // everywhere in a function, since a panic can
+ // occur (almost) anywhere. Because it is live
+ // everywhere, it must be zeroed on entry.
+ livedefer.Set(int32(i))
+ // It was already marked as Needzero when created.
+ if !n.Needzero() {
+ base.Fatalf("all pointer-containing defer arg slots should have Needzero set")
+ }
+ }
+ }
+ }
+
+ // We must analyze the entry block first. The runtime assumes
+ // the function entry map is index 0. Conveniently, layout
+ // already ensured that the entry block is first.
+ if lv.f.Entry != lv.f.Blocks[0] {
+ lv.f.Fatalf("entry block must be first")
+ }
+
+ {
+ // Reserve an entry for function entry.
+ live := bitvec.New(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ for _, b := range lv.f.Blocks {
+ be := lv.blockEffects(b)
+
+ // Walk forward through the basic block instructions and
+ // allocate liveness maps for those instructions that need them.
+ for _, v := range b.Values {
+ if !lv.hasStackMap(v) {
+ continue
+ }
+
+ live := bitvec.New(nvars)
+ lv.livevars = append(lv.livevars, live)
+ }
+
+ // walk backward, construct maps at each safe point
+ index := int32(len(lv.livevars) - 1)
+
+ liveout.Copy(be.liveout)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+
+ if lv.hasStackMap(v) {
+ // Found an interesting instruction, record the
+ // corresponding liveness information.
+
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ live.Or(*live, livedefer) // only for non-entry safe points
+ index--
+ }
+
+ // Update liveness information.
+ pos, e := lv.valueEffects(v)
+ if e&varkill != 0 {
+ liveout.Unset(pos)
+ }
+ if e&uevar != 0 {
+ liveout.Set(pos)
+ }
+ }
+
+ if b == lv.f.Entry {
+ if index != 0 {
+ base.Fatalf("bad index for entry point: %v", index)
+ }
+
+ // Check to make sure only input variables are live.
+ for i, n := range lv.vars {
+ if !liveout.Get(int32(i)) {
+ continue
+ }
+ if n.Class == ir.PPARAM {
+ continue // ok
+ }
+ base.FatalfAt(n.Pos(), "bad live variable at entry of %v: %L", lv.fn.Nname, n)
+ }
+
+ // Record live variables.
+ live := &lv.livevars[index]
+ live.Or(*live, liveout)
+ }
+
+ if lv.doClobber {
+ lv.clobber(b)
+ }
+
+ // The liveness maps for this block are now complete. Compact them.
+ lv.compact(b)
+ }
+
+ // If we have an open-coded deferreturn call, make a liveness map for it.
+ if lv.fn.OpenCodedDeferDisallowed() {
+ lv.livenessMap.DeferReturn = objw.LivenessDontCare
+ } else {
+ idx, _ := lv.stackMapSet.add(livedefer)
+ lv.livenessMap.DeferReturn = objw.LivenessIndex{
+ StackMapIndex: idx,
+ IsUnsafePoint: false,
+ }
+ }
+
+ // Done compacting. Throw out the stack map set.
+ lv.stackMaps = lv.stackMapSet.extractUnique()
+ lv.stackMapSet = bvecSet{}
+
+ // Useful sanity check: on entry to the function,
+ // the only things that can possibly be live are the
+ // input parameters.
+ for j, n := range lv.vars {
+ if n.Class != ir.PPARAM && lv.stackMaps[0].Get(int32(j)) {
+ lv.f.Fatalf("%v %L recorded as live on entry", lv.fn.Nname, n)
+ }
+ }
+}
+
+// Compact coalesces identical bitmaps from lv.livevars into the sets
+// lv.stackMapSet.
+//
+// Compact clears lv.livevars.
+//
+// There are actually two lists of bitmaps, one list for the local variables and one
+// list for the function arguments. Both lists are indexed by the same PCDATA
+// index, so the corresponding pairs must be considered together when
+// merging duplicates. The argument bitmaps change much less often during
+// function execution than the local variable bitmaps, so it is possible that
+// we could introduce a separate PCDATA index for arguments vs locals and
+// then compact the set of argument bitmaps separately from the set of
+// local variable bitmaps. As of 2014-04-02, doing this to the godoc binary
+// is actually a net loss: we save about 50k of argument bitmaps but the new
+// PCDATA tables cost about 100k. So for now we keep using a single index for
+// both bitmap lists.
+func (lv *liveness) compact(b *ssa.Block) {
+ pos := 0
+ if b == lv.f.Entry {
+ // Handle entry stack map.
+ lv.stackMapSet.add(lv.livevars[0])
+ pos++
+ }
+ for _, v := range b.Values {
+ hasStackMap := lv.hasStackMap(v)
+ isUnsafePoint := lv.allUnsafe || v.Op != ssa.OpClobber && lv.unsafePoints.Get(int32(v.ID))
+ idx := objw.LivenessIndex{StackMapIndex: objw.StackMapDontCare, IsUnsafePoint: isUnsafePoint}
+ if hasStackMap {
+ idx.StackMapIndex, _ = lv.stackMapSet.add(lv.livevars[pos])
+ pos++
+ }
+ if hasStackMap || isUnsafePoint {
+ lv.livenessMap.set(v, idx)
+ }
+ }
+
+ // Reset livevars.
+ lv.livevars = lv.livevars[:0]
+}
+
+func (lv *liveness) enableClobber() {
+ // The clobberdead experiment inserts code to clobber pointer slots in all
+ // the dead variables (locals and args) at every synchronous safepoint.
+ if !base.Flag.ClobberDead {
+ return
+ }
+ if lv.fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ // C or assembly code uses the exact frame layout. Don't clobber.
+ return
+ }
+ if len(lv.vars) > 10000 || len(lv.f.Blocks) > 10000 {
+ // Be careful to avoid doing too much work.
+ // Bail if >10000 variables or >10000 blocks.
+ // Otherwise, giant functions make this experiment generate too much code.
+ return
+ }
+ if lv.f.Name == "forkAndExecInChild" {
+ // forkAndExecInChild calls vfork on some platforms.
+ // The code we add here clobbers parts of the stack in the child.
+ // When the parent resumes, it is using the same stack frame. But the
+ // child has clobbered stack variables that the parent needs. Boom!
+ // In particular, the sys argument gets clobbered.
+ return
+ }
+ if lv.f.Name == "wbBufFlush" ||
+ ((lv.f.Name == "callReflect" || lv.f.Name == "callMethod") && lv.fn.ABIWrapper()) {
+ // runtime.wbBufFlush must not modify its arguments. See the comments
+ // in runtime/mwbbuf.go:wbBufFlush.
+ //
+ // reflect.callReflect and reflect.callMethod are called from special
+ // functions makeFuncStub and methodValueCall. The runtime expects
+ // that it can find the first argument (ctxt) at 0(SP) in makeFuncStub
+ // and methodValueCall's frame (see runtime/traceback.go:getArgInfo).
+ // Normally callReflect and callMethod already do not modify the
+ // argument, and keep it alive. But the compiler-generated ABI wrappers
+		// don't do that. Special case the wrappers to not clobber their arguments.
+ lv.noClobberArgs = true
+ }
+ if h := os.Getenv("GOCLOBBERDEADHASH"); h != "" {
+ // Clobber only functions where the hash of the function name matches a pattern.
+ // Useful for binary searching for a miscompiled function.
+ hstr := ""
+ for _, b := range sha1.Sum([]byte(lv.f.Name)) {
+ hstr += fmt.Sprintf("%08b", b)
+ }
+ if !strings.HasSuffix(hstr, h) {
+ return
+ }
+ fmt.Printf("\t\t\tCLOBBERDEAD %s\n", lv.f.Name)
+ }
+ lv.doClobber = true
+}
+
+// Inserts code to clobber pointer slots in all the dead variables (locals and args)
+// at every synchronous safepoint in b.
+func (lv *liveness) clobber(b *ssa.Block) {
+ // Copy block's values to a temporary.
+ oldSched := append([]*ssa.Value{}, b.Values...)
+ b.Values = b.Values[:0]
+ idx := 0
+
+ // Clobber pointer slots in all dead variables at entry.
+ if b == lv.f.Entry {
+ for len(oldSched) > 0 && len(oldSched[0].Args) == 0 {
+ // Skip argless ops. We need to skip at least
+ // the lowered ClosurePtr op, because it
+ // really wants to be first. This will also
+ // skip ops like InitMem and SP, which are ok.
+ b.Values = append(b.Values, oldSched[0])
+ oldSched = oldSched[1:]
+ }
+ clobber(lv, b, lv.livevars[0])
+ idx++
+ }
+
+ // Copy values into schedule, adding clobbering around safepoints.
+ for _, v := range oldSched {
+ if !lv.hasStackMap(v) {
+ b.Values = append(b.Values, v)
+ continue
+ }
+ clobber(lv, b, lv.livevars[idx])
+ b.Values = append(b.Values, v)
+ idx++
+ }
+}
+
+// clobber generates code to clobber pointer slots in all dead variables
+// (those not marked in live). Clobbering instructions are added to the end
+// of b.Values.
+func clobber(lv *liveness, b *ssa.Block, live bitvec.BitVec) {
+ for i, n := range lv.vars {
+ if !live.Get(int32(i)) && !n.Addrtaken() && !n.OpenDeferSlot() && !n.IsOutputParamHeapAddr() {
+ // Don't clobber stack objects (address-taken). They are
+ // tracked dynamically.
+ // Also don't clobber slots that are live for defers (see
+ // the code setting livedefer in epilogue).
+ if lv.noClobberArgs && n.Class == ir.PPARAM {
+ continue
+ }
+ clobberVar(b, n)
+ }
+ }
+}
+
+// clobberVar generates code to trash the pointers in v.
+// Clobbering instructions are added to the end of b.Values.
+func clobberVar(b *ssa.Block, v *ir.Name) {
+ clobberWalk(b, v, 0, v.Type())
+}
+
+// b = block to which we append instructions
+// v = variable
+// offset = offset of (sub-portion of) variable to clobber (in bytes)
+// t = type of sub-portion of v.
+func clobberWalk(b *ssa.Block, v *ir.Name, offset int64, t *types.Type) {
+ if !t.HasPointers() {
+ return
+ }
+ switch t.Kind() {
+ case types.TPTR,
+ types.TUNSAFEPTR,
+ types.TFUNC,
+ types.TCHAN,
+ types.TMAP:
+ clobberPtr(b, v, offset)
+
+ case types.TSTRING:
+ // struct { byte *str; int len; }
+ clobberPtr(b, v, offset)
+
+ case types.TINTER:
+ // struct { Itab *tab; void *data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; void *data; }
+ clobberPtr(b, v, offset)
+ clobberPtr(b, v, offset+int64(types.PtrSize))
+
+ case types.TSLICE:
+ // struct { byte *array; int len; int cap; }
+ clobberPtr(b, v, offset)
+
+ case types.TARRAY:
+ for i := int64(0); i < t.NumElem(); i++ {
+ clobberWalk(b, v, offset+i*t.Elem().Size(), t.Elem())
+ }
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ clobberWalk(b, v, offset+t1.Offset, t1.Type)
+ }
+
+ default:
+ base.Fatalf("clobberWalk: unexpected type, %v", t)
+ }
+}
+
+// clobberPtr generates a clobber of the pointer at offset offset in v.
+// The clobber instruction is added at the end of b.
+func clobberPtr(b *ssa.Block, v *ir.Name, offset int64) {
+ b.NewValue0IA(src.NoXPos, ssa.OpClobber, types.TypeVoid, offset, v)
+}
+
+func (lv *liveness) showlive(v *ssa.Value, live bitvec.BitVec) {
+ if base.Flag.Live == 0 || ir.FuncName(lv.fn) == "init" || strings.HasPrefix(ir.FuncName(lv.fn), ".") {
+ return
+ }
+ if lv.fn.Wrapper() || lv.fn.Dupok() {
+ // Skip reporting liveness information for compiler-generated wrappers.
+ return
+ }
+ if !(v == nil || v.Op.IsCall()) {
+ // Historically we only printed this information at
+ // calls. Keep doing so.
+ return
+ }
+ if live.IsEmpty() {
+ return
+ }
+
+ pos := lv.fn.Nname.Pos()
+ if v != nil {
+ pos = v.Pos
+ }
+
+ s := "live at "
+ if v == nil {
+ s += fmt.Sprintf("entry to %s:", ir.FuncName(lv.fn))
+ } else if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ fn := sym.Fn.Name
+ if pos := strings.Index(fn, "."); pos >= 0 {
+ fn = fn[pos+1:]
+ }
+ s += fmt.Sprintf("call to %s:", fn)
+ } else {
+ s += "indirect call:"
+ }
+
+ for j, n := range lv.vars {
+ if live.Get(int32(j)) {
+ s += fmt.Sprintf(" %v", n)
+ }
+ }
+
+ base.WarnfAt(pos, s)
+}
+
+func (lv *liveness) printbvec(printed bool, name string, live bitvec.BitVec) bool {
+ if live.IsEmpty() {
+ return printed
+ }
+
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+
+ comma := ""
+ for i, n := range lv.vars {
+ if !live.Get(int32(i)) {
+ continue
+ }
+ fmt.Printf("%s%s", comma, n.Sym().Name)
+ comma = ","
+ }
+ return true
+}
+
+// printeffect is like printbvec, but for valueEffects.
+func (lv *liveness) printeffect(printed bool, name string, pos int32, x bool) bool {
+ if !x {
+ return printed
+ }
+ if !printed {
+ fmt.Printf("\t")
+ } else {
+ fmt.Printf(" ")
+ }
+ fmt.Printf("%s=", name)
+ if x {
+ fmt.Printf("%s", lv.vars[pos].Sym().Name)
+ }
+
+ return true
+}
+
+// Prints the computed liveness information and inputs, for debugging.
+// This format synthesizes the information used during the multiple passes
+// into a single presentation.
+func (lv *liveness) printDebug() {
+ fmt.Printf("liveness: %s\n", ir.FuncName(lv.fn))
+
+ for i, b := range lv.f.Blocks {
+ if i > 0 {
+ fmt.Printf("\n")
+ }
+
+ // bb#0 pred=1,2 succ=3,4
+ fmt.Printf("bb#%d pred=", b.ID)
+ for j, pred := range b.Preds {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", pred.Block().ID)
+ }
+ fmt.Printf(" succ=")
+ for j, succ := range b.Succs {
+ if j > 0 {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%d", succ.Block().ID)
+ }
+ fmt.Printf("\n")
+
+ be := lv.blockEffects(b)
+
+ // initial settings
+ printed := false
+ printed = lv.printbvec(printed, "uevar", be.uevar)
+ printed = lv.printbvec(printed, "livein", be.livein)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ // program listing, with individual effects listed
+
+ if b == lv.f.Entry {
+ live := lv.stackMaps[0]
+ fmt.Printf("(%s) function entry\n", base.FmtPos(lv.fn.Nname.Pos()))
+ fmt.Printf("\tlive=")
+ printed = false
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ fmt.Printf("\n")
+ }
+
+ for _, v := range b.Values {
+ fmt.Printf("(%s) %v\n", base.FmtPos(v.Pos), v.LongString())
+
+ pcdata := lv.livenessMap.Get(v)
+
+ pos, effect := lv.valueEffects(v)
+ printed = false
+ printed = lv.printeffect(printed, "uevar", pos, effect&uevar != 0)
+ printed = lv.printeffect(printed, "varkill", pos, effect&varkill != 0)
+ if printed {
+ fmt.Printf("\n")
+ }
+
+ if pcdata.StackMapValid() {
+ fmt.Printf("\tlive=")
+ printed = false
+ if pcdata.StackMapValid() {
+ live := lv.stackMaps[pcdata.StackMapIndex]
+ for j, n := range lv.vars {
+ if !live.Get(int32(j)) {
+ continue
+ }
+ if printed {
+ fmt.Printf(",")
+ }
+ fmt.Printf("%v", n)
+ printed = true
+ }
+ }
+ fmt.Printf("\n")
+ }
+
+ if pcdata.IsUnsafePoint {
+ fmt.Printf("\tunsafe-point\n")
+ }
+ }
+
+ // bb bitsets
+ fmt.Printf("end\n")
+ printed = false
+ printed = lv.printbvec(printed, "varkill", be.varkill)
+ printed = lv.printbvec(printed, "liveout", be.liveout)
+ if printed {
+ fmt.Printf("\n")
+ }
+ }
+
+ fmt.Printf("\n")
+}
+
+// Dumps a slice of bitmaps to a symbol as a sequence of uint32 values. The
+// first word dumped is the total number of bitmaps. The second word is the
+// length of the bitmaps. All bitmaps are assumed to be of equal length. The
+// remaining bytes are the raw bitmaps.
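+//
+// Roughly (an illustrative sketch, not byte-exact): for 3 maps of 5 bits
+// each, the symbol begins with uint32(3), then uint32(5), followed by the
+// packed bytes of map 0, map 1, and map 2 in order.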
+func (lv *liveness) emit() (argsSym, liveSym *obj.LSym) {
+ // Size args bitmaps to be just large enough to hold the largest pointer.
+ // First, find the largest Xoffset node we care about.
+	// (Nodes without pointers aren't in lv.vars; see shouldTrack.)
+ var maxArgNode *ir.Name
+ for _, n := range lv.vars {
+ switch n.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !n.IsOutputParamInRegisters() {
+ if maxArgNode == nil || n.FrameOffset() > maxArgNode.FrameOffset() {
+ maxArgNode = n
+ }
+ }
+ }
+ }
+ // Next, find the offset of the largest pointer in the largest node.
+ var maxArgs int64
+ if maxArgNode != nil {
+ maxArgs = maxArgNode.FrameOffset() + types.PtrDataSize(maxArgNode.Type())
+ }
+
+ // Size locals bitmaps to be stkptrsize sized.
+ // We cannot shrink them to only hold the largest pointer,
+ // because their size is used to calculate the beginning
+ // of the local variables frame.
+ // Further discussion in https://golang.org/cl/104175.
+ // TODO: consider trimming leading zeros.
+ // This would require shifting all bitmaps.
+ maxLocals := lv.stkptrsize
+
+ // Temporary symbols for encoding bitmaps.
+ var argsSymTmp, liveSymTmp obj.LSym
+
+ args := bitvec.New(int32(maxArgs / int64(types.PtrSize)))
+ aoff := objw.Uint32(&argsSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ aoff = objw.Uint32(&argsSymTmp, aoff, uint32(args.N)) // number of bits in each bitmap
+
+ locals := bitvec.New(int32(maxLocals / int64(types.PtrSize)))
+ loff := objw.Uint32(&liveSymTmp, 0, uint32(len(lv.stackMaps))) // number of bitmaps
+ loff = objw.Uint32(&liveSymTmp, loff, uint32(locals.N)) // number of bits in each bitmap
+
+ for _, live := range lv.stackMaps {
+ args.Clear()
+ locals.Clear()
+
+ lv.pointerMap(live, lv.vars, args, locals)
+
+ aoff = objw.BitVec(&argsSymTmp, aoff, args)
+ loff = objw.BitVec(&liveSymTmp, loff, locals)
+ }
+
+ // These symbols will be added to Ctxt.Data by addGCLocals
+ // after parallel compilation is done.
+ return base.Ctxt.GCLocalsSym(argsSymTmp.P), base.Ctxt.GCLocalsSym(liveSymTmp.P)
+}
+
+// Compute is the entry point for the liveness analysis. It solves for the
+// liveness of pointer variables in the function and emits a runtime data
+// structure read by the garbage collector.
+// Returns a map from GC safe points to their corresponding stack map index,
+// and a map that contains all input parameters that may be partially live.
+func Compute(curfn *ir.Func, f *ssa.Func, stkptrsize int64, pp *objw.Progs) (Map, map[*ir.Name]bool) {
+ // Construct the global liveness state.
+ vars, idx := getvariables(curfn)
+ lv := newliveness(curfn, f, vars, idx, stkptrsize)
+
+ // Run the dataflow framework.
+ lv.prologue()
+ lv.solve()
+ lv.epilogue()
+ if base.Flag.Live > 0 {
+ lv.showlive(nil, lv.stackMaps[0])
+ for _, b := range f.Blocks {
+ for _, val := range b.Values {
+ if idx := lv.livenessMap.Get(val); idx.StackMapValid() {
+ lv.showlive(val, lv.stackMaps[idx.StackMapIndex])
+ }
+ }
+ }
+ }
+ if base.Flag.Live >= 2 {
+ lv.printDebug()
+ }
+
+ // Update the function cache.
+ {
+ cache := f.Cache.Liveness.(*livenessFuncCache)
+ if cap(lv.be) < 2000 { // Threshold from ssa.Cache slices.
+ for i := range lv.be {
+ lv.be[i] = blockEffects{}
+ }
+ cache.be = lv.be
+ }
+ if len(lv.livenessMap.Vals) < 2000 {
+ cache.livenessMap = lv.livenessMap
+ }
+ }
+
+ // Emit the live pointer map data structures
+ ls := curfn.LSym
+ fninfo := ls.Func()
+ fninfo.GCArgs, fninfo.GCLocals = lv.emit()
+
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_ArgsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCArgs
+
+ p = pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_LocalsPointerMaps)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = fninfo.GCLocals
+
+ if x := lv.emitStackObjects(); x != nil {
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_StackObjects)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+ }
+
+ return lv.livenessMap, lv.partLiveArgs
+}
+
+func (lv *liveness) emitStackObjects() *obj.LSym {
+ var vars []*ir.Name
+ for _, n := range lv.fn.Dcl {
+ if shouldTrack(n) && n.Addrtaken() && n.Esc() != ir.EscHeap {
+ vars = append(vars, n)
+ }
+ }
+ if len(vars) == 0 {
+ return nil
+ }
+
+ // Sort variables from lowest to highest address.
+ sort.Slice(vars, func(i, j int) bool { return vars[i].FrameOffset() < vars[j].FrameOffset() })
+
+ // Populate the stack object data.
+ // Format must match runtime/stack.go:stackObjectRecord.
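+	// As written below, each record is: frame offset (uint32), object size
+	// (uint32), ptrdata (uint32, negated when a GC program is used), and a
+	// reference to the gcdata symbol; runtime/stack.go remains authoritative
+	// for the exact layout.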
+ x := base.Ctxt.Lookup(lv.fn.LSym.Name + ".stkobj")
+ x.Set(obj.AttrContentAddressable, true)
+ lv.fn.LSym.Func().StackObjects = x
+ off := 0
+ off = objw.Uintptr(x, off, uint64(len(vars)))
+ for _, v := range vars {
+ // Note: arguments and return values have non-negative Xoffset,
+ // in which case the offset is relative to argp.
+ // Locals have a negative Xoffset, in which case the offset is relative to varp.
+ // We already limit the frame size, so the offset and the object size
+ // should not be too big.
+ frameOffset := v.FrameOffset()
+ if frameOffset != int64(int32(frameOffset)) {
+ base.Fatalf("frame offset too big: %v %d", v, frameOffset)
+ }
+ off = objw.Uint32(x, off, uint32(frameOffset))
+
+ t := v.Type()
+ sz := t.Size()
+ if sz != int64(int32(sz)) {
+ base.Fatalf("stack object too big: %v of type %v, size %d", v, t, sz)
+ }
+ lsym, useGCProg, ptrdata := reflectdata.GCSym(t)
+ if useGCProg {
+ ptrdata = -ptrdata
+ }
+ off = objw.Uint32(x, off, uint32(sz))
+ off = objw.Uint32(x, off, uint32(ptrdata))
+ off = objw.SymPtrOff(x, off, lsym)
+ }
+
+ if base.Flag.Live != 0 {
+ for _, v := range vars {
+ base.WarnfAt(v.Pos(), "stack object %v %v", v, v.Type())
+ }
+ }
+
+ return x
+}
+
+// isfat reports whether a variable of type t needs multiple assignments to initialize.
+// For example:
+//
+// type T struct { x, y int }
+// x := T{x: 0, y: 1}
+//
+// Then we need:
+//
+// var t T
+// t.x = 0
+// t.y = 1
+//
+// to fully initialize t.
+func isfat(t *types.Type) bool {
+ if t != nil {
+ switch t.Kind() {
+ case types.TSLICE, types.TSTRING,
+ types.TINTER: // maybe remove later
+ return true
+ case types.TARRAY:
+ // Array of 1 element, check if element is fat
+ if t.NumElem() == 1 {
+ return isfat(t.Elem())
+ }
+ return true
+ case types.TSTRUCT:
+ // Struct with 1 field, check if field is fat
+ if t.NumFields() == 1 {
+ return isfat(t.Field(0).Type)
+ }
+ return true
+ }
+ }
+
+ return false
+}
+
+// WriteFuncMap writes the pointer bitmaps for bodyless function fn's
+// inputs and outputs as the value of symbol <fn>.args_stackmap.
+// If fn has outputs, two bitmaps are written, otherwise just one.
+func WriteFuncMap(fn *ir.Func, abiInfo *abi.ABIParamResultInfo) {
+ if ir.FuncName(fn) == "_" || fn.Sym().Linkname != "" {
+ return
+ }
+ nptr := int(abiInfo.ArgWidth() / int64(types.PtrSize))
+ bv := bitvec.New(int32(nptr) * 2)
+
+ for _, p := range abiInfo.InParams() {
+ typebits.Set(p.Type, p.FrameOffset(abiInfo), bv)
+ }
+
+ nbitmap := 1
+ if fn.Type().NumResults() > 0 {
+ nbitmap = 2
+ }
+ lsym := base.Ctxt.Lookup(fn.LSym.Name + ".args_stackmap")
+ off := objw.Uint32(lsym, 0, uint32(nbitmap))
+ off = objw.Uint32(lsym, off, uint32(bv.N))
+ off = objw.BitVec(lsym, off, bv)
+
+ if fn.Type().NumResults() > 0 {
+ for _, p := range abiInfo.OutParams() {
+ if len(p.Registers) == 0 {
+ typebits.Set(p.Type, p.FrameOffset(abiInfo), bv)
+ }
+ }
+ off = objw.BitVec(lsym, off, bv)
+ }
+
+ objw.Global(lsym, int32(off), obj.RODATA|obj.LOCAL)
+}
diff --git a/src/cmd/compile/internal/logopt/escape.go b/src/cmd/compile/internal/logopt/escape.go
new file mode 100644
index 0000000..9660e93
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/escape.go
@@ -0,0 +1,14 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.8
+// +build go1.8
+
+package logopt
+
+import "net/url"
+
+func pathEscape(s string) string {
+ return url.PathEscape(s)
+}
diff --git a/src/cmd/compile/internal/logopt/escape_bootstrap.go b/src/cmd/compile/internal/logopt/escape_bootstrap.go
new file mode 100644
index 0000000..cc04eaa
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/escape_bootstrap.go
@@ -0,0 +1,13 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.8
+// +build !go1.8
+
+package logopt
+
+// For bootstrapping with an early version of Go
+func pathEscape(s string) string {
+ panic("This should never be called; the compiler is not fully bootstrapped if it is.")
+}
diff --git a/src/cmd/compile/internal/logopt/log_opts.go b/src/cmd/compile/internal/logopt/log_opts.go
new file mode 100644
index 0000000..97ebf56
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/log_opts.go
@@ -0,0 +1,523 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/json"
+ "fmt"
+ "internal/buildcfg"
+ "io"
+ "log"
+ "net/url"
+ "os"
+ "path/filepath"
+ "sort"
+ "strconv"
+ "strings"
+ "sync"
+ "unicode"
+)
+
+// This implements (non)optimization logging for the -json option to the Go compiler.
+// The option is -json 0,<destination>.
+//
+// 0 is the version number; to avoid the need for synchronized updates, if
+// new versions of the logging appear, the compiler will support both, for a while,
+// and clients will specify what they need.
+//
+// <destination> is a directory.
+// Directories are specified with a leading / or os.PathSeparator,
+// or more explicitly with file://directory. The second form is intended to
+// deal with corner cases on Windows, and to allow specification of a relative
+// directory path (which is normally a bad idea, because the local directory
+// varies a lot in a build, especially with modules and/or vendoring, and may
+// not be writeable).
+//
+// For each package pkg compiled, a url.PathEscape(pkg)-named subdirectory
+// is created. For each source file.go in that package that generates
+// diagnostics (no diagnostics means no file),
+// a url.PathEscape(file)+".json"-named file is created and contains the
+// logged diagnostics.
+//
+// For example, "cmd%2Finternal%2Fdwarf/%3Cautogenerated%3E.json"
+// for "cmd/internal/dwarf" and <autogenerated> (which is not really a file, but the compiler sees it)
+//
+// If the package string is empty, it is replaced internally with string(0) which encodes to %00.
+//
+// Each log file begins with a JSON record identifying version,
+// platform, and other context, followed by optimization-relevant
+// LSP Diagnostic records, one per line (LSP version 3.15; no difference from 3.14 on the subset used here,
+// see https://microsoft.github.io/language-server-protocol/specifications/specification-3-15/ ).
+//
+// The fields of a Diagnostic are used in the following way:
+// Range: the outermost source position, for now begin and end are equal.
+// Severity: (always) SeverityInformation (3)
+// Source: (always) "go compiler"
+// Code: a string describing the missed optimization, e.g., "nilcheck", "cannotInline", "isInBounds", "escape"
+// Message: depending on code, additional information, e.g., the reason a function cannot be inlined.
+// RelatedInformation: if the missed optimization actually occurred at a function inlined at Range,
+// then the sequence of inlined locations appears here, from (second) outermost to innermost,
+// each with message="inlineLoc".
+//
+// In the case of escape analysis explanations, after any outer inlining locations,
+// the lines of the explanation appear, each potentially followed with its own inlining
+// location if the escape flow occurred within an inlined function.
+//
+// For example <destination>/cmd%2Fcompile%2Finternal%2Fssa/prove.json
+// might begin with the following line (wrapped for legibility):
+//
+// {"version":0,"package":"cmd/compile/internal/ssa","goos":"darwin","goarch":"amd64",
+// "gc_version":"devel +e1b9a57852 Fri Nov 1 15:07:00 2019 -0400",
+// "file":"/Users/drchase/work/go/src/cmd/compile/internal/ssa/prove.go"}
+//
+// and later contain (also wrapped for legibility):
+//
+// {"range":{"start":{"line":191,"character":24},"end":{"line":191,"character":24}},
+// "severity":3,"code":"nilcheck","source":"go compiler","message":"",
+// "relatedInformation":[
+// {"location":{"uri":"file:///Users/drchase/work/go/src/cmd/compile/internal/ssa/func.go",
+// "range":{"start":{"line":153,"character":16},"end":{"line":153,"character":16}}},
+// "message":"inlineLoc"}]}
+//
+// That is, at prove.go (implicit from context, provided in both filename and header line),
+// line 191, column 24, a nilcheck occurred in the generated code.
+// The relatedInformation indicates that this code actually came from
+// an inlined call to func.go, line 153, character 16.
+//
+// prove.go:191:
+// ft.orderS = f.newPoset()
+// func.go:152 and 153:
+// func (f *Func) newPoset() *poset {
+// if len(f.Cache.scrPoset) > 0 {
+//
+// In the case that the package is empty, the string(0) package name is also used in the header record, for example
+//
+// go tool compile -json=0,file://logopt x.go # no -p option to set the package
+// head -1 logopt/%00/x.json
+// {"version":0,"package":"\u0000","goos":"darwin","goarch":"amd64","gc_version":"devel +86487adf6a Thu Nov 7 19:34:56 2019 -0500","file":"x.go"}
+
+type VersionHeader struct {
+ Version int `json:"version"`
+ Package string `json:"package"`
+ Goos string `json:"goos"`
+ Goarch string `json:"goarch"`
+ GcVersion string `json:"gc_version"`
+ File string `json:"file,omitempty"` // LSP requires an enclosing resource, i.e., a file
+}
+
+// DocumentURI, Position, Range, Location, Diagnostic, DiagnosticRelatedInformation all reuse json definitions from gopls.
+// See https://github.com/golang/tools/blob/22afafe3322a860fcd3d88448768f9db36f8bc5f/internal/lsp/protocol/tsprotocol.go
+
+type DocumentURI string
+
+type Position struct {
+ Line uint `json:"line"` // gopls uses float64, but json output is the same for integers
+ Character uint `json:"character"` // gopls uses float64, but json output is the same for integers
+}
+
+// A Range in a text document expressed as (zero-based) start and end positions.
+// A range is comparable to a selection in an editor. Therefore the end position is exclusive.
+// If you want to specify a range that contains a line including the line ending character(s)
+// then use an end position denoting the start of the next line.
+type Range struct {
+ /*Start defined:
+ * The range's start position
+ */
+ Start Position `json:"start"`
+
+ /*End defined:
+ * The range's end position
+ */
+ End Position `json:"end"` // exclusive
+}
+
+// A Location represents a location inside a resource, such as a line inside a text file.
+type Location struct {
+	// URI is the URI of the document containing the location.
+	URI DocumentURI `json:"uri"`
+
+	// Range is the position range within that document.
+	Range Range `json:"range"`
+}
+
+/* DiagnosticRelatedInformation defined:
+ * Represents a related message and source code location for a diagnostic. This should be
+ * used to point to code locations that cause or are related to a diagnostic, e.g. when duplicating
+ * a symbol in a scope.
+ */
+type DiagnosticRelatedInformation struct {
+
+ /*Location defined:
+ * The location of this related diagnostic information.
+ */
+ Location Location `json:"location"`
+
+ /*Message defined:
+ * The message of this related diagnostic information.
+ */
+ Message string `json:"message"`
+}
+
+// DiagnosticSeverity defines constants
+type DiagnosticSeverity uint
+
+const (
+ /*SeverityInformation defined:
+	 * Reports information.
+ */
+ SeverityInformation DiagnosticSeverity = 3
+)
+
+// DiagnosticTag defines constants
+type DiagnosticTag uint
+
+/*Diagnostic defined:
+ * Represents a diagnostic, such as a compiler error or warning. Diagnostic objects
+ * are only valid in the scope of a resource.
+ */
+type Diagnostic struct {
+
+ /*Range defined:
+ * The range at which the message applies
+ */
+ Range Range `json:"range"`
+
+ /*Severity defined:
+ * The diagnostic's severity. Can be omitted. If omitted it is up to the
+ * client to interpret diagnostics as error, warning, info or hint.
+ */
+ Severity DiagnosticSeverity `json:"severity,omitempty"` // always SeverityInformation for optimizer logging.
+
+ /*Code defined:
+	 * The diagnostic's code, which usually appears in the user interface.
+ */
+ Code string `json:"code,omitempty"` // LSP uses 'number | string' = gopls interface{}, but only string here, e.g. "boundsCheck", "nilcheck", etc.
+
+ /*Source defined:
+ * A human-readable string describing the source of this
+ * diagnostic, e.g. 'typescript' or 'super lint'. It usually
+ * appears in the user interface.
+ */
+ Source string `json:"source,omitempty"` // "go compiler"
+
+ /*Message defined:
+	 * The diagnostic's message. It usually appears in the user interface.
+ */
+ Message string `json:"message"` // sometimes used, provides additional information.
+
+ /*Tags defined:
+ * Additional metadata about the diagnostic.
+ */
+ Tags []DiagnosticTag `json:"tags,omitempty"` // always empty for logging optimizations.
+
+ /*RelatedInformation defined:
+ * An array of related diagnostic information, e.g. when symbol-names within
+ * a scope collide all definitions can be marked via this property.
+ */
+ RelatedInformation []DiagnosticRelatedInformation `json:"relatedInformation,omitempty"`
+}
+
+// A LoggedOpt is what the compiler produces and accumulates,
+// to be converted to JSON for human or IDE consumption.
+type LoggedOpt struct {
+ pos src.XPos // Source code position at which the event occurred. If it is inlined, outer and all inlined locations will appear in JSON.
+ compilerPass string // Compiler pass. For human/adhoc consumption; does not appear in JSON (yet)
+ functionName string // Function name. For human/adhoc consumption; does not appear in JSON (yet)
+ what string // The (non) optimization; "nilcheck", "boundsCheck", "inline", "noInline"
+ target []interface{} // Optional target(s) or parameter(s) of "what" -- what was inlined, why it was not, size of copy, etc. 1st is most important/relevant.
+}
+
+type logFormat uint8
+
+const (
+ None logFormat = iota
+ Json0 // version 0 for LSP 3.14, 3.15; future versions of LSP may change the format and the compiler may need to support both as clients are updated.
+)
+
+var Format = None
+var dest string
+
+// LogJsonOption parses and validates the version,directory value attached to the -json compiler flag.
+func LogJsonOption(flagValue string) {
+ version, directory := parseLogFlag("json", flagValue)
+ if version != 0 {
+ log.Fatal("-json version must be 0")
+ }
+ dest = checkLogPath(directory)
+ Format = Json0
+}
+
+// parseLogFlag checks the flag passed to -json
+// for version,destination format and returns the two parts.
+func parseLogFlag(flag, value string) (version int, directory string) {
+ if Format != None {
+ log.Fatal("Cannot repeat -json flag")
+ }
+ commaAt := strings.Index(value, ",")
+ if commaAt <= 0 {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number", flag)
+ }
+ v, err := strconv.Atoi(value[:commaAt])
+ if err != nil {
+ log.Fatalf("-%s option should be '<version>,<destination>' where <version> is a number: err=%v", flag, err)
+ }
+ version = v
+ directory = value[commaAt+1:]
+ return
+}
+
+// isWindowsDriveURI returns true if the file URI is of the format used by
+// Windows URIs. The url.Parse package does not specially handle Windows paths
+// (see golang/go#6027), so we check if the URI path has a drive prefix (e.g. "/C:").
+// (copied from tools/internal/span/uri.go)
+// This is less comprehensive than the processing in filepath.IsAbs on Windows.
+func isWindowsDriveURIPath(uri string) bool {
+ if len(uri) < 4 {
+ return false
+ }
+ return uri[0] == '/' && unicode.IsLetter(rune(uri[1])) && uri[2] == ':'
+}
+
+func parseLogPath(destination string) (string, string) {
+ if filepath.IsAbs(destination) {
+ return filepath.Clean(destination), ""
+ }
+ if strings.HasPrefix(destination, "file://") { // IKWIAD, or Windows C:\foo\bar\baz
+ uri, err := url.Parse(destination)
+ if err != nil {
+ return "", fmt.Sprintf("optimizer logging destination looked like file:// URI but failed to parse: err=%v", err)
+ }
+ destination = uri.Host + uri.Path
+ if isWindowsDriveURIPath(destination) {
+ // strip leading / from /C:
+ // unlike tools/internal/span/uri.go, do not uppercase the drive letter -- let filepath.Clean do what it does.
+ destination = destination[1:]
+ }
+ return filepath.Clean(destination), ""
+ }
+ return "", fmt.Sprintf("optimizer logging destination %s was neither %s-prefixed directory nor file://-prefixed file URI", destination, string(filepath.Separator))
+}
+
+// checkLogPath does superficial early checking of the string specifying
+// the directory to which optimizer logging is directed, creates the
+// directory if necessary, and returns the cleaned path.
+func checkLogPath(destination string) string {
+ path, complaint := parseLogPath(destination)
+ if complaint != "" {
+ log.Fatalf(complaint)
+ }
+ err := os.MkdirAll(path, 0755)
+ if err != nil {
+ log.Fatalf("optimizer logging destination '<version>,<directory>' but could not create <directory>: err=%v", err)
+ }
+ return path
+}
+
+var loggedOpts []*LoggedOpt
+var mu = sync.Mutex{} // mu protects loggedOpts.
+
+// NewLoggedOpt allocates a new LoggedOpt, to later be passed to either NewLoggedOpt or LogOpt as "args".
+// pos is the source position (including inlining), what is the message, pass is the pass that created the message,
+// and funcName is the name of the function.
+// A typical use for this is to accumulate an explanation for a missed optimization, for example, why did something escape?
+func NewLoggedOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) *LoggedOpt {
+ pass = strings.Replace(pass, " ", "_", -1)
+ return &LoggedOpt{pos, pass, funcName, what, args}
+}
+
+// LogOpt logs information about a (usually missed) optimization performed by the compiler.
+// pos is the source position (including inlining), what is the message, pass is the pass that created the message,
+// and funcName is the name of the function.
+func LogOpt(pos src.XPos, what, pass, funcName string, args ...interface{}) {
+ if Format == None {
+ return
+ }
+ lo := NewLoggedOpt(pos, what, pass, funcName, args...)
+ mu.Lock()
+ defer mu.Unlock()
+	// Because of concurrent calls from the back end, the order of entries is unpredictable, but loggedOpts is stable-sorted by outer Pos before use.
+ loggedOpts = append(loggedOpts, lo)
+}
+
+// Enabled returns whether optimization logging is enabled.
+func Enabled() bool {
+ switch Format {
+ case None:
+ return false
+ case Json0:
+ return true
+ }
+ panic("Unexpected optimizer-logging level")
+}
+
+// byPos sorts diagnostics by source position.
+type byPos struct {
+ ctxt *obj.Link
+ a []*LoggedOpt
+}
+
+func (x byPos) Len() int { return len(x.a) }
+func (x byPos) Less(i, j int) bool {
+ return x.ctxt.OutermostPos(x.a[i].pos).Before(x.ctxt.OutermostPos(x.a[j].pos))
+}
+func (x byPos) Swap(i, j int) { x.a[i], x.a[j] = x.a[j], x.a[i] }
+
+func writerForLSP(subdirpath, file string) io.WriteCloser {
+ basename := file
+ lastslash := strings.LastIndexAny(basename, "\\/")
+ if lastslash != -1 {
+ basename = basename[lastslash+1:]
+ }
+ lastdot := strings.LastIndex(basename, ".go")
+ if lastdot != -1 {
+ basename = basename[:lastdot]
+ }
+ basename = pathEscape(basename)
+
+ // Assume a directory, make a file
+ p := filepath.Join(subdirpath, basename+".json")
+ w, err := os.Create(p)
+ if err != nil {
+ log.Fatalf("Could not create file %s for logging optimizer actions, %v", p, err)
+ }
+ return w
+}
+
+func fixSlash(f string) string {
+ if os.PathSeparator == '/' {
+ return f
+ }
+ return strings.Replace(f, string(os.PathSeparator), "/", -1)
+}
+
+func uriIfy(f string) DocumentURI {
+ url := url.URL{
+ Scheme: "file",
+ Path: fixSlash(f),
+ }
+ return DocumentURI(url.String())
+}
+
+// uprootedPath returns filename with a leading "$GOROOT/" replaced by the
+// actual value of GOROOT (because LSP does not speak "$GOROOT").
+func uprootedPath(filename string) string {
+ if !strings.HasPrefix(filename, "$GOROOT/") {
+ return filename
+ }
+ return buildcfg.GOROOT + filename[len("$GOROOT"):]
+}
+
+// FlushLoggedOpts flushes all the accumulated optimization log entries.
+func FlushLoggedOpts(ctxt *obj.Link, slashPkgPath string) {
+ if Format == None {
+ return
+ }
+
+ sort.Stable(byPos{ctxt, loggedOpts}) // Stable is necessary to preserve the per-function order, which is repeatable.
+ switch Format {
+
+ case Json0: // LSP 3.15
+ var posTmp []src.Pos
+ var encoder *json.Encoder
+ var w io.WriteCloser
+
+ if slashPkgPath == "" {
+ slashPkgPath = "\000"
+ }
+ subdirpath := filepath.Join(dest, pathEscape(slashPkgPath))
+ err := os.MkdirAll(subdirpath, 0755)
+ if err != nil {
+ log.Fatalf("Could not create directory %s for logging optimizer actions, %v", subdirpath, err)
+ }
+ diagnostic := Diagnostic{Source: "go compiler", Severity: SeverityInformation}
+
+ // For LSP, make a subdirectory for the package, and for each file foo.go, create foo.json in that subdirectory.
+ currentFile := ""
+ for _, x := range loggedOpts {
+ posTmp, p0 := x.parsePos(ctxt, posTmp)
+ p0f := uprootedPath(p0.Filename())
+
+ if currentFile != p0f {
+ if w != nil {
+ w.Close()
+ }
+ currentFile = p0f
+ w = writerForLSP(subdirpath, currentFile)
+ encoder = json.NewEncoder(w)
+ encoder.Encode(VersionHeader{Version: 0, Package: slashPkgPath, Goos: buildcfg.GOOS, Goarch: buildcfg.GOARCH, GcVersion: buildcfg.Version, File: currentFile})
+ }
+
+ // The first "target" is the most important one.
+ var target string
+ if len(x.target) > 0 {
+ target = fmt.Sprint(x.target[0])
+ }
+
+ diagnostic.Code = x.what
+ diagnostic.Message = target
+ diagnostic.Range = newPointRange(p0)
+ diagnostic.RelatedInformation = diagnostic.RelatedInformation[:0]
+
+ appendInlinedPos(posTmp, &diagnostic)
+
+ // Diagnostic explanation is stored in RelatedInformation after inlining info
+ if len(x.target) > 1 {
+ switch y := x.target[1].(type) {
+ case []*LoggedOpt:
+ for _, z := range y {
+ posTmp, p0 := z.parsePos(ctxt, posTmp)
+ loc := newLocation(p0)
+ msg := z.what
+ if len(z.target) > 0 {
+ msg = msg + ": " + fmt.Sprint(z.target[0])
+ }
+
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: msg})
+ appendInlinedPos(posTmp, &diagnostic)
+ }
+ }
+ }
+
+ encoder.Encode(diagnostic)
+ }
+ if w != nil {
+ w.Close()
+ }
+ }
+}
+
+// newPointRange returns a single-position Range for the compiler source location p.
+func newPointRange(p src.Pos) Range {
+ return Range{Start: Position{p.Line(), p.Col()},
+ End: Position{p.Line(), p.Col()}}
+}
+
+// newLocation returns the Location for the compiler source location p
+func newLocation(p src.Pos) Location {
+ loc := Location{URI: uriIfy(uprootedPath(p.Filename())), Range: newPointRange(p)}
+ return loc
+}
+
+// appendInlinedPos extracts inlining information from posTmp and appends it to diagnostic.
+func appendInlinedPos(posTmp []src.Pos, diagnostic *Diagnostic) {
+ for i := 1; i < len(posTmp); i++ {
+ p := posTmp[i]
+ loc := newLocation(p)
+ diagnostic.RelatedInformation = append(diagnostic.RelatedInformation, DiagnosticRelatedInformation{Location: loc, Message: "inlineLoc"})
+ }
+}
+
+func (x *LoggedOpt) parsePos(ctxt *obj.Link, posTmp []src.Pos) ([]src.Pos, src.Pos) {
+ posTmp = ctxt.AllPos(x.pos, posTmp)
+ // Reverse posTmp to put outermost first.
+ l := len(posTmp)
+ for i := 0; i < l/2; i++ {
+ posTmp[i], posTmp[l-i-1] = posTmp[l-i-1], posTmp[i]
+ }
+ p0 := posTmp[0]
+ return posTmp, p0
+}
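
To make the record layout documented above concrete, here is a hedged sketch of a hypothetical standalone consumer (not part of the compiler) that reads one of the emitted .json files: the first JSON object on the stream is the VersionHeader, and every following object is an LSP-style Diagnostic. The minimal struct definitions below mirror the exported types above, declaring only the fields needed for printing; the file path log/opt/x/file.json is illustrative.

package main

import (
	"encoding/json"
	"fmt"
	"log"
	"os"
)

// header and diag are minimal mirrors of VersionHeader and Diagnostic.
type header struct {
	Version int    `json:"version"`
	Package string `json:"package"`
	File    string `json:"file"`
}

type diag struct {
	Code    string `json:"code"`
	Message string `json:"message"`
	Range   struct {
		Start struct {
			Line      uint `json:"line"`
			Character uint `json:"character"`
		} `json:"start"`
	} `json:"range"`
}

func main() {
	f, err := os.Open("log/opt/x/file.json") // illustrative path
	if err != nil {
		log.Fatal(err)
	}
	defer f.Close()
	dec := json.NewDecoder(f)

	// First record: the version header.
	var h header
	if err := dec.Decode(&h); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("package %s, file %s (log version %d)\n", h.Package, h.File, h.Version)

	// Remaining records: one Diagnostic per line.
	for dec.More() {
		var d diag
		if err := dec.Decode(&d); err != nil {
			log.Fatal(err)
		}
		fmt.Printf("%s:%d:%d: %s %s\n", h.File, d.Range.Start.Line, d.Range.Start.Character, d.Code, d.Message)
	}
}
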
diff --git a/src/cmd/compile/internal/logopt/logopt_test.go b/src/cmd/compile/internal/logopt/logopt_test.go
new file mode 100644
index 0000000..902cbc8
--- /dev/null
+++ b/src/cmd/compile/internal/logopt/logopt_test.go
@@ -0,0 +1,258 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package logopt
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+const srcCode = `package x
+type pair struct {a,b int}
+func bar(y *pair) *int {
+ return &y.b
+}
+var a []int
+func foo(w, z *pair) *int {
+ if *bar(w) > 0 {
+ return bar(z)
+ }
+ if a[1] > 0 {
+ a = a[:2]
+ }
+ return &a[0]
+}
+
+// address taking prevents closure inlining
+func n() int {
+ foo := func() int { return 1 }
+ bar := &foo
+ x := (*bar)() + foo()
+ return x
+}
+`
+
+func want(t *testing.T, out string, desired string) {
+ // On Windows, Unicode escapes in the JSON output end up "normalized" elsewhere to /u....,
+ // so "normalize" what we're looking for to match that.
+ s := strings.ReplaceAll(desired, string(os.PathSeparator), "/")
+ if !strings.Contains(out, s) {
+ t.Errorf("did not see phrase %s in \n%s", s, out)
+ }
+}
+
+func wantN(t *testing.T, out string, desired string, n int) {
+ if strings.Count(out, desired) != n {
+ t.Errorf("expected exactly %d occurrences of %s in \n%s", n, desired, out)
+ }
+}
+
+func TestPathStuff(t *testing.T) {
+ sep := string(filepath.Separator)
+ if path, whine := parseLogPath("file:///c:foo"); path != "c:foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("file:///foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if sep == "\\" { // On WINDOWS ONLY
+ if path, whine := parseLogPath("C:/foo"); path != "C:\\foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("c:foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ if path, whine := parseLogPath("/foo"); path != "" || whine == "" { // BAD path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ } else { // ON UNIX ONLY
+ if path, whine := parseLogPath("/foo"); path != sep+"foo" || whine != "" { // good path
+ t.Errorf("path='%s', whine='%s'", path, whine)
+ }
+ }
+}
+
+func TestLogOpt(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := ioutil.TempDir("", "TestLogOpt")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ dir = fixSlash(dir) // Normalize the directory name as much as possible, for Windows testing
+ src := filepath.Join(dir, "file.go")
+ if err := ioutil.WriteFile(src, []byte(srcCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "file.o")
+
+ t.Run("JSON_fails", func(t *testing.T) {
+ // Test malformed flag
+ out, err := testLogOpt(t, "-json=foo", src, outfile)
+ if err == nil {
+ t.Error("-json=foo succeeded unexpectedly")
+ }
+ want(t, out, "option should be")
+ want(t, out, "number")
+
+ // Test a version number that is currently unsupported (and should remain unsupported for a while)
+ out, err = testLogOpt(t, "-json=9,foo", src, outfile)
+ if err == nil {
+			t.Error("-json=9,foo succeeded unexpectedly")
+ }
+ want(t, out, "version must be")
+
+ })
+
+ // replace d (dir) with t ("tmpdir") and convert path separators to '/'
+ normalize := func(out []byte, d, t string) string {
+ s := string(out)
+ s = strings.ReplaceAll(s, d, t)
+ s = strings.ReplaceAll(s, string(os.PathSeparator), "/")
+ return s
+ }
+
+ // Ensure that <128 byte copies are not reported and that 128-byte copies are.
+ // Check at both 1 and 8-byte alignments.
+ t.Run("Copy", func(t *testing.T) {
+ const copyCode = `package x
+func s128a1(x *[128]int8) [128]int8 {
+ return *x
+}
+func s127a1(x *[127]int8) [127]int8 {
+ return *x
+}
+func s16a8(x *[16]int64) [16]int64 {
+ return *x
+}
+func s15a8(x *[15]int64) [15]int64 {
+ return *x
+}
+`
+ copy := filepath.Join(dir, "copy.go")
+ if err := ioutil.WriteFile(copy, []byte(copyCode), 0644); err != nil {
+ t.Fatal(err)
+ }
+ outcopy := filepath.Join(dir, "copy.o")
+
+	// On non-amd64, test only the host architecture and OS.
+ arches := []string{runtime.GOARCH}
+ goos0 := runtime.GOOS
+ if runtime.GOARCH == "amd64" { // Test many things with "linux" (wasm will get "js")
+ arches = []string{"arm", "arm64", "386", "amd64", "mips", "mips64", "ppc64le", "riscv64", "s390x", "wasm"}
+ goos0 = "linux"
+ }
+
+ for _, arch := range arches {
+ t.Run(arch, func(t *testing.T) {
+ goos := goos0
+ if arch == "wasm" {
+ goos = "js"
+ }
+ _, err := testCopy(t, dir, arch, goos, copy, outcopy)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := ioutil.ReadFile(filepath.Join(dir, "log", "opt", "x", "copy.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ want(t, slogged, `{"range":{"start":{"line":3,"character":2},"end":{"line":3,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ want(t, slogged, `{"range":{"start":{"line":9,"character":2},"end":{"line":9,"character":2}},"severity":3,"code":"copy","source":"go compiler","message":"128 bytes"}`)
+ wantN(t, slogged, `"code":"copy"`, 2)
+ })
+ }
+ })
+
+ // Some architectures don't fault on nil dereference, so nilchecks are eliminated differently.
+ // The N-way copy test also doesn't need to run N-ways N times.
+ if runtime.GOARCH != "amd64" {
+ return
+ }
+
+ t.Run("Success", func(t *testing.T) {
+ // This test is supposed to succeed
+
+ // Note 'file://' is the I-Know-What-I-Am-Doing way of specifying a file, also to deal with corner cases for Windows.
+ _, err := testLogOptDir(t, dir, "-json=0,file://log/opt", src, outfile)
+ if err != nil {
+ t.Error("-json=0,file://log/opt should have succeeded")
+ }
+ logged, err := ioutil.ReadFile(filepath.Join(dir, "log", "opt", "x", "file.json"))
+ if err != nil {
+ t.Error("-json=0,file://log/opt missing expected log file")
+ }
+ // All this delicacy with uriIfy and filepath.Join is to get this test to work right on Windows.
+ slogged := normalize(logged, string(uriIfy(dir)), string(uriIfy("tmpdir")))
+ t.Logf("%s", slogged)
+ // below shows proper nilcheck
+ want(t, slogged, `{"range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}},"severity":3,"code":"nilcheck","source":"go compiler","message":"",`+
+ `"relatedInformation":[{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"}]}`)
+ want(t, slogged, `{"range":{"start":{"line":11,"character":6},"end":{"line":11,"character":6}},"severity":3,"code":"isInBounds","source":"go compiler","message":""}`)
+ want(t, slogged, `{"range":{"start":{"line":7,"character":6},"end":{"line":7,"character":6}},"severity":3,"code":"canInlineFunction","source":"go compiler","message":"cost: 35"}`)
+ // escape analysis explanation
+ want(t, slogged, `{"range":{"start":{"line":7,"character":13},"end":{"line":7,"character":13}},"severity":3,"code":"leak","source":"go compiler","message":"parameter z leaks to ~r0 with derefs=0",`+
+ `"relatedInformation":[`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: y = z:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y := z (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: flow: ~R0 = y:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from y.b (dot of pointer)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":11},"end":{"line":4,"character":11}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from \u0026y.b (address-of)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":4,"character":9},"end":{"line":4,"character":9}}},"message":"inlineLoc"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":13},"end":{"line":9,"character":13}}},"message":"escflow: from ~R0 = \u0026y.b (assign-pair)"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: flow: ~r0 = ~R0:"},`+
+ `{"location":{"uri":"file://tmpdir/file.go","range":{"start":{"line":9,"character":3},"end":{"line":9,"character":3}}},"message":"escflow: from return ~R0 (return)"}]}`)
+ })
+}
+
+func testLogOpt(t *testing.T, flag, src, outfile string) (string, error) {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testLogOptDir(t *testing.T, dir, flag, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p", "x", flag, "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ cmd.Dir = dir
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
+
+func testCopy(t *testing.T, dir, goarch, goos, src, outfile string) (string, error) {
+ // Notice the specified import path "x"
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-p", "x", "-json=0,file://log/opt", "-o", outfile, src}
+ t.Log(run)
+ cmd := exec.Command(run[0], run[1:]...)
+ cmd.Dir = dir
+ cmd.Env = append(os.Environ(), "GOARCH="+goarch, "GOOS="+goos)
+ out, err := cmd.CombinedOutput()
+ t.Logf("%s", out)
+ return string(out), err
+}
diff --git a/src/cmd/compile/internal/mips/galign.go b/src/cmd/compile/internal/mips/galign.go
new file mode 100644
index 0000000..4e68970
--- /dev/null
+++ b/src/cmd/compile/internal/mips/galign.go
@@ -0,0 +1,27 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/mips"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &mips.Linkmips
+ if buildcfg.GOARCH == "mipsle" {
+ arch.LinkArch = &mips.Linkmipsle
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = (1 << 31) - 1
+ arch.SoftFloat = (buildcfg.GOMIPS == "softfloat")
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips/ggen.go b/src/cmd/compile/internal/mips/ggen.go
new file mode 100644
index 0000000..1a51252
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ggen.go
@@ -0,0 +1,55 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// TODO(mips): implement DUFFZERO
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ }
+ } else {
+ //fmt.Printf("zerorange frame:%v, lo: %v, hi:%v \n", frame ,lo, hi)
+ // ADD $(FIXED_FRAME+frame+lo-4), SP, r1
+ // ADD $cnt, r1, r2
+ // loop:
+ // MOVW R0, (Widthptr)r1
+ // ADD $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-4, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Append(p, mips.AMOVW, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, mips.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R0
+ return p
+}
diff --git a/src/cmd/compile/internal/mips/ssa.go b/src/cmd/compile/internal/mips/ssa.go
new file mode 100644
index 0000000..6326f96
--- /dev/null
+++ b/src/cmd/compile/internal/mips/ssa.go
@@ -0,0 +1,876 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is an FP register
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is HI or LO register
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPSMOVWreg:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVW
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVF
+ if t.Size() == 8 {
+ as = mips.AMOVD
+ }
+ }
+
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPSMOVWnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpMIPSADD,
+ ssa.OpMIPSSUB,
+ ssa.OpMIPSAND,
+ ssa.OpMIPSOR,
+ ssa.OpMIPSXOR,
+ ssa.OpMIPSNOR,
+ ssa.OpMIPSSLL,
+ ssa.OpMIPSSRL,
+ ssa.OpMIPSSRA,
+ ssa.OpMIPSADDF,
+ ssa.OpMIPSADDD,
+ ssa.OpMIPSSUBF,
+ ssa.OpMIPSSUBD,
+ ssa.OpMIPSMULF,
+ ssa.OpMIPSMULD,
+ ssa.OpMIPSDIVF,
+ ssa.OpMIPSDIVD,
+ ssa.OpMIPSMUL:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGT,
+ ssa.OpMIPSSGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSSGTzero,
+ ssa.OpMIPSSGTUzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSADDconst,
+ ssa.OpMIPSSUBconst,
+ ssa.OpMIPSANDconst,
+ ssa.OpMIPSORconst,
+ ssa.OpMIPSXORconst,
+ ssa.OpMIPSNORconst,
+ ssa.OpMIPSSLLconst,
+ ssa.OpMIPSSRLconst,
+ ssa.OpMIPSSRAconst,
+ ssa.OpMIPSSGTconst,
+ ssa.OpMIPSSGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMULT,
+ ssa.OpMIPSMULTU,
+ ssa.OpMIPSDIV,
+ ssa.OpMIPSDIVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPSMOVWconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPSMOVFconst,
+ ssa.OpMIPSMOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMOVZzero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSCMPEQF,
+ ssa.OpMIPSCMPEQD,
+ ssa.OpMIPSCMPGEF,
+ ssa.OpMIPSCMPGED,
+ ssa.OpMIPSCMPGTF,
+ ssa.OpMIPSCMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPSMOVWaddr:
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVW $sym+off(base), R
+ // the assembler expands it as the following:
+ // - base is SP: add constant offset to SP (R29)
+ // when constant is large, tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBload,
+ ssa.OpMIPSMOVBUload,
+ ssa.OpMIPSMOVHload,
+ ssa.OpMIPSMOVHUload,
+ ssa.OpMIPSMOVWload,
+ ssa.OpMIPSMOVFload,
+ ssa.OpMIPSMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSMOVBstore,
+ ssa.OpMIPSMOVHstore,
+ ssa.OpMIPSMOVWstore,
+ ssa.OpMIPSMOVFstore,
+ ssa.OpMIPSMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBstorezero,
+ ssa.OpMIPSMOVHstorezero,
+ ssa.OpMIPSMOVWstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPSMOVBreg,
+ ssa.OpMIPSMOVBUreg,
+ ssa.OpMIPSMOVHreg,
+ ssa.OpMIPSMOVHUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPSMOVWreg || a.Op == ssa.OpMIPSMOVWnop {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPSMOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPSMOVHUreg && t.Size() == 2 && !t.IsSigned():
+ // arg is a proper-typed load, already zero/sign-extended, don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPSMOVWF,
+ ssa.OpMIPSMOVWD,
+ ssa.OpMIPSTRUNCFW,
+ ssa.OpMIPSTRUNCDW,
+ ssa.OpMIPSMOVFD,
+ ssa.OpMIPSMOVDF,
+ ssa.OpMIPSNEGF,
+ ssa.OpMIPSNEGD,
+ ssa.OpMIPSSQRTF,
+ ssa.OpMIPSSQRTD,
+ ssa.OpMIPSCLZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSNEG:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredZero:
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p2)
+ case ssa.OpMIPSLoweredMove:
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ p6.To.SetTarget(p2)
+ case ssa.OpMIPSCALLstatic, ssa.OpMIPSCALLclosure, ssa.OpMIPSCALLinter:
+ s.Call(v)
+ case ssa.OpMIPSCALLtail:
+ s.TailCall(v)
+ case ssa.OpMIPSLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpMIPSLoweredPanicBoundsA, ssa.OpMIPSLoweredPanicBoundsB, ssa.OpMIPSLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredPanicExtendA, ssa.OpMIPSLoweredPanicExtendB, ssa.OpMIPSLoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+ case ssa.OpMIPSLoweredAtomicLoad8,
+ ssa.OpMIPSLoweredAtomicLoad32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicLoad8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicLoad32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStore8,
+ ssa.OpMIPSLoweredAtomicStore32:
+ s.Prog(mips.ASYNC)
+
+ var op obj.As
+ switch v.Op {
+ case ssa.OpMIPSLoweredAtomicStore8:
+ op = mips.AMOVB
+ case ssa.OpMIPSLoweredAtomicStore32:
+ op = mips.AMOVW
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicStorezero:
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicExchange:
+ // SYNC
+ // MOVW Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPSLoweredAtomicAdd:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAddconst:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU $auxInt, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU $auxInt, Rout
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ p1 := s.Prog(mips.AADDU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ p4 := s.Prog(mips.AADDU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+
+ case ssa.OpMIPSLoweredAtomicAnd,
+ ssa.OpMIPSLoweredAtomicOr:
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ s.Prog(mips.ASYNC)
+
+ p := s.Prog(mips.ALL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = mips.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ASC)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ s.Prog(mips.ASYNC)
+
+ case ssa.OpMIPSLoweredAtomicCas:
+ // MOVW $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ s.Prog(mips.ASYNC)
+
+ p1 := s.Prog(mips.ALL)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(mips.AMOVW)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+
+ p4 := s.Prog(mips.ASC)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p1)
+
+ s.Prog(mips.ASYNC)
+
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
+
+ case ssa.OpMIPSLoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPSFPFlagTrue,
+ ssa.OpMIPSFPFlagFalse:
+ // MOVW $1, r
+ // CMOVF R0, r
+
+ cmov := mips.ACMOVF
+ if v.Op == ssa.OpMIPSFPFlagFalse {
+ cmov = mips.ACMOVT
+ }
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p1 := s.Prog(cmov)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = mips.REGZERO
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg()
+
+ case ssa.OpMIPSLoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPSLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVW)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPSLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPSEQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPSNE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPSLTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPSGEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPSLEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPSGTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPSFPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPSFPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockMIPSEQ, ssa.BlockMIPSNE,
+ ssa.BlockMIPSLTZ, ssa.BlockMIPSGEZ,
+ ssa.BlockMIPSLEZ, ssa.BlockMIPSGTZ,
+ ssa.BlockMIPSFPT, ssa.BlockMIPSFPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
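
As a side note, the conditional-branch cases of ssaGenBlock above all follow the same fallthrough-aware pattern; the following standalone sketch (illustrative only, not compiler code) isolates that selection logic: emit the inverted branch when the first successor is the fallthrough block, the plain branch when the second one is, and a branch plus an unconditional jump otherwise.

package main

import "fmt"

// emitCond mimics the successor/fallthrough handling in ssaGenBlock:
// asm is the branch taken when the condition holds, invasm its inverse,
// and likely reports whether the first successor is the likely one.
func emitCond(asm, invasm, succ0, succ1, next string, likely bool) []string {
	switch next {
	case succ0:
		return []string{invasm + " " + succ1} // fall through to succ0
	case succ1:
		return []string{asm + " " + succ0} // fall through to succ1
	default:
		if likely {
			return []string{asm + " " + succ0, "JMP " + succ1}
		}
		return []string{invasm + " " + succ1, "JMP " + succ0}
	}
}

func main() {
	fmt.Println(emitCond("BEQ", "BNE", "then", "else", "then", true))   // [BNE else]
	fmt.Println(emitCond("BEQ", "BNE", "then", "else", "else", true))   // [BEQ then]
	fmt.Println(emitCond("BEQ", "BNE", "then", "else", "other", false)) // [BNE else JMP then]
}
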
diff --git a/src/cmd/compile/internal/mips64/galign.go b/src/cmd/compile/internal/mips64/galign.go
new file mode 100644
index 0000000..412bc71
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/galign.go
@@ -0,0 +1,28 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/mips"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &mips.Linkmips64
+ if buildcfg.GOARCH == "mips64le" {
+ arch.LinkArch = &mips.Linkmips64le
+ }
+ arch.REGSP = mips.REGSP
+ arch.MAXWIDTH = 1 << 50
+ arch.SoftFloat = buildcfg.GOMIPS64 == "softfloat"
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = func(s *ssagen.State, b *ssa.Block) {}
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/mips64/ggen.go b/src/cmd/compile/internal/mips64/ggen.go
new file mode 100644
index 0000000..37bb871
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ggen.go
@@ -0,0 +1,59 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGSP, 8+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ // ADDV $(8+frame+lo-8), SP, r1
+ // ADDV $cnt, r1, r2
+ // loop:
+ // MOVV R0, (Widthptr)r1
+ // ADDV $Widthptr, r1
+ // BNE r1, r2, loop
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, 8+off-8, obj.TYPE_REG, mips.REGRT1, 0)
+ p.Reg = mips.REGSP
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, mips.REGRT2, 0)
+ p.Reg = mips.REGRT1
+ p = pp.Append(p, mips.AMOVV, obj.TYPE_REG, mips.REGZERO, 0, obj.TYPE_MEM, mips.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, mips.AADDV, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, mips.REGRT1, 0)
+ p = pp.Append(p, mips.ABNE, obj.TYPE_REG, mips.REGRT1, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = mips.REGRT2
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(mips.ANOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R0
+ return p
+}
diff --git a/src/cmd/compile/internal/mips64/ssa.go b/src/cmd/compile/internal/mips64/ssa.go
new file mode 100644
index 0000000..6e12c6c
--- /dev/null
+++ b/src/cmd/compile/internal/mips64/ssa.go
@@ -0,0 +1,846 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package mips64
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/mips"
+)
+
+// isFPreg reports whether r is an FP register
+func isFPreg(r int16) bool {
+ return mips.REG_F0 <= r && r <= mips.REG_F31
+}
+
+// isHILO reports whether r is HI or LO register
+func isHILO(r int16) bool {
+ return r == mips.REG_HI || r == mips.REG_LO
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return mips.AMOVB
+ } else {
+ return mips.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return mips.AMOVH
+ } else {
+ return mips.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return mips.AMOVW
+ } else {
+ return mips.AMOVWU
+ }
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type, r int16) obj.As {
+ if isFPreg(r) {
+ if t.Size() == 4 { // float32 or int32
+ return mips.AMOVF
+ } else { // float64 or int64
+ return mips.AMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return mips.AMOVB
+ case 2:
+ return mips.AMOVH
+ case 4:
+ return mips.AMOVW
+ case 8:
+ return mips.AMOVV
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy, ssa.OpMIPS64MOVVreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x == y {
+ return
+ }
+ as := mips.AMOVV
+ if isFPreg(x) && isFPreg(y) {
+ as = mips.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ if isHILO(x) && isHILO(y) || isHILO(x) && isFPreg(y) || isFPreg(x) && isHILO(y) {
+ // cannot move between special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = y
+ }
+ case ssa.OpMIPS64MOVVnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Reg()
+ p := s.Prog(loadByType(v.Type, r))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isHILO(r) {
+ // cannot directly load, load to TMP and move
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ r := v.Args[0].Reg()
+ if isHILO(r) {
+ // cannot directly store, move to TMP and store
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ r = mips.REGTMP
+ }
+ p := s.Prog(storeByType(v.Type, r))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpMIPS64ADDV,
+ ssa.OpMIPS64SUBV,
+ ssa.OpMIPS64AND,
+ ssa.OpMIPS64OR,
+ ssa.OpMIPS64XOR,
+ ssa.OpMIPS64NOR,
+ ssa.OpMIPS64SLLV,
+ ssa.OpMIPS64SRLV,
+ ssa.OpMIPS64SRAV,
+ ssa.OpMIPS64ADDF,
+ ssa.OpMIPS64ADDD,
+ ssa.OpMIPS64SUBF,
+ ssa.OpMIPS64SUBD,
+ ssa.OpMIPS64MULF,
+ ssa.OpMIPS64MULD,
+ ssa.OpMIPS64DIVF,
+ ssa.OpMIPS64DIVD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64SGT,
+ ssa.OpMIPS64SGTU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64ADDVconst,
+ ssa.OpMIPS64SUBVconst,
+ ssa.OpMIPS64ANDconst,
+ ssa.OpMIPS64ORconst,
+ ssa.OpMIPS64XORconst,
+ ssa.OpMIPS64NORconst,
+ ssa.OpMIPS64SLLVconst,
+ ssa.OpMIPS64SRLVconst,
+ ssa.OpMIPS64SRAVconst,
+ ssa.OpMIPS64SGTconst,
+ ssa.OpMIPS64SGTUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MULV,
+ ssa.OpMIPS64MULVU,
+ ssa.OpMIPS64DIVV,
+ ssa.OpMIPS64DIVVU:
+ // result in hi,lo
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpMIPS64MOVVconst:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ if isFPreg(r) || isHILO(r) {
+ // cannot move into FP or special registers, use TMP as intermediate
+ p.To.Reg = mips.REGTMP
+ p = s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+ case ssa.OpMIPS64MOVFconst,
+ ssa.OpMIPS64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64CMPEQF,
+ ssa.OpMIPS64CMPEQD,
+ ssa.OpMIPS64CMPGEF,
+ ssa.OpMIPS64CMPGED,
+ ssa.OpMIPS64CMPGTF,
+ ssa.OpMIPS64CMPGTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = v.Args[1].Reg()
+ case ssa.OpMIPS64MOVVaddr:
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ var wantreg string
+ // MOVV $sym+off(base), R
+ // the assembler expands it as follows:
+ // - base is SP: add constant offset to SP (R29);
+ //   when the constant is large, the tmp register (R23) may be used
+ // - base is SB: load external address with relocation
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVV $off(SP), R
+ wantreg = "SP"
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBload,
+ ssa.OpMIPS64MOVBUload,
+ ssa.OpMIPS64MOVHload,
+ ssa.OpMIPS64MOVHUload,
+ ssa.OpMIPS64MOVWload,
+ ssa.OpMIPS64MOVWUload,
+ ssa.OpMIPS64MOVVload,
+ ssa.OpMIPS64MOVFload,
+ ssa.OpMIPS64MOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64MOVBstore,
+ ssa.OpMIPS64MOVHstore,
+ ssa.OpMIPS64MOVWstore,
+ ssa.OpMIPS64MOVVstore,
+ ssa.OpMIPS64MOVFstore,
+ ssa.OpMIPS64MOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBstorezero,
+ ssa.OpMIPS64MOVHstorezero,
+ ssa.OpMIPS64MOVWstorezero,
+ ssa.OpMIPS64MOVVstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpMIPS64MOVBreg,
+ ssa.OpMIPS64MOVBUreg,
+ ssa.OpMIPS64MOVHreg,
+ ssa.OpMIPS64MOVHUreg,
+ ssa.OpMIPS64MOVWreg,
+ ssa.OpMIPS64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpMIPS64MOVVreg {
+ a = a.Args[0]
+ }
+ if a.Op == ssa.OpLoadReg && mips.REG_R0 <= a.Reg() && a.Reg() <= mips.REG_R31 {
+ // LoadReg from a narrower type does an extension, except loading
+ // to a floating point register. So only eliminate the extension
+ // if it is loaded to an integer register.
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpMIPS64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpMIPS64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a properly typed load that is already zero/sign-extended; don't extend again
+ if v.Reg() == v.Args[0].Reg() {
+ return
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ return
+ default:
+ }
+ }
+ fallthrough
+ case ssa.OpMIPS64MOVWF,
+ ssa.OpMIPS64MOVWD,
+ ssa.OpMIPS64TRUNCFW,
+ ssa.OpMIPS64TRUNCDW,
+ ssa.OpMIPS64MOVVF,
+ ssa.OpMIPS64MOVVD,
+ ssa.OpMIPS64TRUNCFV,
+ ssa.OpMIPS64TRUNCDV,
+ ssa.OpMIPS64MOVFD,
+ ssa.OpMIPS64MOVDF,
+ ssa.OpMIPS64NEGF,
+ ssa.OpMIPS64NEGD,
+ ssa.OpMIPS64SQRTF,
+ ssa.OpMIPS64SQRTD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64NEGV:
+ // SUB from REGZERO
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64DUFFZERO:
+ // runtime.duffzero expects start address - 8 in R1
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 8
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p = s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredZero:
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ // arg1 is the address of the last element to zero
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
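+ // sz and mov now hold the widest store that divides v.AuxInt, so every
+ // store in the loop below stays properly aligned.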
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGZERO
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = mips.REG_R1
+ p2.To.Offset = sz
+ p3 := s.Prog(mips.AADDVU)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = mips.REG_R1
+ p4 := s.Prog(mips.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = mips.REG_R1
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p2)
+ case ssa.OpMIPS64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+ case ssa.OpMIPS64LoweredMove:
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ // arg2 is the address of the last element of src
+ var sz int64
+ var mov obj.As
+ switch {
+ case v.AuxInt%8 == 0:
+ sz = 8
+ mov = mips.AMOVV
+ case v.AuxInt%4 == 0:
+ sz = 4
+ mov = mips.AMOVW
+ case v.AuxInt%2 == 0:
+ sz = 2
+ mov = mips.AMOVH
+ default:
+ sz = 1
+ mov = mips.AMOVB
+ }
+ p := s.Prog(mips.ASUBVU)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = sz
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REG_R1
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_MEM
+ p2.From.Reg = mips.REG_R1
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = mips.REGTMP
+ p3 := s.Prog(mov)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = mips.REG_R2
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = mips.REG_R1
+ p5 := s.Prog(mips.AADDVU)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = sz
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = mips.REG_R2
+ p6 := s.Prog(mips.ABNE)
+ p6.From.Type = obj.TYPE_REG
+ p6.From.Reg = v.Args[2].Reg()
+ p6.Reg = mips.REG_R1
+ p6.To.Type = obj.TYPE_BRANCH
+ p6.To.SetTarget(p2)
+ case ssa.OpMIPS64CALLstatic, ssa.OpMIPS64CALLclosure, ssa.OpMIPS64CALLinter:
+ s.Call(v)
+ case ssa.OpMIPS64CALLtail:
+ s.TailCall(v)
+ case ssa.OpMIPS64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpMIPS64LoweredPanicBoundsA, ssa.OpMIPS64LoweredPanicBoundsB, ssa.OpMIPS64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpMIPS64LoweredAtomicLoad8, ssa.OpMIPS64LoweredAtomicLoad32, ssa.OpMIPS64LoweredAtomicLoad64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicLoad8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicLoad32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStore8, ssa.OpMIPS64LoweredAtomicStore32, ssa.OpMIPS64LoweredAtomicStore64:
+ as := mips.AMOVV
+ switch v.Op {
+ case ssa.OpMIPS64LoweredAtomicStore8:
+ as = mips.AMOVB
+ case ssa.OpMIPS64LoweredAtomicStore32:
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicStorezero32, ssa.OpMIPS64LoweredAtomicStorezero64:
+ as := mips.AMOVV
+ if v.Op == ssa.OpMIPS64LoweredAtomicStorezero32 {
+ as = mips.AMOVW
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicExchange32, ssa.OpMIPS64LoweredAtomicExchange64:
+ // SYNC
+ // MOVV Rarg1, Rtmp
+ // LL (Rarg0), Rout
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicExchange32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ case ssa.OpMIPS64LoweredAtomicAdd32, ssa.OpMIPS64LoweredAtomicAdd64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAdd32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = v.Args[1].Reg()
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Args[1].Reg()
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicAddconst32, ssa.OpMIPS64LoweredAtomicAddconst64:
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV $auxint, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV $auxint, Rout
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicAddconst32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ s.Prog(mips.ASYNC)
+ p := s.Prog(ll)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(mips.AADDVU)
+ p1.From.Type = obj.TYPE_CONST
+ p1.From.Offset = v.AuxInt
+ p1.Reg = v.Reg0()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(sc)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+ p3 := s.Prog(mips.ABEQ)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = mips.REGTMP
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+ s.Prog(mips.ASYNC)
+ p4 := s.Prog(mips.AADDVU)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = v.AuxInt
+ p4.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Reg0()
+ case ssa.OpMIPS64LoweredAtomicCas32, ssa.OpMIPS64LoweredAtomicCas64:
+ // MOVV $0, Rout
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
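+ // On a value mismatch the BNE branches past the store, leaving Rout at 0
+ // (failure); a failed SC branches back to the LL to retry.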
+ ll := mips.ALLV
+ sc := mips.ASCV
+ if v.Op == ssa.OpMIPS64LoweredAtomicCas32 {
+ ll = mips.ALL
+ sc = mips.ASC
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(mips.ASYNC)
+ p1 := s.Prog(ll)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = v.Args[0].Reg()
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = mips.REGTMP
+ p2 := s.Prog(mips.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Args[1].Reg()
+ p2.Reg = mips.REGTMP
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[2].Reg()
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg0()
+ p4 := s.Prog(sc)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = v.Reg0()
+ p4.To.Type = obj.TYPE_MEM
+ p4.To.Reg = v.Args[0].Reg()
+ p5 := s.Prog(mips.ABEQ)
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Reg0()
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.To.SetTarget(p1)
+ p6 := s.Prog(mips.ASYNC)
+ p2.To.SetTarget(p6)
+ case ssa.OpMIPS64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(mips.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = mips.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpMIPS64FPFlagTrue,
+ ssa.OpMIPS64FPFlagFalse:
+ // MOVV $0, r
+ // BFPF 2(PC)
+ // MOVV $1, r
+ branch := mips.ABFPF
+ if v.Op == ssa.OpMIPS64FPFlagFalse {
+ branch = mips.ABFPT
+ }
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p2 := s.Prog(branch)
+ p2.To.Type = obj.TYPE_BRANCH
+ p3 := s.Prog(mips.AMOVV)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = 1
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Reg()
+ p4 := s.Prog(obj.ANOP) // not a machine instruction, for branch to land
+ p2.To.SetTarget(p4)
+ case ssa.OpMIPS64LoweredGetClosurePtr:
+ // Closure pointer is R22 (mips.REGCTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpMIPS64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(mips.AMOVV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpMIPS64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
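+// blockJump maps each conditional block kind to its branch instruction and
+// the inverted branch used when the first successor is the fallthrough block.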
+var blockJump = map[ssa.BlockKind]struct {
+ asm, invasm obj.As
+}{
+ ssa.BlockMIPS64EQ: {mips.ABEQ, mips.ABNE},
+ ssa.BlockMIPS64NE: {mips.ABNE, mips.ABEQ},
+ ssa.BlockMIPS64LTZ: {mips.ABLTZ, mips.ABGEZ},
+ ssa.BlockMIPS64GEZ: {mips.ABGEZ, mips.ABLTZ},
+ ssa.BlockMIPS64LEZ: {mips.ABLEZ, mips.ABGTZ},
+ ssa.BlockMIPS64GTZ: {mips.ABGTZ, mips.ABLEZ},
+ ssa.BlockMIPS64FPT: {mips.ABFPT, mips.ABFPF},
+ ssa.BlockMIPS64FPF: {mips.ABFPF, mips.ABFPT},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in R1:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(mips.ABNE)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = mips.REGZERO
+ p.Reg = mips.REG_R1
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockMIPS64EQ, ssa.BlockMIPS64NE,
+ ssa.BlockMIPS64LTZ, ssa.BlockMIPS64GEZ,
+ ssa.BlockMIPS64LEZ, ssa.BlockMIPS64GTZ,
+ ssa.BlockMIPS64FPT, ssa.BlockMIPS64FPF:
+ jmp := blockJump[b.Kind]
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ if !b.Controls[0].Type.IsFlags() {
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = b.Controls[0].Reg()
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/noder/codes.go b/src/cmd/compile/internal/noder/codes.go
new file mode 100644
index 0000000..f8cb772
--- /dev/null
+++ b/src/cmd/compile/internal/noder/codes.go
@@ -0,0 +1,124 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+type code interface {
+ marker() syncMarker
+ value() int
+}
+
+type codeVal int
+
+func (c codeVal) marker() syncMarker { return syncVal }
+func (c codeVal) value() int { return int(c) }
+
+const (
+ valBool codeVal = iota
+ valString
+ valInt64
+ valBigInt
+ valBigRat
+ valBigFloat
+)
+
+type codeType int
+
+func (c codeType) marker() syncMarker { return syncType }
+func (c codeType) value() int { return int(c) }
+
+const (
+ typeBasic codeType = iota
+ typeNamed
+ typePointer
+ typeSlice
+ typeArray
+ typeChan
+ typeMap
+ typeSignature
+ typeStruct
+ typeInterface
+ typeUnion
+ typeTypeParam
+)
+
+type codeObj int
+
+func (c codeObj) marker() syncMarker { return syncCodeObj }
+func (c codeObj) value() int { return int(c) }
+
+const (
+ objAlias codeObj = iota
+ objConst
+ objType
+ objFunc
+ objVar
+ objStub
+)
+
+type codeStmt int
+
+func (c codeStmt) marker() syncMarker { return syncStmt1 }
+func (c codeStmt) value() int { return int(c) }
+
+const (
+ stmtEnd codeStmt = iota
+ stmtLabel
+ stmtBlock
+ stmtExpr
+ stmtSend
+ stmtAssign
+ stmtAssignOp
+ stmtIncDec
+ stmtBranch
+ stmtCall
+ stmtReturn
+ stmtIf
+ stmtFor
+ stmtSwitch
+ stmtSelect
+
+ // TODO(mdempsky): Remove after we don't care about toolstash -cmp.
+ stmtTypeDeclHack
+)
+
+type codeExpr int
+
+func (c codeExpr) marker() syncMarker { return syncExpr }
+func (c codeExpr) value() int { return int(c) }
+
+// TODO(mdempsky): Split expr into addr, for lvalues.
+const (
+ exprNone codeExpr = iota
+ exprConst
+ exprType // type expression
+ exprLocal // local variable
+ exprName // global variable or function
+ exprBlank
+ exprCompLit
+ exprFuncLit
+ exprSelector
+ exprIndex
+ exprSlice
+ exprAssert
+ exprUnaryOp
+ exprBinaryOp
+ exprCall
+ exprConvert
+)
+
+type codeDecl int
+
+func (c codeDecl) marker() syncMarker { return syncDecl }
+func (c codeDecl) value() int { return int(c) }
+
+const (
+ declEnd codeDecl = iota
+ declFunc
+ declMethod
+ declVar
+ declOther
+)
diff --git a/src/cmd/compile/internal/noder/decl.go b/src/cmd/compile/internal/noder/decl.go
new file mode 100644
index 0000000..f985648
--- /dev/null
+++ b/src/cmd/compile/internal/noder/decl.go
@@ -0,0 +1,358 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+)
+
+// TODO(mdempsky): Skip blank declarations? Probably only safe
+// for declarations without pragmas.
+
+func (g *irgen) decls(res *ir.Nodes, decls []syntax.Decl) {
+ for _, decl := range decls {
+ switch decl := decl.(type) {
+ case *syntax.ConstDecl:
+ g.constDecl(res, decl)
+ case *syntax.FuncDecl:
+ g.funcDecl(res, decl)
+ case *syntax.TypeDecl:
+ if ir.CurFunc == nil {
+ continue // already handled in irgen.generate
+ }
+ g.typeDecl(res, decl)
+ case *syntax.VarDecl:
+ g.varDecl(res, decl)
+ default:
+ g.unhandled("declaration", decl)
+ }
+ }
+}
+
+func (g *irgen) importDecl(p *noder, decl *syntax.ImportDecl) {
+ g.pragmaFlags(decl.Pragma, 0)
+
+ // Get the imported package's path, as resolved already by types2
+ // and gcimporter. This is the same path as would be computed by
+ // parseImportPath.
+ switch pkgNameOf(g.info, decl).Imported().Path() {
+ case "unsafe":
+ p.importedUnsafe = true
+ case "embed":
+ p.importedEmbed = true
+ }
+}
+
+// pkgNameOf returns the PkgName associated with the given ImportDecl.
+func pkgNameOf(info *types2.Info, decl *syntax.ImportDecl) *types2.PkgName {
+ if name := decl.LocalPkgName; name != nil {
+ return info.Defs[name].(*types2.PkgName)
+ }
+ return info.Implicits[decl].(*types2.PkgName)
+}
+
+func (g *irgen) constDecl(out *ir.Nodes, decl *syntax.ConstDecl) {
+ g.pragmaFlags(decl.Pragma, 0)
+
+ for _, name := range decl.NameList {
+ name, obj := g.def(name)
+
+ // For untyped numeric constants, make sure the value
+ // representation matches what the rest of the
+ // compiler (really just iexport) expects.
+ // TODO(mdempsky): Revisit after #43891 is resolved.
+ val := obj.(*types2.Const).Val()
+ switch name.Type() {
+ case types.UntypedInt, types.UntypedRune:
+ val = constant.ToInt(val)
+ case types.UntypedFloat:
+ val = constant.ToFloat(val)
+ case types.UntypedComplex:
+ val = constant.ToComplex(val)
+ }
+ name.SetVal(val)
+
+ out.Append(ir.NewDecl(g.pos(decl), ir.ODCLCONST, name))
+ }
+}
+
+func (g *irgen) funcDecl(out *ir.Nodes, decl *syntax.FuncDecl) {
+ assert(g.curDecl == "")
+ // Set g.curDecl to the function name, as context for the type params declared
+ // during types2-to-types1 translation if this is a generic function.
+ g.curDecl = decl.Name.Value
+ obj2 := g.info.Defs[decl.Name]
+ recv := types2.AsSignature(obj2.Type()).Recv()
+ if recv != nil {
+ t2 := deref2(recv.Type())
+ // This is a method, so set g.curDecl to recvTypeName.methName instead.
+ g.curDecl = t2.(*types2.Named).Obj().Name() + "." + g.curDecl
+ }
+
+ fn := ir.NewFunc(g.pos(decl))
+ fn.Nname, _ = g.def(decl.Name)
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+
+ fn.Pragma = g.pragmaFlags(decl.Pragma, funcPragmas)
+ if fn.Pragma&ir.Systemstack != 0 && fn.Pragma&ir.Nosplit != 0 {
+ base.ErrorfAt(fn.Pos(), "go:nosplit and go:systemstack cannot be combined")
+ }
+ if fn.Pragma&ir.Nointerface != 0 {
+ // Propagate //go:nointerface from Func.Pragma to Field.Nointerface.
+ // This is a bit roundabout, but this is the earliest point where we've
+ // processed the function's pragma flags, and we've also already created
+ // the Fields to represent the receiver's method set.
+ if recv := fn.Type().Recv(); recv != nil {
+ typ := types.ReceiverBaseType(recv.Type)
+ if orig := typ.OrigType(); orig != nil {
+ // For a generic method, we mark the methods on the
+ // base generic type, since those are the methods
+ // that will be stenciled.
+ typ = orig
+ }
+ meth := typecheck.Lookdot1(fn, typecheck.Lookup(decl.Name.Value), typ, typ.Methods(), 0)
+ meth.SetNointerface(true)
+ }
+ }
+
+ if decl.Body != nil && fn.Pragma&ir.Noescape != 0 {
+ base.ErrorfAt(fn.Pos(), "can only use //go:noescape with external func implementations")
+ }
+
+ if decl.Name.Value == "init" && decl.Recv == nil {
+ g.target.Inits = append(g.target.Inits, fn)
+ }
+
+ saveHaveEmbed := g.haveEmbed
+ saveCurDecl := g.curDecl
+ g.curDecl = ""
+ g.later(func() {
+ defer func(b bool, s string) {
+ // Revert haveEmbed and curDecl back to what they were before
+ // the "later" function.
+ g.haveEmbed = b
+ g.curDecl = s
+ }(g.haveEmbed, g.curDecl)
+
+ // Set haveEmbed and curDecl to what they were for this funcDecl.
+ g.haveEmbed = saveHaveEmbed
+ g.curDecl = saveCurDecl
+ if fn.Type().HasTParam() {
+ g.topFuncIsGeneric = true
+ }
+ g.funcBody(fn, decl.Recv, decl.Type, decl.Body)
+ g.topFuncIsGeneric = false
+ if fn.Type().HasTParam() && fn.Body != nil {
+ // Set pointers to the dcls/body of a generic function/method in
+ // the Inl struct, so it is marked for export, is available for
+ // stenciling, and works with Inline_Flood().
+ fn.Inl = &ir.Inline{
+ Cost: 1,
+ Dcl: fn.Dcl,
+ Body: fn.Body,
+ }
+ }
+
+ out.Append(fn)
+ })
+}
+
+func (g *irgen) typeDecl(out *ir.Nodes, decl *syntax.TypeDecl) {
+ // Set the position for any error messages we might print (e.g. too large types).
+ base.Pos = g.pos(decl)
+ assert(ir.CurFunc != nil || g.curDecl == "")
+ // Set g.curDecl to the type name, as context for the type params declared
+ // during types2-to-types1 translation if this is a generic type.
+ saveCurDecl := g.curDecl
+ g.curDecl = decl.Name.Value
+ if decl.Alias {
+ name, _ := g.def(decl.Name)
+ g.pragmaFlags(decl.Pragma, 0)
+ assert(name.Alias()) // should be set by irgen.obj
+
+ out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
+ g.curDecl = ""
+ return
+ }
+
+ // Prevent size calculations until we set the underlying type.
+ types.DeferCheckSize()
+
+ name, obj := g.def(decl.Name)
+ ntyp, otyp := name.Type(), obj.Type()
+ if ir.CurFunc != nil {
+ ntyp.SetVargen()
+ }
+
+ pragmas := g.pragmaFlags(decl.Pragma, typePragmas)
+ name.SetPragma(pragmas) // TODO(mdempsky): Is this still needed?
+
+ if pragmas&ir.NotInHeap != 0 {
+ ntyp.SetNotInHeap(true)
+ }
+
+ // We need to use g.typeExpr(decl.Type) here to ensure that for
+ // chained, defined-type declarations like:
+ //
+ // type T U
+ //
+ // //go:notinheap
+ // type U struct { … }
+ //
+ // we mark both T and U as NotInHeap. If we instead used just
+ // g.typ(otyp.Underlying()), then we'd instead set T's underlying
+ // type directly to the struct type (which is not marked NotInHeap)
+ // and fail to mark T as NotInHeap.
+ //
+ // Also, we rely here on Type.SetUnderlying allowing passing a
+ // defined type and handling forward references like from T to U
+ // above. Contrast with go/types's Named.SetUnderlying, which
+ // disallows this.
+ //
+ // [mdempsky: Subtleties like these are why I always vehemently
+ // object to new type pragmas.]
+ ntyp.SetUnderlying(g.typeExpr(decl.Type))
+
+ tparams := otyp.(*types2.Named).TypeParams()
+ if n := tparams.Len(); n > 0 {
+ rparams := make([]*types.Type, n)
+ for i := range rparams {
+ rparams[i] = g.typ(tparams.At(i))
+ }
+ // This will set the hasTParam flag if any rparams are not concrete types.
+ ntyp.SetRParams(rparams)
+ }
+ types.ResumeCheckSize()
+
+ g.curDecl = saveCurDecl
+ if otyp, ok := otyp.(*types2.Named); ok && otyp.NumMethods() != 0 {
+ methods := make([]*types.Field, otyp.NumMethods())
+ for i := range methods {
+ m := otyp.Method(i)
+ // Set g.curDecl to recvTypeName.methName, as context for the
+ // method-specific type params in the receiver.
+ g.curDecl = decl.Name.Value + "." + m.Name()
+ meth := g.obj(m)
+ methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
+ methods[i].Nname = meth
+ g.curDecl = ""
+ }
+ ntyp.Methods().Set(methods)
+ }
+
+ out.Append(ir.NewDecl(g.pos(decl), ir.ODCLTYPE, name))
+}
+
+func (g *irgen) varDecl(out *ir.Nodes, decl *syntax.VarDecl) {
+ pos := g.pos(decl)
+ // Set the position for any error messages we might print (e.g. too large types).
+ base.Pos = pos
+ names := make([]*ir.Name, len(decl.NameList))
+ for i, name := range decl.NameList {
+ names[i], _ = g.def(name)
+ }
+
+ if decl.Pragma != nil {
+ pragma := decl.Pragma.(*pragmas)
+ varEmbed(g.makeXPos, names[0], decl, pragma, g.haveEmbed)
+ g.reportUnused(pragma)
+ }
+
+ haveEmbed := g.haveEmbed
+ do := func() {
+ defer func(b bool) { g.haveEmbed = b }(g.haveEmbed)
+
+ g.haveEmbed = haveEmbed
+ values := g.exprList(decl.Values)
+
+ var as2 *ir.AssignListStmt
+ if len(values) != 0 && len(names) != len(values) {
+ as2 = ir.NewAssignListStmt(pos, ir.OAS2, make([]ir.Node, len(names)), values)
+ }
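+ // as2 is non-nil only for n:1 initializations such as "var x, y = f()",
+ // where a single multi-value expression initializes several names.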
+
+ for i, name := range names {
+ if ir.CurFunc != nil {
+ out.Append(ir.NewDecl(pos, ir.ODCL, name))
+ }
+ if as2 != nil {
+ as2.Lhs[i] = name
+ name.Defn = as2
+ } else {
+ as := ir.NewAssignStmt(pos, name, nil)
+ if len(values) != 0 {
+ as.Y = values[i]
+ name.Defn = as
+ } else if ir.CurFunc == nil {
+ name.Defn = as
+ }
+ if !g.delayTransform() {
+ lhs := []ir.Node{as.X}
+ rhs := []ir.Node{}
+ if as.Y != nil {
+ rhs = []ir.Node{as.Y}
+ }
+ transformAssign(as, lhs, rhs)
+ as.X = lhs[0]
+ if as.Y != nil {
+ as.Y = rhs[0]
+ }
+ }
+ as.SetTypecheck(1)
+ out.Append(as)
+ }
+ }
+ if as2 != nil {
+ if !g.delayTransform() {
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+ }
+ as2.SetTypecheck(1)
+ out.Append(as2)
+ }
+ }
+
+ // If we're within a function, we need to process the assignment
+ // part of the variable declaration right away. Otherwise, we leave
+ // it to be handled after all top-level declarations are processed.
+ if ir.CurFunc != nil {
+ do()
+ } else {
+ g.later(do)
+ }
+}
+
+// pragmaFlags returns any specified pragma flags included in allowed,
+// and reports errors about any other, unexpected pragmas.
+func (g *irgen) pragmaFlags(pragma syntax.Pragma, allowed ir.PragmaFlag) ir.PragmaFlag {
+ if pragma == nil {
+ return 0
+ }
+ p := pragma.(*pragmas)
+ present := p.Flag & allowed
+ p.Flag &^= allowed
+ g.reportUnused(p)
+ return present
+}
+
+// reportUnused reports errors about any unused pragmas.
+func (g *irgen) reportUnused(pragma *pragmas) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ base.ErrorfAt(g.makeXPos(pos.Pos), "misplaced compiler directive")
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ base.ErrorfAt(g.makeXPos(e.Pos), "misplaced go:embed directive")
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/noder/decoder.go b/src/cmd/compile/internal/noder/decoder.go
new file mode 100644
index 0000000..2c18727
--- /dev/null
+++ b/src/cmd/compile/internal/noder/decoder.go
@@ -0,0 +1,302 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math/big"
+ "os"
+ "runtime"
+ "strings"
+
+ "cmd/compile/internal/base"
+)
+
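+// pkgDecoder reads the flat format written by pkgEncoder.dump: numRelocs
+// little-endian uint32 section-end counts, then one uint32 end offset per
+// element, then the concatenated element payloads.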
+type pkgDecoder struct {
+ pkgPath string
+
+ elemEndsEnds [numRelocs]uint32
+ elemEnds []uint32
+ elemData string
+}
+
+func newPkgDecoder(pkgPath, input string) pkgDecoder {
+ pr := pkgDecoder{
+ pkgPath: pkgPath,
+ }
+
+ // TODO(mdempsky): Implement direct indexing of input string to
+ // avoid copying the position information.
+
+ r := strings.NewReader(input)
+
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEndsEnds[:]) == nil)
+
+ pr.elemEnds = make([]uint32, pr.elemEndsEnds[len(pr.elemEndsEnds)-1])
+ assert(binary.Read(r, binary.LittleEndian, pr.elemEnds[:]) == nil)
+
+ pos, err := r.Seek(0, os.SEEK_CUR)
+ assert(err == nil)
+
+ pr.elemData = input[pos:]
+ assert(len(pr.elemData) == int(pr.elemEnds[len(pr.elemEnds)-1]))
+
+ return pr
+}
+
+func (pr *pkgDecoder) numElems(k reloc) int {
+ count := int(pr.elemEndsEnds[k])
+ if k > 0 {
+ count -= int(pr.elemEndsEnds[k-1])
+ }
+ return count
+}
+
+func (pr *pkgDecoder) totalElems() int {
+ return len(pr.elemEnds)
+}
+
+func (pr *pkgDecoder) absIdx(k reloc, idx int) int {
+ absIdx := idx
+ if k > 0 {
+ absIdx += int(pr.elemEndsEnds[k-1])
+ }
+ if absIdx >= int(pr.elemEndsEnds[k]) {
+ base.Fatalf("%v:%v is out of bounds; %v", k, idx, pr.elemEndsEnds)
+ }
+ return absIdx
+}
+
+func (pr *pkgDecoder) dataIdx(k reloc, idx int) string {
+ absIdx := pr.absIdx(k, idx)
+
+ var start uint32
+ if absIdx > 0 {
+ start = pr.elemEnds[absIdx-1]
+ }
+ end := pr.elemEnds[absIdx]
+
+ return pr.elemData[start:end]
+}
+
+func (pr *pkgDecoder) stringIdx(idx int) string {
+ return pr.dataIdx(relocString, idx)
+}
+
+func (pr *pkgDecoder) newDecoder(k reloc, idx int, marker syncMarker) decoder {
+ r := pr.newDecoderRaw(k, idx)
+ r.sync(marker)
+ return r
+}
+
+func (pr *pkgDecoder) newDecoderRaw(k reloc, idx int) decoder {
+ r := decoder{
+ common: pr,
+ k: k,
+ idx: idx,
+ }
+
+ // TODO(mdempsky) r.data.Reset(...) after #44505 is resolved.
+ r.data = *strings.NewReader(pr.dataIdx(k, idx))
+
+ r.sync(syncRelocs)
+ r.relocs = make([]relocEnt, r.len())
+ for i := range r.relocs {
+ r.sync(syncReloc)
+ r.relocs[i] = relocEnt{reloc(r.len()), r.len()}
+ }
+
+ return r
+}
+
+type decoder struct {
+ common *pkgDecoder
+
+ relocs []relocEnt
+ data strings.Reader
+
+ k reloc
+ idx int
+}
+
+func (r *decoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (r *decoder) rawUvarint() uint64 {
+ x, err := binary.ReadUvarint(&r.data)
+ r.checkErr(err)
+ return x
+}
+
+func (r *decoder) rawVarint() int64 {
+ ux := r.rawUvarint()
+
+ // Zig-zag decode.
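+ // e.g. 0 -> 0, 1 -> -1, 2 -> 1, 3 -> -2, so small magnitudes stay compact.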
+ x := int64(ux >> 1)
+ if ux&1 != 0 {
+ x = ^x
+ }
+ return x
+}
+
+func (r *decoder) rawReloc(k reloc, idx int) int {
+ e := r.relocs[idx]
+ assert(e.kind == k)
+ return e.idx
+}
+
+func (r *decoder) sync(mWant syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ pos, _ := r.data.Seek(0, os.SEEK_CUR) // TODO(mdempsky): io.SeekCurrent after #44505 is resolved
+ mHave := syncMarker(r.rawUvarint())
+ writerPCs := make([]int, r.rawUvarint())
+ for i := range writerPCs {
+ writerPCs[i] = int(r.rawUvarint())
+ }
+
+ if mHave == mWant {
+ return
+ }
+
+ // There's some tension here between printing:
+ //
+ // (1) full file paths that tools can recognize (e.g., so emacs
+ // hyperlinks the "file:line" text for easy navigation), or
+ //
+ // (2) short file paths that are easier for humans to read (e.g., by
+ // omitting redundant or irrelevant details, so it's easier to
+ // focus on the useful bits that remain).
+ //
+ // The current formatting favors the former, as it seems more
+ // helpful in practice. But perhaps the formatting could be improved
+ // to better address both concerns. For example, use relative file
+ // paths if they would be shorter, or rewrite file paths to contain
+ // "$GOROOT" (like objabi.AbsFile does) if tools can be taught how
+ // to reliably expand that again.
+
+ fmt.Printf("export data desync: package %q, section %v, index %v, offset %v\n", r.common.pkgPath, r.k, r.idx, pos)
+
+ fmt.Printf("\nfound %v, written at:\n", mHave)
+ if len(writerPCs) == 0 {
+ fmt.Printf("\t[stack trace unavailable; recompile package %q with -d=syncframes]\n", r.common.pkgPath)
+ }
+ for _, pc := range writerPCs {
+ fmt.Printf("\t%s\n", r.common.stringIdx(r.rawReloc(relocString, pc)))
+ }
+
+ fmt.Printf("\nexpected %v, reading at:\n", mWant)
+ var readerPCs [32]uintptr // TODO(mdempsky): Dynamically size?
+ n := runtime.Callers(2, readerPCs[:])
+ for _, pc := range fmtFrames(readerPCs[:n]...) {
+ fmt.Printf("\t%s\n", pc)
+ }
+
+ // We already printed a stack trace for the reader, so now we can
+ // simply exit. Printing a second one with panic or base.Fatalf
+ // would just be noise.
+ os.Exit(1)
+}
+
+func (r *decoder) bool() bool {
+ r.sync(syncBool)
+ x, err := r.data.ReadByte()
+ r.checkErr(err)
+ assert(x < 2)
+ return x != 0
+}
+
+func (r *decoder) int64() int64 {
+ r.sync(syncInt64)
+ return r.rawVarint()
+}
+
+func (r *decoder) uint64() uint64 {
+ r.sync(syncUint64)
+ return r.rawUvarint()
+}
+
+func (r *decoder) len() int { x := r.uint64(); v := int(x); assert(uint64(v) == x); return v }
+func (r *decoder) int() int { x := r.int64(); v := int(x); assert(int64(v) == x); return v }
+func (r *decoder) uint() uint { x := r.uint64(); v := uint(x); assert(uint64(v) == x); return v }
+
+func (r *decoder) code(mark syncMarker) int {
+ r.sync(mark)
+ return r.len()
+}
+
+func (r *decoder) reloc(k reloc) int {
+ r.sync(syncUseReloc)
+ return r.rawReloc(k, r.len())
+}
+
+func (r *decoder) string() string {
+ r.sync(syncString)
+ return r.common.stringIdx(r.reloc(relocString))
+}
+
+func (r *decoder) strings() []string {
+ res := make([]string, r.len())
+ for i := range res {
+ res[i] = r.string()
+ }
+ return res
+}
+
+func (r *decoder) value() constant.Value {
+ r.sync(syncValue)
+ isComplex := r.bool()
+ val := r.scalar()
+ if isComplex {
+ val = constant.BinaryOp(val, token.ADD, constant.MakeImag(r.scalar()))
+ }
+ return val
+}
+
+func (r *decoder) scalar() constant.Value {
+ switch tag := codeVal(r.code(syncVal)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected scalar tag: %v", tag))
+
+ case valBool:
+ return constant.MakeBool(r.bool())
+ case valString:
+ return constant.MakeString(r.string())
+ case valInt64:
+ return constant.MakeInt64(r.int64())
+ case valBigInt:
+ return constant.Make(r.bigInt())
+ case valBigRat:
+ num := r.bigInt()
+ denom := r.bigInt()
+ return constant.Make(new(big.Rat).SetFrac(num, denom))
+ case valBigFloat:
+ return constant.Make(r.bigFloat())
+ }
+}
+
+func (r *decoder) bigInt() *big.Int {
+ v := new(big.Int).SetBytes([]byte(r.string()))
+ if r.bool() {
+ v.Neg(v)
+ }
+ return v
+}
+
+func (r *decoder) bigFloat() *big.Float {
+ v := new(big.Float).SetPrec(512)
+ assert(v.UnmarshalText([]byte(r.string())) == nil)
+ return v
+}
diff --git a/src/cmd/compile/internal/noder/encoder.go b/src/cmd/compile/internal/noder/encoder.go
new file mode 100644
index 0000000..b07b3a4
--- /dev/null
+++ b/src/cmd/compile/internal/noder/encoder.go
@@ -0,0 +1,285 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "io"
+ "math/big"
+ "runtime"
+
+ "cmd/compile/internal/base"
+)
+
+type pkgEncoder struct {
+ elems [numRelocs][]string
+
+ stringsIdx map[string]int
+}
+
+func newPkgEncoder() pkgEncoder {
+ return pkgEncoder{
+ stringsIdx: make(map[string]int),
+ }
+}
+
+func (pw *pkgEncoder) dump(out io.Writer) {
+ writeUint32 := func(x uint32) {
+ assert(binary.Write(out, binary.LittleEndian, x) == nil)
+ }
+
+ var sum uint32
+ for _, elems := range &pw.elems {
+ sum += uint32(len(elems))
+ writeUint32(sum)
+ }
+
+ sum = 0
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ sum += uint32(len(elem))
+ writeUint32(sum)
+ }
+ }
+
+ for _, elems := range &pw.elems {
+ for _, elem := range elems {
+ _, err := io.WriteString(out, elem)
+ assert(err == nil)
+ }
+ }
+}
+
+func (pw *pkgEncoder) stringIdx(s string) int {
+ if idx, ok := pw.stringsIdx[s]; ok {
+ assert(pw.elems[relocString][idx] == s)
+ return idx
+ }
+
+ idx := len(pw.elems[relocString])
+ pw.elems[relocString] = append(pw.elems[relocString], s)
+ pw.stringsIdx[s] = idx
+ return idx
+}
+
+func (pw *pkgEncoder) newEncoder(k reloc, marker syncMarker) encoder {
+ e := pw.newEncoderRaw(k)
+ e.sync(marker)
+ return e
+}
+
+func (pw *pkgEncoder) newEncoderRaw(k reloc) encoder {
+ idx := len(pw.elems[k])
+ pw.elems[k] = append(pw.elems[k], "") // placeholder
+
+ return encoder{
+ p: pw,
+ k: k,
+ idx: idx,
+ }
+}
+
+// Encoders
+
+type encoder struct {
+ p *pkgEncoder
+
+ relocs []relocEnt
+ data bytes.Buffer
+
+ encodingRelocHeader bool
+
+ k reloc
+ idx int
+}
+
+func (w *encoder) flush() int {
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+
+ // Back up the data so we can write the relocations at the front.
+ var tmp bytes.Buffer
+ io.Copy(&tmp, &w.data)
+
+ // TODO(mdempsky): Consider writing these out separately so they're
+ // easier to strip, along with function bodies, so that we can prune
+ // down to just the data that's relevant to go/types.
+ if w.encodingRelocHeader {
+ base.Fatalf("encodingRelocHeader already true; recursive flush?")
+ }
+ w.encodingRelocHeader = true
+ w.sync(syncRelocs)
+ w.len(len(w.relocs))
+ for _, rent := range w.relocs {
+ w.sync(syncReloc)
+ w.len(int(rent.kind))
+ w.len(rent.idx)
+ }
+
+ io.Copy(&sb, &w.data)
+ io.Copy(&sb, &tmp)
+ w.p.elems[w.k][w.idx] = sb.String()
+
+ return w.idx
+}
+
+func (w *encoder) checkErr(err error) {
+ if err != nil {
+ base.Fatalf("unexpected error: %v", err)
+ }
+}
+
+func (w *encoder) rawUvarint(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ _, err := w.data.Write(buf[:n])
+ w.checkErr(err)
+}
+
+func (w *encoder) rawVarint(x int64) {
+ // Zig-zag encode.
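+ // e.g. 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3.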
+ ux := uint64(x) << 1
+ if x < 0 {
+ ux = ^ux
+ }
+
+ w.rawUvarint(ux)
+}
+
+func (w *encoder) rawReloc(r reloc, idx int) int {
+ // TODO(mdempsky): Use map for lookup.
+ for i, rent := range w.relocs {
+ if rent.kind == r && rent.idx == idx {
+ return i
+ }
+ }
+
+ i := len(w.relocs)
+ w.relocs = append(w.relocs, relocEnt{r, idx})
+ return i
+}
+
+func (w *encoder) sync(m syncMarker) {
+ if !enableSync {
+ return
+ }
+
+ // Writing out stack frame string references requires working
+ // relocations, but writing out the relocations themselves involves
+ // sync markers. To prevent infinite recursion, we simply trim the
+ // stack frame for sync markers within the relocation header.
+ var frames []string
+ if !w.encodingRelocHeader && base.Debug.SyncFrames > 0 {
+ pcs := make([]uintptr, base.Debug.SyncFrames)
+ n := runtime.Callers(2, pcs)
+ frames = fmtFrames(pcs[:n]...)
+ }
+
+ // TODO(mdempsky): Save space by writing out stack frames as a
+ // linked list so we can share common stack frames.
+ w.rawUvarint(uint64(m))
+ w.rawUvarint(uint64(len(frames)))
+ for _, frame := range frames {
+ w.rawUvarint(uint64(w.rawReloc(relocString, w.p.stringIdx(frame))))
+ }
+}
+
+func (w *encoder) bool(b bool) bool {
+ w.sync(syncBool)
+ var x byte
+ if b {
+ x = 1
+ }
+ err := w.data.WriteByte(x)
+ w.checkErr(err)
+ return b
+}
+
+func (w *encoder) int64(x int64) {
+ w.sync(syncInt64)
+ w.rawVarint(x)
+}
+
+func (w *encoder) uint64(x uint64) {
+ w.sync(syncUint64)
+ w.rawUvarint(x)
+}
+
+func (w *encoder) len(x int) { assert(x >= 0); w.uint64(uint64(x)) }
+func (w *encoder) int(x int) { w.int64(int64(x)) }
+func (w *encoder) uint(x uint) { w.uint64(uint64(x)) }
+
+func (w *encoder) reloc(r reloc, idx int) {
+ w.sync(syncUseReloc)
+ w.len(w.rawReloc(r, idx))
+}
+
+func (w *encoder) code(c code) {
+ w.sync(c.marker())
+ w.len(c.value())
+}
+
+func (w *encoder) string(s string) {
+ w.sync(syncString)
+ w.reloc(relocString, w.p.stringIdx(s))
+}
+
+func (w *encoder) strings(ss []string) {
+ w.len(len(ss))
+ for _, s := range ss {
+ w.string(s)
+ }
+}
+
+func (w *encoder) value(val constant.Value) {
+ w.sync(syncValue)
+ if w.bool(val.Kind() == constant.Complex) {
+ w.scalar(constant.Real(val))
+ w.scalar(constant.Imag(val))
+ } else {
+ w.scalar(val)
+ }
+}
+
+func (w *encoder) scalar(val constant.Value) {
+ switch v := constant.Val(val).(type) {
+ default:
+ panic(fmt.Sprintf("unhandled %v (%v)", val, val.Kind()))
+ case bool:
+ w.code(valBool)
+ w.bool(v)
+ case string:
+ w.code(valString)
+ w.string(v)
+ case int64:
+ w.code(valInt64)
+ w.int64(v)
+ case *big.Int:
+ w.code(valBigInt)
+ w.bigInt(v)
+ case *big.Rat:
+ w.code(valBigRat)
+ w.bigInt(v.Num())
+ w.bigInt(v.Denom())
+ case *big.Float:
+ w.code(valBigFloat)
+ w.bigFloat(v)
+ }
+}
+
+func (w *encoder) bigInt(v *big.Int) {
+ b := v.Bytes()
+ w.string(string(b)) // TODO: More efficient encoding.
+ w.bool(v.Sign() < 0)
+}
+
+func (w *encoder) bigFloat(v *big.Float) {
+ b := v.Append(nil, 'p', -1)
+ w.string(string(b)) // TODO: More efficient encoding.
+}
diff --git a/src/cmd/compile/internal/noder/export.go b/src/cmd/compile/internal/noder/export.go
new file mode 100644
index 0000000..1a296e2
--- /dev/null
+++ b/src/cmd/compile/internal/noder/export.go
@@ -0,0 +1,65 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/typecheck"
+ "cmd/internal/bio"
+)
+
+// writeNewExportFunc is a hook that can be added to append extra
+// export data after the normal export data section. It allows
+// experimenting with new export data format designs without requiring
+// immediate support in the go/internal or x/tools importers.
+var writeNewExportFunc func(out io.Writer)
+
+func WriteExports(out *bio.Writer) {
+ // When unified IR export is enabled, we simply append it to the
+ // end of the normal export data (with compiler extensions
+ // disabled), and write an extra header giving its size.
+ //
+ // If the compiler sees this header, it knows to read the new data
+ // instead; meanwhile the go/types importers will silently ignore it
+ // and continue processing the old export data.
+ //
+ // This allows us to experiment with changes to the new export data
+ // format without needing to update the go/internal/gcimporter or
+ // (worse) x/tools/go/gcexportdata.
+
+ useNewExport := writeNewExportFunc != nil
+
+ var old, new bytes.Buffer
+
+ typecheck.WriteExports(&old, !useNewExport)
+
+ if useNewExport {
+ writeNewExportFunc(&new)
+ }
+
+ oldLen := old.Len()
+ newLen := new.Len()
+
+ if useNewExport {
+ fmt.Fprintf(out, "\nnewexportsize %v\n", newLen)
+ }
+
+ // The linker also looks for the $$ marker - use char after $$ to distinguish format.
+ out.WriteString("\n$$B\n") // indicate binary export format
+ io.Copy(out, &old)
+ out.WriteString("\n$$\n")
+ io.Copy(out, &new)
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("BenchmarkExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, oldLen)
+ if useNewExport {
+ fmt.Printf("BenchmarkNewExportSize:%s 1 %d bytes\n", base.Ctxt.Pkgpath, newLen)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/noder/expr.go b/src/cmd/compile/internal/noder/expr.go
new file mode 100644
index 0000000..4b5ae70
--- /dev/null
+++ b/src/cmd/compile/internal/noder/expr.go
@@ -0,0 +1,493 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+func (g *irgen) expr(expr syntax.Expr) ir.Node {
+ expr = unparen(expr) // skip parens; unneeded after parse+typecheck
+
+ if expr == nil {
+ return nil
+ }
+
+ if expr, ok := expr.(*syntax.Name); ok && expr.Value == "_" {
+ return ir.BlankNode
+ }
+
+ tv, ok := g.info.Types[expr]
+ if !ok {
+ base.FatalfAt(g.pos(expr), "missing type for %v (%T)", expr, expr)
+ }
+ switch {
+ case tv.IsBuiltin():
+ // Qualified builtins, such as unsafe.Add and unsafe.Slice.
+ if expr, ok := expr.(*syntax.SelectorExpr); ok {
+ if name, ok := expr.X.(*syntax.Name); ok {
+ if _, ok := g.info.Uses[name].(*types2.PkgName); ok {
+ return g.use(expr.Sel)
+ }
+ }
+ }
+ return g.use(expr.(*syntax.Name))
+ case tv.IsType():
+ return ir.TypeNode(g.typ(tv.Type))
+ case tv.IsValue(), tv.IsVoid():
+ // ok
+ default:
+ base.FatalfAt(g.pos(expr), "unrecognized type-checker result")
+ }
+
+ base.Assert(g.exprStmtOK)
+
+ // The gc backend expects all expressions to have a concrete type, and
+ // types2 mostly satisfies this expectation already. But there are a few
+ // cases where the Go spec doesn't require converting to concrete type,
+ // and so types2 leaves them untyped. So we need to fix those up here.
+ typ := tv.Type
+ if basic, ok := typ.(*types2.Basic); ok && basic.Info()&types2.IsUntyped != 0 {
+ switch basic.Kind() {
+ case types2.UntypedNil:
+ // ok; can appear in type switch case clauses
+ // TODO(mdempsky): Handle as part of type switches instead?
+ case types2.UntypedBool:
+ typ = types2.Typ[types2.Bool] // expression in "if" or "for" condition
+ case types2.UntypedString:
+ typ = types2.Typ[types2.String] // argument to "append" or "copy" calls
+ default:
+ base.FatalfAt(g.pos(expr), "unexpected untyped type: %v", basic)
+ }
+ }
+
+ // Constant expression.
+ if tv.Value != nil {
+ typ := g.typ(typ)
+ value := FixValue(typ, tv.Value)
+ return OrigConst(g.pos(expr), typ, value, constExprOp(expr), syntax.String(expr))
+ }
+
+ n := g.expr0(typ, expr)
+ if n.Typecheck() != 1 && n.Typecheck() != 3 {
+ base.FatalfAt(g.pos(expr), "missed typecheck: %+v", n)
+ }
+ if n.Op() != ir.OFUNCINST && !g.match(n.Type(), typ, tv.HasOk()) {
+ base.FatalfAt(g.pos(expr), "expected %L to have type %v", n, typ)
+ }
+ return n
+}
+
+func (g *irgen) expr0(typ types2.Type, expr syntax.Expr) ir.Node {
+ pos := g.pos(expr)
+ assert(pos.IsKnown())
+
+ // Set base.Pos for transformation code that still uses base.Pos, rather than
+ // the pos of the node being converted.
+ base.Pos = pos
+
+ switch expr := expr.(type) {
+ case *syntax.Name:
+ if _, isNil := g.info.Uses[expr].(*types2.Nil); isNil {
+ return Nil(pos, g.typ(typ))
+ }
+ return g.use(expr)
+
+ case *syntax.CompositeLit:
+ return g.compLit(typ, expr)
+
+ case *syntax.FuncLit:
+ return g.funcLit(typ, expr)
+
+ case *syntax.AssertExpr:
+ return Assert(pos, g.expr(expr.X), g.typeExpr(expr.Type))
+
+ case *syntax.CallExpr:
+ fun := g.expr(expr.Fun)
+ return g.callExpr(pos, g.typ(typ), fun, g.exprs(expr.ArgList), expr.HasDots)
+
+ case *syntax.IndexExpr:
+ args := unpackListExpr(expr.Index)
+ if len(args) == 1 {
+ tv, ok := g.info.Types[args[0]]
+ assert(ok)
+ if tv.IsValue() {
+ // This is just a normal index expression
+ n := Index(pos, g.typ(typ), g.expr(expr.X), g.expr(args[0]))
+ if !g.delayTransform() {
+ // transformIndex will modify n.Type() for OINDEXMAP.
+ transformIndex(n)
+ }
+ return n
+ }
+ }
+
+ // expr.Index is a list of type args, so we ignore it, since types2 has
+ // already provided this info with the Info.Instances map.
+ return g.expr(expr.X)
+
+ case *syntax.SelectorExpr:
+ // Qualified identifier.
+ if name, ok := expr.X.(*syntax.Name); ok {
+ if _, ok := g.info.Uses[name].(*types2.PkgName); ok {
+ return g.use(expr.Sel)
+ }
+ }
+ return g.selectorExpr(pos, typ, expr)
+
+ case *syntax.SliceExpr:
+ n := Slice(pos, g.typ(typ), g.expr(expr.X), g.expr(expr.Index[0]), g.expr(expr.Index[1]), g.expr(expr.Index[2]))
+ if !g.delayTransform() {
+ transformSlice(n)
+ }
+ return n
+
+ case *syntax.Operation:
+ if expr.Y == nil {
+ n := Unary(pos, g.typ(typ), g.op(expr.Op, unOps[:]), g.expr(expr.X))
+ if n.Op() == ir.OADDR && !g.delayTransform() {
+ transformAddr(n.(*ir.AddrExpr))
+ }
+ return n
+ }
+ switch op := g.op(expr.Op, binOps[:]); op {
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := Compare(pos, g.typ(typ), op, g.expr(expr.X), g.expr(expr.Y))
+ if !g.delayTransform() {
+ transformCompare(n)
+ }
+ return n
+ case ir.OANDAND, ir.OOROR:
+ x := g.expr(expr.X)
+ y := g.expr(expr.Y)
+ return typed(x.Type(), ir.NewLogicalExpr(pos, op, x, y))
+ default:
+ n := Binary(pos, op, g.typ(typ), g.expr(expr.X), g.expr(expr.Y))
+ if op == ir.OADD && !g.delayTransform() {
+ return transformAdd(n)
+ }
+ return n
+ }
+
+ default:
+ g.unhandled("expression", expr)
+ panic("unreachable")
+ }
+}
+
+// substType does a normal type substitution, but tparams is given as a field
+// list and targs as a slice of type nodes. substType records any newly
+// instantiated types into g.instTypeList.
+func (g *irgen) substType(typ *types.Type, tparams *types.Type, targs []ir.Node) *types.Type {
+ fields := tparams.FieldSlice()
+ tparams1 := make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams1[i] = f.Type
+ }
+ targs1 := make([]*types.Type, len(targs))
+ for i, n := range targs {
+ targs1[i] = n.Type()
+ }
+ ts := typecheck.Tsubster{
+ Tparams: tparams1,
+ Targs: targs1,
+ }
+ newt := ts.Typ(typ)
+ return newt
+}
+
+// callExpr creates a call expression (which might be a type conversion, built-in
+// call, or a regular call) and does standard transforms, unless we are in a generic
+// function.
+func (g *irgen) callExpr(pos src.XPos, typ *types.Type, fun ir.Node, args []ir.Node, dots bool) ir.Node {
+ n := ir.NewCallExpr(pos, ir.OCALL, fun, args)
+ n.IsDDD = dots
+ typed(typ, n)
+
+ if fun.Op() == ir.OTYPE {
+ // Actually a type conversion, not a function call.
+ if !g.delayTransform() {
+ return transformConvCall(n)
+ }
+ return n
+ }
+
+ if fun, ok := fun.(*ir.Name); ok && fun.BuiltinOp != 0 {
+ if !g.delayTransform() {
+ return transformBuiltin(n)
+ }
+ return n
+ }
+
+ // Add information, now that we know that fun is actually being called.
+ switch fun := fun.(type) {
+ case *ir.SelectorExpr:
+ if fun.Op() == ir.OMETHVALUE {
+ op := ir.ODOTMETH
+ if fun.X.Type().IsInterface() {
+ op = ir.ODOTINTER
+ }
+ fun.SetOp(op)
+ // Set the type to include the receiver, since that's what
+ // later parts of the compiler expect
+ fun.SetType(fun.Selection.Type)
+ }
+ }
+
+ // A function instantiation (even if fully concrete) shouldn't be
+ // transformed yet, because we need to add the dictionary during the
+ // transformation.
+ if fun.Op() != ir.OFUNCINST && !g.delayTransform() {
+ transformCall(n)
+ }
+ return n
+}
+
+// selectorExpr resolves the choice of ODOT, ODOTPTR, OMETHVALUE (eventually
+// ODOTMETH & ODOTINTER), and OMETHEXPR and deals with embedded fields here rather
+// than in typecheck.go.
+func (g *irgen) selectorExpr(pos src.XPos, typ types2.Type, expr *syntax.SelectorExpr) ir.Node {
+ x := g.expr(expr.X)
+ if x.Type().HasTParam() {
+ // Leave a method call on a type param as an OXDOT, since it can
+ // only be fully transformed once it has an instantiated type.
+ n := ir.NewSelectorExpr(pos, ir.OXDOT, x, typecheck.Lookup(expr.Sel.Value))
+ typed(g.typ(typ), n)
+ return n
+ }
+
+ selinfo := g.info.Selections[expr]
+ // Everything up to the last selection is an implicit embedded field access,
+ // and the last selection is determined by selinfo.Kind().
+ index := selinfo.Index()
+ embeds, last := index[:len(index)-1], index[len(index)-1]
+
+ origx := x
+ for _, ix := range embeds {
+ x = Implicit(DotField(pos, x, ix))
+ }
+
+ kind := selinfo.Kind()
+ if kind == types2.FieldVal {
+ return DotField(pos, x, last)
+ }
+
+ var n ir.Node
+ method2 := selinfo.Obj().(*types2.Func)
+
+ if kind == types2.MethodExpr {
+ // OMETHEXPR is unusual in using directly the node and type of the
+ // original OTYPE node (origx) before passing through embedded
+ // fields, even though the method is selected from the type
+ // (x.Type()) reached after following the embedded fields. We will
+ // actually drop any ODOT nodes we created due to the embedded
+ // fields.
+ n = MethodExpr(pos, origx, x.Type(), last)
+ } else {
+ // Add implicit addr/deref for method values, if needed.
+ if x.Type().IsInterface() {
+ n = DotMethod(pos, x, last)
+ } else {
+ recvType2 := method2.Type().(*types2.Signature).Recv().Type()
+ _, wantPtr := recvType2.(*types2.Pointer)
+ havePtr := x.Type().IsPtr()
+
+ if havePtr != wantPtr {
+ if havePtr {
+ x = Implicit(Deref(pos, x.Type().Elem(), x))
+ } else {
+ x = Implicit(Addr(pos, x))
+ }
+ }
+ recvType2Base := recvType2
+ if wantPtr {
+ recvType2Base = types2.AsPointer(recvType2).Elem()
+ }
+ if recvType2Base.(*types2.Named).TypeParams().Len() > 0 {
+ // recvType2 is the original generic type that is
+ // instantiated for this method call.
+ // selinfo.Recv() is the instantiated type
+ recvType2 = recvType2Base
+ recvTypeSym := g.pkg(method2.Pkg()).Lookup(recvType2.(*types2.Named).Obj().Name())
+ recvType := recvTypeSym.Def.(*ir.Name).Type()
+ // method is the generic method associated with
+ // the base generic type. The instantiated type may not
+ // have method bodies filled in, if it was imported.
+ method := recvType.Methods().Index(last).Nname.(*ir.Name)
+ n = ir.NewSelectorExpr(pos, ir.OMETHVALUE, x, typecheck.Lookup(expr.Sel.Value))
+ n.(*ir.SelectorExpr).Selection = types.NewField(pos, method.Sym(), method.Type())
+ n.(*ir.SelectorExpr).Selection.Nname = method
+ typed(method.Type(), n)
+
+ xt := deref(x.Type())
+ targs := make([]ir.Node, len(xt.RParams()))
+ for i := range targs {
+ targs[i] = ir.TypeNode(xt.RParams()[i])
+ }
+
+ // Create function instantiation with the type
+ // args for the receiver type for the method call.
+ n = ir.NewInstExpr(pos, ir.OFUNCINST, n, targs)
+ typed(g.typ(typ), n)
+ return n
+ }
+
+ if !g.match(x.Type(), recvType2, false) {
+ base.FatalfAt(pos, "expected %L to have type %v", x, recvType2)
+ } else {
+ n = DotMethod(pos, x, last)
+ }
+ }
+ }
+ if have, want := n.Sym(), g.selector(method2); have != want {
+ base.FatalfAt(pos, "bad Sym: have %v, want %v", have, want)
+ }
+ return n
+}
+
+func (g *irgen) exprList(expr syntax.Expr) []ir.Node {
+ return g.exprs(unpackListExpr(expr))
+}
+
+func unpackListExpr(expr syntax.Expr) []syntax.Expr {
+ switch expr := expr.(type) {
+ case nil:
+ return nil
+ case *syntax.ListExpr:
+ return expr.ElemList
+ default:
+ return []syntax.Expr{expr}
+ }
+}
+
+func (g *irgen) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, len(exprs))
+ for i, expr := range exprs {
+ nodes[i] = g.expr(expr)
+ }
+ return nodes
+}
+
+func (g *irgen) compLit(typ types2.Type, lit *syntax.CompositeLit) ir.Node {
+ if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok {
+ n := ir.NewAddrExpr(g.pos(lit), g.compLit(ptr.Elem(), lit))
+ n.SetOp(ir.OPTRLIT)
+ return typed(g.typ(typ), n)
+ }
+
+ _, isStruct := types2.CoreType(typ).(*types2.Struct)
+
+ exprs := make([]ir.Node, len(lit.ElemList))
+ for i, elem := range lit.ElemList {
+ switch elem := elem.(type) {
+ case *syntax.KeyValueExpr:
+ var key ir.Node
+ if isStruct {
+ key = ir.NewIdent(g.pos(elem.Key), g.name(elem.Key.(*syntax.Name)))
+ } else {
+ key = g.expr(elem.Key)
+ }
+ value := wrapname(g.pos(elem.Value), g.expr(elem.Value))
+ if value.Op() == ir.OPAREN {
+ // Make sure any PAREN node added by wrapper has a type
+ typed(value.(*ir.ParenExpr).X.Type(), value)
+ }
+ exprs[i] = ir.NewKeyExpr(g.pos(elem), key, value)
+ default:
+ exprs[i] = wrapname(g.pos(elem), g.expr(elem))
+ if exprs[i].Op() == ir.OPAREN {
+ // Make sure any PAREN node added by wrapper has a type
+ typed(exprs[i].(*ir.ParenExpr).X.Type(), exprs[i])
+ }
+ }
+ }
+
+ n := ir.NewCompLitExpr(g.pos(lit), ir.OCOMPLIT, nil, exprs)
+ typed(g.typ(typ), n)
+ var r ir.Node = n
+ if !g.delayTransform() {
+ r = transformCompLit(n)
+ }
+ return r
+}
+
+func (g *irgen) funcLit(typ2 types2.Type, expr *syntax.FuncLit) ir.Node {
+ fn := ir.NewClosureFunc(g.pos(expr), ir.CurFunc != nil)
+ ir.NameClosure(fn.OClosure, ir.CurFunc)
+
+ typ := g.typ(typ2)
+ typed(typ, fn.Nname)
+ typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
+
+ g.funcBody(fn, nil, expr.Type, expr.Body)
+
+ ir.FinishCaptureNames(fn.Pos(), ir.CurFunc, fn)
+
+ // TODO(mdempsky): ir.CaptureName should probably handle
+ // copying these fields from the canonical variable.
+ for _, cv := range fn.ClosureVars {
+ cv.SetType(cv.Canonical().Type())
+ cv.SetTypecheck(1)
+ cv.SetWalkdef(1)
+ }
+
+ if g.topFuncIsGeneric {
+ // Don't add any closure inside a generic function/method to the
+ // g.target.Decls list, even though it may not be generic itself.
+ // See issue #47514.
+ return ir.UseClosure(fn.OClosure, nil)
+ } else {
+ return ir.UseClosure(fn.OClosure, g.target)
+ }
+}
+
+func (g *irgen) typeExpr(typ syntax.Expr) *types.Type {
+ n := g.expr(typ)
+ if n.Op() != ir.OTYPE {
+ base.FatalfAt(g.pos(typ), "expected type: %L", n)
+ }
+ return n.Type()
+}
+
+// constExprOp returns an ir.Op that represents the outermost
+// operation of the given constant expression. It's intended for use
+// with ir.RawOrigExpr.
+func constExprOp(expr syntax.Expr) ir.Op {
+ switch expr := expr.(type) {
+ default:
+ panic(fmt.Sprintf("%s: unexpected expression: %T", expr.Pos(), expr))
+
+ case *syntax.BasicLit:
+ return ir.OLITERAL
+ case *syntax.Name, *syntax.SelectorExpr:
+ return ir.ONAME
+ case *syntax.CallExpr:
+ return ir.OCALL
+ case *syntax.Operation:
+ if expr.Y == nil {
+ return unOps[expr.Op]
+ }
+ return binOps[expr.Op]
+ }
+}
+
+func unparen(expr syntax.Expr) syntax.Expr {
+ for {
+ paren, ok := expr.(*syntax.ParenExpr)
+ if !ok {
+ return expr
+ }
+ expr = paren.X
+ }
+}
diff --git a/src/cmd/compile/internal/noder/frames_go1.go b/src/cmd/compile/internal/noder/frames_go1.go
new file mode 100644
index 0000000..d00e0f5
--- /dev/null
+++ b/src/cmd/compile/internal/noder/frames_go1.go
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !go1.7
+// +build !go1.7
+
+// TODO(mdempsky): Remove after #44505 is resolved
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ for _, pc := range pcs {
+ fn := runtime.FuncForPC(pc)
+ file, line := fn.FileLine(pc)
+
+ visit(file, line, fn.Name(), pc-fn.Entry())
+ }
+}
diff --git a/src/cmd/compile/internal/noder/frames_go17.go b/src/cmd/compile/internal/noder/frames_go17.go
new file mode 100644
index 0000000..48d7762
--- /dev/null
+++ b/src/cmd/compile/internal/noder/frames_go17.go
@@ -0,0 +1,25 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build go1.7
+// +build go1.7
+
+package noder
+
+import "runtime"
+
+func walkFrames(pcs []uintptr, visit frameVisitor) {
+ if len(pcs) == 0 {
+ return
+ }
+
+ frames := runtime.CallersFrames(pcs)
+ for {
+ frame, more := frames.Next()
+ visit(frame.File, frame.Line, frame.Function, frame.PC-frame.Entry)
+ if !more {
+ return
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/noder/func.go b/src/cmd/compile/internal/noder/func.go
new file mode 100644
index 0000000..6077b34
--- /dev/null
+++ b/src/cmd/compile/internal/noder/func.go
@@ -0,0 +1,73 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func (g *irgen) funcBody(fn *ir.Func, recv *syntax.Field, sig *syntax.FuncType, block *syntax.BlockStmt) {
+ typecheck.Func(fn)
+
+ // TODO(mdempsky): Remove uses of ir.CurFunc and
+ // typecheck.DeclContext after we stop relying on typecheck
+ // for desugaring.
+ outerfn, outerctxt := ir.CurFunc, typecheck.DeclContext
+ ir.CurFunc = fn
+
+ typ := fn.Type()
+ if param := typ.Recv(); param != nil {
+ g.defParam(param, recv, ir.PPARAM)
+ }
+ for i, param := range typ.Params().FieldSlice() {
+ g.defParam(param, sig.ParamList[i], ir.PPARAM)
+ }
+ for i, result := range typ.Results().FieldSlice() {
+ g.defParam(result, sig.ResultList[i], ir.PPARAMOUT)
+ }
+
+ // We may have type-checked a call to this function already and
+ // calculated its size, including parameter offsets. Now that we've
+ // created the parameter Names, force a recalculation to ensure
+ // their offsets are correct.
+ types.RecalcSize(typ)
+
+ if block != nil {
+ typecheck.DeclContext = ir.PAUTO
+
+ fn.Body = g.stmts(block.List)
+ if fn.Body == nil {
+ fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)}
+ }
+ fn.Endlineno = g.makeXPos(block.Rbrace)
+
+ if base.Flag.Dwarf {
+ g.recordScopes(fn, sig)
+ }
+ }
+
+ ir.CurFunc, typecheck.DeclContext = outerfn, outerctxt
+}
+
+func (g *irgen) defParam(param *types.Field, decl *syntax.Field, class ir.Class) {
+ typecheck.DeclContext = class
+
+ var name *ir.Name
+ if decl.Name != nil {
+ name, _ = g.def(decl.Name)
+ } else if class == ir.PPARAMOUT {
+ name = g.obj(g.info.Implicits[decl])
+ }
+
+ if name != nil {
+ param.Nname = name
+ param.Sym = name.Sym() // in case it was renamed
+ }
+}
diff --git a/src/cmd/compile/internal/noder/helpers.go b/src/cmd/compile/internal/noder/helpers.go
new file mode 100644
index 0000000..33acd60
--- /dev/null
+++ b/src/cmd/compile/internal/noder/helpers.go
@@ -0,0 +1,226 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Helpers for constructing typed IR nodes.
+//
+// TODO(mdempsky): Move into their own package so they can be easily
+// reused by iimport and frontend optimizations.
+
+type ImplicitNode interface {
+ ir.Node
+ SetImplicit(x bool)
+}
+
+// Implicit returns n after marking it as Implicit.
+func Implicit(n ImplicitNode) ImplicitNode {
+ n.SetImplicit(true)
+ return n
+}
+
+// typed returns n after setting its type to typ.
+func typed(typ *types.Type, n ir.Node) ir.Node {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+ return n
+}
+
+// Values
+
+func Const(pos src.XPos, typ *types.Type, val constant.Value) ir.Node {
+ return typed(typ, ir.NewBasicLit(pos, val))
+}
+
+func OrigConst(pos src.XPos, typ *types.Type, val constant.Value, op ir.Op, raw string) ir.Node {
+ orig := ir.NewRawOrigExpr(pos, op, raw)
+ return ir.NewConstExpr(val, typed(typ, orig))
+}
+
+// FixValue returns val after converting and truncating it as
+// appropriate for typ.
+func FixValue(typ *types.Type, val constant.Value) constant.Value {
+ assert(typ.Kind() != types.TFORW)
+ switch {
+ case typ.IsInteger():
+ val = constant.ToInt(val)
+ case typ.IsFloat():
+ val = constant.ToFloat(val)
+ case typ.IsComplex():
+ val = constant.ToComplex(val)
+ }
+ if !typ.IsUntyped() {
+ val = typecheck.DefaultLit(ir.NewBasicLit(src.NoXPos, val), typ).Val()
+ }
+ if !typ.IsTypeParam() {
+ ir.AssertValidTypeForConst(typ, val)
+ }
+ return val
+}
+
+func Nil(pos src.XPos, typ *types.Type) ir.Node {
+ return typed(typ, ir.NewNilExpr(pos))
+}
+
+// Expressions
+
+func Addr(pos src.XPos, x ir.Node) *ir.AddrExpr {
+ n := typecheck.NodAddrAt(pos, x)
+ typed(types.NewPtr(x.Type()), n)
+ return n
+}
+
+func Assert(pos src.XPos, x ir.Node, typ *types.Type) ir.Node {
+ return typed(typ, ir.NewTypeAssertExpr(pos, x, nil))
+}
+
+func Binary(pos src.XPos, op ir.Op, typ *types.Type, x, y ir.Node) *ir.BinaryExpr {
+ switch op {
+ case ir.OADD:
+ n := ir.NewBinaryExpr(pos, op, x, y)
+ typed(typ, n)
+ return n
+ default:
+ n := ir.NewBinaryExpr(pos, op, x, y)
+ typed(x.Type(), n)
+ return n
+ }
+}
+
+func Compare(pos src.XPos, typ *types.Type, op ir.Op, x, y ir.Node) *ir.BinaryExpr {
+ n := ir.NewBinaryExpr(pos, op, x, y)
+ typed(typ, n)
+ return n
+}
+
+func Deref(pos src.XPos, typ *types.Type, x ir.Node) *ir.StarExpr {
+ n := ir.NewStarExpr(pos, x)
+ typed(typ, n)
+ return n
+}
+
+func DotField(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
+ op, typ := ir.ODOT, x.Type()
+ if typ.IsPtr() {
+ op, typ = ir.ODOTPTR, typ.Elem()
+ }
+ if !typ.IsStruct() {
+ base.FatalfAt(pos, "DotField of non-struct: %L", x)
+ }
+
+ // TODO(mdempsky): This is the backend's responsibility.
+ types.CalcSize(typ)
+
+ field := typ.Field(index)
+ return dot(pos, field.Type, op, x, field)
+}
+
+func DotMethod(pos src.XPos, x ir.Node, index int) *ir.SelectorExpr {
+ method := method(x.Type(), index)
+
+ // Method value.
+ typ := typecheck.NewMethodType(method.Type, nil)
+ return dot(pos, typ, ir.OMETHVALUE, x, method)
+}
+
+// MethodExpr returns an OMETHEXPR node with the indicated index into the methods
+// of embed. The receiver type is set from recv, which differs from embed if the
+// method was accessed via embedded fields. Similarly, the X value of the
+// ir.SelectorExpr is recv, the original OTYPE node before passing through the
+// embedded fields.
+func MethodExpr(pos src.XPos, recv ir.Node, embed *types.Type, index int) *ir.SelectorExpr {
+ method := method(embed, index)
+ typ := typecheck.NewMethodType(method.Type, recv.Type())
+ // The method expression T.m requires a wrapper when T
+ // is different from m's declared receiver type. We
+ // normally generate these wrappers while writing out
+ // runtime type descriptors, which is always done for
+ // types declared at package scope. However, we need
+ // to make sure to generate wrappers for anonymous
+ // receiver types too.
+ if recv.Sym() == nil {
+ typecheck.NeedRuntimeType(recv.Type())
+ }
+ return dot(pos, typ, ir.OMETHEXPR, recv, method)
+}
+
+func dot(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node, selection *types.Field) *ir.SelectorExpr {
+ n := ir.NewSelectorExpr(pos, op, x, selection.Sym)
+ n.Selection = selection
+ typed(typ, n)
+ return n
+}
+
+// TODO(mdempsky): Move to package types.
+func method(typ *types.Type, index int) *types.Field {
+ if typ.IsInterface() {
+ return typ.AllMethods().Index(index)
+ }
+ return types.ReceiverBaseType(typ).Methods().Index(index)
+}
+
+func Index(pos src.XPos, typ *types.Type, x, index ir.Node) *ir.IndexExpr {
+ n := ir.NewIndexExpr(pos, x, index)
+ typed(typ, n)
+ return n
+}
+
+func Slice(pos src.XPos, typ *types.Type, x, low, high, max ir.Node) *ir.SliceExpr {
+ op := ir.OSLICE
+ if max != nil {
+ op = ir.OSLICE3
+ }
+ n := ir.NewSliceExpr(pos, op, x, low, high, max)
+ typed(typ, n)
+ return n
+}
+
+func Unary(pos src.XPos, typ *types.Type, op ir.Op, x ir.Node) ir.Node {
+ switch op {
+ case ir.OADDR:
+ return Addr(pos, x)
+ case ir.ODEREF:
+ return Deref(pos, typ, x)
+ }
+
+ if op == ir.ORECV {
+ if typ.IsFuncArgStruct() && typ.NumFields() == 2 {
+			// Remove the second boolean type (if provided by types2),
+ // since that works better with the rest of the compiler
+ // (which will add it back in later).
+ assert(typ.Field(1).Type.Kind() == types.TBOOL)
+ typ = typ.Field(0).Type
+ }
+ }
+ return typed(typ, ir.NewUnaryExpr(pos, op, x))
+}
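+
+// For illustration: in a two-value receive such as
+//
+//	v, ok := <-ch
+//
+// types2 reports the operand's type as a (T, bool) pair, so the ORECV case
+// above keeps only T; later phases of the compiler add the boolean back.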
+
+// Statements
+
+var one = constant.MakeInt64(1)
+
+func IncDec(pos src.XPos, op ir.Op, x ir.Node) *ir.AssignOpStmt {
+ assert(x.Type() != nil)
+ bl := ir.NewBasicLit(pos, one)
+ if x.Type().HasTParam() {
+ // If the operand is generic, then types2 will have proved it must be
+ // a type that fits with increment/decrement, so just set the type of
+		// "one" to x.Type(). This works even for types that are eventually
+ // float or complex.
+ typed(x.Type(), bl)
+ } else {
+ bl = typecheck.DefaultLit(bl, x.Type())
+ }
+ return ir.NewAssignOpStmt(pos, op, x, bl)
+}
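+
+// For illustration: IncDec builds the assignment-op form of x++ and x--,
+// i.e. x += 1 or x -= 1, giving the literal 1 the operand's type directly
+// when x is generic and via typecheck.DefaultLit otherwise.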
diff --git a/src/cmd/compile/internal/noder/import.go b/src/cmd/compile/internal/noder/import.go
new file mode 100644
index 0000000..58dffba
--- /dev/null
+++ b/src/cmd/compile/internal/noder/import.go
@@ -0,0 +1,601 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "errors"
+ "fmt"
+ "internal/buildcfg"
+ "os"
+ pathpkg "path"
+ "runtime"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/importer"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/archive"
+ "cmd/internal/bio"
+ "cmd/internal/goobj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// haveLegacyImports records whether we've imported any packages
+// without a new export data section. This is useful for experimenting
+// with new export data format designs, when you need to support
+// existing tests that manually compile files with inconsistent
+// compiler flags.
+var haveLegacyImports = false
+
+// newReadImportFunc is an extension hook for experimenting with new
+// export data formats. If a new export data payload was written out
+// for an imported package by overloading writeNewExportFunc, then
+// that payload will be mapped into memory and passed to
+// newReadImportFunc.
+var newReadImportFunc = func(data string, pkg1 *types.Pkg, env *types2.Context, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ panic("unexpected new export data payload")
+}
+
+type gcimports struct {
+ ctxt *types2.Context
+ packages map[string]*types2.Package
+}
+
+func (m *gcimports) Import(path string) (*types2.Package, error) {
+ return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+
+ _, pkg, err := readImportFile(path, typecheck.Target, m.ctxt, m.packages)
+ return pkg, err
+}
+
+func isDriveLetter(b byte) bool {
+ return 'a' <= b && b <= 'z' || 'A' <= b && b <= 'Z'
+}
+
+// is this path a local name? begins with ./ or ../ or /
+func islocalname(name string) bool {
+ return strings.HasPrefix(name, "/") ||
+ runtime.GOOS == "windows" && len(name) >= 3 && isDriveLetter(name[0]) && name[1] == ':' && name[2] == '/' ||
+ strings.HasPrefix(name, "./") || name == "." ||
+ strings.HasPrefix(name, "../") || name == ".."
+}
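+
+// For illustration: islocalname("./util"), islocalname("../util"), and
+// islocalname("/abs/path") are true, while islocalname("fmt") is false.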
+
+func openPackage(path string) (*os.File, error) {
+ if islocalname(path) {
+ if base.Flag.NoLocalImports {
+ return nil, errors.New("local imports disallowed")
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ return os.Open(base.Flag.Cfg.PackageFile[path])
+ }
+
+		// Try .a before .o. This is important for building libraries:
+		// if there is an array.o in the array.a library,
+		// we want to find all of array.a, not just array.o.
+ if file, err := os.Open(fmt.Sprintf("%s.a", path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s.o", path)); err == nil {
+ return file, nil
+ }
+ return nil, errors.New("file not found")
+ }
+
+ // local imports should be canonicalized already.
+ // don't want to see "encoding/../encoding/base64"
+ // as different from "encoding/base64".
+ if q := pathpkg.Clean(path); q != path {
+ return nil, fmt.Errorf("non-canonical import path %q (should be %q)", path, q)
+ }
+
+ if base.Flag.Cfg.PackageFile != nil {
+ return os.Open(base.Flag.Cfg.PackageFile[path])
+ }
+
+ for _, dir := range base.Flag.Cfg.ImportDirs {
+ if file, err := os.Open(fmt.Sprintf("%s/%s.a", dir, path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s/%s.o", dir, path)); err == nil {
+ return file, nil
+ }
+ }
+
+ if buildcfg.GOROOT != "" {
+ suffix := ""
+ if base.Flag.InstallSuffix != "" {
+ suffix = "_" + base.Flag.InstallSuffix
+ } else if base.Flag.Race {
+ suffix = "_race"
+ } else if base.Flag.MSan {
+ suffix = "_msan"
+ } else if base.Flag.ASan {
+ suffix = "_asan"
+ }
+
+ if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.a", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil {
+ return file, nil
+ }
+ if file, err := os.Open(fmt.Sprintf("%s/pkg/%s_%s%s/%s.o", buildcfg.GOROOT, buildcfg.GOOS, buildcfg.GOARCH, suffix, path)); err == nil {
+ return file, nil
+ }
+ }
+ return nil, errors.New("file not found")
+}
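+
+// For illustration (hypothetical paths): once the -I import dirs are
+// exhausted, an import of "fmt" with GOOS=linux, GOARCH=amd64 and -race
+// falls through to $GOROOT/pkg/linux_amd64_race/fmt.a and then
+// $GOROOT/pkg/linux_amd64_race/fmt.o.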
+
+// myheight tracks the local package's height based on packages
+// imported so far.
+var myheight int
+
+// resolveImportPath resolves an import path as it appears in a Go
+// source file to the package's full path.
+func resolveImportPath(path string) (string, error) {
+ // The package name main is no longer reserved,
+ // but we reserve the import path "main" to identify
+ // the main package, just as we reserve the import
+ // path "math" to identify the standard math package.
+ if path == "main" {
+ return "", errors.New("cannot import \"main\"")
+ }
+
+ if base.Ctxt.Pkgpath != "" && path == base.Ctxt.Pkgpath {
+ return "", fmt.Errorf("import %q while compiling that package (import cycle)", path)
+ }
+
+ if mapped, ok := base.Flag.Cfg.ImportMap[path]; ok {
+ path = mapped
+ }
+
+ if islocalname(path) {
+ if path[0] == '/' {
+ return "", errors.New("import path cannot be absolute path")
+ }
+
+ prefix := base.Flag.D
+ if prefix == "" {
+			// Questionable, but when -D isn't specified, historically we
+			// resolve local import paths relative to the compiler's current
+			// directory, not the respective source file's directory.
+ prefix = base.Ctxt.Pathname
+ }
+ path = pathpkg.Join(prefix, path)
+
+ if err := checkImportPath(path, true); err != nil {
+ return "", err
+ }
+ }
+
+ return path, nil
+}
+
+func importfile(decl *syntax.ImportDecl) *types.Pkg {
+ path, err := parseImportPath(decl.Path)
+ if err != nil {
+ base.Errorf("%s", err)
+ return nil
+ }
+
+ pkg, _, err := readImportFile(path, typecheck.Target, nil, nil)
+ if err != nil {
+ base.Errorf("%s", err)
+ return nil
+ }
+
+ if pkg != types.UnsafePkg && pkg.Height >= myheight {
+ myheight = pkg.Height + 1
+ }
+ return pkg
+}
+
+func parseImportPath(pathLit *syntax.BasicLit) (string, error) {
+ if pathLit.Kind != syntax.StringLit {
+ return "", errors.New("import path must be a string")
+ }
+
+ path, err := strconv.Unquote(pathLit.Value)
+ if err != nil {
+ return "", errors.New("import path must be a string")
+ }
+
+ if err := checkImportPath(path, false); err != nil {
+ return "", err
+ }
+
+ return path, err
+}
+
+// readImportFile reads the import file for the given package path and
+// returns its types.Pkg representation. If packages is non-nil, the
+// types2.Package representation is also returned.
+func readImportFile(path string, target *ir.Package, env *types2.Context, packages map[string]*types2.Package) (pkg1 *types.Pkg, pkg2 *types2.Package, err error) {
+ path, err = resolveImportPath(path)
+ if err != nil {
+ return
+ }
+
+ if path == "unsafe" {
+ pkg1, pkg2 = types.UnsafePkg, types2.Unsafe
+
+ // TODO(mdempsky): Investigate if this actually matters. Why would
+ // the linker or runtime care whether a package imported unsafe?
+ if !pkg1.Direct {
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+ }
+
+ return
+ }
+
+ pkg1 = types.NewPkg(path, "")
+ if packages != nil {
+ pkg2 = packages[path]
+ assert(pkg1.Direct == (pkg2 != nil && pkg2.Complete()))
+ }
+
+ if pkg1.Direct {
+ return
+ }
+ pkg1.Direct = true
+ target.Imports = append(target.Imports, pkg1)
+
+ f, err := openPackage(path)
+ if err != nil {
+ return
+ }
+ defer f.Close()
+
+ r, end, newsize, err := findExportData(f)
+ if err != nil {
+ return
+ }
+
+ if base.Debug.Export != 0 {
+ fmt.Printf("importing %s (%s)\n", path, f.Name())
+ }
+
+ if newsize != 0 {
+		// We have unified IR data. Map it and feed it to the importers.
+ end -= newsize
+ var data string
+ data, err = base.MapFile(r.File(), end, newsize)
+ if err != nil {
+ return
+ }
+
+ pkg2, err = newReadImportFunc(data, pkg1, env, packages)
+ } else {
+ // We only have old data. Oh well, fall back to the legacy importers.
+ haveLegacyImports = true
+
+ var c byte
+ switch c, err = r.ReadByte(); {
+ case err != nil:
+ return
+
+ case c != 'i':
+ // Indexed format is distinguished by an 'i' byte,
+ // whereas previous export formats started with 'c', 'd', or 'v'.
+ err = fmt.Errorf("unexpected package format byte: %v", c)
+ return
+ }
+
+ pos := r.Offset()
+
+ // Map string (and data) section into memory as a single large
+ // string. This reduces heap fragmentation and allows
+ // returning individual substrings very efficiently.
+ var data string
+ data, err = base.MapFile(r.File(), pos, end-pos)
+ if err != nil {
+ return
+ }
+
+ typecheck.ReadImports(pkg1, data)
+
+ if packages != nil {
+ pkg2, err = importer.ImportData(packages, data, path)
+ if err != nil {
+ return
+ }
+ }
+ }
+
+ err = addFingerprint(path, f, end)
+ return
+}
+
+// findExportData returns a *bio.Reader positioned at the start of the
+// binary export data section, a file offset for where to stop reading,
+// and the size of any unified IR export data (newsize is 0 if none is
+// present).
+func findExportData(f *os.File) (r *bio.Reader, end, newsize int64, err error) {
+ r = bio.NewReader(f)
+
+ // check object header
+ line, err := r.ReadString('\n')
+ if err != nil {
+ return
+ }
+
+ if line == "!<arch>\n" { // package archive
+ // package export block should be first
+ sz := int64(archive.ReadHeader(r.Reader, "__.PKGDEF"))
+ if sz <= 0 {
+ err = errors.New("not a package file")
+ return
+ }
+ end = r.Offset() + sz
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return
+ }
+ } else {
+ // Not an archive; provide end of file instead.
+ // TODO(mdempsky): I don't think this happens anymore.
+ var fi os.FileInfo
+ fi, err = f.Stat()
+ if err != nil {
+ return
+ }
+ end = fi.Size()
+ }
+
+ if !strings.HasPrefix(line, "go object ") {
+ err = fmt.Errorf("not a go object file: %s", line)
+ return
+ }
+ if expect := objabi.HeaderString(); line != expect {
+ err = fmt.Errorf("object is [%s] expected [%s]", line, expect)
+ return
+ }
+
+ // process header lines
+ for !strings.HasPrefix(line, "$$") {
+ if strings.HasPrefix(line, "newexportsize ") {
+ fields := strings.Fields(line)
+ newsize, err = strconv.ParseInt(fields[1], 10, 64)
+ if err != nil {
+ return
+ }
+ }
+
+ line, err = r.ReadString('\n')
+ if err != nil {
+ return
+ }
+ }
+
+ // Expect $$B\n to signal binary import format.
+ if line != "$$B\n" {
+ err = errors.New("old export format no longer supported (recompile library)")
+ return
+ }
+
+ return
+}
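+
+// For illustration, the header findExportData walks through looks roughly
+// like this (schematic, not verbatim from any particular object file):
+//
+//	!<arch>
+//	__.PKGDEF ...                  archive entry holding the export data
+//	go object <goos> <goarch> <version> ...
+//	newexportsize NNN              only present with unified IR export data
+//	$$B
+//	...binary export data, ending with the "\n$$\n" marker...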
+
+// addFingerprint reads the linker fingerprint included at the end of
+// the export data.
+func addFingerprint(path string, f *os.File, end int64) error {
+ const eom = "\n$$\n"
+ var fingerprint goobj.FingerprintType
+
+ var buf [len(fingerprint) + len(eom)]byte
+ if _, err := f.ReadAt(buf[:], end-int64(len(buf))); err != nil {
+ return err
+ }
+
+ // Caller should have given us the end position of the export data,
+ // which should end with the "\n$$\n" marker. As a consistency check
+ // to make sure we're reading at the right offset, make sure we
+ // found the marker.
+ if s := string(buf[len(fingerprint):]); s != eom {
+ return fmt.Errorf("expected $$ marker, but found %q", s)
+ }
+
+ copy(fingerprint[:], buf[:])
+
+ // assume files move (get installed) so don't record the full path
+ if base.Flag.Cfg.PackageFile != nil {
+		// If using a packageFile map, assume path can be recorded directly.
+ base.Ctxt.AddImport(path, fingerprint)
+ } else {
+ // For file "/Users/foo/go/pkg/darwin_amd64/math.a" record "math.a".
+ file := f.Name()
+ base.Ctxt.AddImport(file[len(file)-len(path)-len(".a"):], fingerprint)
+ }
+ return nil
+}
+
+// The linker uses the magic symbol prefixes "go." and "type."
+// Avoid potential confusion between import paths and symbols
+// by rejecting these reserved imports for now. Also, people
+// "can do weird things in GOPATH and we'd prefer they didn't
+// do _that_ weird thing" (per rsc). See also #4257.
+var reservedimports = []string{
+ "go",
+ "type",
+}
+
+func checkImportPath(path string, allowSpace bool) error {
+ if path == "" {
+ return errors.New("import path is empty")
+ }
+
+ if strings.Contains(path, "\x00") {
+ return errors.New("import path contains NUL")
+ }
+
+ for _, ri := range reservedimports {
+ if path == ri {
+ return fmt.Errorf("import path %q is reserved and cannot be used", path)
+ }
+ }
+
+ for _, r := range path {
+ switch {
+ case r == utf8.RuneError:
+ return fmt.Errorf("import path contains invalid UTF-8 sequence: %q", path)
+ case r < 0x20 || r == 0x7f:
+ return fmt.Errorf("import path contains control character: %q", path)
+ case r == '\\':
+ return fmt.Errorf("import path contains backslash; use slash: %q", path)
+ case !allowSpace && unicode.IsSpace(r):
+ return fmt.Errorf("import path contains space character: %q", path)
+ case strings.ContainsRune("!\"#$%&'()*,:;<=>?[]^`{|}", r):
+ return fmt.Errorf("import path contains invalid character '%c': %q", r, path)
+ }
+ }
+
+ return nil
+}
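+
+// For illustration (hypothetical inputs): checkImportPath rejects "" (empty),
+// "go" and "type" (reserved), `a\b` (backslash), "a b" when allowSpace is
+// false, and any path containing control characters or invalid UTF-8.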
+
+func pkgnotused(lineno src.XPos, path string, name string) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if name == "" || elem == name {
+ base.ErrorfAt(lineno, "imported and not used: %q", path)
+ } else {
+ base.ErrorfAt(lineno, "imported and not used: %q as %s", path, name)
+ }
+}
+
+func mkpackage(pkgname string) {
+ if types.LocalPkg.Name == "" {
+ if pkgname == "_" {
+ base.Errorf("invalid package name _")
+ }
+ types.LocalPkg.Name = pkgname
+ } else {
+ if pkgname != types.LocalPkg.Name {
+ base.Errorf("package %s; expected %s", pkgname, types.LocalPkg.Name)
+ }
+ }
+}
+
+func clearImports() {
+ type importedPkg struct {
+ pos src.XPos
+ path string
+ name string
+ }
+ var unused []importedPkg
+
+ for _, s := range types.LocalPkg.Syms {
+ n := ir.AsNode(s.Def)
+ if n == nil {
+ continue
+ }
+ if n.Op() == ir.OPACK {
+ // throw away top-level package name left over
+ // from previous file.
+			// leave s.Block set to cause redeclaration
+ // errors if a conflicting top-level name is
+ // introduced by a different file.
+ p := n.(*ir.PkgName)
+ if !p.Used && base.SyntaxErrors() == 0 {
+ unused = append(unused, importedPkg{p.Pos(), p.Pkg.Path, s.Name})
+ }
+ s.Def = nil
+ continue
+ }
+ if s.Def != nil && s.Def.Sym() != s {
+ // throw away top-level name left over
+ // from previous import . "x"
+ // We'll report errors after type checking in CheckDotImports.
+ s.Def = nil
+ continue
+ }
+ }
+
+ sort.Slice(unused, func(i, j int) bool { return unused[i].pos.Before(unused[j].pos) })
+ for _, pkg := range unused {
+ pkgnotused(pkg.pos, pkg.path, pkg.name)
+ }
+}
+
+// CheckDotImports reports errors for any unused dot imports.
+func CheckDotImports() {
+ for _, pack := range dotImports {
+ if !pack.Used {
+ base.ErrorfAt(pack.Pos(), "imported and not used: %q", pack.Pkg.Path)
+ }
+ }
+
+ // No longer needed; release memory.
+ dotImports = nil
+ typecheck.DotImportRefs = nil
+}
+
+// dotImports tracks all PkgNames that have been dot-imported.
+var dotImports []*ir.PkgName
+
+// importDot finds all the exported symbols in the package referenced by pack
+// and makes them available in the current package.
+func importDot(pack *ir.PkgName) {
+ if typecheck.DotImportRefs == nil {
+ typecheck.DotImportRefs = make(map[*ir.Ident]*ir.PkgName)
+ }
+
+ opkg := pack.Pkg
+ for _, s := range opkg.Syms {
+ if s.Def == nil {
+ if _, ok := typecheck.DeclImporter[s]; !ok {
+ continue
+ }
+ }
+ if !types.IsExported(s.Name) || strings.ContainsRune(s.Name, 0xb7) { // 0xb7 = center dot
+ continue
+ }
+ s1 := typecheck.Lookup(s.Name)
+ if s1.Def != nil {
+ pkgerror := fmt.Sprintf("during import %q", opkg.Path)
+ typecheck.Redeclared(base.Pos, s1, pkgerror)
+ continue
+ }
+
+ id := ir.NewIdent(src.NoXPos, s)
+ typecheck.DotImportRefs[id] = pack
+ s1.Def = id
+ s1.Block = 1
+ }
+
+ dotImports = append(dotImports, pack)
+}
+
+// importName is like oldname,
+// but it reports an error if sym is from another package and not exported.
+func importName(sym *types.Sym) ir.Node {
+ n := oldname(sym)
+ if !types.IsExported(sym.Name) && sym.Pkg != types.LocalPkg {
+ n.SetDiag(true)
+ base.Errorf("cannot refer to unexported name %s.%s", sym.Pkg.Name, sym.Name)
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/noder/irgen.go b/src/cmd/compile/internal/noder/irgen.go
new file mode 100644
index 0000000..52224c4
--- /dev/null
+++ b/src/cmd/compile/internal/noder/irgen.go
@@ -0,0 +1,357 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "os"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// checkFiles configures and runs the types2 checker on the given
+// parsed source files and then returns the result.
+func checkFiles(noders []*noder) (posMap, *types2.Package, *types2.Info) {
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
+ }
+
+ // setup and syntax error reporting
+ var m posMap
+ files := make([]*syntax.File, len(noders))
+ for i, p := range noders {
+ m.join(&p.posMap)
+ files[i] = p.file
+ }
+
+ // typechecking
+ ctxt := types2.NewContext()
+ importer := gcimports{
+ ctxt: ctxt,
+ packages: map[string]*types2.Package{"unsafe": types2.Unsafe},
+ }
+ conf := types2.Config{
+ Context: ctxt,
+ GoVersion: base.Flag.Lang,
+ IgnoreLabels: true, // parser already checked via syntax.CheckBranches mode
+ CompilerErrorMessages: true, // use error strings matching existing compiler errors
+ Error: func(err error) {
+ terr := err.(types2.Error)
+ base.ErrorfAt(m.makeXPos(terr.Pos), "%s", terr.Msg)
+ },
+ Importer: &importer,
+ Sizes: &gcSizes{},
+ }
+ info := &types2.Info{
+ Types: make(map[syntax.Expr]types2.TypeAndValue),
+ Defs: make(map[*syntax.Name]types2.Object),
+ Uses: make(map[*syntax.Name]types2.Object),
+ Selections: make(map[*syntax.SelectorExpr]*types2.Selection),
+ Implicits: make(map[syntax.Node]types2.Object),
+ Scopes: make(map[syntax.Node]*types2.Scope),
+ Instances: make(map[*syntax.Name]types2.Instance),
+ // expand as needed
+ }
+
+ pkg, err := conf.Check(base.Ctxt.Pkgpath, files, info)
+
+ base.ExitIfErrors()
+ if err != nil {
+ base.FatalfAt(src.NoXPos, "conf.Check error: %v", err)
+ }
+
+ return m, pkg, info
+}
+
+// check2 type checks a Go package using types2, and then generates IR
+// using the results.
+func check2(noders []*noder) {
+ m, pkg, info := checkFiles(noders)
+
+ if base.Flag.G < 2 {
+ os.Exit(0)
+ }
+
+ g := irgen{
+ target: typecheck.Target,
+ self: pkg,
+ info: info,
+ posMap: m,
+ objs: make(map[types2.Object]*ir.Name),
+ typs: make(map[types2.Type]*types.Type),
+ }
+ g.generate(noders)
+
+ if base.Flag.G < 3 {
+ os.Exit(0)
+ }
+}
+
+// subDictInfo holds information about a sub-dictionary entry in a dictionary.
+type subDictInfo struct {
+ // Call or XDOT node that requires a dictionary.
+ callNode ir.Node
+ // Saved CallExpr.X node (*ir.SelectorExpr or *InstExpr node) for a generic
+ // method or function call, since this node will get dropped when the generic
+ // method/function call is transformed to a call on the instantiated shape
+ // function. Nil for other kinds of calls or XDOTs.
+ savedXNode ir.Node
+}
+
+// dictInfo is the dictionary format for an instantiation of a generic function with
+// particular shapes. shapeParams, derivedTypes, subDictCalls, and itabConvs describe
+// the actual dictionary entries in order, and the remaining fields are other
+// information needed for dictionary processing during compilation.
+type dictInfo struct {
+ // Types substituted for the type parameters, which are shape types.
+ shapeParams []*types.Type
+ // All types derived from those typeparams used in the instantiation.
+ derivedTypes []*types.Type
+	// Nodes in the instantiation that require a subdictionary. Includes
+ // method and function calls (OCALL), function values (OFUNCINST), method
+ // values/expressions (OXDOT).
+ subDictCalls []subDictInfo
+ // Nodes in the instantiation that are a conversion from a typeparam/derived
+ // type to a specific interface.
+ itabConvs []ir.Node
+
+ // Mapping from each shape type that substitutes a type param, to its
+ // type bound (which is also substituted with shapes if it is parameterized)
+ shapeToBound map[*types.Type]*types.Type
+
+ // For type switches on nonempty interfaces, a map from OTYPE entries of
+ // HasShape type, to the interface type we're switching from.
+ type2switchType map[ir.Node]*types.Type
+
+ startSubDict int // Start of dict entries for subdictionaries
+ startItabConv int // Start of dict entries for itab conversions
+ dictLen int // Total number of entries in dictionary
+}
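+
+// For illustration, a dictionary laid out per dictInfo holds, in order: the
+// shape types for the type parameters, the derived types, sub-dictionary
+// entries starting at startSubDict, and itab-conversion entries starting at
+// startItabConv, for dictLen entries in total (a schematic reading of the
+// fields above, not an exact memory layout).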
+
+// instInfo is information gathered on a shape instantiation of a function.
+type instInfo struct {
+ fun *ir.Func // The instantiated function (with body)
+ dictParam *ir.Name // The node inside fun that refers to the dictionary param
+
+ dictInfo *dictInfo
+}
+
+type irgen struct {
+ target *ir.Package
+ self *types2.Package
+ info *types2.Info
+
+ posMap
+ objs map[types2.Object]*ir.Name
+ typs map[types2.Type]*types.Type
+ marker dwarfgen.ScopeMarker
+
+ // laterFuncs records tasks that need to run after all declarations
+ // are processed.
+ laterFuncs []func()
+	// haveEmbed indicates whether the current node belongs to a file that
+	// imports the "embed" package.
+ haveEmbed bool
+
+ // exprStmtOK indicates whether it's safe to generate expressions or
+ // statements yet.
+ exprStmtOK bool
+
+	// Types which we need to finish by calling g.fillinMethods.
+ typesToFinalize []*typeDelayInfo
+
+	// True when we are compiling a top-level generic function or method. Used to
+ // avoid adding closures of generic functions/methods to the target.Decls
+ // list.
+ topFuncIsGeneric bool
+
+ // The context during type/function/method declarations that is used to
+ // uniquely name type parameters. We need unique names for type params so we
+ // can be sure they match up correctly between types2-to-types1 translation
+ // and types1 importing.
+ curDecl string
+}
+
+// genInst has the information for creating needed instantiations and modifying
+// functions to use instantiations.
+type genInst struct {
+ dnum int // for generating unique dictionary variables
+
+ // Map from the names of all instantiations to information about the
+ // instantiations.
+ instInfoMap map[*types.Sym]*instInfo
+
+ // Dictionary syms which we need to finish, by writing out any itabconv
+ // entries.
+ dictSymsToFinalize []*delayInfo
+
+ // New instantiations created during this round of buildInstantiations().
+ newInsts []ir.Node
+}
+
+func (g *irgen) later(fn func()) {
+ g.laterFuncs = append(g.laterFuncs, fn)
+}
+
+type delayInfo struct {
+ gf *ir.Name
+ targs []*types.Type
+ sym *types.Sym
+ off int
+ isMeth bool
+}
+
+type typeDelayInfo struct {
+ typ *types2.Named
+ ntyp *types.Type
+}
+
+func (g *irgen) generate(noders []*noder) {
+ types.LocalPkg.Name = g.self.Name()
+ types.LocalPkg.Height = g.self.Height()
+ typecheck.TypecheckAllowed = true
+
+ // Prevent size calculations until we set the underlying type
+ // for all package-block defined types.
+ types.DeferCheckSize()
+
+ // At this point, types2 has already handled name resolution and
+ // type checking. We just need to map from its object and type
+ // representations to those currently used by the rest of the
+ // compiler. This happens in a few passes.
+
+ // 1. Process all import declarations. We use the compiler's own
+ // importer for this, rather than types2's gcimporter-derived one,
+ // to handle extensions and inline function bodies correctly.
+ //
+ // Also, we need to do this in a separate pass, because mappings are
+ // instantiated on demand. If we interleaved processing import
+ // declarations with other declarations, it's likely we'd end up
+ // wanting to map an object/type from another source file, but not
+ // yet have the import data it relies on.
+ declLists := make([][]syntax.Decl, len(noders))
+Outer:
+ for i, p := range noders {
+ g.pragmaFlags(p.file.Pragma, ir.GoBuildPragma)
+ for j, decl := range p.file.DeclList {
+ switch decl := decl.(type) {
+ case *syntax.ImportDecl:
+ g.importDecl(p, decl)
+ default:
+ declLists[i] = p.file.DeclList[j:]
+ continue Outer // no more ImportDecls
+ }
+ }
+ }
+
+ // 2. Process all package-block type declarations. As with imports,
+ // we need to make sure all types are properly instantiated before
+ // trying to map any expressions that utilize them. In particular,
+ // we need to make sure type pragmas are already known (see comment
+ // in irgen.typeDecl).
+ //
+ // We could perhaps instead defer processing of package-block
+ // variable initializers and function bodies, like noder does, but
+ // special-casing just package-block type declarations minimizes the
+ // differences between processing package-block and function-scoped
+ // declarations.
+ for _, declList := range declLists {
+ for _, decl := range declList {
+ switch decl := decl.(type) {
+ case *syntax.TypeDecl:
+ g.typeDecl((*ir.Nodes)(&g.target.Decls), decl)
+ }
+ }
+ }
+ types.ResumeCheckSize()
+
+ // 3. Process all remaining declarations.
+ for i, declList := range declLists {
+ old := g.haveEmbed
+ g.haveEmbed = noders[i].importedEmbed
+ g.decls((*ir.Nodes)(&g.target.Decls), declList)
+ g.haveEmbed = old
+ }
+ g.exprStmtOK = true
+
+ // 4. Run any "later" tasks. Avoid using 'range' so that tasks can
+ // recursively queue further tasks. (Not currently utilized though.)
+ for len(g.laterFuncs) > 0 {
+ fn := g.laterFuncs[0]
+ g.laterFuncs = g.laterFuncs[1:]
+ fn()
+ }
+
+ if base.Flag.W > 1 {
+ for _, n := range g.target.Decls {
+ s := fmt.Sprintf("\nafter noder2 %v", n)
+ ir.Dump(s, n)
+ }
+ }
+
+ for _, p := range noders {
+ // Process linkname and cgo pragmas.
+ p.processPragmas()
+
+ // Double check for any type-checking inconsistencies. This can be
+ // removed once we're confident in IR generation results.
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ g.validate(n)
+ return false
+ })
+ }
+
+ if base.Flag.Complete {
+ for _, n := range g.target.Decls {
+ if fn, ok := n.(*ir.Func); ok {
+ if fn.Body == nil && fn.Nname.Sym().Linkname == "" {
+ base.ErrorfAt(fn.Pos(), "missing function body")
+ }
+ }
+ }
+ }
+
+ // Check for unusual case where noder2 encounters a type error that types2
+ // doesn't check for (e.g. notinheap incompatibility).
+ base.ExitIfErrors()
+
+ typecheck.DeclareUniverse()
+
+ // Create any needed instantiations of generic functions and transform
+ // existing and new functions to use those instantiations.
+ BuildInstantiations()
+
+	// Remove all generic functions from g.target.Decls, since they have been
+	// used for stenciling but are not compiled themselves. Generic functions
+	// will already have been marked for export as appropriate.
+ j := 0
+ for i, decl := range g.target.Decls {
+ if decl.Op() != ir.ODCLFUNC || !decl.Type().HasTParam() {
+ g.target.Decls[j] = g.target.Decls[i]
+ j++
+ }
+ }
+ g.target.Decls = g.target.Decls[:j]
+
+ base.Assertf(len(g.laterFuncs) == 0, "still have %d later funcs", len(g.laterFuncs))
+}
+
+func (g *irgen) unhandled(what string, p poser) {
+ base.FatalfAt(g.pos(p), "unhandled %s: %T", what, p)
+ panic("unreachable")
+}
+
+// delayTransform returns true if we should delay all transforms, because we are
+// creating the nodes for a generic function/method.
+func (g *irgen) delayTransform() bool {
+ return g.topFuncIsGeneric
+}
diff --git a/src/cmd/compile/internal/noder/lex.go b/src/cmd/compile/internal/noder/lex.go
new file mode 100644
index 0000000..66a56a5
--- /dev/null
+++ b/src/cmd/compile/internal/noder/lex.go
@@ -0,0 +1,193 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "strings"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+)
+
+func isSpace(c rune) bool {
+ return c == ' ' || c == '\t' || c == '\n' || c == '\r'
+}
+
+func isQuoted(s string) bool {
+ return len(s) >= 2 && s[0] == '"' && s[len(s)-1] == '"'
+}
+
+const (
+ funcPragmas = ir.Nointerface |
+ ir.Noescape |
+ ir.Norace |
+ ir.Nosplit |
+ ir.Noinline |
+ ir.NoCheckPtr |
+ ir.RegisterParams | // TODO(register args) remove after register abi is working
+ ir.CgoUnsafeArgs |
+ ir.UintptrEscapes |
+ ir.Systemstack |
+ ir.Nowritebarrier |
+ ir.Nowritebarrierrec |
+ ir.Yeswritebarrierrec
+
+ typePragmas = ir.NotInHeap
+)
+
+func pragmaFlag(verb string) ir.PragmaFlag {
+ switch verb {
+ case "go:build":
+ return ir.GoBuildPragma
+ case "go:nointerface":
+ if buildcfg.Experiment.FieldTrack {
+ return ir.Nointerface
+ }
+ case "go:noescape":
+ return ir.Noescape
+ case "go:norace":
+ return ir.Norace
+ case "go:nosplit":
+ return ir.Nosplit | ir.NoCheckPtr // implies NoCheckPtr (see #34972)
+ case "go:noinline":
+ return ir.Noinline
+ case "go:nocheckptr":
+ return ir.NoCheckPtr
+ case "go:systemstack":
+ return ir.Systemstack
+ case "go:nowritebarrier":
+ return ir.Nowritebarrier
+ case "go:nowritebarrierrec":
+ return ir.Nowritebarrierrec | ir.Nowritebarrier // implies Nowritebarrier
+ case "go:yeswritebarrierrec":
+ return ir.Yeswritebarrierrec
+ case "go:cgo_unsafe_args":
+ return ir.CgoUnsafeArgs | ir.NoCheckPtr // implies NoCheckPtr (see #34968)
+ case "go:uintptrescapes":
+ // For the next function declared in the file
+ // any uintptr arguments may be pointer values
+ // converted to uintptr. This directive
+ // ensures that the referenced allocated
+ // object, if any, is retained and not moved
+ // until the call completes, even though from
+ // the types alone it would appear that the
+ // object is no longer needed during the
+ // call. The conversion to uintptr must appear
+ // in the argument list.
+ // Used in syscall/dll_windows.go.
+ return ir.UintptrEscapes
+ case "go:registerparams": // TODO(register args) remove after register abi is working
+ return ir.RegisterParams
+ case "go:notinheap":
+ return ir.NotInHeap
+ }
+ return 0
+}
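+
+// For illustration, //go:uintptrescapes is applied as in syscall/dll_windows.go
+// (a sketch of the pattern, not copied verbatim):
+//
+//	//go:uintptrescapes
+//	func (p *Proc) Call(a ...uintptr) (r1, r2 uintptr, lastErr error)
+//
+// so that objects whose pointers are converted to uintptr in the argument
+// list stay live and unmoved until the call completes.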
+
+// pragcgo is called concurrently if files are parsed concurrently.
+func (p *noder) pragcgo(pos syntax.Pos, text string) {
+ f := pragmaFields(text)
+
+ verb := strings.TrimPrefix(f[0], "go:")
+ f[0] = verb
+
+ switch verb {
+ case "cgo_export_static", "cgo_export_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf(`usage: //go:%s local [remote]`, verb)})
+ return
+ }
+ case "cgo_import_dynamic":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ case len(f) == 3 && !isQuoted(f[1]) && !isQuoted(f[2]):
+ case len(f) == 4 && !isQuoted(f[1]) && !isQuoted(f[2]) && isQuoted(f[3]):
+ f[3] = strings.Trim(f[3], `"`)
+ if buildcfg.GOOS == "aix" && f[3] != "" {
+ // On Aix, library pattern must be "lib.a/object.o"
+ // or "lib.a/libname.so.X"
+ n := strings.Split(f[3], "/")
+ if len(n) != 2 || !strings.HasSuffix(n[0], ".a") || (!strings.HasSuffix(n[1], ".o") && !strings.Contains(n[1], ".so.")) {
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`})
+ return
+ }
+ }
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_dynamic local [remote ["library"]]`})
+ return
+ }
+ case "cgo_import_static":
+ switch {
+ case len(f) == 2 && !isQuoted(f[1]):
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_import_static local`})
+ return
+ }
+ case "cgo_dynamic_linker":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_dynamic_linker "path"`})
+ return
+ }
+ case "cgo_ldflag":
+ switch {
+ case len(f) == 2 && isQuoted(f[1]):
+ f[1] = strings.Trim(f[1], `"`)
+ default:
+ p.error(syntax.Error{Pos: pos, Msg: `usage: //go:cgo_ldflag "arg"`})
+ return
+ }
+ default:
+ return
+ }
+ p.pragcgobuf = append(p.pragcgobuf, f)
+}
+
+// pragmaFields is similar to strings.FieldsFunc(s, isSpace)
+// but does not split when inside double quoted regions and always
+// splits before the start and after the end of a double quoted region.
+// pragmaFields does not recognize escaped quotes. If a quote in s is not
+// closed, the part after the opening quote will not be returned as a field.
+func pragmaFields(s string) []string {
+ var a []string
+ inQuote := false
+ fieldStart := -1 // Set to -1 when looking for start of field.
+ for i, c := range s {
+ switch {
+ case c == '"':
+ if inQuote {
+ inQuote = false
+ a = append(a, s[fieldStart:i+1])
+ fieldStart = -1
+ } else {
+ inQuote = true
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ }
+ fieldStart = i
+ }
+ case !inQuote && isSpace(c):
+ if fieldStart >= 0 {
+ a = append(a, s[fieldStart:i])
+ fieldStart = -1
+ }
+ default:
+ if fieldStart == -1 {
+ fieldStart = i
+ }
+ }
+ }
+ if !inQuote && fieldStart >= 0 { // Last field might end at the end of the string.
+ a = append(a, s[fieldStart:])
+ }
+ return a
+}
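+
+// For illustration (mirroring cases in the test file below):
+//
+//	pragmaFields(`"1 2 " 3 " 4 5"`) == []string{`"1 2 "`, `3`, `" 4 5"`}
+//	pragmaFields(`12"34 `)          == []string{`12`} // unclosed quote dropped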
diff --git a/src/cmd/compile/internal/noder/lex_test.go b/src/cmd/compile/internal/noder/lex_test.go
new file mode 100644
index 0000000..85a3f06
--- /dev/null
+++ b/src/cmd/compile/internal/noder/lex_test.go
@@ -0,0 +1,122 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+
+ "cmd/compile/internal/syntax"
+)
+
+func eq(a, b []string) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := 0; i < len(a); i++ {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func TestPragmaFields(t *testing.T) {
+ var tests = []struct {
+ in string
+ want []string
+ }{
+ {"", []string{}},
+ {" \t ", []string{}},
+ {`""""`, []string{`""`, `""`}},
+ {" a'b'c ", []string{"a'b'c"}},
+ {"1 2 3 4", []string{"1", "2", "3", "4"}},
+ {"\n☺\t☹\n", []string{"☺", "☹"}},
+ {`"1 2 " 3 " 4 5"`, []string{`"1 2 "`, `3`, `" 4 5"`}},
+ {`"1""2 3""4"`, []string{`"1"`, `"2 3"`, `"4"`}},
+ {`12"34"`, []string{`12`, `"34"`}},
+ {`12"34 `, []string{`12`}},
+ }
+
+ for _, tt := range tests {
+ got := pragmaFields(tt.in)
+ if !eq(got, tt.want) {
+ t.Errorf("pragmaFields(%q) = %v; want %v", tt.in, got, tt.want)
+ continue
+ }
+ }
+}
+
+func TestPragcgo(t *testing.T) {
+ type testStruct struct {
+ in string
+ want []string
+ }
+
+ var tests = []testStruct{
+ {`go:cgo_export_dynamic local`, []string{`cgo_export_dynamic`, `local`}},
+ {`go:cgo_export_dynamic local remote`, []string{`cgo_export_dynamic`, `local`, `remote`}},
+ {`go:cgo_export_dynamic local' remote'`, []string{`cgo_export_dynamic`, `local'`, `remote'`}},
+ {`go:cgo_export_static local`, []string{`cgo_export_static`, `local`}},
+ {`go:cgo_export_static local remote`, []string{`cgo_export_static`, `local`, `remote`}},
+ {`go:cgo_export_static local' remote'`, []string{`cgo_export_static`, `local'`, `remote'`}},
+ {`go:cgo_import_dynamic local`, []string{`cgo_import_dynamic`, `local`}},
+ {`go:cgo_import_dynamic local remote`, []string{`cgo_import_dynamic`, `local`, `remote`}},
+ {`go:cgo_import_static local`, []string{`cgo_import_static`, `local`}},
+ {`go:cgo_import_static local'`, []string{`cgo_import_static`, `local'`}},
+ {`go:cgo_dynamic_linker "/path/"`, []string{`cgo_dynamic_linker`, `/path/`}},
+ {`go:cgo_dynamic_linker "/p ath/"`, []string{`cgo_dynamic_linker`, `/p ath/`}},
+ {`go:cgo_ldflag "arg"`, []string{`cgo_ldflag`, `arg`}},
+ {`go:cgo_ldflag "a rg"`, []string{`cgo_ldflag`, `a rg`}},
+ }
+
+ if runtime.GOOS != "aix" {
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "library"`, []string{`cgo_import_dynamic`, `local`, `remote`, `library`}},
+ {`go:cgo_import_dynamic local' remote' "lib rary"`, []string{`cgo_import_dynamic`, `local'`, `remote'`, `lib rary`}},
+ }...)
+ } else {
+ // cgo_import_dynamic with a library is slightly different on AIX
+ // as the library field must follow the pattern [libc.a/object.o].
+ tests = append(tests, []testStruct{
+ {`go:cgo_import_dynamic local remote "lib.a/obj.o"`, []string{`cgo_import_dynamic`, `local`, `remote`, `lib.a/obj.o`}},
+ // This test must fail.
+ {`go:cgo_import_dynamic local' remote' "library"`, []string{`<unknown position>: usage: //go:cgo_import_dynamic local [remote ["lib.a/object.o"]]`}},
+ }...)
+
+ }
+
+ var p noder
+ var nopos syntax.Pos
+ for _, tt := range tests {
+
+ p.err = make(chan syntax.Error)
+ gotch := make(chan [][]string, 1)
+ go func() {
+ p.pragcgobuf = nil
+ p.pragcgo(nopos, tt.in)
+ if p.pragcgobuf != nil {
+ gotch <- p.pragcgobuf
+ }
+ }()
+
+ select {
+ case e := <-p.err:
+ want := tt.want[0]
+ if e.Error() != want {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, e, want)
+ continue
+ }
+ case got := <-gotch:
+ want := [][]string{tt.want}
+ if !reflect.DeepEqual(got, want) {
+ t.Errorf("pragcgo(%q) = %q; want %q", tt.in, got, want)
+ continue
+ }
+ }
+
+ }
+}
diff --git a/src/cmd/compile/internal/noder/linker.go b/src/cmd/compile/internal/noder/linker.go
new file mode 100644
index 0000000..2bc7f7c
--- /dev/null
+++ b/src/cmd/compile/internal/noder/linker.go
@@ -0,0 +1,296 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "io"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/obj"
+)
+
+// This file implements the unified IR linker, which combines the
+// local package's stub data with imported package data to produce a
+// complete export data file. It also rewrites the compiler's
+// extension data sections based on the results of compilation (e.g.,
+// the function inlining cost and linker symbol index assignments).
+//
+// TODO(mdempsky): Using the name "linker" here is confusing, because
+// readers are likely to mistake references to it for cmd/link. But
+// there's a shortage of good names for "something that combines
+// multiple parts into a cohesive whole"... e.g., "assembler" and
+// "compiler" are also already taken.
+
+type linker struct {
+ pw pkgEncoder
+
+ pkgs map[string]int
+ decls map[*types.Sym]int
+}
+
+func (l *linker) relocAll(pr *pkgReader, relocs []relocEnt) []relocEnt {
+ res := make([]relocEnt, len(relocs))
+ for i, rent := range relocs {
+ rent.idx = l.relocIdx(pr, rent.kind, rent.idx)
+ res[i] = rent
+ }
+ return res
+}
+
+func (l *linker) relocIdx(pr *pkgReader, k reloc, idx int) int {
+ assert(pr != nil)
+
+ absIdx := pr.absIdx(k, idx)
+
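+	// Entries in pr.newindex are stored as the bitwise complement of the
+	// relocated index, so a zero entry means "not relocated yet".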
+ if newidx := pr.newindex[absIdx]; newidx != 0 {
+ return ^newidx
+ }
+
+ var newidx int
+ switch k {
+ case relocString:
+ newidx = l.relocString(pr, idx)
+ case relocPkg:
+ newidx = l.relocPkg(pr, idx)
+ case relocObj:
+ newidx = l.relocObj(pr, idx)
+
+ default:
+ // Generic relocations.
+ //
+ // TODO(mdempsky): Deduplicate more sections? In fact, I think
+ // every section could be deduplicated. This would also be easier
+ // if we do external relocations.
+
+ w := l.pw.newEncoderRaw(k)
+ l.relocCommon(pr, &w, k, idx)
+ newidx = w.idx
+ }
+
+ pr.newindex[absIdx] = ^newidx
+
+ return newidx
+}
+
+func (l *linker) relocString(pr *pkgReader, idx int) int {
+ return l.pw.stringIdx(pr.stringIdx(idx))
+}
+
+func (l *linker) relocPkg(pr *pkgReader, idx int) int {
+ path := pr.peekPkgPath(idx)
+
+ if newidx, ok := l.pkgs[path]; ok {
+ return newidx
+ }
+
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ w := l.pw.newEncoder(relocPkg, syncPkgDef)
+ l.pkgs[path] = w.idx
+
+ // TODO(mdempsky): We end up leaving an empty string reference here
+ // from when the package was originally written as "". Probably not
+ // a big deal, but a little annoying. Maybe relocating
+ // cross-references in place is the way to go after all.
+ w.relocs = l.relocAll(pr, r.relocs)
+
+ _ = r.string() // original path
+ w.string(path)
+
+ io.Copy(&w.data, &r.data)
+
+ return w.flush()
+}
+
+func (l *linker) relocObj(pr *pkgReader, idx int) int {
+ path, name, tag := pr.peekObj(idx)
+ sym := types.NewPkg(path, "").Lookup(name)
+
+ if newidx, ok := l.decls[sym]; ok {
+ return newidx
+ }
+
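+	// If this package only has a stub for the object, find the reader for
+	// the package that actually declared it and relocate the full
+	// definition from there instead.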
+ if tag == objStub && path != "builtin" && path != "unsafe" {
+ pri, ok := objReader[sym]
+ if !ok {
+ base.Fatalf("missing reader for %q.%v", path, name)
+ }
+ assert(ok)
+
+ pr = pri.pr
+ idx = pri.idx
+
+ path2, name2, tag2 := pr.peekObj(idx)
+ sym2 := types.NewPkg(path2, "").Lookup(name2)
+ assert(sym == sym2)
+ assert(tag2 != objStub)
+ }
+
+ w := l.pw.newEncoderRaw(relocObj)
+ wext := l.pw.newEncoderRaw(relocObjExt)
+ wname := l.pw.newEncoderRaw(relocName)
+ wdict := l.pw.newEncoderRaw(relocObjDict)
+
+ l.decls[sym] = w.idx
+ assert(wext.idx == w.idx)
+ assert(wname.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ l.relocCommon(pr, &w, relocObj, idx)
+ l.relocCommon(pr, &wname, relocName, idx)
+ l.relocCommon(pr, &wdict, relocObjDict, idx)
+
+ var obj *ir.Name
+ if path == "" {
+ var ok bool
+ obj, ok = sym.Def.(*ir.Name)
+
+ // Generic types and functions and declared constraint types won't
+ // have definitions.
+ // For now, just generically copy their extension data.
+ // TODO(mdempsky): Restore assertion.
+ if !ok && false {
+ base.Fatalf("missing definition for %v", sym)
+ }
+ }
+
+ if obj != nil {
+ wext.sync(syncObject1)
+ switch tag {
+ case objFunc:
+ l.relocFuncExt(&wext, obj)
+ case objType:
+ l.relocTypeExt(&wext, obj)
+ case objVar:
+ l.relocVarExt(&wext, obj)
+ }
+ wext.flush()
+ } else {
+ l.relocCommon(pr, &wext, relocObjExt, idx)
+ }
+
+ return w.idx
+}
+
+func (l *linker) relocCommon(pr *pkgReader, w *encoder, k reloc, idx int) {
+ r := pr.newDecoderRaw(k, idx)
+ w.relocs = l.relocAll(pr, r.relocs)
+ io.Copy(&w.data, &r.data)
+ w.flush()
+}
+
+func (l *linker) pragmaFlag(w *encoder, pragma ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(pragma))
+}
+
+func (l *linker) relocFuncExt(w *encoder, name *ir.Name) {
+ w.sync(syncFuncExt)
+
+ l.pragmaFlag(w, name.Func.Pragma)
+ l.linkname(w, name)
+
+ // Relocated extension data.
+ w.bool(true)
+
+ // Record definition ABI so cross-ABI calls can be direct.
+ // This is important for the performance of calling some
+ // common functions implemented in assembly (e.g., bytealg).
+ w.uint64(uint64(name.Func.ABI))
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ w.string(f.Note)
+ }
+ }
+
+ if inl := name.Func.Inl; w.bool(inl != nil) {
+ w.len(int(inl.Cost))
+ w.bool(inl.CanDelayResults)
+
+ pri, ok := bodyReader[name.Func]
+ assert(ok)
+ w.reloc(relocBody, l.relocIdx(pri.pr, relocBody, pri.idx))
+ }
+
+ w.sync(syncEOF)
+}
+
+func (l *linker) relocTypeExt(w *encoder, name *ir.Name) {
+ w.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ l.pragmaFlag(w, name.Pragma())
+
+ // For type T, export the index of type descriptor symbols of T and *T.
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ))
+ l.lsymIdx(w, "", reflectdata.TypeLinksym(typ.PtrTo()))
+
+ if typ.Kind() != types.TINTER {
+ for _, method := range typ.Methods().Slice() {
+ l.relocFuncExt(w, method.Nname.(*ir.Name))
+ }
+ }
+}
+
+func (l *linker) relocVarExt(w *encoder, name *ir.Name) {
+ w.sync(syncVarExt)
+ l.linkname(w, name)
+}
+
+func (l *linker) linkname(w *encoder, name *ir.Name) {
+ w.sync(syncLinkname)
+
+ linkname := name.Sym().Linkname
+ if !l.lsymIdx(w, linkname, name.Linksym()) {
+ w.string(linkname)
+ }
+}
+
+func (l *linker) lsymIdx(w *encoder, linkname string, lsym *obj.LSym) bool {
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || linkname != "" {
+ w.int64(-1)
+ return false
+ }
+
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.int64(int64(lsym.SymIdx))
+ return true
+}
+
+// @@@ Helpers
+
+// TODO(mdempsky): These should probably be removed. I think they're a
+// smell that the export data format is not yet quite right.
+
+func (pr *pkgDecoder) peekPkgPath(idx int) string {
+ r := pr.newDecoder(relocPkg, idx, syncPkgDef)
+ path := r.string()
+ if path == "" {
+ path = pr.pkgPath
+ }
+ return path
+}
+
+func (pr *pkgDecoder) peekObj(idx int) (string, string, codeObj) {
+ r := pr.newDecoder(relocName, idx, syncObject1)
+ r.sync(syncSym)
+ r.sync(syncPkg)
+ path := pr.peekPkgPath(r.reloc(relocPkg))
+ name := r.string()
+ assert(name != "")
+
+ tag := codeObj(r.code(syncCodeObj))
+
+ return path, name, tag
+}
diff --git a/src/cmd/compile/internal/noder/noder.go b/src/cmd/compile/internal/noder/noder.go
new file mode 100644
index 0000000..17ec877
--- /dev/null
+++ b/src/cmd/compile/internal/noder/noder.go
@@ -0,0 +1,1917 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "errors"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "internal/buildcfg"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strconv"
+ "strings"
+ "unicode"
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+func LoadPackage(filenames []string) {
+ base.Timer.Start("fe", "parse")
+
+ // -G=3 and unified expect generics syntax, but -G=0 does not.
+ supportsGenerics := base.Flag.G != 0 || buildcfg.Experiment.Unified
+
+ mode := syntax.CheckBranches
+ if supportsGenerics {
+ mode |= syntax.AllowGenerics
+ }
+
+ // Limit the number of simultaneously open files.
+ sem := make(chan struct{}, runtime.GOMAXPROCS(0)+10)
+
+ noders := make([]*noder, len(filenames))
+ for i, filename := range filenames {
+ p := noder{
+ err: make(chan syntax.Error),
+ trackScopes: base.Flag.Dwarf,
+ }
+ noders[i] = &p
+
+ filename := filename
+ go func() {
+ sem <- struct{}{}
+ defer func() { <-sem }()
+ defer close(p.err)
+ fbase := syntax.NewFileBase(filename)
+
+ f, err := os.Open(filename)
+ if err != nil {
+ p.error(syntax.Error{Msg: err.Error()})
+ return
+ }
+ defer f.Close()
+
+ p.file, _ = syntax.Parse(fbase, f, p.error, p.pragma, mode) // errors are tracked via p.error
+ }()
+ }
+
+ var lines uint
+ for _, p := range noders {
+ for e := range p.err {
+ p.errorAt(e.Pos, "%s", e.Msg)
+ }
+ if p.file == nil {
+ base.ErrorExit()
+ }
+ lines += p.file.EOF.Line()
+ }
+ base.Timer.AddEvent(int64(lines), "lines")
+
+ if base.Debug.Unified != 0 {
+ unified(noders)
+ return
+ }
+
+ if base.Flag.G != 0 {
+ // Use types2 to type-check and possibly generate IR.
+ check2(noders)
+ return
+ }
+
+ for _, p := range noders {
+ p.node()
+ p.file = nil // release memory
+ }
+
+ if base.SyntaxErrors() != 0 {
+ base.ErrorExit()
+ }
+ types.CheckDclstack()
+
+ for _, p := range noders {
+ p.processPragmas()
+ }
+
+ // Typecheck.
+ types.LocalPkg.Height = myheight
+ typecheck.DeclareUniverse()
+ typecheck.TypecheckAllowed = true
+
+ // Process top-level declarations in phases.
+
+ // Phase 1: const, type, and names and types of funcs.
+ // This will gather all the information about types
+ // and methods but doesn't depend on any of it.
+ //
+ // We also defer type alias declarations until phase 2
+ // to avoid cycles like #18640.
+ // TODO(gri) Remove this again once we have a fix for #25838.
+ //
+	// Phase 2: Variable assignments.
+	//   Checking interface assignments depends on the type information
+	//   gathered in phase 1.
+
+ // Don't use range--typecheck can add closures to Target.Decls.
+ for phase, name := range []string{"top1", "top2"} {
+ base.Timer.Start("fe", "typecheck", name)
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ n := typecheck.Target.Decls[i]
+ op := n.Op()
+
+ // Closure function declarations are typechecked as part of the
+ // closure expression.
+ if fn, ok := n.(*ir.Func); ok && fn.OClosure != nil {
+ continue
+ }
+
+ // We don't actually add ir.ODCL nodes to Target.Decls. Make sure of that.
+ if op == ir.ODCL {
+ base.FatalfAt(n.Pos(), "unexpected top declaration: %v", op)
+ }
+
+ // Identify declarations that should be deferred to the second
+ // iteration.
+ late := op == ir.OAS || op == ir.OAS2 || op == ir.ODCLTYPE && n.(*ir.Decl).X.Alias()
+
+ if late == (phase == 1) {
+ typecheck.Target.Decls[i] = typecheck.Stmt(n)
+ }
+ }
+ }
+
+ // Phase 3: Type check function bodies.
+ // Don't use range--typecheck can add closures to Target.Decls.
+ base.Timer.Start("fe", "typecheck", "func")
+ for i := 0; i < len(typecheck.Target.Decls); i++ {
+ if fn, ok := typecheck.Target.Decls[i].(*ir.Func); ok {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore typecheck %v", fn)
+ ir.Dump(s, fn)
+ }
+ typecheck.FuncBody(fn)
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nafter typecheck %v", fn)
+ ir.Dump(s, fn)
+ }
+ }
+ }
+
+ // Phase 4: Check external declarations.
+ // TODO(mdempsky): This should be handled when type checking their
+ // corresponding ODCL nodes.
+ base.Timer.Start("fe", "typecheck", "externdcls")
+ for i, n := range typecheck.Target.Externs {
+ if n.Op() == ir.ONAME {
+ typecheck.Target.Externs[i] = typecheck.Expr(typecheck.Target.Externs[i])
+ }
+ }
+
+	// Phase 5: With all user code type-checked, it's now safe to verify
+	// map keys and unused dot imports.
+ typecheck.CheckMapKeys()
+ CheckDotImports()
+ base.ExitIfErrors()
+}
+
+func (p *noder) errorAt(pos syntax.Pos, format string, args ...interface{}) {
+ base.ErrorfAt(p.makeXPos(pos), format, args...)
+}
+
+// trimFilename returns the "trimmed" filename of b, which is the
+// absolute filename after applying -trimpath processing. This
+// filename form is suitable for use in object files and export data.
+//
+// If b's filename has already been trimmed (i.e., because it was read
+// in from an imported package's export data), then the filename is
+// returned unchanged.
+func trimFilename(b *syntax.PosBase) string {
+ filename := b.Filename()
+ if !b.Trimmed() {
+ dir := ""
+ if b.IsFileBase() {
+ dir = base.Ctxt.Pathname
+ }
+ filename = objabi.AbsFile(dir, filename, base.Flag.TrimPath)
+ }
+ return filename
+}
+
+// noder transforms package syntax's AST into a Node tree.
+type noder struct {
+ posMap
+
+ file *syntax.File
+ linknames []linkname
+ pragcgobuf [][]string
+ err chan syntax.Error
+ importedUnsafe bool
+ importedEmbed bool
+ trackScopes bool
+
+ funcState *funcState
+}
+
+// funcState tracks all per-function state to make handling nested
+// functions easier.
+type funcState struct {
+ // scopeVars is a stack tracking the number of variables declared in
+ // the current function at the moment each open scope was opened.
+ scopeVars []int
+ marker dwarfgen.ScopeMarker
+
+ lastCloseScopePos syntax.Pos
+}
+
+func (p *noder) funcBody(fn *ir.Func, block *syntax.BlockStmt) {
+ outerFuncState := p.funcState
+ p.funcState = new(funcState)
+ typecheck.StartFuncBody(fn)
+
+ if block != nil {
+ body := p.stmts(block.List)
+ if body == nil {
+ body = []ir.Node{ir.NewBlockStmt(base.Pos, nil)}
+ }
+ fn.Body = body
+
+ base.Pos = p.makeXPos(block.Rbrace)
+ fn.Endlineno = base.Pos
+ }
+
+ typecheck.FinishFuncBody()
+ p.funcState.marker.WriteTo(fn)
+ p.funcState = outerFuncState
+}
+
+func (p *noder) openScope(pos syntax.Pos) {
+ fs := p.funcState
+ types.Markdcl()
+
+ if p.trackScopes {
+ fs.scopeVars = append(fs.scopeVars, len(ir.CurFunc.Dcl))
+ fs.marker.Push(p.makeXPos(pos))
+ }
+}
+
+func (p *noder) closeScope(pos syntax.Pos) {
+ fs := p.funcState
+ fs.lastCloseScopePos = pos
+ types.Popdcl()
+
+ if p.trackScopes {
+ scopeVars := fs.scopeVars[len(fs.scopeVars)-1]
+ fs.scopeVars = fs.scopeVars[:len(fs.scopeVars)-1]
+ if scopeVars == len(ir.CurFunc.Dcl) {
+ // no variables were declared in this scope, so we can retract it.
+ fs.marker.Unpush()
+ } else {
+ fs.marker.Pop(p.makeXPos(pos))
+ }
+ }
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (p *noder) closeAnotherScope() {
+ p.closeScope(p.funcState.lastCloseScopePos)
+}
+
+// linkname records a //go:linkname directive.
+type linkname struct {
+ pos syntax.Pos
+ local string
+ remote string
+}
+
+func (p *noder) node() {
+ p.importedUnsafe = false
+ p.importedEmbed = false
+
+ p.setlineno(p.file.PkgName)
+ mkpackage(p.file.PkgName.Value)
+
+ if pragma, ok := p.file.Pragma.(*pragmas); ok {
+ pragma.Flag &^= ir.GoBuildPragma
+ p.checkUnused(pragma)
+ }
+
+ typecheck.Target.Decls = append(typecheck.Target.Decls, p.decls(p.file.DeclList)...)
+
+ base.Pos = src.NoXPos
+ clearImports()
+}
+
+func (p *noder) processPragmas() {
+ for _, l := range p.linknames {
+ if !p.importedUnsafe {
+ p.errorAt(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+ n := ir.AsNode(typecheck.Lookup(l.local).Def)
+ if n == nil || n.Op() != ir.ONAME {
+ if types.AllowsGoVersion(types.LocalPkg, 1, 18) {
+ p.errorAt(l.pos, "//go:linkname must refer to declared function or variable")
+ }
+ continue
+ }
+ if n.Sym().Linkname != "" {
+ p.errorAt(l.pos, "duplicate //go:linkname for %s", l.local)
+ continue
+ }
+ n.Sym().Linkname = l.remote
+ }
+ typecheck.Target.CgoPragmas = append(typecheck.Target.CgoPragmas, p.pragcgobuf...)
+}
+
+func (p *noder) decls(decls []syntax.Decl) (l []ir.Node) {
+ var cs constState
+
+ for _, decl := range decls {
+ p.setlineno(decl)
+ switch decl := decl.(type) {
+ case *syntax.ImportDecl:
+ p.importDecl(decl)
+
+ case *syntax.VarDecl:
+ l = append(l, p.varDecl(decl)...)
+
+ case *syntax.ConstDecl:
+ l = append(l, p.constDecl(decl, &cs)...)
+
+ case *syntax.TypeDecl:
+ l = append(l, p.typeDecl(decl))
+
+ case *syntax.FuncDecl:
+ l = append(l, p.funcDecl(decl))
+
+ default:
+ panic("unhandled Decl")
+ }
+ }
+
+ return
+}
+
+func (p *noder) importDecl(imp *syntax.ImportDecl) {
+ if imp.Path == nil || imp.Path.Bad {
+ return // avoid follow-on errors if there was a syntax error
+ }
+
+ if pragma, ok := imp.Pragma.(*pragmas); ok {
+ p.checkUnused(pragma)
+ }
+
+ ipkg := importfile(imp)
+ if ipkg == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("phase error in import")
+ }
+ return
+ }
+
+ if ipkg == types.UnsafePkg {
+ p.importedUnsafe = true
+ }
+ if ipkg.Path == "embed" {
+ p.importedEmbed = true
+ }
+
+ var my *types.Sym
+ if imp.LocalPkgName != nil {
+ my = p.name(imp.LocalPkgName)
+ } else {
+ my = typecheck.Lookup(ipkg.Name)
+ }
+
+ pack := ir.NewPkgName(p.pos(imp), my, ipkg)
+
+ switch my.Name {
+ case ".":
+ importDot(pack)
+ return
+ case "init":
+ base.ErrorfAt(pack.Pos(), "cannot import package as init - init must be a func")
+ return
+ case "_":
+ return
+ }
+ if my.Def != nil {
+ typecheck.Redeclared(pack.Pos(), my, "as imported package name")
+ }
+ my.Def = pack
+ my.Lastlineno = pack.Pos()
+ my.Block = 1 // at top level
+}
+
+func (p *noder) varDecl(decl *syntax.VarDecl) []ir.Node {
+ names := p.declNames(ir.ONAME, decl.NameList)
+ typ := p.typeExprOrNil(decl.Type)
+ exprs := p.exprList(decl.Values)
+
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
+ varEmbed(p.makeXPos, names[0], decl, pragma, p.importedEmbed)
+ p.checkUnused(pragma)
+ }
+
+ var init []ir.Node
+ p.setlineno(decl)
+
+ if len(names) > 1 && len(exprs) == 1 {
+ as2 := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, exprs)
+ for _, v := range names {
+ as2.Lhs.Append(v)
+ typecheck.Declare(v, typecheck.DeclContext)
+ v.Ntype = typ
+ v.Defn = as2
+ if ir.CurFunc != nil {
+ init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+ }
+ }
+
+ return append(init, as2)
+ }
+
+ for i, v := range names {
+ var e ir.Node
+ if i < len(exprs) {
+ e = exprs[i]
+ }
+
+ typecheck.Declare(v, typecheck.DeclContext)
+ v.Ntype = typ
+
+ if ir.CurFunc != nil {
+ init = append(init, ir.NewDecl(base.Pos, ir.ODCL, v))
+ }
+ as := ir.NewAssignStmt(base.Pos, v, e)
+ init = append(init, as)
+ if e != nil || ir.CurFunc == nil {
+ v.Defn = as
+ }
+ }
+
+ if len(exprs) != 0 && len(names) != len(exprs) {
+ base.Errorf("assignment mismatch: %d variables but %d values", len(names), len(exprs))
+ }
+
+ return init
+}
+
+// constState tracks state between constant specifiers within a
+// declaration group. This state is kept separate from noder so nested
+// constant declarations are handled correctly (e.g., issue 15550).
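+//
+// For example (illustrative), in
+//
+//	const (
+//		a = iota // uses the group's values expression with iota == 0
+//		b        // reuses the same expression with iota == 1
+//	)
+//
+// both specifiers share one constState, which carries the values
+// expression and the running iota between them.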
+type constState struct {
+ group *syntax.Group
+ typ ir.Ntype
+ values syntax.Expr
+ iota int64
+}
+
+func (p *noder) constDecl(decl *syntax.ConstDecl, cs *constState) []ir.Node {
+ if decl.Group == nil || decl.Group != cs.group {
+ *cs = constState{
+ group: decl.Group,
+ }
+ }
+
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
+ p.checkUnused(pragma)
+ }
+
+ names := p.declNames(ir.OLITERAL, decl.NameList)
+ typ := p.typeExprOrNil(decl.Type)
+
+ if decl.Values != nil {
+ cs.typ, cs.values = typ, decl.Values
+ } else {
+ if typ != nil {
+ base.Errorf("const declaration cannot have type without expression")
+ }
+ typ = cs.typ
+ }
+ values := p.exprList(cs.values)
+
+ nn := make([]ir.Node, 0, len(names))
+ for i, n := range names {
+ if i >= len(values) {
+ base.Errorf("missing value in const declaration")
+ break
+ }
+
+ v := values[i]
+ if decl.Values == nil {
+ ir.Visit(v, func(v ir.Node) {
+ if ir.HasUniquePos(v) {
+ v.SetPos(n.Pos())
+ }
+ })
+ }
+
+ typecheck.Declare(n, typecheck.DeclContext)
+
+ n.Ntype = typ
+ n.Defn = v
+ n.SetIota(cs.iota)
+
+ nn = append(nn, ir.NewDecl(p.pos(decl), ir.ODCLCONST, n))
+ }
+
+ if len(values) > len(names) {
+ base.Errorf("extra expression in const declaration")
+ }
+
+ cs.iota++
+
+ return nn
+}
+
+func (p *noder) typeDecl(decl *syntax.TypeDecl) ir.Node {
+ n := p.declName(ir.OTYPE, decl.Name)
+ typecheck.Declare(n, typecheck.DeclContext)
+
+ // decl.Type may be nil but in that case we got a syntax error during parsing
+ typ := p.typeExprOrNil(decl.Type)
+
+ n.Ntype = typ
+ n.SetAlias(decl.Alias)
+ if pragma, ok := decl.Pragma.(*pragmas); ok {
+ if !decl.Alias {
+ n.SetPragma(pragma.Flag & typePragmas)
+ pragma.Flag &^= typePragmas
+ }
+ p.checkUnused(pragma)
+ }
+
+ nod := ir.NewDecl(p.pos(decl), ir.ODCLTYPE, n)
+ if n.Alias() && !types.AllowsGoVersion(types.LocalPkg, 1, 9) {
+ base.ErrorfAt(nod.Pos(), "type aliases only supported as of -lang=go1.9")
+ }
+ return nod
+}
+
+func (p *noder) declNames(op ir.Op, names []*syntax.Name) []*ir.Name {
+ nodes := make([]*ir.Name, 0, len(names))
+ for _, name := range names {
+ nodes = append(nodes, p.declName(op, name))
+ }
+ return nodes
+}
+
+func (p *noder) declName(op ir.Op, name *syntax.Name) *ir.Name {
+ return ir.NewDeclNameAt(p.pos(name), op, p.name(name))
+}
+
+func (p *noder) funcDecl(fun *syntax.FuncDecl) ir.Node {
+ name := p.name(fun.Name)
+ t := p.signature(fun.Recv, fun.Type)
+ f := ir.NewFunc(p.pos(fun))
+
+ if fun.Recv == nil {
+ if name.Name == "init" {
+ name = renameinit()
+ if len(t.Params) > 0 || len(t.Results) > 0 {
+ base.ErrorfAt(f.Pos(), "func init must have no arguments and no return values")
+ }
+ typecheck.Target.Inits = append(typecheck.Target.Inits, f)
+ }
+
+ if types.LocalPkg.Name == "main" && name.Name == "main" {
+ if len(t.Params) > 0 || len(t.Results) > 0 {
+ base.ErrorfAt(f.Pos(), "func main must have no arguments and no return values")
+ }
+ }
+ } else {
+ f.Shortname = name
+ name = ir.BlankNode.Sym() // filled in by tcFunc
+ }
+
+ f.Nname = ir.NewNameAt(p.pos(fun.Name), name)
+ f.Nname.Func = f
+ f.Nname.Defn = f
+ f.Nname.Ntype = t
+
+ if pragma, ok := fun.Pragma.(*pragmas); ok {
+ f.Pragma = pragma.Flag & funcPragmas
+ if pragma.Flag&ir.Systemstack != 0 && pragma.Flag&ir.Nosplit != 0 {
+ base.ErrorfAt(f.Pos(), "go:nosplit and go:systemstack cannot be combined")
+ }
+ pragma.Flag &^= funcPragmas
+ p.checkUnused(pragma)
+ }
+
+ if fun.Recv == nil {
+ typecheck.Declare(f.Nname, ir.PFUNC)
+ }
+
+ p.funcBody(f, fun.Body)
+
+ if fun.Body != nil {
+ if f.Pragma&ir.Noescape != 0 {
+ base.ErrorfAt(f.Pos(), "can only use //go:noescape with external func implementations")
+ }
+ } else {
+ if base.Flag.Complete || strings.HasPrefix(ir.FuncName(f), "init.") {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ isLinknamed := false
+ for _, n := range p.linknames {
+ if ir.FuncName(f) == n.local {
+ isLinknamed = true
+ break
+ }
+ }
+ if !isLinknamed {
+ base.ErrorfAt(f.Pos(), "missing function body")
+ }
+ }
+ }
+
+ return f
+}
+
+func (p *noder) signature(recv *syntax.Field, typ *syntax.FuncType) *ir.FuncType {
+ var rcvr *ir.Field
+ if recv != nil {
+ rcvr = p.param(recv, false, false)
+ }
+ return ir.NewFuncType(p.pos(typ), rcvr,
+ p.params(typ.ParamList, true),
+ p.params(typ.ResultList, false))
+}
+
+func (p *noder) params(params []*syntax.Field, dddOk bool) []*ir.Field {
+ nodes := make([]*ir.Field, 0, len(params))
+ for i, param := range params {
+ p.setlineno(param)
+ nodes = append(nodes, p.param(param, dddOk, i+1 == len(params)))
+ if i > 0 && params[i].Type == params[i-1].Type {
+ nodes[i].Ntype = nodes[i-1].Ntype
+ }
+ }
+ return nodes
+}
+
+func (p *noder) param(param *syntax.Field, dddOk, final bool) *ir.Field {
+ var name *types.Sym
+ if param.Name != nil {
+ name = p.name(param.Name)
+ }
+
+ typ := p.typeExpr(param.Type)
+ n := ir.NewField(p.pos(param), name, typ, nil)
+
+ // rewrite ...T parameter
+ if typ, ok := typ.(*ir.SliceType); ok && typ.DDD {
+ if !dddOk {
+ // We mark these as syntax errors to get automatic elimination
+ // of multiple such errors per line (see ErrorfAt in subr.go).
+ base.Errorf("syntax error: cannot use ... in receiver or result parameter list")
+ } else if !final {
+ if param.Name == nil {
+ base.Errorf("syntax error: cannot use ... with non-final parameter")
+ } else {
+ p.errorAt(param.Name.Pos(), "syntax error: cannot use ... with non-final parameter %s", param.Name.Value)
+ }
+ }
+ typ.DDD = false
+ n.IsDDD = true
+ }
+
+ return n
+}
+
+func (p *noder) exprList(expr syntax.Expr) []ir.Node {
+ switch expr := expr.(type) {
+ case nil:
+ return nil
+ case *syntax.ListExpr:
+ return p.exprs(expr.ElemList)
+ default:
+ return []ir.Node{p.expr(expr)}
+ }
+}
+
+func (p *noder) exprs(exprs []syntax.Expr) []ir.Node {
+ nodes := make([]ir.Node, 0, len(exprs))
+ for _, expr := range exprs {
+ nodes = append(nodes, p.expr(expr))
+ }
+ return nodes
+}
+
+func (p *noder) expr(expr syntax.Expr) ir.Node {
+ p.setlineno(expr)
+ switch expr := expr.(type) {
+ case nil, *syntax.BadExpr:
+ return nil
+ case *syntax.Name:
+ return p.mkname(expr)
+ case *syntax.BasicLit:
+ n := ir.NewBasicLit(p.pos(expr), p.basicLit(expr))
+ if expr.Kind == syntax.RuneLit {
+ n.SetType(types.UntypedRune)
+ }
+ n.SetDiag(expr.Bad || n.Val().Kind() == constant.Unknown) // avoid follow-on errors if there was a syntax error
+ return n
+ case *syntax.CompositeLit:
+ n := ir.NewCompLitExpr(p.pos(expr), ir.OCOMPLIT, p.typeExpr(expr.Type), nil)
+ l := p.exprs(expr.ElemList)
+ for i, e := range l {
+ l[i] = p.wrapname(expr.ElemList[i], e)
+ }
+ n.List = l
+ base.Pos = p.makeXPos(expr.Rbrace)
+ return n
+ case *syntax.KeyValueExpr:
+ // use position of expr.Key rather than of expr (which has position of ':')
+ return ir.NewKeyExpr(p.pos(expr.Key), p.expr(expr.Key), p.wrapname(expr.Value, p.expr(expr.Value)))
+ case *syntax.FuncLit:
+ return p.funcLit(expr)
+ case *syntax.ParenExpr:
+ return ir.NewParenExpr(p.pos(expr), p.expr(expr.X))
+ case *syntax.SelectorExpr:
+ // parser.new_dotname
+ obj := p.expr(expr.X)
+ if obj.Op() == ir.OPACK {
+ pack := obj.(*ir.PkgName)
+ pack.Used = true
+ return importName(pack.Pkg.Lookup(expr.Sel.Value))
+ }
+ n := ir.NewSelectorExpr(base.Pos, ir.OXDOT, obj, p.name(expr.Sel))
+ n.SetPos(p.pos(expr)) // lineno may have been changed by p.expr(expr.X)
+ return n
+ case *syntax.IndexExpr:
+ return ir.NewIndexExpr(p.pos(expr), p.expr(expr.X), p.expr(expr.Index))
+ case *syntax.SliceExpr:
+ op := ir.OSLICE
+ if expr.Full {
+ op = ir.OSLICE3
+ }
+ x := p.expr(expr.X)
+ var index [3]ir.Node
+ for i, n := range &expr.Index {
+ if n != nil {
+ index[i] = p.expr(n)
+ }
+ }
+ return ir.NewSliceExpr(p.pos(expr), op, x, index[0], index[1], index[2])
+ case *syntax.AssertExpr:
+ return ir.NewTypeAssertExpr(p.pos(expr), p.expr(expr.X), p.typeExpr(expr.Type))
+ case *syntax.Operation:
+ if expr.Op == syntax.Add && expr.Y != nil {
+ return p.sum(expr)
+ }
+ x := p.expr(expr.X)
+ if expr.Y == nil {
+ pos, op := p.pos(expr), p.unOp(expr.Op)
+ switch op {
+ case ir.OADDR:
+ return typecheck.NodAddrAt(pos, x)
+ case ir.ODEREF:
+ return ir.NewStarExpr(pos, x)
+ }
+ return ir.NewUnaryExpr(pos, op, x)
+ }
+
+ pos, op, y := p.pos(expr), p.binOp(expr.Op), p.expr(expr.Y)
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return ir.NewLogicalExpr(pos, op, x, y)
+ }
+ return ir.NewBinaryExpr(pos, op, x, y)
+ case *syntax.CallExpr:
+ n := ir.NewCallExpr(p.pos(expr), ir.OCALL, p.expr(expr.Fun), p.exprs(expr.ArgList))
+ n.IsDDD = expr.HasDots
+ return n
+
+ case *syntax.ArrayType:
+ var len ir.Node
+ if expr.Len != nil {
+ len = p.expr(expr.Len)
+ }
+ return ir.NewArrayType(p.pos(expr), len, p.typeExpr(expr.Elem))
+ case *syntax.SliceType:
+ return ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
+ case *syntax.DotsType:
+ t := ir.NewSliceType(p.pos(expr), p.typeExpr(expr.Elem))
+ t.DDD = true
+ return t
+ case *syntax.StructType:
+ return p.structType(expr)
+ case *syntax.InterfaceType:
+ return p.interfaceType(expr)
+ case *syntax.FuncType:
+ return p.signature(nil, expr)
+ case *syntax.MapType:
+ return ir.NewMapType(p.pos(expr),
+ p.typeExpr(expr.Key), p.typeExpr(expr.Value))
+ case *syntax.ChanType:
+ return ir.NewChanType(p.pos(expr),
+ p.typeExpr(expr.Elem), p.chanDir(expr.Dir))
+
+ case *syntax.TypeSwitchGuard:
+ var tag *ir.Ident
+ if expr.Lhs != nil {
+ tag = ir.NewIdent(p.pos(expr.Lhs), p.name(expr.Lhs))
+ if ir.IsBlank(tag) {
+ base.Errorf("invalid variable name %v in type switch", tag)
+ }
+ }
+ return ir.NewTypeSwitchGuard(p.pos(expr), tag, p.expr(expr.X))
+ }
+ panic("unhandled Expr")
+}
+
+// sum efficiently handles very large summation expressions (such as
+// in issue #16394). In particular, it avoids left recursion and
+// collapses string literals.
+func (p *noder) sum(x syntax.Expr) ir.Node {
+ // While we need to handle long sums with asymptotic
+ // efficiency, the vast majority of sums are very small: ~95%
+ // have only 2 or 3 operands, and ~99% of string literals are
+ // never concatenated.
+
+ adds := make([]*syntax.Operation, 0, 2)
+ for {
+ add, ok := x.(*syntax.Operation)
+ if !ok || add.Op != syntax.Add || add.Y == nil {
+ break
+ }
+ adds = append(adds, add)
+ x = add.X
+ }
+
+ // nstr is the current rightmost string literal in the
+ // summation (if any), and chunks holds its accumulated
+ // substrings.
+ //
+ // Consider the expression x + "a" + "b" + "c" + y. When we
+ // reach the string literal "a", we assign nstr to point to
+ // its corresponding Node and initialize chunks to {"a"}.
+ // Visiting the subsequent string literals "b" and "c", we
+ // simply append their values to chunks. Finally, when we
+ // reach the non-constant operand y, we'll join chunks to form
+ // "abc" and reassign the "a" string literal's value.
+ //
+ // N.B., we need to be careful about named string constants
+ // (indicated by Sym != nil) because 1) we can't modify their
+ // value, as doing so would affect other uses of the string
+ // constant, and 2) they may have types, which we need to
+ // handle correctly. For now, we avoid these problems by
+ // treating named string constants the same as non-constant
+ // operands.
+ var nstr ir.Node
+ chunks := make([]string, 0, 1)
+
+ n := p.expr(x)
+ if ir.IsConst(n, constant.String) && n.Sym() == nil {
+ nstr = n
+ chunks = append(chunks, ir.StringVal(nstr))
+ }
+
+ for i := len(adds) - 1; i >= 0; i-- {
+ add := adds[i]
+
+ r := p.expr(add.Y)
+ if ir.IsConst(r, constant.String) && r.Sym() == nil {
+ if nstr != nil {
+ // Collapse r into nstr instead of adding to n.
+ chunks = append(chunks, ir.StringVal(r))
+ continue
+ }
+
+ nstr = r
+ chunks = append(chunks, ir.StringVal(nstr))
+ } else {
+ if len(chunks) > 1 {
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
+ }
+ nstr = nil
+ chunks = chunks[:0]
+ }
+ n = ir.NewBinaryExpr(p.pos(add), ir.OADD, n, r)
+ }
+ if len(chunks) > 1 {
+ nstr.SetVal(constant.MakeString(strings.Join(chunks, "")))
+ }
+
+ return n
+}
+
+func (p *noder) typeExpr(typ syntax.Expr) ir.Ntype {
+ // TODO(mdempsky): Be stricter? typecheck should handle errors anyway.
+ n := p.expr(typ)
+ if n == nil {
+ return nil
+ }
+ return n.(ir.Ntype)
+}
+
+func (p *noder) typeExprOrNil(typ syntax.Expr) ir.Ntype {
+ if typ != nil {
+ return p.typeExpr(typ)
+ }
+ return nil
+}
+
+func (p *noder) chanDir(dir syntax.ChanDir) types.ChanDir {
+ switch dir {
+ case 0:
+ return types.Cboth
+ case syntax.SendOnly:
+ return types.Csend
+ case syntax.RecvOnly:
+ return types.Crecv
+ }
+ panic("unhandled ChanDir")
+}
+
+func (p *noder) structType(expr *syntax.StructType) ir.Node {
+ l := make([]*ir.Field, 0, len(expr.FieldList))
+ for i, field := range expr.FieldList {
+ p.setlineno(field)
+ var n *ir.Field
+ if field.Name == nil {
+ n = p.embedded(field.Type)
+ } else {
+ n = ir.NewField(p.pos(field), p.name(field.Name), p.typeExpr(field.Type), nil)
+ }
+ if i > 0 && expr.FieldList[i].Type == expr.FieldList[i-1].Type {
+ n.Ntype = l[i-1].Ntype
+ }
+ if i < len(expr.TagList) && expr.TagList[i] != nil {
+ n.Note = constant.StringVal(p.basicLit(expr.TagList[i]))
+ }
+ l = append(l, n)
+ }
+
+ p.setlineno(expr)
+ return ir.NewStructType(p.pos(expr), l)
+}
+
+func (p *noder) interfaceType(expr *syntax.InterfaceType) ir.Node {
+ l := make([]*ir.Field, 0, len(expr.MethodList))
+ for _, method := range expr.MethodList {
+ p.setlineno(method)
+ var n *ir.Field
+ if method.Name == nil {
+ n = ir.NewField(p.pos(method), nil, importName(p.packname(method.Type)).(ir.Ntype), nil)
+ } else {
+ mname := p.name(method.Name)
+ if mname.IsBlank() {
+ base.Errorf("methods must have a unique non-blank name")
+ continue
+ }
+ sig := p.typeExpr(method.Type).(*ir.FuncType)
+ sig.Recv = fakeRecv()
+ n = ir.NewField(p.pos(method), mname, sig, nil)
+ }
+ l = append(l, n)
+ }
+
+ return ir.NewInterfaceType(p.pos(expr), l)
+}
+
+func (p *noder) packname(expr syntax.Expr) *types.Sym {
+ switch expr := expr.(type) {
+ case *syntax.Name:
+ name := p.name(expr)
+ if n := oldname(name); n.Name() != nil && n.Name().PkgName != nil {
+ n.Name().PkgName.Used = true
+ }
+ return name
+ case *syntax.SelectorExpr:
+ name := p.name(expr.X.(*syntax.Name))
+ def := ir.AsNode(name.Def)
+ if def == nil {
+ base.Errorf("undefined: %v", name)
+ return name
+ }
+ var pkg *types.Pkg
+ if def.Op() != ir.OPACK {
+ base.Errorf("%v is not a package", name)
+ pkg = types.LocalPkg
+ } else {
+ def := def.(*ir.PkgName)
+ def.Used = true
+ pkg = def.Pkg
+ }
+ return pkg.Lookup(expr.Sel.Value)
+ }
+ panic(fmt.Sprintf("unexpected packname: %#v", expr))
+}
+
+func (p *noder) embedded(typ syntax.Expr) *ir.Field {
+ pos := p.pos(syntax.StartPos(typ))
+
+ op, isStar := typ.(*syntax.Operation)
+ if isStar {
+ if op.Op != syntax.Mul || op.Y != nil {
+ panic("unexpected Operation")
+ }
+ typ = op.X
+ }
+
+ sym := p.packname(typ)
+ n := ir.NewField(pos, typecheck.Lookup(sym.Name), importName(sym).(ir.Ntype), nil)
+ n.Embedded = true
+
+ if isStar {
+ n.Ntype = ir.NewStarExpr(pos, n.Ntype)
+ }
+ return n
+}
+
+func (p *noder) stmts(stmts []syntax.Stmt) []ir.Node {
+ return p.stmtsFall(stmts, false)
+}
+
+func (p *noder) stmtsFall(stmts []syntax.Stmt, fallOK bool) []ir.Node {
+ var nodes []ir.Node
+ for i, stmt := range stmts {
+ s := p.stmtFall(stmt, fallOK && i+1 == len(stmts))
+ if s == nil {
+ } else if s.Op() == ir.OBLOCK && len(s.(*ir.BlockStmt).List) > 0 {
+ // Inline non-empty block.
+ // Empty blocks must be preserved for CheckReturn.
+ nodes = append(nodes, s.(*ir.BlockStmt).List...)
+ } else {
+ nodes = append(nodes, s)
+ }
+ }
+ return nodes
+}
+
+func (p *noder) stmt(stmt syntax.Stmt) ir.Node {
+ return p.stmtFall(stmt, false)
+}
+
+func (p *noder) stmtFall(stmt syntax.Stmt, fallOK bool) ir.Node {
+ p.setlineno(stmt)
+ switch stmt := stmt.(type) {
+ case nil, *syntax.EmptyStmt:
+ return nil
+ case *syntax.LabeledStmt:
+ return p.labeledStmt(stmt, fallOK)
+ case *syntax.BlockStmt:
+ l := p.blockStmt(stmt)
+ if len(l) == 0 {
+ // TODO(mdempsky): Line number?
+ return ir.NewBlockStmt(base.Pos, nil)
+ }
+ return ir.NewBlockStmt(src.NoXPos, l)
+ case *syntax.ExprStmt:
+ return p.wrapname(stmt, p.expr(stmt.X))
+ case *syntax.SendStmt:
+ return ir.NewSendStmt(p.pos(stmt), p.expr(stmt.Chan), p.expr(stmt.Value))
+ case *syntax.DeclStmt:
+ return ir.NewBlockStmt(src.NoXPos, p.decls(stmt.DeclList))
+ case *syntax.AssignStmt:
+ if stmt.Rhs == nil {
+ pos := p.pos(stmt)
+ n := ir.NewAssignOpStmt(pos, p.binOp(stmt.Op), p.expr(stmt.Lhs), ir.NewBasicLit(pos, one))
+ n.IncDec = true
+ return n
+ }
+
+ if stmt.Op != 0 && stmt.Op != syntax.Def {
+ n := ir.NewAssignOpStmt(p.pos(stmt), p.binOp(stmt.Op), p.expr(stmt.Lhs), p.expr(stmt.Rhs))
+ return n
+ }
+
+ rhs := p.exprList(stmt.Rhs)
+ if list, ok := stmt.Lhs.(*syntax.ListExpr); ok && len(list.ElemList) != 1 || len(rhs) != 1 {
+ n := ir.NewAssignListStmt(p.pos(stmt), ir.OAS2, nil, nil)
+ n.Def = stmt.Op == syntax.Def
+ n.Lhs = p.assignList(stmt.Lhs, n, n.Def)
+ n.Rhs = rhs
+ return n
+ }
+
+ n := ir.NewAssignStmt(p.pos(stmt), nil, nil)
+ n.Def = stmt.Op == syntax.Def
+ n.X = p.assignList(stmt.Lhs, n, n.Def)[0]
+ n.Y = rhs[0]
+ return n
+
+ case *syntax.BranchStmt:
+ var op ir.Op
+ switch stmt.Tok {
+ case syntax.Break:
+ op = ir.OBREAK
+ case syntax.Continue:
+ op = ir.OCONTINUE
+ case syntax.Fallthrough:
+ if !fallOK {
+ base.Errorf("fallthrough statement out of place")
+ }
+ op = ir.OFALL
+ case syntax.Goto:
+ op = ir.OGOTO
+ default:
+ panic("unhandled BranchStmt")
+ }
+ var sym *types.Sym
+ if stmt.Label != nil {
+ sym = p.name(stmt.Label)
+ }
+ return ir.NewBranchStmt(p.pos(stmt), op, sym)
+ case *syntax.CallStmt:
+ var op ir.Op
+ switch stmt.Tok {
+ case syntax.Defer:
+ op = ir.ODEFER
+ case syntax.Go:
+ op = ir.OGO
+ default:
+ panic("unhandled CallStmt")
+ }
+ return ir.NewGoDeferStmt(p.pos(stmt), op, p.expr(stmt.Call))
+ case *syntax.ReturnStmt:
+ n := ir.NewReturnStmt(p.pos(stmt), p.exprList(stmt.Results))
+ if len(n.Results) == 0 && ir.CurFunc != nil {
+ for _, ln := range ir.CurFunc.Dcl {
+ if ln.Class == ir.PPARAM {
+ continue
+ }
+ if ln.Class != ir.PPARAMOUT {
+ break
+ }
+ if ln.Sym().Def != ln {
+ base.Errorf("%s is shadowed during return", ln.Sym().Name)
+ }
+ }
+ }
+ return n
+ case *syntax.IfStmt:
+ return p.ifStmt(stmt)
+ case *syntax.ForStmt:
+ return p.forStmt(stmt)
+ case *syntax.SwitchStmt:
+ return p.switchStmt(stmt)
+ case *syntax.SelectStmt:
+ return p.selectStmt(stmt)
+ }
+ panic("unhandled Stmt")
+}
+
+func (p *noder) assignList(expr syntax.Expr, defn ir.InitNode, colas bool) []ir.Node {
+ if !colas {
+ return p.exprList(expr)
+ }
+
+ var exprs []syntax.Expr
+ if list, ok := expr.(*syntax.ListExpr); ok {
+ exprs = list.ElemList
+ } else {
+ exprs = []syntax.Expr{expr}
+ }
+
+ res := make([]ir.Node, len(exprs))
+ seen := make(map[*types.Sym]bool, len(exprs))
+
+ newOrErr := false
+ for i, expr := range exprs {
+ p.setlineno(expr)
+ res[i] = ir.BlankNode
+
+ name, ok := expr.(*syntax.Name)
+ if !ok {
+ p.errorAt(expr.Pos(), "non-name %v on left side of :=", p.expr(expr))
+ newOrErr = true
+ continue
+ }
+
+ sym := p.name(name)
+ if sym.IsBlank() {
+ continue
+ }
+
+ if seen[sym] {
+ p.errorAt(expr.Pos(), "%v repeated on left side of :=", sym)
+ newOrErr = true
+ continue
+ }
+ seen[sym] = true
+
+ if sym.Block == types.Block {
+ res[i] = oldname(sym)
+ continue
+ }
+
+ newOrErr = true
+ n := typecheck.NewName(sym)
+ typecheck.Declare(n, typecheck.DeclContext)
+ n.Defn = defn
+ defn.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, n))
+ res[i] = n
+ }
+
+ if !newOrErr {
+ base.ErrorfAt(defn.Pos(), "no new variables on left side of :=")
+ }
+ return res
+}
+
+func (p *noder) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
+ p.openScope(stmt.Pos())
+ nodes := p.stmts(stmt.List)
+ p.closeScope(stmt.Rbrace)
+ return nodes
+}
+
+func (p *noder) ifStmt(stmt *syntax.IfStmt) ir.Node {
+ p.openScope(stmt.Pos())
+ init := p.stmt(stmt.Init)
+ n := ir.NewIfStmt(p.pos(stmt), p.expr(stmt.Cond), p.blockStmt(stmt.Then), nil)
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+ if stmt.Else != nil {
+ e := p.stmt(stmt.Else)
+ if e.Op() == ir.OBLOCK {
+ e := e.(*ir.BlockStmt)
+ n.Else = e.List
+ } else {
+ n.Else = []ir.Node{e}
+ }
+ }
+ p.closeAnotherScope()
+ return n
+}
+
+func (p *noder) forStmt(stmt *syntax.ForStmt) ir.Node {
+ p.openScope(stmt.Pos())
+ if r, ok := stmt.Init.(*syntax.RangeClause); ok {
+ if stmt.Cond != nil || stmt.Post != nil {
+ panic("unexpected RangeClause")
+ }
+
+ n := ir.NewRangeStmt(p.pos(r), nil, nil, p.expr(r.X), nil)
+ if r.Lhs != nil {
+ n.Def = r.Def
+ lhs := p.assignList(r.Lhs, n, n.Def)
+ n.Key = lhs[0]
+ if len(lhs) > 1 {
+ n.Value = lhs[1]
+ }
+ }
+ n.Body = p.blockStmt(stmt.Body)
+ p.closeAnotherScope()
+ return n
+ }
+
+ n := ir.NewForStmt(p.pos(stmt), p.stmt(stmt.Init), p.expr(stmt.Cond), p.stmt(stmt.Post), p.blockStmt(stmt.Body))
+ p.closeAnotherScope()
+ return n
+}
+
+func (p *noder) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
+ p.openScope(stmt.Pos())
+
+ init := p.stmt(stmt.Init)
+ n := ir.NewSwitchStmt(p.pos(stmt), p.expr(stmt.Tag), nil)
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+
+ var tswitch *ir.TypeSwitchGuard
+ if l := n.Tag; l != nil && l.Op() == ir.OTYPESW {
+ tswitch = l.(*ir.TypeSwitchGuard)
+ }
+ n.Cases = p.caseClauses(stmt.Body, tswitch, stmt.Rbrace)
+
+ p.closeScope(stmt.Rbrace)
+ return n
+}
+
+func (p *noder) caseClauses(clauses []*syntax.CaseClause, tswitch *ir.TypeSwitchGuard, rbrace syntax.Pos) []*ir.CaseClause {
+ nodes := make([]*ir.CaseClause, 0, len(clauses))
+ for i, clause := range clauses {
+ p.setlineno(clause)
+ if i > 0 {
+ p.closeScope(clause.Pos())
+ }
+ p.openScope(clause.Pos())
+
+ n := ir.NewCaseStmt(p.pos(clause), p.exprList(clause.Cases), nil)
+ if tswitch != nil && tswitch.Tag != nil {
+ nn := typecheck.NewName(tswitch.Tag.Sym())
+ typecheck.Declare(nn, typecheck.DeclContext)
+ n.Var = nn
+			// keep track of the instances for reporting unused type switch variables
+ nn.Defn = tswitch
+ }
+
+ // Trim trailing empty statements. We omit them from
+ // the Node AST anyway, and it's easier to identify
+ // out-of-place fallthrough statements without them.
+ body := clause.Body
+ for len(body) > 0 {
+ if _, ok := body[len(body)-1].(*syntax.EmptyStmt); !ok {
+ break
+ }
+ body = body[:len(body)-1]
+ }
+
+ n.Body = p.stmtsFall(body, true)
+ if l := len(n.Body); l > 0 && n.Body[l-1].Op() == ir.OFALL {
+ if tswitch != nil {
+ base.Errorf("cannot fallthrough in type switch")
+ }
+ if i+1 == len(clauses) {
+ base.Errorf("cannot fallthrough final case in switch")
+ }
+ }
+
+ nodes = append(nodes, n)
+ }
+ if len(clauses) > 0 {
+ p.closeScope(rbrace)
+ }
+ return nodes
+}
+
+func (p *noder) selectStmt(stmt *syntax.SelectStmt) ir.Node {
+ return ir.NewSelectStmt(p.pos(stmt), p.commClauses(stmt.Body, stmt.Rbrace))
+}
+
+func (p *noder) commClauses(clauses []*syntax.CommClause, rbrace syntax.Pos) []*ir.CommClause {
+ nodes := make([]*ir.CommClause, len(clauses))
+ for i, clause := range clauses {
+ p.setlineno(clause)
+ if i > 0 {
+ p.closeScope(clause.Pos())
+ }
+ p.openScope(clause.Pos())
+
+ nodes[i] = ir.NewCommStmt(p.pos(clause), p.stmt(clause.Comm), p.stmts(clause.Body))
+ }
+ if len(clauses) > 0 {
+ p.closeScope(rbrace)
+ }
+ return nodes
+}
+
+func (p *noder) labeledStmt(label *syntax.LabeledStmt, fallOK bool) ir.Node {
+ sym := p.name(label.Label)
+ lhs := ir.NewLabelStmt(p.pos(label), sym)
+
+ var ls ir.Node
+ if label.Stmt != nil { // TODO(mdempsky): Should always be present.
+ ls = p.stmtFall(label.Stmt, fallOK)
+ // Attach label directly to control statement too.
+ if ls != nil {
+ switch ls.Op() {
+ case ir.OFOR:
+ ls := ls.(*ir.ForStmt)
+ ls.Label = sym
+ case ir.ORANGE:
+ ls := ls.(*ir.RangeStmt)
+ ls.Label = sym
+ case ir.OSWITCH:
+ ls := ls.(*ir.SwitchStmt)
+ ls.Label = sym
+ case ir.OSELECT:
+ ls := ls.(*ir.SelectStmt)
+ ls.Label = sym
+ }
+ }
+ }
+
+ l := []ir.Node{lhs}
+ if ls != nil {
+ if ls.Op() == ir.OBLOCK {
+ ls := ls.(*ir.BlockStmt)
+ l = append(l, ls.List...)
+ } else {
+ l = append(l, ls)
+ }
+ }
+ return ir.NewBlockStmt(src.NoXPos, l)
+}
+
+var unOps = [...]ir.Op{
+ syntax.Recv: ir.ORECV,
+ syntax.Mul: ir.ODEREF,
+ syntax.And: ir.OADDR,
+
+ syntax.Not: ir.ONOT,
+ syntax.Xor: ir.OBITNOT,
+ syntax.Add: ir.OPLUS,
+ syntax.Sub: ir.ONEG,
+}
+
+func (p *noder) unOp(op syntax.Operator) ir.Op {
+ if uint64(op) >= uint64(len(unOps)) || unOps[op] == 0 {
+ panic("invalid Operator")
+ }
+ return unOps[op]
+}
+
+var binOps = [...]ir.Op{
+ syntax.OrOr: ir.OOROR,
+ syntax.AndAnd: ir.OANDAND,
+
+ syntax.Eql: ir.OEQ,
+ syntax.Neq: ir.ONE,
+ syntax.Lss: ir.OLT,
+ syntax.Leq: ir.OLE,
+ syntax.Gtr: ir.OGT,
+ syntax.Geq: ir.OGE,
+
+ syntax.Add: ir.OADD,
+ syntax.Sub: ir.OSUB,
+ syntax.Or: ir.OOR,
+ syntax.Xor: ir.OXOR,
+
+ syntax.Mul: ir.OMUL,
+ syntax.Div: ir.ODIV,
+ syntax.Rem: ir.OMOD,
+ syntax.And: ir.OAND,
+ syntax.AndNot: ir.OANDNOT,
+ syntax.Shl: ir.OLSH,
+ syntax.Shr: ir.ORSH,
+}
+
+func (p *noder) binOp(op syntax.Operator) ir.Op {
+ if uint64(op) >= uint64(len(binOps)) || binOps[op] == 0 {
+ panic("invalid Operator")
+ }
+ return binOps[op]
+}
+
+// checkLangCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
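+// For example (illustrative), 0b101, 0o17, 1_000_000, and 0x1p-2 are all
+// rejected when -lang is set below go1.13.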
+func checkLangCompat(lit *syntax.BasicLit) {
+ s := lit.Value
+ if len(s) <= 2 || types.AllowsGoVersion(types.LocalPkg, 1, 13) {
+ return
+ }
+ // len(s) > 2
+ if strings.Contains(s, "_") {
+ base.ErrorfVers("go1.13", "underscores in numeric literals")
+ return
+ }
+ if s[0] != '0' {
+ return
+ }
+ radix := s[1]
+ if radix == 'b' || radix == 'B' {
+ base.ErrorfVers("go1.13", "binary literals")
+ return
+ }
+ if radix == 'o' || radix == 'O' {
+ base.ErrorfVers("go1.13", "0o/0O-style octal literals")
+ return
+ }
+ if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+ base.ErrorfVers("go1.13", "hexadecimal floating-point literals")
+ }
+}
+
+func (p *noder) basicLit(lit *syntax.BasicLit) constant.Value {
+ // We don't use the errors of the conversion routines to determine
+ // if a literal string is valid because the conversion routines may
+ // accept a wider syntax than the language permits. Rely on lit.Bad
+ // instead.
+ if lit.Bad {
+ return constant.MakeUnknown()
+ }
+
+ switch lit.Kind {
+ case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
+ checkLangCompat(lit)
+ // The max. mantissa precision for untyped numeric values
+ // is 512 bits, or 4048 bits for each of the two integer
+ // parts of a fraction for floating-point numbers that are
+ // represented accurately in the go/constant package.
+ // Constant literals that are longer than this many bits
+ // are not meaningful; and excessively long constants may
+ // consume a lot of space and time for a useless conversion.
+ // Cap constant length with a generous upper limit that also
+ // allows for separators between all digits.
+ const limit = 10000
+ if len(lit.Value) > limit {
+ p.errorAt(lit.Pos(), "excessively long constant: %s... (%d chars)", lit.Value[:10], len(lit.Value))
+ return constant.MakeUnknown()
+ }
+ }
+
+ v := constant.MakeFromLiteral(lit.Value, tokenForLitKind[lit.Kind], 0)
+ if v.Kind() == constant.Unknown {
+ // TODO(mdempsky): Better error message?
+ p.errorAt(lit.Pos(), "malformed constant: %s", lit.Value)
+ }
+
+ return v
+}
+
+var tokenForLitKind = [...]token.Token{
+ syntax.IntLit: token.INT,
+ syntax.RuneLit: token.CHAR,
+ syntax.FloatLit: token.FLOAT,
+ syntax.ImagLit: token.IMAG,
+ syntax.StringLit: token.STRING,
+}
+
+func (p *noder) name(name *syntax.Name) *types.Sym {
+ return typecheck.Lookup(name.Value)
+}
+
+func (p *noder) mkname(name *syntax.Name) ir.Node {
+ // TODO(mdempsky): Set line number?
+ return mkname(p.name(name))
+}
+
+func wrapname(pos src.XPos, x ir.Node) ir.Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch x.Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
+ break
+ }
+ fallthrough
+ case ir.ONAME, ir.ONONAME, ir.OPACK:
+ p := ir.NewParenExpr(pos, x)
+ p.SetImplicit(true)
+ return p
+ }
+ return x
+}
+
+func (p *noder) wrapname(n syntax.Node, x ir.Node) ir.Node {
+ return wrapname(p.pos(n), x)
+}
+
+func (p *noder) setlineno(n syntax.Node) {
+ if n != nil {
+ base.Pos = p.pos(n)
+ }
+}
+
+// error is called concurrently if files are parsed concurrently.
+func (p *noder) error(err error) {
+ p.err <- err.(syntax.Error)
+}
+
+// pragmas that are allowed in the std lib, but don't have
+// a syntax.Pragma value (see lex.go) associated with them.
+var allowedStdPragmas = map[string]bool{
+ "go:cgo_export_static": true,
+ "go:cgo_export_dynamic": true,
+ "go:cgo_import_static": true,
+ "go:cgo_import_dynamic": true,
+ "go:cgo_ldflag": true,
+ "go:cgo_dynamic_linker": true,
+ "go:embed": true,
+ "go:generate": true,
+}
+
+// *pragmas is the value stored in a syntax.Pragma during parsing.
+type pragmas struct {
+ Flag ir.PragmaFlag // collected bits
+ Pos []pragmaPos // position of each individual flag
+ Embeds []pragmaEmbed
+}
+
+type pragmaPos struct {
+ Flag ir.PragmaFlag
+ Pos syntax.Pos
+}
+
+type pragmaEmbed struct {
+ Pos syntax.Pos
+ Patterns []string
+}
+
+func (p *noder) checkUnused(pragma *pragmas) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ p.errorAt(pos.Pos, "misplaced compiler directive")
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.errorAt(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
+func (p *noder) checkUnusedDuringParse(pragma *pragmas) {
+ for _, pos := range pragma.Pos {
+ if pos.Flag&pragma.Flag != 0 {
+ p.error(syntax.Error{Pos: pos.Pos, Msg: "misplaced compiler directive"})
+ }
+ }
+ if len(pragma.Embeds) > 0 {
+ for _, e := range pragma.Embeds {
+ p.error(syntax.Error{Pos: e.Pos, Msg: "misplaced go:embed directive"})
+ }
+ }
+}
+
+// pragma is called concurrently if files are parsed concurrently.
+func (p *noder) pragma(pos syntax.Pos, blankLine bool, text string, old syntax.Pragma) syntax.Pragma {
+ pragma, _ := old.(*pragmas)
+ if pragma == nil {
+ pragma = new(pragmas)
+ }
+
+ if text == "" {
+ // unused pragma; only called with old != nil.
+ p.checkUnusedDuringParse(pragma)
+ return nil
+ }
+
+ if strings.HasPrefix(text, "line ") {
+ // line directives are handled by syntax package
+ panic("unreachable")
+ }
+
+ if !blankLine {
+ // directive must be on line by itself
+ p.error(syntax.Error{Pos: pos, Msg: "misplaced compiler directive"})
+ return pragma
+ }
+
+ switch {
+ case strings.HasPrefix(text, "go:linkname "):
+ f := strings.Fields(text)
+ if !(2 <= len(f) && len(f) <= 3) {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:linkname localname [linkname]"})
+ break
+ }
+		// The second argument is optional. If omitted, we use the
+		// default object symbol name, and the linkname directive only
+		// serves to mark this symbol as something that may be
+		// referenced via the object symbol name from another package.
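+		// For example (illustrative):
+		//
+		//	//go:linkname now runtime.nanotime
+		//
+		// makes the local declaration "now" use the object symbol
+		// runtime.nanotime.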
+ var target string
+ if len(f) == 3 {
+ target = f[2]
+ } else if base.Ctxt.Pkgpath != "" {
+ // Use the default object symbol name if the
+ // user didn't provide one.
+ target = objabi.PathToPrefix(base.Ctxt.Pkgpath) + "." + f[1]
+ } else {
+ p.error(syntax.Error{Pos: pos, Msg: "//go:linkname requires linkname argument or -p compiler flag"})
+ break
+ }
+ p.linknames = append(p.linknames, linkname{pos, f[1], target})
+
+ case text == "go:embed", strings.HasPrefix(text, "go:embed "):
+ args, err := parseGoEmbed(text[len("go:embed"):])
+ if err != nil {
+ p.error(syntax.Error{Pos: pos, Msg: err.Error()})
+ }
+ if len(args) == 0 {
+ p.error(syntax.Error{Pos: pos, Msg: "usage: //go:embed pattern..."})
+ break
+ }
+ pragma.Embeds = append(pragma.Embeds, pragmaEmbed{pos, args})
+
+ case strings.HasPrefix(text, "go:cgo_import_dynamic "):
+ // This is permitted for general use because Solaris
+ // code relies on it in golang.org/x/sys/unix and others.
+ fields := pragmaFields(text)
+ if len(fields) >= 4 {
+ lib := strings.Trim(fields[3], `"`)
+ if lib != "" && !safeArg(lib) && !isCgoGeneratedFile(pos) {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("invalid library name %q in cgo_import_dynamic directive", lib)})
+ }
+ p.pragcgo(pos, text)
+ pragma.Flag |= pragmaFlag("go:cgo_import_dynamic")
+ break
+ }
+ fallthrough
+ case strings.HasPrefix(text, "go:cgo_"):
+ // For security, we disallow //go:cgo_* directives other
+ // than cgo_import_dynamic outside cgo-generated files.
+ // Exception: they are allowed in the standard library, for runtime and syscall.
+ if !isCgoGeneratedFile(pos) && !base.Flag.Std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in cgo-generated code", text)})
+ }
+ p.pragcgo(pos, text)
+ fallthrough // because of //go:cgo_unsafe_args
+ default:
+ verb := text
+ if i := strings.Index(text, " "); i >= 0 {
+ verb = verb[:i]
+ }
+ flag := pragmaFlag(verb)
+ const runtimePragmas = ir.Systemstack | ir.Nowritebarrier | ir.Nowritebarrierrec | ir.Yeswritebarrierrec
+ if !base.Flag.CompilingRuntime && flag&runtimePragmas != 0 {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s only allowed in runtime", verb)})
+ }
+ if flag == 0 && !allowedStdPragmas[verb] && base.Flag.Std {
+ p.error(syntax.Error{Pos: pos, Msg: fmt.Sprintf("//%s is not allowed in the standard library", verb)})
+ }
+ pragma.Flag |= flag
+ pragma.Pos = append(pragma.Pos, pragmaPos{flag, pos})
+ }
+
+ return pragma
+}
+
+// isCgoGeneratedFile reports whether pos is in a file
+// generated by cgo, which is to say a file with name
+// beginning with "_cgo_". Such files are allowed to
+// contain cgo directives, and for security reasons
+// (primarily misuse of linker flags), other files are not.
+// See golang.org/issue/23672.
+func isCgoGeneratedFile(pos syntax.Pos) bool {
+ return strings.HasPrefix(filepath.Base(trimFilename(pos.Base())), "_cgo_")
+}
+
+// safeArg reports whether arg is a "safe" command-line argument,
+// meaning that when it appears in a command-line, it probably
+// doesn't have some special meaning other than its own name.
+// This is copied from SafeArg in cmd/go/internal/load/pkg.go.
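+//
+// For example (illustrative), "libm.so" is safe, while "-Wl,--flag" is not:
+// it begins with '-', which is not in the allowed character set below.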
+func safeArg(name string) bool {
+ if name == "" {
+ return false
+ }
+ c := name[0]
+ return '0' <= c && c <= '9' || 'A' <= c && c <= 'Z' || 'a' <= c && c <= 'z' || c == '.' || c == '_' || c == '/' || c >= utf8.RuneSelf
+}
+
+func mkname(sym *types.Sym) ir.Node {
+ n := oldname(sym)
+ if n.Name() != nil && n.Name().PkgName != nil {
+ n.Name().PkgName.Used = true
+ }
+ return n
+}
+
+// parseGoEmbed parses the text following "//go:embed" to extract the glob patterns.
+// It accepts unquoted space-separated patterns as well as double-quoted and back-quoted Go strings.
+// go/build/read.go also processes these strings and contains similar logic.
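+//
+// For example (illustrative), the arguments of
+//
+//	//go:embed a.txt "b c.txt" `d/*`
+//
+// parse to the patterns ["a.txt", "b c.txt", "d/*"].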
+func parseGoEmbed(args string) ([]string, error) {
+ var list []string
+ for args = strings.TrimSpace(args); args != ""; args = strings.TrimSpace(args) {
+ var path string
+ Switch:
+ switch args[0] {
+ default:
+ i := len(args)
+ for j, c := range args {
+ if unicode.IsSpace(c) {
+ i = j
+ break
+ }
+ }
+ path = args[:i]
+ args = args[i:]
+
+ case '`':
+ i := strings.Index(args[1:], "`")
+ if i < 0 {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ path = args[1 : 1+i]
+ args = args[1+i+1:]
+
+ case '"':
+ i := 1
+ for ; i < len(args); i++ {
+ if args[i] == '\\' {
+ i++
+ continue
+ }
+ if args[i] == '"' {
+ q, err := strconv.Unquote(args[:i+1])
+ if err != nil {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args[:i+1])
+ }
+ path = q
+ args = args[i+1:]
+ break Switch
+ }
+ }
+ if i >= len(args) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+
+ if args != "" {
+ r, _ := utf8.DecodeRuneInString(args)
+ if !unicode.IsSpace(r) {
+ return nil, fmt.Errorf("invalid quoted string in //go:embed: %s", args)
+ }
+ }
+ list = append(list, path)
+ }
+ return list, nil
+}
+
+func fakeRecv() *ir.Field {
+ return ir.NewField(base.Pos, nil, nil, types.FakeRecvType())
+}
+
+func (p *noder) funcLit(expr *syntax.FuncLit) ir.Node {
+ fn := ir.NewClosureFunc(p.pos(expr), ir.CurFunc != nil)
+ fn.Nname.Ntype = p.typeExpr(expr.Type)
+
+ p.funcBody(fn, expr.Body)
+
+ ir.FinishCaptureNames(base.Pos, ir.CurFunc, fn)
+
+ return fn.OClosure
+}
+
+// A function named init is a special case.
+// It is called by package initialization before main is run.
+// To make it unique within a package and also uncallable,
+// the name, normally "pkg.init", is altered to "pkg.init.0".
+var renameinitgen int
+
+func renameinit() *types.Sym {
+ s := typecheck.LookupNum("init.", renameinitgen)
+ renameinitgen++
+ return s
+}
+
+// oldname returns the Node that declares symbol s in the current scope.
+// If no such Node currently exists, an ONONAME Node is returned instead.
+// Automatically creates a new closure variable if the referenced symbol was
+// declared in a different (containing) function.
+func oldname(s *types.Sym) ir.Node {
+ if s.Pkg != types.LocalPkg {
+ return ir.NewIdent(base.Pos, s)
+ }
+
+ n := ir.AsNode(s.Def)
+ if n == nil {
+ // Maybe a top-level declaration will come along later to
+ // define s. resolve will check s.Def again once all input
+ // source has been processed.
+ return ir.NewIdent(base.Pos, s)
+ }
+
+ if n, ok := n.(*ir.Name); ok {
+ // TODO(rsc): If there is an outer variable x and we
+ // are parsing x := 5 inside the closure, until we get to
+ // the := it looks like a reference to the outer x so we'll
+ // make x a closure variable unnecessarily.
+ return ir.CaptureName(base.Pos, ir.CurFunc, n)
+ }
+
+ return n
+}
+
+func varEmbed(makeXPos func(syntax.Pos) src.XPos, name *ir.Name, decl *syntax.VarDecl, pragma *pragmas, haveEmbed bool) {
+ pragmaEmbeds := pragma.Embeds
+ pragma.Embeds = nil
+ if len(pragmaEmbeds) == 0 {
+ return
+ }
+
+ if err := checkEmbed(decl, haveEmbed, typecheck.DeclContext != ir.PEXTERN); err != nil {
+ base.ErrorfAt(makeXPos(pragmaEmbeds[0].Pos), "%s", err)
+ return
+ }
+
+ var embeds []ir.Embed
+ for _, e := range pragmaEmbeds {
+ embeds = append(embeds, ir.Embed{Pos: makeXPos(e.Pos), Patterns: e.Patterns})
+ }
+ typecheck.Target.Embeds = append(typecheck.Target.Embeds, name)
+ name.Embed = &embeds
+}
+
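+// checkEmbed reports why the //go:embed directives on decl cannot be
+// applied, or nil if they can. A valid use (illustrative) looks like:
+//
+//	import _ "embed"
+//
+//	//go:embed hello.txt
+//	var greeting string
+//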
+func checkEmbed(decl *syntax.VarDecl, haveEmbed, withinFunc bool) error {
+ switch {
+ case !haveEmbed:
+ return errors.New("go:embed only allowed in Go files that import \"embed\"")
+ case len(decl.NameList) > 1:
+ return errors.New("go:embed cannot apply to multiple vars")
+ case decl.Values != nil:
+ return errors.New("go:embed cannot apply to var with initializer")
+ case decl.Type == nil:
+ // Should not happen, since Values == nil now.
+ return errors.New("go:embed cannot apply to var without type")
+ case withinFunc:
+ return errors.New("go:embed cannot apply to var inside func")
+ case !types.AllowsGoVersion(types.LocalPkg, 1, 16):
+ return fmt.Errorf("go:embed requires go1.16 or later (-lang was set to %s; check go.mod)", base.Flag.Lang)
+
+ default:
+ return nil
+ }
+}
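For context, a minimal user-level declaration that satisfies every check above (package name and glob pattern are hypothetical): the file imports "embed", the directive applies to a single package-scope var, and there is no initializer.

package website

import "embed"

//go:embed templates/*.html
var templates embed.FS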
diff --git a/src/cmd/compile/internal/noder/object.go b/src/cmd/compile/internal/noder/object.go
new file mode 100644
index 0000000..37a995b
--- /dev/null
+++ b/src/cmd/compile/internal/noder/object.go
@@ -0,0 +1,206 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+func (g *irgen) def(name *syntax.Name) (*ir.Name, types2.Object) {
+ obj, ok := g.info.Defs[name]
+ if !ok {
+ base.FatalfAt(g.pos(name), "unknown name %v", name)
+ }
+ return g.obj(obj), obj
+}
+
+// use returns the Name or InstExpr node associated with the use of name,
+// possibly instantiated by type arguments. The returned node will have
+// the correct type and be marked as typechecked.
+func (g *irgen) use(name *syntax.Name) ir.Node {
+ obj2, ok := g.info.Uses[name]
+ if !ok {
+ base.FatalfAt(g.pos(name), "unknown name %v", name)
+ }
+ obj := ir.CaptureName(g.pos(name), ir.CurFunc, g.obj(obj2))
+ if obj.Defn != nil && obj.Defn.Op() == ir.ONAME {
+ // If CaptureName created a closure variable, then transfer the
+ // type of the captured name to the new closure variable.
+ obj.SetTypecheck(1)
+ obj.SetType(obj.Defn.Type())
+ }
+
+ if obj.Class == ir.PFUNC {
+ if inst, ok := g.info.Instances[name]; ok {
+ // This is the case where inferring types required the
+ // types of the function arguments.
+ targs := make([]ir.Node, inst.TypeArgs.Len())
+ for i := range targs {
+ targs[i] = ir.TypeNode(g.typ(inst.TypeArgs.At(i)))
+ }
+ typ := g.substType(obj.Type(), obj.Type().TParams(), targs)
+ return typed(typ, ir.NewInstExpr(g.pos(name), ir.OFUNCINST, obj, targs))
+ }
+ }
+
+ return obj
+}
+
+// obj returns the Name that represents the given object. If no such Name exists
+// yet, it will be implicitly created. The returned node will have the correct
+// type and be marked as typechecked.
+//
+// For objects declared at function scope, ir.CurFunc must already be
+// set to the respective function when the Name is created.
+func (g *irgen) obj(obj types2.Object) *ir.Name {
+ // For imported objects, we use iimport directly instead of mapping
+ // the types2 representation.
+ if obj.Pkg() != g.self {
+ if sig, ok := obj.Type().(*types2.Signature); ok && sig.Recv() != nil {
+ // We can't import a method by name - must import the type
+ // and access the method from it.
+ base.FatalfAt(g.pos(obj), "tried to import a method directly")
+ }
+ sym := g.sym(obj)
+ if sym.Def != nil {
+ return sym.Def.(*ir.Name)
+ }
+ n := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ if n, ok := n.(*ir.Name); ok {
+ n.SetTypecheck(1)
+ return n
+ }
+ base.FatalfAt(g.pos(obj), "failed to resolve %v", obj)
+ }
+
+ if name, ok := g.objs[obj]; ok {
+ return name // previously mapped
+ }
+
+ var name *ir.Name
+ pos := g.pos(obj)
+
+ class := typecheck.DeclContext
+ if obj.Parent() == g.self.Scope() {
+ class = ir.PEXTERN // forward reference to package-block declaration
+ }
+
+ // "You are in a maze of twisting little passages, all different."
+ switch obj := obj.(type) {
+ case *types2.Const:
+ name = g.objCommon(pos, ir.OLITERAL, g.sym(obj), class, g.typ(obj.Type()))
+
+ case *types2.Func:
+ sig := obj.Type().(*types2.Signature)
+ var sym *types.Sym
+ var typ *types.Type
+ if recv := sig.Recv(); recv == nil {
+ if obj.Name() == "init" {
+ sym = renameinit()
+ } else {
+ sym = g.sym(obj)
+ }
+ typ = g.typ(sig)
+ } else {
+ sym = g.selector(obj)
+ if !sym.IsBlank() {
+ sym = ir.MethodSym(g.typ(recv.Type()), sym)
+ }
+ typ = g.signature(g.param(recv), sig)
+ }
+ name = g.objCommon(pos, ir.ONAME, sym, ir.PFUNC, typ)
+
+ case *types2.TypeName:
+ if obj.IsAlias() {
+ name = g.objCommon(pos, ir.OTYPE, g.sym(obj), class, g.typ(obj.Type()))
+ name.SetAlias(true)
+ } else {
+ name = ir.NewDeclNameAt(pos, ir.OTYPE, g.sym(obj))
+ g.objFinish(name, class, types.NewNamed(name))
+ }
+
+ case *types2.Var:
+ sym := g.sym(obj)
+ if class == ir.PPARAMOUT && (sym == nil || sym.IsBlank()) {
+ // Backend needs names for result parameters,
+ // even if they're anonymous or blank.
+ nresults := 0
+ for _, n := range ir.CurFunc.Dcl {
+ if n.Class == ir.PPARAMOUT {
+ nresults++
+ }
+ }
+ if sym == nil {
+ sym = typecheck.LookupNum("~r", nresults) // 'r' for "result"
+ } else {
+ sym = typecheck.LookupNum("~b", nresults) // 'b' for "blank"
+ }
+ }
+ name = g.objCommon(pos, ir.ONAME, sym, class, g.typ(obj.Type()))
+
+ default:
+ g.unhandled("object", obj)
+ }
+
+ g.objs[obj] = name
+ name.SetTypecheck(1)
+ return name
+}
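The *types2.Var case above gives anonymous and blank result parameters compiler-internal names so the backend can address them. A standalone sketch of that naming convention (simplified: it takes a plain index, whereas the code above counts the PPARAMOUT entries already declared):

package main

import "fmt"

// resultName mirrors the "~r"/"~b" scheme used above for result
// parameters that lack a usable source-level name.
func resultName(sym string, index int) string {
	switch sym {
	case "":
		return fmt.Sprintf("~r%d", index) // anonymous result
	case "_":
		return fmt.Sprintf("~b%d", index) // blank result
	}
	return sym
}

func main() {
	fmt.Println(resultName("", 0), resultName("_", 1), resultName("err", 2)) // ~r0 ~b1 err
}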
+
+func (g *irgen) objCommon(pos src.XPos, op ir.Op, sym *types.Sym, class ir.Class, typ *types.Type) *ir.Name {
+ name := ir.NewDeclNameAt(pos, op, sym)
+ g.objFinish(name, class, typ)
+ return name
+}
+
+func (g *irgen) objFinish(name *ir.Name, class ir.Class, typ *types.Type) {
+ sym := name.Sym()
+
+ name.SetType(typ)
+ name.Class = class
+ if name.Class == ir.PFUNC {
+ sym.SetFunc(true)
+ }
+
+ name.SetTypecheck(1)
+ name.SetWalkdef(1)
+
+ if ir.IsBlank(name) {
+ return
+ }
+
+ switch class {
+ case ir.PEXTERN:
+ g.target.Externs = append(g.target.Externs, name)
+ fallthrough
+ case ir.PFUNC:
+ sym.Def = name
+ if name.Class == ir.PFUNC && name.Type().Recv() != nil {
+ break // methods are exported with their receiver type
+ }
+ if types.IsExported(sym.Name) {
+ // Generic functions can be marked for export here, even
+ // though they will not be compiled until instantiated.
+ typecheck.Export(name)
+ }
+ if base.Flag.AsmHdr != "" && !name.Sym().Asm() {
+ name.Sym().SetAsm(true)
+ g.target.Asms = append(g.target.Asms, name)
+ }
+
+ default:
+ // Function-scoped declaration.
+ name.Curfn = ir.CurFunc
+ if name.Op() == ir.ONAME {
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, name)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/noder/posmap.go b/src/cmd/compile/internal/noder/posmap.go
new file mode 100644
index 0000000..f22628f
--- /dev/null
+++ b/src/cmd/compile/internal/noder/posmap.go
@@ -0,0 +1,85 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/internal/src"
+)
+
+// A posMap handles mapping from syntax.Pos to src.XPos.
+type posMap struct {
+ bases map[*syntax.PosBase]*src.PosBase
+ cache struct {
+ last *syntax.PosBase
+ base *src.PosBase
+ }
+}
+
+type poser interface{ Pos() syntax.Pos }
+type ender interface{ End() syntax.Pos }
+
+func (m *posMap) pos(p poser) src.XPos { return m.makeXPos(p.Pos()) }
+func (m *posMap) end(p ender) src.XPos { return m.makeXPos(p.End()) }
+
+func (m *posMap) makeXPos(pos syntax.Pos) src.XPos {
+ if !pos.IsKnown() {
+ // TODO(mdempsky): Investigate restoring base.Fatalf.
+ return src.NoXPos
+ }
+
+ posBase := m.makeSrcPosBase(pos.Base())
+ return base.Ctxt.PosTable.XPos(src.MakePos(posBase, pos.Line(), pos.Col()))
+}
+
+// makeSrcPosBase translates from a *syntax.PosBase to a *src.PosBase.
+func (m *posMap) makeSrcPosBase(b0 *syntax.PosBase) *src.PosBase {
+ // fast path: most likely PosBase hasn't changed
+ if m.cache.last == b0 {
+ return m.cache.base
+ }
+
+ b1, ok := m.bases[b0]
+ if !ok {
+ fn := b0.Filename()
+ absfn := trimFilename(b0)
+
+ if b0.IsFileBase() {
+ b1 = src.NewFileBase(fn, absfn)
+ } else {
+ // line directive base
+ p0 := b0.Pos()
+ p0b := p0.Base()
+ if p0b == b0 {
+ panic("infinite recursion in makeSrcPosBase")
+ }
+ p1 := src.MakePos(m.makeSrcPosBase(p0b), p0.Line(), p0.Col())
+ b1 = src.NewLinePragmaBase(p1, fn, absfn, b0.Line(), b0.Col())
+ }
+ if m.bases == nil {
+ m.bases = make(map[*syntax.PosBase]*src.PosBase)
+ }
+ m.bases[b0] = b1
+ }
+
+ // update cache
+ m.cache.last = b0
+ m.cache.base = b1
+
+ return b1
+}
+
+func (m *posMap) join(other *posMap) {
+ if m.bases == nil {
+ m.bases = make(map[*syntax.PosBase]*src.PosBase)
+ }
+ for k, v := range other.bases {
+ if m.bases[k] != nil {
+ base.Fatalf("duplicate posmap bases")
+ }
+ m.bases[k] = v
+ }
+}
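The lookup in makeSrcPosBase combines a one-entry fast path with a lazily allocated map. A standalone sketch of that memoization pattern (string keys and int values stand in for the PosBase types):

package main

import "fmt"

type memo struct {
	hasLast bool
	lastKey string
	lastVal int
	m       map[string]int
}

// get returns the cached value for key, computing and storing it on a miss.
func (c *memo) get(key string, compute func(string) int) int {
	if c.hasLast && c.lastKey == key { // fast path: same key as the last call
		return c.lastVal
	}
	v, ok := c.m[key] // reading from a nil map is fine
	if !ok {
		v = compute(key)
		if c.m == nil {
			c.m = make(map[string]int)
		}
		c.m[key] = v
	}
	c.hasLast, c.lastKey, c.lastVal = true, key, v
	return v
}

func main() {
	var c memo
	length := func(s string) int { return len(s) }
	fmt.Println(c.get("a.go", length), c.get("a.go", length)) // 4 4
}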
diff --git a/src/cmd/compile/internal/noder/quirks.go b/src/cmd/compile/internal/noder/quirks.go
new file mode 100644
index 0000000..914c5d2
--- /dev/null
+++ b/src/cmd/compile/internal/noder/quirks.go
@@ -0,0 +1,450 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// This file defines helper functions useful for satisfying toolstash
+// -cmp when compared against the legacy frontend behavior, but can be
+// removed after that's no longer a concern.
+
+// quirksMode controls whether behavior specific to satisfying
+// toolstash -cmp is used.
+func quirksMode() bool {
+ return base.Debug.UnifiedQuirks != 0
+}
+
+// posBasesOf returns all of the position bases in the source files,
+// as seen in a straightforward traversal.
+//
+// This is necessary to ensure position bases (and thus file names)
+// get registered in the same order as noder would visit them.
+func posBasesOf(noders []*noder) []*syntax.PosBase {
+ seen := make(map[*syntax.PosBase]bool)
+ var bases []*syntax.PosBase
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if b := n.Pos().Base(); !seen[b] {
+ bases = append(bases, b)
+ seen[b] = true
+ }
+ return false
+ })
+ }
+
+ return bases
+}
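posBasesOf is an instance of order-preserving de-duplication: a seen map guards appends to a slice, so the first occurrence wins and traversal order is kept. The same pattern with plain strings:

package main

import "fmt"

// dedup keeps the first occurrence of each item, in input order.
func dedup(items []string) []string {
	seen := make(map[string]bool)
	var out []string
	for _, it := range items {
		if !seen[it] {
			seen[it] = true
			out = append(out, it)
		}
	}
	return out
}

func main() {
	fmt.Println(dedup([]string{"a.go", "b.go", "a.go"})) // [a.go b.go]
}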
+
+// importedObjsOf returns the imported objects (i.e., referenced
+// objects not declared by curpkg) from the parsed source files, in
+// the order that typecheck used to load their definitions.
+//
+// This is needed because loading the definitions for imported objects
+// can also add file names.
+func importedObjsOf(curpkg *types2.Package, info *types2.Info, noders []*noder) []types2.Object {
+ // This code is complex because it matches the precise order that
+ // typecheck recursively and repeatedly traverses the IR. It's meant
+ // to be thrown away eventually anyway.
+
+ seen := make(map[types2.Object]bool)
+ var objs []types2.Object
+
+ var phase int
+
+ decls := make(map[types2.Object]syntax.Decl)
+ assoc := func(decl syntax.Decl, names ...*syntax.Name) {
+ for _, name := range names {
+ obj, ok := info.Defs[name]
+ assert(ok)
+ decls[obj] = decl
+ }
+ }
+
+ for _, p := range noders {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ assoc(n, n.NameList...)
+ case *syntax.FuncDecl:
+ assoc(n, n.Name)
+ case *syntax.TypeDecl:
+ assoc(n, n.Name)
+ case *syntax.VarDecl:
+ assoc(n, n.NameList...)
+ case *syntax.BlockStmt:
+ return true
+ }
+ return false
+ })
+ }
+
+ var visited map[syntax.Decl]bool
+
+ var resolveDecl func(n syntax.Decl)
+ var resolveNode func(n syntax.Node, top bool)
+
+ resolveDecl = func(n syntax.Decl) {
+ if visited[n] {
+ return
+ }
+ visited[n] = true
+
+ switch n := n.(type) {
+ case *syntax.ConstDecl:
+ resolveNode(n.Type, true)
+ resolveNode(n.Values, true)
+
+ case *syntax.FuncDecl:
+ if n.Recv != nil {
+ resolveNode(n.Recv, true)
+ }
+ resolveNode(n.Type, true)
+
+ case *syntax.TypeDecl:
+ resolveNode(n.Type, true)
+
+ case *syntax.VarDecl:
+ if n.Type != nil {
+ resolveNode(n.Type, true)
+ } else {
+ resolveNode(n.Values, true)
+ }
+ }
+ }
+
+ resolveObj := func(pos syntax.Pos, obj types2.Object) {
+ switch obj.Pkg() {
+ case nil:
+ // builtin; nothing to do
+
+ case curpkg:
+ if decl, ok := decls[obj]; ok {
+ resolveDecl(decl)
+ }
+
+ default:
+ if obj.Parent() == obj.Pkg().Scope() && !seen[obj] {
+ seen[obj] = true
+ objs = append(objs, obj)
+ }
+ }
+ }
+
+ checkdefat := func(pos syntax.Pos, n *syntax.Name) {
+ if n.Value == "_" {
+ return
+ }
+ obj, ok := info.Uses[n]
+ if !ok {
+ obj, ok = info.Defs[n]
+ if !ok {
+ return
+ }
+ }
+ if obj == nil {
+ return
+ }
+ resolveObj(pos, obj)
+ }
+ checkdef := func(n *syntax.Name) { checkdefat(n.Pos(), n) }
+
+ var later []syntax.Node
+
+ resolveNode = func(n syntax.Node, top bool) {
+ if n == nil {
+ return
+ }
+ syntax.Crawl(n, func(n syntax.Node) bool {
+ switch n := n.(type) {
+ case *syntax.Name:
+ checkdef(n)
+
+ case *syntax.SelectorExpr:
+ if name, ok := n.X.(*syntax.Name); ok {
+ if _, isPkg := info.Uses[name].(*types2.PkgName); isPkg {
+ checkdefat(n.X.Pos(), n.Sel)
+ return true
+ }
+ }
+
+ case *syntax.AssignStmt:
+ resolveNode(n.Rhs, top)
+ resolveNode(n.Lhs, top)
+ return true
+
+ case *syntax.VarDecl:
+ resolveNode(n.Values, top)
+
+ case *syntax.FuncLit:
+ if top {
+ resolveNode(n.Type, top)
+ later = append(later, n.Body)
+ return true
+ }
+
+ case *syntax.BlockStmt:
+ if phase >= 3 {
+ for _, stmt := range n.List {
+ resolveNode(stmt, false)
+ }
+ }
+ return true
+ }
+
+ return false
+ })
+ }
+
+ for phase = 1; phase <= 5; phase++ {
+ visited = map[syntax.Decl]bool{}
+
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ switch decl := decl.(type) {
+ case *syntax.ConstDecl:
+ resolveDecl(decl)
+
+ case *syntax.FuncDecl:
+ resolveDecl(decl)
+ if phase >= 3 && decl.Body != nil {
+ resolveNode(decl.Body, true)
+ }
+
+ case *syntax.TypeDecl:
+ if !decl.Alias || phase >= 2 {
+ resolveDecl(decl)
+ }
+
+ case *syntax.VarDecl:
+ if phase >= 2 {
+ resolveNode(decl.Values, true)
+ resolveDecl(decl)
+ }
+ }
+ }
+
+ if phase >= 5 {
+ syntax.Crawl(p.file, func(n syntax.Node) bool {
+ if name, ok := n.(*syntax.Name); ok {
+ if obj, ok := info.Uses[name]; ok {
+ resolveObj(name.Pos(), obj)
+ }
+ }
+ return false
+ })
+ }
+ }
+
+ for i := 0; i < len(later); i++ {
+ resolveNode(later[i], true)
+ }
+ later = nil
+ }
+
+ return objs
+}
+
+// typeExprEndPos returns the position that noder would leave base.Pos
+// after parsing the given type expression.
+func typeExprEndPos(expr0 syntax.Expr) syntax.Pos {
+ for {
+ switch expr := expr0.(type) {
+ case *syntax.Name:
+ return expr.Pos()
+ case *syntax.SelectorExpr:
+ return expr.X.Pos()
+
+ case *syntax.ParenExpr:
+ expr0 = expr.X
+
+ case *syntax.Operation:
+ assert(expr.Op == syntax.Mul)
+ assert(expr.Y == nil)
+ expr0 = expr.X
+
+ case *syntax.ArrayType:
+ expr0 = expr.Elem
+ case *syntax.ChanType:
+ expr0 = expr.Elem
+ case *syntax.DotsType:
+ expr0 = expr.Elem
+ case *syntax.MapType:
+ expr0 = expr.Value
+ case *syntax.SliceType:
+ expr0 = expr.Elem
+
+ case *syntax.StructType:
+ return expr.Pos()
+
+ case *syntax.InterfaceType:
+ expr0 = lastFieldType(expr.MethodList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+
+ case *syntax.FuncType:
+ expr0 = lastFieldType(expr.ResultList)
+ if expr0 == nil {
+ expr0 = lastFieldType(expr.ParamList)
+ if expr0 == nil {
+ return expr.Pos()
+ }
+ }
+
+ case *syntax.IndexExpr: // explicit type instantiation
+ targs := unpackListExpr(expr.Index)
+ expr0 = targs[len(targs)-1]
+
+ default:
+ panic(fmt.Sprintf("%s: unexpected type expression %v", expr.Pos(), syntax.String(expr)))
+ }
+ }
+}
+
+func lastFieldType(fields []*syntax.Field) syntax.Expr {
+ if len(fields) == 0 {
+ return nil
+ }
+ return fields[len(fields)-1].Type
+}
+
+// sumPos returns the position that noder.sum would produce for
+// constant expression x.
+func sumPos(x syntax.Expr) syntax.Pos {
+ orig := x
+ for {
+ switch x1 := x.(type) {
+ case *syntax.BasicLit:
+ assert(x1.Kind == syntax.StringLit)
+ return x1.Pos()
+ case *syntax.Operation:
+ assert(x1.Op == syntax.Add && x1.Y != nil)
+ if r, ok := x1.Y.(*syntax.BasicLit); ok {
+ assert(r.Kind == syntax.StringLit)
+ x = x1.X
+ continue
+ }
+ }
+ return orig.Pos()
+ }
+}
+
+// funcParamsEndPos returns the value of base.Pos left by noder after
+// processing a function signature.
+func funcParamsEndPos(fn *ir.Func) src.XPos {
+ sig := fn.Nname.Type()
+
+ fields := sig.Results().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Params().FieldSlice()
+ if len(fields) == 0 {
+ fields = sig.Recvs().FieldSlice()
+ if len(fields) == 0 {
+ if fn.OClosure != nil {
+ return fn.Nname.Ntype.Pos()
+ }
+ return fn.Pos()
+ }
+ }
+ }
+
+ return fields[len(fields)-1].Pos
+}
+
+type dupTypes struct {
+ origs map[types2.Type]types2.Type
+}
+
+func (d *dupTypes) orig(t types2.Type) types2.Type {
+ if orig, ok := d.origs[t]; ok {
+ return orig
+ }
+ return t
+}
+
+func (d *dupTypes) add(t, orig types2.Type) {
+ if t == orig {
+ return
+ }
+
+ if d.origs == nil {
+ d.origs = make(map[types2.Type]types2.Type)
+ }
+ assert(d.origs[t] == nil)
+ d.origs[t] = orig
+
+ switch t := t.(type) {
+ case *types2.Pointer:
+ orig := orig.(*types2.Pointer)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Slice:
+ orig := orig.(*types2.Slice)
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Map:
+ orig := orig.(*types2.Map)
+ d.add(t.Key(), orig.Key())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Array:
+ orig := orig.(*types2.Array)
+ assert(t.Len() == orig.Len())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Chan:
+ orig := orig.(*types2.Chan)
+ assert(t.Dir() == orig.Dir())
+ d.add(t.Elem(), orig.Elem())
+
+ case *types2.Struct:
+ orig := orig.(*types2.Struct)
+ assert(t.NumFields() == orig.NumFields())
+ for i := 0; i < t.NumFields(); i++ {
+ d.add(t.Field(i).Type(), orig.Field(i).Type())
+ }
+
+ case *types2.Interface:
+ orig := orig.(*types2.Interface)
+ assert(t.NumExplicitMethods() == orig.NumExplicitMethods())
+ assert(t.NumEmbeddeds() == orig.NumEmbeddeds())
+ for i := 0; i < t.NumExplicitMethods(); i++ {
+ d.add(t.ExplicitMethod(i).Type(), orig.ExplicitMethod(i).Type())
+ }
+ for i := 0; i < t.NumEmbeddeds(); i++ {
+ d.add(t.EmbeddedType(i), orig.EmbeddedType(i))
+ }
+
+ case *types2.Signature:
+ orig := orig.(*types2.Signature)
+ assert((t.Recv() == nil) == (orig.Recv() == nil))
+ if t.Recv() != nil {
+ d.add(t.Recv().Type(), orig.Recv().Type())
+ }
+ d.add(t.Params(), orig.Params())
+ d.add(t.Results(), orig.Results())
+
+ case *types2.Tuple:
+ orig := orig.(*types2.Tuple)
+ assert(t.Len() == orig.Len())
+ for i := 0; i < t.Len(); i++ {
+ d.add(t.At(i).Type(), orig.At(i).Type())
+ }
+
+ default:
+ assert(types2.Identical(t, orig))
+ }
+}
diff --git a/src/cmd/compile/internal/noder/reader.go b/src/cmd/compile/internal/noder/reader.go
new file mode 100644
index 0000000..5d17c53
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reader.go
@@ -0,0 +1,2460 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "go/constant"
+ "internal/buildcfg"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/deadcode"
+ "cmd/compile/internal/dwarfgen"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// TODO(mdempsky): Suppress duplicate type/const errors that can arise
+// during typecheck due to naive type substitution (e.g., see #42758).
+// I anticipate these will be handled as a consequence of adding
+// dictionaries support, so it's probably not important to focus on
+// this until after that's done.
+
+type pkgReader struct {
+ pkgDecoder
+
+ posBases []*src.PosBase
+ pkgs []*types.Pkg
+ typs []*types.Type
+
+ // offset for rewriting the given index into the output,
+ // but bitwise inverted so we can detect if we're missing the entry or not.
+ newindex []int
+}
+
+func newPkgReader(pr pkgDecoder) *pkgReader {
+ return &pkgReader{
+ pkgDecoder: pr,
+
+ posBases: make([]*src.PosBase, pr.numElems(relocPosBase)),
+ pkgs: make([]*types.Pkg, pr.numElems(relocPkg)),
+ typs: make([]*types.Type, pr.numElems(relocType)),
+
+ newindex: make([]int, pr.totalElems()),
+ }
+}
+
+type pkgReaderIndex struct {
+ pr *pkgReader
+ idx int
+ dict *readerDict
+}
+
+func (pri pkgReaderIndex) asReader(k reloc, marker syncMarker) *reader {
+ r := pri.pr.newReader(k, pri.idx, marker)
+ r.dict = pri.dict
+ return r
+}
+
+func (pr *pkgReader) newReader(k reloc, idx int, marker syncMarker) *reader {
+ return &reader{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+type reader struct {
+ decoder
+
+ p *pkgReader
+
+ dict *readerDict
+
+ // TODO(mdempsky): The state below is all specific to reading
+ // function bodies. It probably makes sense to split it out
+ // separately so that it doesn't take up space in every reader
+ // instance.
+
+ curfn *ir.Func
+ locals []*ir.Name
+ closureVars []*ir.Name
+
+ funarghack bool
+
+ // scopeVars is a stack tracking the number of variables declared in
+ // the current function at the moment each open scope was opened.
+ scopeVars []int
+ marker dwarfgen.ScopeMarker
+ lastCloseScopePos src.XPos
+
+ // === details for handling inline body expansion ===
+
+ // If we're reading in a function body because of inlining, this is
+ // the call that we're inlining for.
+ inlCaller *ir.Func
+ inlCall *ir.CallExpr
+ inlFunc *ir.Func
+ inlTreeIndex int
+ inlPosBases map[*src.PosBase]*src.PosBase
+
+ delayResults bool
+
+ // Label to return to.
+ retlabel *types.Sym
+
+ inlvars, retvars ir.Nodes
+}
+
+type readerDict struct {
+ // targs holds the implicit and explicit type arguments in use for
+ // reading the current object. For example:
+ //
+ // func F[T any]() {
+ // type X[U any] struct { t T; u U }
+ // var _ X[string]
+ // }
+ //
+ // var _ = F[int]
+ //
+ // While instantiating F[int], we need to in turn instantiate
+ // X[string]. [int] and [string] are explicit type arguments for F
+ // and X, respectively; but [int] also serves as the implicit type
+ // arguments for X.
+ //
+ // (As an analogy to function literals, explicits are the function
+ // literal's formal parameters, while implicits are variables
+ // captured by the function literal.)
+ targs []*types.Type
+
+ // implicits counts how many of the types within targs are implicit type
+ // arguments; the rest are explicit.
+ implicits int
+
+ derived []derivedInfo // reloc index of the derived type's descriptor
+ derivedTypes []*types.Type // slice of previously computed derived types
+
+ funcs []objInfo
+ funcsObj []ir.Node
+}
+
+func setType(n ir.Node, typ *types.Type) {
+ n.SetType(typ)
+ n.SetTypecheck(1)
+
+ if name, ok := n.(*ir.Name); ok {
+ name.SetWalkdef(1)
+ name.Ntype = ir.TypeNode(name.Type())
+ }
+}
+
+func setValue(name *ir.Name, val constant.Value) {
+ name.SetVal(val)
+ name.Defn = nil
+}
+
+// @@@ Positions
+
+func (r *reader) pos() src.XPos {
+ return base.Ctxt.PosTable.XPos(r.pos0())
+}
+
+func (r *reader) pos0() src.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return src.NoPos
+ }
+
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return src.MakePos(posBase, line, col)
+}
+
+func (r *reader) posBase() *src.PosBase {
+ return r.inlPosBase(r.p.posBaseIdx(r.reloc(relocPosBase)))
+}
+
+func (pr *pkgReader) posBaseIdx(idx int) *src.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *src.PosBase
+
+ absFilename := r.string()
+ filename := absFilename
+
+ // For build artifact stability, the export data format only
+ // contains the "absolute" filename as returned by objabi.AbsFile.
+ // However, some tests (e.g., test/run.go's asmcheck tests) expect
+ // to see the full, original filename printed out. Re-expanding
+ // "$GOROOT" to buildcfg.GOROOT is a close-enough approximation to
+ // satisfy this.
+ //
+ // TODO(mdempsky): De-duplicate this logic with similar logic in
+ // cmd/link/internal/ld's expandGoroot. However, this will probably
+ // require being more consistent about when we use native vs UNIX
+ // file paths.
+ const dollarGOROOT = "$GOROOT"
+ if strings.HasPrefix(filename, dollarGOROOT) {
+ filename = buildcfg.GOROOT + filename[len(dollarGOROOT):]
+ }
+
+ if r.bool() {
+ b = src.NewFileBase(filename, absFilename)
+ } else {
+ pos := r.pos0()
+ line := r.uint()
+ col := r.uint()
+ b = src.NewLinePragmaBase(pos, filename, absFilename, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
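The "$GOROOT" handling above is a plain prefix rewrite. A standalone sketch (the goroot value is made up):

package main

import (
	"fmt"
	"strings"
)

// expandGoroot re-expands a "$GOROOT"-relative filename for display,
// mirroring the prefix check in posBaseIdx.
func expandGoroot(filename, goroot string) string {
	const dollarGOROOT = "$GOROOT"
	if strings.HasPrefix(filename, dollarGOROOT) {
		return goroot + filename[len(dollarGOROOT):]
	}
	return filename
}

func main() {
	fmt.Println(expandGoroot("$GOROOT/src/fmt/print.go", "/usr/local/go"))
	// /usr/local/go/src/fmt/print.go
}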
+
+func (r *reader) inlPosBase(oldBase *src.PosBase) *src.PosBase {
+ if r.inlCall == nil {
+ return oldBase
+ }
+
+ if newBase, ok := r.inlPosBases[oldBase]; ok {
+ return newBase
+ }
+
+ newBase := src.NewInliningBase(oldBase, r.inlTreeIndex)
+ r.inlPosBases[oldBase] = newBase
+ return newBase
+}
+
+func (r *reader) updatePos(xpos src.XPos) src.XPos {
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ pos.SetBase(r.inlPosBase(pos.Base()))
+ return base.Ctxt.PosTable.XPos(pos)
+}
+
+func (r *reader) origPos(xpos src.XPos) src.XPos {
+ if r.inlCall == nil {
+ return xpos
+ }
+
+ pos := base.Ctxt.PosTable.Pos(xpos)
+ for old, new := range r.inlPosBases {
+ if pos.Base() == new {
+ pos.SetBase(old)
+ return base.Ctxt.PosTable.XPos(pos)
+ }
+ }
+
+ base.FatalfAt(xpos, "pos base missing from inlPosBases")
+ panic("unreachable")
+}
+
+// @@@ Packages
+
+func (r *reader) pkg() *types.Pkg {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
+func (pr *pkgReader) pkgIdx(idx int) *types.Pkg {
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader) doPkg() *types.Pkg {
+ path := r.string()
+ if path == "builtin" {
+ return types.BuiltinPkg
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types.NewPkg(path, "")
+
+ if pkg.Name == "" {
+ pkg.Name = name
+ } else {
+ assert(pkg.Name == name)
+ }
+
+ if pkg.Height == 0 {
+ pkg.Height = height
+ } else {
+ assert(pkg.Height == height)
+ }
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader) typ() *types.Type {
+ return r.typWrapped(true)
+}
+
+// typWrapped is like typ, but allows suppressing generation of
+// unnecessary wrappers as a compile-time optimization.
+func (r *reader) typWrapped(wrapped bool) *types.Type {
+ return r.p.typIdx(r.typInfo(), r.dict, wrapped)
+}
+
+func (r *reader) typInfo() typeInfo {
+ r.sync(syncType)
+ if r.bool() {
+ return typeInfo{idx: r.len(), derived: true}
+ }
+ return typeInfo{idx: r.reloc(relocType), derived: false}
+}
+
+func (pr *pkgReader) typIdx(info typeInfo, dict *readerDict, wrapped bool) *types.Type {
+ idx := info.idx
+ var where **types.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // For recursive type declarations involving interfaces and aliases,
+ // the above r.doTyp() call may have already set pr.typs[idx], so just
+ // double-check and return the type.
+ //
+ // Example:
+ //
+ // type F = func(I)
+ //
+ // type I interface {
+ // m(F)
+ // }
+ //
+ // The writer writes the data types in the following index order:
+ //
+ // 0: func(I)
+ // 1: I
+ // 2: interface{m(func(I))}
+ //
+ // The reader resolves them in the following index order:
+ //
+ // 0 -> 1 -> 2 -> 0 -> 1
+ //
+ // which can be divided logically into 2 steps:
+ //
+ //  - 0 -> 1 : the first time the reader reaches type I,
+ //    it creates a new named type with symbol I.
+ //
+ //  - 2 -> 0 -> 1: the reader ends up reaching symbol I again;
+ //    since symbol I was set up in the step above,
+ //    the reader just returns the named type.
+ //
+ // Now, as the recursive calls return, pr.typs looks like below:
+ //
+ // - 0 -> 1 -> 2 -> 0 : [<T> I <T>]
+ // - 0 -> 1 -> 2 : [func(I) I <T>]
+ // - 0 -> 1 : [func(I) I interface { "".m(func("".I)) }]
+ //
+ // Idx 1, corresponding to type I, was resolved successfully
+ // after the r.doTyp() call.
+
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ if wrapped {
+ // Only cache if we're adding wrappers, so that other callers that
+ // find a cached type know it was wrapped.
+ *where = typ
+
+ r.needWrapper(typ)
+ }
+
+ if !typ.IsUntyped() {
+ types.CheckSize(typ)
+ }
+
+ return typ
+}
+
+func (r *reader) doTyp() *types.Type {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ panic(fmt.Sprintf("unexpected type: %v", tag))
+
+ case typeBasic:
+ return *basics[r.len()]
+
+ case typeNamed:
+ obj := r.obj()
+ assert(obj.Op() == ir.OTYPE)
+ return obj.Type()
+
+ case typeTypeParam:
+ return r.dict.targs[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types.NewArray(r.typ(), len)
+ case typeChan:
+ dir := dirs[r.len()]
+ return types.NewChan(r.typ(), dir)
+ case typeMap:
+ return types.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types.NewPtr(r.typ())
+ case typeSignature:
+ return r.signature(types.LocalPkg, nil)
+ case typeSlice:
+ return types.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ }
+}
+
+func (r *reader) interfaceType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+
+ nmethods, nembeddeds := r.len(), r.len()
+
+ fields := make([]*types.Field, nmethods+nembeddeds)
+ methods, embeddeds := fields[:nmethods], fields[nmethods:]
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ mtyp := r.signature(pkg, types.FakeRecv())
+ methods[i] = types.NewField(pos, sym, mtyp)
+ }
+ for i := range embeddeds {
+ embeddeds[i] = types.NewField(src.NoXPos, nil, r.typ())
+ }
+
+ if len(fields) == 0 {
+ return types.Types[types.TINTER] // empty interface
+ }
+ return types.NewInterface(tpkg, fields, false)
+}
+
+func (r *reader) structType() *types.Type {
+ tpkg := types.LocalPkg // TODO(mdempsky): Remove after iexport is gone.
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ pos := r.pos()
+ pkg, sym := r.selector()
+ tpkg = pkg
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ f := types.NewField(pos, sym, ftyp)
+ f.Note = tag
+ if embedded {
+ f.Embedded = 1
+ }
+ fields[i] = f
+ }
+ return types.NewStruct(tpkg, fields)
+}
+
+func (r *reader) signature(tpkg *types.Pkg, recv *types.Field) *types.Type {
+ r.sync(syncSignature)
+
+ params := r.params(&tpkg)
+ results := r.params(&tpkg)
+ if r.bool() { // variadic
+ params[len(params)-1].SetIsDDD(true)
+ }
+
+ return types.NewSignature(tpkg, recv, nil, params, results)
+}
+
+func (r *reader) params(tpkg **types.Pkg) []*types.Field {
+ r.sync(syncParams)
+ fields := make([]*types.Field, r.len())
+ for i := range fields {
+ *tpkg, fields[i] = r.param()
+ }
+ return fields
+}
+
+func (r *reader) param() (*types.Pkg, *types.Field) {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, sym := r.localIdent()
+ typ := r.typ()
+
+ return pkg, types.NewField(pos, sym, typ)
+}
+
+// @@@ Objects
+
+var objReader = map[*types.Sym]pkgReaderIndex{}
+
+func (r *reader) obj() ir.Node {
+ r.sync(syncObject)
+
+ if r.bool() {
+ idx := r.len()
+ obj := r.dict.funcsObj[idx]
+ if obj == nil {
+ fn := r.dict.funcs[idx]
+ targs := make([]*types.Type, len(fn.explicits))
+ for i, targ := range fn.explicits {
+ targs[i] = r.p.typIdx(targ, r.dict, true)
+ }
+
+ obj = r.p.objIdx(fn.idx, nil, targs)
+ assert(r.dict.funcsObj[idx] == nil)
+ r.dict.funcsObj[idx] = obj
+ }
+ return obj
+ }
+
+ idx := r.reloc(relocObj)
+
+ explicits := make([]*types.Type, r.len())
+ for i := range explicits {
+ explicits[i] = r.typ()
+ }
+
+ var implicits []*types.Type
+ if r.dict != nil {
+ implicits = r.dict.targs
+ }
+
+ return r.p.objIdx(idx, implicits, explicits)
+}
+
+func (pr *pkgReader) objIdx(idx int, implicits, explicits []*types.Type) ir.Node {
+ rname := pr.newReader(relocName, idx, syncObject1)
+ _, sym := rname.qualifiedIdent()
+ tag := codeObj(rname.code(syncCodeObj))
+
+ if tag == objStub {
+ assert(!sym.IsBlank())
+ switch sym.Pkg {
+ case types.BuiltinPkg, types.UnsafePkg:
+ return sym.Def.(ir.Node)
+ }
+ if pri, ok := objReader[sym]; ok {
+ return pri.pr.objIdx(pri.idx, nil, explicits)
+ }
+ if haveLegacyImports {
+ assert(len(explicits) == 0)
+ return typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ }
+ base.Fatalf("unresolved stub: %v", sym)
+ }
+
+ dict := pr.objDictIdx(sym, idx, implicits, explicits)
+
+ r := pr.newReader(relocObj, idx, syncObject1)
+ rext := pr.newReader(relocObjExt, idx, syncObject1)
+
+ r.dict = dict
+ rext.dict = dict
+
+ sym = r.mangle(sym)
+ if !sym.IsBlank() && sym.Def != nil {
+ return sym.Def.(*ir.Name)
+ }
+
+ do := func(op ir.Op, hasTParams bool) *ir.Name {
+ pos := r.pos()
+ if hasTParams {
+ r.typeParamNames()
+ }
+
+ name := ir.NewDeclNameAt(pos, op, sym)
+ name.Class = ir.PEXTERN // may be overridden later
+ if !sym.IsBlank() {
+ if sym.Def != nil {
+ base.FatalfAt(name.Pos(), "already have a definition for %v", name)
+ }
+ assert(sym.Def == nil)
+ sym.Def = name
+ }
+ return name
+ }
+
+ switch tag {
+ default:
+ panic("unexpected object")
+
+ case objAlias:
+ name := do(ir.OTYPE, false)
+ setType(name, r.typ())
+ name.SetAlias(true)
+ return name
+
+ case objConst:
+ name := do(ir.OLITERAL, false)
+ typ := r.typ()
+ val := FixValue(typ, r.value())
+ setType(name, typ)
+ setValue(name, val)
+ return name
+
+ case objFunc:
+ if sym.Name == "init" {
+ sym = renameinit()
+ }
+ name := do(ir.ONAME, true)
+ setType(name, r.signature(sym.Pkg, nil))
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ rext.funcExt(name)
+ return name
+
+ case objType:
+ name := do(ir.OTYPE, true)
+ typ := types.NewNamed(name)
+ setType(name, typ)
+
+ // Important: We need to do this before SetUnderlying.
+ rext.typeExt(name)
+
+ // We need to defer CheckSize until we've called SetUnderlying to
+ // handle recursive types.
+ types.DeferCheckSize()
+ typ.SetUnderlying(r.typWrapped(false))
+ types.ResumeCheckSize()
+
+ methods := make([]*types.Field, r.len())
+ for i := range methods {
+ methods[i] = r.method(rext)
+ }
+ if len(methods) != 0 {
+ typ.Methods().Set(methods)
+ }
+
+ r.needWrapper(typ)
+
+ return name
+
+ case objVar:
+ name := do(ir.ONAME, false)
+ setType(name, r.typ())
+ rext.varExt(name)
+ return name
+ }
+}
+
+func (r *reader) mangle(sym *types.Sym) *types.Sym {
+ if !r.hasTypeParams() {
+ return sym
+ }
+
+ var buf bytes.Buffer
+ buf.WriteString(sym.Name)
+ buf.WriteByte('[')
+ for i, targ := range r.dict.targs {
+ if i > 0 {
+ if i == r.dict.implicits {
+ buf.WriteByte(';')
+ } else {
+ buf.WriteByte(',')
+ }
+ }
+ buf.WriteString(targ.LinkString())
+ }
+ buf.WriteByte(']')
+ return sym.Pkg.Lookup(buf.String())
+}
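mangle builds the link-time name of an instantiated object by appending its type arguments, using ';' to separate implicit from explicit arguments. A standalone sketch over plain strings (the inputs are hypothetical):

package main

import (
	"bytes"
	"fmt"
)

// mangleName mirrors the scheme above: name[targ0,targ1,...], with ';'
// placed where the implicit arguments end and the explicit ones begin.
func mangleName(name string, targs []string, implicits int) string {
	var buf bytes.Buffer
	buf.WriteString(name)
	buf.WriteByte('[')
	for i, targ := range targs {
		if i > 0 {
			if i == implicits {
				buf.WriteByte(';')
			} else {
				buf.WriteByte(',')
			}
		}
		buf.WriteString(targ)
	}
	buf.WriteByte(']')
	return buf.String()
}

func main() {
	fmt.Println(mangleName("F", []string{"int", "string"}, 0)) // F[int,string]
	fmt.Println(mangleName("X", []string{"int", "string"}, 1)) // X[int;string]
}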
+
+func (pr *pkgReader) objDictIdx(sym *types.Sym, idx int, implicits, explicits []*types.Type) *readerDict {
+ r := pr.newReader(relocObjDict, idx, syncObject1)
+
+ var dict readerDict
+
+ nimplicits := r.len()
+ nexplicits := r.len()
+
+ if nimplicits > len(implicits) || nexplicits != len(explicits) {
+ base.Fatalf("%v has %v+%v params, but instantiated with %v+%v args", sym, nimplicits, nexplicits, len(implicits), len(explicits))
+ }
+
+ dict.targs = append(implicits[:nimplicits:nimplicits], explicits...)
+ dict.implicits = nimplicits
+
+ // For stenciling, we can just skip over the type parameters.
+ for range dict.targs[dict.implicits:] {
+ // Skip past bounds without actually evaluating them.
+ r.sync(syncType)
+ if r.bool() {
+ r.len()
+ } else {
+ r.reloc(relocType)
+ }
+ }
+
+ dict.derived = make([]derivedInfo, r.len())
+ dict.derivedTypes = make([]*types.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ }
+
+ dict.funcs = make([]objInfo, r.len())
+ dict.funcsObj = make([]ir.Node, len(dict.funcs))
+ for i := range dict.funcs {
+ objIdx := r.reloc(relocObj)
+ targs := make([]typeInfo, r.len())
+ for j := range targs {
+ targs[j] = r.typInfo()
+ }
+ dict.funcs[i] = objInfo{idx: objIdx, explicits: targs}
+ }
+
+ return &dict
+}
+
+func (r *reader) typeParamNames() {
+ r.sync(syncTypeParamNames)
+
+ for range r.dict.targs[r.dict.implicits:] {
+ r.pos()
+ r.localIdent()
+ }
+}
+
+func (r *reader) method(rext *reader) *types.Field {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, sym := r.selector()
+ r.typeParamNames()
+ _, recv := r.param()
+ typ := r.signature(pkg, recv)
+
+ fnsym := sym
+ fnsym = ir.MethodSym(recv.Type, fnsym)
+ name := ir.NewNameAt(pos, fnsym)
+ setType(name, typ)
+
+ name.Func = ir.NewFunc(r.pos())
+ name.Func.Nname = name
+
+ rext.funcExt(name)
+
+ meth := types.NewField(name.Func.Pos(), sym, typ)
+ meth.Nname = name
+ meth.SetNointerface(name.Func.Pragma&ir.Nointerface != 0)
+
+ return meth
+}
+
+func (r *reader) qualifiedIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSym)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) localIdent() (pkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncLocalIdent)
+ pkg = r.pkg()
+ if name := r.string(); name != "" {
+ sym = pkg.Lookup(name)
+ }
+ return
+}
+
+func (r *reader) selector() (origPkg *types.Pkg, sym *types.Sym) {
+ r.sync(syncSelector)
+ origPkg = r.pkg()
+ name := r.string()
+ pkg := origPkg
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ sym = pkg.Lookup(name)
+ return
+}
+
+func (r *reader) hasTypeParams() bool {
+ return r.dict.hasTypeParams()
+}
+
+func (dict *readerDict) hasTypeParams() bool {
+ return dict != nil && len(dict.targs) != 0
+}
+
+// @@@ Compiler extensions
+
+func (r *reader) funcExt(name *ir.Name) {
+ r.sync(syncFuncExt)
+
+ name.Class = 0 // so MarkFunc doesn't complain
+ ir.MarkFunc(name)
+
+ fn := name.Func
+
+ // XXX: Workaround because linker doesn't know how to copy Pos.
+ if !fn.Pos().IsKnown() {
+ fn.SetPos(name.Pos())
+ }
+
+ // Normally, we only compile local functions, which avoids redundant compilation work.
+ // n.Defn is not nil for local functions, and is nil for imported functions. But for
+ // generic functions, we might have an instantiation that no other package has seen before.
+ // So we need to be conservative and compile it again.
+ //
+ // That's why name.Defn is set here, so ir.VisitFuncsBottomUp can analyze the function.
+ // TODO(mdempsky,cuonglm): find a cleaner way to handle this.
+ if name.Sym().Pkg == types.LocalPkg || r.hasTypeParams() {
+ name.Defn = fn
+ }
+
+ fn.Pragma = r.pragmaFlag()
+ r.linkname(name)
+
+ typecheck.Func(fn)
+
+ if r.bool() {
+ fn.ABI = obj.ABI(r.uint64())
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(name.Type()).FieldSlice() {
+ f.Note = r.string()
+ }
+ }
+
+ if r.bool() {
+ fn.Inl = &ir.Inline{
+ Cost: int32(r.len()),
+ CanDelayResults: r.bool(),
+ }
+ r.addBody(name.Func)
+ }
+ } else {
+ r.addBody(name.Func)
+ }
+ r.sync(syncEOF)
+}
+
+func (r *reader) typeExt(name *ir.Name) {
+ r.sync(syncTypeExt)
+
+ typ := name.Type()
+
+ if r.hasTypeParams() {
+ // Set "RParams" (really type arguments here, not parameters) so
+ // this type is treated as "fully instantiated". This ensures the
+ // type descriptor is written out as DUPOK and method wrappers are
+ // generated even for imported types.
+ var targs []*types.Type
+ targs = append(targs, r.dict.targs...)
+ typ.SetRParams(targs)
+ }
+
+ name.SetPragma(r.pragmaFlag())
+ if name.Pragma()&ir.NotInHeap != 0 {
+ typ.SetNotInHeap(true)
+ }
+
+ typecheck.SetBaseTypeIndex(typ, r.int64(), r.int64())
+}
+
+func (r *reader) varExt(name *ir.Name) {
+ r.sync(syncVarExt)
+ r.linkname(name)
+}
+
+func (r *reader) linkname(name *ir.Name) {
+ assert(name.Op() == ir.ONAME)
+ r.sync(syncLinkname)
+
+ if idx := r.int64(); idx >= 0 {
+ lsym := name.Linksym()
+ lsym.SymIdx = int32(idx)
+ lsym.Set(obj.AttrIndexed, true)
+ } else {
+ name.Sym().Linkname = r.string()
+ }
+}
+
+func (r *reader) pragmaFlag() ir.PragmaFlag {
+ r.sync(syncPragma)
+ return ir.PragmaFlag(r.int())
+}
+
+// @@@ Function bodies
+
+// bodyReader tracks where the serialized IR for a function's body can
+// be found.
+var bodyReader = map[*ir.Func]pkgReaderIndex{}
+
+// todoBodies holds the list of function bodies that still need to be
+// constructed.
+var todoBodies []*ir.Func
+
+// todoBodiesDone signals that we have constructed all functions in todoBodies.
+// This is necessary to prevent reader.addBody from adding things to todoBodies
+// when nested inlining happens.
+var todoBodiesDone = false
+
+func (r *reader) addBody(fn *ir.Func) {
+ pri := pkgReaderIndex{r.p, r.reloc(relocBody), r.dict}
+ bodyReader[fn] = pri
+
+ if fn.Nname.Defn == nil {
+ // Don't read in function body for imported functions.
+ // See comment in funcExt.
+ return
+ }
+
+ if r.curfn == nil && !todoBodiesDone {
+ todoBodies = append(todoBodies, fn)
+ return
+ }
+
+ pri.funcBody(fn)
+}
+
+func (pri pkgReaderIndex) funcBody(fn *ir.Func) {
+ r := pri.asReader(relocBody, syncFuncBody)
+ r.funcBody(fn)
+}
+
+func (r *reader) funcBody(fn *ir.Func) {
+ r.curfn = fn
+ r.closureVars = fn.ClosureVars
+
+ ir.WithFunc(fn, func() {
+ r.funcargs(fn)
+
+ if !r.bool() {
+ return
+ }
+
+ body := r.stmts()
+ if body == nil {
+ pos := src.NoXPos
+ if quirksMode() {
+ pos = funcParamsEndPos(fn)
+ }
+ body = []ir.Node{typecheck.Stmt(ir.NewBlockStmt(pos, nil))}
+ }
+ fn.Body = body
+ fn.Endlineno = r.pos()
+ })
+
+ r.marker.WriteTo(fn)
+}
+
+func (r *reader) funcargs(fn *ir.Func) {
+ sig := fn.Nname.Type()
+
+ if recv := sig.Recv(); recv != nil {
+ r.funcarg(recv, recv.Sym, ir.PPARAM)
+ }
+ for _, param := range sig.Params().FieldSlice() {
+ r.funcarg(param, param.Sym, ir.PPARAM)
+ }
+
+ for i, param := range sig.Results().FieldSlice() {
+ sym := types.OrigSym(param.Sym)
+
+ if sym == nil || sym.IsBlank() {
+ prefix := "~r"
+ if r.inlCall != nil {
+ prefix = "~R"
+ } else if sym != nil {
+ prefix = "~b"
+ }
+ sym = typecheck.LookupNum(prefix, i)
+ }
+
+ r.funcarg(param, sym, ir.PPARAMOUT)
+ }
+}
+
+func (r *reader) funcarg(param *types.Field, sym *types.Sym, ctxt ir.Class) {
+ if sym == nil {
+ assert(ctxt == ir.PPARAM)
+ if r.inlCall != nil {
+ r.inlvars.Append(ir.BlankNode)
+ }
+ return
+ }
+
+ name := ir.NewNameAt(r.updatePos(param.Pos), sym)
+ setType(name, param.Type)
+ r.addLocal(name, ctxt)
+
+ if r.inlCall == nil {
+ if !r.funarghack {
+ param.Sym = sym
+ param.Nname = name
+ }
+ } else {
+ if ctxt == ir.PPARAMOUT {
+ r.retvars.Append(name)
+ } else {
+ r.inlvars.Append(name)
+ }
+ }
+}
+
+func (r *reader) addLocal(name *ir.Name, ctxt ir.Class) {
+ assert(ctxt == ir.PAUTO || ctxt == ir.PPARAM || ctxt == ir.PPARAMOUT)
+
+ r.sync(syncAddLocal)
+ if enableSync {
+ want := r.int()
+ if have := len(r.locals); have != want {
+ base.FatalfAt(name.Pos(), "locals table has desynced")
+ }
+ }
+
+ name.SetUsed(true)
+ r.locals = append(r.locals, name)
+
+ // TODO(mdempsky): Move earlier.
+ if ir.IsBlank(name) {
+ return
+ }
+
+ if r.inlCall != nil {
+ if ctxt == ir.PAUTO {
+ name.SetInlLocal(true)
+ } else {
+ name.SetInlFormal(true)
+ ctxt = ir.PAUTO
+ }
+
+ // TODO(mdempsky): Rethink this hack.
+ if strings.HasPrefix(name.Sym().Name, "~") || base.Flag.GenDwarfInl == 0 {
+ name.SetPos(r.inlCall.Pos())
+ name.SetInlFormal(false)
+ name.SetInlLocal(false)
+ }
+ }
+
+ name.Class = ctxt
+ name.Curfn = r.curfn
+
+ r.curfn.Dcl = append(r.curfn.Dcl, name)
+
+ if ctxt == ir.PAUTO {
+ name.SetFrameOffset(0)
+ }
+}
+
+func (r *reader) useLocal() *ir.Name {
+ r.sync(syncUseObjLocal)
+ if r.bool() {
+ return r.locals[r.len()]
+ }
+ return r.closureVars[r.len()]
+}
+
+func (r *reader) openScope() {
+ r.sync(syncOpenScope)
+ pos := r.pos()
+
+ if base.Flag.Dwarf {
+ r.scopeVars = append(r.scopeVars, len(r.curfn.Dcl))
+ r.marker.Push(pos)
+ }
+}
+
+func (r *reader) closeScope() {
+ r.sync(syncCloseScope)
+ r.lastCloseScopePos = r.pos()
+
+ r.closeAnotherScope()
+}
+
+// closeAnotherScope is like closeScope, but it reuses the same mark
+// position as the last closeScope call. This is useful for "for" and
+// "if" statements, as their implicit blocks always end at the same
+// position as an explicit block.
+func (r *reader) closeAnotherScope() {
+ r.sync(syncCloseAnotherScope)
+
+ if base.Flag.Dwarf {
+ scopeVars := r.scopeVars[len(r.scopeVars)-1]
+ r.scopeVars = r.scopeVars[:len(r.scopeVars)-1]
+
+ // Quirkish: noder decides which scopes to keep before
+ // typechecking, whereas incremental typechecking during IR
+ // construction can result in new autotemps being allocated. To
+ // produce identical output, we ignore autotemps here for the
+ // purpose of deciding whether to retract the scope.
+ //
+ // This is important for net/http/fcgi, because it contains:
+ //
+ // var body io.ReadCloser
+ // if len(content) > 0 {
+ // body, req.pw = io.Pipe()
+ // } else { … }
+ //
+ // Notably, io.Pipe is inlinable, and inlining it introduces a ~R0
+ // variable at the call site.
+ //
+ // Noder does not preserve the scope where the io.Pipe() call
+ // resides, because it doesn't contain any declared variables in
+ // source. So the ~R0 variable ends up being assigned to the
+ // enclosing scope instead.
+ //
+ // However, typechecking this assignment also introduces
+ // autotemps, because io.Pipe's results need conversion before
+ // they can be assigned to their respective destination variables.
+ //
+ // TODO(mdempsky): We should probably just keep all scopes, and
+ // let dwarfgen take care of pruning them instead.
+ retract := true
+ for _, n := range r.curfn.Dcl[scopeVars:] {
+ if !n.AutoTemp() {
+ retract = false
+ break
+ }
+ }
+
+ if retract {
+ // no variables were declared in this scope, so we can retract it.
+ r.marker.Unpush()
+ } else {
+ r.marker.Pop(r.lastCloseScopePos)
+ }
+ }
+}
+
+// @@@ Statements
+
+func (r *reader) stmt() ir.Node {
+ switch stmts := r.stmts(); len(stmts) {
+ case 0:
+ return nil
+ case 1:
+ return stmts[0]
+ default:
+ return ir.NewBlockStmt(stmts[0].Pos(), stmts)
+ }
+}
+
+func (r *reader) stmts() []ir.Node {
+ assert(ir.CurFunc == r.curfn)
+ var res ir.Nodes
+
+ r.sync(syncStmts)
+ for {
+ tag := codeStmt(r.code(syncStmt1))
+ if tag == stmtEnd {
+ r.sync(syncStmtsEnd)
+ return res
+ }
+
+ if n := r.stmt1(tag, &res); n != nil {
+ res.Append(typecheck.Stmt(n))
+ }
+ }
+}
+
+func (r *reader) stmt1(tag codeStmt, out *ir.Nodes) ir.Node {
+ var label *types.Sym
+ if n := len(*out); n > 0 {
+ if ls, ok := (*out)[n-1].(*ir.LabelStmt); ok {
+ label = ls.Label
+ }
+ }
+
+ switch tag {
+ default:
+ panic("unexpected statement")
+
+ case stmtAssign:
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we visit LHS before RHS again.
+ rhs := r.exprList()
+ names, lhs := r.assignList()
+
+ if len(rhs) == 0 {
+ for _, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ as.PtrInit().Append(ir.NewDecl(pos, ir.ODCL, name))
+ out.Append(typecheck.Stmt(as))
+ }
+ return nil
+ }
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ n := ir.NewAssignStmt(pos, lhs[0], rhs[0])
+ n.Def = r.initDefn(n, names)
+ return n
+ }
+
+ n := ir.NewAssignListStmt(pos, ir.OAS2, lhs, rhs)
+ n.Def = r.initDefn(n, names)
+ return n
+
+ case stmtAssignOp:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ rhs := r.expr()
+ return ir.NewAssignOpStmt(pos, op, lhs, rhs)
+
+ case stmtIncDec:
+ op := r.op()
+ lhs := r.expr()
+ pos := r.pos()
+ n := ir.NewAssignOpStmt(pos, op, lhs, ir.NewBasicLit(pos, one))
+ n.IncDec = true
+ return n
+
+ case stmtBlock:
+ out.Append(r.blockStmt()...)
+ return nil
+
+ case stmtBranch:
+ pos := r.pos()
+ op := r.op()
+ sym := r.optLabel()
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case stmtCall:
+ pos := r.pos()
+ op := r.op()
+ call := r.expr()
+ return ir.NewGoDeferStmt(pos, op, call)
+
+ case stmtExpr:
+ return r.expr()
+
+ case stmtFor:
+ return r.forStmt(label)
+
+ case stmtIf:
+ return r.ifStmt()
+
+ case stmtLabel:
+ pos := r.pos()
+ sym := r.label()
+ return ir.NewLabelStmt(pos, sym)
+
+ case stmtReturn:
+ pos := r.pos()
+ results := r.exprList()
+ return ir.NewReturnStmt(pos, results)
+
+ case stmtSelect:
+ return r.selectStmt(label)
+
+ case stmtSend:
+ pos := r.pos()
+ ch := r.expr()
+ value := r.expr()
+ return ir.NewSendStmt(pos, ch, value)
+
+ case stmtSwitch:
+ return r.switchStmt(label)
+
+ case stmtTypeDeclHack:
+ // fake "type _ = int" declaration to prevent inlining in quirks mode.
+ assert(quirksMode())
+
+ name := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, ir.BlankNode.Sym())
+ name.SetAlias(true)
+ setType(name, types.Types[types.TINT])
+
+ n := ir.NewDecl(src.NoXPos, ir.ODCLTYPE, name)
+ n.SetTypecheck(1)
+ return n
+ }
+}
+
+func (r *reader) assignList() ([]*ir.Name, []ir.Node) {
+ lhs := make([]ir.Node, r.len())
+ var names []*ir.Name
+
+ for i := range lhs {
+ if r.bool() {
+ pos := r.pos()
+ _, sym := r.localIdent()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, sym)
+ lhs[i] = name
+ names = append(names, name)
+ setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ continue
+ }
+
+ lhs[i] = r.expr()
+ }
+
+ return names, lhs
+}
+
+func (r *reader) blockStmt() []ir.Node {
+ r.sync(syncBlockStmt)
+ r.openScope()
+ stmts := r.stmts()
+ r.closeScope()
+ return stmts
+}
+
+func (r *reader) forStmt(label *types.Sym) ir.Node {
+ r.sync(syncForStmt)
+
+ r.openScope()
+
+ if r.bool() {
+ pos := r.pos()
+
+ // TODO(mdempsky): After quirks mode is gone, swap these
+ // statements so we read LHS before X again.
+ x := r.expr()
+ names, lhs := r.assignList()
+
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ rang := ir.NewRangeStmt(pos, nil, nil, x, body)
+ if len(lhs) >= 1 {
+ rang.Key = lhs[0]
+ if len(lhs) >= 2 {
+ rang.Value = lhs[1]
+ }
+ }
+ rang.Def = r.initDefn(rang, names)
+ rang.Label = label
+ return rang
+ }
+
+ pos := r.pos()
+ init := r.stmt()
+ cond := r.expr()
+ post := r.stmt()
+ body := r.blockStmt()
+ r.closeAnotherScope()
+
+ stmt := ir.NewForStmt(pos, init, cond, post, body)
+ stmt.Label = label
+ return stmt
+}
+
+func (r *reader) ifStmt() ir.Node {
+ r.sync(syncIfStmt)
+ r.openScope()
+ pos := r.pos()
+ init := r.stmts()
+ cond := r.expr()
+ then := r.blockStmt()
+ els := r.stmts()
+ n := ir.NewIfStmt(pos, cond, then, els)
+ n.SetInit(init)
+ r.closeAnotherScope()
+ return n
+}
+
+func (r *reader) selectStmt(label *types.Sym) ir.Node {
+ r.sync(syncSelectStmt)
+
+ pos := r.pos()
+ clauses := make([]*ir.CommClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ comm := r.stmt()
+ body := r.stmts()
+
+ clauses[i] = ir.NewCommStmt(pos, comm, body)
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ n := ir.NewSelectStmt(pos, clauses)
+ n.Label = label
+ return n
+}
+
+func (r *reader) switchStmt(label *types.Sym) ir.Node {
+ r.sync(syncSwitchStmt)
+
+ r.openScope()
+ pos := r.pos()
+ init := r.stmt()
+
+ var tag ir.Node
+ if r.bool() {
+ pos := r.pos()
+ var ident *ir.Ident
+ if r.bool() {
+ pos := r.pos()
+ sym := typecheck.Lookup(r.string())
+ ident = ir.NewIdent(pos, sym)
+ }
+ x := r.expr()
+ tag = ir.NewTypeSwitchGuard(pos, ident, x)
+ } else {
+ tag = r.expr()
+ }
+
+ tswitch, ok := tag.(*ir.TypeSwitchGuard)
+ if ok && tswitch.Tag == nil {
+ tswitch = nil
+ }
+
+ clauses := make([]*ir.CaseClause, r.len())
+ for i := range clauses {
+ if i > 0 {
+ r.closeScope()
+ }
+ r.openScope()
+
+ pos := r.pos()
+ cases := r.exprList()
+
+ clause := ir.NewCaseStmt(pos, cases, nil)
+ if tswitch != nil {
+ pos := r.pos()
+ typ := r.typ()
+
+ name := ir.NewNameAt(pos, tswitch.Tag.Sym())
+ setType(name, typ)
+ r.addLocal(name, ir.PAUTO)
+ clause.Var = name
+ name.Defn = tswitch
+ }
+
+ clause.Body = r.stmts()
+ clauses[i] = clause
+ }
+ if len(clauses) > 0 {
+ r.closeScope()
+ }
+ r.closeScope()
+
+ n := ir.NewSwitchStmt(pos, tag, clauses)
+ n.Label = label
+ if init != nil {
+ n.SetInit([]ir.Node{init})
+ }
+ return n
+}
+
+func (r *reader) label() *types.Sym {
+ r.sync(syncLabel)
+ name := r.string()
+ if r.inlCall != nil {
+ name = fmt.Sprintf("~%s·%d", name, inlgen)
+ }
+ return typecheck.Lookup(name)
+}
+
+func (r *reader) optLabel() *types.Sym {
+ r.sync(syncOptLabel)
+ if r.bool() {
+ return r.label()
+ }
+ return nil
+}
+
+// initDefn marks the given names as declared by defn and populates
+// its Init field with ODCL nodes. It then reports whether any names
+// were so declared, which can be used to initialize defn.Def.
+func (r *reader) initDefn(defn ir.InitNode, names []*ir.Name) bool {
+ if len(names) == 0 {
+ return false
+ }
+
+ init := make([]ir.Node, len(names))
+ for i, name := range names {
+ name.Defn = defn
+ init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
+ }
+ defn.SetInit(init)
+ return true
+}
+
+// @@@ Expressions
+
+// expr reads and returns a typechecked expression.
+func (r *reader) expr() (res ir.Node) {
+ defer func() {
+ if res != nil && res.Typecheck() == 0 {
+ base.FatalfAt(res.Pos(), "%v missed typecheck", res)
+ }
+ }()
+
+ switch tag := codeExpr(r.code(syncExpr)); tag {
+ default:
+ panic("unhandled expression")
+
+ case exprNone:
+ return nil
+
+ case exprBlank:
+ // blank only allowed in LHS of assignments
+ // TODO(mdempsky): Handle directly in assignList instead?
+ return typecheck.AssignExpr(ir.BlankNode)
+
+ case exprLocal:
+ return typecheck.Expr(r.useLocal())
+
+ case exprName:
+ // Callee instead of Expr allows builtins
+ // TODO(mdempsky): Handle builtins directly in exprCall, like method calls?
+ return typecheck.Callee(r.obj())
+
+ case exprType:
+ // TODO(mdempsky): ir.TypeNode should probably return a typecheck'd node.
+ n := ir.TypeNode(r.typ())
+ n.SetTypecheck(1)
+ return n
+
+ case exprConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := FixValue(typ, r.value())
+ op := r.op()
+ orig := r.string()
+ return typecheck.Expr(OrigConst(pos, typ, val, op, orig))
+
+ case exprCompLit:
+ return r.compLit()
+
+ case exprFuncLit:
+ return r.funcLit()
+
+ case exprSelector:
+ x := r.expr()
+ pos := r.pos()
+ _, sym := r.selector()
+ n := typecheck.Expr(ir.NewSelectorExpr(pos, ir.OXDOT, x, sym)).(*ir.SelectorExpr)
+ if n.Op() == ir.OMETHVALUE {
+ wrapper := methodValueWrapper{
+ rcvr: n.X.Type(),
+ method: n.Selection,
+ }
+ if r.importedDef() {
+ haveMethodValueWrappers = append(haveMethodValueWrappers, wrapper)
+ } else {
+ needMethodValueWrappers = append(needMethodValueWrappers, wrapper)
+ }
+ }
+ return n
+
+ case exprIndex:
+ x := r.expr()
+ pos := r.pos()
+ index := r.expr()
+ return typecheck.Expr(ir.NewIndexExpr(pos, x, index))
+
+ case exprSlice:
+ x := r.expr()
+ pos := r.pos()
+ var index [3]ir.Node
+ for i := range index {
+ index[i] = r.expr()
+ }
+ op := ir.OSLICE
+ if index[2] != nil {
+ op = ir.OSLICE3
+ }
+ return typecheck.Expr(ir.NewSliceExpr(pos, op, x, index[0], index[1], index[2]))
+
+ case exprAssert:
+ x := r.expr()
+ pos := r.pos()
+ typ := r.expr().(ir.Ntype)
+ return typecheck.Expr(ir.NewTypeAssertExpr(pos, x, typ))
+
+ case exprUnaryOp:
+ op := r.op()
+ pos := r.pos()
+ x := r.expr()
+
+ switch op {
+ case ir.OADDR:
+ return typecheck.Expr(typecheck.NodAddrAt(pos, x))
+ case ir.ODEREF:
+ return typecheck.Expr(ir.NewStarExpr(pos, x))
+ }
+ return typecheck.Expr(ir.NewUnaryExpr(pos, op, x))
+
+ case exprBinaryOp:
+ op := r.op()
+ x := r.expr()
+ pos := r.pos()
+ y := r.expr()
+
+ switch op {
+ case ir.OANDAND, ir.OOROR:
+ return typecheck.Expr(ir.NewLogicalExpr(pos, op, x, y))
+ }
+ return typecheck.Expr(ir.NewBinaryExpr(pos, op, x, y))
+
+ case exprCall:
+ fun := r.expr()
+ if r.bool() { // method call
+ pos := r.pos()
+ _, sym := r.selector()
+ fun = typecheck.Callee(ir.NewSelectorExpr(pos, ir.OXDOT, fun, sym))
+ }
+ pos := r.pos()
+ args := r.exprs()
+ dots := r.bool()
+ return typecheck.Call(pos, fun, args, dots)
+
+ case exprConvert:
+ typ := r.typ()
+ pos := r.pos()
+ x := r.expr()
+ return typecheck.Expr(ir.NewConvExpr(pos, ir.OCONV, typ, x))
+ }
+}
+
+func (r *reader) compLit() ir.Node {
+ r.sync(syncCompLit)
+ pos := r.pos()
+ typ0 := r.typ()
+
+ typ := typ0
+ if typ.IsPtr() {
+ typ = typ.Elem()
+ }
+ if typ.Kind() == types.TFORW {
+ base.FatalfAt(pos, "unresolved composite literal type: %v", typ)
+ }
+ isStruct := typ.Kind() == types.TSTRUCT
+
+ elems := make([]ir.Node, r.len())
+ for i := range elems {
+ elemp := &elems[i]
+
+ if isStruct {
+ sk := ir.NewStructKeyExpr(r.pos(), typ.Field(r.len()), nil)
+ *elemp, elemp = sk, &sk.Value
+ } else if r.bool() {
+ kv := ir.NewKeyExpr(r.pos(), r.expr(), nil)
+ *elemp, elemp = kv, &kv.Value
+ }
+
+ *elemp = wrapName(r.pos(), r.expr())
+ }
+
+ lit := typecheck.Expr(ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), elems))
+ if typ0.IsPtr() {
+ lit = typecheck.Expr(typecheck.NodAddrAt(pos, lit))
+ lit.SetType(typ0)
+ }
+ return lit
+}
+
+func wrapName(pos src.XPos, x ir.Node) ir.Node {
+ // These nodes do not carry line numbers.
+ // Introduce a wrapper node to give them the correct line.
+ switch ir.Orig(x).Op() {
+ case ir.OTYPE, ir.OLITERAL:
+ if x.Sym() == nil {
+ break
+ }
+ fallthrough
+ case ir.ONAME, ir.ONONAME, ir.OPACK, ir.ONIL:
+ p := ir.NewParenExpr(pos, x)
+ p.SetImplicit(true)
+ return p
+ }
+ return x
+}
+
+func (r *reader) funcLit() ir.Node {
+ r.sync(syncFuncLit)
+
+ pos := r.pos()
+ typPos := r.pos()
+ xtype2 := r.signature(types.LocalPkg, nil)
+
+ opos := pos
+ if quirksMode() {
+ opos = r.origPos(pos)
+ }
+
+ fn := ir.NewClosureFunc(opos, r.curfn != nil)
+ clo := fn.OClosure
+ ir.NameClosure(clo, r.curfn)
+
+ setType(fn.Nname, xtype2)
+ if quirksMode() {
+ fn.Nname.Ntype = ir.TypeNodeAt(typPos, xtype2)
+ }
+ typecheck.Func(fn)
+ setType(clo, fn.Type())
+
+ fn.ClosureVars = make([]*ir.Name, 0, r.len())
+ for len(fn.ClosureVars) < cap(fn.ClosureVars) {
+ ir.NewClosureVar(r.pos(), fn, r.useLocal())
+ }
+
+ r.addBody(fn)
+
+ // TODO(mdempsky): Remove hard-coding of typecheck.Target.
+ return ir.UseClosure(clo, typecheck.Target)
+}
+
+func (r *reader) exprList() []ir.Node {
+ r.sync(syncExprList)
+ return r.exprs()
+}
+
+func (r *reader) exprs() []ir.Node {
+ r.sync(syncExprs)
+ nodes := make([]ir.Node, r.len())
+ if len(nodes) == 0 {
+ return nil // TODO(mdempsky): Unclear if this matters.
+ }
+ for i := range nodes {
+ nodes[i] = r.expr()
+ }
+ return nodes
+}
+
+func (r *reader) op() ir.Op {
+ r.sync(syncOp)
+ return ir.Op(r.len())
+}
+
+// @@@ Package initialization
+
+func (r *reader) pkgInit(self *types.Pkg, target *ir.Package) {
+ if quirksMode() {
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly register position bases, so their filenames are
+ // assigned stable indices.
+ posBase := r.posBase()
+ _ = base.Ctxt.PosTable.XPos(src.MakePos(posBase, 0, 0))
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ // Eagerly resolve imported objects, so any filenames registered
+ // in the process are assigned stable indices too.
+ _, sym := r.qualifiedIdent()
+ typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ assert(sym.Def != nil)
+ }
+ }
+
+ cgoPragmas := make([][]string, r.len())
+ for i := range cgoPragmas {
+ cgoPragmas[i] = r.strings()
+ }
+ target.CgoPragmas = cgoPragmas
+
+ r.pkgDecls(target)
+
+ r.sync(syncEOF)
+}
+
+func (r *reader) pkgDecls(target *ir.Package) {
+ r.sync(syncDecls)
+ for {
+ switch code := codeDecl(r.code(syncDecl)); code {
+ default:
+ panic(fmt.Sprintf("unhandled decl: %v", code))
+
+ case declEnd:
+ return
+
+ case declFunc:
+ names := r.pkgObjs(target)
+ assert(len(names) == 1)
+ target.Decls = append(target.Decls, names[0].Func)
+
+ case declMethod:
+ typ := r.typ()
+ _, sym := r.selector()
+
+ method := typecheck.Lookdot1(nil, sym, typ, typ.Methods(), 0)
+ target.Decls = append(target.Decls, method.Nname.(*ir.Name).Func)
+
+ case declVar:
+ pos := r.pos()
+ names := r.pkgObjs(target)
+ values := r.exprList()
+
+ if len(names) > 1 && len(values) == 1 {
+ as := ir.NewAssignListStmt(pos, ir.OAS2, nil, values)
+ for _, name := range names {
+ as.Lhs.Append(name)
+ name.Defn = as
+ }
+ target.Decls = append(target.Decls, as)
+ } else {
+ for i, name := range names {
+ as := ir.NewAssignStmt(pos, name, nil)
+ if i < len(values) {
+ as.Y = values[i]
+ }
+ name.Defn = as
+ target.Decls = append(target.Decls, as)
+ }
+ }
+
+ if n := r.len(); n > 0 {
+ assert(len(names) == 1)
+ embeds := make([]ir.Embed, n)
+ for i := range embeds {
+ embeds[i] = ir.Embed{Pos: r.pos(), Patterns: r.strings()}
+ }
+ names[0].Embed = &embeds
+ target.Embeds = append(target.Embeds, names[0])
+ }
+
+ case declOther:
+ r.pkgObjs(target)
+ }
+ }
+}
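+
+// As a rough illustration of the declVar case above, a package-level
+// "var a, b = f()" comes back as a single OAS2 assignment whose Lhs lists
+// both names, while "var a, b = 1, 2" comes back as two separate OAS
+// assignments, one per name. Any //go:embed patterns follow the values and
+// are only expected for a single declared name.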
+
+func (r *reader) pkgObjs(target *ir.Package) []*ir.Name {
+ r.sync(syncDeclNames)
+ nodes := make([]*ir.Name, r.len())
+ for i := range nodes {
+ r.sync(syncDeclName)
+
+ name := r.obj().(*ir.Name)
+ nodes[i] = name
+
+ sym := name.Sym()
+ if sym.IsBlank() {
+ continue
+ }
+
+ switch name.Class {
+ default:
+ base.FatalfAt(name.Pos(), "unexpected class: %v", name.Class)
+
+ case ir.PEXTERN:
+ target.Externs = append(target.Externs, name)
+
+ case ir.PFUNC:
+ assert(name.Type().Recv() == nil)
+
+ // TODO(mdempsky): Cleaner way to recognize init?
+ if strings.HasPrefix(sym.Name, "init.") {
+ target.Inits = append(target.Inits, name.Func)
+ }
+ }
+
+ if types.IsExported(sym.Name) {
+ assert(!sym.OnExportList())
+ target.Exports = append(target.Exports, name)
+ sym.SetOnExportList(true)
+ }
+
+ if base.Flag.AsmHdr != "" {
+ assert(!sym.Asm())
+ target.Asms = append(target.Asms, name)
+ sym.SetAsm(true)
+ }
+ }
+
+ return nodes
+}
+
+// @@@ Inlining
+
+var inlgen = 0
+
+func InlineCall(call *ir.CallExpr, fn *ir.Func, inlIndex int) *ir.InlinedCallExpr {
+ // TODO(mdempsky): Turn callerfn into an explicit parameter.
+ callerfn := ir.CurFunc
+
+ pri, ok := bodyReader[fn]
+ if !ok {
+ // Assume it's an imported function or something that we don't
+ // have access to in quirks mode.
+ if haveLegacyImports {
+ return nil
+ }
+
+ base.FatalfAt(call.Pos(), "missing function body for call to %v", fn)
+ }
+
+ if fn.Inl.Body == nil {
+ expandInline(fn, pri)
+ }
+
+ r := pri.asReader(relocBody, syncFuncBody)
+
+ // TODO(mdempsky): This still feels clumsy. Can we do better?
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), callerfn.Sym())
+ tmpfn.Closgen = callerfn.Closgen
+ defer func() { callerfn.Closgen = tmpfn.Closgen }()
+
+ setType(tmpfn.Nname, fn.Type())
+ r.curfn = tmpfn
+
+ r.inlCaller = callerfn
+ r.inlCall = call
+ r.inlFunc = fn
+ r.inlTreeIndex = inlIndex
+ r.inlPosBases = make(map[*src.PosBase]*src.PosBase)
+
+ r.closureVars = make([]*ir.Name, len(r.inlFunc.ClosureVars))
+ for i, cv := range r.inlFunc.ClosureVars {
+ r.closureVars[i] = cv.Outer
+ }
+
+ r.funcargs(fn)
+
+ assert(r.bool()) // have body
+ r.delayResults = fn.Inl.CanDelayResults
+
+ r.retlabel = typecheck.AutoLabel(".i")
+ inlgen++
+
+ init := ir.TakeInit(call)
+
+ // For normal function calls, the function callee expression
+ // may contain side effects. Make sure to preserve these,
+ // if necessary (#42703).
+ if call.Op() == ir.OCALLFUNC {
+ inline.CalleeEffects(&init, call.X)
+ }
+
+ var args ir.Nodes
+ if call.Op() == ir.OCALLMETH {
+ base.FatalfAt(call.Pos(), "OCALLMETH missed by typecheck")
+ }
+ args.Append(call.Args...)
+
+ // Create assignment to declare and initialize inlvars.
+ as2 := ir.NewAssignListStmt(call.Pos(), ir.OAS2, r.inlvars, args)
+ as2.Def = true
+ var as2init ir.Nodes
+ for _, name := range r.inlvars {
+ if ir.IsBlank(name) {
+ continue
+ }
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ as2init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ name.Defn = as2
+ }
+ as2.SetInit(as2init)
+ init.Append(typecheck.Stmt(as2))
+
+ if !r.delayResults {
+ // If not delaying retvars, declare and zero initialize the
+ // result variables now.
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ init.Append(ir.NewDecl(call.Pos(), ir.ODCL, name))
+ ras := ir.NewAssignStmt(call.Pos(), name, nil)
+ init.Append(typecheck.Stmt(ras))
+ }
+ }
+
+ // Add an inline mark just before the inlined body.
+ // This mark is inline in the code so that it's a reasonable spot
+ // to put a breakpoint. Not sure if that's really necessary or not
+ // (in which case it could go at the end of the function instead).
+ // Note issue 28603.
+ init.Append(ir.NewInlineMarkStmt(call.Pos().WithIsStmt(), int64(r.inlTreeIndex)))
+
+ nparams := len(r.curfn.Dcl)
+
+ ir.WithFunc(r.curfn, func() {
+ r.curfn.Body = r.stmts()
+ r.curfn.Endlineno = r.pos()
+
+ deadcode.Func(r.curfn)
+
+ // Replace any "return" statements within the function body.
+ var edit func(ir.Node) ir.Node
+ edit = func(n ir.Node) ir.Node {
+ if ret, ok := n.(*ir.ReturnStmt); ok {
+ n = typecheck.Stmt(r.inlReturn(ret))
+ }
+ ir.EditChildren(n, edit)
+ return n
+ }
+ edit(r.curfn)
+ })
+
+ body := ir.Nodes(r.curfn.Body)
+
+ // Quirk: If deadcode elimination turned a non-empty function into
+ // an empty one, we need to set the position for the empty block
+ // left behind to the inlined position for src.NoXPos, so that
+ // an empty string gets added into the DWARF file name listing at
+ // the appropriate index.
+ if quirksMode() && len(body) == 1 {
+ if block, ok := body[0].(*ir.BlockStmt); ok && len(block.List) == 0 {
+ block.SetPos(r.updatePos(src.NoXPos))
+ }
+ }
+
+ // Quirkish: We need to eagerly prune variables added during
+ // inlining, but removed by deadcode.Func above. Unused
+ // variables will get removed during stack frame layout anyway, but
+ // len(fn.Dcl) ends up influencing things like autotmp naming.
+
+ used := usedLocals(body)
+
+ for i, name := range r.curfn.Dcl {
+ if i < nparams || used.Has(name) {
+ name.Curfn = callerfn
+ callerfn.Dcl = append(callerfn.Dcl, name)
+
+ // Quirkish. TODO(mdempsky): Document why.
+ if name.AutoTemp() {
+ name.SetEsc(ir.EscUnknown)
+
+ if base.Flag.GenDwarfInl != 0 {
+ name.SetInlLocal(true)
+ } else {
+ name.SetPos(r.inlCall.Pos())
+ }
+ }
+ }
+ }
+
+ body.Append(ir.NewLabelStmt(call.Pos(), r.retlabel))
+
+ res := ir.NewInlinedCallExpr(call.Pos(), body, append([]ir.Node(nil), r.retvars...))
+ res.SetInit(init)
+ res.SetType(call.Type())
+ res.SetTypecheck(1)
+
+ // Inlining shouldn't add any functions to todoBodies.
+ assert(len(todoBodies) == 0)
+
+ return res
+}
+
+// inlReturn returns a statement that can substitute for the given
+// return statement when inlining.
+func (r *reader) inlReturn(ret *ir.ReturnStmt) *ir.BlockStmt {
+ pos := r.inlCall.Pos()
+
+ block := ir.TakeInit(ret)
+
+ if results := ret.Results; len(results) != 0 {
+ assert(len(r.retvars) == len(results))
+
+ as2 := ir.NewAssignListStmt(pos, ir.OAS2, append([]ir.Node(nil), r.retvars...), ret.Results)
+
+ if r.delayResults {
+ for _, name := range r.retvars {
+ // TODO(mdempsky): Use inlined position of name.Pos() instead?
+ name := name.(*ir.Name)
+ block.Append(ir.NewDecl(pos, ir.ODCL, name))
+ name.Defn = as2
+ }
+ }
+
+ block.Append(as2)
+ }
+
+ block.Append(ir.NewBranchStmt(pos, ir.OGOTO, r.retlabel))
+ return ir.NewBlockStmt(pos, block)
+}
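+
+// As a sketch, assuming an inlined function with result variables (r0, r1)
+// and a return label such as .i0, a "return x, y" inside its body is
+// rewritten by inlReturn into a block equivalent to
+//
+//	{
+//		r0, r1 = x, y
+//		goto .i0
+//	}
+//
+// with the ODCL declarations for r0 and r1 added here only when results are
+// delayed (r.delayResults); otherwise InlineCall declares and zeroes them up
+// front.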
+
+// expandInline reads in an extra copy of IR to populate
+// fn.Inl.{Dcl,Body}.
+func expandInline(fn *ir.Func, pri pkgReaderIndex) {
+ // TODO(mdempsky): Remove this function. It's currently needed by
+ // dwarfgen/dwarf.go:preInliningDcls, which requires fn.Inl.Dcl to
+ // create abstract function DIEs. But we should be able to provide it
+ // with the same information some other way.
+
+ fndcls := len(fn.Dcl)
+ topdcls := len(typecheck.Target.Decls)
+
+ tmpfn := ir.NewFunc(fn.Pos())
+ tmpfn.Nname = ir.NewNameAt(fn.Nname.Pos(), fn.Sym())
+ tmpfn.ClosureVars = fn.ClosureVars
+
+ {
+ r := pri.asReader(relocBody, syncFuncBody)
+ setType(tmpfn.Nname, fn.Type())
+
+ // Don't change parameter's Sym/Nname fields.
+ r.funarghack = true
+
+ r.funcBody(tmpfn)
+
+ ir.WithFunc(tmpfn, func() {
+ deadcode.Func(tmpfn)
+ })
+ }
+
+ used := usedLocals(tmpfn.Body)
+
+ for _, name := range tmpfn.Dcl {
+ if name.Class != ir.PAUTO || used.Has(name) {
+ name.Curfn = fn
+ fn.Inl.Dcl = append(fn.Inl.Dcl, name)
+ }
+ }
+ fn.Inl.Body = tmpfn.Body
+
+ // Double check that we didn't change fn.Dcl by accident.
+ assert(fndcls == len(fn.Dcl))
+
+ // typecheck.Stmts may have added function literals to
+ // typecheck.Target.Decls. Remove them again so we don't risk trying
+ // to compile them multiple times.
+ typecheck.Target.Decls = typecheck.Target.Decls[:topdcls]
+}
+
+// usedLocals returns a set of local variables that are used within body.
+func usedLocals(body []ir.Node) ir.NameSet {
+ var used ir.NameSet
+ ir.VisitList(body, func(n ir.Node) {
+ if n, ok := n.(*ir.Name); ok && n.Op() == ir.ONAME && n.Class == ir.PAUTO {
+ used.Add(n)
+ }
+ })
+ return used
+}
+
+// @@@ Method wrappers
+
+// needWrapperTypes lists types for which we may need to generate
+// method wrappers.
+var needWrapperTypes []*types.Type
+
+// haveWrapperTypes lists types for which we know we already have
+// method wrappers, because we found the type in an imported package.
+var haveWrapperTypes []*types.Type
+
+// needMethodValueWrappers lists methods for which we may need to
+// generate method value wrappers.
+var needMethodValueWrappers []methodValueWrapper
+
+// haveMethodValueWrappers lists methods for which we know we already
+// have method value wrappers, because we found it in an imported
+// package.
+var haveMethodValueWrappers []methodValueWrapper
+
+type methodValueWrapper struct {
+ rcvr *types.Type
+ method *types.Field
+}
+
+func (r *reader) needWrapper(typ *types.Type) {
+ if typ.IsPtr() {
+ return
+ }
+
+ // If a type was found in an imported package, then we can assume
+ // that package (or one of its transitive dependencies) already
+ // generated method wrappers for it.
+ if r.importedDef() {
+ haveWrapperTypes = append(haveWrapperTypes, typ)
+ } else {
+ needWrapperTypes = append(needWrapperTypes, typ)
+ }
+}
+
+func (r *reader) importedDef() bool {
+ // If a type was found in an imported package, then we can assume
+ // that package (or one of its transitive dependencies) already
+ // generated method wrappers for it.
+ //
+ // Exception: If we're instantiating an imported generic type or
+ // function, we might be instantiating it with type arguments not
+ // previously seen before.
+ //
+ // TODO(mdempsky): Distinguish when a generic function or type was
+ // instantiated in an imported package so that we can add types to
+ // haveWrapperTypes instead.
+ return r.p != localPkgReader && !r.hasTypeParams()
+}
+
+func MakeWrappers(target *ir.Package) {
+ // Only unified IR in non-quirks mode emits its own wrappers.
+ if base.Debug.Unified == 0 || quirksMode() {
+ return
+ }
+
+ // always generate a wrapper for error.Error (#29304)
+ needWrapperTypes = append(needWrapperTypes, types.ErrorType)
+
+ seen := make(map[string]*types.Type)
+
+ for _, typ := range haveWrapperTypes {
+ wrapType(typ, target, seen, false)
+ }
+ haveWrapperTypes = nil
+
+ for _, typ := range needWrapperTypes {
+ wrapType(typ, target, seen, true)
+ }
+ needWrapperTypes = nil
+
+ for _, wrapper := range haveMethodValueWrappers {
+ wrapMethodValue(wrapper.rcvr, wrapper.method, target, false)
+ }
+ haveMethodValueWrappers = nil
+
+ for _, wrapper := range needMethodValueWrappers {
+ wrapMethodValue(wrapper.rcvr, wrapper.method, target, true)
+ }
+ needMethodValueWrappers = nil
+}
+
+func wrapType(typ *types.Type, target *ir.Package, seen map[string]*types.Type, needed bool) {
+ key := typ.LinkString()
+ if prev := seen[key]; prev != nil {
+ if !types.Identical(typ, prev) {
+ base.Fatalf("collision: types %v and %v have link string %q", typ, prev, key)
+ }
+ return
+ }
+ seen[key] = typ
+
+ if !needed {
+ // Only called to add to 'seen'.
+ return
+ }
+
+ if !typ.IsInterface() {
+ typecheck.CalcMethods(typ)
+ }
+ for _, meth := range typ.AllMethods().Slice() {
+ if meth.Sym.IsBlank() || !meth.IsMethod() {
+ base.FatalfAt(meth.Pos, "invalid method: %v", meth)
+ }
+
+ methodWrapper(0, typ, meth, target)
+
+ // For non-interface types, we also want *T wrappers.
+ if !typ.IsInterface() {
+ methodWrapper(1, typ, meth, target)
+
+ // For not-in-heap types, *T is a scalar, not pointer shaped,
+ // so the interface wrappers use **T.
+ if typ.NotInHeap() {
+ methodWrapper(2, typ, meth, target)
+ }
+ }
+ }
+}
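+
+// For a hypothetical non-interface type T with a value method M, the calls
+// above request methodWrapper with derefs=0 (wrapper type T, skipped because
+// it is identical to the receiver type) and derefs=1 (the (*T).M wrapper,
+// including the nil-receiver panicwrap check). derefs=2 (**T) only arises
+// for not-in-heap types, where *T itself is not pointer shaped.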
+
+func methodWrapper(derefs int, tbase *types.Type, method *types.Field, target *ir.Package) {
+ wrapper := tbase
+ for i := 0; i < derefs; i++ {
+ wrapper = types.NewPtr(wrapper)
+ }
+
+ sym := ir.MethodSym(wrapper, method.Sym)
+ base.Assertf(!sym.Siggen(), "already generated wrapper %v", sym)
+ sym.SetSiggen(true)
+
+ wrappee := method.Type.Recv().Type
+ if types.Identical(wrapper, wrappee) ||
+ !types.IsMethodApplicable(wrapper, method) ||
+ !reflectdata.NeedEmit(tbase) {
+ return
+ }
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := newWrapperFunc(pos, sym, wrapper, method)
+
+ var recv ir.Node = fn.Nname.Type().Recv().Nname.(*ir.Name)
+
+ // For simple *T wrappers around T methods, panicwrap produces a
+ // nicer panic message.
+ if wrapper.IsPtr() && types.Identical(wrapper.Elem(), wrappee) {
+ cond := ir.NewBinaryExpr(pos, ir.OEQ, recv, types.BuiltinPkg.Lookup("nil").Def.(ir.Node))
+ then := []ir.Node{ir.NewCallExpr(pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)}
+ fn.Body.Append(ir.NewIfStmt(pos, cond, then, nil))
+ }
+
+ // typecheck will add one implicit deref, if necessary,
+ // but not-in-heap types require more for their **T wrappers.
+ for i := 1; i < derefs; i++ {
+ recv = Implicit(ir.NewStarExpr(pos, recv))
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ finishWrapperFunc(fn, target)
+}
+
+func wrapMethodValue(recvType *types.Type, method *types.Field, target *ir.Package, needed bool) {
+ sym := ir.MethodSymSuffix(recvType, method.Sym, "-fm")
+ if sym.Uniq() {
+ return
+ }
+ sym.SetUniq(true)
+
+ // TODO(mdempsky): Use method.Pos instead?
+ pos := base.AutogeneratedPos
+
+ fn := newWrapperFunc(pos, sym, nil, method)
+ sym.Def = fn.Nname
+
+ // Declare and initialize variable holding receiver.
+ recv := ir.NewHiddenParam(pos, fn, typecheck.Lookup(".this"), recvType)
+
+ if !needed {
+ typecheck.Func(fn)
+ return
+ }
+
+ addTailCall(pos, fn, recv, method)
+
+ finishWrapperFunc(fn, target)
+}
+
+func newWrapperFunc(pos src.XPos, sym *types.Sym, wrapper *types.Type, method *types.Field) *ir.Func {
+ fn := ir.NewFunc(pos)
+ fn.SetDupok(true) // TODO(mdempsky): Leave unset for local, non-generic wrappers?
+
+ name := ir.NewNameAt(pos, sym)
+ ir.MarkFunc(name)
+ name.Func = fn
+ name.Defn = fn
+ fn.Nname = name
+
+ sig := newWrapperType(wrapper, method)
+ setType(name, sig)
+
+ // TODO(mdempsky): De-duplicate with similar logic in funcargs.
+ defParams := func(class ir.Class, params *types.Type) {
+ for _, param := range params.FieldSlice() {
+ name := ir.NewNameAt(param.Pos, param.Sym)
+ name.Class = class
+ setType(name, param.Type)
+
+ name.Curfn = fn
+ fn.Dcl = append(fn.Dcl, name)
+
+ param.Nname = name
+ }
+ }
+
+ defParams(ir.PPARAM, sig.Recvs())
+ defParams(ir.PPARAM, sig.Params())
+ defParams(ir.PPARAMOUT, sig.Results())
+
+ return fn
+}
+
+func finishWrapperFunc(fn *ir.Func, target *ir.Package) {
+ typecheck.Func(fn)
+
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ // We generate wrappers after the global inlining pass,
+ // so we're responsible for applying inlining ourselves here.
+ inline.InlineCalls(fn)
+
+ target.Decls = append(target.Decls, fn)
+}
+
+// newWrapperType returns a copy of the given signature type, but with
+// the receiver parameter type substituted with recvType.
+// If recvType is nil, newWrapperType returns a signature
+// without a receiver parameter.
+func newWrapperType(recvType *types.Type, method *types.Field) *types.Type {
+ clone := func(params []*types.Field) []*types.Field {
+ res := make([]*types.Field, len(params))
+ for i, param := range params {
+ sym := param.Sym
+ if sym == nil || sym.Name == "_" {
+ sym = typecheck.LookupNum(".anon", i)
+ }
+ res[i] = types.NewField(param.Pos, sym, param.Type)
+ res[i].SetIsDDD(param.IsDDD())
+ }
+ return res
+ }
+
+ sig := method.Type
+
+ var recv *types.Field
+ if recvType != nil {
+ recv = types.NewField(sig.Recv().Pos, typecheck.Lookup(".this"), recvType)
+ }
+ params := clone(sig.Params().FieldSlice())
+ results := clone(sig.Results().FieldSlice())
+
+ return types.NewSignature(types.NoPkg, recv, nil, params, results)
+}
+
+func addTailCall(pos src.XPos, fn *ir.Func, recv ir.Node, method *types.Field) {
+ sig := fn.Nname.Type()
+ args := make([]ir.Node, sig.NumParams())
+ for i, param := range sig.Params().FieldSlice() {
+ args[i] = param.Nname.(*ir.Name)
+ }
+
+ // TODO(mdempsky): Support creating OTAILCALL, when possible. See reflectdata.methodWrapper.
+ // Not urgent though, because tail calls are currently incompatible with regabi anyway.
+
+ fn.SetWrapper(true) // TODO(mdempsky): Leave unset for tail calls?
+
+ dot := ir.NewSelectorExpr(pos, ir.OXDOT, recv, method.Sym)
+ call := typecheck.Call(pos, dot, args, method.Type.IsVariadic()).(*ir.CallExpr)
+
+ if method.Type.NumResults() == 0 {
+ fn.Body.Append(call)
+ return
+ }
+
+ ret := ir.NewReturnStmt(pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+}
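+
+// For a hypothetical method func (T) M(x int) int, the wrapper body built
+// by addTailCall is roughly
+//
+//	return .this.M(x)
+//
+// or just ".this.M(x)" when the method has no results; using an OXDOT
+// selector lets typecheck.Call insert an implicit dereference of the
+// receiver where one is needed.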
diff --git a/src/cmd/compile/internal/noder/reader2.go b/src/cmd/compile/internal/noder/reader2.go
new file mode 100644
index 0000000..c028d21
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reader2.go
@@ -0,0 +1,506 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+type pkgReader2 struct {
+ pkgDecoder
+
+ ctxt *types2.Context
+ imports map[string]*types2.Package
+
+ posBases []*syntax.PosBase
+ pkgs []*types2.Package
+ typs []types2.Type
+}
+
+func readPackage2(ctxt *types2.Context, imports map[string]*types2.Package, input pkgDecoder) *types2.Package {
+ pr := pkgReader2{
+ pkgDecoder: input,
+
+ ctxt: ctxt,
+ imports: imports,
+
+ posBases: make([]*syntax.PosBase, input.numElems(relocPosBase)),
+ pkgs: make([]*types2.Package, input.numElems(relocPkg)),
+ typs: make([]types2.Type, input.numElems(relocType)),
+ }
+
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+ pkg := r.pkg()
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ // As if r.obj(), but avoiding the Scope.Lookup call,
+ // to avoid eager loading of imports.
+ r.sync(syncObject)
+ assert(!r.bool())
+ r.p.objIdx(r.reloc(relocObj))
+ assert(r.len() == 0)
+ }
+
+ r.sync(syncEOF)
+
+ pkg.MarkComplete()
+ return pkg
+}
+
+type reader2 struct {
+ decoder
+
+ p *pkgReader2
+
+ dict *reader2Dict
+}
+
+type reader2Dict struct {
+ bounds []typeInfo
+
+ tparams []*types2.TypeParam
+
+ derived []derivedInfo
+ derivedTypes []types2.Type
+}
+
+type reader2TypeBound struct {
+ derived bool
+ boundIdx int
+}
+
+func (pr *pkgReader2) newReader(k reloc, idx int, marker syncMarker) *reader2 {
+ return &reader2{
+ decoder: pr.newDecoder(k, idx, marker),
+ p: pr,
+ }
+}
+
+// @@@ Positions
+
+func (r *reader2) pos() syntax.Pos {
+ r.sync(syncPos)
+ if !r.bool() {
+ return syntax.Pos{}
+ }
+
+ // TODO(mdempsky): Delta encoding.
+ posBase := r.posBase()
+ line := r.uint()
+ col := r.uint()
+ return syntax.MakePos(posBase, line, col)
+}
+
+func (r *reader2) posBase() *syntax.PosBase {
+ return r.p.posBaseIdx(r.reloc(relocPosBase))
+}
+
+func (pr *pkgReader2) posBaseIdx(idx int) *syntax.PosBase {
+ if b := pr.posBases[idx]; b != nil {
+ return b
+ }
+
+ r := pr.newReader(relocPosBase, idx, syncPosBase)
+ var b *syntax.PosBase
+
+ filename := r.string()
+
+ if r.bool() {
+ b = syntax.NewTrimmedFileBase(filename, true)
+ } else {
+ pos := r.pos()
+ line := r.uint()
+ col := r.uint()
+ b = syntax.NewLineBase(pos, filename, true, line, col)
+ }
+
+ pr.posBases[idx] = b
+ return b
+}
+
+// @@@ Packages
+
+func (r *reader2) pkg() *types2.Package {
+ r.sync(syncPkg)
+ return r.p.pkgIdx(r.reloc(relocPkg))
+}
+
+func (pr *pkgReader2) pkgIdx(idx int) *types2.Package {
+ // TODO(mdempsky): Consider using some non-nil pointer to indicate
+ // the universe scope, so we don't need to keep re-reading it.
+ if pkg := pr.pkgs[idx]; pkg != nil {
+ return pkg
+ }
+
+ pkg := pr.newReader(relocPkg, idx, syncPkgDef).doPkg()
+ pr.pkgs[idx] = pkg
+ return pkg
+}
+
+func (r *reader2) doPkg() *types2.Package {
+ path := r.string()
+ if path == "builtin" {
+ return nil // universe
+ }
+ if path == "" {
+ path = r.p.pkgPath
+ }
+
+ if pkg := r.p.imports[path]; pkg != nil {
+ return pkg
+ }
+
+ name := r.string()
+ height := r.len()
+
+ pkg := types2.NewPackageHeight(path, name, height)
+ r.p.imports[path] = pkg
+
+ // TODO(mdempsky): The list of imported packages is important for
+ // go/types, but we could probably skip populating it for types2.
+ imports := make([]*types2.Package, r.len())
+ for i := range imports {
+ imports[i] = r.pkg()
+ }
+ pkg.SetImports(imports)
+
+ return pkg
+}
+
+// @@@ Types
+
+func (r *reader2) typ() types2.Type {
+ return r.p.typIdx(r.typInfo(), r.dict)
+}
+
+func (r *reader2) typInfo() typeInfo {
+ r.sync(syncType)
+ if r.bool() {
+ return typeInfo{idx: r.len(), derived: true}
+ }
+ return typeInfo{idx: r.reloc(relocType), derived: false}
+}
+
+func (pr *pkgReader2) typIdx(info typeInfo, dict *reader2Dict) types2.Type {
+ idx := info.idx
+ var where *types2.Type
+ if info.derived {
+ where = &dict.derivedTypes[idx]
+ idx = dict.derived[idx].idx
+ } else {
+ where = &pr.typs[idx]
+ }
+
+ if typ := *where; typ != nil {
+ return typ
+ }
+
+ r := pr.newReader(relocType, idx, syncTypeIdx)
+ r.dict = dict
+
+ typ := r.doTyp()
+ assert(typ != nil)
+
+ // See comment in pkgReader.typIdx explaining how this happens.
+ if prev := *where; prev != nil {
+ return prev
+ }
+
+ *where = typ
+ return typ
+}
+
+func (r *reader2) doTyp() (res types2.Type) {
+ switch tag := codeType(r.code(syncType)); tag {
+ default:
+ base.FatalfAt(src.NoXPos, "unhandled type tag: %v", tag)
+ panic("unreachable")
+
+ case typeBasic:
+ return types2.Typ[r.len()]
+
+ case typeNamed:
+ obj, targs := r.obj()
+ name := obj.(*types2.TypeName)
+ if len(targs) != 0 {
+ t, _ := types2.Instantiate(r.p.ctxt, name.Type(), targs, false)
+ return t
+ }
+ return name.Type()
+
+ case typeTypeParam:
+ return r.dict.tparams[r.len()]
+
+ case typeArray:
+ len := int64(r.uint64())
+ return types2.NewArray(r.typ(), len)
+ case typeChan:
+ dir := types2.ChanDir(r.len())
+ return types2.NewChan(dir, r.typ())
+ case typeMap:
+ return types2.NewMap(r.typ(), r.typ())
+ case typePointer:
+ return types2.NewPointer(r.typ())
+ case typeSignature:
+ return r.signature(nil, nil, nil)
+ case typeSlice:
+ return types2.NewSlice(r.typ())
+ case typeStruct:
+ return r.structType()
+ case typeInterface:
+ return r.interfaceType()
+ case typeUnion:
+ return r.unionType()
+ }
+}
+
+func (r *reader2) structType() *types2.Struct {
+ fields := make([]*types2.Var, r.len())
+ var tags []string
+ for i := range fields {
+ pos := r.pos()
+ pkg, name := r.selector()
+ ftyp := r.typ()
+ tag := r.string()
+ embedded := r.bool()
+
+ fields[i] = types2.NewField(pos, pkg, name, ftyp, embedded)
+ if tag != "" {
+ for len(tags) < i {
+ tags = append(tags, "")
+ }
+ tags = append(tags, tag)
+ }
+ }
+ return types2.NewStruct(fields, tags)
+}
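+
+// Note that tags above is grown lazily: for fields (a, b, c) where only b
+// carries a struct tag, tags ends up as ["", "tag-of-b"]; types2.NewStruct
+// accepts a tags slice shorter than the field list, treating the missing
+// trailing entries as empty tags.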
+
+func (r *reader2) unionType() *types2.Union {
+ terms := make([]*types2.Term, r.len())
+ for i := range terms {
+ terms[i] = types2.NewTerm(r.bool(), r.typ())
+ }
+ return types2.NewUnion(terms)
+}
+
+func (r *reader2) interfaceType() *types2.Interface {
+ methods := make([]*types2.Func, r.len())
+ embeddeds := make([]types2.Type, r.len())
+
+ for i := range methods {
+ pos := r.pos()
+ pkg, name := r.selector()
+ mtyp := r.signature(nil, nil, nil)
+ methods[i] = types2.NewFunc(pos, pkg, name, mtyp)
+ }
+
+ for i := range embeddeds {
+ embeddeds[i] = r.typ()
+ }
+
+ return types2.NewInterfaceType(methods, embeddeds)
+}
+
+func (r *reader2) signature(recv *types2.Var, rtparams, tparams []*types2.TypeParam) *types2.Signature {
+ r.sync(syncSignature)
+
+ params := r.params()
+ results := r.params()
+ variadic := r.bool()
+
+ return types2.NewSignatureType(recv, rtparams, tparams, params, results, variadic)
+}
+
+func (r *reader2) params() *types2.Tuple {
+ r.sync(syncParams)
+ params := make([]*types2.Var, r.len())
+ for i := range params {
+ params[i] = r.param()
+ }
+ return types2.NewTuple(params...)
+}
+
+func (r *reader2) param() *types2.Var {
+ r.sync(syncParam)
+
+ pos := r.pos()
+ pkg, name := r.localIdent()
+ typ := r.typ()
+
+ return types2.NewParam(pos, pkg, name, typ)
+}
+
+// @@@ Objects
+
+func (r *reader2) obj() (types2.Object, []types2.Type) {
+ r.sync(syncObject)
+
+ assert(!r.bool())
+
+ pkg, name := r.p.objIdx(r.reloc(relocObj))
+ obj := pkg.Scope().Lookup(name)
+
+ targs := make([]types2.Type, r.len())
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+
+ return obj, targs
+}
+
+func (pr *pkgReader2) objIdx(idx int) (*types2.Package, string) {
+ rname := pr.newReader(relocName, idx, syncObject1)
+
+ objPkg, objName := rname.qualifiedIdent()
+ assert(objName != "")
+
+ tag := codeObj(rname.code(syncCodeObj))
+
+ if tag == objStub {
+ assert(objPkg == nil || objPkg == types2.Unsafe)
+ return objPkg, objName
+ }
+
+ dict := pr.objDictIdx(idx)
+
+ r := pr.newReader(relocObj, idx, syncObject1)
+ r.dict = dict
+
+ objPkg.Scope().InsertLazy(objName, func() types2.Object {
+ switch tag {
+ default:
+ panic("weird")
+
+ case objAlias:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewTypeName(pos, objPkg, objName, typ)
+
+ case objConst:
+ pos := r.pos()
+ typ := r.typ()
+ val := r.value()
+ return types2.NewConst(pos, objPkg, objName, typ, val)
+
+ case objFunc:
+ pos := r.pos()
+ tparams := r.typeParamNames()
+ sig := r.signature(nil, nil, tparams)
+ return types2.NewFunc(pos, objPkg, objName, sig)
+
+ case objType:
+ pos := r.pos()
+
+ return types2.NewTypeNameLazy(pos, objPkg, objName, func(named *types2.Named) (tparams []*types2.TypeParam, underlying types2.Type, methods []*types2.Func) {
+ tparams = r.typeParamNames()
+
+ // TODO(mdempsky): Rewrite receiver types when the underlying type is an
+ // Interface? The go/types importer does this (I think because
+ // unit tests expected that), but cmd/compile doesn't care
+ // about it, so maybe we can avoid worrying about that here.
+ underlying = r.typ().Underlying()
+
+ methods = make([]*types2.Func, r.len())
+ for i := range methods {
+ methods[i] = r.method()
+ }
+
+ return
+ })
+
+ case objVar:
+ pos := r.pos()
+ typ := r.typ()
+ return types2.NewVar(pos, objPkg, objName, typ)
+ }
+ })
+
+ return objPkg, objName
+}
+
+func (pr *pkgReader2) objDictIdx(idx int) *reader2Dict {
+ r := pr.newReader(relocObjDict, idx, syncObject1)
+
+ var dict reader2Dict
+
+ if implicits := r.len(); implicits != 0 {
+ base.Fatalf("unexpected object with %v implicit type parameter(s)", implicits)
+ }
+
+ dict.bounds = make([]typeInfo, r.len())
+ for i := range dict.bounds {
+ dict.bounds[i] = r.typInfo()
+ }
+
+ dict.derived = make([]derivedInfo, r.len())
+ dict.derivedTypes = make([]types2.Type, len(dict.derived))
+ for i := range dict.derived {
+ dict.derived[i] = derivedInfo{r.reloc(relocType), r.bool()}
+ }
+
+ // function references follow, but reader2 doesn't need those
+
+ return &dict
+}
+
+func (r *reader2) typeParamNames() []*types2.TypeParam {
+ r.sync(syncTypeParamNames)
+
+ // Note: This code assumes it only processes objects without
+ // implicit type parameters. This is currently fine, because
+ // reader2 is only used to read in exported declarations, which are
+ // always package scoped.
+
+ if len(r.dict.bounds) == 0 {
+ return nil
+ }
+
+ // Careful: Type parameter lists may have cycles. To allow for this,
+ // we construct the type parameter list in two passes: first we
+ // create all the TypeNames and TypeParams, then we construct and
+ // set the bound type.
+
+ r.dict.tparams = make([]*types2.TypeParam, len(r.dict.bounds))
+ for i := range r.dict.bounds {
+ pos := r.pos()
+ pkg, name := r.localIdent()
+
+ tname := types2.NewTypeName(pos, pkg, name, nil)
+ r.dict.tparams[i] = types2.NewTypeParam(tname, nil)
+ }
+
+ for i, bound := range r.dict.bounds {
+ r.dict.tparams[i].SetConstraint(r.p.typIdx(bound, r.dict))
+ }
+
+ return r.dict.tparams
+}
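+
+// As an example of why the two passes above are needed, consider a
+// hypothetical declaration whose type parameters refer to each other in
+// their bounds:
+//
+//	func f[A interface{ Do(B) }, B interface{ Do(A) }]() {}
+//
+// Resolving the bound of A requires the *TypeParam for B and vice versa,
+// so every TypeParam must exist before any bound is read via typIdx.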
+
+func (r *reader2) method() *types2.Func {
+ r.sync(syncMethod)
+ pos := r.pos()
+ pkg, name := r.selector()
+
+ rtparams := r.typeParamNames()
+ sig := r.signature(r.param(), rtparams, nil)
+
+ _ = r.pos() // TODO(mdempsky): Remove; this is a hack for linker.go.
+ return types2.NewFunc(pos, pkg, name, sig)
+}
+
+func (r *reader2) qualifiedIdent() (*types2.Package, string) { return r.ident(syncSym) }
+func (r *reader2) localIdent() (*types2.Package, string) { return r.ident(syncLocalIdent) }
+func (r *reader2) selector() (*types2.Package, string) { return r.ident(syncSelector) }
+
+func (r *reader2) ident(marker syncMarker) (*types2.Package, string) {
+ r.sync(marker)
+ return r.pkg(), r.string()
+}
diff --git a/src/cmd/compile/internal/noder/reloc.go b/src/cmd/compile/internal/noder/reloc.go
new file mode 100644
index 0000000..669a618
--- /dev/null
+++ b/src/cmd/compile/internal/noder/reloc.go
@@ -0,0 +1,42 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+// A reloc indicates a particular section within a unified IR export.
+//
+// TODO(mdempsky): Rename to "section" or something similar?
+type reloc int
+
+// A relocEnt (relocation entry) is an entry in an atom's local
+// reference table.
+//
+// TODO(mdempsky): Rename this too.
+type relocEnt struct {
+ kind reloc
+ idx int
+}
+
+// Reserved indices within the meta relocation section.
+const (
+ publicRootIdx = 0
+ privateRootIdx = 1
+)
+
+const (
+ relocString reloc = iota
+ relocMeta
+ relocPosBase
+ relocPkg
+ relocName
+ relocType
+ relocObj
+ relocObjExt
+ relocObjDict
+ relocBody
+
+ numRelocs = iota
+)
diff --git a/src/cmd/compile/internal/noder/scopes.go b/src/cmd/compile/internal/noder/scopes.go
new file mode 100644
index 0000000..eb51847
--- /dev/null
+++ b/src/cmd/compile/internal/noder/scopes.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+)
+
+// recordScopes populates fn.Parents and fn.Marks based on the scoping
+// information provided by types2.
+func (g *irgen) recordScopes(fn *ir.Func, sig *syntax.FuncType) {
+ scope, ok := g.info.Scopes[sig]
+ if !ok {
+ base.FatalfAt(fn.Pos(), "missing scope for %v", fn)
+ }
+
+ for i, n := 0, scope.NumChildren(); i < n; i++ {
+ g.walkScope(scope.Child(i))
+ }
+
+ g.marker.WriteTo(fn)
+}
+
+func (g *irgen) walkScope(scope *types2.Scope) bool {
+ // types2 doesn't provide a proper API for determining the
+ // lexical element a scope represents, so we have to resort to
+ // string matching. Conveniently though, this allows us to
+ // skip both function types and function literals, neither of
+ // which are interesting to us here.
+ if strings.HasPrefix(scope.String(), "function scope ") {
+ return false
+ }
+
+ g.marker.Push(g.pos(scope))
+
+ haveVars := false
+ for _, name := range scope.Names() {
+ if obj, ok := scope.Lookup(name).(*types2.Var); ok && obj.Name() != "_" {
+ haveVars = true
+ break
+ }
+ }
+
+ for i, n := 0, scope.NumChildren(); i < n; i++ {
+ if g.walkScope(scope.Child(i)) {
+ haveVars = true
+ }
+ }
+
+ if haveVars {
+ g.marker.Pop(g.end(scope))
+ } else {
+ g.marker.Unpush()
+ }
+
+ return haveVars
+}
diff --git a/src/cmd/compile/internal/noder/sizes.go b/src/cmd/compile/internal/noder/sizes.go
new file mode 100644
index 0000000..23f2062
--- /dev/null
+++ b/src/cmd/compile/internal/noder/sizes.go
@@ -0,0 +1,150 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+)
+
+// Code below based on go/types.StdSizes.
+// Intentional differences are marked with "gc:".
+
+type gcSizes struct{}
+
+func (s *gcSizes) Alignof(T types2.Type) int64 {
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := T.Underlying().(type) {
+ case *types2.Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.Elem())
+ case *types2.Struct:
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for i, nf := 0, t.NumFields(); i < nf; i++ {
+ if a := s.Alignof(t.Field(i).Type()); a > max {
+ max = a
+ }
+ }
+ return max
+ case *types2.Slice, *types2.Interface:
+ // Multiword data structures are effectively structs
+ // in which each element has size PtrSize.
+ return int64(types.PtrSize)
+ case *types2.Basic:
+ // Strings are like slices and interfaces.
+ if t.Info()&types2.IsString != 0 {
+ return int64(types.PtrSize)
+ }
+ }
+ a := s.Sizeof(T) // may be 0
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ // complex{64,128} are aligned like [2]float{32,64}.
+ if isComplex(T) {
+ a /= 2
+ }
+ if a > int64(types.RegSize) {
+ return int64(types.RegSize)
+ }
+ return a
+}
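+
+// For instance, on a 64-bit target (PtrSize == RegSize == 8) the rules
+// above give an alignment of 1 for [8]int8, 8 for string and []int, and 8
+// for complex128, which is aligned like [2]float64 (hence the a /= 2).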
+
+func isComplex(T types2.Type) bool {
+ basic, ok := T.Underlying().(*types2.Basic)
+ return ok && basic.Info()&types2.IsComplex != 0
+}
+
+func (s *gcSizes) Offsetsof(fields []*types2.Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var o int64
+ for i, f := range fields {
+ typ := f.Type()
+ a := s.Alignof(typ)
+ o = types.Rnd(o, a)
+ offsets[i] = o
+ o += s.Sizeof(typ)
+ }
+ return offsets
+}
+
+func (s *gcSizes) Sizeof(T types2.Type) int64 {
+ switch t := T.Underlying().(type) {
+ case *types2.Basic:
+ k := t.Kind()
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ switch k {
+ case types2.String:
+ return int64(types.PtrSize) * 2
+ case types2.Int, types2.Uint, types2.Uintptr, types2.UnsafePointer:
+ return int64(types.PtrSize)
+ }
+ panic(fmt.Sprintf("unimplemented basic: %v (kind %v)", T, k))
+ case *types2.Array:
+ n := t.Len()
+ if n <= 0 {
+ return 0
+ }
+ // n > 0
+ // gc: Size includes alignment padding.
+ return s.Sizeof(t.Elem()) * n
+ case *types2.Slice:
+ return int64(types.PtrSize) * 3
+ case *types2.Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ fields := make([]*types2.Var, n)
+ for i := range fields {
+ fields[i] = t.Field(i)
+ }
+ offsets := s.Offsetsof(fields)
+
+ // gc: The last field of a non-zero-sized struct is not allowed to
+ // have size 0.
+ last := s.Sizeof(fields[n-1].Type())
+ if last == 0 && offsets[n-1] > 0 {
+ last = 1
+ }
+
+ // gc: Size includes alignment padding.
+ return types.Rnd(offsets[n-1]+last, s.Alignof(t))
+ case *types2.Interface:
+ return int64(types.PtrSize) * 2
+ case *types2.Chan, *types2.Map, *types2.Pointer, *types2.Signature:
+ return int64(types.PtrSize)
+ default:
+ panic(fmt.Sprintf("unimplemented type: %T", t))
+ }
+}
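+
+// Worked example of the zero-size last field rule above: for
+//
+//	struct {
+//		x int32
+//		y struct{}
+//	}
+//
+// the field offsets are [0, 4]; the last field has size 0 but a nonzero
+// offset, so it is counted as size 1, and the total is Rnd(4+1, 4) = 8.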
+
+var basicSizes = [...]byte{
+ types2.Bool: 1,
+ types2.Int8: 1,
+ types2.Int16: 2,
+ types2.Int32: 4,
+ types2.Int64: 8,
+ types2.Uint8: 1,
+ types2.Uint16: 2,
+ types2.Uint32: 4,
+ types2.Uint64: 8,
+ types2.Float32: 4,
+ types2.Float64: 8,
+ types2.Complex64: 8,
+ types2.Complex128: 16,
+}
diff --git a/src/cmd/compile/internal/noder/stencil.go b/src/cmd/compile/internal/noder/stencil.go
new file mode 100644
index 0000000..a41b35a
--- /dev/null
+++ b/src/cmd/compile/internal/noder/stencil.go
@@ -0,0 +1,2235 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file will evolve, since we plan to do a mix of stenciling and passing
+// around dictionaries.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "go/constant"
+)
+
+// Enable extra consistency checks.
+const doubleCheck = false
+
+func assert(p bool) {
+ base.Assert(p)
+}
+
+// For outputting debug information on dictionary format and instantiated dictionaries
+// (type arg, derived types, sub-dictionary, and itab entries).
+var infoPrintMode = false
+
+func infoPrint(format string, a ...interface{}) {
+ if infoPrintMode {
+ fmt.Printf(format, a...)
+ }
+}
+
+var geninst genInst
+
+func BuildInstantiations() {
+ geninst.instInfoMap = make(map[*types.Sym]*instInfo)
+ geninst.buildInstantiations()
+ geninst.instInfoMap = nil
+}
+
+// buildInstantiations scans functions for generic function calls and methods, and
+// creates the required instantiations. It also creates instantiated methods for all
+// fully-instantiated generic types that have been encountered already or new ones
+// that are encountered during the instantiation process. It scans all declarations
+// in typecheck.Target.Decls first, before scanning any new instantiations created.
+func (g *genInst) buildInstantiations() {
+ // Instantiate the methods of instantiated generic types that we have seen so far.
+ g.instantiateMethods()
+
+ // Scan all current decls for calls to generic functions/methods.
+ n := len(typecheck.Target.Decls)
+ for i := 0; i < n; i++ {
+ g.scanForGenCalls(typecheck.Target.Decls[i])
+ }
+
+ // Scan all new instantiations created due to g.instantiateMethods() and the
+ // scan of current decls. This loop purposely runs until no new
+ // instantiations are created.
+ for i := 0; i < len(g.newInsts); i++ {
+ g.scanForGenCalls(g.newInsts[i])
+ }
+
+ g.finalizeSyms()
+
+ // All the instantiations and dictionaries have been created. Now go through
+ // each new instantiation and transform the various operations that need to make
+ // use of their dictionary.
+ l := len(g.newInsts)
+ for _, fun := range g.newInsts {
+ info := g.instInfoMap[fun.Sym()]
+ g.dictPass(info)
+ if doubleCheck {
+ ir.Visit(info.fun, func(n ir.Node) {
+ if n.Op() != ir.OCONVIFACE {
+ return
+ }
+ c := n.(*ir.ConvExpr)
+ if c.X.Type().HasShape() && !c.X.Type().IsInterface() {
+ ir.Dump("BAD FUNCTION", info.fun)
+ ir.Dump("BAD CONVERSION", c)
+ base.Fatalf("converting shape type to interface")
+ }
+ })
+ }
+ if base.Flag.W > 1 {
+ ir.Dump(fmt.Sprintf("\ndictpass %v", info.fun), info.fun)
+ }
+ }
+ assert(l == len(g.newInsts))
+ g.newInsts = nil
+}
+
+// scanForGenCalls scans a single function (or global assignment), looking for
+// references to generic functions/methods. At each such reference, it creates any
+// required instantiation and transforms the reference.
+func (g *genInst) scanForGenCalls(decl ir.Node) {
+ switch decl.Op() {
+ case ir.ODCLFUNC:
+ if decl.Type().HasTParam() {
+ // Skip any generic functions
+ return
+ }
+ // transformCall() below depends on CurFunc being set.
+ ir.CurFunc = decl.(*ir.Func)
+
+ case ir.OAS, ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV, ir.OASOP:
+ // These are all the various kinds of global assignments,
+ // whose right-hand-sides might contain a function
+ // instantiation.
+
+ default:
+ // The other possible ops at the top level are ODCLCONST
+ // and ODCLTYPE, which don't have any function
+ // instantiations.
+ return
+ }
+
+ // Search for any function references using generic function/methods. Then
+ // create the needed instantiated function if it hasn't been created yet, and
+ // change to calling that function directly.
+ modified := false
+ closureRequired := false
+ // declInfo will be non-nil exactly if we are scanning an instantiated function
+ declInfo := g.instInfoMap[decl.Sym()]
+
+ ir.Visit(decl, func(n ir.Node) {
+ if n.Op() == ir.OFUNCINST {
+ // generic F, not immediately called
+ closureRequired = true
+ }
+ if (n.Op() == ir.OMETHEXPR || n.Op() == ir.OMETHVALUE) && len(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) > 0 && !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) {
+ // T.M or x.M, where T or x is generic, but not immediately
+ // called. Not necessary if the method selected is
+ // actually for an embedded interface field.
+ closureRequired = true
+ }
+ if n.Op() == ir.OCALL && n.(*ir.CallExpr).X.Op() == ir.OFUNCINST {
+ // We have found a function call using a generic function
+ // instantiation.
+ call := n.(*ir.CallExpr)
+ inst := call.X.(*ir.InstExpr)
+ nameNode, isMeth := g.getInstNameNode(inst)
+ targs := typecheck.TypesOf(inst.Targs)
+ st := g.getInstantiation(nameNode, targs, isMeth).fun
+ dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, nameNode, targs, isMeth)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ if inst.X.Op() == ir.OMETHVALUE {
+ fmt.Printf("%s in %v at generic method call: %v - %v\n", dictkind, decl, inst.X, call)
+ } else {
+ fmt.Printf("%s in %v at generic function call: %v - %v\n", dictkind, decl, inst.X, call)
+ }
+ }
+
+ // Transform the Call now, which changes OCALL to
+ // OCALLFUNC and does typecheckaste/assignconvfn. Do
+ // it before installing the instantiation, so we are
+ // checking against non-shape param types in
+ // typecheckaste.
+ transformCall(call)
+
+ // Replace the OFUNCINST with a direct reference to the
+ // new stenciled function
+ call.X = st.Nname
+ if inst.X.Op() == ir.OMETHVALUE {
+ // When we create an instantiation of a method
+ // call, we make it a function. So, move the
+ // receiver to be the first arg of the function
+ // call.
+ call.Args.Prepend(inst.X.(*ir.SelectorExpr).X)
+ }
+
+ // Add dictionary to argument list.
+ call.Args.Prepend(dictValue)
+ modified = true
+ }
+ if n.Op() == ir.OCALLMETH && n.(*ir.CallExpr).X.Op() == ir.ODOTMETH && len(deref(n.(*ir.CallExpr).X.Type().Recv().Type).RParams()) > 0 {
+ // Method call on a generic type, which was instantiated by stenciling.
+ // Method calls on explicitly instantiated types will have an OFUNCINST
+ // and are handled above.
+ call := n.(*ir.CallExpr)
+ meth := call.X.(*ir.SelectorExpr)
+ targs := deref(meth.Type().Recv().Type).RParams()
+
+ t := meth.X.Type()
+ baseType := deref(t).OrigType()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if meth.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+
+ // Transform the Call now, which changes OCALL
+ // to OCALLFUNC and does typecheckaste/assignconvfn.
+ transformCall(call)
+
+ st := g.getInstantiation(gf, targs, true).fun
+ dictValue, usingSubdict := g.getDictOrSubdict(declInfo, n, gf, targs, true)
+ // We have to be using a subdictionary, since this is
+ // a generic method call.
+ assert(usingSubdict)
+
+ // Transform to a function call, by appending the
+ // dictionary and the receiver to the args.
+ call.SetOp(ir.OCALLFUNC)
+ call.X = st.Nname
+ call.Args.Prepend(dictValue, meth.X)
+ modified = true
+ }
+ })
+
+ // If we found a reference to a generic instantiation that wasn't an
+ // immediate call, then traverse the nodes of decl again (with
+ // EditChildren rather than Visit), where we actually change the
+ // reference to the instantiation to a closure that captures the
+ // dictionary, then does a direct call.
+ // EditChildren is more expensive than Visit, so we only do this
+ // in the infrequent case of an OFUNCINST without a corresponding
+ // call.
+ if closureRequired {
+ modified = true
+ var edit func(ir.Node) ir.Node
+ var outer *ir.Func
+ if f, ok := decl.(*ir.Func); ok {
+ outer = f
+ }
+ edit = func(x ir.Node) ir.Node {
+ if x.Op() == ir.OFUNCINST {
+ child := x.(*ir.InstExpr).X
+ if child.Op() == ir.OMETHEXPR || child.Op() == ir.OMETHVALUE {
+ // Call EditChildren on child (x.X),
+ // not x, so that we don't do
+ // buildClosure() on the
+ // METHEXPR/METHVALUE nodes as well.
+ ir.EditChildren(child, edit)
+ return g.buildClosure(outer, x)
+ }
+ }
+ ir.EditChildren(x, edit)
+ switch {
+ case x.Op() == ir.OFUNCINST:
+ return g.buildClosure(outer, x)
+ case (x.Op() == ir.OMETHEXPR || x.Op() == ir.OMETHVALUE) &&
+ len(deref(x.(*ir.SelectorExpr).X.Type()).RParams()) > 0 &&
+ !types.IsInterfaceMethod(x.(*ir.SelectorExpr).Selection.Type):
+ return g.buildClosure(outer, x)
+ }
+ return x
+ }
+ edit(decl)
+ }
+ if base.Flag.W > 1 && modified {
+ ir.Dump(fmt.Sprintf("\nmodified %v", decl), decl)
+ }
+ ir.CurFunc = nil
+ // We may have seen new fully-instantiated generic types while
+ // instantiating any needed functions/methods in the above
+ // function. If so, instantiate all the methods of those types
+ // (which will then lead to more function/methods to scan in the loop).
+ g.instantiateMethods()
+}
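+
+// As a rough sketch, a call through an explicit instantiation of a
+// hypothetical generic function Max, written Max[int](a, b), arrives here
+// as OCALL(OFUNCINST(Max, [int]), a, b) and is rewritten above into a
+// direct OCALLFUNC of the stenciled function with its dictionary prepended:
+//
+//	Max[int](.dict, a, b)
+//
+// where .dict stands for either the static dictionary or a sub-dictionary
+// entry, as chosen by getDictOrSubdict. Method calls on instantiated
+// generic types are handled analogously, with the receiver also moved into
+// the argument list when needed.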
+
+// buildClosure makes a closure to implement x, a OFUNCINST or OMETHEXPR/OMETHVALUE
+// of generic type. outer is the containing function (or nil if closure is
+// in a global assignment instead of a function).
+func (g *genInst) buildClosure(outer *ir.Func, x ir.Node) ir.Node {
+ pos := x.Pos()
+ var target *ir.Func // target instantiated function/method
+ var dictValue ir.Node // dictionary to use
+ var rcvrValue ir.Node // receiver, if a method value
+ typ := x.Type() // type of the closure
+ var outerInfo *instInfo
+ if outer != nil {
+ outerInfo = g.instInfoMap[outer.Sym()]
+ }
+ usingSubdict := false
+ valueMethod := false
+ if x.Op() == ir.OFUNCINST {
+ inst := x.(*ir.InstExpr)
+
+ // Type arguments we're instantiating with.
+ targs := typecheck.TypesOf(inst.Targs)
+
+ // Find the generic function/method.
+ var gf *ir.Name
+ if inst.X.Op() == ir.ONAME {
+ // Instantiating a generic function call.
+ gf = inst.X.(*ir.Name)
+ } else if inst.X.Op() == ir.OMETHVALUE {
+ // Instantiating a method value x.M.
+ se := inst.X.(*ir.SelectorExpr)
+ rcvrValue = se.X
+ gf = se.Selection.Nname.(*ir.Name)
+ } else {
+ panic("unhandled")
+ }
+
+ // target is the instantiated function we're trying to call.
+ // For functions, the target expects a dictionary as its first argument.
+ // For method values, the target expects a dictionary and the receiver
+ // as its first two arguments.
+ // dictValue is the value to use for the dictionary argument.
+ target = g.getInstantiation(gf, targs, rcvrValue != nil).fun
+ dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, rcvrValue != nil)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ if rcvrValue == nil {
+ fmt.Printf("%s in %v for generic function value %v\n", dictkind, outer, inst.X)
+ } else {
+ fmt.Printf("%s in %v for generic method value %v\n", dictkind, outer, inst.X)
+ }
+ }
+ } else { // ir.OMETHEXPR or ir.OMETHVALUE
+ // Method expression T.M where T is a generic type.
+ se := x.(*ir.SelectorExpr)
+ targs := deref(se.X.Type()).RParams()
+ if len(targs) == 0 {
+ panic("bad")
+ }
+ if x.Op() == ir.OMETHVALUE {
+ rcvrValue = se.X
+ }
+
+ // se.X.Type() is the top-level type of the method expression. To
+ // correctly handle method expressions involving embedded fields,
+ // look up the generic method below using the type of the receiver
+ // of se.Selection, since that will be the type that actually has
+ // the method.
+ recv := deref(se.Selection.Type.Recv().Type)
+ if len(recv.RParams()) == 0 {
+ // The embedded type that actually has the method is not
+ // actually generic, so no need to build a closure.
+ return x
+ }
+ baseType := recv.OrigType()
+ var gf *ir.Name
+ for _, m := range baseType.Methods().Slice() {
+ if se.Sel == m.Sym {
+ gf = m.Nname.(*ir.Name)
+ break
+ }
+ }
+ if !gf.Type().Recv().Type.IsPtr() {
+ // Remember if value method, so we can detect (*T).M case.
+ valueMethod = true
+ }
+ target = g.getInstantiation(gf, targs, true).fun
+ dictValue, usingSubdict = g.getDictOrSubdict(outerInfo, x, gf, targs, true)
+ if infoPrintMode {
+ dictkind := "Main dictionary"
+ if usingSubdict {
+ dictkind = "Sub-dictionary"
+ }
+ fmt.Printf("%s in %v for method expression %v\n", dictkind, outer, x)
+ }
+ }
+
+ // Build a closure to implement a function instantiation.
+ //
+ // func f[T any] (int, int) (int, int) { ...whatever... }
+ //
+ // Then any reference to f[int] not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.f[int](.dictN, a0, a1)
+ // }
+ //
+ // Similarly for method expressions,
+ //
+ // type g[T any] ....
+ // func (rcvr g[T]) f(a0, a1 int) (r0, r1 int) { ... }
+ //
+ // Any reference to g[int].f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // func(rcvr g[int], a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, rcvr, a0, a1)
+ // }
+ //
+ // Also method values
+ //
+ // var x g[int]
+ //
+ // Any reference to x.f not directly called gets rewritten to
+ //
+ // .dictN := ... dictionary to use ...
+ // x2 := x
+ // func(a0, a1 int) (r0, r1 int) {
+ // return .inst.g[int].f(.dictN, x2, a0, a1)
+ // }
+
+ // Make a new internal function.
+ fn, formalParams, formalResults := startClosure(pos, outer, typ)
+
+ // This is the dictionary we want to use.
+ // It may be a constant, it may be the outer function's dictionary, or it may be
+ // a subdictionary acquired from the outer function's dictionary.
+ // For the latter, dictVar is a variable in the outer function's scope, set to the subdictionary
+ // read from the outer function's dictionary.
+ var dictVar *ir.Name
+ var dictAssign *ir.AssignStmt
+ if outer != nil {
+ dictVar = ir.NewNameAt(pos, typecheck.LookupNum(typecheck.LocalDictName, g.dnum))
+ dictVar.SetSym(outer.Sym().Pkg.Lookup(dictVar.Sym().Name))
+ g.dnum++
+ dictVar.Class = ir.PAUTO
+ typed(types.Types[types.TUINTPTR], dictVar)
+ dictVar.Curfn = outer
+ dictAssign = ir.NewAssignStmt(pos, dictVar, dictValue)
+ dictAssign.SetTypecheck(1)
+ dictVar.Defn = dictAssign
+ outer.Dcl = append(outer.Dcl, dictVar)
+ }
+ // Assign the receiver to a temporary.
+ var rcvrVar *ir.Name
+ var rcvrAssign ir.Node
+ if rcvrValue != nil {
+ rcvrVar = ir.NewNameAt(pos, typecheck.LookupNum(".rcvr", g.dnum))
+ if outer != nil {
+ rcvrVar.SetSym(outer.Sym().Pkg.Lookup(rcvrVar.Sym().Name))
+ }
+ g.dnum++
+ typed(rcvrValue.Type(), rcvrVar)
+ rcvrAssign = ir.NewAssignStmt(pos, rcvrVar, rcvrValue)
+ rcvrAssign.SetTypecheck(1)
+ rcvrVar.Defn = rcvrAssign
+ if outer == nil {
+ rcvrVar.Class = ir.PEXTERN
+ typecheck.Target.Decls = append(typecheck.Target.Decls, rcvrAssign)
+ typecheck.Target.Externs = append(typecheck.Target.Externs, rcvrVar)
+ } else {
+ rcvrVar.Class = ir.PAUTO
+ rcvrVar.Curfn = outer
+ outer.Dcl = append(outer.Dcl, rcvrVar)
+ }
+ }
+
+ // Build body of closure. This involves just calling the wrapped function directly
+ // with the additional dictionary argument.
+
+ // First, figure out the dictionary argument.
+ var dict2Var ir.Node
+ if usingSubdict {
+ // Capture sub-dictionary calculated in the outer function
+ dict2Var = ir.CaptureName(pos, fn, dictVar)
+ typed(types.Types[types.TUINTPTR], dict2Var)
+ } else {
+ // Static dictionary, so can be used directly in the closure
+ dict2Var = dictValue
+ }
+ // Also capture the receiver variable.
+ var rcvr2Var *ir.Name
+ if rcvrValue != nil {
+ rcvr2Var = ir.CaptureName(pos, fn, rcvrVar)
+ }
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+
+ // First the dictionary argument.
+ args = append(args, dict2Var)
+ // Then the receiver.
+ if rcvrValue != nil {
+ args = append(args, rcvr2Var)
+ }
+ // Then all the other arguments (including receiver for method expressions).
+ for i := 0; i < typ.NumParams(); i++ {
+ if x.Op() == ir.OMETHEXPR && i == 0 {
+ // If we are doing a method expression, we need to
+ // explicitly traverse any embedded fields in the receiver
+ // argument in order to call the method instantiation.
+ arg0 := formalParams[0].Nname.(ir.Node)
+ arg0 = typecheck.AddImplicitDots(ir.NewSelectorExpr(x.Pos(), ir.OXDOT, arg0, x.(*ir.SelectorExpr).Sel)).X
+ if valueMethod && arg0.Type().IsPtr() {
+ // For handling the (*T).M case: if we have a pointer
+ // receiver after following all the embedded fields,
+ // but it's a value method, add a star operator.
+ arg0 = ir.NewStarExpr(arg0.Pos(), arg0)
+ }
+ args = append(args, arg0)
+ } else {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+ }
+
+ // Build call itself.
+ var innerCall ir.Node = ir.NewCallExpr(pos, ir.OCALL, target.Nname, args)
+ innerCall.(*ir.CallExpr).IsDDD = typ.IsVariadic()
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ }
+ // Finish building body of closure.
+ ir.CurFunc = fn
+ // TODO: set types directly here instead of using typecheck.Stmt
+ typecheck.Stmt(innerCall)
+ ir.CurFunc = nil
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary (and receiver, for method values).
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Make a closure referencing our new internal function.
+ c := ir.UseClosure(fn.OClosure, typecheck.Target)
+ var init []ir.Node
+ if outer != nil {
+ init = append(init, dictAssign)
+ }
+ if rcvrValue != nil {
+ init = append(init, rcvrAssign)
+ }
+ return ir.InitExpr(init, c)
+}
+
+// instantiateMethods instantiates all the methods (and associated dictionaries) of
+// all fully-instantiated generic types that have been added to typecheck.instTypeList.
+// It continues until no more types are added to typecheck.instTypeList.
+func (g *genInst) instantiateMethods() {
+ for {
+ instTypeList := typecheck.GetInstTypeList()
+ if len(instTypeList) == 0 {
+ break
+ }
+ typecheck.ClearInstTypeList()
+ for _, typ := range instTypeList {
+ assert(!typ.HasShape())
+ // Mark runtime type as needed, since this ensures that the
+ // compiler puts out the needed DWARF symbols, when this
+ // instantiated type has a different package from the local
+ // package.
+ typecheck.NeedRuntimeType(typ)
+ // Lookup the method on the base generic type, since methods may
+ // not be set on imported instantiated types.
+ baseType := typ.OrigType()
+			for j := range typ.Methods().Slice() {
+ if baseType.Methods().Slice()[j].Nointerface() {
+ typ.Methods().Slice()[j].SetNointerface(true)
+ }
+ baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
+ // Eagerly generate the instantiations and dictionaries that implement these methods.
+ // We don't use the instantiations here, just generate them (and any
+ // further instantiations those generate, etc.).
+ // Note that we don't set the Func for any methods on instantiated
+ // types. Their signatures don't match so that would be confusing.
+ // Direct method calls go directly to the instantiations, implemented above.
+ // Indirect method calls use wrappers generated in reflectcall. Those wrappers
+ // will use these instantiations if they are needed (for interface tables or reflection).
+ _ = g.getInstantiation(baseNname, typ.RParams(), true)
+ _ = g.getDictionarySym(baseNname, typ.RParams(), true)
+ }
+ }
+ }
+}
+
+// getInstNameNode returns the name node for the method or function being
+// instantiated, and a bool which is true if a method is being instantiated.
+func (g *genInst) getInstNameNode(inst *ir.InstExpr) (*ir.Name, bool) {
+ if meth, ok := inst.X.(*ir.SelectorExpr); ok {
+ return meth.Selection.Nname.(*ir.Name), true
+ } else {
+ return inst.X.(*ir.Name), false
+ }
+}
+
+// getDictOrSubdict returns, for a method/function call or reference (node n) in an
+// instantiation (described by instInfo), a node which is accessing a sub-dictionary
+// or main/static dictionary, as needed, and also returns a boolean indicating if a
+// sub-dictionary was accessed. nameNode is the particular function or method being
+// called/referenced, and targs are the type arguments.
+func (g *genInst) getDictOrSubdict(declInfo *instInfo, n ir.Node, nameNode *ir.Name, targs []*types.Type, isMeth bool) (ir.Node, bool) {
+ var dict ir.Node
+ usingSubdict := false
+ if declInfo != nil {
+ entry := -1
+ for i, de := range declInfo.dictInfo.subDictCalls {
+ if n == de.callNode {
+ entry = declInfo.dictInfo.startSubDict + i
+ break
+ }
+ }
+ // If the entry is not found, it may be that this node did not have
+ // any type args that depend on type params, so we need a main
+ // dictionary, not a sub-dictionary.
+ if entry >= 0 {
+ dict = getDictionaryEntry(n.Pos(), declInfo.dictParam, entry, declInfo.dictInfo.dictLen)
+ usingSubdict = true
+ }
+ }
+ if !usingSubdict {
+ dict = g.getDictionaryValue(n.Pos(), nameNode, targs, isMeth)
+ }
+ return dict, usingSubdict
+}
+
+// checkFetchBody checks if a generic body can be fetched, but hasn't been loaded
+// yet. If so, it imports the body.
+func checkFetchBody(nameNode *ir.Name) {
+ if nameNode.Func.Body == nil && nameNode.Func.Inl != nil {
+ // If there is no body yet but Func.Inl exists, then we can
+ // import the whole generic body.
+ assert(nameNode.Func.Inl.Cost == 1 && nameNode.Sym().Pkg != types.LocalPkg)
+ typecheck.ImportBody(nameNode.Func)
+ assert(nameNode.Func.Inl.Body != nil)
+ nameNode.Func.Body = nameNode.Func.Inl.Body
+ nameNode.Func.Dcl = nameNode.Func.Inl.Dcl
+ }
+}
+
+// getInstantiation gets the instantiation and dictionary of the function or method nameNode
+// with the type arguments shapes. If the instantiated function is not already
+// cached, then it calls genericSubst to create the new instantiation.
+func (g *genInst) getInstantiation(nameNode *ir.Name, shapes []*types.Type, isMeth bool) *instInfo {
+ if nameNode.Func == nil {
+ // If nameNode.Func is nil, this must be a reference to a method of
+ // an imported instantiated type. We will have already called
+ // g.instantiateMethods() on the fully-instantiated type, so
+ // g.instInfoMap[sym] will be non-nil below.
+ rcvr := nameNode.Type().Recv()
+ if rcvr == nil || !deref(rcvr.Type).IsFullyInstantiated() {
+ base.FatalfAt(nameNode.Pos(), "Unexpected function instantiation %v with no body", nameNode)
+ }
+ } else {
+ checkFetchBody(nameNode)
+ }
+
+ var tparams []*types.Type
+ if isMeth {
+ // Get the type params from the method receiver (after skipping
+ // over any pointer)
+ recvType := nameNode.Type().Recv().Type
+ recvType = deref(recvType)
+ if recvType.IsFullyInstantiated() {
+ // Get the type of the base generic type, so we get
+ // its original typeparams.
+ recvType = recvType.OrigType()
+ }
+ tparams = recvType.RParams()
+ } else {
+ fields := nameNode.Type().TParams().Fields().Slice()
+ tparams = make([]*types.Type, len(fields))
+ for i, f := range fields {
+ tparams[i] = f.Type
+ }
+ }
+
+ // Convert any non-shape type arguments to their shape, so we can reduce the
+ // number of instantiations we have to generate. You can actually have a mix
+ // of shape and non-shape arguments, because of inferred or explicitly
+ // specified concrete type args.
+ s1 := make([]*types.Type, len(shapes))
+ for i, t := range shapes {
+ var tparam *types.Type
+		// Shapes are grouped differently for structural types, so we pass
+		// the type param to Shapify() so that it can distinguish them.
+ tparam = tparams[i]
+ if !t.IsShape() {
+ s1[i] = typecheck.Shapify(t, i, tparam)
+ } else {
+ // Already a shape, but make sure it has the correct index.
+ s1[i] = typecheck.Shapify(shapes[i].Underlying(), i, tparam)
+ }
+ }
+ shapes = s1
+
+ sym := typecheck.MakeFuncInstSym(nameNode.Sym(), shapes, false, isMeth)
+ info := g.instInfoMap[sym]
+ if info == nil {
+ // If instantiation doesn't exist yet, create it and add
+ // to the list of decls.
+ info = &instInfo{
+ dictInfo: &dictInfo{},
+ }
+ info.dictInfo.shapeToBound = make(map[*types.Type]*types.Type)
+
+ if sym.Def != nil {
+ // This instantiation must have been imported from another
+ // package (because it was needed for inlining), so we should
+ // not re-generate it and have conflicting definitions for the
+ // symbol (issue #50121). It will have already gone through the
+ // dictionary transformations of dictPass, so we don't actually
+ // need the info.dictParam and info.shapeToBound info filled in
+ // below. We just set the imported instantiation as info.fun.
+ assert(sym.Pkg != types.LocalPkg)
+ info.fun = sym.Def.(*ir.Name).Func
+ assert(info.fun != nil)
+ g.instInfoMap[sym] = info
+ return info
+ }
+
+ // genericSubst fills in info.dictParam and info.shapeToBound.
+ st := g.genericSubst(sym, nameNode, tparams, shapes, isMeth, info)
+ info.fun = st
+ g.instInfoMap[sym] = info
+
+ // getInstInfo fills in info.dictInfo.
+ g.getInstInfo(st, shapes, info)
+ if base.Flag.W > 1 {
+ ir.Dump(fmt.Sprintf("\nstenciled %v", st), st)
+ }
+
+ // This ensures that the linker drops duplicates of this instantiation.
+ // All just works!
+ st.SetDupok(true)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, st)
+ g.newInsts = append(g.newInsts, st)
+ }
+ return info
+}
+
+// Struct containing info needed for doing the substitution as we create the
+// instantiation of a generic function with specified type arguments.
+type subster struct {
+ g *genInst
+ isMethod bool // If a method is being instantiated
+ newf *ir.Func // Func node for the new stenciled function
+ ts typecheck.Tsubster
+ info *instInfo // Place to put extra info in the instantiation
+
+ // Map from non-nil, non-ONAME node n to slice of all m, where m.Defn = n
+ defnMap map[ir.Node][]**ir.Name
+}
+
+// genericSubst returns a new function with name newsym. The function is an
+// instantiation of a generic function or method specified by nameNode with type
+// args shapes. For a method with a generic receiver, it returns an instantiated
+// function type where the receiver becomes the first parameter. For either a generic
+// method or function, a dictionary parameter is added as the very first
+// parameter. genericSubst fills in info.dictParam and info.shapeToBound.
+func (g *genInst) genericSubst(newsym *types.Sym, nameNode *ir.Name, tparams []*types.Type, shapes []*types.Type, isMethod bool, info *instInfo) *ir.Func {
+ gf := nameNode.Func
+ // Pos of the instantiated function is same as the generic function
+ newf := ir.NewFunc(gf.Pos())
+ newf.Pragma = gf.Pragma // copy over pragmas from generic function to stenciled implementation.
+ newf.Nname = ir.NewNameAt(gf.Pos(), newsym)
+ newf.Nname.Func = newf
+ newf.Nname.Defn = newf
+ newsym.Def = newf.Nname
+ savef := ir.CurFunc
+ // transformCall/transformReturn (called during stenciling of the body)
+ // depend on ir.CurFunc being set.
+ ir.CurFunc = newf
+
+ assert(len(tparams) == len(shapes))
+
+ subst := &subster{
+ g: g,
+ isMethod: isMethod,
+ newf: newf,
+ info: info,
+ ts: typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: shapes,
+ Vars: make(map[*ir.Name]*ir.Name),
+ },
+ defnMap: make(map[ir.Node][]**ir.Name),
+ }
+
+ newf.Dcl = make([]*ir.Name, 0, len(gf.Dcl)+1)
+
+ // Create the needed dictionary param
+ dictionarySym := newsym.Pkg.Lookup(typecheck.LocalDictName)
+ dictionaryType := types.Types[types.TUINTPTR]
+ dictionaryName := ir.NewNameAt(gf.Pos(), dictionarySym)
+ typed(dictionaryType, dictionaryName)
+ dictionaryName.Class = ir.PPARAM
+ dictionaryName.Curfn = newf
+ newf.Dcl = append(newf.Dcl, dictionaryName)
+ for _, n := range gf.Dcl {
+ if n.Sym().Name == typecheck.LocalDictName {
+ panic("already has dictionary")
+ }
+ newf.Dcl = append(newf.Dcl, subst.localvar(n))
+ }
+ dictionaryArg := types.NewField(gf.Pos(), dictionarySym, dictionaryType)
+ dictionaryArg.Nname = dictionaryName
+ info.dictParam = dictionaryName
+
+ // We add the dictionary as the first parameter in the function signature.
+ // We also transform a method type to the corresponding function type
+ // (make the receiver be the next parameter after the dictionary).
+ oldt := nameNode.Type()
+ var args []*types.Field
+ args = append(args, dictionaryArg)
+ args = append(args, oldt.Recvs().FieldSlice()...)
+ args = append(args, oldt.Params().FieldSlice()...)
+
+ // Replace the types in the function signature via subst.fields.
+ // Ugly: also, we have to insert the Name nodes of the parameters/results into
+ // the function type. The current function type has no Nname fields set,
+ // because it came via conversion from the types2 type.
+ newt := types.NewSignature(oldt.Pkg(), nil, nil,
+ subst.fields(ir.PPARAM, args, newf.Dcl),
+ subst.fields(ir.PPARAMOUT, oldt.Results().FieldSlice(), newf.Dcl))
+
+ typed(newt, newf.Nname)
+ ir.MarkFunc(newf.Nname)
+ newf.SetTypecheck(1)
+
+ // Make sure name/type of newf is set before substituting the body.
+ newf.Body = subst.list(gf.Body)
+ if len(newf.Body) == 0 {
+ // Ensure the body is nonempty, for issue 49524.
+ // TODO: have some other way to detect the difference between
+ // a function declared with no body, vs. one with an empty body?
+ newf.Body = append(newf.Body, ir.NewBlockStmt(gf.Pos(), nil))
+ }
+
+ if len(subst.defnMap) > 0 {
+ base.Fatalf("defnMap is not empty")
+ }
+
+ for i, tp := range tparams {
+ info.dictInfo.shapeToBound[shapes[i]] = subst.ts.Typ(tp.Bound())
+ }
+
+ ir.CurFunc = savef
+
+ return subst.newf
+}
+
+// localvar creates a new name node for the specified local variable and enters it
+// in subst.ts.Vars. It substitutes type arguments for type parameters in the type of
+// name as needed.
+func (subst *subster) localvar(name *ir.Name) *ir.Name {
+ m := ir.NewNameAt(name.Pos(), name.Sym())
+ if name.IsClosureVar() {
+ m.SetIsClosureVar(true)
+ }
+ m.SetType(subst.ts.Typ(name.Type()))
+ m.BuiltinOp = name.BuiltinOp
+ m.Curfn = subst.newf
+ m.Class = name.Class
+ assert(name.Class != ir.PEXTERN && name.Class != ir.PFUNC)
+ m.Func = name.Func
+ subst.ts.Vars[name] = m
+ m.SetTypecheck(1)
+ m.DictIndex = name.DictIndex
+ if name.Defn != nil {
+ if name.Defn.Op() == ir.ONAME {
+ // This is a closure variable, so its Defn is the outer
+ // captured variable, which has already been substituted.
+ m.Defn = subst.node(name.Defn)
+ } else {
+ // The other values of Defn are nodes in the body of the
+ // function, so just remember the mapping so we can set Defn
+ // properly in node() when we create the new body node. We
+ // always call localvar() on all the local variables before
+ // we substitute the body.
+ slice := subst.defnMap[name.Defn]
+ subst.defnMap[name.Defn] = append(slice, &m)
+ }
+ }
+ if name.Outer != nil {
+ m.Outer = subst.node(name.Outer).(*ir.Name)
+ }
+
+ return m
+}
+
+// getDictionaryEntry gets the i'th entry in the dictionary dict.
+func getDictionaryEntry(pos src.XPos, dict *ir.Name, i int, size int) ir.Node {
+ // Convert dictionary to *[N]uintptr
+ // All entries in the dictionary are pointers. They all point to static data, though, so we
+ // treat them as uintptrs so the GC doesn't need to keep track of them.
+ d := ir.NewConvExpr(pos, ir.OCONVNOP, types.Types[types.TUNSAFEPTR], dict)
+ d.SetTypecheck(1)
+ d = ir.NewConvExpr(pos, ir.OCONVNOP, types.NewArray(types.Types[types.TUINTPTR], int64(size)).PtrTo(), d)
+ d.SetTypecheck(1)
+ types.CheckSize(d.Type().Elem())
+
+ // Load entry i out of the dictionary.
+ deref := ir.NewStarExpr(pos, d)
+ typed(d.Type().Elem(), deref)
+ idx := ir.NewConstExpr(constant.MakeUint64(uint64(i)), dict) // TODO: what to set orig to?
+ typed(types.Types[types.TUINTPTR], idx)
+ r := ir.NewIndexExpr(pos, deref, idx)
+ typed(types.Types[types.TUINTPTR], r)
+ return r
+}
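+
+// Viewed as ordinary Go, the IR constructed above performs the load sketched
+// by this hypothetical helper (illustrative only; the array length is a
+// stand-in for the real dictionary size):
+//
+//	func dictEntry(dict unsafe.Pointer, i int) uintptr {
+//		const max = 1 << 20
+//		return (*[max]uintptr)(dict)[i]
+//	}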
+
+// getDictionaryType returns a *runtime._type from the dictionary entry i (which
+// refers to a type param or a derived type that uses type params). It uses the
+// specified dictionary dictParam, rather than the one in info.dictParam.
+func getDictionaryType(info *instInfo, dictParam *ir.Name, pos src.XPos, i int) ir.Node {
+ if i < 0 || i >= info.dictInfo.startSubDict {
+ base.Fatalf(fmt.Sprintf("bad dict index %d", i))
+ }
+
+ r := getDictionaryEntry(pos, dictParam, i, info.dictInfo.startSubDict)
+ // change type of retrieved dictionary entry to *byte, which is the
+ // standard typing of a *runtime._type in the compiler
+ typed(types.Types[types.TUINT8].PtrTo(), r)
+ return r
+}
+
+// node is like DeepCopy(), but substitutes ONAME nodes based on subst.ts.Vars, and
+// also descends into closures. It substitutes type arguments for type parameters in
+// all the new nodes and does the transformations that were delayed on the generic
+// function.
+func (subst *subster) node(n ir.Node) ir.Node {
+ // Use closure to capture all state needed by the ir.EditChildren argument.
+ var edit func(ir.Node) ir.Node
+ edit = func(x ir.Node) ir.Node {
+ // Analogous to ir.SetPos() at beginning of typecheck.typecheck() -
+ // allows using base.Pos during the transform functions, just like
+ // the tc*() functions.
+ ir.SetPos(x)
+ switch x.Op() {
+ case ir.OTYPE:
+ return ir.TypeNode(subst.ts.Typ(x.Type()))
+
+ case ir.ONAME:
+ if v := subst.ts.Vars[x.(*ir.Name)]; v != nil {
+ return v
+ }
+ if ir.IsBlank(x) {
+ // Special case, because a blank local variable is
+ // not in the fn.Dcl list.
+ m := ir.NewNameAt(x.Pos(), ir.BlankNode.Sym())
+ return typed(subst.ts.Typ(x.Type()), m)
+ }
+ return x
+ case ir.ONONAME:
+ // This handles the identifier in a type switch guard
+ fallthrough
+ case ir.OLITERAL, ir.ONIL:
+ if x.Sym() != nil {
+ return x
+ }
+ }
+ m := ir.Copy(x)
+
+ slice, ok := subst.defnMap[x]
+ if ok {
+ // We just copied a non-ONAME node which was the Defn value
+ // of a local variable. Set the Defn value of the copied
+ // local variable to this new Defn node.
+ for _, ptr := range slice {
+ (*ptr).Defn = m
+ }
+ delete(subst.defnMap, x)
+ }
+
+ if _, isExpr := m.(ir.Expr); isExpr {
+ t := x.Type()
+ if t == nil {
+ // Check for known cases where t can be nil (call
+ // that has no return values, and key expressions)
+ // and otherwise cause a fatal error.
+ _, isCallExpr := m.(*ir.CallExpr)
+ _, isStructKeyExpr := m.(*ir.StructKeyExpr)
+ _, isKeyExpr := m.(*ir.KeyExpr)
+ if !isCallExpr && !isStructKeyExpr && !isKeyExpr && x.Op() != ir.OPANIC &&
+ x.Op() != ir.OCLOSE {
+ base.FatalfAt(m.Pos(), "Nil type for %v", x)
+ }
+ } else if x.Op() != ir.OCLOSURE {
+ m.SetType(subst.ts.Typ(x.Type()))
+ }
+ }
+
+ ir.EditChildren(m, edit)
+
+ m.SetTypecheck(1)
+
+ // Do the transformations that we delayed on the generic function
+ // node, now that we have substituted in the type args.
+ switch x.Op() {
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ transformCompare(m.(*ir.BinaryExpr))
+
+ case ir.OSLICE, ir.OSLICE3:
+ transformSlice(m.(*ir.SliceExpr))
+
+ case ir.OADD:
+ m = transformAdd(m.(*ir.BinaryExpr))
+
+ case ir.OINDEX:
+ transformIndex(m.(*ir.IndexExpr))
+
+ case ir.OAS2:
+ as2 := m.(*ir.AssignListStmt)
+ transformAssign(as2, as2.Lhs, as2.Rhs)
+
+ case ir.OAS:
+ as := m.(*ir.AssignStmt)
+ if as.Y != nil {
+ // transformAssign doesn't handle the case
+ // of zeroing assignment of a dcl (rhs[0] is nil).
+ lhs, rhs := []ir.Node{as.X}, []ir.Node{as.Y}
+ transformAssign(as, lhs, rhs)
+ as.X, as.Y = lhs[0], rhs[0]
+ }
+
+ case ir.OASOP:
+ as := m.(*ir.AssignOpStmt)
+ transformCheckAssign(as, as.X)
+
+ case ir.ORETURN:
+ transformReturn(m.(*ir.ReturnStmt))
+
+ case ir.OSEND:
+ transformSend(m.(*ir.SendStmt))
+
+ case ir.OSELECT:
+ transformSelect(m.(*ir.SelectStmt))
+
+ case ir.OCOMPLIT:
+ transformCompLit(m.(*ir.CompLitExpr))
+
+ case ir.OADDR:
+ transformAddr(m.(*ir.AddrExpr))
+
+ case ir.OLITERAL:
+ t := m.Type()
+ if t != x.Type() {
+ // types2 will give us a constant with a type T,
+ // if an untyped constant is used with another
+ // operand of type T (in a provably correct way).
+ // When we substitute in the type args during
+ // stenciling, we now know the real type of the
+ // constant. We may then need to change the
+ // BasicLit.val to be the correct type (e.g.
+ // convert an int64Val constant to a floatVal
+ // constant).
+ m.SetType(types.UntypedInt) // use any untyped type for DefaultLit to work
+ m = typecheck.DefaultLit(m, t)
+ }
+
+ case ir.OXDOT:
+ // Finish the transformation of an OXDOT, unless this is
+			// a bound call or field access on a type param. A bound call
+ // or field access on a type param will be transformed during
+ // the dictPass. Otherwise, m will be transformed to an
+ // OMETHVALUE node. It will be transformed to an ODOTMETH or
+ // ODOTINTER node if we find in the OCALL case below that the
+ // method value is actually called.
+ mse := m.(*ir.SelectorExpr)
+ if src := mse.X.Type(); !src.IsShape() {
+ transformDot(mse, false)
+ }
+
+ case ir.OCALL:
+ call := m.(*ir.CallExpr)
+ switch call.X.Op() {
+ case ir.OTYPE:
+ // Transform the conversion, now that we know the
+ // type argument.
+ m = transformConvCall(call)
+
+ case ir.OMETHVALUE, ir.OMETHEXPR:
+ // Redo the transformation of OXDOT, now that we
+ // know the method value is being called. Then
+ // transform the call.
+ call.X.(*ir.SelectorExpr).SetOp(ir.OXDOT)
+ transformDot(call.X.(*ir.SelectorExpr), true)
+ transformCall(call)
+
+ case ir.ODOT, ir.ODOTPTR:
+ // An OXDOT for a generic receiver was resolved to
+ // an access to a field which has a function
+ // value. Transform the call to that function, now
+ // that the OXDOT was resolved.
+ transformCall(call)
+
+ case ir.ONAME:
+ name := call.X.Name()
+ if name.BuiltinOp != ir.OXXX {
+ m = transformBuiltin(call)
+ } else {
+ // This is the case of a function value that was a
+ // type parameter (implied to be a function via a
+ // structural constraint) which is now resolved.
+ transformCall(call)
+ }
+
+ case ir.OFUNCINST:
+ // A call with an OFUNCINST will get transformed
+ // in stencil() once we have created & attached the
+ // instantiation to be called.
+ // We must transform the arguments of the call now, though,
+ // so that any needed CONVIFACE nodes are exposed,
+ // so the dictionary format is correct.
+ transformEarlyCall(call)
+
+ case ir.OXDOT:
+ // This is the case of a bound call or a field access
+ // on a typeparam, which will be handled in the
+ // dictPass. As with OFUNCINST, we must transform the
+ // arguments of the call now, so any needed CONVIFACE
+ // nodes are exposed.
+ transformEarlyCall(call)
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ // These are DOTTYPEs that could get transformed into
+ // ODYNAMIC DOTTYPEs by the dict pass.
+
+ default:
+ // Transform a call for all other values of
+ // call.X.Op() that don't require any special
+ // handling.
+ transformCall(call)
+
+ }
+
+ case ir.OCLOSURE:
+ // We're going to create a new closure from scratch, so clear m
+ // to avoid using the ir.Copy by accident until we reassign it.
+ m = nil
+
+ x := x.(*ir.ClosureExpr)
+ // Need to duplicate x.Func.Nname, x.Func.Dcl, x.Func.ClosureVars, and
+ // x.Func.Body.
+ oldfn := x.Func
+ newfn := ir.NewClosureFunc(oldfn.Pos(), subst.newf != nil)
+ ir.NameClosure(newfn.OClosure, subst.newf)
+
+ saveNewf := subst.newf
+ ir.CurFunc = newfn
+ subst.newf = newfn
+ newfn.Dcl = subst.namelist(oldfn.Dcl)
+
+ // Make a closure variable for the dictionary of the
+ // containing function.
+ cdict := ir.CaptureName(oldfn.Pos(), newfn, subst.info.dictParam)
+ typed(types.Types[types.TUINTPTR], cdict)
+ ir.FinishCaptureNames(oldfn.Pos(), saveNewf, newfn)
+ newfn.ClosureVars = append(newfn.ClosureVars, subst.namelist(oldfn.ClosureVars)...)
+
+ // Copy that closure variable to a local one.
+ // Note: this allows the dictionary to be captured by child closures.
+ // See issue 47723.
+ ldict := ir.NewNameAt(x.Pos(), newfn.Sym().Pkg.Lookup(typecheck.LocalDictName))
+ typed(types.Types[types.TUINTPTR], ldict)
+ ldict.Class = ir.PAUTO
+ ldict.Curfn = newfn
+ newfn.Dcl = append(newfn.Dcl, ldict)
+ as := ir.NewAssignStmt(x.Pos(), ldict, cdict)
+ as.SetTypecheck(1)
+ ldict.Defn = as
+ newfn.Body.Append(as)
+
+ // Create inst info for the instantiated closure. The dict
+ // param is the closure variable for the dictionary of the
+ // outer function. Since the dictionary is shared, use the
+ // same dictInfo.
+ cinfo := &instInfo{
+ fun: newfn,
+ dictParam: ldict,
+ dictInfo: subst.info.dictInfo,
+ }
+ subst.g.instInfoMap[newfn.Nname.Sym()] = cinfo
+
+ typed(subst.ts.Typ(oldfn.Nname.Type()), newfn.Nname)
+ typed(newfn.Nname.Type(), newfn.OClosure)
+ newfn.SetTypecheck(1)
+
+ outerinfo := subst.info
+ subst.info = cinfo
+ // Make sure type of closure function is set before doing body.
+ newfn.Body.Append(subst.list(oldfn.Body)...)
+ subst.info = outerinfo
+ subst.newf = saveNewf
+ ir.CurFunc = saveNewf
+
+ m = ir.UseClosure(newfn.OClosure, typecheck.Target)
+ subst.g.newInsts = append(subst.g.newInsts, m.(*ir.ClosureExpr).Func)
+ m.(*ir.ClosureExpr).SetInit(subst.list(x.Init()))
+
+ case ir.OSWITCH:
+ m := m.(*ir.SwitchStmt)
+ if m.Tag != nil && m.Tag.Op() == ir.OTYPESW {
+ break // Nothing to do here for type switches.
+ }
+ if m.Tag != nil && !m.Tag.Type().IsEmptyInterface() && m.Tag.Type().HasShape() {
+ // To implement a switch on a value that is or has a type parameter, we first convert
+ // that thing we're switching on to an interface{}.
+ m.Tag = assignconvfn(m.Tag, types.Types[types.TINTER])
+ }
+ for _, c := range m.Cases {
+ for i, x := range c.List {
+ // If we have a case that is or has a type parameter, convert that case
+ // to an interface{}.
+ if !x.Type().IsEmptyInterface() && x.Type().HasShape() {
+ c.List[i] = assignconvfn(x, types.Types[types.TINTER])
+ }
+ }
+ }
+
+ }
+ return m
+ }
+
+ return edit(n)
+}
+
+// dictPass takes a function instantiation and does the transformations on the
+// operations that need to make use of the dictionary param.
+func (g *genInst) dictPass(info *instInfo) {
+ savef := ir.CurFunc
+ ir.CurFunc = info.fun
+
+ var edit func(ir.Node) ir.Node
+ edit = func(m ir.Node) ir.Node {
+ ir.EditChildren(m, edit)
+
+ switch m.Op() {
+ case ir.OCLOSURE:
+ newf := m.(*ir.ClosureExpr).Func
+ ir.CurFunc = newf
+ outerinfo := info
+ info = g.instInfoMap[newf.Nname.Sym()]
+
+ body := newf.Body
+ for i, n := range body {
+ body[i] = edit(n)
+ }
+
+ info = outerinfo
+ ir.CurFunc = info.fun
+
+ case ir.OXDOT:
+ // This is the case of a dot access on a type param. This is
+ // typically a bound call on the type param, but could be a
+ // field access, if the constraint has a single structural type.
+ mse := m.(*ir.SelectorExpr)
+ src := mse.X.Type()
+ assert(src.IsShape())
+
+ if mse.X.Op() == ir.OTYPE {
+ // Method expression T.M
+ m = g.buildClosure2(info, m)
+ // No need for transformDot - buildClosure2 has already
+ // transformed to OCALLINTER/ODOTINTER.
+ } else {
+ // If we can't find the selected method in the
+ // AllMethods of the bound, then this must be an access
+ // to a field of a structural type. If so, we skip the
+ // dictionary lookups - transformDot() will convert to
+ // the desired direct field access.
+ if isBoundMethod(info.dictInfo, mse) {
+ dst := info.dictInfo.shapeToBound[mse.X.Type()]
+ // Implement x.M as a conversion-to-bound-interface
+ // 1) convert x to the bound interface
+ // 2) call M on that interface
+ if src.IsInterface() {
+ // If type arg is an interface (unusual case),
+ // we do a type assert to the type bound.
+ mse.X = assertToBound(info, info.dictParam, m.Pos(), mse.X, dst)
+ } else {
+ mse.X = convertUsingDictionary(info, info.dictParam, m.Pos(), mse.X, m, dst, true)
+ // Note: we set nonEscaping==true, because we can assume the backing store for the
+ // interface conversion doesn't escape. The method call will immediately go to
+ // a wrapper function which copies all the data out of the interface value.
+ // (It only matters for non-pointer-shaped interface conversions. See issue 50182.)
+ }
+ }
+ transformDot(mse, false)
+ }
+ case ir.OCALL:
+ call := m.(*ir.CallExpr)
+ op := call.X.Op()
+ if op == ir.OMETHVALUE {
+ // Redo the transformation of OXDOT, now that we
+ // know the method value is being called.
+ call.X.(*ir.SelectorExpr).SetOp(ir.OXDOT)
+ transformDot(call.X.(*ir.SelectorExpr), true)
+ }
+ transformCall(call)
+
+ case ir.OCONVIFACE:
+ if m.Type().IsEmptyInterface() && m.(*ir.ConvExpr).X.Type().IsEmptyInterface() {
+ // Was T->interface{}, after stenciling it is now interface{}->interface{}.
+ // No longer need the conversion. See issue 48276.
+ m.(*ir.ConvExpr).SetOp(ir.OCONVNOP)
+ break
+ }
+ mce := m.(*ir.ConvExpr)
+ // Note: x's argument is still typed as a type parameter.
+ // m's argument now has an instantiated type.
+
+ if mce.X.Type().HasShape() || m.Type().HasShape() {
+ m = convertUsingDictionary(info, info.dictParam, m.Pos(), mce.X, m, m.Type(), false)
+ }
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ if !m.Type().HasShape() {
+ break
+ }
+ dt := m.(*ir.TypeAssertExpr)
+ var rt ir.Node
+ if dt.Type().IsInterface() || dt.X.Type().IsEmptyInterface() {
+ ix := findDictType(info, m.Type())
+ assert(ix >= 0)
+ rt = getDictionaryType(info, info.dictParam, dt.Pos(), ix)
+ } else {
+ // nonempty interface to noninterface. Need an itab.
+ ix := -1
+ for i, ic := range info.dictInfo.itabConvs {
+ if ic == m {
+ ix = info.dictInfo.startItabConv + i
+ break
+ }
+ }
+ assert(ix >= 0)
+ rt = getDictionaryEntry(dt.Pos(), info.dictParam, ix, info.dictInfo.dictLen)
+ }
+ op := ir.ODYNAMICDOTTYPE
+ if m.Op() == ir.ODOTTYPE2 {
+ op = ir.ODYNAMICDOTTYPE2
+ }
+ m = ir.NewDynamicTypeAssertExpr(dt.Pos(), op, dt.X, rt)
+ m.SetType(dt.Type())
+ m.SetTypecheck(1)
+ case ir.OCASE:
+ if _, ok := m.(*ir.CommClause); ok {
+ // This is not a type switch. TODO: Should we use an OSWITCH case here instead of OCASE?
+ break
+ }
+ m := m.(*ir.CaseClause)
+ for i, c := range m.List {
+ if c.Op() == ir.OTYPE && c.Type().HasShape() {
+ // Use a *runtime._type for the dynamic type.
+ ix := findDictType(info, m.List[i].Type())
+ assert(ix >= 0)
+ dt := ir.NewDynamicType(c.Pos(), getDictionaryEntry(c.Pos(), info.dictParam, ix, info.dictInfo.dictLen))
+
+ // For type switch from nonempty interfaces to non-interfaces, we need an itab as well.
+ if !m.List[i].Type().IsInterface() {
+ if _, ok := info.dictInfo.type2switchType[m.List[i]]; ok {
+ // Type switch from nonempty interface. We need a *runtime.itab
+ // for the dynamic type.
+ ix := -1
+ for j, ic := range info.dictInfo.itabConvs {
+ if ic == m.List[i] {
+ ix = info.dictInfo.startItabConv + j
+ break
+ }
+ }
+ assert(ix >= 0)
+ dt.ITab = getDictionaryEntry(c.Pos(), info.dictParam, ix, info.dictInfo.dictLen)
+ }
+ }
+ typed(m.List[i].Type(), dt)
+ m.List[i] = dt
+ }
+ }
+
+ }
+ return m
+ }
+ edit(info.fun)
+ ir.CurFunc = savef
+}
+
+// findDictType looks for type t in the typeparams or derived types recorded in
+// the instantiation's dictionary info (info.dictInfo). This will indicate the
+// dictionary entry with the correct concrete type for the associated
+// instantiated function.
+func findDictType(info *instInfo, t *types.Type) int {
+ for i, dt := range info.dictInfo.shapeParams {
+ if dt == t {
+ return i
+ }
+ }
+ for i, dt := range info.dictInfo.derivedTypes {
+ if types.IdenticalStrict(dt, t) {
+ return i + len(info.dictInfo.shapeParams)
+ }
+ }
+ return -1
+}
+
+// convertUsingDictionary converts instantiated value v (type v.Type()) to an interface
+// type dst, by returning a new set of nodes that make use of a dictionary entry. in is the
+// instantiated node of the CONVIFACE node or XDOT node (for a bound method call) that is causing the
+// conversion.
+// If nonEscaping is true, the caller guarantees that the backing store needed for the interface data
+// word will not escape.
+func convertUsingDictionary(info *instInfo, dictParam *ir.Name, pos src.XPos, v ir.Node, in ir.Node, dst *types.Type, nonEscaping bool) ir.Node {
+ assert(v.Type().HasShape() || in.Type().HasShape())
+ assert(dst.IsInterface())
+
+ if v.Type().IsInterface() {
+ // Converting from an interface. The shape-ness of the source doesn't really matter, as
+ // we'll be using the concrete type from the first interface word.
+ if dst.IsEmptyInterface() {
+ // Converting I2E. OCONVIFACE does that for us, and doesn't depend
+ // on what the empty interface was instantiated with. No dictionary entry needed.
+ v = ir.NewConvExpr(pos, ir.OCONVIFACE, dst, v)
+ v.SetTypecheck(1)
+ return v
+ }
+ if !in.Type().HasShape() {
+ // Regular OCONVIFACE works if the destination isn't parameterized.
+ v = ir.NewConvExpr(pos, ir.OCONVIFACE, dst, v)
+ v.SetTypecheck(1)
+ return v
+ }
+
+ // We get the destination interface type from the dictionary and the concrete
+ // type from the argument's itab. Call runtime.convI2I to get the new itab.
+ tmp := typecheck.Temp(v.Type())
+ as := ir.NewAssignStmt(pos, tmp, v)
+ as.SetTypecheck(1)
+ itab := ir.NewUnaryExpr(pos, ir.OITAB, tmp)
+ typed(types.Types[types.TUINTPTR].PtrTo(), itab)
+ idata := ir.NewUnaryExpr(pos, ir.OIDATA, tmp)
+ typed(types.Types[types.TUNSAFEPTR], idata)
+
+ fn := typecheck.LookupRuntime("convI2I")
+ fn.SetTypecheck(1)
+ types.CalcSize(fn.Type())
+ call := ir.NewCallExpr(pos, ir.OCALLFUNC, fn, nil)
+ typed(types.Types[types.TUINT8].PtrTo(), call)
+ ix := findDictType(info, in.Type())
+ assert(ix >= 0)
+ inter := getDictionaryType(info, dictParam, pos, ix)
+ call.Args = []ir.Node{inter, itab}
+ i := ir.NewBinaryExpr(pos, ir.OEFACE, call, idata)
+ typed(dst, i)
+ i.PtrInit().Append(as)
+ return i
+ }
+
+ var rt ir.Node
+ if !dst.IsEmptyInterface() {
+ // We should have an itab entry in the dictionary. Using this itab
+ // will be more efficient than converting to an empty interface first
+ // and then type asserting to dst.
+ ix := -1
+ for i, ic := range info.dictInfo.itabConvs {
+ if ic == in {
+ ix = info.dictInfo.startItabConv + i
+ break
+ }
+ }
+ assert(ix >= 0)
+ rt = getDictionaryEntry(pos, dictParam, ix, info.dictInfo.dictLen)
+ } else {
+ ix := findDictType(info, v.Type())
+ assert(ix >= 0)
+ // Load the actual runtime._type of the type parameter from the dictionary.
+ rt = getDictionaryType(info, dictParam, pos, ix)
+ }
+
+ // Figure out what the data field of the interface will be.
+ data := ir.NewConvExpr(pos, ir.OCONVIDATA, nil, v)
+ typed(types.Types[types.TUNSAFEPTR], data)
+ data.NonEscaping = nonEscaping
+
+ // Build an interface from the type and data parts.
+ var i ir.Node = ir.NewBinaryExpr(pos, ir.OEFACE, rt, data)
+ typed(dst, i)
+ return i
+}
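+
+// In the common case of a non-interface source value, the nodes built above
+// assemble the interface directly; as a hypothetical source-level sketch:
+//
+//	rt := <dictionary entry ix>    // *runtime._type, or *runtime.itab for a nonempty dst
+//	data := <OCONVIDATA of v>      // unsafe.Pointer to v's data word
+//	return <OEFACE rt, data>       // the finished interface value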
+
+func (subst *subster) namelist(l []*ir.Name) []*ir.Name {
+ s := make([]*ir.Name, len(l))
+ for i, n := range l {
+ s[i] = subst.localvar(n)
+ }
+ return s
+}
+
+func (subst *subster) list(l []ir.Node) []ir.Node {
+ s := make([]ir.Node, len(l))
+ for i, n := range l {
+ s[i] = subst.node(n)
+ }
+ return s
+}
+
+// fields sets the Nname field for the Field nodes inside a type signature, based
+// on the corresponding in/out parameters in dcl. It depends on the in and out
+// parameters being in order in dcl.
+func (subst *subster) fields(class ir.Class, oldfields []*types.Field, dcl []*ir.Name) []*types.Field {
+ // Find the starting index in dcl of declarations of the class (either
+ // PPARAM or PPARAMOUT).
+ var i int
+ for i = range dcl {
+ if dcl[i].Class == class {
+ break
+ }
+ }
+
+ // Create newfields nodes that are copies of the oldfields nodes, but
+ // with substitution for any type params, and with Nname set to be the node in
+ // Dcl for the corresponding PPARAM or PPARAMOUT.
+ newfields := make([]*types.Field, len(oldfields))
+ for j := range oldfields {
+ newfields[j] = oldfields[j].Copy()
+ newfields[j].Type = subst.ts.Typ(oldfields[j].Type)
+ // A PPARAM field will be missing from dcl if its name is
+ // unspecified or specified as "_". So, we compare the dcl sym
+ // with the field sym (or sym of the field's Nname node). (Unnamed
+ // results still have a name like ~r2 in their Nname node.) If
+ // they don't match, this dcl (if there is one left) must apply to
+ // a later field.
+ if i < len(dcl) && (dcl[i].Sym() == oldfields[j].Sym ||
+ (oldfields[j].Nname != nil && dcl[i].Sym() == oldfields[j].Nname.Sym())) {
+ newfields[j].Nname = dcl[i]
+ i++
+ }
+ }
+ return newfields
+}
+
+// deref does a single deref of type t, if it is a pointer type.
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
+ }
+ return t
+}
+
+// markTypeUsed marks type t as used in order to help avoid dead-code elimination of
+// needed methods.
+func markTypeUsed(t *types.Type, lsym *obj.LSym) {
+ if t.IsInterface() {
+ return
+ }
+ // TODO: This is somewhat overkill, we really only need it
+ // for types that are put into interfaces.
+ // Note: this relocation is also used in cmd/link/internal/ld/dwarf.go
+ reflectdata.MarkTypeUsedInInterface(t, lsym)
+}
+
+// getDictionarySym returns the dictionary symbol for the named generic function gf, which
+// is instantiated with the type arguments targs.
+func (g *genInst) getDictionarySym(gf *ir.Name, targs []*types.Type, isMeth bool) *types.Sym {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Sym().Name)
+ }
+
+ // Enforce that only concrete types can make it to here.
+ for _, t := range targs {
+ if t.HasShape() {
+ panic(fmt.Sprintf("shape %+v in dictionary for %s", t, gf.Sym().Name))
+ }
+ }
+
+ // Get a symbol representing the dictionary.
+ sym := typecheck.MakeDictSym(gf.Sym(), targs, isMeth)
+
+ // Initialize the dictionary, if we haven't yet already.
+ lsym := sym.Linksym()
+ if len(lsym.P) > 0 {
+ // We already started creating this dictionary and its lsym.
+ return sym
+ }
+
+ infoPrint("=== Creating dictionary %v\n", sym.Name)
+ off := 0
+ // Emit an entry for each targ (concrete type or gcshape).
+ for _, t := range targs {
+ infoPrint(" * %v\n", t)
+ s := reflectdata.TypeLinksym(t)
+ off = objw.SymPtr(lsym, off, s, 0)
+ markTypeUsed(t, lsym)
+ }
+
+ instInfo := g.getInstantiation(gf, targs, isMeth)
+ info := instInfo.dictInfo
+
+ subst := typecheck.Tsubster{
+ Tparams: info.shapeParams,
+ Targs: targs,
+ }
+ // Emit an entry for each derived type (after substituting targs)
+ for _, t := range info.derivedTypes {
+ ts := subst.Typ(t)
+ infoPrint(" - %v\n", ts)
+ s := reflectdata.TypeLinksym(ts)
+ off = objw.SymPtr(lsym, off, s, 0)
+ markTypeUsed(ts, lsym)
+ }
+ // Emit an entry for each subdictionary (after substituting targs)
+ for _, subDictInfo := range info.subDictCalls {
+ var sym *types.Sym
+ n := subDictInfo.callNode
+ switch n.Op() {
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH:
+ call := n.(*ir.CallExpr)
+ if call.X.Op() == ir.OXDOT || call.X.Op() == ir.ODOTMETH {
+ var nameNode *ir.Name
+ se := call.X.(*ir.SelectorExpr)
+ if se.X.Type().IsShape() {
+ // This is a method call enabled by a type bound.
+ tparam := se.X.Type()
+ if call.X.Op() == ir.ODOTMETH {
+ // We need this extra check for method expressions,
+ // which don't add in the implicit XDOTs.
+ tmpse := ir.NewSelectorExpr(src.NoXPos, ir.OXDOT, se.X, se.Sel)
+ tmpse = typecheck.AddImplicitDots(tmpse)
+ tparam = tmpse.X.Type()
+ }
+ if !tparam.IsShape() {
+ // The method expression is not
+ // really on a typeparam.
+ break
+ }
+ ix := -1
+ for i, shape := range info.shapeParams {
+ if shape == tparam {
+ ix = i
+ break
+ }
+ }
+ assert(ix >= 0)
+ recvType := targs[ix]
+ if recvType.IsInterface() || len(recvType.RParams()) == 0 {
+ // No sub-dictionary entry is
+ // actually needed, since the
+ // type arg is not an
+ // instantiated type that
+ // will have generic methods.
+ break
+ }
+ // This is a method call for an
+ // instantiated type, so we need a
+ // sub-dictionary.
+ targs := recvType.RParams()
+ genRecvType := recvType.OrigType()
+ nameNode = typecheck.Lookdot1(call.X, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
+ sym = g.getDictionarySym(nameNode, targs, true)
+ } else {
+ // This is the case of a normal
+ // method call on a generic type.
+ assert(subDictInfo.savedXNode == se)
+ sym = g.getSymForMethodCall(se, &subst)
+ }
+ } else {
+ inst, ok := call.X.(*ir.InstExpr)
+ if ok {
+ // Code hasn't been transformed yet
+ assert(subDictInfo.savedXNode == inst)
+ }
+ // If !ok, then the generic method/function call has
+ // already been transformed to a shape instantiation
+ // call. Either way, use the SelectorExpr/InstExpr
+ // node saved in info.
+ cex := subDictInfo.savedXNode
+ if se, ok := cex.(*ir.SelectorExpr); ok {
+ sym = g.getSymForMethodCall(se, &subst)
+ } else {
+ inst := cex.(*ir.InstExpr)
+ nameNode := inst.X.(*ir.Name)
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, false)
+ }
+ }
+
+ case ir.OFUNCINST:
+ inst := n.(*ir.InstExpr)
+ nameNode := inst.X.(*ir.Name)
+ subtargs := typecheck.TypesOf(inst.Targs)
+ for i, t := range subtargs {
+ subtargs[i] = subst.Typ(t)
+ }
+ sym = g.getDictionarySym(nameNode, subtargs, false)
+
+ case ir.OXDOT, ir.OMETHEXPR, ir.OMETHVALUE:
+ sym = g.getSymForMethodCall(n.(*ir.SelectorExpr), &subst)
+
+ default:
+ assert(false)
+ }
+
+ if sym == nil {
+ // Unused sub-dictionary entry, just emit 0.
+ off = objw.Uintptr(lsym, off, 0)
+ infoPrint(" - Unused subdict entry\n")
+ } else {
+ off = objw.SymPtr(lsym, off, sym.Linksym(), 0)
+ infoPrint(" - Subdict %v\n", sym.Name)
+ }
+ }
+
+ g.instantiateMethods()
+ delay := &delayInfo{
+ gf: gf,
+ targs: targs,
+ sym: sym,
+ off: off,
+ isMeth: isMeth,
+ }
+ g.dictSymsToFinalize = append(g.dictSymsToFinalize, delay)
+ return sym
+}
+
+// getSymForMethodCall gets the dictionary sym for a method call, method value, or method
+// expression that has selector se. subst gives the substitution from shape types to
+// concrete types.
+func (g *genInst) getSymForMethodCall(se *ir.SelectorExpr, subst *typecheck.Tsubster) *types.Sym {
+ // For everything except method expressions, 'recvType = deref(se.X.Type)' would
+ // also give the receiver type. For method expressions with embedded types, we
+ // need to look at the type of the selection to get the final receiver type.
+ recvType := deref(se.Selection.Type.Recv().Type)
+ genRecvType := recvType.OrigType()
+ nameNode := typecheck.Lookdot1(se, se.Sel, genRecvType, genRecvType.Methods(), 1).Nname.(*ir.Name)
+ subtargs := recvType.RParams()
+ s2targs := make([]*types.Type, len(subtargs))
+ for i, t := range subtargs {
+ s2targs[i] = subst.Typ(t)
+ }
+ return g.getDictionarySym(nameNode, s2targs, true)
+}
+
+// finalizeSyms finishes up all dictionaries on g.dictSymsToFinalize, by writing out
+// any needed LSyms for itabs. The itab lsyms create wrappers which need various
+// dictionaries and method instantiations to be complete, so, to avoid recursive
+// dependencies, we finalize the itab lsyms only after all dictionary syms and
+// instantiations have been created.
+func (g *genInst) finalizeSyms() {
+ for _, d := range g.dictSymsToFinalize {
+ infoPrint("=== Finalizing dictionary %s\n", d.sym.Name)
+
+ lsym := d.sym.Linksym()
+ instInfo := g.getInstantiation(d.gf, d.targs, d.isMeth)
+ info := instInfo.dictInfo
+
+ subst := typecheck.Tsubster{
+ Tparams: info.shapeParams,
+ Targs: d.targs,
+ }
+
+ // Emit an entry for each itab
+ for _, n := range info.itabConvs {
+ var srctype, dsttype *types.Type
+ switch n.Op() {
+ case ir.OXDOT, ir.OMETHVALUE:
+ se := n.(*ir.SelectorExpr)
+ srctype = subst.Typ(se.X.Type())
+ dsttype = subst.Typ(info.shapeToBound[se.X.Type()])
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ srctype = subst.Typ(n.(*ir.TypeAssertExpr).Type())
+ dsttype = subst.Typ(n.(*ir.TypeAssertExpr).X.Type())
+ case ir.OCONVIFACE:
+ srctype = subst.Typ(n.(*ir.ConvExpr).X.Type())
+ dsttype = subst.Typ(n.Type())
+ case ir.OTYPE:
+ srctype = subst.Typ(n.Type())
+ dsttype = subst.Typ(info.type2switchType[n])
+ default:
+ base.Fatalf("itab entry with unknown op %s", n.Op())
+ }
+ if srctype.IsInterface() || dsttype.IsEmptyInterface() {
+ // No itab is wanted if src type is an interface. We
+ // will use a type assert instead.
+ d.off = objw.Uintptr(lsym, d.off, 0)
+ infoPrint(" + Unused itab entry for %v\n", srctype)
+ } else {
+ // Make sure all new fully-instantiated types have
+ // their methods created before generating any itabs.
+ g.instantiateMethods()
+ itabLsym := reflectdata.ITabLsym(srctype, dsttype)
+ d.off = objw.SymPtr(lsym, d.off, itabLsym, 0)
+ markTypeUsed(srctype, lsym)
+ infoPrint(" + Itab for (%v,%v)\n", srctype, dsttype)
+ }
+ }
+
+ objw.Global(lsym, int32(d.off), obj.DUPOK|obj.RODATA)
+ infoPrint("=== Finalized dictionary %s\n", d.sym.Name)
+ }
+ g.dictSymsToFinalize = nil
+}
+
+func (g *genInst) getDictionaryValue(pos src.XPos, gf *ir.Name, targs []*types.Type, isMeth bool) ir.Node {
+ sym := g.getDictionarySym(gf, targs, isMeth)
+
+ // Make (or reuse) a node referencing the dictionary symbol.
+ var n *ir.Name
+ if sym.Def != nil {
+ n = sym.Def.(*ir.Name)
+ } else {
+ // We set the position of a static dictionary to be the position of
+ // one of its uses.
+ n = ir.NewNameAt(pos, sym)
+ n.Curfn = ir.CurFunc
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+ }
+
+ // Return the address of the dictionary. Addr node gets position that was passed in.
+ np := typecheck.NodAddrAt(pos, n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ // TODO: use a cast, or is typing directly ok?
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
+
+// hasShapeNodes returns true if the type of any node in targs has a shape.
+func hasShapeNodes(targs []ir.Node) bool {
+ for _, n := range targs {
+ if n.Type().HasShape() {
+ return true
+ }
+ }
+ return false
+}
+
+// hasShapeTypes returns true if any type in targs has a shape.
+func hasShapeTypes(targs []*types.Type) bool {
+ for _, t := range targs {
+ if t.HasShape() {
+ return true
+ }
+ }
+ return false
+}
+
+// getInstInfo gets the dictionary format for a function instantiation: type params, derived
+// types, and needed subdictionaries and itabs.
+func (g *genInst) getInstInfo(st *ir.Func, shapes []*types.Type, instInfo *instInfo) {
+ info := instInfo.dictInfo
+ info.shapeParams = shapes
+
+ for _, t := range info.shapeParams {
+ b := info.shapeToBound[t]
+ if b.HasShape() {
+ // If a type bound is parameterized (unusual case), then we
+ // may need its derived type to do a type assert when doing a
+ // bound call for a type arg that is an interface.
+ addType(info, nil, b)
+ }
+ }
+
+ for _, n := range st.Dcl {
+ addType(info, n, n.Type())
+ n.DictIndex = uint16(findDictType(instInfo, n.Type()) + 1)
+ }
+
+ if infoPrintMode {
+ fmt.Printf(">>> InstInfo for %v\n", st)
+ for _, t := range info.shapeParams {
+ fmt.Printf(" Typeparam %v\n", t)
+ }
+ }
+
+ // Map to remember when we have seen an instantiated function value or method
+ // expression/value as part of a call, so we can determine when we encounter
+ // an uncalled function value or method expression/value.
+ callMap := make(map[ir.Node]bool)
+
+ var visitFunc func(ir.Node)
+ visitFunc = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OFUNCINST:
+ if !callMap[n] && hasShapeNodes(n.(*ir.InstExpr).Targs) {
+ infoPrint(" Closure&subdictionary required at generic function value %v\n", n.(*ir.InstExpr).X)
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
+ }
+ case ir.OMETHEXPR, ir.OMETHVALUE:
+ if !callMap[n] && !types.IsInterfaceMethod(n.(*ir.SelectorExpr).Selection.Type) &&
+ len(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) > 0 &&
+ hasShapeTypes(deref(n.(*ir.SelectorExpr).X.Type()).RParams()) {
+ if n.(*ir.SelectorExpr).X.Op() == ir.OTYPE {
+ infoPrint(" Closure&subdictionary required at generic meth expr %v\n", n)
+ } else {
+ infoPrint(" Closure&subdictionary required at generic meth value %v\n", n)
+ }
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
+ }
+ case ir.OCALL:
+ ce := n.(*ir.CallExpr)
+ if ce.X.Op() == ir.OFUNCINST {
+ callMap[ce.X] = true
+ if hasShapeNodes(ce.X.(*ir.InstExpr).Targs) {
+ infoPrint(" Subdictionary at generic function/method call: %v - %v\n", ce.X.(*ir.InstExpr).X, n)
+ // Save the instExpr node for the function call,
+ // since we will lose this information when the
+ // generic function call is transformed to a call
+ // on the shape instantiation.
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: ce.X})
+ }
+ }
+ // Note: this XDOT code is not actually needed as long as we
+ // continue to disable type parameters on RHS of type
+ // declarations (#45639).
+ if ce.X.Op() == ir.OXDOT {
+ callMap[ce.X] = true
+ if isBoundMethod(info, ce.X.(*ir.SelectorExpr)) {
+ infoPrint(" Optional subdictionary at generic bound call: %v\n", n)
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: nil})
+ }
+ }
+ case ir.OCALLMETH:
+ ce := n.(*ir.CallExpr)
+ if ce.X.Op() == ir.ODOTMETH &&
+ len(deref(ce.X.(*ir.SelectorExpr).X.Type()).RParams()) > 0 {
+ callMap[ce.X] = true
+ if hasShapeTypes(deref(ce.X.(*ir.SelectorExpr).X.Type()).RParams()) {
+ infoPrint(" Subdictionary at generic method call: %v\n", n)
+ // Save the selector for the method call, since we
+ // will eventually lose this information when the
+ // generic method call is transformed into a
+ // function call on the method shape instantiation.
+ info.subDictCalls = append(info.subDictCalls, subDictInfo{callNode: n, savedXNode: ce.X})
+ }
+ }
+ case ir.OCONVIFACE:
+ if n.Type().IsInterface() && !n.Type().IsEmptyInterface() &&
+ (n.Type().HasShape() || n.(*ir.ConvExpr).X.Type().HasShape()) {
+ infoPrint(" Itab for interface conv: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ case ir.OXDOT:
+ se := n.(*ir.SelectorExpr)
+ if isBoundMethod(info, se) {
+ infoPrint(" Itab for bound call: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ if !n.(*ir.TypeAssertExpr).Type().IsInterface() && !n.(*ir.TypeAssertExpr).X.Type().IsEmptyInterface() {
+ infoPrint(" Itab for dot type: %v\n", n)
+ info.itabConvs = append(info.itabConvs, n)
+ }
+ case ir.OCLOSURE:
+ // Visit the closure body and add all relevant entries to the
+ // dictionary of the outer function (closure will just use
+ // the dictionary of the outer function).
+ cfunc := n.(*ir.ClosureExpr).Func
+ for _, n1 := range cfunc.Body {
+ ir.Visit(n1, visitFunc)
+ }
+ for _, n := range cfunc.Dcl {
+ n.DictIndex = uint16(findDictType(instInfo, n.Type()) + 1)
+ }
+ case ir.OSWITCH:
+ ss := n.(*ir.SwitchStmt)
+ if ss.Tag != nil && ss.Tag.Op() == ir.OTYPESW &&
+ !ss.Tag.(*ir.TypeSwitchGuard).X.Type().IsEmptyInterface() {
+ for _, cc := range ss.Cases {
+ for _, c := range cc.List {
+ if c.Op() == ir.OTYPE && c.Type().HasShape() {
+ // Type switch from a non-empty interface - might need an itab.
+ infoPrint(" Itab for type switch: %v\n", c)
+ info.itabConvs = append(info.itabConvs, c)
+ if info.type2switchType == nil {
+ info.type2switchType = map[ir.Node]*types.Type{}
+ }
+ info.type2switchType[c] = ss.Tag.(*ir.TypeSwitchGuard).X.Type()
+ }
+ }
+ }
+ }
+ }
+ addType(info, n, n.Type())
+ }
+
+ for _, stmt := range st.Body {
+ ir.Visit(stmt, visitFunc)
+ }
+ if infoPrintMode {
+ for _, t := range info.derivedTypes {
+ fmt.Printf(" Derived type %v\n", t)
+ }
+ fmt.Printf(">>> Done Instinfo\n")
+ }
+ info.startSubDict = len(info.shapeParams) + len(info.derivedTypes)
+ info.startItabConv = len(info.shapeParams) + len(info.derivedTypes) + len(info.subDictCalls)
+ info.dictLen = len(info.shapeParams) + len(info.derivedTypes) + len(info.subDictCalls) + len(info.itabConvs)
+}
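+
+// Putting the offsets above together, a dictionary is an array of
+// uintptr-sized words laid out as follows (a sketch of the format written out
+// by getDictionarySym and finalizeSyms):
+//
+//	[0, len(shapeParams))             *runtime._type of each type argument
+//	[len(shapeParams), startSubDict)  *runtime._type of each derived type
+//	[startSubDict, startItabConv)     sub-dictionary pointers (0 if unused)
+//	[startItabConv, dictLen)          *runtime.itab entries (0 if unused)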
+
+// isBoundMethod returns true if the selection indicated by se is a bound method of
+// se.X. se.X must be a shape type (i.e. substituted directly from a type param). If
+// isBoundMethod returns false, then the selection must be a field access of a
+// structural type.
+func isBoundMethod(info *dictInfo, se *ir.SelectorExpr) bool {
+ bound := info.shapeToBound[se.X.Type()]
+ return typecheck.Lookdot1(se, se.Sel, bound, bound.AllMethods(), 1) != nil
+}
+
+// addType adds t to info.derivedTypes if it is a parameterized type (which is not
+// just a simple shape) that is different from any existing type on
+// info.derivedTypes.
+func addType(info *dictInfo, n ir.Node, t *types.Type) {
+ if t == nil || !t.HasShape() {
+ return
+ }
+ if t.IsShape() {
+ return
+ }
+ if t.Kind() == types.TFUNC && n != nil &&
+ (t.Recv() != nil || n.Op() == ir.ONAME && n.Name().Class == ir.PFUNC) {
+ // Don't use the type of a named generic function or method,
+ // since that is parameterized by other typeparams.
+ // (They all come from arguments of a FUNCINST node.)
+ return
+ }
+ if doubleCheck && !parameterizedBy(t, info.shapeParams) {
+ base.Fatalf("adding type with invalid parameters %+v", t)
+ }
+ if t.Kind() == types.TSTRUCT && t.IsFuncArgStruct() {
+ // Multiple return values are not a relevant new type (?).
+ return
+ }
+ // Ignore a derived type we've already added.
+ for _, et := range info.derivedTypes {
+ if types.IdenticalStrict(t, et) {
+ return
+ }
+ }
+ info.derivedTypes = append(info.derivedTypes, t)
+}
+
+// parameterizedBy returns true if t is parameterized by (at most) params.
+func parameterizedBy(t *types.Type, params []*types.Type) bool {
+ return parameterizedBy1(t, params, map[*types.Type]bool{})
+}
+func parameterizedBy1(t *types.Type, params []*types.Type, visited map[*types.Type]bool) bool {
+ if visited[t] {
+ return true
+ }
+ visited[t] = true
+
+ if t.Sym() != nil && len(t.RParams()) > 0 {
+ // This defined type is instantiated. Check the instantiating types.
+ for _, r := range t.RParams() {
+ if !parameterizedBy1(r, params, visited) {
+ return false
+ }
+ }
+ return true
+ }
+ if t.IsShape() {
+ // Check if t is one of the allowed parameters in scope.
+ for _, p := range params {
+ if p == t {
+ return true
+ }
+ }
+ // Couldn't find t in the list of allowed parameters.
+ return false
+
+ }
+ switch t.Kind() {
+ case types.TARRAY, types.TPTR, types.TSLICE, types.TCHAN:
+ return parameterizedBy1(t.Elem(), params, visited)
+
+ case types.TMAP:
+ return parameterizedBy1(t.Key(), params, visited) && parameterizedBy1(t.Elem(), params, visited)
+
+ case types.TFUNC:
+ return parameterizedBy1(t.TParams(), params, visited) && parameterizedBy1(t.Recvs(), params, visited) && parameterizedBy1(t.Params(), params, visited) && parameterizedBy1(t.Results(), params, visited)
+
+ case types.TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ if !parameterizedBy1(f.Type, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ case types.TINTER:
+ for _, f := range t.Methods().Slice() {
+ if !parameterizedBy1(f.Type, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64,
+ types.TUINTPTR, types.TBOOL, types.TSTRING, types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, types.TUNSAFEPTR:
+ return true
+
+ case types.TUNION:
+ for i := 0; i < t.NumTerms(); i++ {
+ tt, _ := t.Term(i)
+ if !parameterizedBy1(tt, params, visited) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ base.Fatalf("bad type kind %+v", t)
+ return true
+ }
+}
+
+// startClosure starts creation of a closure that has the function type typ. It
+// creates all the formal params and results according to the type typ. On return,
+// the body and closure variables of the closure must still be filled in, and
+// ir.UseClosure() called.
+func startClosure(pos src.XPos, outer *ir.Func, typ *types.Type) (*ir.Func, []*types.Field, []*types.Field) {
+ // Make a new internal function.
+ fn := ir.NewClosureFunc(pos, outer != nil)
+ ir.NameClosure(fn.OClosure, outer)
+
+ // Build formal argument and return lists.
+ var formalParams []*types.Field // arguments of closure
+ var formalResults []*types.Field // returns of closure
+ for i := 0; i < typ.NumParams(); i++ {
+ t := typ.Params().Field(i).Type
+ arg := ir.NewNameAt(pos, typecheck.LookupNum("a", i))
+ if outer != nil {
+ arg.SetSym(outer.Sym().Pkg.Lookup(arg.Sym().Name))
+ }
+ arg.Class = ir.PPARAM
+ typed(t, arg)
+ arg.Curfn = fn
+ fn.Dcl = append(fn.Dcl, arg)
+ f := types.NewField(pos, arg.Sym(), t)
+ f.Nname = arg
+ f.SetIsDDD(typ.Params().Field(i).IsDDD())
+ formalParams = append(formalParams, f)
+ }
+ for i := 0; i < typ.NumResults(); i++ {
+ t := typ.Results().Field(i).Type
+ result := ir.NewNameAt(pos, typecheck.LookupNum("r", i)) // TODO: names not needed?
+ if outer != nil {
+ result.SetSym(outer.Sym().Pkg.Lookup(result.Sym().Name))
+ }
+ result.Class = ir.PPARAMOUT
+ typed(t, result)
+ result.Curfn = fn
+ fn.Dcl = append(fn.Dcl, result)
+ f := types.NewField(pos, result.Sym(), t)
+ f.Nname = result
+ formalResults = append(formalResults, f)
+ }
+
+ // Build an internal function with the right signature.
+ closureType := types.NewSignature(typ.Pkg(), nil, nil, formalParams, formalResults)
+ typed(closureType, fn.Nname)
+ typed(typ, fn.OClosure)
+ fn.SetTypecheck(1)
+ return fn, formalParams, formalResults
+
+}
+
+// assertToBound returns a new node that converts a node rcvr with interface type to
+// the 'dst' interface type.
+func assertToBound(info *instInfo, dictVar *ir.Name, pos src.XPos, rcvr ir.Node, dst *types.Type) ir.Node {
+ if dst.HasShape() {
+ ix := findDictType(info, dst)
+ assert(ix >= 0)
+ rt := getDictionaryType(info, dictVar, pos, ix)
+ rcvr = ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, rcvr, rt)
+ typed(dst, rcvr)
+ } else {
+ rcvr = ir.NewTypeAssertExpr(pos, rcvr, nil)
+ typed(dst, rcvr)
+ }
+ return rcvr
+}
+
+// buildClosure2 makes a closure to implement a method expression m (generic form x)
+// which has a shape type as receiver. If the receiver is exactly a shape (i.e. from
+// a typeparam), then the body of the closure converts m.X (the receiver) to the
+// interface bound type, and makes an interface call with the remaining arguments.
+//
+// The returned closure is fully substituted and has already had any needed
+// transformations done.
+func (g *genInst) buildClosure2(info *instInfo, m ir.Node) ir.Node {
+ outer := info.fun
+ pos := m.Pos()
+ typ := m.Type() // type of the closure
+
+ fn, formalParams, formalResults := startClosure(pos, outer, typ)
+
+ // Capture dictionary calculated in the outer function
+ dictVar := ir.CaptureName(pos, fn, info.dictParam)
+ typed(types.Types[types.TUINTPTR], dictVar)
+
+ // Build arguments to call inside the closure.
+ var args []ir.Node
+ for i := 0; i < typ.NumParams(); i++ {
+ args = append(args, formalParams[i].Nname.(*ir.Name))
+ }
+
+ // Build call itself. This involves converting the first argument to the
+ // bound type (an interface) using the dictionary, and then making an
+ // interface call with the remaining arguments.
+ var innerCall ir.Node
+ rcvr := args[0]
+ args = args[1:]
+ assert(m.(*ir.SelectorExpr).X.Type().IsShape())
+ dst := info.dictInfo.shapeToBound[m.(*ir.SelectorExpr).X.Type()]
+ if m.(*ir.SelectorExpr).X.Type().IsInterface() {
+ // If type arg is an interface (unusual case), we do a type assert to
+ // the type bound.
+ rcvr = assertToBound(info, dictVar, pos, rcvr, dst)
+ } else {
+ rcvr = convertUsingDictionary(info, dictVar, pos, rcvr, m, dst, false)
+ }
+ dot := ir.NewSelectorExpr(pos, ir.ODOTINTER, rcvr, m.(*ir.SelectorExpr).Sel)
+ dot.Selection = typecheck.Lookdot1(dot, dot.Sel, dot.X.Type(), dot.X.Type().AllMethods(), 1)
+
+ typed(dot.Selection.Type, dot)
+ innerCall = ir.NewCallExpr(pos, ir.OCALLINTER, dot, args)
+ t := m.Type()
+ if t.NumResults() == 0 {
+ innerCall.SetTypecheck(1)
+ } else if t.NumResults() == 1 {
+ typed(t.Results().Field(0).Type, innerCall)
+ } else {
+ typed(t.Results(), innerCall)
+ }
+ if len(formalResults) > 0 {
+ innerCall = ir.NewReturnStmt(pos, []ir.Node{innerCall})
+ innerCall.SetTypecheck(1)
+ }
+ fn.Body = []ir.Node{innerCall}
+
+ // We're all done with the captured dictionary
+ ir.FinishCaptureNames(pos, outer, fn)
+
+ // Do final checks on closure and return it.
+ return ir.UseClosure(fn.OClosure, typecheck.Target)
+}
diff --git a/src/cmd/compile/internal/noder/stmt.go b/src/cmd/compile/internal/noder/stmt.go
new file mode 100644
index 0000000..a349a7e
--- /dev/null
+++ b/src/cmd/compile/internal/noder/stmt.go
@@ -0,0 +1,353 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// stmts creates nodes for a slice of statements that form a scope.
+func (g *irgen) stmts(stmts []syntax.Stmt) []ir.Node {
+ var nodes []ir.Node
+ types.Markdcl()
+ for _, stmt := range stmts {
+ switch s := g.stmt(stmt).(type) {
+ case nil: // EmptyStmt
+ case *ir.BlockStmt:
+ nodes = append(nodes, s.List...)
+ default:
+ nodes = append(nodes, s)
+ }
+ }
+ types.Popdcl()
+ return nodes
+}
+
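+// stmt converts a single syntax statement to its IR form. It returns nil for
+// nil and empty statements.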
+func (g *irgen) stmt(stmt syntax.Stmt) ir.Node {
+ base.Assert(g.exprStmtOK)
+ switch stmt := stmt.(type) {
+ case nil, *syntax.EmptyStmt:
+ return nil
+ case *syntax.LabeledStmt:
+ return g.labeledStmt(stmt)
+ case *syntax.BlockStmt:
+ return ir.NewBlockStmt(g.pos(stmt), g.blockStmt(stmt))
+ case *syntax.ExprStmt:
+ return wrapname(g.pos(stmt.X), g.expr(stmt.X))
+ case *syntax.SendStmt:
+ n := ir.NewSendStmt(g.pos(stmt), g.expr(stmt.Chan), g.expr(stmt.Value))
+ if !g.delayTransform() {
+ transformSend(n)
+ }
+ n.SetTypecheck(1)
+ return n
+ case *syntax.DeclStmt:
+ if g.topFuncIsGeneric && len(stmt.DeclList) > 0 {
+ if _, ok := stmt.DeclList[0].(*syntax.TypeDecl); ok {
+ // TODO: remove this restriction. See issue 47631.
+ base.ErrorfAt(g.pos(stmt), "type declarations inside generic functions are not currently supported")
+ }
+ }
+ n := ir.NewBlockStmt(g.pos(stmt), nil)
+ g.decls(&n.List, stmt.DeclList)
+ return n
+
+ case *syntax.AssignStmt:
+ if stmt.Op != 0 && stmt.Op != syntax.Def {
+ op := g.op(stmt.Op, binOps[:])
+ var n *ir.AssignOpStmt
+ if stmt.Rhs == nil {
+ n = IncDec(g.pos(stmt), op, g.expr(stmt.Lhs))
+ } else {
+ // Eval rhs before lhs, for compatibility with noder1
+ rhs := g.expr(stmt.Rhs)
+ lhs := g.expr(stmt.Lhs)
+ n = ir.NewAssignOpStmt(g.pos(stmt), op, lhs, rhs)
+ }
+ if !g.delayTransform() {
+ transformAsOp(n)
+ }
+ n.SetTypecheck(1)
+ return n
+ }
+
+ // Eval rhs before lhs, for compatibility with noder1
+ rhs := g.exprList(stmt.Rhs)
+ names, lhs := g.assignList(stmt.Lhs, stmt.Op == syntax.Def)
+
+ if len(lhs) == 1 && len(rhs) == 1 {
+ n := ir.NewAssignStmt(g.pos(stmt), lhs[0], rhs[0])
+ n.Def = initDefn(n, names)
+
+ if !g.delayTransform() {
+ lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
+ transformAssign(n, lhs, rhs)
+ n.X, n.Y = lhs[0], rhs[0]
+ }
+ n.SetTypecheck(1)
+ return n
+ }
+
+ n := ir.NewAssignListStmt(g.pos(stmt), ir.OAS2, lhs, rhs)
+ n.Def = initDefn(n, names)
+ if !g.delayTransform() {
+ transformAssign(n, n.Lhs, n.Rhs)
+ }
+ n.SetTypecheck(1)
+ return n
+
+ case *syntax.BranchStmt:
+ return ir.NewBranchStmt(g.pos(stmt), g.tokOp(int(stmt.Tok), branchOps[:]), g.name(stmt.Label))
+ case *syntax.CallStmt:
+ return ir.NewGoDeferStmt(g.pos(stmt), g.tokOp(int(stmt.Tok), callOps[:]), g.expr(stmt.Call))
+ case *syntax.ReturnStmt:
+ n := ir.NewReturnStmt(g.pos(stmt), g.exprList(stmt.Results))
+ if !g.delayTransform() {
+ transformReturn(n)
+ }
+ n.SetTypecheck(1)
+ return n
+ case *syntax.IfStmt:
+ return g.ifStmt(stmt)
+ case *syntax.ForStmt:
+ return g.forStmt(stmt)
+ case *syntax.SelectStmt:
+ n := g.selectStmt(stmt)
+
+ if !g.delayTransform() {
+ transformSelect(n.(*ir.SelectStmt))
+ }
+ n.SetTypecheck(1)
+ return n
+ case *syntax.SwitchStmt:
+ return g.switchStmt(stmt)
+
+ default:
+ g.unhandled("statement", stmt)
+ panic("unreachable")
+ }
+}
+
+// TODO(mdempsky): Investigate replacing with switch statements or dense arrays.
+
+var branchOps = [...]ir.Op{
+ syntax.Break: ir.OBREAK,
+ syntax.Continue: ir.OCONTINUE,
+ syntax.Fallthrough: ir.OFALL,
+ syntax.Goto: ir.OGOTO,
+}
+
+var callOps = [...]ir.Op{
+ syntax.Defer: ir.ODEFER,
+ syntax.Go: ir.OGO,
+}
+
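+// tokOp maps a statement token (e.g. break or go) to its ir.Op, using ops as a
+// lookup table indexed by the token value.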
+func (g *irgen) tokOp(tok int, ops []ir.Op) ir.Op {
+ // TODO(mdempsky): Validate.
+ return ops[tok]
+}
+
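+// op maps a syntax.Operator to its ir.Op, using ops as a lookup table indexed
+// by the operator value.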
+func (g *irgen) op(op syntax.Operator, ops []ir.Op) ir.Op {
+ // TODO(mdempsky): Validate.
+ return ops[op]
+}
+
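+// assignList lowers the left-hand side of an assignment. If def is true (a :=
+// assignment), any newly defined names are declared and returned along with the
+// lowered expressions; blank identifiers become ir.BlankNode, and names that
+// merely reuse an existing object are not re-declared.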
+func (g *irgen) assignList(expr syntax.Expr, def bool) ([]*ir.Name, []ir.Node) {
+ if !def {
+ return nil, g.exprList(expr)
+ }
+
+ var exprs []syntax.Expr
+ if list, ok := expr.(*syntax.ListExpr); ok {
+ exprs = list.ElemList
+ } else {
+ exprs = []syntax.Expr{expr}
+ }
+
+ var names []*ir.Name
+ res := make([]ir.Node, len(exprs))
+ for i, expr := range exprs {
+ expr := expr.(*syntax.Name)
+ if expr.Value == "_" {
+ res[i] = ir.BlankNode
+ continue
+ }
+
+ if obj, ok := g.info.Uses[expr]; ok {
+ res[i] = g.obj(obj)
+ continue
+ }
+
+ name, _ := g.def(expr)
+ names = append(names, name)
+ res[i] = name
+ }
+
+ return names, res
+}
+
+// initDefn marks the given names as declared by defn and populates
+// its Init field with ODCL nodes. It then reports whether any names
+// were so declared, which can be used to initialize defn.Def.
+func initDefn(defn ir.InitNode, names []*ir.Name) bool {
+ if len(names) == 0 {
+ return false
+ }
+
+ init := make([]ir.Node, len(names))
+ for i, name := range names {
+ name.Defn = defn
+ init[i] = ir.NewDecl(name.Pos(), ir.ODCL, name)
+ }
+ defn.SetInit(init)
+ return true
+}
+
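+// blockStmt lowers the statements of a block; g.stmts handles opening and
+// closing the enclosing scope.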
+func (g *irgen) blockStmt(stmt *syntax.BlockStmt) []ir.Node {
+ return g.stmts(stmt.List)
+}
+
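+// ifStmt lowers an if statement, flattening a block in the else position into
+// n.Else and attaching any init statement via g.init.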
+func (g *irgen) ifStmt(stmt *syntax.IfStmt) ir.Node {
+ init := g.stmt(stmt.Init)
+ n := ir.NewIfStmt(g.pos(stmt), g.expr(stmt.Cond), g.blockStmt(stmt.Then), nil)
+ if stmt.Else != nil {
+ e := g.stmt(stmt.Else)
+ if e.Op() == ir.OBLOCK {
+ e := e.(*ir.BlockStmt)
+ n.Else = e.List
+ } else {
+ n.Else = []ir.Node{e}
+ }
+ }
+ return g.init(init, n)
+}
+
+// unpackTwo returns the first two nodes in list. If list has fewer
+// than 2 nodes, then the missing nodes are replaced with nils.
+func unpackTwo(list []ir.Node) (fst, snd ir.Node) {
+ switch len(list) {
+ case 0:
+ return nil, nil
+ case 1:
+ return list[0], nil
+ default:
+ return list[0], list[1]
+ }
+}
+
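+// forStmt lowers a for statement. A range clause becomes an ir.RangeStmt (with
+// assignment checks on the key and value); otherwise an ordinary ir.ForStmt is
+// built from the init, condition, post, and body.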
+func (g *irgen) forStmt(stmt *syntax.ForStmt) ir.Node {
+ if r, ok := stmt.Init.(*syntax.RangeClause); ok {
+ names, lhs := g.assignList(r.Lhs, r.Def)
+ key, value := unpackTwo(lhs)
+ n := ir.NewRangeStmt(g.pos(r), key, value, g.expr(r.X), g.blockStmt(stmt.Body))
+ n.Def = initDefn(n, names)
+ if key != nil {
+ transformCheckAssign(n, key)
+ }
+ if value != nil {
+ transformCheckAssign(n, value)
+ }
+ return n
+ }
+
+ return ir.NewForStmt(g.pos(stmt), g.stmt(stmt.Init), g.expr(stmt.Cond), g.stmt(stmt.Post), g.blockStmt(stmt.Body))
+}
+
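+// selectStmt lowers a select statement, building one ir.CommClause per case.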
+func (g *irgen) selectStmt(stmt *syntax.SelectStmt) ir.Node {
+ body := make([]*ir.CommClause, len(stmt.Body))
+ for i, clause := range stmt.Body {
+ body[i] = ir.NewCommStmt(g.pos(clause), g.stmt(clause.Comm), g.stmts(clause.Body))
+ }
+ return ir.NewSelectStmt(g.pos(stmt), body)
+}
+
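+// switchStmt lowers an expression or type switch, wiring up the implicit
+// per-clause variable of a type switch guard, if any.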
+func (g *irgen) switchStmt(stmt *syntax.SwitchStmt) ir.Node {
+ pos := g.pos(stmt)
+ init := g.stmt(stmt.Init)
+
+ var expr ir.Node
+ switch tag := stmt.Tag.(type) {
+ case *syntax.TypeSwitchGuard:
+ var ident *ir.Ident
+ if tag.Lhs != nil {
+ ident = ir.NewIdent(g.pos(tag.Lhs), g.name(tag.Lhs))
+ }
+ expr = ir.NewTypeSwitchGuard(pos, ident, g.expr(tag.X))
+ default:
+ expr = g.expr(tag)
+ }
+
+ body := make([]*ir.CaseClause, len(stmt.Body))
+ for i, clause := range stmt.Body {
+ // Check for an implicit clause variable before
+ // visiting body, because it may contain function
+ // literals that reference it, and then it'll be
+ // associated to the wrong function.
+ //
+ // Also, override its position to the clause's colon, so that
+ // dwarfgen can find the right scope for it later.
+ // TODO(mdempsky): We should probably just store the scope
+ // directly in the ir.Name.
+ var cv *ir.Name
+ if obj, ok := g.info.Implicits[clause]; ok {
+ cv = g.obj(obj)
+ cv.SetPos(g.makeXPos(clause.Colon))
+ assert(expr.Op() == ir.OTYPESW)
+ cv.Defn = expr
+ }
+ body[i] = ir.NewCaseStmt(g.pos(clause), g.exprList(clause.Cases), g.stmts(clause.Body))
+ body[i].Var = cv
+ }
+
+ return g.init(init, ir.NewSwitchStmt(pos, expr, body))
+}
+
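+// labeledStmt lowers a labeled statement. The label is also attached directly
+// to the underlying for/range/select/switch statement, and the label and the
+// labeled statement are returned together as a block.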
+func (g *irgen) labeledStmt(label *syntax.LabeledStmt) ir.Node {
+ sym := g.name(label.Label)
+ lhs := ir.NewLabelStmt(g.pos(label), sym)
+ ls := g.stmt(label.Stmt)
+
+ // Attach label directly to control statement too.
+ switch ls := ls.(type) {
+ case *ir.ForStmt:
+ ls.Label = sym
+ case *ir.RangeStmt:
+ ls.Label = sym
+ case *ir.SelectStmt:
+ ls.Label = sym
+ case *ir.SwitchStmt:
+ ls.Label = sym
+ }
+
+ l := []ir.Node{lhs}
+ if ls != nil {
+ if ls.Op() == ir.OBLOCK {
+ ls := ls.(*ir.BlockStmt)
+ l = append(l, ls.List...)
+ } else {
+ l = append(l, ls)
+ }
+ }
+ return ir.NewBlockStmt(src.NoXPos, l)
+}
+
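+// init attaches init, if non-nil, as the sole init node of stmt and returns stmt.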
+func (g *irgen) init(init ir.Node, stmt ir.InitNode) ir.InitNode {
+ if init != nil {
+ stmt.SetInit([]ir.Node{init})
+ }
+ return stmt
+}
+
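+// name returns the types.Sym for a syntax.Name, or nil if name is nil.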
+func (g *irgen) name(name *syntax.Name) *types.Sym {
+ if name == nil {
+ return nil
+ }
+ return typecheck.Lookup(name.Value)
+}
diff --git a/src/cmd/compile/internal/noder/sync.go b/src/cmd/compile/internal/noder/sync.go
new file mode 100644
index 0000000..7af558f
--- /dev/null
+++ b/src/cmd/compile/internal/noder/sync.go
@@ -0,0 +1,187 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "strings"
+)
+
+// enableSync controls whether sync markers are written into unified
+// IR's export data format and also whether they're expected when
+// reading them back in. They're inessential to the correct
+// functioning of unified IR, but are helpful during development to
+// detect mistakes.
+//
+// When sync is enabled, writer stack frames will also be included in
+// the export data. Currently, a fixed number of frames are included,
+// controlled by -d=syncframes (default 0).
+const enableSync = true
+
+// fmtFrames formats a backtrace for reporting reader/writer desyncs.
+func fmtFrames(pcs ...uintptr) []string {
+ res := make([]string, 0, len(pcs))
+ walkFrames(pcs, func(file string, line int, name string, offset uintptr) {
+ // Trim package from function name. It's just redundant noise.
+ name = strings.TrimPrefix(name, "cmd/compile/internal/noder.")
+
+ res = append(res, fmt.Sprintf("%s:%v: %s +0x%v", file, line, name, offset))
+ })
+ return res
+}
+
+type frameVisitor func(file string, line int, name string, offset uintptr)
+
+// syncMarker is an enum type that represents markers that may be
+// written to export data to ensure the reader and writer stay
+// synchronized.
+type syncMarker int
+
+//go:generate stringer -type=syncMarker -trimprefix=sync
+
+// TODO(mdempsky): Cleanup unneeded sync markers.
+
+// TODO(mdempsky): Split these markers into public/stable markers, and
+// private ones. Also, trim unused ones.
+const (
+ _ syncMarker = iota
+ syncNode
+ syncBool
+ syncInt64
+ syncUint64
+ syncString
+ syncPos
+ syncPkg
+ syncSym
+ syncSelector
+ syncKind
+ syncType
+ syncTypePkg
+ syncSignature
+ syncParam
+ syncOp
+ syncObject
+ syncExpr
+ syncStmt
+ syncDecl
+ syncConstDecl
+ syncFuncDecl
+ syncTypeDecl
+ syncVarDecl
+ syncPragma
+ syncValue
+ syncEOF
+ syncMethod
+ syncFuncBody
+ syncUse
+ syncUseObj
+ syncObjectIdx
+ syncTypeIdx
+ syncBOF
+ syncEntry
+ syncOpenScope
+ syncCloseScope
+ syncGlobal
+ syncLocal
+ syncDefine
+ syncDefLocal
+ syncUseLocal
+ syncDefGlobal
+ syncUseGlobal
+ syncTypeParams
+ syncUseLabel
+ syncDefLabel
+ syncFuncLit
+ syncCommonFunc
+ syncBodyRef
+ syncLinksymExt
+ syncHack
+ syncSetlineno
+ syncName
+ syncImportDecl
+ syncDeclNames
+ syncDeclName
+ syncExprList
+ syncExprs
+ syncWrapname
+ syncTypeExpr
+ syncTypeExprOrNil
+ syncChanDir
+ syncParams
+ syncCloseAnotherScope
+ syncSum
+ syncUnOp
+ syncBinOp
+ syncStructType
+ syncInterfaceType
+ syncPackname
+ syncEmbedded
+ syncStmts
+ syncStmtsFall
+ syncStmtFall
+ syncBlockStmt
+ syncIfStmt
+ syncForStmt
+ syncSwitchStmt
+ syncRangeStmt
+ syncCaseClause
+ syncCommClause
+ syncSelectStmt
+ syncDecls
+ syncLabeledStmt
+ syncCompLit
+
+ sync1
+ sync2
+ sync3
+ sync4
+
+ syncN
+ syncDefImplicit
+ syncUseName
+ syncUseObjLocal
+ syncAddLocal
+ syncBothSignature
+ syncSetUnderlying
+ syncLinkname
+ syncStmt1
+ syncStmtsEnd
+ syncDeclare
+ syncTopDecls
+ syncTopConstDecl
+ syncTopFuncDecl
+ syncTopTypeDecl
+ syncTopVarDecl
+ syncObject1
+ syncAddBody
+ syncLabel
+ syncFuncExt
+ syncMethExt
+ syncOptLabel
+ syncScalar
+ syncStmtDecls
+ syncDeclLocal
+ syncObjLocal
+ syncObjLocal1
+ syncDeclareLocal
+ syncPublic
+ syncPrivate
+ syncRelocs
+ syncReloc
+ syncUseReloc
+ syncVarExt
+ syncPkgDef
+ syncTypeExt
+ syncVal
+ syncCodeObj
+ syncPosBase
+ syncLocalIdent
+ syncTypeParamNames
+ syncTypeParamBounds
+ syncImplicitTypes
+ syncObjectName
+)
diff --git a/src/cmd/compile/internal/noder/syncmarker_string.go b/src/cmd/compile/internal/noder/syncmarker_string.go
new file mode 100644
index 0000000..655cafc
--- /dev/null
+++ b/src/cmd/compile/internal/noder/syncmarker_string.go
@@ -0,0 +1,156 @@
+// Code generated by "stringer -type=syncMarker -trimprefix=sync"; DO NOT EDIT.
+
+package noder
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[syncNode-1]
+ _ = x[syncBool-2]
+ _ = x[syncInt64-3]
+ _ = x[syncUint64-4]
+ _ = x[syncString-5]
+ _ = x[syncPos-6]
+ _ = x[syncPkg-7]
+ _ = x[syncSym-8]
+ _ = x[syncSelector-9]
+ _ = x[syncKind-10]
+ _ = x[syncType-11]
+ _ = x[syncTypePkg-12]
+ _ = x[syncSignature-13]
+ _ = x[syncParam-14]
+ _ = x[syncOp-15]
+ _ = x[syncObject-16]
+ _ = x[syncExpr-17]
+ _ = x[syncStmt-18]
+ _ = x[syncDecl-19]
+ _ = x[syncConstDecl-20]
+ _ = x[syncFuncDecl-21]
+ _ = x[syncTypeDecl-22]
+ _ = x[syncVarDecl-23]
+ _ = x[syncPragma-24]
+ _ = x[syncValue-25]
+ _ = x[syncEOF-26]
+ _ = x[syncMethod-27]
+ _ = x[syncFuncBody-28]
+ _ = x[syncUse-29]
+ _ = x[syncUseObj-30]
+ _ = x[syncObjectIdx-31]
+ _ = x[syncTypeIdx-32]
+ _ = x[syncBOF-33]
+ _ = x[syncEntry-34]
+ _ = x[syncOpenScope-35]
+ _ = x[syncCloseScope-36]
+ _ = x[syncGlobal-37]
+ _ = x[syncLocal-38]
+ _ = x[syncDefine-39]
+ _ = x[syncDefLocal-40]
+ _ = x[syncUseLocal-41]
+ _ = x[syncDefGlobal-42]
+ _ = x[syncUseGlobal-43]
+ _ = x[syncTypeParams-44]
+ _ = x[syncUseLabel-45]
+ _ = x[syncDefLabel-46]
+ _ = x[syncFuncLit-47]
+ _ = x[syncCommonFunc-48]
+ _ = x[syncBodyRef-49]
+ _ = x[syncLinksymExt-50]
+ _ = x[syncHack-51]
+ _ = x[syncSetlineno-52]
+ _ = x[syncName-53]
+ _ = x[syncImportDecl-54]
+ _ = x[syncDeclNames-55]
+ _ = x[syncDeclName-56]
+ _ = x[syncExprList-57]
+ _ = x[syncExprs-58]
+ _ = x[syncWrapname-59]
+ _ = x[syncTypeExpr-60]
+ _ = x[syncTypeExprOrNil-61]
+ _ = x[syncChanDir-62]
+ _ = x[syncParams-63]
+ _ = x[syncCloseAnotherScope-64]
+ _ = x[syncSum-65]
+ _ = x[syncUnOp-66]
+ _ = x[syncBinOp-67]
+ _ = x[syncStructType-68]
+ _ = x[syncInterfaceType-69]
+ _ = x[syncPackname-70]
+ _ = x[syncEmbedded-71]
+ _ = x[syncStmts-72]
+ _ = x[syncStmtsFall-73]
+ _ = x[syncStmtFall-74]
+ _ = x[syncBlockStmt-75]
+ _ = x[syncIfStmt-76]
+ _ = x[syncForStmt-77]
+ _ = x[syncSwitchStmt-78]
+ _ = x[syncRangeStmt-79]
+ _ = x[syncCaseClause-80]
+ _ = x[syncCommClause-81]
+ _ = x[syncSelectStmt-82]
+ _ = x[syncDecls-83]
+ _ = x[syncLabeledStmt-84]
+ _ = x[syncCompLit-85]
+ _ = x[sync1-86]
+ _ = x[sync2-87]
+ _ = x[sync3-88]
+ _ = x[sync4-89]
+ _ = x[syncN-90]
+ _ = x[syncDefImplicit-91]
+ _ = x[syncUseName-92]
+ _ = x[syncUseObjLocal-93]
+ _ = x[syncAddLocal-94]
+ _ = x[syncBothSignature-95]
+ _ = x[syncSetUnderlying-96]
+ _ = x[syncLinkname-97]
+ _ = x[syncStmt1-98]
+ _ = x[syncStmtsEnd-99]
+ _ = x[syncDeclare-100]
+ _ = x[syncTopDecls-101]
+ _ = x[syncTopConstDecl-102]
+ _ = x[syncTopFuncDecl-103]
+ _ = x[syncTopTypeDecl-104]
+ _ = x[syncTopVarDecl-105]
+ _ = x[syncObject1-106]
+ _ = x[syncAddBody-107]
+ _ = x[syncLabel-108]
+ _ = x[syncFuncExt-109]
+ _ = x[syncMethExt-110]
+ _ = x[syncOptLabel-111]
+ _ = x[syncScalar-112]
+ _ = x[syncStmtDecls-113]
+ _ = x[syncDeclLocal-114]
+ _ = x[syncObjLocal-115]
+ _ = x[syncObjLocal1-116]
+ _ = x[syncDeclareLocal-117]
+ _ = x[syncPublic-118]
+ _ = x[syncPrivate-119]
+ _ = x[syncRelocs-120]
+ _ = x[syncReloc-121]
+ _ = x[syncUseReloc-122]
+ _ = x[syncVarExt-123]
+ _ = x[syncPkgDef-124]
+ _ = x[syncTypeExt-125]
+ _ = x[syncVal-126]
+ _ = x[syncCodeObj-127]
+ _ = x[syncPosBase-128]
+ _ = x[syncLocalIdent-129]
+ _ = x[syncTypeParamNames-130]
+ _ = x[syncTypeParamBounds-131]
+ _ = x[syncImplicitTypes-132]
+ _ = x[syncObjectName-133]
+}
+
+const _syncMarker_name = "NodeBoolInt64Uint64StringPosPkgSymSelectorKindTypeTypePkgSignatureParamOpObjectExprStmtDeclConstDeclFuncDeclTypeDeclVarDeclPragmaValueEOFMethodFuncBodyUseUseObjObjectIdxTypeIdxBOFEntryOpenScopeCloseScopeGlobalLocalDefineDefLocalUseLocalDefGlobalUseGlobalTypeParamsUseLabelDefLabelFuncLitCommonFuncBodyRefLinksymExtHackSetlinenoNameImportDeclDeclNamesDeclNameExprListExprsWrapnameTypeExprTypeExprOrNilChanDirParamsCloseAnotherScopeSumUnOpBinOpStructTypeInterfaceTypePacknameEmbeddedStmtsStmtsFallStmtFallBlockStmtIfStmtForStmtSwitchStmtRangeStmtCaseClauseCommClauseSelectStmtDeclsLabeledStmtCompLit1234NDefImplicitUseNameUseObjLocalAddLocalBothSignatureSetUnderlyingLinknameStmt1StmtsEndDeclareTopDeclsTopConstDeclTopFuncDeclTopTypeDeclTopVarDeclObject1AddBodyLabelFuncExtMethExtOptLabelScalarStmtDeclsDeclLocalObjLocalObjLocal1DeclareLocalPublicPrivateRelocsRelocUseRelocVarExtPkgDefTypeExtValCodeObjPosBaseLocalIdentTypeParamNamesTypeParamBoundsImplicitTypesObjectName"
+
+var _syncMarker_index = [...]uint16{0, 4, 8, 13, 19, 25, 28, 31, 34, 42, 46, 50, 57, 66, 71, 73, 79, 83, 87, 91, 100, 108, 116, 123, 129, 134, 137, 143, 151, 154, 160, 169, 176, 179, 184, 193, 203, 209, 214, 220, 228, 236, 245, 254, 264, 272, 280, 287, 297, 304, 314, 318, 327, 331, 341, 350, 358, 366, 371, 379, 387, 400, 407, 413, 430, 433, 437, 442, 452, 465, 473, 481, 486, 495, 503, 512, 518, 525, 535, 544, 554, 564, 574, 579, 590, 597, 598, 599, 600, 601, 602, 613, 620, 631, 639, 652, 665, 673, 678, 686, 693, 701, 713, 724, 735, 745, 752, 759, 764, 771, 778, 786, 792, 801, 810, 818, 827, 839, 845, 852, 858, 863, 871, 877, 883, 890, 893, 900, 907, 917, 931, 946, 959, 969}
+
+func (i syncMarker) String() string {
+ i -= 1
+ if i < 0 || i >= syncMarker(len(_syncMarker_index)-1) {
+ return "syncMarker(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _syncMarker_name[_syncMarker_index[i]:_syncMarker_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/noder/transform.go b/src/cmd/compile/internal/noder/transform.go
new file mode 100644
index 0000000..db28e8d
--- /dev/null
+++ b/src/cmd/compile/internal/noder/transform.go
@@ -0,0 +1,1091 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains transformation functions on nodes, which are the
+// transformations that the typecheck package does that are distinct from the
+// typechecking functionality. These transform functions are pared-down copies of
+// the original typechecking functions, with all code removed that is related to:
+//
+// - Detecting compile-time errors (already done by types2)
+// - Setting the actual type of existing nodes (already done based on
+// type info from types2)
+// - Dealing with untyped constants (which types2 has already resolved)
+//
+// Each of the transformation functions requires that the node passed in has its type
+// and typecheck flag set. If the transformation function replaces or adds new
+// nodes, it will set the type and typecheck flag for those new nodes.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "fmt"
+ "go/constant"
+)
+
+// Transformation functions for expressions
+
+// transformAdd transforms an addition operation (currently just addition of
+// strings). Corresponds to the "binary operators" case in typecheck.typecheck1.
+func transformAdd(n *ir.BinaryExpr) ir.Node {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ l := n.X
+ if l.Type().IsString() {
+ var add *ir.AddStringExpr
+ if l.Op() == ir.OADDSTR {
+ add = l.(*ir.AddStringExpr)
+ add.SetPos(n.Pos())
+ } else {
+ add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+ }
+ r := n.Y
+ if r.Op() == ir.OADDSTR {
+ r := r.(*ir.AddStringExpr)
+ add.List.Append(r.List.Take()...)
+ } else {
+ add.List.Append(r)
+ }
+ typed(l.Type(), add)
+ return add
+ }
+ return n
+}
+
+// Corresponds to typecheck.stringtoruneslit.
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+ if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
+ base.Fatalf("stringtoarraylit %v", n)
+ }
+
+ var list []ir.Node
+ i := 0
+ eltType := n.Type().Elem()
+ for _, r := range ir.StringVal(n.X) {
+ elt := ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r)))
+ // Change from untyped int to the actual element type determined
+ // by types2. No need to change elt.Key, since the array indexes
+ // are just used for setting up the element ordering.
+ elt.Value.SetType(eltType)
+ list = append(list, elt)
+ i++
+ }
+
+ nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil)
+ nn.List = list
+ typed(n.Type(), nn)
+ // Need to transform the OCOMPLIT.
+ return transformCompLit(nn)
+}
+
+// transformConv transforms an OCONV node as needed, based on the types involved,
+// etc. Corresponds to typecheck.tcConv.
+func transformConv(n *ir.ConvExpr) ir.Node {
+ t := n.X.Type()
+ op, why := typecheck.Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ // types2 currently ignores pragmas, so a 'notinheap' mismatch is the
+ // one type-related error that it does not catch. This error will be
+ // caught here by Convertop (see two checks near beginning of
+ // Convertop) and reported at the end of noding.
+ base.ErrorfAt(n.Pos(), "cannot convert %L to type %v%s", n.X, n.Type(), why)
+ return n
+ }
+ n.SetOp(op)
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ // Floating point casts imply rounding and
+ // so the conversion must be kept.
+ n.SetOp(ir.OCONV)
+ }
+ }
+
+ // Do not convert to []byte literal. See CL 125796.
+ // Generated code and compiler memory footprint is better without it.
+ case ir.OSTR2BYTES:
+ // ok
+
+ case ir.OSTR2RUNES:
+ if n.X.Op() == ir.OLITERAL {
+ return stringtoruneslit(n)
+ }
+
+ case ir.OBYTES2STR:
+ assert(t.IsSlice())
+ assert(t.Elem().Kind() == types.TUINT8)
+ if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] {
+ // If t is a slice of a user-defined byte type B (not uint8
+ // or byte), then add an extra CONVNOP from []B to []byte, so
+ // that the call to slicebytetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ case ir.ORUNES2STR:
+ assert(t.IsSlice())
+ assert(t.Elem().Kind() == types.TINT32)
+ if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] {
+ // If t is a slice of a user-defined rune type B (not uint32
+ // or rune), then add an extra CONVNOP from []B to []rune, so
+ // that the call to slicerunetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ }
+ return n
+}
+
+// transformConvCall transforms a conversion call. Corresponds to the OTYPE part of
+// typecheck.tcCall.
+func transformConvCall(n *ir.CallExpr) ir.Node {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ arg := n.Args[0]
+ n1 := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
+ typed(n.X.Type(), n1)
+ return transformConv(n1)
+}
+
+// transformCall transforms a normal function/method call. Corresponds to last half
+// (non-conversion, non-builtin part) of typecheck.tcCall. This code should work even
+// in the case of OCALL/OFUNCINST.
+func transformCall(n *ir.CallExpr) {
+ // Set base.Pos, since transformArgs below may need it, but transformCall
+ // is called in some passes that don't set base.Pos.
+ ir.SetPos(n)
+ // n.Type() can be nil for calls with no return value
+ assert(n.Typecheck() == 1)
+ transformArgs(n)
+ l := n.X
+ t := l.Type()
+
+ switch l.Op() {
+ case ir.ODOTINTER:
+ n.SetOp(ir.OCALLINTER)
+
+ case ir.ODOTMETH:
+ l := l.(*ir.SelectorExpr)
+ n.SetOp(ir.OCALLMETH)
+
+ tp := t.Recv().Type
+
+ if l.X == nil || !types.Identical(l.X.Type(), tp) {
+ base.Fatalf("method receiver")
+ }
+
+ default:
+ n.SetOp(ir.OCALLFUNC)
+ }
+
+ typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args)
+ if l.Op() == ir.ODOTMETH && len(deref(n.X.Type().Recv().Type).RParams()) == 0 {
+ typecheck.FixMethodCall(n)
+ }
+ if t.NumResults() == 1 {
+ if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
+ if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+ // Emit code for runtime.getg() directly instead of calling function.
+ // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+ // so that the ordering pass can make sure to preserve the semantics of the original code
+ // (in particular, the exact time of the function call) by introducing temporaries.
+ // In this case, we know getg() always returns the same result within a given function
+ // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+ n.SetOp(ir.OGETG)
+ }
+ }
+ return
+ }
+}
+
+// transformEarlyCall transforms the arguments of a call with an OFUNCINST node.
+func transformEarlyCall(n *ir.CallExpr) {
+ transformArgs(n)
+ typecheckaste(ir.OCALL, n.X, n.IsDDD, n.X.Type().Params(), n.Args)
+}
+
+// transformCompare transforms a compare operation (currently just equals/not
+// equals). Corresponds to the "comparison operators" case in
+// typecheck.typecheck1, including tcArith.
+func transformCompare(n *ir.BinaryExpr) {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ if (n.Op() == ir.OEQ || n.Op() == ir.ONE) && !types.Identical(n.X.Type(), n.Y.Type()) {
+ // Comparison is okay as long as one side is assignable to the
+ // other. The only allowed case where the conversion is not CONVNOP is
+ // "concrete == interface". In that case, check comparability of
+ // the concrete type. The conversion allocates, so only do it if
+ // the concrete type is huge.
+ l, r := n.X, n.Y
+ lt, rt := l.Type(), r.Type()
+ converted := false
+ if rt.Kind() != types.TBLANK {
+ aop, _ := typecheck.Assignop(lt, rt)
+ if aop != ir.OXXX {
+ types.CalcSize(lt)
+ if lt.HasShape() || rt.IsInterface() == lt.IsInterface() || lt.Size() >= 1<<16 {
+ l = ir.NewConvExpr(base.Pos, aop, rt, l)
+ l.SetTypecheck(1)
+ }
+
+ converted = true
+ }
+ }
+
+ if !converted && lt.Kind() != types.TBLANK {
+ aop, _ := typecheck.Assignop(rt, lt)
+ if aop != ir.OXXX {
+ types.CalcSize(rt)
+ if rt.HasShape() || rt.IsInterface() == lt.IsInterface() || rt.Size() >= 1<<16 {
+ r = ir.NewConvExpr(base.Pos, aop, lt, r)
+ r.SetTypecheck(1)
+ }
+ }
+ }
+ n.X, n.Y = l, r
+ }
+}
+
+// Corresponds to typecheck.implicitstar.
+func implicitstar(n ir.Node) ir.Node {
+ // insert implicit * if needed for fixed array
+ t := n.Type()
+ if !t.IsPtr() {
+ return n
+ }
+ t = t.Elem()
+ if !t.IsArray() {
+ return n
+ }
+ star := ir.NewStarExpr(base.Pos, n)
+ star.SetImplicit(true)
+ return typed(t, star)
+}
+
+// transformIndex transforms an index operation. Corresponds to typecheck.tcIndex.
+func transformIndex(n *ir.IndexExpr) {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ n.X = implicitstar(n.X)
+ l := n.X
+ t := l.Type()
+ if t.Kind() == types.TMAP {
+ n.Index = assignconvfn(n.Index, t.Key())
+ n.SetOp(ir.OINDEXMAP)
+ // Set type to just the map value, not (value, bool). This is
+ // different from types2, but fits the later stages of the
+ // compiler better.
+ n.SetType(t.Elem())
+ n.Assigned = false
+ }
+}
+
+// transformSlice transforms a slice operation. Corresponds to typecheck.tcSlice.
+func transformSlice(n *ir.SliceExpr) {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ l := n.X
+ if l.Type().IsArray() {
+ addr := typecheck.NodAddr(n.X)
+ addr.SetImplicit(true)
+ typed(types.NewPtr(n.X.Type()), addr)
+ n.X = addr
+ l = addr
+ }
+ t := l.Type()
+ if t.IsString() {
+ n.SetOp(ir.OSLICESTR)
+ } else if t.IsPtr() && t.Elem().IsArray() {
+ if n.Op().IsSlice3() {
+ n.SetOp(ir.OSLICE3ARR)
+ } else {
+ n.SetOp(ir.OSLICEARR)
+ }
+ }
+}
+
+// Transformation functions for statements
+
+// Corresponds to typecheck.checkassign.
+func transformCheckAssign(stmt ir.Node, n ir.Node) {
+ if n.Op() == ir.OINDEXMAP {
+ n := n.(*ir.IndexExpr)
+ n.Assigned = true
+ return
+ }
+}
+
+// Corresponds to typecheck.assign.
+func transformAssign(stmt ir.Node, lhs, rhs []ir.Node) {
+ checkLHS := func(i int, typ *types.Type) {
+ transformCheckAssign(stmt, lhs[i])
+ }
+
+ cr := len(rhs)
+ if len(rhs) == 1 {
+ if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() {
+ cr = rtyp.NumFields()
+ }
+ }
+
+ // x, ok = y
+assignOK:
+ for len(lhs) == 2 && cr == 1 {
+ stmt := stmt.(*ir.AssignListStmt)
+ r := rhs[0]
+
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ stmt.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ stmt.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ r := r.(*ir.TypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
+ case ir.ODYNAMICDOTTYPE:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODYNAMICDOTTYPE2)
+ default:
+ break assignOK
+ }
+ checkLHS(0, r.Type())
+ checkLHS(1, types.UntypedBool)
+ t := lhs[0].Type()
+ if t != nil && rhs[0].Type().HasShape() && t.IsInterface() && !types.IdenticalStrict(t, rhs[0].Type()) {
+ // This is a multi-value assignment (map, channel, or dot-type)
+ // where the main result is converted to an interface during the
+ // assignment. Normally, the needed CONVIFACE is not created
+ // until (*orderState).as2ok(), because the AS2* ops and their
+ // sub-ops are so tightly intertwined. But we need to create the
+ // CONVIFACE now to enable dictionary lookups. So, assign the
+ // results first to temps, so that we can manifest the CONVIFACE
+ // in assigning the first temp to lhs[0]. If we added the
+ // CONVIFACE into rhs[0] directly, we would break a lot of later
+ // code that depends on the tight coupling between the AS2* ops
+ // and their sub-ops. (Issue #50642).
+ v := typecheck.Temp(rhs[0].Type())
+ ok := typecheck.Temp(types.Types[types.TBOOL])
+ as := ir.NewAssignListStmt(base.Pos, stmt.Op(), []ir.Node{v, ok}, []ir.Node{r})
+ as.Def = true
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, v))
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, ok))
+ as.SetTypecheck(1)
+ // Change stmt to be a normal assignment of the temps to the final
+ // left-hand-sides. We re-create the original multi-value assignment
+ // so that it assigns to the temps and add it as an init of stmt.
+ //
+ // TODO: fix the order of evaluation, so that the lval of lhs[0]
+ // is evaluated before rhs[0] (similar to problem in #50672).
+ stmt.SetOp(ir.OAS2)
+ stmt.PtrInit().Append(as)
+ // assignconvfn inserts the CONVIFACE.
+ stmt.Rhs = []ir.Node{assignconvfn(v, t), ok}
+ }
+ return
+ }
+
+ if len(lhs) != cr {
+ for i := range lhs {
+ checkLHS(i, nil)
+ }
+ return
+ }
+
+ // x,y,z = f()
+ if cr > len(rhs) {
+ stmt := stmt.(*ir.AssignListStmt)
+ stmt.SetOp(ir.OAS2FUNC)
+ r := rhs[0].(*ir.CallExpr)
+ rtyp := r.Type()
+
+ mismatched := false
+ failed := false
+ for i := range lhs {
+ result := rtyp.Field(i).Type
+ checkLHS(i, result)
+
+ if lhs[i].Type() == nil || result == nil {
+ failed = true
+ } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
+ mismatched = true
+ }
+ }
+ if mismatched && !failed {
+ typecheck.RewriteMultiValueCall(stmt, r)
+ }
+ return
+ }
+
+ for i, r := range rhs {
+ checkLHS(i, r.Type())
+ if lhs[i].Type() != nil {
+ rhs[i] = assignconvfn(r, lhs[i].Type())
+ }
+ }
+}
+
+// Corresponds to typecheck.typecheckargs. Really just deals with multi-value calls.
+func transformArgs(n ir.InitNode) {
+ var list []ir.Node
+ switch n := n.(type) {
+ default:
+ base.Fatalf("transformArgs %+v", n.Op())
+ case *ir.CallExpr:
+ list = n.Args
+ if n.IsDDD {
+ return
+ }
+ case *ir.ReturnStmt:
+ list = n.Results
+ }
+ if len(list) != 1 {
+ return
+ }
+
+ t := list[0].Type()
+ if t == nil || !t.IsFuncArgStruct() {
+ return
+ }
+
+ // Save n as n.Orig for fmt.go.
+ if ir.Orig(n) == n {
+ n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
+ }
+
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ typecheck.RewriteMultiValueCall(n, list[0])
+}
+
+// assignconvfn converts node n for assignment to type t. Corresponds to
+// typecheck.assignconvfn.
+func assignconvfn(n ir.Node, t *types.Type) ir.Node {
+ if t.Kind() == types.TBLANK {
+ return n
+ }
+
+ if n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).X
+ }
+
+ if types.IdenticalStrict(n.Type(), t) {
+ return n
+ }
+
+ op, why := Assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Fatalf("found illegal assignment %+v -> %+v; %s", n.Type(), t, why)
+ }
+
+ r := ir.NewConvExpr(base.Pos, op, t, n)
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ return r
+}
+
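+// Assignop returns the conversion op needed to assign a value of type src to
+// type dst, along with a reason string when no conversion is possible. Types
+// that are identical apart from shapes get an explicit OCONVIFACE (interface
+// dst) or OCONVNOP here; all other cases are delegated to typecheck.Assignop1.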
+func Assignop(src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+ return ir.OXXX, ""
+ }
+
+ // 1. src type is identical to dst (taking shapes into account)
+ if types.Identical(src, dst) {
+ // We already know from assignconvfn above that IdenticalStrict(src,
+ // dst) is false, so the types are not exactly the same and one of
+ // src or dst is a shape. If dst is an interface (which means src is
+ // an interface too), we need a real OCONVIFACE op; otherwise we need a
+ // OCONVNOP. See issue #48453.
+ if dst.IsInterface() {
+ return ir.OCONVIFACE, ""
+ } else {
+ return ir.OCONVNOP, ""
+ }
+ }
+ return typecheck.Assignop1(src, dst)
+}
+
+// typecheckaste corresponds to typecheck.typecheckaste, pared down to applying
+// assignconvfn to each argument against its corresponding parameter type. We use
+// this to do early insertion of CONVIFACE nodes during noder2, when the function
+// or args may have typeparams.
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes) {
+ var t *types.Type
+ var i int
+
+ lno := base.Pos
+ defer func() { base.Pos = lno }()
+
+ var n ir.Node
+ if len(nl) == 1 {
+ n = nl[0]
+ }
+
+ i = 0
+ for _, tl := range tstruct.Fields().Slice() {
+ t = tl.Type
+ if tl.IsDDD() {
+ if isddd {
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t)
+ }
+ return
+ }
+
+ // TODO(mdempsky): Make into ... call with implicit slice.
+ for ; i < len(nl); i++ {
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t.Elem())
+ }
+ }
+ return
+ }
+
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t)
+ }
+ i++
+ }
+}
+
+// transformSend transforms a send statement, converting the value to the
+// appropriate type for the channel, as needed. Corresponds to typecheck.tcSend.
+func transformSend(n *ir.SendStmt) {
+ n.Value = assignconvfn(n.Value, n.Chan.Type().Elem())
+}
+
+// transformReturn transforms a return node by doing the needed assignments and
+// any necessary conversions. Corresponds to typecheck.tcReturn().
+func transformReturn(rs *ir.ReturnStmt) {
+ transformArgs(rs)
+ nl := rs.Results
+ if ir.HasNamedResults(ir.CurFunc) && len(nl) == 0 {
+ return
+ }
+
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), nl)
+}
+
+// transformSelect transforms a select node, creating an assignment list as needed
+// for each case. Corresponds to typecheck.tcSelect().
+func transformSelect(sel *ir.SelectStmt) {
+ for _, ncase := range sel.Cases {
+ if ncase.Comm != nil {
+ n := ncase.Comm
+ oselrecv2 := func(dst, recv ir.Node, def bool) {
+ selrecv := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+ if dst.Op() == ir.ONAME && dst.(*ir.Name).Defn == n {
+ // Must fix Defn for dst, since we are
+ // completely changing the node.
+ dst.(*ir.Name).Defn = selrecv
+ }
+ selrecv.Def = def
+ selrecv.SetTypecheck(1)
+ selrecv.SetInit(n.Init())
+ ncase.Comm = selrecv
+ }
+ switch n.Op() {
+ case ir.OAS:
+ // convert x = <-c into x, _ = <-c
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ n := n.(*ir.AssignStmt)
+ if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+ r := r.(*ir.ConvExpr)
+ if r.Implicit() {
+ n.Y = r.X
+ }
+ }
+ oselrecv2(n.X, n.Y, n.Def)
+
+ case ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ n.SetOp(ir.OSELRECV2)
+
+ case ir.ORECV:
+ // convert <-c into _, _ = <-c
+ n := n.(*ir.UnaryExpr)
+ oselrecv2(ir.BlankNode, n, false)
+
+ case ir.OSEND:
+ break
+ }
+ }
+ }
+}
+
+// transformAsOp transforms an AssignOp statement. Corresponds to OASOP case in
+// typecheck1.
+func transformAsOp(n *ir.AssignOpStmt) {
+ transformCheckAssign(n, n.X)
+}
+
+// transformDot transforms an OXDOT (or ODOT) into an ODOT, ODOTPTR, ODOTMETH,
+// ODOTINTER, or OMETHVALUE, as appropriate. It adds in extra nodes as needed to
+// access embedded fields. Corresponds to typecheck.tcDot.
+func transformDot(n *ir.SelectorExpr, isCall bool) ir.Node {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ if n.Op() == ir.OXDOT {
+ n = typecheck.AddImplicitDots(n)
+ n.SetOp(ir.ODOT)
+
+ // Set the Selection field and typecheck flag for any new ODOT nodes
+ // added by AddImplicitDots(), and also transform to ODOTPTR if
+ // needed. Equivalent to 'n.X = typecheck(n.X, ctxExpr|ctxType)' in
+ // tcDot.
+ for n1 := n; n1.X.Op() == ir.ODOT; {
+ n1 = n1.X.(*ir.SelectorExpr)
+ if !n1.Implicit() {
+ break
+ }
+ t1 := n1.X.Type()
+ if t1.IsPtr() && !t1.Elem().IsInterface() {
+ t1 = t1.Elem()
+ n1.SetOp(ir.ODOTPTR)
+ }
+ typecheck.Lookdot(n1, t1, 0)
+ n1.SetTypecheck(1)
+ }
+ }
+
+ t := n.X.Type()
+
+ if n.X.Op() == ir.OTYPE {
+ return transformMethodExpr(n)
+ }
+
+ if t.IsPtr() && !t.Elem().IsInterface() {
+ t = t.Elem()
+ n.SetOp(ir.ODOTPTR)
+ }
+
+ f := typecheck.Lookdot(n, t, 0)
+ assert(f != nil)
+
+ if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && !isCall {
+ n.SetOp(ir.OMETHVALUE)
+ // This converts a method type to a function type. See issue 47775.
+ n.SetType(typecheck.NewMethodType(n.Type(), nil))
+ }
+ return n
+}
+
+// Corresponds to typecheck.typecheckMethodExpr.
+func transformMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
+ t := n.X.Type()
+
+ // Compute the method set for t.
+ var ms *types.Fields
+ if t.IsInterface() {
+ ms = t.AllMethods()
+ } else {
+ mt := types.ReceiverBaseType(t)
+ typecheck.CalcMethods(mt)
+ ms = mt.AllMethods()
+
+ // The method expression T.m requires a wrapper when T
+ // is different from m's declared receiver type. We
+ // normally generate these wrappers while writing out
+ // runtime type descriptors, which is always done for
+ // types declared at package scope. However, we need
+ // to make sure to generate wrappers for anonymous
+ // receiver types too.
+ if mt.Sym() == nil {
+ typecheck.NeedRuntimeType(t)
+ }
+ }
+
+ s := n.Sel
+ m := typecheck.Lookdot1(n, s, t, ms, 0)
+ if !t.HasShape() {
+ // It's OK to not find the method if t is instantiated by shape types,
+ // because we will use the methods on the generic type anyway.
+ assert(m != nil)
+ }
+
+ n.SetOp(ir.OMETHEXPR)
+ n.Selection = m
+ n.SetType(typecheck.NewMethodType(m.Type, n.X.Type()))
+ return n
+}
+
+// Corresponds to typecheck.tcAppend.
+func transformAppend(n *ir.CallExpr) ir.Node {
+ transformArgs(n)
+ args := n.Args
+ t := args[0].Type()
+ assert(t.IsSlice())
+
+ if n.IsDDD {
+ if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() {
+ return n
+ }
+
+ args[1] = assignconvfn(args[1], t.Underlying())
+ return n
+ }
+
+ as := args[1:]
+ for i, n := range as {
+ assert(n.Type() != nil)
+ as[i] = assignconvfn(n, t.Elem())
+ }
+ return n
+}
+
+// Corresponds to typecheck.tcComplex.
+func transformComplex(n *ir.BinaryExpr) ir.Node {
+ l := n.X
+ r := n.Y
+
+ assert(types.Identical(l.Type(), r.Type()))
+
+ var t *types.Type
+ switch l.Type().Kind() {
+ case types.TFLOAT32:
+ t = types.Types[types.TCOMPLEX64]
+ case types.TFLOAT64:
+ t = types.Types[types.TCOMPLEX128]
+ default:
+ panic(fmt.Sprintf("transformComplex: unexpected type %v", l.Type()))
+ }
+
+ // Must set the type here for generics, because this can't be determined
+ // by substitution of the generic types.
+ typed(t, n)
+ return n
+}
+
+// Corresponds to typecheck.tcDelete.
+func transformDelete(n *ir.CallExpr) ir.Node {
+ transformArgs(n)
+ args := n.Args
+ assert(len(args) == 2)
+
+ l := args[0]
+ r := args[1]
+
+ args[1] = assignconvfn(r, l.Type().Key())
+ return n
+}
+
+// Corresponds to typecheck.tcMake.
+func transformMake(n *ir.CallExpr) ir.Node {
+ args := n.Args
+
+ n.Args = nil
+ l := args[0]
+ t := l.Type()
+ assert(t != nil)
+
+ i := 1
+ var nn ir.Node
+ switch t.Kind() {
+ case types.TSLICE:
+ l = args[i]
+ i++
+ var r ir.Node
+ if i < len(args) {
+ r = args[i]
+ i++
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+ case types.TMAP:
+ if i < len(args) {
+ l = args[i]
+ i++
+ } else {
+ l = ir.NewInt(0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+ nn.SetEsc(n.Esc())
+
+ case types.TCHAN:
+ l = nil
+ if i < len(args) {
+ l = args[i]
+ i++
+ } else {
+ l = ir.NewInt(0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+ default:
+ panic(fmt.Sprintf("transformMake: unexpected type %v", t))
+ }
+
+ assert(i == len(args))
+ typed(n.Type(), nn)
+ return nn
+}
+
+// Corresponds to typecheck.tcPanic.
+func transformPanic(n *ir.UnaryExpr) ir.Node {
+ n.X = assignconvfn(n.X, types.Types[types.TINTER])
+ return n
+}
+
+// Corresponds to typecheck.tcPrint.
+func transformPrint(n *ir.CallExpr) ir.Node {
+ transformArgs(n)
+ return n
+}
+
+// Corresponds to typecheck.tcRealImag.
+func transformRealImag(n *ir.UnaryExpr) ir.Node {
+ l := n.X
+ var t *types.Type
+
+ // Determine result type.
+ switch l.Type().Kind() {
+ case types.TCOMPLEX64:
+ t = types.Types[types.TFLOAT32]
+ case types.TCOMPLEX128:
+ t = types.Types[types.TFLOAT64]
+ default:
+ panic(fmt.Sprintf("transformRealImag: unexpected type %v", l.Type()))
+ }
+
+ // Must set the type here for generics, because this can't be determined
+ // by substitution of the generic types.
+ typed(t, n)
+ return n
+}
+
+// Corresponds to typecheck.tcLenCap.
+func transformLenCap(n *ir.UnaryExpr) ir.Node {
+ n.X = implicitstar(n.X)
+ return n
+}
+
+// Corresponds to Builtin part of tcCall.
+func transformBuiltin(n *ir.CallExpr) ir.Node {
+ // n.Type() can be nil for builtins with no return value
+ assert(n.Typecheck() == 1)
+ fun := n.X.(*ir.Name)
+ op := fun.BuiltinOp
+
+ switch op {
+ case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ n.SetOp(op)
+ n.X = nil
+ switch op {
+ case ir.OAPPEND:
+ return transformAppend(n)
+ case ir.ODELETE:
+ return transformDelete(n)
+ case ir.OMAKE:
+ return transformMake(n)
+ case ir.OPRINT, ir.OPRINTN:
+ return transformPrint(n)
+ case ir.ORECOVER:
+ // nothing more to do
+ return n
+ }
+
+ case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
+ transformArgs(n)
+ fallthrough
+
+ case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ u := ir.NewUnaryExpr(n.Pos(), op, n.Args[0])
+ u1 := typed(n.Type(), ir.InitExpr(n.Init(), u)) // typecheckargs can add to old.Init
+ switch op {
+ case ir.OCAP, ir.OLEN:
+ return transformLenCap(u1.(*ir.UnaryExpr))
+ case ir.OREAL, ir.OIMAG:
+ return transformRealImag(u1.(*ir.UnaryExpr))
+ case ir.OPANIC:
+ return transformPanic(u1.(*ir.UnaryExpr))
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ // This corresponds to the EvalConst() call near end of typecheck().
+ return typecheck.EvalConst(u1)
+ case ir.OCLOSE, ir.ONEW:
+ // nothing more to do
+ return u1
+ }
+
+ case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ transformArgs(n)
+ b := ir.NewBinaryExpr(n.Pos(), op, n.Args[0], n.Args[1])
+ n1 := typed(n.Type(), ir.InitExpr(n.Init(), b))
+ if op != ir.OCOMPLEX {
+ // nothing more to do
+ return n1
+ }
+ return transformComplex(n1.(*ir.BinaryExpr))
+
+ default:
+ panic(fmt.Sprintf("transformBuiltin: unexpected op %v", op))
+ }
+
+ return n
+}
+
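+// hasKeys reports whether any node in l is a keyed (OKEY or OSTRUCTKEY) entry.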
+func hasKeys(l ir.Nodes) bool {
+ for _, n := range l {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
+ return true
+ }
+ }
+ return false
+}
+
+// transformArrayLit runs assignconvfn on each array element and returns the
+// length of the slice/array that is needed to hold all the array keys/indexes
+// (one more than the highest index). Corresponds to typecheck.typecheckarraylit.
+func transformArrayLit(elemType *types.Type, bound int64, elts []ir.Node) int64 {
+ var key, length int64
+ for i, elt := range elts {
+ ir.SetPos(elt)
+ r := elts[i]
+ var kv *ir.KeyExpr
+ if elt.Op() == ir.OKEY {
+ elt := elt.(*ir.KeyExpr)
+ key = typecheck.IndexConst(elt.Key)
+ assert(key >= 0)
+ kv = elt
+ r = elt.Value
+ }
+
+ r = assignconvfn(r, elemType)
+ if kv != nil {
+ kv.Value = r
+ } else {
+ elts[i] = r
+ }
+
+ key++
+ if key > length {
+ length = key
+ }
+ }
+
+ return length
+}
+
+// transformCompLit transforms n to an OARRAYLIT, OSLICELIT, OMAPLIT, or
+// OSTRUCTLIT node, with any needed conversions. Corresponds to
+// typecheck.tcCompLit (and includes parts corresponding to tcStructLitKey).
+func transformCompLit(n *ir.CompLitExpr) (res ir.Node) {
+ assert(n.Type() != nil && n.Typecheck() == 1)
+ lno := base.Pos
+ defer func() {
+ base.Pos = lno
+ }()
+
+ // Save original node (including n.Right)
+ n.SetOrig(ir.Copy(n))
+
+ ir.SetPos(n)
+
+ t := n.Type()
+
+ switch t.Kind() {
+ default:
+ base.Fatalf("transformCompLit %v", t.Kind())
+
+ case types.TARRAY:
+ transformArrayLit(t.Elem(), t.NumElem(), n.List)
+ n.SetOp(ir.OARRAYLIT)
+
+ case types.TSLICE:
+ length := transformArrayLit(t.Elem(), -1, n.List)
+ n.SetOp(ir.OSLICELIT)
+ n.Len = length
+
+ case types.TMAP:
+ for _, l := range n.List {
+ ir.SetPos(l)
+ assert(l.Op() == ir.OKEY)
+ l := l.(*ir.KeyExpr)
+
+ r := l.Key
+ l.Key = assignconvfn(r, t.Key())
+
+ r = l.Value
+ l.Value = assignconvfn(r, t.Elem())
+ }
+
+ n.SetOp(ir.OMAPLIT)
+
+ case types.TSTRUCT:
+ // Need valid field offsets for Xoffset below.
+ types.CalcSize(t)
+
+ if len(n.List) != 0 && !hasKeys(n.List) {
+ // simple list of values
+ ls := n.List
+ for i, n1 := range ls {
+ ir.SetPos(n1)
+
+ f := t.Field(i)
+ n1 = assignconvfn(n1, f.Type)
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
+ }
+ assert(len(ls) >= t.NumFields())
+ } else {
+ // keyed list
+ ls := n.List
+ for i, l := range ls {
+ ir.SetPos(l)
+
+ kv := l.(*ir.KeyExpr)
+ key := kv.Key
+
+ // Sym might have resolved to name in other top-level
+ // package, because of import dot. Redirect to correct sym
+ // before we do the lookup.
+ s := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && typecheck.DotImportRefs[id] != nil {
+ s = typecheck.Lookup(s.Name)
+ }
+ if types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ // Exported field names should always have
+ // local pkg. We only need to do this
+ // adjustment for generic functions that are
+ // being transformed after being imported
+ // from another package.
+ s = typecheck.Lookup(s.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so s will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ assert(!(s == nil || key.Op() == ir.OXDOT || s.IsBlank()))
+
+ f := typecheck.Lookdot1(nil, s, t, t.Fields(), 0)
+ l := ir.NewStructKeyExpr(l.Pos(), f, kv.Value)
+ ls[i] = l
+
+ l.Value = assignconvfn(l.Value, f.Type)
+ }
+ }
+
+ n.SetOp(ir.OSTRUCTLIT)
+ }
+
+ return n
+}
+
+// transformAddr corresponds to typecheck.tcAddr.
+func transformAddr(n *ir.AddrExpr) {
+ switch n.X.Op() {
+ case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ n.SetOp(ir.OPTRLIT)
+ }
+}
diff --git a/src/cmd/compile/internal/noder/types.go b/src/cmd/compile/internal/noder/types.go
new file mode 100644
index 0000000..ff3a4d9
--- /dev/null
+++ b/src/cmd/compile/internal/noder/types.go
@@ -0,0 +1,517 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+ "strings"
+)
+
+func (g *irgen) pkg(pkg *types2.Package) *types.Pkg {
+ switch pkg {
+ case nil:
+ return types.BuiltinPkg
+ case g.self:
+ return types.LocalPkg
+ case types2.Unsafe:
+ return types.UnsafePkg
+ }
+ return types.NewPkg(pkg.Path(), pkg.Name())
+}
+
+var universeAny = types2.Universe.Lookup("any").Type()
+
+// typ converts a types2.Type to a types.Type, including caching of previously
+// translated types.
+func (g *irgen) typ(typ types2.Type) *types.Type {
+ // Defer the CheckSize calls until we have fully-defined a
+ // (possibly-recursive) top-level type.
+ types.DeferCheckSize()
+ res := g.typ1(typ)
+ types.ResumeCheckSize()
+
+ // Finish up any types on typesToFinalize, now that we are at the top of a
+ // fully-defined (possibly recursive) type. fillinMethods could create more
+ // types to finalize.
+ for len(g.typesToFinalize) > 0 {
+ l := len(g.typesToFinalize)
+ info := g.typesToFinalize[l-1]
+ g.typesToFinalize = g.typesToFinalize[:l-1]
+ types.DeferCheckSize()
+ g.fillinMethods(info.typ, info.ntyp)
+ types.ResumeCheckSize()
+ }
+ return res
+}
+
+// typ1 is like typ, but doesn't call CheckSize, since it may have only
+// constructed part of a recursive type. Should not be called from outside this
+// file (g.typ is the "external" entry point).
+func (g *irgen) typ1(typ types2.Type) *types.Type {
+ // See issue 49583: the type checker has trouble keeping track of aliases,
+ // but for such a common alias as any we can improve things by preserving a
+ // pointer identity that can be checked when formatting type strings.
+ if typ == universeAny {
+ return types.AnyType
+ }
+ // Cache type2-to-type mappings. Important so that each defined generic
+ // type (instantiated or not) has a single types.Type representation.
+ // Also saves a lot of computation and memory by avoiding re-translating
+ // types2 types repeatedly.
+ res, ok := g.typs[typ]
+ if !ok {
+ res = g.typ0(typ)
+ // Calculate the size for all concrete types seen by the frontend.
+ // This is the replacement for the CheckSize() calls in the types1
+ // typechecker. These will be deferred until the top-level g.typ().
+ if res != nil && !res.IsUntyped() && !res.IsFuncArgStruct() && !res.HasTParam() {
+ types.CheckSize(res)
+ }
+ g.typs[typ] = res
+ }
+ return res
+}
+
+// instTypeName2 creates a name for an instantiated type, based on the type args
+// (given as types2 types).
+func (g *irgen) instTypeName2(name string, targs *types2.TypeList) string {
+ rparams := make([]*types.Type, targs.Len())
+ for i := range rparams {
+ rparams[i] = g.typ(targs.At(i))
+ }
+ return typecheck.InstTypeName(name, rparams)
+}
+
+// typ0 converts a types2.Type to a types.Type, but doesn't do the caching check
+// at the top level.
+func (g *irgen) typ0(typ types2.Type) *types.Type {
+ switch typ := typ.(type) {
+ case *types2.Basic:
+ return g.basic(typ)
+ case *types2.Named:
+ // If tparams is set, but targs is not, typ is a base generic
+ // type. typ is appearing as part of the source type of an alias,
+ // since that is the only use of a generic type that doesn't
+ // involve instantiation. We just translate the named type in the
+ // normal way below using g.obj().
+ if typ.TypeParams() != nil && typ.TypeArgs() != nil {
+ // typ is an instantiation of a defined (named) generic type.
+ // This instantiation should also be a defined (named) type.
+ // types2 gives us the substituted type in typ.Underlying().
+ // The substituted type may or may not still have type
+ // params. We might, for example, be substituting one type
+ // param for another type param.
+ //
+ // When converted to types.Type, typ has a unique name,
+ // based on the names of the type arguments.
+ instName := g.instTypeName2(typ.Obj().Name(), typ.TypeArgs())
+ s := g.pkg(typ.Obj().Pkg()).Lookup(instName)
+
+ // Make sure the base generic type exists in types1 (it may
+ // not yet if we are referencing an imported generic type, as
+ // opposed to a generic type declared in this package). Make
+ // sure to do this lookup before checking s.Def, in case
+ // s.Def gets defined while importing base (if an imported
+ // type). (Issue #50486).
+ base := g.obj(typ.Origin().Obj())
+
+ if s.Def != nil {
+ // We have already encountered this instantiation.
+ // Use the type we previously created, since there
+ // must be exactly one instance of a defined type.
+ return s.Def.Type()
+ }
+
+ if base.Class == ir.PAUTO {
+ // If the base type is a local type, we want to pop
+ // this instantiated type symbol/definition when we
+ // leave the containing block, so we don't use it
+ // incorrectly later.
+ types.Pushdcl(s)
+ }
+
+ // Create a forwarding type first and put it in the g.typs
+ // map, in order to deal with recursive generic types
+ // (including via method signatures). Set up the extra
+ // ntyp information (Def, RParams, which may set
+ // HasTParam) before translating the underlying type
+ // itself, so we handle recursion correctly.
+ ntyp := typecheck.NewIncompleteNamedType(g.pos(typ.Obj().Pos()), s)
+ g.typs[typ] = ntyp
+
+ // If ntyp still has type params, then we must be
+ // referencing something like 'value[T2]', as when
+ // specifying the generic receiver of a method, where
+ // value was defined as "type value[T any] ...". Save the
+ // type args, which will now be the new typeparams of the
+ // current type.
+ //
+ // If ntyp does not have type params, we are saving the
+ // non-generic types used to instantiate this type. We'll
+ // use these when instantiating the methods of the
+ // instantiated type.
+ targs := typ.TypeArgs()
+ rparams := make([]*types.Type, targs.Len())
+ for i := range rparams {
+ rparams[i] = g.typ1(targs.At(i))
+ }
+ ntyp.SetRParams(rparams)
+ //fmt.Printf("Saw new type %v %v\n", instName, ntyp.HasTParam())
+
+ // Save the symbol for the base generic type.
+ ntyp.SetOrigType(base.Type())
+ ntyp.SetUnderlying(g.typ1(typ.Underlying()))
+ if typ.NumMethods() != 0 {
+ // Save a delayed call to g.fillinMethods() (once
+ // potentially recursive types have been fully
+ // resolved).
+ g.typesToFinalize = append(g.typesToFinalize,
+ &typeDelayInfo{
+ typ: typ,
+ ntyp: ntyp,
+ })
+ }
+ return ntyp
+ }
+ obj := g.obj(typ.Obj())
+ if obj.Op() != ir.OTYPE {
+ base.FatalfAt(obj.Pos(), "expected type: %L", obj)
+ }
+ return obj.Type()
+
+ case *types2.Array:
+ return types.NewArray(g.typ1(typ.Elem()), typ.Len())
+ case *types2.Chan:
+ return types.NewChan(g.typ1(typ.Elem()), dirs[typ.Dir()])
+ case *types2.Map:
+ return types.NewMap(g.typ1(typ.Key()), g.typ1(typ.Elem()))
+ case *types2.Pointer:
+ return types.NewPtr(g.typ1(typ.Elem()))
+ case *types2.Signature:
+ return g.signature(nil, typ)
+ case *types2.Slice:
+ return types.NewSlice(g.typ1(typ.Elem()))
+
+ case *types2.Struct:
+ fields := make([]*types.Field, typ.NumFields())
+ for i := range fields {
+ v := typ.Field(i)
+ f := types.NewField(g.pos(v), g.selector(v), g.typ1(v.Type()))
+ f.Note = typ.Tag(i)
+ if v.Embedded() {
+ f.Embedded = 1
+ }
+ fields[i] = f
+ }
+ return types.NewStruct(g.tpkg(typ), fields)
+
+ case *types2.Interface:
+ embeddeds := make([]*types.Field, typ.NumEmbeddeds())
+ j := 0
+ for i := range embeddeds {
+ // TODO(mdempsky): Get embedding position.
+ e := typ.EmbeddedType(i)
+
+ // With Go 1.18, an embedded element can be any type, not
+ // just an interface.
+ embeddeds[j] = types.NewField(src.NoXPos, nil, g.typ1(e))
+ j++
+ }
+ embeddeds = embeddeds[:j]
+
+ methods := make([]*types.Field, typ.NumExplicitMethods())
+ for i := range methods {
+ m := typ.ExplicitMethod(i)
+ mtyp := g.signature(types.FakeRecv(), m.Type().(*types2.Signature))
+ methods[i] = types.NewField(g.pos(m), g.selector(m), mtyp)
+ }
+
+ return types.NewInterface(g.tpkg(typ), append(embeddeds, methods...), typ.IsImplicit())
+
+ case *types2.TypeParam:
+ // Save the name of the type parameter in the sym of the type.
+ // Include the types2 subscript in the sym name.
+ pkg := g.tpkg(typ)
+ // Create the unique types1 name for a type param, using its context
+ // with a function, type, or method declaration. Also, map blank type
+ // param names to a unique name based on their type param index. The
+ // unique blank names will be exported, but will be reverted during
+ // types2 and gcimporter import.
+ assert(g.curDecl != "")
+ nm := typecheck.TparamExportName(g.curDecl, typ.Obj().Name(), typ.Index())
+ sym := pkg.Lookup(nm)
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.Type()
+ }
+ tp := types.NewTypeParam(sym, typ.Index())
+ nname := ir.NewDeclNameAt(g.pos(typ.Obj().Pos()), ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(tp)
+ tp.SetNod(nname)
+ // Set g.typs[typ] in case the bound methods reference typ.
+ g.typs[typ] = tp
+
+ bound := g.typ1(typ.Constraint())
+ tp.SetBound(bound)
+ return tp
+
+ case *types2.Union:
+ nt := typ.Len()
+ tlist := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range tlist {
+ t := typ.Term(i)
+ tlist[i] = g.typ1(t.Type())
+ tildes[i] = t.Tilde()
+ }
+ return types.NewUnion(tlist, tildes)
+
+ case *types2.Tuple:
+ // Tuples are used for the type of a function call (i.e. the
+ // return value of the function).
+ if typ == nil {
+ return (*types.Type)(nil)
+ }
+ fields := make([]*types.Field, typ.Len())
+ for i := range fields {
+ fields[i] = g.param(typ.At(i))
+ }
+ t := types.NewStruct(types.LocalPkg, fields)
+ t.StructType().Funarg = types.FunargResults
+ return t
+
+ default:
+ base.FatalfAt(src.NoXPos, "unhandled type: %v (%T)", typ, typ)
+ panic("unreachable")
+ }
+}
+
+// fillinMethods fills in the method name nodes and types for a defined type with at
+// least one method. This is needed for later typechecking when looking up methods of
+// instantiated types, and for actually generating the methods for instantiated
+// types.
+func (g *irgen) fillinMethods(typ *types2.Named, ntyp *types.Type) {
+ targs2 := typ.TypeArgs()
+ targs := make([]*types.Type, targs2.Len())
+ for i := range targs {
+ targs[i] = g.typ1(targs2.At(i))
+ }
+
+ methods := make([]*types.Field, typ.NumMethods())
+ for i := range methods {
+ m := typ.Method(i)
+ recvType := deref2(types2.AsSignature(m.Type()).Recv().Type())
+ var meth *ir.Name
+ imported := false
+ if m.Pkg() != g.self {
+ // Imported methods cannot be loaded by name (what
+ // g.obj() does) - they must be loaded via their
+ // type.
+ meth = g.obj(recvType.(*types2.Named).Obj()).Type().Methods().Index(i).Nname.(*ir.Name)
+ // XXX Because Obj() returns the object of the base generic
+ // type, we still have to do the method translation below.
+ imported = true
+ } else {
+ meth = g.obj(m)
+ }
+ assert(recvType == types2.Type(typ))
+ if imported {
+ // Unfortunately, meth is the type of the method of the
+ // generic type, so we have to do a substitution to get
+ // the name/type of the method of the instantiated type,
+ // using m.Type().RParams() and typ.TArgs()
+ inst2 := g.instTypeName2("", typ.TypeArgs())
+ name := meth.Sym().Name
+ i1 := strings.Index(name, "[")
+ i2 := strings.Index(name[i1:], "]")
+ assert(i1 >= 0 && i2 >= 0)
+ // Generate the name of the instantiated method.
+ name = name[0:i1] + inst2 + name[i1+i2+1:]
+ newsym := meth.Sym().Pkg.Lookup(name)
+ var meth2 *ir.Name
+ if newsym.Def != nil {
+ meth2 = newsym.Def.(*ir.Name)
+ } else {
+ meth2 = ir.NewNameAt(meth.Pos(), newsym)
+ rparams := types2.AsSignature(m.Type()).RecvTypeParams()
+ tparams := make([]*types.Type, rparams.Len())
+ // Set g.curDecl to be the method context, so type
+ // params in the receiver of the method that we are
+ // translating get the right unique name. We could
+ // be in a top-level typeDecl, so save and restore
+ // the current contents of g.curDecl.
+ savedCurDecl := g.curDecl
+ g.curDecl = typ.Obj().Name() + "." + m.Name()
+ for i := range tparams {
+ tparams[i] = g.typ1(rparams.At(i))
+ }
+ g.curDecl = savedCurDecl
+ assert(len(tparams) == len(targs))
+ ts := typecheck.Tsubster{
+ Tparams: tparams,
+ Targs: targs,
+ }
+ // Do the substitution of the type
+ meth2.SetType(ts.Typ(meth.Type()))
+ newsym.Def = meth2
+ }
+ meth = meth2
+ }
+ methods[i] = types.NewField(meth.Pos(), g.selector(m), meth.Type())
+ methods[i].Nname = meth
+ }
+ ntyp.Methods().Set(methods)
+ if !ntyp.HasTParam() && !ntyp.HasShape() {
+ // Generate all the methods for a new fully-instantiated type.
+ typecheck.NeedInstType(ntyp)
+ }
+}
+
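+// signature converts the types2 signature sig, along with an optional
+// receiver field recv, into an equivalent types1 signature type,
+// translating any type parameters, parameters, and results.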
+func (g *irgen) signature(recv *types.Field, sig *types2.Signature) *types.Type {
+ tparams2 := sig.TypeParams()
+ tparams := make([]*types.Field, tparams2.Len())
+ for i := range tparams {
+ tp := tparams2.At(i).Obj()
+ tparams[i] = types.NewField(g.pos(tp), g.sym(tp), g.typ1(tp.Type()))
+ }
+
+ do := func(typ *types2.Tuple) []*types.Field {
+ fields := make([]*types.Field, typ.Len())
+ for i := range fields {
+ fields[i] = g.param(typ.At(i))
+ }
+ return fields
+ }
+ params := do(sig.Params())
+ results := do(sig.Results())
+ if sig.Variadic() {
+ params[len(params)-1].SetIsDDD(true)
+ }
+
+ return types.NewSignature(g.tpkg(sig), recv, tparams, params, results)
+}
+
+func (g *irgen) param(v *types2.Var) *types.Field {
+ return types.NewField(g.pos(v), g.sym(v), g.typ1(v.Type()))
+}
+
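+// sym returns the symbol for obj's name in obj's package, or nil if
+// obj has no name.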
+func (g *irgen) sym(obj types2.Object) *types.Sym {
+ if name := obj.Name(); name != "" {
+ return g.pkg(obj.Pkg()).Lookup(obj.Name())
+ }
+ return nil
+}
+
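+// selector returns the types1 symbol to use for a field or method
+// name. Exported names are looked up in the local package, since
+// exported selectors are not package-qualified.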
+func (g *irgen) selector(obj types2.Object) *types.Sym {
+ pkg, name := g.pkg(obj.Pkg()), obj.Name()
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ return pkg.Lookup(name)
+}
+
+// tpkg returns the package that a function, interface, struct, or typeparam type
+// expression appeared in.
+//
+// Caveat: For the degenerate types "func()", "interface{}", and
+// "struct{}", tpkg always returns LocalPkg. However, we only need the
+// package information so that go/types can report it via its API, and
+// the reason we fail to return the original package for these
+// particular types is because go/types does *not* report it for
+// them. So in practice this limitation is probably moot.
+func (g *irgen) tpkg(typ types2.Type) *types.Pkg {
+ if obj := anyObj(typ); obj != nil {
+ return g.pkg(obj.Pkg())
+ }
+ return types.LocalPkg
+}
+
+// anyObj returns some object accessible from typ, if any.
+func anyObj(typ types2.Type) types2.Object {
+ switch typ := typ.(type) {
+ case *types2.Signature:
+ if recv := typ.Recv(); recv != nil {
+ return recv
+ }
+ if params := typ.Params(); params.Len() > 0 {
+ return params.At(0)
+ }
+ if results := typ.Results(); results.Len() > 0 {
+ return results.At(0)
+ }
+ case *types2.Struct:
+ if typ.NumFields() > 0 {
+ return typ.Field(0)
+ }
+ case *types2.Interface:
+ if typ.NumExplicitMethods() > 0 {
+ return typ.ExplicitMethod(0)
+ }
+ case *types2.TypeParam:
+ return typ.Obj()
+ }
+ return nil
+}
+
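+// basic returns the types1 type corresponding to the types2 basic
+// type typ, mapping "byte" and "rune" to their distinct types1 types.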
+func (g *irgen) basic(typ *types2.Basic) *types.Type {
+ switch typ.Name() {
+ case "byte":
+ return types.ByteType
+ case "rune":
+ return types.RuneType
+ }
+ return *basics[typ.Kind()]
+}
+
+var basics = [...]**types.Type{
+ types2.Invalid: new(*types.Type),
+ types2.Bool: &types.Types[types.TBOOL],
+ types2.Int: &types.Types[types.TINT],
+ types2.Int8: &types.Types[types.TINT8],
+ types2.Int16: &types.Types[types.TINT16],
+ types2.Int32: &types.Types[types.TINT32],
+ types2.Int64: &types.Types[types.TINT64],
+ types2.Uint: &types.Types[types.TUINT],
+ types2.Uint8: &types.Types[types.TUINT8],
+ types2.Uint16: &types.Types[types.TUINT16],
+ types2.Uint32: &types.Types[types.TUINT32],
+ types2.Uint64: &types.Types[types.TUINT64],
+ types2.Uintptr: &types.Types[types.TUINTPTR],
+ types2.Float32: &types.Types[types.TFLOAT32],
+ types2.Float64: &types.Types[types.TFLOAT64],
+ types2.Complex64: &types.Types[types.TCOMPLEX64],
+ types2.Complex128: &types.Types[types.TCOMPLEX128],
+ types2.String: &types.Types[types.TSTRING],
+ types2.UnsafePointer: &types.Types[types.TUNSAFEPTR],
+ types2.UntypedBool: &types.UntypedBool,
+ types2.UntypedInt: &types.UntypedInt,
+ types2.UntypedRune: &types.UntypedRune,
+ types2.UntypedFloat: &types.UntypedFloat,
+ types2.UntypedComplex: &types.UntypedComplex,
+ types2.UntypedString: &types.UntypedString,
+ types2.UntypedNil: &types.Types[types.TNIL],
+}
+
+var dirs = [...]types.ChanDir{
+ types2.SendRecv: types.Cboth,
+ types2.SendOnly: types.Csend,
+ types2.RecvOnly: types.Crecv,
+}
+
+// deref2 does a single deref of types2 type t, if it is a pointer type.
+func deref2(t types2.Type) types2.Type {
+ if ptr := types2.AsPointer(t); ptr != nil {
+ t = ptr.Elem()
+ }
+ return t
+}
diff --git a/src/cmd/compile/internal/noder/unified.go b/src/cmd/compile/internal/noder/unified.go
new file mode 100644
index 0000000..ec0012d
--- /dev/null
+++ b/src/cmd/compile/internal/noder/unified.go
@@ -0,0 +1,334 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "bytes"
+ "fmt"
+ "internal/goversion"
+ "io"
+ "runtime"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+ "cmd/internal/src"
+)
+
+// localPkgReader holds the package reader used for reading the local
+// package. It exists so the unified IR linker can refer back to it
+// later.
+var localPkgReader *pkgReader
+
+// unified constructs the local package's IR from syntax's AST.
+//
+// The pipeline contains 2 steps:
+//
+// (1) Generate package export data "stub".
+//
+// (2) Generate package IR from package export data.
+//
+// The package data "stub" at step (1) contains everything from the local package,
+// but nothing that has been imported. When we're actually writing out export data
+// to the output files (see writeNewExport function), we run the "linker", which does
+// a few things:
+//
+// + Updates compiler extensions data (e.g., inlining cost, escape analysis results).
+//
+// + Handles re-exporting any transitive dependencies.
+//
+// + Prunes out any unnecessary details (e.g., non-inlineable functions, because any
+// downstream importers only care about inlinable functions).
+//
+// The source files are typechecked twice: once before writing export data
+// using the types2 checker, and once after reading export data using gc/typecheck.
+// This duplication of work will go away once we always use the types2 checker
+// and can remove the gc/typecheck pass. The reasons it is still here:
+//
+// + It reduces engineering costs in maintaining a fork of typecheck
+// (e.g., no need to backport fixes like CL 327651).
+//
+// + It makes it easier to pass toolstash -cmp.
+//
+// + Historically, we would always re-run the typechecker after import, even though
+// we know the imported data is valid. It's not ideal, but it isn't causing any
+// problems either.
+//
+// + There are still transformations being done during gc/typecheck, like rewriting
+// multi-valued function calls, or transforming ir.OINDEX -> ir.OINDEXMAP.
+//
+// Using the syntax+types2 tree, which already has a complete representation of generics,
+// the unified IR has the full typed AST for doing introspection during step (1).
+// In other words, we have all necessary information to build the generic IR form
+// (see writer.captureVars for an example).
+func unified(noders []*noder) {
+ inline.NewInline = InlineCall
+
+ if !quirksMode() {
+ writeNewExportFunc = writeNewExport
+ } else if base.Flag.G != 0 {
+ base.Errorf("cannot use -G and -d=quirksmode together")
+ }
+
+ newReadImportFunc = func(data string, pkg1 *types.Pkg, ctxt *types2.Context, packages map[string]*types2.Package) (pkg2 *types2.Package, err error) {
+ pr := newPkgDecoder(pkg1.Path, data)
+
+ // Read package descriptors for both types2 and compiler backend.
+ readPackage(newPkgReader(pr), pkg1)
+ pkg2 = readPackage2(ctxt, packages, pr)
+ return
+ }
+
+ data := writePkgStub(noders)
+
+ // We already passed base.Flag.Lang to types2 to handle validating
+ // the user's source code. Bump it up now to the current version and
+ // re-parse, so typecheck doesn't complain if we construct IR that
+ // utilizes newer Go features.
+ base.Flag.Lang = fmt.Sprintf("go1.%d", goversion.Version)
+ types.ParseLangFlag()
+
+ assert(types.LocalPkg.Path == "")
+ types.LocalPkg.Height = 0 // reset so pkgReader.pkgIdx doesn't complain
+ target := typecheck.Target
+
+ typecheck.TypecheckAllowed = true
+
+ localPkgReader = newPkgReader(newPkgDecoder(types.LocalPkg.Path, data))
+ readPackage(localPkgReader, types.LocalPkg)
+
+ r := localPkgReader.newReader(relocMeta, privateRootIdx, syncPrivate)
+ r.pkgInit(types.LocalPkg, target)
+
+ // Type-check any top-level assignments. We ignore non-assignments
+ // here because other declarations are typechecked as they're
+ // constructed.
+ for i, ndecls := 0, len(target.Decls); i < ndecls; i++ {
+ switch n := target.Decls[i]; n.Op() {
+ case ir.OAS, ir.OAS2:
+ target.Decls[i] = typecheck.Stmt(n)
+ }
+ }
+
+ // Don't use range--bodyIdx can add closures to todoBodies.
+ for len(todoBodies) > 0 {
+ // The order we expand bodies doesn't matter, so pop from the end
+ // to reduce todoBodies reallocations if it grows further.
+ fn := todoBodies[len(todoBodies)-1]
+ todoBodies = todoBodies[:len(todoBodies)-1]
+
+ pri, ok := bodyReader[fn]
+ assert(ok)
+ pri.funcBody(fn)
+
+ // Instantiated generic function: add to Decls for typechecking
+ // and compilation.
+ if fn.OClosure == nil && len(pri.dict.targs) != 0 {
+ target.Decls = append(target.Decls, fn)
+ }
+ }
+ todoBodies = nil
+ todoBodiesDone = true
+
+ // Check that nothing snuck past typechecking.
+ for _, n := range target.Decls {
+ if n.Typecheck() == 0 {
+ base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+ }
+
+ // For functions, check that at least their first statement (if
+ // any) was typechecked too.
+ if fn, ok := n.(*ir.Func); ok && len(fn.Body) != 0 {
+ if stmt := fn.Body[0]; stmt.Typecheck() == 0 {
+ base.FatalfAt(stmt.Pos(), "missed typecheck: %v", stmt)
+ }
+ }
+ }
+
+ base.ExitIfErrors() // just in case
+}
+
+// writePkgStub type checks the given parsed source files,
+// writes an export data package stub representing them,
+// and returns the result.
+func writePkgStub(noders []*noder) string {
+ m, pkg, info := checkFiles(noders)
+
+ pw := newPkgWriter(m, pkg, info)
+
+ pw.collectDecls(noders)
+
+ publicRootWriter := pw.newWriter(relocMeta, syncPublic)
+ privateRootWriter := pw.newWriter(relocMeta, syncPrivate)
+
+ assert(publicRootWriter.idx == publicRootIdx)
+ assert(privateRootWriter.idx == privateRootIdx)
+
+ {
+ w := publicRootWriter
+ w.pkg(pkg)
+ w.bool(false) // has init; XXX
+
+ scope := pkg.Scope()
+ names := scope.Names()
+ w.len(len(names))
+ for _, name := range scope.Names() {
+ w.obj(scope.Lookup(name), nil)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ {
+ w := privateRootWriter
+ w.pkgInit(noders)
+ w.flush()
+ }
+
+ var sb bytes.Buffer // TODO(mdempsky): strings.Builder after #44505 is resolved
+ pw.dump(&sb)
+
+ // At this point, we're done with types2. Make sure the package is
+ // garbage collected.
+ freePackage(pkg)
+
+ return sb.String()
+}
+
+// freePackage ensures the given package is garbage collected.
+func freePackage(pkg *types2.Package) {
+ // The GC test below relies on a precise GC that runs finalizers as
+ // soon as objects are unreachable. Our implementation provides
+ // this, but other/older implementations may not (e.g., Go 1.4 does
+ // not because of #22350). To avoid imposing unnecessary
+ // restrictions on the GOROOT_BOOTSTRAP toolchain, we skip the test
+ // during bootstrapping.
+ if base.CompilerBootstrap {
+ return
+ }
+
+ // Set a finalizer on pkg so we can detect if/when it's collected.
+ done := make(chan struct{})
+ runtime.SetFinalizer(pkg, func(*types2.Package) { close(done) })
+
+ // Important: objects involved in cycles are not finalized, so zero
+ // out pkg to break its cycles and allow the finalizer to run.
+ *pkg = types2.Package{}
+
+ // It typically takes just 1 or 2 cycles to release pkg, but it
+ // doesn't hurt to try a few more times.
+ for i := 0; i < 10; i++ {
+ select {
+ case <-done:
+ return
+ default:
+ runtime.GC()
+ }
+ }
+
+ base.Fatalf("package never finalized")
+}
+
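+// readPackage reads the public root section of importpkg's export
+// data, declaring its .inittask symbol (if any) and recording where
+// each non-stub object can be read back in later.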
+func readPackage(pr *pkgReader, importpkg *types.Pkg) {
+ r := pr.newReader(relocMeta, publicRootIdx, syncPublic)
+
+ pkg := r.pkg()
+ assert(pkg == importpkg)
+
+ if r.bool() {
+ sym := pkg.Lookup(".inittask")
+ task := ir.NewNameAt(src.NoXPos, sym)
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ }
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ assert(!r.bool())
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ path, name, code := r.p.peekObj(idx)
+ if code != objStub {
+ objReader[types.NewPkg(path, "").Lookup(name)] = pkgReaderIndex{pr, idx, nil}
+ }
+ }
+}
+
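+// writeNewExport runs the unified IR linker over the local package's
+// export data and writes the linked result to out, re-emitting the
+// package's exported objects along with a fresh public root section.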
+func writeNewExport(out io.Writer) {
+ l := linker{
+ pw: newPkgEncoder(),
+
+ pkgs: make(map[string]int),
+ decls: make(map[*types.Sym]int),
+ }
+
+ publicRootWriter := l.pw.newEncoder(relocMeta, syncPublic)
+ assert(publicRootWriter.idx == publicRootIdx)
+
+ var selfPkgIdx int
+
+ {
+ pr := localPkgReader
+ r := pr.newDecoder(relocMeta, publicRootIdx, syncPublic)
+
+ r.sync(syncPkg)
+ selfPkgIdx = l.relocIdx(pr, relocPkg, r.reloc(relocPkg))
+
+ r.bool() // has init
+
+ for i, n := 0, r.len(); i < n; i++ {
+ r.sync(syncObject)
+ assert(!r.bool())
+ idx := r.reloc(relocObj)
+ assert(r.len() == 0)
+
+ xpath, xname, xtag := pr.peekObj(idx)
+ assert(xpath == pr.pkgPath)
+ assert(xtag != objStub)
+
+ if types.IsExported(xname) {
+ l.relocIdx(pr, relocObj, idx)
+ }
+ }
+
+ r.sync(syncEOF)
+ }
+
+ {
+ var idxs []int
+ for _, idx := range l.decls {
+ idxs = append(idxs, idx)
+ }
+ sort.Ints(idxs)
+
+ w := publicRootWriter
+
+ w.sync(syncPkg)
+ w.reloc(relocPkg, selfPkgIdx)
+
+ w.bool(typecheck.Lookup(".inittask").Def != nil)
+
+ w.len(len(idxs))
+ for _, idx := range idxs {
+ w.sync(syncObject)
+ w.bool(false)
+ w.reloc(relocObj, idx)
+ w.len(0)
+ }
+
+ w.sync(syncEOF)
+ w.flush()
+ }
+
+ l.pw.dump(out)
+}
diff --git a/src/cmd/compile/internal/noder/unified_test.go b/src/cmd/compile/internal/noder/unified_test.go
new file mode 100644
index 0000000..d7334df
--- /dev/null
+++ b/src/cmd/compile/internal/noder/unified_test.go
@@ -0,0 +1,160 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder_test
+
+import (
+ "encoding/json"
+ "flag"
+ exec "internal/execabs"
+ "os"
+ "reflect"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+var (
+ flagCmp = flag.Bool("cmp", false, "enable TestUnifiedCompare")
+ flagPkgs = flag.String("pkgs", "std", "list of packages to compare (ignored in -short mode)")
+ flagAll = flag.Bool("all", false, "enable testing of all GOOS/GOARCH targets")
+ flagParallel = flag.Bool("parallel", false, "test GOOS/GOARCH targets in parallel")
+)
+
+// TestUnifiedCompare implements a test similar to running:
+//
+// $ go build -toolexec="toolstash -cmp" std
+//
+// The -pkgs flag controls the list of packages tested.
+//
+// By default, only the native GOOS/GOARCH target is enabled. The -all
+// flag enables testing of non-native targets. The -parallel flag
+// additionally enables testing of targets in parallel.
+//
+// Caution: Testing all targets is very resource intensive! On an IBM
+// P920 (dual Intel Xeon Gold 6154 CPUs; 36 cores, 192GB RAM), testing
+// all targets in parallel takes about 5 minutes. Using the 'go test'
+// command's -run flag for subtest matching is recommended for less
+// powerful machines.
+func TestUnifiedCompare(t *testing.T) {
+ // TODO(mdempsky): Either re-enable or delete. Disabled for now to
+ // avoid impeding others' forward progress.
+ if !*flagCmp {
+ t.Skip("skipping TestUnifiedCompare (use -cmp to enable)")
+ }
+
+ targets, err := exec.Command("go", "tool", "dist", "list").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, target := range strings.Fields(string(targets)) {
+ t.Run(target, func(t *testing.T) {
+ parts := strings.Split(target, "/")
+ goos, goarch := parts[0], parts[1]
+
+ if !(*flagAll || goos == runtime.GOOS && goarch == runtime.GOARCH) {
+ t.Skip("skipping non-native target (use -all to enable)")
+ }
+ if *flagParallel {
+ t.Parallel()
+ }
+
+ pkgs1 := loadPackages(t, goos, goarch, "-d=unified=0 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
+ pkgs2 := loadPackages(t, goos, goarch, "-d=unified=1 -d=inlfuncswithclosures=0 -d=unifiedquirks=1 -G=0")
+
+ if len(pkgs1) != len(pkgs2) {
+ t.Fatalf("length mismatch: %v != %v", len(pkgs1), len(pkgs2))
+ }
+
+ for i := range pkgs1 {
+ pkg1 := pkgs1[i]
+ pkg2 := pkgs2[i]
+
+ path := pkg1.ImportPath
+ if path != pkg2.ImportPath {
+ t.Fatalf("mismatched paths: %q != %q", path, pkg2.ImportPath)
+ }
+
+ // Skip packages that don't have any source files (e.g., packages
+ // unsafe, embed/internal/embedtest, and cmd/internal/moddeps).
+ if pkg1.Export == "" && pkg2.Export == "" {
+ continue
+ }
+
+ if pkg1.BuildID == pkg2.BuildID {
+ t.Errorf("package %q: build IDs unexpectedly matched", path)
+ }
+
+ // Unlike toolstash -cmp, we're comparing the same compiler
+ // binary against itself, just with different flags. So we
+ // don't need to worry about skipping over mismatched version
+ // strings, but we do need to account for differing build IDs.
+ //
+ // Fortunately, build IDs are cryptographic 256-bit hashes,
+ // and cmd/go provides us with them up front. So we can just
+ // use them as delimiters to split the files, and then check
+ // that the substrings are all equal.
+ file1 := strings.Split(readFile(t, pkg1.Export), pkg1.BuildID)
+ file2 := strings.Split(readFile(t, pkg2.Export), pkg2.BuildID)
+ if !reflect.DeepEqual(file1, file2) {
+ t.Errorf("package %q: compile output differs", path)
+ }
+ }
+ })
+ }
+}
+
+type pkg struct {
+ ImportPath string
+ Export string
+ BuildID string
+ Incomplete bool
+}
+
+func loadPackages(t *testing.T, goos, goarch, gcflags string) []pkg {
+ args := []string{"list", "-e", "-export", "-json", "-gcflags=all=" + gcflags, "--"}
+ if testing.Short() {
+ t.Log("short testing mode; only testing package runtime")
+ args = append(args, "runtime")
+ } else {
+ args = append(args, strings.Fields(*flagPkgs)...)
+ }
+
+ cmd := exec.Command("go", args...)
+ cmd.Env = append(os.Environ(), "GOOS="+goos, "GOARCH="+goarch)
+ cmd.Stderr = os.Stderr
+ t.Logf("running %v", cmd)
+ stdout, err := cmd.StdoutPipe()
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := cmd.Start(); err != nil {
+ t.Fatal(err)
+ }
+
+ var res []pkg
+ for dec := json.NewDecoder(stdout); dec.More(); {
+ var pkg pkg
+ if err := dec.Decode(&pkg); err != nil {
+ t.Fatal(err)
+ }
+ if pkg.Incomplete {
+ t.Fatalf("incomplete package: %q", pkg.ImportPath)
+ }
+ res = append(res, pkg)
+ }
+ if err := cmd.Wait(); err != nil {
+ t.Fatal(err)
+ }
+ return res
+}
+
+func readFile(t *testing.T, name string) string {
+ buf, err := os.ReadFile(name)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return string(buf)
+}
diff --git a/src/cmd/compile/internal/noder/validate.go b/src/cmd/compile/internal/noder/validate.go
new file mode 100644
index 0000000..dcacae7
--- /dev/null
+++ b/src/cmd/compile/internal/noder/validate.go
@@ -0,0 +1,132 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types"
+ "cmd/compile/internal/types2"
+)
+
+// match reports whether types t1 and t2 are consistent
+// representations for a given expression's type.
+func (g *irgen) match(t1 *types.Type, t2 types2.Type, hasOK bool) bool {
+ tuple, ok := t2.(*types2.Tuple)
+ if !ok {
+ // Not a tuple; can use simple type identity comparison.
+ return types.Identical(t1, g.typ(t2))
+ }
+
+ if hasOK {
+ // For has-ok values, types2 represents the expression's type as a
+ // 2-element tuple, whereas ir just uses the first type and infers
+ // that the second type is boolean. Must match either, since we
+ // sometimes delay the transformation to the ir form.
+ if tuple.Len() == 2 && types.Identical(t1, g.typ(tuple.At(0).Type())) {
+ return true
+ }
+ return types.Identical(t1, g.typ(t2))
+ }
+
+ if t1 == nil || tuple == nil {
+ return t1 == nil && tuple == nil
+ }
+ if !t1.IsFuncArgStruct() {
+ return false
+ }
+ if t1.NumFields() != tuple.Len() {
+ return false
+ }
+ for i, result := range t1.FieldSlice() {
+ if !types.Identical(result.Type, g.typ(tuple.At(i).Type())) {
+ return false
+ }
+ }
+ return true
+}
+
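+// validate performs extra consistency checks on the given syntax
+// node; currently it only checks builtin calls, in particular the
+// unsafe builtins Alignof, Offsetof, and Sizeof.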
+func (g *irgen) validate(n syntax.Node) {
+ switch n := n.(type) {
+ case *syntax.CallExpr:
+ tv := g.info.Types[n.Fun]
+ if tv.IsBuiltin() {
+ fun := n.Fun
+ for {
+ builtin, ok := fun.(*syntax.ParenExpr)
+ if !ok {
+ break
+ }
+ fun = builtin.X
+ }
+ switch builtin := fun.(type) {
+ case *syntax.Name:
+ g.validateBuiltin(builtin.Value, n)
+ case *syntax.SelectorExpr:
+ g.validateBuiltin(builtin.Sel.Value, n)
+ default:
+ g.unhandled("builtin", n)
+ }
+ }
+ }
+}
+
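+// validateBuiltin checks that, for constant calls to unsafe.Alignof,
+// unsafe.Offsetof, and unsafe.Sizeof, the value computed by types2
+// matches the value that cmd/compile computes itself.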
+func (g *irgen) validateBuiltin(name string, call *syntax.CallExpr) {
+ switch name {
+ case "Alignof", "Offsetof", "Sizeof":
+ // Check that types2+gcSizes calculates sizes the same
+ // as cmd/compile does.
+
+ tv := g.info.Types[call]
+ if !tv.IsValue() {
+ base.FatalfAt(g.pos(call), "expected a value")
+ }
+
+ if tv.Value == nil {
+ break // unsafe op is not a constant, so no further validation
+ }
+
+ got, ok := constant.Int64Val(tv.Value)
+ if !ok {
+ base.FatalfAt(g.pos(call), "expected int64 constant value")
+ }
+
+ want := g.unsafeExpr(name, call.ArgList[0])
+ if got != want {
+ base.FatalfAt(g.pos(call), "got %v from types2, but want %v", got, want)
+ }
+ }
+}
+
+// unsafeExpr evaluates the given unsafe builtin function on arg.
+func (g *irgen) unsafeExpr(name string, arg syntax.Expr) int64 {
+ switch name {
+ case "Alignof":
+ return g.typ(g.info.Types[arg].Type).Alignment()
+ case "Sizeof":
+ return g.typ(g.info.Types[arg].Type).Size()
+ }
+
+ // Offsetof
+
+ sel := arg.(*syntax.SelectorExpr)
+ selection := g.info.Selections[sel]
+
+ typ := g.typ(g.info.Types[sel.X].Type)
+ typ = deref(typ)
+
+ var offset int64
+ for _, i := range selection.Index() {
+ // Ensure field offsets have been calculated.
+ types.CalcSize(typ)
+
+ f := typ.Field(i)
+ offset += f.Offset
+ typ = f.Type
+ }
+ return offset
+}
diff --git a/src/cmd/compile/internal/noder/writer.go b/src/cmd/compile/internal/noder/writer.go
new file mode 100644
index 0000000..46e8339
--- /dev/null
+++ b/src/cmd/compile/internal/noder/writer.go
@@ -0,0 +1,1862 @@
+// UNREVIEWED
+
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package noder
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+)
+
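+// A pkgWriter constructs the unified IR export data for a single
+// package. It wraps a pkgEncoder and memoizes the element indices of
+// position bases, packages, types, and objects already written.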
+type pkgWriter struct {
+ pkgEncoder
+
+ m posMap
+ curpkg *types2.Package
+ info *types2.Info
+
+ posBasesIdx map[*syntax.PosBase]int
+ pkgsIdx map[*types2.Package]int
+ typsIdx map[types2.Type]int
+ globalsIdx map[types2.Object]int
+
+ funDecls map[*types2.Func]*syntax.FuncDecl
+ typDecls map[*types2.TypeName]typeDeclGen
+
+ linknames map[types2.Object]string
+ cgoPragmas [][]string
+
+ dups dupTypes
+}
+
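+// newPkgWriter returns a new pkgWriter for writing package pkg, using
+// the given position map and type-checking results.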
+func newPkgWriter(m posMap, pkg *types2.Package, info *types2.Info) *pkgWriter {
+ return &pkgWriter{
+ pkgEncoder: newPkgEncoder(),
+
+ m: m,
+ curpkg: pkg,
+ info: info,
+
+ pkgsIdx: make(map[*types2.Package]int),
+ globalsIdx: make(map[types2.Object]int),
+ typsIdx: make(map[types2.Type]int),
+
+ posBasesIdx: make(map[*syntax.PosBase]int),
+
+ funDecls: make(map[*types2.Func]*syntax.FuncDecl),
+ typDecls: make(map[*types2.TypeName]typeDeclGen),
+
+ linknames: make(map[types2.Object]string),
+ }
+}
+
+func (pw *pkgWriter) errorf(p poser, msg string, args ...interface{}) {
+ base.ErrorfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) fatalf(p poser, msg string, args ...interface{}) {
+ base.FatalfAt(pw.m.pos(p), msg, args...)
+}
+
+func (pw *pkgWriter) unexpected(what string, p poser) {
+ pw.fatalf(p, "unexpected %s: %v (%T)", what, p, p)
+}
+
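+// A writer writes out a single element section of the export data,
+// combining an encoder with per-declaration state such as the local
+// variables in scope and the dictionary of derived types.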
+type writer struct {
+ p *pkgWriter
+
+ encoder
+
+ // TODO(mdempsky): We should be able to prune localsIdx whenever a
+ // scope closes, and then maybe we can just use the same map for
+ // storing the TypeParams too (as their TypeName instead).
+
+ // variables declared within this function
+ localsIdx map[*types2.Var]int
+
+ closureVars []posObj
+ closureVarsIdx map[*types2.Var]int
+
+ dict *writerDict
+ derived bool
+}
+
+// A writerDict tracks types and objects that are used by a declaration.
+type writerDict struct {
+ implicits []*types2.TypeName
+
+ // derived is a slice of type indices for computing derived types
+ // (i.e., types that depend on the declaration's type parameters).
+ derived []derivedInfo
+
+ // derivedIdx maps a Type to its corresponding index within the
+ // derived slice, if present.
+ derivedIdx map[types2.Type]int
+
+ // funcs lists references to generic functions that were
+ // instantiated with derived types (i.e., that require
+ // sub-dictionaries when called at run time).
+ funcs []objInfo
+}
+
+type derivedInfo struct {
+ idx int
+ needed bool
+}
+
+type typeInfo struct {
+ idx int
+ derived bool
+}
+
+type objInfo struct {
+ idx int // index for the generic function declaration
+ explicits []typeInfo // info for the type arguments
+}
+
+func (info objInfo) anyDerived() bool {
+ for _, explicit := range info.explicits {
+ if explicit.derived {
+ return true
+ }
+ }
+ return false
+}
+
+func (info objInfo) equals(other objInfo) bool {
+ if info.idx != other.idx {
+ return false
+ }
+ assert(len(info.explicits) == len(other.explicits))
+ for i, targ := range info.explicits {
+ if targ != other.explicits[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func (pw *pkgWriter) newWriter(k reloc, marker syncMarker) *writer {
+ return &writer{
+ encoder: pw.newEncoder(k, marker),
+ p: pw,
+ }
+}
+
+// @@@ Positions
+
+func (w *writer) pos(p poser) {
+ w.sync(syncPos)
+ pos := p.Pos()
+
+ // TODO(mdempsky): Track down the remaining cases here and fix them.
+ if !w.bool(pos.IsKnown()) {
+ return
+ }
+
+ // TODO(mdempsky): Delta encoding. Also, if there's a b-side, update
+ // its position base too (but not vice versa!).
+ w.posBase(pos.Base())
+ w.uint(pos.Line())
+ w.uint(pos.Col())
+}
+
+func (w *writer) posBase(b *syntax.PosBase) {
+ w.reloc(relocPosBase, w.p.posBaseIdx(b))
+}
+
+func (pw *pkgWriter) posBaseIdx(b *syntax.PosBase) int {
+ if idx, ok := pw.posBasesIdx[b]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPosBase, syncPosBase)
+ w.p.posBasesIdx[b] = w.idx
+
+ w.string(trimFilename(b))
+
+ if !w.bool(b.IsFileBase()) {
+ w.pos(b)
+ w.uint(b.Line())
+ w.uint(b.Col())
+ }
+
+ return w.flush()
+}
+
+// @@@ Packages
+
+func (w *writer) pkg(pkg *types2.Package) {
+ w.sync(syncPkg)
+ w.reloc(relocPkg, w.p.pkgIdx(pkg))
+}
+
+func (pw *pkgWriter) pkgIdx(pkg *types2.Package) int {
+ if idx, ok := pw.pkgsIdx[pkg]; ok {
+ return idx
+ }
+
+ w := pw.newWriter(relocPkg, syncPkgDef)
+ pw.pkgsIdx[pkg] = w.idx
+
+ if pkg == nil {
+ w.string("builtin")
+ } else {
+ var path string
+ if pkg != w.p.curpkg {
+ path = pkg.Path()
+ }
+ w.string(path)
+ w.string(pkg.Name())
+ w.len(pkg.Height())
+
+ w.len(len(pkg.Imports()))
+ for _, imp := range pkg.Imports() {
+ w.pkg(imp)
+ }
+ }
+
+ return w.flush()
+}
+
+// @@@ Types
+
+var anyTypeName = types2.Universe.Lookup("any").(*types2.TypeName)
+
+func (w *writer) typ(typ types2.Type) {
+ w.typInfo(w.p.typIdx(typ, w.dict))
+}
+
+func (w *writer) typInfo(info typeInfo) {
+ w.sync(syncType)
+ if w.bool(info.derived) {
+ w.len(info.idx)
+ w.derived = true
+ } else {
+ w.reloc(relocType, info.idx)
+ }
+}
+
+// typIdx returns the index where the export data description of typ
+// can be read back in. If no such index exists yet, it's created.
+//
+// typIdx also reports whether typ is a derived type; that is, whether
+// its identity depends on type parameters.
+func (pw *pkgWriter) typIdx(typ types2.Type, dict *writerDict) typeInfo {
+ if quirksMode() {
+ typ = pw.dups.orig(typ)
+ }
+
+ if idx, ok := pw.typsIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: false}
+ }
+ if dict != nil {
+ if idx, ok := dict.derivedIdx[typ]; ok {
+ return typeInfo{idx: idx, derived: true}
+ }
+ }
+
+ w := pw.newWriter(relocType, syncTypeIdx)
+ w.dict = dict
+
+ switch typ := typ.(type) {
+ default:
+ base.Fatalf("unexpected type: %v (%T)", typ, typ)
+
+ case *types2.Basic:
+ switch kind := typ.Kind(); {
+ case kind == types2.Invalid:
+ base.Fatalf("unexpected types2.Invalid")
+
+ case types2.Typ[kind] == typ:
+ w.code(typeBasic)
+ w.len(int(kind))
+
+ default:
+ // Handle "byte" and "rune" as references to their TypeName.
+ obj := types2.Universe.Lookup(typ.Name())
+ assert(obj.Type() == typ)
+
+ w.code(typeNamed)
+ w.obj(obj, nil)
+ }
+
+ case *types2.Named:
+ // Type aliases can refer to uninstantiated generic types, so we
+ // might see len(TParams) != 0 && len(TArgs) == 0 here.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
+ assert(typ.TypeParams().Len() == typ.TypeArgs().Len() || typ.TypeArgs().Len() == 0)
+
+ // TODO(mdempsky): Why do we need to loop here?
+ orig := typ
+ for orig.TypeArgs() != nil {
+ orig = orig.Origin()
+ }
+
+ w.code(typeNamed)
+ w.obj(orig.Obj(), typ.TypeArgs())
+
+ case *types2.TypeParam:
+ index := func() int {
+ for idx, name := range w.dict.implicits {
+ if name.Type().(*types2.TypeParam) == typ {
+ return idx
+ }
+ }
+
+ return len(w.dict.implicits) + typ.Index()
+ }()
+
+ w.derived = true
+ w.code(typeTypeParam)
+ w.len(index)
+
+ case *types2.Array:
+ w.code(typeArray)
+ w.uint64(uint64(typ.Len()))
+ w.typ(typ.Elem())
+
+ case *types2.Chan:
+ w.code(typeChan)
+ w.len(int(typ.Dir()))
+ w.typ(typ.Elem())
+
+ case *types2.Map:
+ w.code(typeMap)
+ w.typ(typ.Key())
+ w.typ(typ.Elem())
+
+ case *types2.Pointer:
+ w.code(typePointer)
+ w.typ(typ.Elem())
+
+ case *types2.Signature:
+ base.Assertf(typ.TypeParams() == nil, "unexpected type params: %v", typ)
+ w.code(typeSignature)
+ w.signature(typ)
+
+ case *types2.Slice:
+ w.code(typeSlice)
+ w.typ(typ.Elem())
+
+ case *types2.Struct:
+ w.code(typeStruct)
+ w.structType(typ)
+
+ case *types2.Interface:
+ if typ == anyTypeName.Type() {
+ w.code(typeNamed)
+ w.obj(anyTypeName, nil)
+ break
+ }
+
+ w.code(typeInterface)
+ w.interfaceType(typ)
+
+ case *types2.Union:
+ w.code(typeUnion)
+ w.unionType(typ)
+ }
+
+ if w.derived {
+ idx := len(dict.derived)
+ dict.derived = append(dict.derived, derivedInfo{idx: w.flush()})
+ dict.derivedIdx[typ] = idx
+ return typeInfo{idx: idx, derived: true}
+ }
+
+ pw.typsIdx[typ] = w.idx
+ return typeInfo{idx: w.flush(), derived: false}
+}
+
+func (w *writer) structType(typ *types2.Struct) {
+ w.len(typ.NumFields())
+ for i := 0; i < typ.NumFields(); i++ {
+ f := typ.Field(i)
+ w.pos(f)
+ w.selector(f)
+ w.typ(f.Type())
+ w.string(typ.Tag(i))
+ w.bool(f.Embedded())
+ }
+}
+
+func (w *writer) unionType(typ *types2.Union) {
+ w.len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ t := typ.Term(i)
+ w.bool(t.Tilde())
+ w.typ(t.Type())
+ }
+}
+
+func (w *writer) interfaceType(typ *types2.Interface) {
+ w.len(typ.NumExplicitMethods())
+ w.len(typ.NumEmbeddeds())
+
+ for i := 0; i < typ.NumExplicitMethods(); i++ {
+ m := typ.ExplicitMethod(i)
+ sig := m.Type().(*types2.Signature)
+ assert(sig.TypeParams() == nil)
+
+ w.pos(m)
+ w.selector(m)
+ w.signature(sig)
+ }
+
+ for i := 0; i < typ.NumEmbeddeds(); i++ {
+ w.typ(typ.EmbeddedType(i))
+ }
+}
+
+func (w *writer) signature(sig *types2.Signature) {
+ w.sync(syncSignature)
+ w.params(sig.Params())
+ w.params(sig.Results())
+ w.bool(sig.Variadic())
+}
+
+func (w *writer) params(typ *types2.Tuple) {
+ w.sync(syncParams)
+ w.len(typ.Len())
+ for i := 0; i < typ.Len(); i++ {
+ w.param(typ.At(i))
+ }
+}
+
+func (w *writer) param(param *types2.Var) {
+ w.sync(syncParam)
+ w.pos(param)
+ w.localIdent(param)
+ w.typ(param.Type())
+}
+
+// @@@ Objects
+
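+// obj writes a reference to the given object along with its explicit
+// type arguments, if any. Generic functions instantiated with derived
+// type arguments are instead recorded in the writer's dictionary and
+// referenced by their dictionary index.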
+func (w *writer) obj(obj types2.Object, explicits *types2.TypeList) {
+ explicitInfos := make([]typeInfo, explicits.Len())
+ for i := range explicitInfos {
+ explicitInfos[i] = w.p.typIdx(explicits.At(i), w.dict)
+ }
+ info := objInfo{idx: w.p.objIdx(obj), explicits: explicitInfos}
+
+ if _, ok := obj.(*types2.Func); ok && info.anyDerived() {
+ idx := -1
+ for i, prev := range w.dict.funcs {
+ if prev.equals(info) {
+ idx = i
+ }
+ }
+ if idx < 0 {
+ idx = len(w.dict.funcs)
+ w.dict.funcs = append(w.dict.funcs, info)
+ }
+
+ // TODO(mdempsky): Push up into expr; this shouldn't appear
+ // outside of expression context.
+ w.sync(syncObject)
+ w.bool(true)
+ w.len(idx)
+ return
+ }
+
+ // TODO(mdempsky): Push up into typIdx; this shouldn't be needed
+ // except while writing out types.
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if len(decl.implicits) != 0 {
+ w.derived = true
+ }
+ }
+
+ w.sync(syncObject)
+ w.bool(false)
+ w.reloc(relocObj, info.idx)
+
+ w.len(len(info.explicits))
+ for _, info := range info.explicits {
+ w.typInfo(info)
+ }
+}
+
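+// objIdx returns the index of the export data entry for obj, writing
+// the entry (and its name, extension, and dictionary sections) first
+// if it hasn't been written yet.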
+func (pw *pkgWriter) objIdx(obj types2.Object) int {
+ if idx, ok := pw.globalsIdx[obj]; ok {
+ return idx
+ }
+
+ dict := &writerDict{
+ derivedIdx: make(map[types2.Type]int),
+ }
+
+ if isDefinedType(obj) && obj.Pkg() == pw.curpkg {
+ decl, ok := pw.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ dict.implicits = decl.implicits
+ }
+
+ w := pw.newWriter(relocObj, syncObject1)
+ wext := pw.newWriter(relocObjExt, syncObject1)
+ wname := pw.newWriter(relocName, syncObject1)
+ wdict := pw.newWriter(relocObjDict, syncObject1)
+
+ pw.globalsIdx[obj] = w.idx // break cycles
+ assert(wext.idx == w.idx)
+ assert(wname.idx == w.idx)
+ assert(wdict.idx == w.idx)
+
+ w.dict = dict
+ wext.dict = dict
+
+ code := w.doObj(wext, obj)
+ w.flush()
+ wext.flush()
+
+ wname.qualifiedIdent(obj)
+ wname.code(code)
+ wname.flush()
+
+ wdict.objDict(obj, w.dict)
+ wdict.flush()
+
+ return w.idx
+}
+
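+// doObj writes the export data description of obj into w (and its
+// compiler extension data into wext), returning the code for the kind
+// of object written. Objects from other packages are written as stubs.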
+func (w *writer) doObj(wext *writer, obj types2.Object) codeObj {
+ if obj.Pkg() != w.p.curpkg {
+ return objStub
+ }
+
+ switch obj := obj.(type) {
+ default:
+ w.p.unexpected("object", obj)
+ panic("unreachable")
+
+ case *types2.Const:
+ w.pos(obj)
+ w.typ(obj.Type())
+ w.value(obj.Val())
+ return objConst
+
+ case *types2.Func:
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+ sig := obj.Type().(*types2.Signature)
+
+ w.pos(obj)
+ w.typeParamNames(sig.TypeParams())
+ w.signature(sig)
+ w.pos(decl)
+ wext.funcExt(obj)
+ return objFunc
+
+ case *types2.TypeName:
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ if obj.IsAlias() {
+ w.pos(obj)
+ w.typ(obj.Type())
+ return objAlias
+ }
+
+ named := obj.Type().(*types2.Named)
+ assert(named.TypeArgs() == nil)
+
+ w.pos(obj)
+ w.typeParamNames(named.TypeParams())
+ wext.typeExt(obj)
+ w.typExpr(decl.Type)
+
+ w.len(named.NumMethods())
+ for i := 0; i < named.NumMethods(); i++ {
+ w.method(wext, named.Method(i))
+ }
+
+ return objType
+
+ case *types2.Var:
+ w.pos(obj)
+ w.typ(obj.Type())
+ wext.varExt(obj)
+ return objVar
+ }
+}
+
+// typExpr writes the type represented by the given expression.
+func (w *writer) typExpr(expr syntax.Expr) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ assert(tv.IsType())
+ w.typ(tv.Type)
+}
+
+// objDict writes the dictionary needed for reading the given object.
+func (w *writer) objDict(obj types2.Object, dict *writerDict) {
+ // TODO(mdempsky): Split objDict into multiple entries? reader.go
+ // doesn't care about the type parameter bounds, and reader2.go
+ // doesn't care about referenced functions.
+
+ w.dict = dict // TODO(mdempsky): This is a bit sketchy.
+
+ w.len(len(dict.implicits))
+
+ tparams := objTypeParams(obj)
+ ntparams := tparams.Len()
+ w.len(ntparams)
+ for i := 0; i < ntparams; i++ {
+ w.typ(tparams.At(i).Constraint())
+ }
+
+ nderived := len(dict.derived)
+ w.len(nderived)
+ for _, typ := range dict.derived {
+ w.reloc(relocType, typ.idx)
+ w.bool(typ.needed)
+ }
+
+ nfuncs := len(dict.funcs)
+ w.len(nfuncs)
+ for _, fn := range dict.funcs {
+ w.reloc(relocObj, fn.idx)
+ w.len(len(fn.explicits))
+ for _, targ := range fn.explicits {
+ w.typInfo(targ)
+ }
+ }
+
+ assert(len(dict.derived) == nderived)
+ assert(len(dict.funcs) == nfuncs)
+}
+
+func (w *writer) typeParamNames(tparams *types2.TypeParamList) {
+ w.sync(syncTypeParamNames)
+
+ ntparams := tparams.Len()
+ for i := 0; i < ntparams; i++ {
+ tparam := tparams.At(i).Obj()
+ w.pos(tparam)
+ w.localIdent(tparam)
+ }
+}
+
+func (w *writer) method(wext *writer, meth *types2.Func) {
+ decl, ok := w.p.funDecls[meth]
+ assert(ok)
+ sig := meth.Type().(*types2.Signature)
+
+ w.sync(syncMethod)
+ w.pos(meth)
+ w.selector(meth)
+ w.typeParamNames(sig.RecvTypeParams())
+ w.param(sig.Recv())
+ w.signature(sig)
+
+ w.pos(decl) // XXX: Hack to work around linker limitations.
+ wext.funcExt(meth)
+}
+
+// qualifiedIdent writes out the name of an object declared at package
+// scope. (For now, it's also used to refer to local defined types.)
+func (w *writer) qualifiedIdent(obj types2.Object) {
+ w.sync(syncSym)
+
+ name := obj.Name()
+ if isDefinedType(obj) && obj.Pkg() == w.p.curpkg {
+ decl, ok := w.p.typDecls[obj.(*types2.TypeName)]
+ assert(ok)
+ if decl.gen != 0 {
+ // TODO(mdempsky): Find a better solution than embedding middle
+ // dot in the symbol name; this is terrible.
+ name = fmt.Sprintf("%s·%v", name, decl.gen)
+ }
+ }
+
+ w.pkg(obj.Pkg())
+ w.string(name)
+}
+
+// TODO(mdempsky): We should be able to omit pkg from both localIdent
+// and selector, because they should always be known from context.
+// However, past frustrations with this optimization in iexport make
+// me a little nervous to try it again.
+
+// localIdent writes the name of a locally declared object (i.e.,
+// objects that can only be accessed by name, within the context of a
+// particular function).
+func (w *writer) localIdent(obj types2.Object) {
+ assert(!isGlobal(obj))
+ w.sync(syncLocalIdent)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// selector writes the name of a field or method (i.e., objects that
+// can only be accessed using selector expressions).
+func (w *writer) selector(obj types2.Object) {
+ w.sync(syncSelector)
+ w.pkg(obj.Pkg())
+ w.string(obj.Name())
+}
+
+// @@@ Compiler extensions
+
+func (w *writer) funcExt(obj *types2.Func) {
+ decl, ok := w.p.funDecls[obj]
+ assert(ok)
+
+ // TODO(mdempsky): Extend these pragma validation flags to account
+ // for generics. E.g., linkname probably doesn't make sense at
+ // least.
+
+ pragma := asPragmaFlag(decl.Pragma)
+ if pragma&ir.Systemstack != 0 && pragma&ir.Nosplit != 0 {
+ w.p.errorf(decl, "go:nosplit and go:systemstack cannot be combined")
+ }
+
+ if decl.Body != nil {
+ if pragma&ir.Noescape != 0 {
+ w.p.errorf(decl, "can only use //go:noescape with external func implementations")
+ }
+ } else {
+ if base.Flag.Complete || decl.Name.Value == "init" {
+ // Linknamed functions are allowed to have no body. Hopefully
+ // the linkname target has a body. See issue 23311.
+ if _, ok := w.p.linknames[obj]; !ok {
+ w.p.errorf(decl, "missing function body")
+ }
+ }
+ }
+
+ sig, block := obj.Type().(*types2.Signature), decl.Body
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, block, w.dict)
+ assert(len(closureVars) == 0)
+
+ w.sync(syncFuncExt)
+ w.pragmaFlag(pragma)
+ w.linkname(obj)
+ w.bool(false) // stub extension
+ w.reloc(relocBody, body)
+ w.sync(syncEOF)
+}
+
+func (w *writer) typeExt(obj *types2.TypeName) {
+ decl, ok := w.p.typDecls[obj]
+ assert(ok)
+
+ w.sync(syncTypeExt)
+
+ w.pragmaFlag(asPragmaFlag(decl.Pragma))
+
+ // No LSym.SymIdx info yet.
+ w.int64(-1)
+ w.int64(-1)
+}
+
+func (w *writer) varExt(obj *types2.Var) {
+ w.sync(syncVarExt)
+ w.linkname(obj)
+}
+
+func (w *writer) linkname(obj types2.Object) {
+ w.sync(syncLinkname)
+ w.int64(-1)
+ w.string(w.p.linknames[obj])
+}
+
+func (w *writer) pragmaFlag(p ir.PragmaFlag) {
+ w.sync(syncPragma)
+ w.int(int(p))
+}
+
+// @@@ Function bodies
+
+func (pw *pkgWriter) bodyIdx(pkg *types2.Package, sig *types2.Signature, block *syntax.BlockStmt, dict *writerDict) (idx int, closureVars []posObj) {
+ w := pw.newWriter(relocBody, syncFuncBody)
+ w.dict = dict
+
+ w.funcargs(sig)
+ if w.bool(block != nil) {
+ w.stmts(block.List)
+ w.pos(block.Rbrace)
+ }
+
+ return w.flush(), w.closureVars
+}
+
+func (w *writer) funcargs(sig *types2.Signature) {
+ do := func(params *types2.Tuple, result bool) {
+ for i := 0; i < params.Len(); i++ {
+ w.funcarg(params.At(i), result)
+ }
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.funcarg(recv, false)
+ }
+ do(sig.Params(), false)
+ do(sig.Results(), true)
+}
+
+func (w *writer) funcarg(param *types2.Var, result bool) {
+ if param.Name() != "" || result {
+ w.addLocal(param)
+ }
+}
+
+func (w *writer) addLocal(obj *types2.Var) {
+ w.sync(syncAddLocal)
+ idx := len(w.localsIdx)
+ if enableSync {
+ w.int(idx)
+ }
+ if w.localsIdx == nil {
+ w.localsIdx = make(map[*types2.Var]int)
+ }
+ w.localsIdx[obj] = idx
+}
+
+func (w *writer) useLocal(pos syntax.Pos, obj *types2.Var) {
+ w.sync(syncUseObjLocal)
+
+ if idx, ok := w.localsIdx[obj]; w.bool(ok) {
+ w.len(idx)
+ return
+ }
+
+ idx, ok := w.closureVarsIdx[obj]
+ if !ok {
+ if w.closureVarsIdx == nil {
+ w.closureVarsIdx = make(map[*types2.Var]int)
+ }
+ idx = len(w.closureVars)
+ w.closureVars = append(w.closureVars, posObj{pos, obj})
+ w.closureVarsIdx[obj] = idx
+ }
+ w.len(idx)
+}
+
+func (w *writer) openScope(pos syntax.Pos) {
+ w.sync(syncOpenScope)
+ w.pos(pos)
+}
+
+func (w *writer) closeScope(pos syntax.Pos) {
+ w.sync(syncCloseScope)
+ w.pos(pos)
+ w.closeAnotherScope()
+}
+
+func (w *writer) closeAnotherScope() {
+ w.sync(syncCloseAnotherScope)
+}
+
+// @@@ Statements
+
+func (w *writer) stmt(stmt syntax.Stmt) {
+ var stmts []syntax.Stmt
+ if stmt != nil {
+ stmts = []syntax.Stmt{stmt}
+ }
+ w.stmts(stmts)
+}
+
+func (w *writer) stmts(stmts []syntax.Stmt) {
+ w.sync(syncStmts)
+ for _, stmt := range stmts {
+ w.stmt1(stmt)
+ }
+ w.code(stmtEnd)
+ w.sync(syncStmtsEnd)
+}
+
+func (w *writer) stmt1(stmt syntax.Stmt) {
+ switch stmt := stmt.(type) {
+ default:
+ w.p.unexpected("statement", stmt)
+
+ case nil, *syntax.EmptyStmt:
+ return
+
+ case *syntax.AssignStmt:
+ switch {
+ case stmt.Rhs == nil:
+ w.code(stmtIncDec)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+
+ case stmt.Op != 0 && stmt.Op != syntax.Def:
+ w.code(stmtAssignOp)
+ w.op(binOps[stmt.Op])
+ w.expr(stmt.Lhs)
+ w.pos(stmt)
+ w.expr(stmt.Rhs)
+
+ default:
+ w.code(stmtAssign)
+ w.pos(stmt)
+ w.exprList(stmt.Rhs)
+ w.assignList(stmt.Lhs)
+ }
+
+ case *syntax.BlockStmt:
+ w.code(stmtBlock)
+ w.blockStmt(stmt)
+
+ case *syntax.BranchStmt:
+ w.code(stmtBranch)
+ w.pos(stmt)
+ w.op(branchOps[stmt.Tok])
+ w.optLabel(stmt.Label)
+
+ case *syntax.CallStmt:
+ w.code(stmtCall)
+ w.pos(stmt)
+ w.op(callOps[stmt.Tok])
+ w.expr(stmt.Call)
+
+ case *syntax.DeclStmt:
+ for _, decl := range stmt.DeclList {
+ w.declStmt(decl)
+ }
+
+ case *syntax.ExprStmt:
+ w.code(stmtExpr)
+ w.expr(stmt.X)
+
+ case *syntax.ForStmt:
+ w.code(stmtFor)
+ w.forStmt(stmt)
+
+ case *syntax.IfStmt:
+ w.code(stmtIf)
+ w.ifStmt(stmt)
+
+ case *syntax.LabeledStmt:
+ w.code(stmtLabel)
+ w.pos(stmt)
+ w.label(stmt.Label)
+ w.stmt1(stmt.Stmt)
+
+ case *syntax.ReturnStmt:
+ w.code(stmtReturn)
+ w.pos(stmt)
+ w.exprList(stmt.Results)
+
+ case *syntax.SelectStmt:
+ w.code(stmtSelect)
+ w.selectStmt(stmt)
+
+ case *syntax.SendStmt:
+ w.code(stmtSend)
+ w.pos(stmt)
+ w.expr(stmt.Chan)
+ w.expr(stmt.Value)
+
+ case *syntax.SwitchStmt:
+ w.code(stmtSwitch)
+ w.switchStmt(stmt)
+ }
+}
+
+func (w *writer) assignList(expr syntax.Expr) {
+ exprs := unpackListExpr(expr)
+ w.len(len(exprs))
+
+ for _, expr := range exprs {
+ if name, ok := expr.(*syntax.Name); ok && name.Value != "_" {
+ if obj, ok := w.p.info.Defs[name]; ok {
+ obj := obj.(*types2.Var)
+
+ w.bool(true)
+ w.pos(obj)
+ w.localIdent(obj)
+ w.typ(obj.Type())
+
+ // TODO(mdempsky): Minimize locals index size by deferring
+ // this until the variables actually come into scope.
+ w.addLocal(obj)
+ continue
+ }
+ }
+
+ w.bool(false)
+ w.expr(expr)
+ }
+}
+
+func (w *writer) declStmt(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ConstDecl:
+
+ case *syntax.TypeDecl:
+ // Quirk: The legacy inliner doesn't support inlining functions
+ // with type declarations. Unified IR doesn't have any need to
+ // write out type declarations explicitly (they're always looked
+ // up via global index tables instead), so we just write out a
+ // marker so the reader knows to synthesize a fake declaration to
+ // prevent inlining.
+ if quirksMode() {
+ w.code(stmtTypeDeclHack)
+ }
+
+ case *syntax.VarDecl:
+ values := unpackListExpr(decl.Values)
+
+ // Quirk: When N variables are declared with N initialization
+ // values, we need to decompose that into N interleaved
+ // declarations+initializations, because it leads to different
+ // (albeit semantically equivalent) code generation.
+ if quirksMode() && len(decl.NameList) == len(values) {
+ for i, name := range decl.NameList {
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(values[i])
+ w.assignList(name)
+ }
+ break
+ }
+
+ w.code(stmtAssign)
+ w.pos(decl)
+ w.exprList(decl.Values)
+ w.assignList(namesAsExpr(decl.NameList))
+ }
+}
+
+func (w *writer) blockStmt(stmt *syntax.BlockStmt) {
+ w.sync(syncBlockStmt)
+ w.openScope(stmt.Pos())
+ w.stmts(stmt.List)
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) forStmt(stmt *syntax.ForStmt) {
+ w.sync(syncForStmt)
+ w.openScope(stmt.Pos())
+
+ if rang, ok := stmt.Init.(*syntax.RangeClause); w.bool(ok) {
+ w.pos(rang)
+ w.expr(rang.X)
+ w.assignList(rang.Lhs)
+ } else {
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.stmt(stmt.Post)
+ }
+
+ w.blockStmt(stmt.Body)
+ w.closeAnotherScope()
+}
+
+func (w *writer) ifStmt(stmt *syntax.IfStmt) {
+ w.sync(syncIfStmt)
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+ w.expr(stmt.Cond)
+ w.blockStmt(stmt.Then)
+ w.stmt(stmt.Else)
+ w.closeAnotherScope()
+}
+
+func (w *writer) selectStmt(stmt *syntax.SelectStmt) {
+ w.sync(syncSelectStmt)
+
+ w.pos(stmt)
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.stmt(clause.Comm)
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+}
+
+func (w *writer) switchStmt(stmt *syntax.SwitchStmt) {
+ w.sync(syncSwitchStmt)
+
+ w.openScope(stmt.Pos())
+ w.pos(stmt)
+ w.stmt(stmt.Init)
+
+ if guard, ok := stmt.Tag.(*syntax.TypeSwitchGuard); w.bool(ok) {
+ w.pos(guard)
+ if tag := guard.Lhs; w.bool(tag != nil) {
+ w.pos(tag)
+ w.string(tag.Value)
+ }
+ w.expr(guard.X)
+ } else {
+ w.expr(stmt.Tag)
+ }
+
+ w.len(len(stmt.Body))
+ for i, clause := range stmt.Body {
+ if i > 0 {
+ w.closeScope(clause.Pos())
+ }
+ w.openScope(clause.Pos())
+
+ w.pos(clause)
+ w.exprList(clause.Cases)
+
+ if obj, ok := w.p.info.Implicits[clause]; ok {
+ // TODO(mdempsky): These pos details are quirkish, but also
+ // necessary so the variable's position is correct for DWARF
+ // scope assignment later. It would probably be better for us to
+ // instead just set the variable's DWARF scoping info earlier so
+ // we can give it the correct position information.
+ pos := clause.Pos()
+ if typs := unpackListExpr(clause.Cases); len(typs) != 0 {
+ pos = typeExprEndPos(typs[len(typs)-1])
+ }
+ w.pos(pos)
+
+ obj := obj.(*types2.Var)
+ w.typ(obj.Type())
+ w.addLocal(obj)
+ }
+
+ w.stmts(clause.Body)
+ }
+ if len(stmt.Body) > 0 {
+ w.closeScope(stmt.Rbrace)
+ }
+
+ w.closeScope(stmt.Rbrace)
+}
+
+func (w *writer) label(label *syntax.Name) {
+ w.sync(syncLabel)
+
+ // TODO(mdempsky): Replace label strings with dense indices.
+ w.string(label.Value)
+}
+
+func (w *writer) optLabel(label *syntax.Name) {
+ w.sync(syncOptLabel)
+ if w.bool(label != nil) {
+ w.label(label)
+ }
+}
+
+// @@@ Expressions
+
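+// expr writes the given expression, handling type expressions,
+// constant values, and references to global and local objects before
+// falling back to the individual syntax cases.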
+func (w *writer) expr(expr syntax.Expr) {
+ expr = unparen(expr) // skip parens; unneeded after typecheck
+
+ obj, inst := lookupObj(w.p.info, expr)
+ targs := inst.TypeArgs
+
+ if tv, ok := w.p.info.Types[expr]; ok {
+ // TODO(mdempsky): Be more judicious about which types are marked as "needed".
+ if inst.Type != nil {
+ w.needType(inst.Type)
+ } else {
+ w.needType(tv.Type)
+ }
+
+ if tv.IsType() {
+ w.code(exprType)
+ w.typ(tv.Type)
+ return
+ }
+
+ if tv.Value != nil {
+ pos := expr.Pos()
+ if quirksMode() {
+ if obj != nil {
+ // Quirk: IR (and thus iexport) doesn't track position
+ // information for uses of declared objects.
+ pos = syntax.Pos{}
+ } else if tv.Value.Kind() == constant.String {
+ // Quirk: noder.sum picks a particular position for certain
+ // string concatenations.
+ pos = sumPos(expr)
+ }
+ }
+
+ w.code(exprConst)
+ w.pos(pos)
+ w.typ(tv.Type)
+ w.value(tv.Value)
+
+ // TODO(mdempsky): These details are only important for backend
+ // diagnostics. Explore writing them out separately.
+ w.op(constExprOp(expr))
+ w.string(syntax.String(expr))
+ return
+ }
+ }
+
+ if obj != nil {
+ if isGlobal(obj) {
+ w.code(exprName)
+ w.obj(obj, targs)
+ return
+ }
+
+ obj := obj.(*types2.Var)
+ assert(!obj.IsField())
+ assert(targs.Len() == 0)
+
+ w.code(exprLocal)
+ w.useLocal(expr.Pos(), obj)
+ return
+ }
+
+ switch expr := expr.(type) {
+ default:
+ w.p.unexpected("expression", expr)
+
+ case nil: // absent slice index, for condition, or switch tag
+ w.code(exprNone)
+
+ case *syntax.Name:
+ assert(expr.Value == "_")
+ w.code(exprBlank)
+
+ case *syntax.CompositeLit:
+ w.code(exprCompLit)
+ w.compLit(expr)
+
+ case *syntax.FuncLit:
+ w.code(exprFuncLit)
+ w.funcLit(expr)
+
+ case *syntax.SelectorExpr:
+ sel, ok := w.p.info.Selections[expr]
+ assert(ok)
+
+ w.code(exprSelector)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.selector(sel.Obj())
+
+ case *syntax.IndexExpr:
+ tv, ok := w.p.info.Types[expr.Index]
+ assert(ok && tv.IsValue())
+
+ w.code(exprIndex)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Index)
+
+ case *syntax.SliceExpr:
+ w.code(exprSlice)
+ w.expr(expr.X)
+ w.pos(expr)
+ for _, n := range &expr.Index {
+ w.expr(n)
+ }
+
+ case *syntax.AssertExpr:
+ w.code(exprAssert)
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Type)
+
+ case *syntax.Operation:
+ if expr.Y == nil {
+ w.code(exprUnaryOp)
+ w.op(unOps[expr.Op])
+ w.pos(expr)
+ w.expr(expr.X)
+ break
+ }
+
+ w.code(exprBinaryOp)
+ w.op(binOps[expr.Op])
+ w.expr(expr.X)
+ w.pos(expr)
+ w.expr(expr.Y)
+
+ case *syntax.CallExpr:
+ tv, ok := w.p.info.Types[expr.Fun]
+ assert(ok)
+ if tv.IsType() {
+ assert(len(expr.ArgList) == 1)
+ assert(!expr.HasDots)
+
+ w.code(exprConvert)
+ w.typ(tv.Type)
+ w.pos(expr)
+ w.expr(expr.ArgList[0])
+ break
+ }
+
+ writeFunExpr := func() {
+ if selector, ok := unparen(expr.Fun).(*syntax.SelectorExpr); ok {
+ if sel, ok := w.p.info.Selections[selector]; ok && sel.Kind() == types2.MethodVal {
+ w.expr(selector.X)
+ w.bool(true) // method call
+ w.pos(selector)
+ w.selector(sel.Obj())
+ return
+ }
+ }
+
+ w.expr(expr.Fun)
+ w.bool(false) // not a method call (i.e., normal function call)
+ }
+
+ w.code(exprCall)
+ writeFunExpr()
+ w.pos(expr)
+ w.exprs(expr.ArgList)
+ w.bool(expr.HasDots)
+ }
+}
+
+func (w *writer) compLit(lit *syntax.CompositeLit) {
+ tv, ok := w.p.info.Types[lit]
+ assert(ok)
+
+ w.sync(syncCompLit)
+ w.pos(lit)
+ w.typ(tv.Type)
+
+ typ := tv.Type
+ if ptr, ok := types2.CoreType(typ).(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ str, isStruct := types2.CoreType(typ).(*types2.Struct)
+
+ w.len(len(lit.ElemList))
+ for i, elem := range lit.ElemList {
+ if isStruct {
+ if kv, ok := elem.(*syntax.KeyValueExpr); ok {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.len(fieldIndex(w.p.info, str, kv.Key.(*syntax.Name)))
+ elem = kv.Value
+ } else {
+ w.pos(elem)
+ w.len(i)
+ }
+ } else {
+ if kv, ok := elem.(*syntax.KeyValueExpr); w.bool(ok) {
+ // use position of expr.Key rather than of elem (which has position of ':')
+ w.pos(kv.Key)
+ w.expr(kv.Key)
+ elem = kv.Value
+ }
+ }
+ w.pos(elem)
+ w.expr(elem)
+ }
+}
+
+func (w *writer) funcLit(expr *syntax.FuncLit) {
+ tv, ok := w.p.info.Types[expr]
+ assert(ok)
+ sig := tv.Type.(*types2.Signature)
+
+ body, closureVars := w.p.bodyIdx(w.p.curpkg, sig, expr.Body, w.dict)
+
+ w.sync(syncFuncLit)
+ w.pos(expr)
+ w.pos(expr.Type) // for QuirksMode
+ w.signature(sig)
+
+ w.len(len(closureVars))
+ for _, cv := range closureVars {
+ w.pos(cv.pos)
+ if quirksMode() {
+ cv.pos = expr.Body.Rbrace
+ }
+ w.useLocal(cv.pos, cv.obj)
+ }
+
+ w.reloc(relocBody, body)
+}
+
+type posObj struct {
+ pos syntax.Pos
+ obj *types2.Var
+}
+
+func (w *writer) exprList(expr syntax.Expr) {
+ w.sync(syncExprList)
+ w.exprs(unpackListExpr(expr))
+}
+
+func (w *writer) exprs(exprs []syntax.Expr) {
+ if len(exprs) == 0 {
+ assert(exprs == nil)
+ }
+
+ w.sync(syncExprs)
+ w.len(len(exprs))
+ for _, expr := range exprs {
+ w.expr(expr)
+ }
+}
+
+func (w *writer) op(op ir.Op) {
+ // TODO(mdempsky): Remove in favor of explicit codes? Would make
+ // export data more stable against internal refactorings, but low
+ // priority at the moment.
+ assert(op != 0)
+ w.sync(syncOp)
+ w.len(int(op))
+}
+
+func (w *writer) needType(typ types2.Type) {
+ // Decompose tuple into component element types.
+ if typ, ok := typ.(*types2.Tuple); ok {
+ for i := 0; i < typ.Len(); i++ {
+ w.needType(typ.At(i).Type())
+ }
+ return
+ }
+
+ if info := w.p.typIdx(typ, w.dict); info.derived {
+ w.dict.derived[info.idx].needed = true
+ }
+}
+
+// @@@ Package initialization
+
+// Caution: This code is still clumsy, because toolstash -cmp is
+// particularly sensitive to it.
+
+type typeDeclGen struct {
+ *syntax.TypeDecl
+ gen int
+
+ // Implicit type parameters in scope at this type declaration.
+ implicits []*types2.TypeName
+}
+
+type fileImports struct {
+ importedEmbed, importedUnsafe bool
+}
+
+type declCollector struct {
+ pw *pkgWriter
+ typegen *int
+ file *fileImports
+ withinFunc bool
+ implicits []*types2.TypeName
+}
+
+func (c *declCollector) withTParams(obj types2.Object) *declCollector {
+ tparams := objTypeParams(obj)
+ n := tparams.Len()
+ if n == 0 {
+ return c
+ }
+
+ copy := *c
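+ // Clip the capacity of implicits so the appends below allocate a
+ // fresh backing array instead of sharing the parent collector's.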
+ copy.implicits = copy.implicits[:len(copy.implicits):len(copy.implicits)]
+ for i := 0; i < n; i++ {
+ copy.implicits = append(copy.implicits, tparams.At(i).Obj())
+ }
+ return &copy
+}
+
+func (c *declCollector) Visit(n syntax.Node) syntax.Visitor {
+ pw := c.pw
+
+ switch n := n.(type) {
+ case *syntax.File:
+ pw.checkPragmas(n.Pragma, ir.GoBuildPragma, false)
+
+ case *syntax.ImportDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ switch pkgNameOf(pw.info, n).Imported().Path() {
+ case "embed":
+ c.file.importedEmbed = true
+ case "unsafe":
+ c.file.importedUnsafe = true
+ }
+
+ case *syntax.ConstDecl:
+ pw.checkPragmas(n.Pragma, 0, false)
+
+ case *syntax.FuncDecl:
+ pw.checkPragmas(n.Pragma, funcPragmas, false)
+
+ obj := pw.info.Defs[n.Name].(*types2.Func)
+ pw.funDecls[obj] = n
+
+ return c.withTParams(obj)
+
+ case *syntax.TypeDecl:
+ obj := pw.info.Defs[n.Name].(*types2.TypeName)
+ d := typeDeclGen{TypeDecl: n, implicits: c.implicits}
+
+ if n.Alias {
+ pw.checkPragmas(n.Pragma, 0, false)
+ } else {
+ pw.checkPragmas(n.Pragma, typePragmas, false)
+
+ // Assign a unique ID to function-scoped defined types.
+ if c.withinFunc {
+ *c.typegen++
+ d.gen = *c.typegen
+ }
+ }
+
+ pw.typDecls[obj] = d
+
+ // TODO(mdempsky): Omit? Not strictly necessary; only matters for
+ // type declarations within function literals within parameterized
+ // type declarations, but in types2 the function literals will be
+ // constant folded away.
+ return c.withTParams(obj)
+
+ case *syntax.VarDecl:
+ pw.checkPragmas(n.Pragma, 0, true)
+
+ if p, ok := n.Pragma.(*pragmas); ok && len(p.Embeds) > 0 {
+ if err := checkEmbed(n, c.file.importedEmbed, c.withinFunc); err != nil {
+ pw.errorf(p.Embeds[0].Pos, "%s", err)
+ }
+ }
+
+ // Workaround for #46208. For variable declarations that
+ // declare multiple variables and have an explicit type
+ // expression, the type expression is evaluated multiple
+ // times. This affects toolstash -cmp, because iexport is
+ // sensitive to *types.Type pointer identity.
+ if quirksMode() && n.Type != nil {
+ tv, ok := pw.info.Types[n.Type]
+ assert(ok)
+ assert(tv.IsType())
+ for _, name := range n.NameList {
+ obj := pw.info.Defs[name].(*types2.Var)
+ pw.dups.add(obj.Type(), tv.Type)
+ }
+ }
+
+ case *syntax.BlockStmt:
+ if !c.withinFunc {
+ copy := *c
+ copy.withinFunc = true
+ return &copy
+ }
+ }
+
+ return c
+}
+
+func (pw *pkgWriter) collectDecls(noders []*noder) {
+ var typegen int
+ for _, p := range noders {
+ var file fileImports
+
+ syntax.Walk(p.file, &declCollector{
+ pw: pw,
+ typegen: &typegen,
+ file: &file,
+ })
+
+ pw.cgoPragmas = append(pw.cgoPragmas, p.pragcgobuf...)
+
+ for _, l := range p.linknames {
+ if !file.importedUnsafe {
+ pw.errorf(l.pos, "//go:linkname only allowed in Go files that import \"unsafe\"")
+ continue
+ }
+
+ switch obj := pw.curpkg.Scope().Lookup(l.local).(type) {
+ case *types2.Func, *types2.Var:
+ if _, ok := pw.linknames[obj]; !ok {
+ pw.linknames[obj] = l.remote
+ } else {
+ pw.errorf(l.pos, "duplicate //go:linkname for %s", l.local)
+ }
+
+ default:
+ // TODO(mdempsky): Enable after #42938 is fixed.
+ if false {
+ pw.errorf(l.pos, "//go:linkname must refer to declared function or variable")
+ }
+ }
+ }
+ }
+}
+
+func (pw *pkgWriter) checkPragmas(p syntax.Pragma, allowed ir.PragmaFlag, embedOK bool) {
+ if p == nil {
+ return
+ }
+ pragma := p.(*pragmas)
+
+ for _, pos := range pragma.Pos {
+ if pos.Flag&^allowed != 0 {
+ pw.errorf(pos.Pos, "misplaced compiler directive")
+ }
+ }
+
+ if !embedOK {
+ for _, e := range pragma.Embeds {
+ pw.errorf(e.Pos, "misplaced go:embed directive")
+ }
+ }
+}
+
+func (w *writer) pkgInit(noders []*noder) {
+ if quirksMode() {
+ posBases := posBasesOf(noders)
+ w.len(len(posBases))
+ for _, posBase := range posBases {
+ w.posBase(posBase)
+ }
+
+ objs := importedObjsOf(w.p.curpkg, w.p.info, noders)
+ w.len(len(objs))
+ for _, obj := range objs {
+ w.qualifiedIdent(obj)
+ }
+ }
+
+ w.len(len(w.p.cgoPragmas))
+ for _, cgoPragma := range w.p.cgoPragmas {
+ w.strings(cgoPragma)
+ }
+
+ w.sync(syncDecls)
+ for _, p := range noders {
+ for _, decl := range p.file.DeclList {
+ w.pkgDecl(decl)
+ }
+ }
+ w.code(declEnd)
+
+ w.sync(syncEOF)
+}
+
+func (w *writer) pkgDecl(decl syntax.Decl) {
+ switch decl := decl.(type) {
+ default:
+ w.p.unexpected("declaration", decl)
+
+ case *syntax.ImportDecl:
+
+ case *syntax.ConstDecl:
+ w.code(declOther)
+ w.pkgObjs(decl.NameList...)
+
+ case *syntax.FuncDecl:
+ if decl.Name.Value == "_" {
+ break // skip blank functions
+ }
+
+ obj := w.p.info.Defs[decl.Name].(*types2.Func)
+ sig := obj.Type().(*types2.Signature)
+
+ if sig.RecvTypeParams() != nil || sig.TypeParams() != nil {
+ break // skip generic functions
+ }
+
+ if recv := sig.Recv(); recv != nil {
+ w.code(declMethod)
+ w.typ(recvBase(recv))
+ w.selector(obj)
+ break
+ }
+
+ w.code(declFunc)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.TypeDecl:
+ if len(decl.TParamList) != 0 {
+ break // skip generic type decls
+ }
+
+ if decl.Name.Value == "_" {
+ break // skip blank type decls
+ }
+
+ name := w.p.info.Defs[decl.Name].(*types2.TypeName)
+ // Skip type declarations for interfaces that are only usable as
+ // type parameter bounds.
+ if iface, ok := name.Type().Underlying().(*types2.Interface); ok && !iface.IsMethodSet() {
+ break
+ }
+
+ // Skip aliases to uninstantiated generic types.
+ // TODO(mdempsky): Revisit after #46477 is resolved.
+ if name.IsAlias() {
+ named, ok := name.Type().(*types2.Named)
+ if ok && named.TypeParams().Len() != 0 && named.TypeArgs().Len() == 0 {
+ break
+ }
+ }
+
+ w.code(declOther)
+ w.pkgObjs(decl.Name)
+
+ case *syntax.VarDecl:
+ w.code(declVar)
+ w.pos(decl)
+ w.pkgObjs(decl.NameList...)
+ w.exprList(decl.Values)
+
+ var embeds []pragmaEmbed
+ if p, ok := decl.Pragma.(*pragmas); ok {
+ embeds = p.Embeds
+ }
+ w.len(len(embeds))
+ for _, embed := range embeds {
+ w.pos(embed.Pos)
+ w.strings(embed.Patterns)
+ }
+ }
+}
+
+func (w *writer) pkgObjs(names ...*syntax.Name) {
+ w.sync(syncDeclNames)
+ w.len(len(names))
+
+ for _, name := range names {
+ obj, ok := w.p.info.Defs[name]
+ assert(ok)
+
+ w.sync(syncDeclName)
+ w.obj(obj, nil)
+ }
+}
+
+// @@@ Helpers
+
+// isDefinedType reports whether obj is a defined type.
+func isDefinedType(obj types2.Object) bool {
+ if obj, ok := obj.(*types2.TypeName); ok {
+ return !obj.IsAlias()
+ }
+ return false
+}
+
+// isGlobal reports whether obj was declared at package scope.
+//
+// Caveat: blank objects are not declared.
+func isGlobal(obj types2.Object) bool {
+ return obj.Parent() == obj.Pkg().Scope()
+}
+
+// lookupObj returns the object that expr refers to, if any. If expr
+// is an explicit instantiation of a generic object, then the instance
+// object is returned as well.
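+//
+// For example, for an explicitly instantiated expression like "F[int]",
+// it returns the generic object for F along with the types2.Instance
+// recording the [int] type argument; for a plain identifier, only the
+// object is returned and inst is the zero Instance.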
+func lookupObj(info *types2.Info, expr syntax.Expr) (obj types2.Object, inst types2.Instance) {
+ if index, ok := expr.(*syntax.IndexExpr); ok {
+ args := unpackListExpr(index.Index)
+ if len(args) == 1 {
+ tv, ok := info.Types[args[0]]
+ assert(ok)
+ if tv.IsValue() {
+ return // normal index expression
+ }
+ }
+
+ expr = index.X
+ }
+
+ // Strip package qualifier, if present.
+ if sel, ok := expr.(*syntax.SelectorExpr); ok {
+ if !isPkgQual(info, sel) {
+ return // normal selector expression
+ }
+ expr = sel.Sel
+ }
+
+ if name, ok := expr.(*syntax.Name); ok {
+ obj = info.Uses[name]
+ inst = info.Instances[name]
+ }
+ return
+}
+
+// isPkgQual reports whether the given selector expression is a
+// package-qualified identifier.
+func isPkgQual(info *types2.Info, sel *syntax.SelectorExpr) bool {
+ if name, ok := sel.X.(*syntax.Name); ok {
+ _, isPkgName := info.Uses[name].(*types2.PkgName)
+ return isPkgName
+ }
+ return false
+}
+
+// recvBase returns the base type for the given receiver parameter.
+func recvBase(recv *types2.Var) *types2.Named {
+ typ := recv.Type()
+ if ptr, ok := typ.(*types2.Pointer); ok {
+ typ = ptr.Elem()
+ }
+ return typ.(*types2.Named)
+}
+
+// namesAsExpr returns a list of names as a syntax.Expr.
+func namesAsExpr(names []*syntax.Name) syntax.Expr {
+ if len(names) == 1 {
+ return names[0]
+ }
+
+ exprs := make([]syntax.Expr, len(names))
+ for i, name := range names {
+ exprs[i] = name
+ }
+ return &syntax.ListExpr{ElemList: exprs}
+}
+
+// fieldIndex returns the index of the struct field named by key.
+func fieldIndex(info *types2.Info, str *types2.Struct, key *syntax.Name) int {
+ field := info.Uses[key].(*types2.Var)
+
+ for i := 0; i < str.NumFields(); i++ {
+ if str.Field(i) == field {
+ return i
+ }
+ }
+
+ panic(fmt.Sprintf("%s: %v is not a field of %v", key.Pos(), field, str))
+}
+
+// objTypeParams returns the type parameters on the given object.
+func objTypeParams(obj types2.Object) *types2.TypeParamList {
+ switch obj := obj.(type) {
+ case *types2.Func:
+ sig := obj.Type().(*types2.Signature)
+ if sig.Recv() != nil {
+ return sig.RecvTypeParams()
+ }
+ return sig.TypeParams()
+ case *types2.TypeName:
+ if !obj.IsAlias() {
+ return obj.Type().(*types2.Named).TypeParams()
+ }
+ }
+ return nil
+}
+
+func asPragmaFlag(p syntax.Pragma) ir.PragmaFlag {
+ if p == nil {
+ return 0
+ }
+ return p.(*pragmas).Flag
+}
diff --git a/src/cmd/compile/internal/objw/objw.go b/src/cmd/compile/internal/objw/objw.go
new file mode 100644
index 0000000..ed5ad75
--- /dev/null
+++ b/src/cmd/compile/internal/objw/objw.go
@@ -0,0 +1,85 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// Uint8 writes an unsigned byte v into s at offset off,
+// and returns the next unused offset (i.e., off+1).
+func Uint8(s *obj.LSym, off int, v uint8) int {
+ return UintN(s, off, uint64(v), 1)
+}
+
+func Uint16(s *obj.LSym, off int, v uint16) int {
+ return UintN(s, off, uint64(v), 2)
+}
+
+func Uint32(s *obj.LSym, off int, v uint32) int {
+ return UintN(s, off, uint64(v), 4)
+}
+
+func Uintptr(s *obj.LSym, off int, v uint64) int {
+ return UintN(s, off, v, types.PtrSize)
+}
+
+// UintN writes an unsigned integer v of size wid bytes into s at offset off,
+// and returns the next unused offset.
+func UintN(s *obj.LSym, off int, v uint64, wid int) int {
+ if off&(wid-1) != 0 {
+ base.Fatalf("duintxxLSym: misaligned: v=%d wid=%d off=%d", v, wid, off)
+ }
+ s.WriteInt(base.Ctxt, int64(off), wid, int64(v))
+ return off + wid
+}
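+
+// Callers typically thread the returned offset through successive writes,
+// for example (illustrative only):
+//
+//	off := 0
+//	off = objw.Uintptr(lsym, off, 0)
+//	off = objw.Uintptr(lsym, off, uint64(n))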
+
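+// SymPtr writes a pointer-sized, pointer-aligned reference to x+xoff into s,
+// rounding off up to pointer alignment first, and returns the next unused
+// offset.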
+func SymPtr(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+ s.WriteAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+func SymPtrWeak(s *obj.LSym, off int, x *obj.LSym, xoff int) int {
+ off = int(types.Rnd(int64(off), int64(types.PtrSize)))
+ s.WriteWeakAddr(base.Ctxt, int64(off), types.PtrSize, x, int64(xoff))
+ off += types.PtrSize
+ return off
+}
+
+func SymPtrOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
+func SymPtrWeakOff(s *obj.LSym, off int, x *obj.LSym) int {
+ s.WriteWeakOff(base.Ctxt, int64(off), x, 0)
+ off += 4
+ return off
+}
+
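+// Global declares s as a global symbol of the given width in bytes and
+// flags, marking it AttrLocal first if obj.LOCAL is set.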
+func Global(s *obj.LSym, width int32, flags int16) {
+ if flags&obj.LOCAL != 0 {
+ s.Set(obj.AttrLocal, true)
+ flags &^= obj.LOCAL
+ }
+ base.Ctxt.Globl(s, int64(width), int(flags))
+}
+
+// BitVec writes the contents of bv into s as a sequence of bytes
+// in little-endian order, and returns the next unused offset.
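+// Bit i of bv ends up as bit i%8 of the byte written at offset off+i/8.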
+func BitVec(s *obj.LSym, off int, bv bitvec.BitVec) int {
+ // Runtime reads the bitmaps as byte arrays. Oblige.
+ for j := 0; int32(j) < bv.N; j += 8 {
+ word := bv.B[j/32]
+ off = Uint8(s, off, uint8(word>>(uint(j)%32)))
+ }
+ return off
+}
diff --git a/src/cmd/compile/internal/objw/prog.go b/src/cmd/compile/internal/objw/prog.go
new file mode 100644
index 0000000..b5ac4dd
--- /dev/null
+++ b/src/cmd/compile/internal/objw/prog.go
@@ -0,0 +1,226 @@
+// Derived from Inferno utils/6c/txt.c
+// https://bitbucket.org/inferno-os/inferno-os/src/master/utils/6c/txt.c
+//
+// Copyright © 1994-1999 Lucent Technologies Inc. All rights reserved.
+// Portions Copyright © 1995-1997 C H Forsyth (forsyth@terzarima.net)
+// Portions Copyright © 1997-1999 Vita Nuova Limited
+// Portions Copyright © 2000-2007 Vita Nuova Holdings Limited (www.vitanuova.com)
+// Portions Copyright © 2004,2006 Bruce Ellis
+// Portions Copyright © 2005-2007 C H Forsyth (forsyth@terzarima.net)
+// Revisions Copyright © 2000-2007 Lucent Technologies Inc. and others
+// Portions Copyright © 2009 The Go Authors. All rights reserved.
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy
+// of this software and associated documentation files (the "Software"), to deal
+// in the Software without restriction, including without limitation the rights
+// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+// copies of the Software, and to permit persons to whom the Software is
+// furnished to do so, subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in
+// all copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
+// THE SOFTWARE.
+
+package objw
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+var sharedProgArray = new([10000]obj.Prog) // *T instead of T to work around issue 19839
+
+// NewProgs returns a new Progs for fn.
+// worker indicates which of the backend workers will use the Progs.
+func NewProgs(fn *ir.Func, worker int) *Progs {
+ pp := new(Progs)
+ if base.Ctxt.CanReuseProgs() {
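+ // Carve a disjoint per-worker slice out of the shared array so
+ // concurrent backend workers never hand out the same Prog.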
+ sz := len(sharedProgArray) / base.Flag.LowerC
+ pp.Cache = sharedProgArray[sz*worker : sz*(worker+1)]
+ }
+ pp.CurFunc = fn
+
+ // prime the pump
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+
+ pp.Pos = fn.Pos()
+ pp.SetText(fn)
+ // PCDATA tables implicitly start with index -1.
+ pp.PrevLive = LivenessIndex{-1, false}
+ pp.NextLive = pp.PrevLive
+ return pp
+}
+
+// Progs accumulates Progs for a function and converts them into machine code.
+type Progs struct {
+ Text *obj.Prog // ATEXT Prog for this function
+ Next *obj.Prog // next Prog
+ PC int64 // virtual PC; count of Progs
+ Pos src.XPos // position to use for new Progs
+ CurFunc *ir.Func // fn these Progs are for
+ Cache []obj.Prog // local progcache
+ CacheIndex int // first free element of progcache
+
+ NextLive LivenessIndex // liveness index for the next Prog
+ PrevLive LivenessIndex // last emitted liveness index
+}
+
+// LivenessIndex stores the liveness map information for a Value.
+type LivenessIndex struct {
+ StackMapIndex int
+
+ // IsUnsafePoint indicates that this is an unsafe-point.
+ //
+ // Note that it's possible for a call Value to have a stack
+ // map while also being an unsafe-point. This means it cannot
+ // be preempted at this instruction, but that a preemption or
+ // stack growth may happen in the called function.
+ IsUnsafePoint bool
+}
+
+// StackMapDontCare indicates that the stack map index at a Value
+// doesn't matter.
+//
+// This is a sentinel value that should never be emitted to the PCDATA
+// stream. We use -1000 because that's obviously never a valid stack
+// index (but -1 is).
+const StackMapDontCare = -1000
+
+// LivenessDontCare indicates that the liveness information doesn't
+// matter. Currently it is used in deferreturn liveness when we don't
+// actually need it. It should never be emitted to the PCDATA stream.
+var LivenessDontCare = LivenessIndex{StackMapDontCare, true}
+
+func (idx LivenessIndex) StackMapValid() bool {
+ return idx.StackMapIndex != StackMapDontCare
+}
+
+func (pp *Progs) NewProg() *obj.Prog {
+ var p *obj.Prog
+ if pp.CacheIndex < len(pp.Cache) {
+ p = &pp.Cache[pp.CacheIndex]
+ pp.CacheIndex++
+ } else {
+ p = new(obj.Prog)
+ }
+ p.Ctxt = base.Ctxt
+ return p
+}
+
+// Flush converts from pp to machine code.
+func (pp *Progs) Flush() {
+ plist := &obj.Plist{Firstpc: pp.Text, Curfn: pp.CurFunc}
+ obj.Flushplist(base.Ctxt, plist, pp.NewProg, base.Ctxt.Pkgpath)
+}
+
+// Free clears pp and any associated resources.
+func (pp *Progs) Free() {
+ if base.Ctxt.CanReuseProgs() {
+ // Clear progs to enable GC and avoid abuse.
+ s := pp.Cache[:pp.CacheIndex]
+ for i := range s {
+ s[i] = obj.Prog{}
+ }
+ }
+ // Clear pp to avoid abuse.
+ *pp = Progs{}
+}
+
+// Prog adds a Prog with instruction As to pp.
+func (pp *Progs) Prog(as obj.As) *obj.Prog {
+ if pp.NextLive.StackMapValid() && pp.NextLive.StackMapIndex != pp.PrevLive.StackMapIndex {
+ // Emit stack map index change.
+ idx := pp.NextLive.StackMapIndex
+ pp.PrevLive.StackMapIndex = idx
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_StackMapIndex)
+ p.To.SetConst(int64(idx))
+ }
+ if pp.NextLive.IsUnsafePoint != pp.PrevLive.IsUnsafePoint {
+ // Emit unsafe-point marker.
+ pp.PrevLive.IsUnsafePoint = pp.NextLive.IsUnsafePoint
+ p := pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_UnsafePoint)
+ if pp.NextLive.IsUnsafePoint {
+ p.To.SetConst(objabi.PCDATA_UnsafePointUnsafe)
+ } else {
+ p.To.SetConst(objabi.PCDATA_UnsafePointSafe)
+ }
+ }
+
+ p := pp.Next
+ pp.Next = pp.NewProg()
+ pp.Clear(pp.Next)
+ p.Link = pp.Next
+
+ if !pp.Pos.IsKnown() && base.Flag.K != 0 {
+ base.Warn("prog: unknown position (line 0)")
+ }
+
+ p.As = as
+ p.Pos = pp.Pos
+ if pp.Pos.IsStmt() == src.PosIsStmt {
+ // Clear IsStmt for later Progs at this pos provided that as can be marked as a stmt
+ if LosesStmtMark(as) {
+ return p
+ }
+ pp.Pos = pp.Pos.WithNotStmt()
+ }
+ return p
+}
+
+func (pp *Progs) Clear(p *obj.Prog) {
+ obj.Nopout(p)
+ p.As = obj.AEND
+ p.Pc = pp.PC
+ pp.PC++
+}
+
+func (pp *Progs) Append(p *obj.Prog, as obj.As, ftype obj.AddrType, freg int16, foffset int64, ttype obj.AddrType, treg int16, toffset int64) *obj.Prog {
+ q := pp.NewProg()
+ pp.Clear(q)
+ q.As = as
+ q.Pos = p.Pos
+ q.From.Type = ftype
+ q.From.Reg = freg
+ q.From.Offset = foffset
+ q.To.Type = ttype
+ q.To.Reg = treg
+ q.To.Offset = toffset
+ q.Link = p.Link
+ p.Link = q
+ return q
+}
+
+func (pp *Progs) SetText(fn *ir.Func) {
+ if pp.Text != nil {
+ base.Fatalf("Progs.SetText called twice")
+ }
+ ptxt := pp.Prog(obj.ATEXT)
+ pp.Text = ptxt
+
+ fn.LSym.Func().Text = ptxt
+ ptxt.From.Type = obj.TYPE_MEM
+ ptxt.From.Name = obj.NAME_EXTERN
+ ptxt.From.Sym = fn.LSym
+}
+
+// LosesStmtMark reports whether a prog with op as loses its statement mark on the way to DWARF.
+// The attributes from some opcodes are lost in translation.
+// TODO: this is an artifact of how funcpctab combines information for instructions at a single PC.
+// Should try to fix it there.
+func LosesStmtMark(as obj.As) bool {
+ // is_stmt does not work for these; it DOES for ANOP even though that generates no code.
+ return as == obj.APCDATA || as == obj.AFUNCDATA
+}
diff --git a/src/cmd/compile/internal/pkginit/init.go b/src/cmd/compile/internal/pkginit/init.go
new file mode 100644
index 0000000..40f1408
--- /dev/null
+++ b/src/cmd/compile/internal/pkginit/init.go
@@ -0,0 +1,144 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkginit
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// MakeInit creates a synthetic init function to handle any
+// package-scope initialization statements.
+//
+// TODO(mdempsky): Move into noder, so that the types2-based frontends
+// can use Info.InitOrder instead.
+func MakeInit() {
+ nf := initOrder(typecheck.Target.Decls)
+ if len(nf) == 0 {
+ return
+ }
+
+ // Make a function that contains all the initialization statements.
+ base.Pos = nf[0].Pos() // prolog/epilog gets line number of first init stmt
+ initializers := typecheck.Lookup("init")
+ fn := typecheck.DeclFunc(initializers, ir.NewFuncType(base.Pos, nil, nil, nil))
+ for _, dcl := range typecheck.InitTodoFunc.Dcl {
+ dcl.Curfn = fn
+ }
+ fn.Dcl = append(fn.Dcl, typecheck.InitTodoFunc.Dcl...)
+ typecheck.InitTodoFunc.Dcl = nil
+
+ // Suppress useless "can inline" diagnostics.
+ // Init functions are only called dynamically.
+ fn.SetInlinabilityChecked(true)
+
+ fn.Body = nf
+ typecheck.FinishFuncBody()
+
+ typecheck.Func(fn)
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(nf)
+ })
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+ // Prepend to Inits, so it runs first, before any user-declared init
+ // functions.
+ typecheck.Target.Inits = append([]*ir.Func{fn}, typecheck.Target.Inits...)
+
+ if typecheck.InitTodoFunc.Dcl != nil {
+ // We only generate temps using InitTodoFunc if there
+ // are package-scope initialization statements, so
+ // something's weird if we get here.
+ base.Fatalf("InitTodoFunc still has declarations")
+ }
+ typecheck.InitTodoFunc = nil
+}
+
+// Task makes and returns an initialization record for the package.
+// See runtime/proc.go:initTask for its layout.
+// The 3 tasks for initialization are:
+// 1) Initialize all of the packages the current package depends on.
+// 2) Initialize all the variables that have initializers.
+// 3) Run any init functions.
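+//
+// The record emitted below is laid out as pointer-sized words (matching
+// runtime/proc.go:initTask in this release):
+//
+//	state uintptr        // 0 = not yet initialized
+//	ndeps uintptr
+//	nfns  uintptr
+//	deps  [ndeps]uintptr // imported packages' .inittask symbols
+//	fns   [nfns]uintptr  // init functions to call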
+func Task() *ir.Name {
+ var deps []*obj.LSym // initTask records for packages the current package depends on
+ var fns []*obj.LSym // functions to call for package initialization
+
+ // Find imported packages with init tasks.
+ for _, pkg := range typecheck.Target.Imports {
+ n := typecheck.Resolve(ir.NewIdent(base.Pos, pkg.Lookup(".inittask")))
+ if n.Op() == ir.ONONAME {
+ continue
+ }
+ if n.Op() != ir.ONAME || n.(*ir.Name).Class != ir.PEXTERN {
+ base.Fatalf("bad inittask: %v", n)
+ }
+ deps = append(deps, n.(*ir.Name).Linksym())
+ }
+
+ // Record user init functions.
+ for _, fn := range typecheck.Target.Inits {
+ if fn.Sym().Name == "init" {
+ // Synthetic init function for initialization of package-scope
+ // variables. We can use staticinit to optimize away static
+ // assignments.
+ s := staticinit.Schedule{
+ Plans: make(map[ir.Node]*staticinit.Plan),
+ Temps: make(map[ir.Node]*ir.Name),
+ }
+ for _, n := range fn.Body {
+ s.StaticInit(n)
+ }
+ fn.Body = s.Out
+ ir.WithFunc(fn, func() {
+ typecheck.Stmts(fn.Body)
+ })
+
+ if len(fn.Body) == 0 {
+ fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)}
+ }
+ }
+
+ // Skip init functions with empty bodies.
+ if len(fn.Body) == 1 {
+ if stmt := fn.Body[0]; stmt.Op() == ir.OBLOCK && len(stmt.(*ir.BlockStmt).List) == 0 {
+ continue
+ }
+ }
+ fns = append(fns, fn.Nname.Linksym())
+ }
+
+ if len(deps) == 0 && len(fns) == 0 && types.LocalPkg.Name != "main" && types.LocalPkg.Name != "runtime" {
+ return nil // nothing to initialize
+ }
+
+ // Make an .inittask structure.
+ sym := typecheck.Lookup(".inittask")
+ task := typecheck.NewName(sym)
+ task.SetType(types.Types[types.TUINT8]) // fake type
+ task.Class = ir.PEXTERN
+ sym.Def = task
+ lsym := task.Linksym()
+ ot := 0
+ ot = objw.Uintptr(lsym, ot, 0) // state: not initialized yet
+ ot = objw.Uintptr(lsym, ot, uint64(len(deps)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fns)))
+ for _, d := range deps {
+ ot = objw.SymPtr(lsym, ot, d, 0)
+ }
+ for _, f := range fns {
+ ot = objw.SymPtr(lsym, ot, f, 0)
+ }
+ // An initTask has pointers, but none into the Go heap.
+ // It's not quite read-only; the state field must be modifiable.
+ objw.Global(lsym, int32(ot), obj.NOPTR)
+ return task
+}
diff --git a/src/cmd/compile/internal/pkginit/initorder.go b/src/cmd/compile/internal/pkginit/initorder.go
new file mode 100644
index 0000000..a509753
--- /dev/null
+++ b/src/cmd/compile/internal/pkginit/initorder.go
@@ -0,0 +1,368 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkginit
+
+import (
+ "bytes"
+ "container/heap"
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// Package initialization
+//
+// Here we implement the algorithm for ordering package-level variable
+// initialization. The spec is written in terms of variable
+// initialization, but multiple variables initialized by a single
+// assignment are handled together, so here we instead focus on
+// ordering initialization assignments. Conveniently, this maps well
+// to how we represent package-level initializations using the Node
+// AST.
+//
+// Assignments are in one of three phases: NotStarted, Pending, or
+// Done. For assignments in the Pending phase, we use a per-assignment
+// dependency count (the order map below) to record how many unique
+// variable dependencies are not yet Done. We also maintain a
+// "blocking" map that maps assignments back to all of the assignments
+// that depend on it.
+//
+// For example, for an initialization like:
+//
+// var x = f(a, b, b)
+// var a, b = g()
+//
+// the "x = f(a, b, b)" assignment depends on two variables (a and b),
+// so its dependency count will be 2. Correspondingly, the "a, b = g()"
+// assignment's "blocking" entry will have two entries back to x's
+// assignment.
+//
+// Logically, initialization works by (1) taking all NotStarted
+// assignments, calculating their dependencies, and marking them
+// Pending; (2) adding all Pending assignments with a zero count to a
+// "ready" priority queue (ordered by variable declaration position);
+// and (3) iteratively processing the next Pending assignment from the
+// queue, decreasing the counts of assignments it's blocking, and
+// adding them to the queue if decremented to 0.
+//
+// As an optimization, we actually apply each of these three steps for
+// each assignment. This yields the same order, but keeps queue size
+// down and thus also heap operation costs.
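+//
+// Concretely, for the example above (values shown schematically), after
+// both assignments have been processed:
+//
+//	o.order[x's assignment]   == 2   // still blocked on a and b
+//	o.order[a,b's assignment] == 0   // ready; pushed onto o.ready
+//	o.blocking[a,b's assignment]     // two edges back to x's assignment
+//
+// flushReady pops "a, b = g()", emits it, decrements x's count once per
+// edge, and emits "x = f(a, b, b)" once that count reaches zero.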
+
+// Static initialization phase.
+// These values are stored in two bits in Node.flags.
+const (
+ InitNotStarted = iota
+ InitDone
+ InitPending
+)
+
+type InitOrder struct {
+ // blocking maps initialization assignments to the assignments
+ // that depend on it.
+ blocking map[ir.Node][]ir.Node
+
+ // ready is the queue of Pending initialization assignments
+ // that are ready for initialization.
+ ready declOrder
+
+ order map[ir.Node]int
+}
+
+// initOrder computes initialization order for a list l of
+// package-level declarations (in declaration order) and outputs the
+// corresponding list of statements to include in the init() function
+// body.
+func initOrder(l []ir.Node) []ir.Node {
+ var res ir.Nodes
+ o := InitOrder{
+ blocking: make(map[ir.Node][]ir.Node),
+ order: make(map[ir.Node]int),
+ }
+
+ // Process all package-level assignments in declaration order.
+ for _, n := range l {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ o.processAssign(n)
+ o.flushReady(func(n ir.Node) { res.Append(n) })
+ case ir.ODCLCONST, ir.ODCLFUNC, ir.ODCLTYPE:
+ // nop
+ default:
+ base.Fatalf("unexpected package-level statement: %v", n)
+ }
+ }
+
+ // Check that all assignments are now Done; if not, there must
+ // have been a dependency cycle.
+ for _, n := range l {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ if o.order[n] != orderDone {
+ // If there have already been errors
+ // printed, those errors may have
+ // confused us and there might not be
+ // a loop. Let the user fix those
+ // first.
+ base.ExitIfErrors()
+
+ o.findInitLoopAndExit(firstLHS(n), new([]*ir.Name), new(ir.NameSet))
+ base.Fatalf("initialization unfinished, but failed to identify loop")
+ }
+ }
+ }
+
+ // Invariant consistency check. If this is non-zero, then we
+ // should have found a cycle above.
+ if len(o.blocking) != 0 {
+ base.Fatalf("expected empty map: %v", o.blocking)
+ }
+
+ return res
+}
+
+func (o *InitOrder) processAssign(n ir.Node) {
+ if _, ok := o.order[n]; ok {
+ base.Fatalf("unexpected state: %v, %v", n, o.order[n])
+ }
+ o.order[n] = 0
+
+ // Compute number of variable dependencies and build the
+ // inverse dependency ("blocking") graph.
+ for dep := range collectDeps(n, true) {
+ defn := dep.Defn
+ // Skip dependencies on functions (PFUNC) and
+ // variables already initialized (InitDone).
+ if dep.Class != ir.PEXTERN || o.order[defn] == orderDone {
+ continue
+ }
+ o.order[n]++
+ o.blocking[defn] = append(o.blocking[defn], n)
+ }
+
+ if o.order[n] == 0 {
+ heap.Push(&o.ready, n)
+ }
+}
+
+const orderDone = -1000
+
+// flushReady repeatedly applies initialize to the earliest (in
+// declaration order) assignment ready for initialization and updates
+// the inverse dependency ("blocking") graph.
+func (o *InitOrder) flushReady(initialize func(ir.Node)) {
+ for o.ready.Len() != 0 {
+ n := heap.Pop(&o.ready).(ir.Node)
+ if order, ok := o.order[n]; !ok || order != 0 {
+ base.Fatalf("unexpected state: %v, %v, %v", n, ok, order)
+ }
+
+ initialize(n)
+ o.order[n] = orderDone
+
+ blocked := o.blocking[n]
+ delete(o.blocking, n)
+
+ for _, m := range blocked {
+ if o.order[m]--; o.order[m] == 0 {
+ heap.Push(&o.ready, m)
+ }
+ }
+ }
+}
+
+// findInitLoopAndExit searches for an initialization loop involving variable
+// or function n. If one is found, it reports the loop as an error and exits.
+//
+// path points to a slice used for tracking the sequence of
+// variables/functions visited. Using a pointer to a slice allows the
+// slice capacity to grow and limit reallocations.
+func (o *InitOrder) findInitLoopAndExit(n *ir.Name, path *[]*ir.Name, ok *ir.NameSet) {
+ for i, x := range *path {
+ if x == n {
+ reportInitLoopAndExit((*path)[i:])
+ return
+ }
+ }
+
+ // There might be multiple loops involving n; by sorting
+ // references, we deterministically pick the one reported.
+ refers := collectDeps(n.Defn, false).Sorted(func(ni, nj *ir.Name) bool {
+ return ni.Pos().Before(nj.Pos())
+ })
+
+ *path = append(*path, n)
+ for _, ref := range refers {
+ // Short-circuit variables that were initialized.
+ if ref.Class == ir.PEXTERN && o.order[ref.Defn] == orderDone || ok.Has(ref) {
+ continue
+ }
+
+ o.findInitLoopAndExit(ref, path, ok)
+ }
+
+ // n is not involved in a cycle.
+ // Record that fact to avoid checking it again when reached another way,
+ // or else this traversal will take exponential time traversing all paths
+ // through the part of the package's call graph implicated in the cycle.
+ ok.Add(n)
+
+ *path = (*path)[:len(*path)-1]
+}
+
+// reportInitLoopAndExit reports an initialization loop as an error
+// and exits. However, if l is not actually an initialization loop, it
+// simply returns instead.
+func reportInitLoopAndExit(l []*ir.Name) {
+ // Rotate loop so that the earliest variable declaration is at
+ // the start.
+ i := -1
+ for j, n := range l {
+ if n.Class == ir.PEXTERN && (i == -1 || n.Pos().Before(l[i].Pos())) {
+ i = j
+ }
+ }
+ if i == -1 {
+ // False positive: loop only involves recursive
+ // functions. Return so that findInitLoop can continue
+ // searching.
+ return
+ }
+ l = append(l[i:], l[:i]...)
+
+ // TODO(mdempsky): Method values are printed as "T.m-fm"
+ // rather than "T.m". Figure out how to avoid that.
+
+ var msg bytes.Buffer
+ fmt.Fprintf(&msg, "initialization loop:\n")
+ for _, n := range l {
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", ir.Line(n), n)
+ }
+ fmt.Fprintf(&msg, "\t%v: %v", ir.Line(l[0]), l[0])
+
+ base.ErrorfAt(l[0].Pos(), msg.String())
+ base.ErrorExit()
+}
+
+// collectDeps returns all of the package-level functions and
+// variables that declaration n depends on. If transitive is true,
+// then it also includes the transitive dependencies of any depended
+// upon functions (but not variables).
+func collectDeps(n ir.Node, transitive bool) ir.NameSet {
+ d := initDeps{transitive: transitive}
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ d.inspect(n.Y)
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ d.inspect(n.Rhs[0])
+ case ir.ODCLFUNC:
+ n := n.(*ir.Func)
+ d.inspectList(n.Body)
+ default:
+ base.Fatalf("unexpected Op: %v", n.Op())
+ }
+ return d.seen
+}
+
+type initDeps struct {
+ transitive bool
+ seen ir.NameSet
+ cvisit func(ir.Node)
+}
+
+func (d *initDeps) cachedVisit() func(ir.Node) {
+ if d.cvisit == nil {
+ d.cvisit = d.visit // cache closure
+ }
+ return d.cvisit
+}
+
+func (d *initDeps) inspect(n ir.Node) { ir.Visit(n, d.cachedVisit()) }
+func (d *initDeps) inspectList(l ir.Nodes) { ir.VisitList(l, d.cachedVisit()) }
+
+// visit calls foundDep on any package-level functions or variables
+// referenced by n, if any.
+func (d *initDeps) visit(n ir.Node) {
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ switch n.Class {
+ case ir.PEXTERN, ir.PFUNC:
+ d.foundDep(n)
+ }
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ d.inspectList(n.Func.Body)
+
+ case ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ d.foundDep(ir.MethodExprName(n))
+ }
+}
+
+// foundDep records that we've found a dependency on n by adding it to
+// seen.
+func (d *initDeps) foundDep(n *ir.Name) {
+ // Can happen with method expressions involving interface
+ // types; e.g., fixedbugs/issue4495.go.
+ if n == nil {
+ return
+ }
+
+ // Names without definitions aren't interesting as far as
+ // initialization ordering goes.
+ if n.Defn == nil {
+ return
+ }
+
+ if d.seen.Has(n) {
+ return
+ }
+ d.seen.Add(n)
+ if d.transitive && n.Class == ir.PFUNC {
+ d.inspectList(n.Defn.(*ir.Func).Body)
+ }
+}
+
+// declOrder implements heap.Interface, ordering assignment statements
+// by the position of their first LHS expression.
+//
+// N.B., the Pos of the first LHS expression is used because
+// an OAS node's Pos may not be unique. For example, given the
+// declaration "var a, b = f(), g()", "a" must be ordered before "b",
+// but both OAS nodes use the "=" token's position as their Pos.
+type declOrder []ir.Node
+
+func (s declOrder) Len() int { return len(s) }
+func (s declOrder) Less(i, j int) bool {
+ return firstLHS(s[i]).Pos().Before(firstLHS(s[j]).Pos())
+}
+func (s declOrder) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+func (s *declOrder) Push(x interface{}) { *s = append(*s, x.(ir.Node)) }
+func (s *declOrder) Pop() interface{} {
+ n := (*s)[len(*s)-1]
+ *s = (*s)[:len(*s)-1]
+ return n
+}
+
+// firstLHS returns the first expression on the left-hand side of
+// assignment n.
+func firstLHS(n ir.Node) *ir.Name {
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ return n.X.Name()
+ case ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2RECV, ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
+ return n.Lhs[0].Name()
+ }
+
+ base.Fatalf("unexpected Op: %v", n.Op())
+ return nil
+}
diff --git a/src/cmd/compile/internal/ppc64/galign.go b/src/cmd/compile/internal/ppc64/galign.go
new file mode 100644
index 0000000..20fd8ce
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/galign.go
@@ -0,0 +1,29 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/ppc64"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &ppc64.Linkppc64
+ if buildcfg.GOARCH == "ppc64le" {
+ arch.LinkArch = &ppc64.Linkppc64le
+ }
+ arch.REGSP = ppc64.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.LoadRegResult = loadRegResult
+ arch.SpillArgReg = spillArgReg
+}
diff --git a/src/cmd/compile/internal/ppc64/ggen.go b/src/cmd/compile/internal/ppc64/ggen.go
new file mode 100644
index 0000000..3ae6422
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ggen.go
@@ -0,0 +1,55 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
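+ // Strategy depends on size: small ranges are cleared with individual
+ // MOVD stores of REGZERO, medium ranges branch into the duffzero
+ // routine, and larger ranges use an explicit MOVDU store loop.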
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGSP, base.Ctxt.FixedFrameSize()+off+i)
+ }
+ } else if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 4 * (128 - cnt/int64(types.PtrSize))
+ } else {
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, base.Ctxt.FixedFrameSize()+off-8, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT1, 0)
+ p.Reg = ppc64.REGSP
+ p = pp.Append(p, ppc64.AMOVD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, ppc64.REGTMP, 0)
+ p = pp.Append(p, ppc64.AADD, obj.TYPE_REG, ppc64.REGTMP, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p.Reg = ppc64.REGRT1
+ p = pp.Append(p, ppc64.AMOVDU, obj.TYPE_REG, ppc64.REGZERO, 0, obj.TYPE_MEM, ppc64.REGRT1, int64(types.PtrSize))
+ p1 := p
+ p = pp.Append(p, ppc64.ACMP, obj.TYPE_REG, ppc64.REGRT1, 0, obj.TYPE_REG, ppc64.REGRT2, 0)
+ p = pp.Append(p, ppc64.ABNE, obj.TYPE_NONE, 0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(p1)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ p := pp.Prog(ppc64.AOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+ return p
+}
diff --git a/src/cmd/compile/internal/ppc64/opt.go b/src/cmd/compile/internal/ppc64/opt.go
new file mode 100644
index 0000000..4f81aa9
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/opt.go
@@ -0,0 +1,12 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+// Many Power ISA arithmetic and logical instructions come in four
+// standard variants. These bits let us map between variants.
+const (
+ V_CC = 1 << 0 // xCC (affect CR field 0 flags)
+ V_V = 1 << 1 // xV (affect SO and OV flags)
+)
diff --git a/src/cmd/compile/internal/ppc64/ssa.go b/src/cmd/compile/internal/ppc64/ssa.go
new file mode 100644
index 0000000..98316c1
--- /dev/null
+++ b/src/cmd/compile/internal/ppc64/ssa.go
@@ -0,0 +1,2068 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ppc64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/ppc64"
+ "internal/buildcfg"
+ "math"
+ "strings"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ // flive := b.FlagsLiveAtEnd
+ // if b.Control != nil && b.Control.Type.IsFlags() {
+ // flive = true
+ // }
+ // for i := len(b.Values) - 1; i >= 0; i-- {
+ // v := b.Values[i]
+ // if flive && (v.Op == ssa.OpPPC64MOVDconst) {
+ // // The "mark" is any non-nil Aux value.
+ // v.Aux = v
+ // }
+ // if v.Type.IsFlags() {
+ // flive = false
+ // }
+ // for _, a := range v.Args {
+ // if a.Type.IsFlags() {
+ // flive = true
+ // }
+ // }
+ // }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return ppc64.AMOVB
+ } else {
+ return ppc64.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return ppc64.AMOVH
+ } else {
+ return ppc64.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return ppc64.AMOVW
+ } else {
+ return ppc64.AMOVWZ
+ }
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return ppc64.AFMOVS
+ case 8:
+ return ppc64.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ return ppc64.AMOVB
+ case 2:
+ return ppc64.AMOVH
+ case 4:
+ return ppc64.AMOVW
+ case 8:
+ return ppc64.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpCopy:
+ t := v.Type
+ if t.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ rt := obj.TYPE_REG
+ op := ppc64.AMOVD
+
+ if t.IsFloat() {
+ op = ppc64.AFMOVD
+ }
+ p := s.Prog(op)
+ p.From.Type = rt
+ p.From.Reg = x
+ p.To.Type = rt
+ p.To.Reg = y
+ }
+
+ case ssa.OpPPC64LoweredMuluhilo:
+ // MULHDU Rarg1, Rarg0, Reg0
+ // MULLD Rarg1, Rarg0, Reg1
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(ppc64.AMULHDU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(ppc64.AMULLD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+
+ case ssa.OpPPC64LoweredAdd64Carry:
+ // ADDC Rarg2, -1, Rtmp
+ // ADDE Rarg1, Rarg0, Reg0
+ // ADDZE Rzero, Reg1
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ p := s.Prog(ppc64.AADDC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = -1
+ p.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p1 := s.Prog(ppc64.AADDE)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(ppc64.AADDZE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGZERO
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg1()
+
+ case ssa.OpPPC64LoweredAtomicAnd8,
+ ssa.OpPPC64LoweredAtomicAnd32,
+ ssa.OpPPC64LoweredAtomicOr8,
+ ssa.OpPPC64LoweredAtomicOr32:
+ // LWSYNC
+ // LBAR/LWAR (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0)
+ // BNE -3(PC)
+ ld := ppc64.ALBAR
+ st := ppc64.ASTBCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAnd32 || v.Op == ssa.OpPPC64LoweredAtomicOr32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LBAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ // AND/OR reg1,out
+ p1 := s.Prog(v.Op.Asm())
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = ppc64.REGTMP
+ // STBCCC or STWCCC
+ p2 := s.Prog(st)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = ppc64.REGTMP
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = r0
+ p2.RegTo2 = ppc64.REGTMP
+ // BNE retry
+ p3 := s.Prog(ppc64.ABNE)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.To.SetTarget(p)
+
+ case ssa.OpPPC64LoweredAtomicAdd32,
+ ssa.OpPPC64LoweredAtomicAdd64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC/STWCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // MOVW Rout,Rout (if Add32)
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // ADD reg1,out
+ p1 := s.Prog(ppc64.AADD)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = out
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+
+ // Ensure a 32 bit result
+ if v.Op == ssa.OpPPC64LoweredAtomicAdd32 {
+ p5 := s.Prog(ppc64.AMOVWZ)
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = out
+ }
+
+ case ssa.OpPPC64LoweredAtomicExchange32,
+ ssa.OpPPC64LoweredAtomicExchange64:
+ // LWSYNC
+ // LDAR/LWAR (Rarg0), Rout
+ // STDCCC/STWCCC Rout, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ if v.Op == ssa.OpPPC64LoweredAtomicExchange32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync := s.Prog(ppc64.ALWSYNC)
+ plwsync.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // STDCCC or STWCCC
+ p1 := s.Prog(st)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Type = obj.TYPE_MEM
+ p1.To.Reg = r0
+ // BNE retry
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ p2.To.SetTarget(p)
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+
+ case ssa.OpPPC64LoweredAtomicLoad8,
+ ssa.OpPPC64LoweredAtomicLoad32,
+ ssa.OpPPC64LoweredAtomicLoad64,
+ ssa.OpPPC64LoweredAtomicLoadPtr:
+ // SYNC
+ // MOVB/MOVD/MOVW (Rarg0), Rout
+ // CMP Rout,Rout
+ // BNE 1(PC)
+ // ISYNC
+ ld := ppc64.AMOVD
+ cmp := ppc64.ACMP
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicLoad8:
+ ld = ppc64.AMOVBZ
+ case ssa.OpPPC64LoweredAtomicLoad32:
+ ld = ppc64.AMOVWZ
+ cmp = ppc64.ACMPW
+ }
+ arg0 := v.Args[0].Reg()
+ out := v.Reg0()
+ // SYNC when AuxInt == 1; otherwise, load-acquire
+ if v.AuxInt == 1 {
+ psync := s.Prog(ppc64.ASYNC)
+ psync.To.Type = obj.TYPE_NONE
+ }
+ // Load
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = arg0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+ // CMP
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = out
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = out
+ // BNE
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // ISYNC
+ pisync := s.Prog(ppc64.AISYNC)
+ pisync.To.Type = obj.TYPE_NONE
+ p2.To.SetTarget(pisync)
+
+ case ssa.OpPPC64LoweredAtomicStore8,
+ ssa.OpPPC64LoweredAtomicStore32,
+ ssa.OpPPC64LoweredAtomicStore64:
+ // SYNC or LWSYNC
+ // MOVB/MOVW/MOVD arg1,(arg0)
+ st := ppc64.AMOVD
+ switch v.Op {
+ case ssa.OpPPC64LoweredAtomicStore8:
+ st = ppc64.AMOVB
+ case ssa.OpPPC64LoweredAtomicStore32:
+ st = ppc64.AMOVW
+ }
+ arg0 := v.Args[0].Reg()
+ arg1 := v.Args[1].Reg()
+ // If AuxInt == 0, LWSYNC (Store-Release), else SYNC
+ // SYNC
+ syncOp := ppc64.ASYNC
+ if v.AuxInt == 0 {
+ syncOp = ppc64.ALWSYNC
+ }
+ psync := s.Prog(syncOp)
+ psync.To.Type = obj.TYPE_NONE
+ // Store
+ p := s.Prog(st)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = arg0
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = arg1
+
+ case ssa.OpPPC64LoweredAtomicCas64,
+ ssa.OpPPC64LoweredAtomicCas32:
+ // LWSYNC
+ // loop:
+ // LDAR (Rarg0), MutexHint, Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE fail
+ // STDCCC Rarg2, (Rarg0)
+ // BNE loop
+ // LWSYNC // Only for sequential consistency; not required in CasRel.
+ // MOVD $1, Rout
+ // BR end
+ // fail:
+ // MOVD $0, Rout
+ // end:
+ ld := ppc64.ALDAR
+ st := ppc64.ASTDCCC
+ cmp := ppc64.ACMP
+ if v.Op == ssa.OpPPC64LoweredAtomicCas32 {
+ ld = ppc64.ALWAR
+ st = ppc64.ASTWCCC
+ cmp = ppc64.ACMPW
+ }
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.2.2 in the ISA 2.07b.
+ plwsync1 := s.Prog(ppc64.ALWSYNC)
+ plwsync1.To.Type = obj.TYPE_NONE
+ // LDAR or LWAR
+ p := s.Prog(ld)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ // If it is a Compare-and-Swap-Release operation, set the EH field with
+ // the release hint.
+ if v.AuxInt == 0 {
+ p.SetFrom3Const(0)
+ }
+ // CMP reg1,reg2
+ p1 := s.Prog(cmp)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.To.Reg = ppc64.REGTMP
+ p1.To.Type = obj.TYPE_REG
+ // BNE cas_fail
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+ // STDCCC or STWCCC
+ p3 := s.Prog(st)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ // BNE retry
+ p4 := s.Prog(ppc64.ABNE)
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p)
+ // LWSYNC - Assuming shared data not write-through-required nor
+ // caching-inhibited. See Appendix B.2.1.1 in the ISA 2.07b.
+ // If the operation is a CAS-Release, then synchronization is not necessary.
+ if v.AuxInt != 0 {
+ plwsync2 := s.Prog(ppc64.ALWSYNC)
+ plwsync2.To.Type = obj.TYPE_NONE
+ }
+ // return true
+ p5 := s.Prog(ppc64.AMOVD)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+ // BR done
+ p6 := s.Prog(obj.AJMP)
+ p6.To.Type = obj.TYPE_BRANCH
+ // return false
+ p7 := s.Prog(ppc64.AMOVD)
+ p7.From.Type = obj.TYPE_CONST
+ p7.From.Offset = 0
+ p7.To.Type = obj.TYPE_REG
+ p7.To.Reg = out
+ p2.To.SetTarget(p7)
+ // done (label)
+ p8 := s.Prog(obj.ANOP)
+ p6.To.SetTarget(p8)
+
+ case ssa.OpPPC64LoweredGetClosurePtr:
+ // Closure pointer is R11 (already)
+ ssagen.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpPPC64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64LoweredRound32F, ssa.OpPPC64LoweredRound64F:
+ // input is already rounded
+
+ case ssa.OpLoadReg:
+ loadOp := loadByType(v.Type)
+ p := s.Prog(loadOp)
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ storeOp := storeByType(v.Type)
+ p := s.Prog(storeOp)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // The assembler needs to wrap the entry safepoint/stack growth code with spill/unspill
+ // The loop only runs once.
+ for _, a := range v.Block.Func.RegArgs {
+ // Pass the spill/unspill information along to the assembler, offset by size of
+ // the saved LR slot.
+ addr := ssagen.SpillSlotAddr(a, ppc64.REGSP, base.Ctxt.FixedFrameSize())
+ s.FuncInfo().AddSpill(
+ obj.RegSpill{Reg: a.Reg, Addr: addr, Unspill: loadByType(a.Type), Spill: storeByType(a.Type)})
+ }
+ v.Block.Func.RegArgs = nil
+
+ ssagen.CheckArgReg(v)
+
+ case ssa.OpPPC64DIVD:
+ // For now,
+ //
+ // cmp arg1, -1
+ // be ahead
+ // v = arg0 / arg1
+ // b over
+ // ahead: v = - arg0
+ // over: nop
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ pbahead.To.SetTarget(p)
+
+ p = s.Prog(obj.ANOP)
+ pbover.To.SetTarget(p)
+
+ case ssa.OpPPC64DIVW:
+ // word-width version of above
+ r := v.Reg()
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+
+ p := s.Prog(ppc64.ACMPW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = -1
+
+ pbahead := s.Prog(ppc64.ABEQ)
+ pbahead.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ pbover := s.Prog(obj.AJMP)
+ pbover.To.Type = obj.TYPE_BRANCH
+
+ p = s.Prog(ppc64.ANEG)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ pbahead.To.SetTarget(p)
+
+ p = s.Prog(obj.ANOP)
+ pbover.To.SetTarget(p)
+
+ case ssa.OpPPC64CLRLSLWI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlslwi ra,rs,mb,sh will become rlwinm ra,rs,sh,mb-sh,31-sh as described in ISA
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3Const(ssa.GetPPC64Shiftsh(shifts))
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64CLRLSLDI:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ // clrlsldi ra,rs,mb,sh will become rldic ra,rs,sh,mb-sh
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftmb(shifts)}
+ p.SetFrom3Const(ssa.GetPPC64Shiftsh(shifts))
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ // Mask has been set as sh
+ case ssa.OpPPC64RLDICL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ shifts := v.AuxInt
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: ssa.GetPPC64Shiftsh(shifts)}
+ p.SetFrom3Const(ssa.GetPPC64Shiftmb(shifts))
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ADD, ssa.OpPPC64FADD, ssa.OpPPC64FADDS, ssa.OpPPC64SUB, ssa.OpPPC64FSUB, ssa.OpPPC64FSUBS,
+ ssa.OpPPC64MULLD, ssa.OpPPC64MULLW, ssa.OpPPC64DIVDU, ssa.OpPPC64DIVWU,
+ ssa.OpPPC64SRAD, ssa.OpPPC64SRAW, ssa.OpPPC64SRD, ssa.OpPPC64SRW, ssa.OpPPC64SLD, ssa.OpPPC64SLW,
+ ssa.OpPPC64ROTL, ssa.OpPPC64ROTLW,
+ ssa.OpPPC64MULHD, ssa.OpPPC64MULHW, ssa.OpPPC64MULHDU, ssa.OpPPC64MULHWU,
+ ssa.OpPPC64FMUL, ssa.OpPPC64FMULS, ssa.OpPPC64FDIV, ssa.OpPPC64FDIVS, ssa.OpPPC64FCPSGN,
+ ssa.OpPPC64AND, ssa.OpPPC64OR, ssa.OpPPC64ANDN, ssa.OpPPC64ORN, ssa.OpPPC64NOR, ssa.OpPPC64XOR, ssa.OpPPC64EQV,
+ ssa.OpPPC64MODUD, ssa.OpPPC64MODSD, ssa.OpPPC64MODUW, ssa.OpPPC64MODSW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64ANDCC, ssa.OpPPC64ORCC, ssa.OpPPC64XORCC:
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP // result is not needed
+
+ case ssa.OpPPC64ROTLconst, ssa.OpPPC64ROTLWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ // Auxint holds encoded rotate + mask
+ case ssa.OpPPC64RLWINM, ssa.OpPPC64RLWMI:
+ rot, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(rot)}
+ p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
+
+ // Auxint holds mask
+ case ssa.OpPPC64RLWNM:
+ _, mb, me, _ := ssa.DecodePPC64RotateMask(v.AuxInt)
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ p.Reg = v.Args[0].Reg()
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: v.Args[1].Reg()}
+ p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_CONST, Offset: mb}, {Type: obj.TYPE_CONST, Offset: me}})
+
+ case ssa.OpPPC64MADDLD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ // r = r1*r2 + r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.SetFrom3Reg(r3)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64FMADD, ssa.OpPPC64FMADDS, ssa.OpPPC64FMSUB, ssa.OpPPC64FMSUBS:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ // r = r1*r2 ± r3
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r3
+ p.SetFrom3Reg(r2)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.OpPPC64NEG, ssa.OpPPC64FNEG, ssa.OpPPC64FSQRT, ssa.OpPPC64FSQRTS, ssa.OpPPC64FFLOOR, ssa.OpPPC64FTRUNC, ssa.OpPPC64FCEIL,
+ ssa.OpPPC64FCTIDZ, ssa.OpPPC64FCTIWZ, ssa.OpPPC64FCFID, ssa.OpPPC64FCFIDS, ssa.OpPPC64FRSP, ssa.OpPPC64CNTLZD, ssa.OpPPC64CNTLZW,
+ ssa.OpPPC64POPCNTD, ssa.OpPPC64POPCNTW, ssa.OpPPC64POPCNTB, ssa.OpPPC64MFVSRD, ssa.OpPPC64MTVSRD, ssa.OpPPC64FABS, ssa.OpPPC64FNABS,
+ ssa.OpPPC64FROUND, ssa.OpPPC64CNTTZW, ssa.OpPPC64CNTTZD:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ADDconst, ssa.OpPPC64ANDconst, ssa.OpPPC64ORconst, ssa.OpPPC64XORconst,
+ ssa.OpPPC64SRADconst, ssa.OpPPC64SRAWconst, ssa.OpPPC64SRDconst, ssa.OpPPC64SRWconst,
+ ssa.OpPPC64SLDconst, ssa.OpPPC64SLWconst, ssa.OpPPC64EXTSWSLconst, ssa.OpPPC64MULLWconst, ssa.OpPPC64MULLDconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64SUBFCconst:
+ p := s.Prog(v.Op.Asm())
+ p.SetFrom3Const(v.AuxInt)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64ANDCCconst:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP // discard result
+
+ case ssa.OpPPC64MOVDaddr:
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux in MOVDaddr is of unknown type %T", v.Aux)
+ case nil:
+ // If aux offset and aux int are both 0, and the same
+ // input and output regs are used, no instruction
+ // needs to be generated, since it would just be
+ // addi rx, rx, 0.
+ if v.AuxInt != 0 || v.Args[0].Reg() != v.Reg() {
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ }
+
+ case *obj.LSym, ir.Node:
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ ssagen.AddAux(&p.From, v)
+
+ }
+
+ case ssa.OpPPC64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FMOVDconst, ssa.OpPPC64FMOVSconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64FCMPU, ssa.OpPPC64CMP, ssa.OpPPC64CMPW, ssa.OpPPC64CMPU, ssa.OpPPC64CMPWU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64CMPconst, ssa.OpPPC64CMPUconst, ssa.OpPPC64CMPWconst, ssa.OpPPC64CMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVBreg, ssa.OpPPC64MOVBZreg, ssa.OpPPC64MOVHreg, ssa.OpPPC64MOVHZreg, ssa.OpPPC64MOVWreg, ssa.OpPPC64MOVWZreg:
+ // Shift in register to required size
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Reg = v.Reg()
+ p.To.Type = obj.TYPE_REG
+
+ case ssa.OpPPC64MOVDload, ssa.OpPPC64MOVWload:
+
+ // MOVDload and MOVWload are DS form instructions that are restricted to
+ // offsets that are a multiple of 4. If the offset is not a multiple of 4,
+ // then the address of the symbol to be loaded is computed (base + offset)
+ // and used as the new base register and the offset field in the instruction
+ // can be set to zero.
+
+ // This same problem can happen with gostrings since the final offset is not
+ // known yet, but could be unaligned after the relocation is resolved.
+ // So gostrings are handled the same way.
+
+ // This allows the MOVDload and MOVWload to be generated in more cases and
+ // eliminates some offset and alignment checking in the rules file.
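+
+ // Illustrative sketch (not from the original source; registers and offset
+ // are hypothetical): a load whose resolved offset is 6 cannot be encoded
+ // directly as
+ //	MOVD 6(R3), R4
+ // so it is emitted instead as
+ //	MOVD $6(R3), R31 // materialize base+offset in REGTMP (R31)
+ //	MOVD 0(R31), R4  // DS form load with offset 0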
+
+ fromAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ ssagen.AddAux(&fromAddr, v)
+
+ genAddr := false
+
+ switch fromAddr.Name {
+ case obj.NAME_EXTERN, obj.NAME_STATIC:
+ // Special case for a rule that combines the bytes of gostring.
+ // The v alignment might seem OK, but we don't want to load it
+ // using an offset because relocation comes later.
+ genAddr = strings.HasPrefix(fromAddr.Sym.Name, "go.string") || v.Type.Alignment()%4 != 0 || fromAddr.Offset%4 != 0
+ default:
+ genAddr = fromAddr.Offset%4 != 0
+ }
+ if genAddr {
+ // Load full address into the temp register.
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ // Load target using temp as base register
+ // and offset zero. Setting NAME_NONE
+ // prevents any extra offsets from being
+ // added.
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ fromAddr.Reg = ppc64.REGTMP
+ // Clear the offset field and other
+ // information that might be used
+ // by the assembler to add to the
+ // final offset value.
+ fromAddr.Offset = 0
+ fromAddr.Name = obj.NAME_NONE
+ fromAddr.Sym = nil
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From = fromAddr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ break
+
+ case ssa.OpPPC64MOVHload, ssa.OpPPC64MOVWZload, ssa.OpPPC64MOVBZload, ssa.OpPPC64MOVHZload, ssa.OpPPC64FMOVDload, ssa.OpPPC64FMOVSload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRload, ssa.OpPPC64MOVWBRload, ssa.OpPPC64MOVHBRload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64MOVDBRstore, ssa.OpPPC64MOVWBRstore, ssa.OpPPC64MOVHBRstore:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.OpPPC64MOVDloadidx, ssa.OpPPC64MOVWloadidx, ssa.OpPPC64MOVHloadidx, ssa.OpPPC64MOVWZloadidx,
+ ssa.OpPPC64MOVBZloadidx, ssa.OpPPC64MOVHZloadidx, ssa.OpPPC64FMOVDloadidx, ssa.OpPPC64FMOVSloadidx,
+ ssa.OpPPC64MOVDBRloadidx, ssa.OpPPC64MOVWBRloadidx, ssa.OpPPC64MOVHBRloadidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.From.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpPPC64DCBT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpPPC64MOVWstorezero, ssa.OpPPC64MOVHstorezero, ssa.OpPPC64MOVBstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstore, ssa.OpPPC64MOVDstorezero:
+
+ // MOVDstore and MOVDstorezero become DS form instructions that are restricted
+ // to offset values that are a multiple of 4. If the offset field is not a
+ // multiple of 4, then the full address of the store target is computed (base +
+ // offset) and used as the new base register and the offset in the instruction
+ // is set to 0.
+
+ // This allows the MOVDstore and MOVDstorezero to be generated in more cases,
+ // and prevents checking of the offset value and alignment in the rules.
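+
+ // Illustrative sketch (hypothetical registers and offset, not from the
+ // original source): a store whose resolved offset is 10 is emitted as
+ //	MOVD $10(R3), R31 // materialize base+offset in REGTMP (R31)
+ //	MOVD R4, 0(R31)   // DS form store with offset 0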
+
+ toAddr := obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[0].Reg()}
+ ssagen.AddAux(&toAddr, v)
+
+ if toAddr.Offset%4 != 0 {
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ toAddr.Reg = ppc64.REGTMP
+ // Clear the offset field and other
+ // information that might be used
+ // by the assembler to add to the
+ // final offset value.
+ toAddr.Offset = 0
+ toAddr.Name = obj.NAME_NONE
+ toAddr.Sym = nil
+ }
+ p := s.Prog(v.Op.Asm())
+ p.To = toAddr
+ p.From.Type = obj.TYPE_REG
+ if v.Op == ssa.OpPPC64MOVDstorezero {
+ p.From.Reg = ppc64.REGZERO
+ } else {
+ p.From.Reg = v.Args[1].Reg()
+ }
+
+ case ssa.OpPPC64MOVWstore, ssa.OpPPC64MOVHstore, ssa.OpPPC64MOVBstore, ssa.OpPPC64FMOVDstore, ssa.OpPPC64FMOVSstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+
+ case ssa.OpPPC64MOVDstoreidx, ssa.OpPPC64MOVWstoreidx, ssa.OpPPC64MOVHstoreidx, ssa.OpPPC64MOVBstoreidx,
+ ssa.OpPPC64FMOVDstoreidx, ssa.OpPPC64FMOVSstoreidx, ssa.OpPPC64MOVDBRstoreidx, ssa.OpPPC64MOVWBRstoreidx,
+ ssa.OpPPC64MOVHBRstoreidx:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Index = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ case ssa.OpPPC64ISEL, ssa.OpPPC64ISELB:
+ // ISEL, ISELB
+ // AuxInt value indicates condition: 0=LT 1=GT 2=EQ 4=GE 5=LE 6=NE
+ // ISEL only accepts 0, 1, 2 condition values but the others can be
+ // achieved by swapping operand order.
+ // arg0 ? arg1 : arg2 with conditions LT, GT, EQ
+ // arg0 ? arg2 : arg1 for conditions GE, LE, NE
+ // ISELB is used when a boolean result is needed, returning 0 or 1
+ p := s.Prog(ppc64.AISEL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ // For ISELB, boolean result 0 or 1. Use R0 for 0 operand to avoid load.
+ r := obj.Addr{Type: obj.TYPE_REG, Reg: ppc64.REG_R0}
+ if v.Op == ssa.OpPPC64ISEL {
+ r.Reg = v.Args[1].Reg()
+ }
+ // AuxInt values 4,5,6 implemented with reverse operand order from 0,1,2
+ if v.AuxInt > 3 {
+ p.Reg = r.Reg
+ p.SetFrom3Reg(v.Args[0].Reg())
+ } else {
+ p.Reg = v.Args[0].Reg()
+ p.SetFrom3(r)
+ }
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt & 3
+
+ case ssa.OpPPC64LoweredQuadZero, ssa.OpPPC64LoweredQuadZeroShort:
+ // The LoweredQuadZero ops generate
+ // STXV instructions on power9. The
+ // Short variation is used if no loop
+ // is generated.
+
+ // sizes >= 128 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR clears VS32
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/64,REG_TMP
+ // MOVD REG_TMP,CTR
+ // loop:
+ // STXV VS32,0(R20)
+ // STXV VS32,16(R20)
+ // STXV VS32,32(R20)
+ // STXV VS32,48(R20)
+ // ADD $64,R20
+ // BC 16, 0, loop
+
+ // Each loop iteration zeroes 64 bytes
+ ctr := v.AuxInt / 64
+
+ // Remainder bytes
+ rem := v.AuxInt % 64
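+
+ // Worked example (illustrative): with AuxInt = 200, ctr = 3 and rem = 8,
+ // so the loop clears 192 bytes and a single MOVD clears the final 8.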
+
+ // Only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Don't generate padding for
+ // loops with few iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // generate 4 STXVs to zero 64 bytes
+ var top *obj.Prog
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 16
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = 48
+
+ // Increment address for the
+ // 64 bytes just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 64
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+ }
+ // When ctr == 1 the loop was not generated but
+ // there are at least 64 bytes to clear, so add
+ // that to the remainder to generate the code
+ // to clear those doublewords
+ if ctr == 1 {
+ rem += 64
+ }
+
+ // Clear the remainder starting at offset zero
+ offset := int64(0)
+
+ if rem >= 16 && ctr <= 1 {
+ // If the XXLXOR hasn't already been
+ // generated, do it here to initialize
+ // VS32 (V0) to 0.
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+ }
+ // Generate STXV for 32 or 64
+ // bytes.
+ for rem >= 32 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset + 16
+ offset += 32
+ rem -= 32
+ }
+ // Generate 16 bytes
+ if rem >= 16 {
+ p := s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ offset += 16
+ rem -= 16
+ }
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredZero, ssa.OpPPC64LoweredZeroShort:
+
+ // Unaligned data doesn't hurt performance
+ // for these instructions on power8.
+
+ // For sizes >= 64 generate a loop as follows:
+
+ // Set up loop counter in CTR, used by BC
+ // XXLXOR VS32,VS32,VS32
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // loop:
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS32,(R31)(R20)
+ // ADD $32,R20
+ // BC 16, 0, loop
+ //
+ // any remainder is done as described below
+
+ // for sizes < 64 bytes, first clear as many doublewords as possible,
+ // then handle the remainder
+ // MOVD R0,(R20)
+ // MOVD R0,8(R20)
+ // .... etc.
+ //
+ // the remainder bytes are cleared using one or more
+ // of the following instructions with the appropriate
+ // offsets depending which instructions are needed
+ //
+ // MOVW R0,n1(R20) 4 bytes
+ // MOVH R0,n2(R20) 2 bytes
+ // MOVB R0,n3(R20) 1 byte
+ //
+ // 7 bytes: MOVW, MOVH, MOVB
+ // 6 bytes: MOVW, MOVH
+ // 5 bytes: MOVW, MOVB
+ // 3 bytes: MOVH, MOVB
+
+ // each loop iteration does 32 bytes
+ ctr := v.AuxInt / 32
+
+ // remainder bytes
+ rem := v.AuxInt % 32
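+
+ // Worked example (illustrative): with AuxInt = 79, ctr = 2 and rem = 15;
+ // the loop clears 64 bytes, then MOVD, MOVW, MOVH and MOVB clear the
+ // remaining 8+4+2+1 bytes.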
+
+ // only generate a loop if there is more
+ // than 1 iteration.
+ if ctr > 1 {
+ // Set up VS32 (V0) to hold 0s
+ p := s.Prog(ppc64.AXXLXOR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ p.Reg = ppc64.REG_VS32
+
+ // Set up CTR loop counter
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Set up R31 to hold index value 16
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ // Don't add padding for alignment
+ // with few loop iterations.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // generate 2 STXVD2Xs, 16 bytes each, to store 32 bytes
+ // when this is a loop then the top must be saved
+ var top *obj.Prog
+ // This is the top of loop
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGZERO
+ // Save the top of loop
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Index = ppc64.REGTMP
+
+ // Increment address for the
+ // 4 doublewords just zeroed.
+ p = s.Prog(ppc64.AADD)
+ p.Reg = v.Args[0].Reg()
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+
+ // Branch back to top of loop
+ // based on CTR
+ // BC with BO_BCTR generates bdnz
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+ }
+
+ // when ctr == 1 the loop was not generated but
+ // there are at least 32 bytes to clear, so add
+ // that to the remainder to generate the code
+ // to clear those doublewords
+ if ctr == 1 {
+ rem += 32
+ }
+
+ // clear the remainder starting at offset zero
+ offset := int64(0)
+
+ // first clear as many doublewords as possible
+ // then clear remaining sizes as available
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVW, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredMove, ssa.OpPPC64LoweredMoveShort:
+
+ bytesPerLoop := int64(32)
+ // This will be used when moving more
+ // than 8 bytes. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. This will
+ // work and be efficient for power8 or later.
+ // If there are 64 or more bytes, then a
+ // loop is generated to move 32 bytes and
+ // update the src and dst addresses on each
+ // iteration. When < 64 bytes, the appropriate
+ // number of moves are generated based on the
+ // size.
+ // When moving >= 64 bytes a loop is used
+ // MOVD len/32,REG_TMP
+ // MOVD REG_TMP,CTR
+ // MOVD $16,REG_TMP
+ // top:
+ // LXVD2X (R0)(R21),VS32
+ // LXVD2X (R31)(R21),VS33
+ // ADD $32,R21
+ // STXVD2X VS32,(R0)(R20)
+ // STXVD2X VS33,(R31)(R20)
+ // ADD $32,R20
+ // BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+ // Each loop iteration moves 32 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
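+
+ // Worked example (illustrative): with AuxInt = 100, ctr = 3 and rem = 4;
+ // the loop moves 96 bytes and a single MOVWZ load/store pair moves the
+ // final 4.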
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+ // The set of registers used here must match the clobbered reg list
+ // in PPC64Ops.go.
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+ // Only generate looping code when loop counter is > 1 for >= 64 bytes
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ // Use REGTMP as index reg
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ // Don't add padding for
+ // alignment with small iteration
+ // counts.
+ if ctr > 3 {
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ }
+
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (16)
+ // on the second one.
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+ // No loop was generated for one iteration, so
+ // add 32 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ // Use temp register for index (value 16)
+ // on the second one.
+ p := s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGZERO
+
+ offset = 16
+ rem -= 16
+
+ if rem >= 16 {
+ // Use REGTMP as index reg
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.ALXVD2X)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Index = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXVD2X)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Index = ppc64.REGTMP
+
+ offset = 32
+ rem -= 16
+ }
+ }
+
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVWZ, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64LoweredQuadMove, ssa.OpPPC64LoweredQuadMoveShort:
+ bytesPerLoop := int64(64)
+ // This is used when moving more
+ // than 8 bytes on power9. Moves start with
+ // as many 8 byte moves as possible, then
+ // 4, 2, or 1 byte(s) as remaining. The
+ // LXV/STXV instructions generated here
+ // require power9.
+ // If there are 128 or more bytes, then a
+ // loop is generated to move 64 bytes and
+ // update the src and dst addresses on each
+ // iteration. When < 128 bytes, the appropriate
+ // number of moves are generated based on the
+ // size.
+ // When moving >= 128 bytes a loop is used
+ // MOVD len/64,REG_TMP
+ // MOVD REG_TMP,CTR
+ // top:
+ // LXV 0(R21),VS32
+ // LXV 16(R21),VS33
+ // STXV VS32,0(R20)
+ // STXV VS33,16(R20)
+ // LXV 32(R21),VS32
+ // LXV 48(R21),VS33
+ // STXV VS32,32(R20)
+ // STXV VS33,48(R20)
+ // ADD $64,R21
+ // ADD $64,R20
+ // BC 16,0,top
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R21),R31
+ // MOVD R31,n(R20)
+ // MOVW n1(R21),R31
+ // MOVW R31,n1(R20)
+ // MOVH n2(R21),R31
+ // MOVH R31,n2(R20)
+ // MOVB n3(R21),R31
+ // MOVB R31,n3(R20)
+
+ // Each loop iteration moves 64 bytes
+ ctr := v.AuxInt / bytesPerLoop
+
+ // Remainder after the loop
+ rem := v.AuxInt % bytesPerLoop
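+
+ // Worked example (illustrative): with AuxInt = 150, ctr = 2 and rem = 22;
+ // the loop moves 128 bytes, an LXV/STXV pair moves 16 more, and a MOVWZ
+ // load/store pair plus a MOVH load/store pair move the final 6.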
+
+ dstReg := v.Args[0].Reg()
+ srcReg := v.Args[1].Reg()
+
+ offset := int64(0)
+
+ // top of the loop
+ var top *obj.Prog
+
+ // Only generate looping code when loop counter is > 1 for >= 64 bytes
+ if ctr > 1 {
+ // Set up the CTR
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ctr
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+
+ p = s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_CTR
+
+ p = s.Prog(obj.APCALIGN)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+ if top == nil {
+ top = p
+ }
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 16
+
+ // Generate 16 byte loads and stores.
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 32
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset + 48
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ // generate 16 byte stores
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset + 48
+
+ // increment the src reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = srcReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = srcReg
+
+ // increment the dst reg for next iteration
+ p = s.Prog(ppc64.AADD)
+ p.Reg = dstReg
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = bytesPerLoop
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dstReg
+
+ // BC with BO_BCTR generates bdnz to branch on nonzero CTR
+ // to loop top.
+ p = s.Prog(ppc64.ABC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = ppc64.BO_BCTR
+ p.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_BRANCH
+ p.To.SetTarget(top)
+
+ // srcReg and dstReg were incremented in the loop, so
+ // later instructions start with offset 0.
+ offset = int64(0)
+ }
+
+ // No loop was generated for one iteration, so
+ // add 64 bytes to the remainder to move those bytes.
+ if ctr == 1 {
+ rem += bytesPerLoop
+ }
+ if rem >= 32 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = 16
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS33
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS33
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = 16
+
+ offset = 32
+ rem -= 32
+ }
+
+ if rem >= 16 {
+ // Generate 16 byte loads and stores.
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+
+ if rem >= 16 {
+ p := s.Prog(ppc64.ALXV)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_VS32
+
+ p = s.Prog(ppc64.ASTXV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_VS32
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+
+ offset += 16
+ rem -= 16
+ }
+ }
+ // Generate all the remaining load and store pairs, starting with
+ // as many 8 byte moves as possible, then 4, 2, 1.
+ for rem > 0 {
+ op, size := ppc64.AMOVB, int64(1)
+ switch {
+ case rem >= 8:
+ op, size = ppc64.AMOVD, 8
+ case rem >= 4:
+ op, size = ppc64.AMOVWZ, 4
+ case rem >= 2:
+ op, size = ppc64.AMOVH, 2
+ }
+ // Load
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = srcReg
+ p.From.Offset = offset
+
+ // Store
+ p = s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REGTMP
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = dstReg
+ p.To.Offset = offset
+ rem -= size
+ offset += size
+ }
+
+ case ssa.OpPPC64CALLstatic:
+ s.Call(v)
+
+ case ssa.OpPPC64CALLtail:
+ s.TailCall(v)
+
+ case ssa.OpPPC64CALLclosure, ssa.OpPPC64CALLinter:
+ p := s.Prog(ppc64.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_LR
+
+ if v.Args[0].Reg() != ppc64.REG_R12 {
+ v.Fatalf("Function address for %v should be in R12 %d but is in %d", v.LongString(), ppc64.REG_R12, p.From.Reg)
+ }
+
+ pp := s.Call(v)
+ pp.To.Reg = ppc64.REG_LR
+
+ // Insert a hint this is not a subroutine return.
+ pp.SetFrom3Const(1)
+
+ if base.Ctxt.Flag_shared {
+ // When compiling Go into PIC, the function we just
+ // called via pointer might have been implemented in
+ // a separate module and so overwritten the TOC
+ // pointer in R2; reload it.
+ q := s.Prog(ppc64.AMOVD)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Offset = 24
+ q.From.Reg = ppc64.REGSP
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = ppc64.REG_R2
+ }
+
+ case ssa.OpPPC64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
+ case ssa.OpPPC64LoweredPanicBoundsA, ssa.OpPPC64LoweredPanicBoundsB, ssa.OpPPC64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpPPC64LoweredNilCheck:
+ if buildcfg.GOOS == "aix" {
+ // CMP Rarg0, R0
+ // BNE 2(PC)
+ // STW R0, 0(R0)
+ // NOP (so the BNE has somewhere to land)
+
+ // CMP Rarg0, R0
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ // BNE 2(PC)
+ p2 := s.Prog(ppc64.ABNE)
+ p2.To.Type = obj.TYPE_BRANCH
+
+ // STW R0, 0(R0)
+ // Write at 0 is forbidden and will trigger a SIGSEGV
+ p = s.Prog(ppc64.AMOVW)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R0
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = ppc64.REG_R0
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ p2.To.SetTarget(nop)
+
+ } else {
+ // Issue a load which will fault if arg is nil.
+ p := s.Prog(ppc64.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REGTMP
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ // These should be resolved by rules and not make it here.
+ case ssa.OpPPC64Equal, ssa.OpPPC64NotEqual, ssa.OpPPC64LessThan, ssa.OpPPC64FLessThan,
+ ssa.OpPPC64LessEqual, ssa.OpPPC64GreaterThan, ssa.OpPPC64FGreaterThan, ssa.OpPPC64GreaterEqual,
+ ssa.OpPPC64FLessEqual, ssa.OpPPC64FGreaterEqual:
+ v.Fatalf("Pseudo-op should not make it to codegen: %s ###\n", v.LongString())
+ case ssa.OpPPC64InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpPPC64FlagEQ, ssa.OpPPC64FlagLT, ssa.OpPPC64FlagGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+var blockJump = [...]struct {
+ asm, invasm obj.As
+ asmeq, invasmun bool
+}{
+ ssa.BlockPPC64EQ: {ppc64.ABEQ, ppc64.ABNE, false, false},
+ ssa.BlockPPC64NE: {ppc64.ABNE, ppc64.ABEQ, false, false},
+
+ ssa.BlockPPC64LT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64GE: {ppc64.ABGE, ppc64.ABLT, false, false},
+ ssa.BlockPPC64LE: {ppc64.ABLE, ppc64.ABGT, false, false},
+ ssa.BlockPPC64GT: {ppc64.ABGT, ppc64.ABLE, false, false},
+
+ // TODO: need to work FP comparisons into block jumps
+ ssa.BlockPPC64FLT: {ppc64.ABLT, ppc64.ABGE, false, false},
+ ssa.BlockPPC64FGE: {ppc64.ABGT, ppc64.ABLT, true, true}, // GE = GT or EQ; !GE = LT or UN
+ ssa.BlockPPC64FLE: {ppc64.ABLT, ppc64.ABGT, true, true}, // LE = LT or EQ; !LE = GT or UN
+ ssa.BlockPPC64FGT: {ppc64.ABGT, ppc64.ABLE, false, false},
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(ppc64.ACMP)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = ppc64.REG_R3
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = ppc64.REG_R0
+
+ p = s.Prog(ppc64.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockPPC64EQ, ssa.BlockPPC64NE,
+ ssa.BlockPPC64LT, ssa.BlockPPC64GE,
+ ssa.BlockPPC64LE, ssa.BlockPPC64GT,
+ ssa.BlockPPC64FLT, ssa.BlockPPC64FGE,
+ ssa.BlockPPC64FLE, ssa.BlockPPC64FGT:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ if jmp.asmeq {
+ s.Br(ppc64.ABEQ, b.Succs[0].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ if jmp.invasmun {
+ // TODO: The second branch is probably predict-not-taken since it is for FP unordered
+ s.Br(ppc64.ABVS, b.Succs[1].Block())
+ }
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
+
+// loadRegResult generates a load of the result value n, at frame offset off,
+// into register reg.
+func loadRegResult(s *ssagen.State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p := s.Prog(loadByType(t))
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_AUTO
+ p.From.Sym = n.Linksym()
+ p.From.Offset = n.FrameOffset() + off
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = reg
+ return p
+}
+
+// spillArgReg appends a store of argument register reg into the stack slot of
+// parameter n (at frame offset off) and marks it as not a statement.
+func spillArgReg(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog {
+ p = pp.Append(p, storeByType(t), obj.TYPE_REG, reg, 0, obj.TYPE_MEM, 0, n.FrameOffset()+off)
+ p.To.Name = obj.NAME_PARAM
+ p.To.Sym = n.Linksym()
+ p.Pos = p.Pos.WithNotStmt()
+ return p
+}
diff --git a/src/cmd/compile/internal/reflectdata/alg.go b/src/cmd/compile/internal/reflectdata/alg.go
new file mode 100644
index 0000000..d000618
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/alg.go
@@ -0,0 +1,808 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+ "fmt"
+ "math/bits"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// isRegularMemory reports whether t can be compared/hashed as regular memory.
+func isRegularMemory(t *types.Type) bool {
+ a, _ := types.AlgType(t)
+ return a == types.AMEM
+}
+
+// eqCanPanic reports whether == on type t could panic (has an interface somewhere).
+// t must be comparable.
+func eqCanPanic(t *types.Type) bool {
+ switch t.Kind() {
+ default:
+ return false
+ case types.TINTER:
+ return true
+ case types.TARRAY:
+ return eqCanPanic(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ if !f.Sym.IsBlank() && eqCanPanic(f.Type) {
+ return true
+ }
+ }
+ return false
+ }
+}
+
+// AlgType returns the fixed-width AMEMxx variants instead of the general
+// AMEM kind when possible.
+func AlgType(t *types.Type) types.AlgKind {
+ a, _ := types.AlgType(t)
+ if a == types.AMEM {
+ if t.Alignment() < int64(base.Ctxt.Arch.Alignment) && t.Alignment() < t.Size() {
+ // For example, we can't treat [2]int16 as an int32 if int32s require
+ // 4-byte alignment. See issue 46283.
+ return a
+ }
+ switch t.Size() {
+ case 0:
+ return types.AMEM0
+ case 1:
+ return types.AMEM8
+ case 2:
+ return types.AMEM16
+ case 4:
+ return types.AMEM32
+ case 8:
+ return types.AMEM64
+ case 16:
+ return types.AMEM128
+ }
+ }
+
+ return a
+}
+
+// genhash returns a symbol which is the closure used to compute
+// the hash of a value of type t.
+// Note: the generated function must match runtime.typehash exactly.
+func genhash(t *types.Type) *obj.LSym {
+ switch AlgType(t) {
+ default:
+ // genhash is only called for types that have equality
+ base.Fatalf("genhash %v", t)
+ case types.AMEM0:
+ return sysClosure("memhash0")
+ case types.AMEM8:
+ return sysClosure("memhash8")
+ case types.AMEM16:
+ return sysClosure("memhash16")
+ case types.AMEM32:
+ return sysClosure("memhash32")
+ case types.AMEM64:
+ return sysClosure("memhash64")
+ case types.AMEM128:
+ return sysClosure("memhash128")
+ case types.ASTRING:
+ return sysClosure("strhash")
+ case types.AINTER:
+ return sysClosure("interhash")
+ case types.ANILINTER:
+ return sysClosure("nilinterhash")
+ case types.AFLOAT32:
+ return sysClosure("f32hash")
+ case types.AFLOAT64:
+ return sysClosure("f64hash")
+ case types.ACPLX64:
+ return sysClosure("c64hash")
+ case types.ACPLX128:
+ return sysClosure("c128hash")
+ case types.AMEM:
+ // For other sizes of plain memory, we build a closure
+ // that calls memhash_varlen. The size of the memory is
+ // encoded in the first slot of the closure.
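+ // Illustratively, the closure data emitted below is laid out as:
+ //	offset 0:       pointer to memhash_varlen
+ //	offset PtrSize: t.Size() (the length to hash)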
+ closure := TypeLinksymLookup(fmt.Sprintf(".hashfunc%d", t.Size()))
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ if memhashvarlen == nil {
+ memhashvarlen = typecheck.LookupRuntimeFunc("memhash_varlen")
+ }
+ ot := 0
+ ot = objw.SymPtr(closure, ot, memhashvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Size())) // size encoded in closure
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case types.ASPECIAL:
+ break
+ }
+
+ closure := TypeLinksymPrefix(".hashfunc", t)
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+
+ // Generate hash functions for subtypes.
+ // There are cases where we might not use these hashes,
+ // but in that case they will get dead-code eliminated.
+ // (And the closure generated by genhash will also get
+ // dead-code eliminated, as we call the subtype hashers
+ // directly.)
+ switch t.Kind() {
+ case types.TARRAY:
+ genhash(t.Elem())
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ genhash(f.Type)
+ }
+ }
+
+ sym := TypeSymPrefix(".hash", t)
+ if base.Flag.LowerR != 0 {
+ fmt.Printf("genhash %v %v %v\n", closure, sym, t)
+ }
+
+ base.Pos = base.AutogeneratedPos // less confusing than end of input
+ typecheck.DeclContext = ir.PEXTERN
+
+ // func sym(p *T, h uintptr) uintptr
+ args := []*ir.Field{
+ ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)),
+ ir.NewField(base.Pos, typecheck.Lookup("h"), nil, types.Types[types.TUINTPTR]),
+ }
+ results := []*ir.Field{ir.NewField(base.Pos, nil, nil, types.Types[types.TUINTPTR])}
+ tfn := ir.NewFuncType(base.Pos, nil, args, results)
+
+ fn := typecheck.DeclFunc(sym, tfn)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nh := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+
+ switch t.Kind() {
+ case types.TARRAY:
+ // An array of pure memory would be handled by the
+ // standard algorithm, so the element type must not be
+ // pure memory.
+ hashel := hashfor(t.Elem())
+
+ // for i := 0; i < nelem; i++
+ ni := typecheck.Temp(types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, ni, ir.NewInt(0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, ni, ir.NewInt(t.NumElem()))
+ post := ir.NewAssignStmt(base.Pos, ni, ir.NewBinaryExpr(base.Pos, ir.OADD, ni, ir.NewInt(1)))
+ loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
+ loop.PtrInit().Append(init)
+
+ // h = hashel(&p[i], h)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+
+ nx := ir.NewIndexExpr(base.Pos, np, ni)
+ nx.SetBounded(true)
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ loop.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+ fn.Body.Append(loop)
+
+ case types.TSTRUCT:
+ // Walk the struct using memhash for runs of AMEM
+ // and calling specific hash functions for the others.
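+ // For example (illustrative): for struct{ a, b int64; s string }, the
+ // fields a and b form a single 16-byte memhash run, while s is hashed
+ // by calling the string hash function.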
+ for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ // Hash non-memory fields with appropriate hash function.
+ if !isRegularMemory(f.Type) {
+ hashel := hashfor(f.Type)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+ i++
+ continue
+ }
+
+ // Otherwise, hash a maximal length run of raw memory.
+ size, next := memrun(t, i)
+
+ // h = hashel(&p.first, size, h)
+ hashel := hashmem(f.Type)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, hashel, nil)
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym) // TODO: fields from other packages?
+ na := typecheck.NodAddr(nx)
+ call.Args.Append(na)
+ call.Args.Append(nh)
+ call.Args.Append(ir.NewInt(size))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nh, call))
+
+ i = next
+ }
+ }
+
+ r := ir.NewReturnStmt(base.Pos, nil)
+ r.Results.Append(nh)
+ fn.Body.Append(r)
+
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("genhash body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+
+ fn.SetDupok(true)
+ typecheck.Func(fn)
+
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ ir.CurFunc = nil
+
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ fn.SetNilCheckDisabled(true)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+ // Build closure. It doesn't close over any variables, so
+ // it contains just the function pointer.
+ objw.SymPtr(closure, 0, fn.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+
+ return closure
+}
+
+func hashfor(t *types.Type) ir.Node {
+ var sym *types.Sym
+
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
+ base.Fatalf("hashfor with AMEM type")
+ case types.AINTER:
+ sym = ir.Pkgs.Runtime.Lookup("interhash")
+ case types.ANILINTER:
+ sym = ir.Pkgs.Runtime.Lookup("nilinterhash")
+ case types.ASTRING:
+ sym = ir.Pkgs.Runtime.Lookup("strhash")
+ case types.AFLOAT32:
+ sym = ir.Pkgs.Runtime.Lookup("f32hash")
+ case types.AFLOAT64:
+ sym = ir.Pkgs.Runtime.Lookup("f64hash")
+ case types.ACPLX64:
+ sym = ir.Pkgs.Runtime.Lookup("c64hash")
+ case types.ACPLX128:
+ sym = ir.Pkgs.Runtime.Lookup("c128hash")
+ default:
+ // Note: the caller of hashfor ensured that this symbol
+ // exists and has a body by calling genhash for t.
+ sym = TypeSymPrefix(".hash", t)
+ }
+
+ // TODO(austin): This creates an ir.Name with a nil Func.
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.NewPtr(t)),
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ }))
+ return n
+}
+
+// sysClosure returns a closure which will call the
+// given runtime function (with no closed-over variables).
+func sysClosure(name string) *obj.LSym {
+ s := typecheck.LookupRuntimeVar(name + "·f")
+ if len(s.P) == 0 {
+ f := typecheck.LookupRuntimeFunc(name)
+ objw.SymPtr(s, 0, f, 0)
+ objw.Global(s, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ }
+ return s
+}
+
+// geneq returns a symbol which is the closure used to compute
+// equality for two objects of type t.
+func geneq(t *types.Type) *obj.LSym {
+ switch AlgType(t) {
+ case types.ANOEQ:
+ // The runtime will panic if it tries to compare
+ // a type with a nil equality function.
+ return nil
+ case types.AMEM0:
+ return sysClosure("memequal0")
+ case types.AMEM8:
+ return sysClosure("memequal8")
+ case types.AMEM16:
+ return sysClosure("memequal16")
+ case types.AMEM32:
+ return sysClosure("memequal32")
+ case types.AMEM64:
+ return sysClosure("memequal64")
+ case types.AMEM128:
+ return sysClosure("memequal128")
+ case types.ASTRING:
+ return sysClosure("strequal")
+ case types.AINTER:
+ return sysClosure("interequal")
+ case types.ANILINTER:
+ return sysClosure("nilinterequal")
+ case types.AFLOAT32:
+ return sysClosure("f32equal")
+ case types.AFLOAT64:
+ return sysClosure("f64equal")
+ case types.ACPLX64:
+ return sysClosure("c64equal")
+ case types.ACPLX128:
+ return sysClosure("c128equal")
+ case types.AMEM:
+ // make equality closure. The size of the type
+ // is encoded in the closure.
+ closure := TypeLinksymLookup(fmt.Sprintf(".eqfunc%d", t.Size()))
+ if len(closure.P) != 0 {
+ return closure
+ }
+ if memequalvarlen == nil {
+ memequalvarlen = typecheck.LookupRuntimeFunc("memequal_varlen")
+ }
+ ot := 0
+ ot = objw.SymPtr(closure, ot, memequalvarlen, 0)
+ ot = objw.Uintptr(closure, ot, uint64(t.Size()))
+ objw.Global(closure, int32(ot), obj.DUPOK|obj.RODATA)
+ return closure
+ case types.ASPECIAL:
+ break
+ }
+
+ closure := TypeLinksymPrefix(".eqfunc", t)
+ if len(closure.P) > 0 { // already generated
+ return closure
+ }
+ sym := TypeSymPrefix(".eq", t)
+ if base.Flag.LowerR != 0 {
+ fmt.Printf("geneq %v\n", t)
+ }
+
+ // Autogenerate code for equality of structs and arrays.
+
+ base.Pos = base.AutogeneratedPos // less confusing than end of input
+ typecheck.DeclContext = ir.PEXTERN
+
+ // func sym(p, q *T) bool
+ tfn := ir.NewFuncType(base.Pos, nil,
+ []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("p"), nil, types.NewPtr(t)), ir.NewField(base.Pos, typecheck.Lookup("q"), nil, types.NewPtr(t))},
+ []*ir.Field{ir.NewField(base.Pos, typecheck.Lookup("r"), nil, types.Types[types.TBOOL])})
+
+ fn := typecheck.DeclFunc(sym, tfn)
+ np := ir.AsNode(tfn.Type().Params().Field(0).Nname)
+ nq := ir.AsNode(tfn.Type().Params().Field(1).Nname)
+ nr := ir.AsNode(tfn.Type().Results().Field(0).Nname)
+
+ // Label to jump to if an equality test fails.
+ neq := typecheck.AutoLabel(".neq")
+
+ // We reach here only for types that have equality but
+ // cannot be handled by the standard algorithms,
+ // so t must be either an array or a struct.
+ switch t.Kind() {
+ default:
+ base.Fatalf("geneq %v", t)
+
+ case types.TARRAY:
+ nelem := t.NumElem()
+
+ // checkAll generates code to check the equality of all array elements.
+ // If unroll is greater than nelem, checkAll generates:
+ //
+ // if eq(p[0], q[0]) && eq(p[1], q[1]) && ... {
+ // } else {
+ // return
+ // }
+ //
+ // And so on.
+ //
+ // Otherwise it generates:
+ //
+ // for i := 0; i < nelem; i++ {
+ // if eq(p[i], q[i]) {
+ // } else {
+ // goto neq
+ // }
+ // }
+ //
+ // TODO(josharian): consider doing some loop unrolling
+ // for larger nelem as well, processing a few elements at a time in a loop.
+ checkAll := func(unroll int64, last bool, eq func(pi, qi ir.Node) ir.Node) {
+ // checkIdx generates a node to check for equality at index i.
+ checkIdx := func(i ir.Node) ir.Node {
+ // pi := p[i]
+ pi := ir.NewIndexExpr(base.Pos, np, i)
+ pi.SetBounded(true)
+ pi.SetType(t.Elem())
+ // qi := q[i]
+ qi := ir.NewIndexExpr(base.Pos, nq, i)
+ qi.SetBounded(true)
+ qi.SetType(t.Elem())
+ return eq(pi, qi)
+ }
+
+ if nelem <= unroll {
+ if last {
+ // Do last comparison in a different manner.
+ nelem--
+ }
+ // Generate a series of checks.
+ for i := int64(0); i < nelem; i++ {
+ // if check {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(ir.NewInt(i)), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(nif)
+ }
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, checkIdx(ir.NewInt(nelem))))
+ }
+ } else {
+ // Generate a for loop.
+ // for i := 0; i < nelem; i++
+ i := typecheck.Temp(types.Types[types.TINT])
+ init := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(nelem))
+ post := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+ loop := ir.NewForStmt(base.Pos, nil, cond, post, nil)
+ loop.PtrInit().Append(init)
+ // if eq(pi, qi) {} else { goto neq }
+ nif := ir.NewIfStmt(base.Pos, checkIdx(i), nil, nil)
+ nif.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ loop.Body.Append(nif)
+ fn.Body.Append(loop)
+ if last {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+ }
+ }
+ }
+
+ switch t.Elem().Kind() {
+ case types.TSTRING:
+ // Do two loops. First, check that all the lengths match (cheap).
+ // Second, check that all the contents match (expensive).
+ // TODO: when the array size is small, unroll the length match checks.
+ checkAll(3, false, func(pi, qi ir.Node) ir.Node {
+ // Compare lengths.
+ eqlen, _ := EqString(pi, qi)
+ return eqlen
+ })
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+ // Compare contents.
+ _, eqmem := EqString(pi, qi)
+ return eqmem
+ })
+ case types.TFLOAT32, types.TFLOAT64:
+ checkAll(2, true, func(pi, qi ir.Node) ir.Node {
+ // p[i] == q[i]
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+ })
+ // TODO: pick apart structs, do them piecemeal too
+ default:
+ checkAll(1, true, func(pi, qi ir.Node) ir.Node {
+ // p[i] == q[i]
+ return ir.NewBinaryExpr(base.Pos, ir.OEQ, pi, qi)
+ })
+ }
+
+ case types.TSTRUCT:
+ // Build a list of conditions to satisfy.
+ // The conditions are a list-of-lists. Conditions are reorderable
+ // within each inner list. The outer lists must be evaluated in order.
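+ // For example (illustrative): for struct{ x int; s string; y int } the
+ // conditions are p.x == q.x, len(p.s) == len(q.s), p.y == q.y, and a
+ // memequal call on the string contents, all in one reorderable group
+ // (none of them can panic); the sort below then moves the call last.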
+ var conds [][]ir.Node
+ conds = append(conds, []ir.Node{})
+ and := func(n ir.Node) {
+ i := len(conds) - 1
+ conds[i] = append(conds[i], n)
+ }
+
+ // Walk the struct using memequal for runs of AMEM
+ // and calling specific equality tests for the others.
+ for i, fields := 0, t.FieldSlice(); i < len(fields); {
+ f := fields[i]
+
+ // Skip blank-named fields.
+ if f.Sym.IsBlank() {
+ i++
+ continue
+ }
+
+ // Compare non-memory fields with field equality.
+ if !isRegularMemory(f.Type) {
+ if eqCanPanic(f.Type) {
+ // Enforce ordering by starting a new set of reorderable conditions.
+ conds = append(conds, []ir.Node{})
+ }
+ p := ir.NewSelectorExpr(base.Pos, ir.OXDOT, np, f.Sym)
+ q := ir.NewSelectorExpr(base.Pos, ir.OXDOT, nq, f.Sym)
+ switch {
+ case f.Type.IsString():
+ eqlen, eqmem := EqString(p, q)
+ and(eqlen)
+ and(eqmem)
+ default:
+ and(ir.NewBinaryExpr(base.Pos, ir.OEQ, p, q))
+ }
+ if eqCanPanic(f.Type) {
+ // Also enforce ordering after something that can panic.
+ conds = append(conds, []ir.Node{})
+ }
+ i++
+ continue
+ }
+
+ // Find maximal length run of memory-only fields.
+ size, next := memrun(t, i)
+
+ // TODO(rsc): All the calls to newname are wrong for
+ // cross-package unexported fields.
+ if s := fields[i:next]; len(s) <= 2 {
+ // Two or fewer fields: use plain field equality.
+ for _, f := range s {
+ and(eqfield(np, nq, f.Sym))
+ }
+ } else {
+ // More than two fields: use memequal.
+ and(eqmem(np, nq, f.Sym, size))
+ }
+ i = next
+ }
+
+ // Sort conditions to put runtime calls last.
+ // Preserve the rest of the ordering.
+ var flatConds []ir.Node
+ for _, c := range conds {
+ isCall := func(n ir.Node) bool {
+ return n.Op() == ir.OCALL || n.Op() == ir.OCALLFUNC
+ }
+ sort.SliceStable(c, func(i, j int) bool {
+ return !isCall(c[i]) && isCall(c[j])
+ })
+ flatConds = append(flatConds, c...)
+ }
+
+ if len(flatConds) == 0 {
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(true)))
+ } else {
+ for _, c := range flatConds[:len(flatConds)-1] {
+ // if cond {} else { goto neq }
+ n := ir.NewIfStmt(base.Pos, c, nil, nil)
+ n.Else.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, neq))
+ fn.Body.Append(n)
+ }
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, flatConds[len(flatConds)-1]))
+ }
+ }
+
+ // ret:
+ // return
+ ret := typecheck.AutoLabel(".ret")
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, ret))
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+
+ // neq:
+ // r = false
+ // return (or goto ret)
+ fn.Body.Append(ir.NewLabelStmt(base.Pos, neq))
+ fn.Body.Append(ir.NewAssignStmt(base.Pos, nr, ir.NewBool(false)))
+ if eqCanPanic(t) || anyCall(fn) {
+ // Epilogue is large, so share it with the equal case.
+ fn.Body.Append(ir.NewBranchStmt(base.Pos, ir.OGOTO, ret))
+ } else {
+ // Epilogue is small, so don't bother sharing.
+ fn.Body.Append(ir.NewReturnStmt(base.Pos, nil))
+ }
+ // TODO(khr): the epilogue size detection condition above isn't perfect.
+ // We should really do a generic CL that shares epilogues across
+ // the board. See #24936.
+
+ if base.Flag.LowerR != 0 {
+ ir.DumpList("geneq body", fn.Body)
+ }
+
+ typecheck.FinishFuncBody()
+
+ fn.SetDupok(true)
+ typecheck.Func(fn)
+
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ ir.CurFunc = nil
+
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ // Disable checknils while compiling this code.
+ // We are comparing a struct or an array,
+ // neither of which can be nil, and our comparisons
+ // are shallow.
+ fn.SetNilCheckDisabled(true)
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+ // Generate a closure which points at the function we just generated.
+ objw.SymPtr(closure, 0, fn.Linksym(), 0)
+ objw.Global(closure, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ return closure
+}
+
+func anyCall(fn *ir.Func) bool {
+ return ir.Any(fn, func(n ir.Node) bool {
+ // TODO(rsc): No methods?
+ op := n.Op()
+ return op == ir.OCALL || op == ir.OCALLFUNC
+ })
+}
+
+// eqfield returns the node
+// p.field == q.field
+func eqfield(p ir.Node, q ir.Node, field *types.Sym) ir.Node {
+ nx := ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)
+ ny := ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)
+ ne := ir.NewBinaryExpr(base.Pos, ir.OEQ, nx, ny)
+ return ne
+}
+
+// EqString returns the nodes
+// len(s) == len(t)
+// and
+// memequal(s.ptr, t.ptr, len(s))
+// which can be used to construct string equality comparison.
+// eqlen must be evaluated before eqmem, and shortcircuiting is required.
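+// A caller typically emits the two results as successive conditions that
+// each branch to a not-equal label on failure (as geneq does above), so
+// that memequal only runs once the lengths are known to match.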
+func EqString(s, t ir.Node) (eqlen *ir.BinaryExpr, eqmem *ir.CallExpr) {
+ s = typecheck.Conv(s, types.Types[types.TSTRING])
+ t = typecheck.Conv(t, types.Types[types.TSTRING])
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ tptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, t)
+ slen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, s), types.Types[types.TUINTPTR])
+ tlen := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, t), types.Types[types.TUINTPTR])
+
+ fn := typecheck.LookupRuntime("memequal")
+ fn = typecheck.SubstArgTypes(fn, types.Types[types.TUINT8], types.Types[types.TUINT8])
+ call := typecheck.Call(base.Pos, fn, []ir.Node{sptr, tptr, ir.Copy(slen)}, false).(*ir.CallExpr)
+
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, slen, tlen)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
+ return cmp, call
+}
+
+// EqInterface returns the nodes
+// s.tab == t.tab (or s.typ == t.typ, as appropriate)
+// and
+// ifaceeq(s.tab, s.data, t.data) (or efaceeq(s.typ, s.data, t.data), as appropriate)
+// which can be used to construct interface equality comparison.
+// eqtab must be evaluated before eqdata, and shortcircuiting is required.
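+// Callers are expected to evaluate eqtab first and issue the eqdata
+// runtime call only when the type words match (e.g. eqtab && eqdata).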
+func EqInterface(s, t ir.Node) (eqtab *ir.BinaryExpr, eqdata *ir.CallExpr) {
+ if !types.Identical(s.Type(), t.Type()) {
+ base.Fatalf("EqInterface %v %v", s.Type(), t.Type())
+ }
+ // func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+ // func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+ var fn ir.Node
+ if s.Type().IsEmptyInterface() {
+ fn = typecheck.LookupRuntime("efaceeq")
+ } else {
+ fn = typecheck.LookupRuntime("ifaceeq")
+ }
+
+ stab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s)
+ ttab := ir.NewUnaryExpr(base.Pos, ir.OITAB, t)
+ sdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, s)
+ tdata := ir.NewUnaryExpr(base.Pos, ir.OIDATA, t)
+ sdata.SetType(types.Types[types.TUNSAFEPTR])
+ tdata.SetType(types.Types[types.TUNSAFEPTR])
+ sdata.SetTypecheck(1)
+ tdata.SetTypecheck(1)
+
+ call := typecheck.Call(base.Pos, fn, []ir.Node{stab, sdata, tdata}, false).(*ir.CallExpr)
+
+ cmp := ir.NewBinaryExpr(base.Pos, ir.OEQ, stab, ttab)
+ cmp = typecheck.Expr(cmp).(*ir.BinaryExpr)
+ cmp.SetType(types.Types[types.TBOOL])
+ return cmp, call
+}
+
+// eqmem returns the node
+// memequal(&p.field, &q.field [, size])
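+// The size argument is omitted when eqmemfunc selects one of the
+// fixed-size memequalNN runtime helpers for the run.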
+func eqmem(p ir.Node, q ir.Node, field *types.Sym, size int64) ir.Node {
+ nx := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, p, field)))
+ ny := typecheck.Expr(typecheck.NodAddr(ir.NewSelectorExpr(base.Pos, ir.OXDOT, q, field)))
+
+ fn, needsize := eqmemfunc(size, nx.Type().Elem())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(nx)
+ call.Args.Append(ny)
+ if needsize {
+ call.Args.Append(ir.NewInt(size))
+ }
+
+ return call
+}
+
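+// eqmemfunc returns the runtime function used to compare a run of size
+// bytes, along with whether the call needs an explicit size argument.
+// Runs of 1, 2, 4, 8 or 16 bytes use the fixed-size helpers memequal8
+// through memequal128; any other size falls back to memequal, which
+// takes the size as a third argument.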
+func eqmemfunc(size int64, t *types.Type) (fn *ir.Name, needsize bool) {
+ switch size {
+ default:
+ fn = typecheck.LookupRuntime("memequal")
+ needsize = true
+ case 1, 2, 4, 8, 16:
+ buf := fmt.Sprintf("memequal%d", int(size)*8)
+ fn = typecheck.LookupRuntime(buf)
+ }
+
+ fn = typecheck.SubstArgTypes(fn, t, t)
+ return fn, needsize
+}
+
+// memrun finds runs of struct fields for which memory-only algs are appropriate.
+// t is the parent struct type, and start is the field index at which to start the run.
+// size is the length in bytes of the memory included in the run.
+// next is the index just after the end of the memory run.
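+// For example, on a 64-bit target, for struct{ a, b int32; s string },
+// memrun(t, 0) returns size 8 and next 2: the run covers a and b and
+// stops at s, which is not a memory-only field.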
+func memrun(t *types.Type, start int) (size int64, next int) {
+ next = start
+ for {
+ next++
+ if next == t.NumFields() {
+ break
+ }
+ // Stop run after a padded field.
+ if types.IsPaddedField(t, next-1) {
+ break
+ }
+ // Also, stop before a blank or non-memory field.
+ if f := t.Field(next); f.Sym.IsBlank() || !isRegularMemory(f.Type) {
+ break
+ }
+ // For issue 46283, don't combine fields if the resulting load would
+ // require a larger alignment than the component fields.
+ if base.Ctxt.Arch.Alignment > 1 {
+ align := t.Alignment()
+ if off := t.Field(start).Offset; off&(align-1) != 0 {
+ // Offset is less aligned than the containing type.
+ // Use offset to determine alignment.
+ align = 1 << uint(bits.TrailingZeros64(uint64(off)))
+ }
+ size := t.Field(next).End() - t.Field(start).Offset
+ if size > align {
+ break
+ }
+ }
+ }
+ return t.Field(next-1).End() - t.Field(start).Offset, next
+}
+
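+// hashmem returns a node referring to runtime.memhash, typed here as
+// func(*T, uintptr, uintptr) uintptr, for hashing a block of memory of
+// type T.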
+func hashmem(t *types.Type) ir.Node {
+ sym := ir.Pkgs.Runtime.Lookup("memhash")
+
+ // TODO(austin): This creates an ir.Name with a nil Func.
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.NewPtr(t)),
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TUINTPTR]),
+ }))
+ return n
+}
diff --git a/src/cmd/compile/internal/reflectdata/reflect.go b/src/cmd/compile/internal/reflectdata/reflect.go
new file mode 100644
index 0000000..4ee9830
--- /dev/null
+++ b/src/cmd/compile/internal/reflectdata/reflect.go
@@ -0,0 +1,2120 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package reflectdata
+
+import (
+ "encoding/binary"
+ "fmt"
+ "os"
+ "sort"
+ "strings"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/inline"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typebits"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/gcprog"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+type ptabEntry struct {
+ s *types.Sym
+ t *types.Type
+}
+
+func CountPTabs() int {
+ return len(ptabs)
+}
+
+// runtime interface and reflection data structures
+var (
+ // protects signatset and signatslice
+ signatmu sync.Mutex
+	// Tracks which types need a runtime type descriptor.
+ signatset = make(map[*types.Type]struct{})
+	// Queue of types waiting to have their runtime type descriptors generated.
+ signatslice []typeAndStr
+
+ gcsymmu sync.Mutex // protects gcsymset and gcsymslice
+ gcsymset = make(map[*types.Type]struct{})
+
+ ptabs []*ir.Name
+)
+
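+// typeSig describes one method entry to be written into a runtime type
+// descriptor: the method name, the linker symbols of its generated
+// wrappers (isym, tsym), and two forms of its signature (type_, mtype);
+// see methods and imethods below for how each field is filled in.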
+type typeSig struct {
+ name *types.Sym
+ isym *obj.LSym
+ tsym *obj.LSym
+ type_ *types.Type
+ mtype *types.Type
+}
+
+// Builds a type representing a Bucket structure for
+// the given map type. This type is not visible to users -
+// we include only enough information to generate a correct GC
+// program for it.
+// Make sure this stays in sync with runtime/map.go.
+const (
+ BUCKETSIZE = 8
+ MAXKEYSIZE = 128
+ MAXELEMSIZE = 128
+)
+
+func structfieldSize() int { return 3 * types.PtrSize } // Sizeof(runtime.structfield{})
+func imethodSize() int { return 4 + 4 } // Sizeof(runtime.imethod{})
+func commonSize() int { return 4*types.PtrSize + 8 + 8 } // Sizeof(runtime._type{})
+
+func uncommonSize(t *types.Type) int { // Sizeof(runtime.uncommontype{})
+ if t.Sym() == nil && len(methods(t)) == 0 {
+ return 0
+ }
+ return 4 + 2 + 2 + 4 + 4
+}
+
+func makefield(name string, t *types.Type) *types.Field {
+ sym := (*types.Pkg)(nil).Lookup(name)
+ return types.NewField(src.NoXPos, sym, t)
+}
+
+// MapBucketType makes the map bucket type given the type of the map.
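+// For map[string]int on a 64-bit target, for example, the bucket is
+// effectively
+//	struct {
+//		topbits  [BUCKETSIZE]uint8
+//		keys     [BUCKETSIZE]string
+//		elems    [BUCKETSIZE]int
+//		overflow unsafe.Pointer
+//	}
+// since both key and elem sizes are within MAXKEYSIZE and MAXELEMSIZE
+// and the key type contains pointers.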
+func MapBucketType(t *types.Type) *types.Type {
+ if t.MapType().Bucket != nil {
+ return t.MapType().Bucket
+ }
+
+ keytype := t.Key()
+ elemtype := t.Elem()
+ types.CalcSize(keytype)
+ types.CalcSize(elemtype)
+ if keytype.Size() > MAXKEYSIZE {
+ keytype = types.NewPtr(keytype)
+ }
+ if elemtype.Size() > MAXELEMSIZE {
+ elemtype = types.NewPtr(elemtype)
+ }
+
+ field := make([]*types.Field, 0, 5)
+
+ // The first field is: uint8 topbits[BUCKETSIZE].
+ arr := types.NewArray(types.Types[types.TUINT8], BUCKETSIZE)
+ field = append(field, makefield("topbits", arr))
+
+ arr = types.NewArray(keytype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ keys := makefield("keys", arr)
+ field = append(field, keys)
+
+ arr = types.NewArray(elemtype, BUCKETSIZE)
+ arr.SetNoalg(true)
+ elems := makefield("elems", arr)
+ field = append(field, elems)
+
+ // If keys and elems have no pointers, the map implementation
+ // can keep a list of overflow pointers on the side so that
+ // buckets can be marked as having no pointers.
+ // Arrange for the bucket to have no pointers by changing
+ // the type of the overflow field to uintptr in this case.
+ // See comment on hmap.overflow in runtime/map.go.
+ otyp := types.Types[types.TUNSAFEPTR]
+ if !elemtype.HasPointers() && !keytype.HasPointers() {
+ otyp = types.Types[types.TUINTPTR]
+ }
+ overflow := makefield("overflow", otyp)
+ field = append(field, overflow)
+
+ // link up fields
+ bucket := types.NewStruct(types.NoPkg, field[:])
+ bucket.SetNoalg(true)
+ types.CalcSize(bucket)
+
+ // Check invariants that map code depends on.
+ if !types.IsComparable(t.Key()) {
+ base.Fatalf("unsupported map key type for %v", t)
+ }
+ if BUCKETSIZE < 8 {
+ base.Fatalf("bucket size too small for proper alignment")
+ }
+ if uint8(keytype.Alignment()) > BUCKETSIZE {
+ base.Fatalf("key align too big for %v", t)
+ }
+ if uint8(elemtype.Alignment()) > BUCKETSIZE {
+ base.Fatalf("elem align too big for %v", t)
+ }
+ if keytype.Size() > MAXKEYSIZE {
+		base.Fatalf("key size too large for %v", t)
+ }
+ if elemtype.Size() > MAXELEMSIZE {
+		base.Fatalf("elem size too large for %v", t)
+ }
+ if t.Key().Size() > MAXKEYSIZE && !keytype.IsPtr() {
+ base.Fatalf("key indirect incorrect for %v", t)
+ }
+ if t.Elem().Size() > MAXELEMSIZE && !elemtype.IsPtr() {
+ base.Fatalf("elem indirect incorrect for %v", t)
+ }
+ if keytype.Size()%keytype.Alignment() != 0 {
+ base.Fatalf("key size not a multiple of key align for %v", t)
+ }
+ if elemtype.Size()%elemtype.Alignment() != 0 {
+ base.Fatalf("elem size not a multiple of elem align for %v", t)
+ }
+ if uint8(bucket.Alignment())%uint8(keytype.Alignment()) != 0 {
+ base.Fatalf("bucket align not multiple of key align %v", t)
+ }
+ if uint8(bucket.Alignment())%uint8(elemtype.Alignment()) != 0 {
+ base.Fatalf("bucket align not multiple of elem align %v", t)
+ }
+ if keys.Offset%keytype.Alignment() != 0 {
+ base.Fatalf("bad alignment of keys in bmap for %v", t)
+ }
+ if elems.Offset%elemtype.Alignment() != 0 {
+ base.Fatalf("bad alignment of elems in bmap for %v", t)
+ }
+
+ // Double-check that overflow field is final memory in struct,
+ // with no padding at end.
+ if overflow.Offset != bucket.Size()-int64(types.PtrSize) {
+ base.Fatalf("bad offset of overflow in bmap for %v", t)
+ }
+
+ t.MapType().Bucket = bucket
+
+ bucket.StructType().Map = t
+ return bucket
+}
+
+// MapType builds a type representing a Hmap structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func MapType(t *types.Type) *types.Type {
+ if t.MapType().Hmap != nil {
+ return t.MapType().Hmap
+ }
+
+ bmap := MapBucketType(t)
+
+ // build a struct:
+ // type hmap struct {
+ // count int
+ // flags uint8
+ // B uint8
+ // noverflow uint16
+ // hash0 uint32
+ // buckets *bmap
+ // oldbuckets *bmap
+ // nevacuate uintptr
+ // extra unsafe.Pointer // *mapextra
+ // }
+ // must match runtime/map.go:hmap.
+ fields := []*types.Field{
+ makefield("count", types.Types[types.TINT]),
+ makefield("flags", types.Types[types.TUINT8]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("noverflow", types.Types[types.TUINT16]),
+ makefield("hash0", types.Types[types.TUINT32]), // Used in walk.go for OMAKEMAP.
+ makefield("buckets", types.NewPtr(bmap)), // Used in walk.go for OMAKEMAP.
+ makefield("oldbuckets", types.NewPtr(bmap)),
+ makefield("nevacuate", types.Types[types.TUINTPTR]),
+ makefield("extra", types.Types[types.TUNSAFEPTR]),
+ }
+
+ hmap := types.NewStruct(types.NoPkg, fields)
+ hmap.SetNoalg(true)
+ types.CalcSize(hmap)
+
+ // The size of hmap should be 48 bytes on 64 bit
+ // and 28 bytes on 32 bit platforms.
+ if size := int64(8 + 5*types.PtrSize); hmap.Size() != size {
+ base.Fatalf("hmap size not correct: got %d, want %d", hmap.Size(), size)
+ }
+
+ t.MapType().Hmap = hmap
+ hmap.StructType().Map = t
+ return hmap
+}
+
+// MapIterType builds a type representing an Hiter structure for the given map type.
+// Make sure this stays in sync with runtime/map.go.
+func MapIterType(t *types.Type) *types.Type {
+ if t.MapType().Hiter != nil {
+ return t.MapType().Hiter
+ }
+
+ hmap := MapType(t)
+ bmap := MapBucketType(t)
+
+ // build a struct:
+ // type hiter struct {
+ // key *Key
+ // elem *Elem
+ // t unsafe.Pointer // *MapType
+ // h *hmap
+ // buckets *bmap
+ // bptr *bmap
+ // overflow unsafe.Pointer // *[]*bmap
+ // oldoverflow unsafe.Pointer // *[]*bmap
+ // startBucket uintptr
+ // offset uint8
+ // wrapped bool
+ // B uint8
+ // i uint8
+ // bucket uintptr
+ // checkBucket uintptr
+ // }
+ // must match runtime/map.go:hiter.
+ fields := []*types.Field{
+ makefield("key", types.NewPtr(t.Key())), // Used in range.go for TMAP.
+ makefield("elem", types.NewPtr(t.Elem())), // Used in range.go for TMAP.
+ makefield("t", types.Types[types.TUNSAFEPTR]),
+ makefield("h", types.NewPtr(hmap)),
+ makefield("buckets", types.NewPtr(bmap)),
+ makefield("bptr", types.NewPtr(bmap)),
+ makefield("overflow", types.Types[types.TUNSAFEPTR]),
+ makefield("oldoverflow", types.Types[types.TUNSAFEPTR]),
+ makefield("startBucket", types.Types[types.TUINTPTR]),
+ makefield("offset", types.Types[types.TUINT8]),
+ makefield("wrapped", types.Types[types.TBOOL]),
+ makefield("B", types.Types[types.TUINT8]),
+ makefield("i", types.Types[types.TUINT8]),
+ makefield("bucket", types.Types[types.TUINTPTR]),
+ makefield("checkBucket", types.Types[types.TUINTPTR]),
+ }
+
+ // build iterator struct holding the above fields
+ hiter := types.NewStruct(types.NoPkg, fields)
+ hiter.SetNoalg(true)
+ types.CalcSize(hiter)
+ if hiter.Size() != int64(12*types.PtrSize) {
+ base.Fatalf("hash_iter size not correct %d %d", hiter.Size(), 12*types.PtrSize)
+ }
+ t.MapType().Hiter = hiter
+ hiter.StructType().Map = t
+ return hiter
+}
+
+// methods returns the methods of the non-interface type t, sorted by name.
+// Generates stub functions as needed.
+func methods(t *types.Type) []*typeSig {
+ if t.HasShape() {
+ // Shape types have no methods.
+ return nil
+ }
+ // method type
+ mt := types.ReceiverBaseType(t)
+
+ if mt == nil {
+ return nil
+ }
+ typecheck.CalcMethods(mt)
+
+ // make list of methods for t,
+ // generating code if necessary.
+ var ms []*typeSig
+ for _, f := range mt.AllMethods().Slice() {
+ if f.Sym == nil {
+ base.Fatalf("method with no sym on %v", mt)
+ }
+ if !f.IsMethod() {
+ base.Fatalf("non-method on %v method %v %v", mt, f.Sym, f)
+ }
+ if f.Type.Recv() == nil {
+ base.Fatalf("receiver with no type on %v method %v %v", mt, f.Sym, f)
+ }
+ if f.Nointerface() && !t.IsFullyInstantiated() {
+ // Skip creating method wrappers if f is nointerface. But, if
+ // t is an instantiated type, we still have to call
+ // methodWrapper, because methodWrapper generates the actual
+ // generic method on the type as well.
+ continue
+ }
+
+ // get receiver type for this particular method.
+ // if pointer receiver but non-pointer t and
+ // this is not an embedded pointer inside a struct,
+ // method does not apply.
+ if !types.IsMethodApplicable(t, f) {
+ continue
+ }
+
+ sig := &typeSig{
+ name: f.Sym,
+ isym: methodWrapper(t, f, true),
+ tsym: methodWrapper(t, f, false),
+ type_: typecheck.NewMethodType(f.Type, t),
+ mtype: typecheck.NewMethodType(f.Type, nil),
+ }
+ if f.Nointerface() {
+ // In the case of a nointerface method on an instantiated
+			// type, don't actually append the typeSig.
+ continue
+ }
+ ms = append(ms, sig)
+ }
+
+ return ms
+}
+
+// imethods returns the methods of the interface type t, sorted by name.
+func imethods(t *types.Type) []*typeSig {
+ var methods []*typeSig
+ for _, f := range t.AllMethods().Slice() {
+ if f.Type.Kind() != types.TFUNC || f.Sym == nil {
+ continue
+ }
+ if f.Sym.IsBlank() {
+ base.Fatalf("unexpected blank symbol in interface method set")
+ }
+ if n := len(methods); n > 0 {
+ last := methods[n-1]
+ if !last.name.Less(f.Sym) {
+ base.Fatalf("sigcmp vs sortinter %v %v", last.name, f.Sym)
+ }
+ }
+
+ sig := &typeSig{
+ name: f.Sym,
+ mtype: f.Type,
+ type_: typecheck.NewMethodType(f.Type, nil),
+ }
+ methods = append(methods, sig)
+
+ // NOTE(rsc): Perhaps an oversight that
+ // IfaceType.Method is not in the reflect data.
+ // Generate the method body, so that compiled
+ // code can refer to it.
+ methodWrapper(t, f, false)
+ }
+
+ return methods
+}
+
+func dimportpath(p *types.Pkg) {
+ if p.Pathsym != nil {
+ return
+ }
+
+ // If we are compiling the runtime package, there are two runtime packages around
+ // -- localpkg and Pkgs.Runtime. We don't want to produce import path symbols for
+ // both of them, so just produce one for localpkg.
+ if base.Ctxt.Pkgpath == "runtime" && p == ir.Pkgs.Runtime {
+ return
+ }
+
+ str := p.Path
+ if p == types.LocalPkg {
+ // Note: myimportpath != "", or else dgopkgpath won't call dimportpath.
+ str = base.Ctxt.Pkgpath
+ }
+
+ s := base.Ctxt.Lookup("type..importpath." + p.Prefix + ".")
+ ot := dnameData(s, 0, str, "", nil, false)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ p.Pathsym = s
+}
+
+func dgopkgpath(s *obj.LSym, ot int, pkg *types.Pkg) int {
+ if pkg == nil {
+ return objw.Uintptr(s, ot, 0)
+ }
+
+ if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+ // If we don't know the full import path of the package being compiled
+ // (i.e. -p was not passed on the compiler command line), emit a reference to
+ // type..importpath.""., which the linker will rewrite using the correct import path.
+ // Every package that imports this one directly defines the symbol.
+ // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
+ return objw.SymPtr(s, ot, ns, 0)
+ }
+
+ dimportpath(pkg)
+ return objw.SymPtr(s, ot, pkg.Pathsym, 0)
+}
+
+// dgopkgpathOff writes an offset relocation in s at offset ot to the pkg path symbol.
+func dgopkgpathOff(s *obj.LSym, ot int, pkg *types.Pkg) int {
+ if pkg == nil {
+ return objw.Uint32(s, ot, 0)
+ }
+ if pkg == types.LocalPkg && base.Ctxt.Pkgpath == "" {
+ // If we don't know the full import path of the package being compiled
+ // (i.e. -p was not passed on the compiler command line), emit a reference to
+ // type..importpath.""., which the linker will rewrite using the correct import path.
+ // Every package that imports this one directly defines the symbol.
+ // See also https://groups.google.com/forum/#!topic/golang-dev/myb9s53HxGQ.
+ ns := base.Ctxt.Lookup(`type..importpath."".`)
+ return objw.SymPtrOff(s, ot, ns)
+ }
+
+ dimportpath(pkg)
+ return objw.SymPtrOff(s, ot, pkg.Pathsym)
+}
+
+// dnameField dumps a reflect.name for a struct field.
+func dnameField(lsym *obj.LSym, ot int, spkg *types.Pkg, ft *types.Field) int {
+ if !types.IsExported(ft.Sym.Name) && ft.Sym.Pkg != spkg {
+ base.Fatalf("package mismatch for %v", ft.Sym)
+ }
+ nsym := dname(ft.Sym.Name, ft.Note, nil, types.IsExported(ft.Sym.Name))
+ return objw.SymPtr(lsym, ot, nsym, 0)
+}
+
+// dnameData writes the contents of a reflect.name into s at offset ot.
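+// The encoding is a one-byte bit field (exported, has-tag, has-pkgpath),
+// a uvarint length followed by the name bytes, then, if a tag is
+// present, a uvarint length and the tag bytes. An exported, untagged
+// name "Foo" encodes as 0x01 0x03 'F' 'o' 'o'.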
+func dnameData(s *obj.LSym, ot int, name, tag string, pkg *types.Pkg, exported bool) int {
+ if len(name) >= 1<<29 {
+ base.Fatalf("name too long: %d %s...", len(name), name[:1024])
+ }
+ if len(tag) >= 1<<29 {
+ base.Fatalf("tag too long: %d %s...", len(tag), tag[:1024])
+ }
+ var nameLen [binary.MaxVarintLen64]byte
+ nameLenLen := binary.PutUvarint(nameLen[:], uint64(len(name)))
+ var tagLen [binary.MaxVarintLen64]byte
+ tagLenLen := binary.PutUvarint(tagLen[:], uint64(len(tag)))
+
+ // Encode name and tag. See reflect/type.go for details.
+ var bits byte
+ l := 1 + nameLenLen + len(name)
+ if exported {
+ bits |= 1 << 0
+ }
+ if len(tag) > 0 {
+ l += tagLenLen + len(tag)
+ bits |= 1 << 1
+ }
+ if pkg != nil {
+ bits |= 1 << 2
+ }
+ b := make([]byte, l)
+ b[0] = bits
+ copy(b[1:], nameLen[:nameLenLen])
+ copy(b[1+nameLenLen:], name)
+ if len(tag) > 0 {
+ tb := b[1+nameLenLen+len(name):]
+ copy(tb, tagLen[:tagLenLen])
+ copy(tb[tagLenLen:], tag)
+ }
+
+ ot = int(s.WriteBytes(base.Ctxt, int64(ot), b))
+
+ if pkg != nil {
+ ot = dgopkgpathOff(s, ot, pkg)
+ }
+
+ return ot
+}
+
+var dnameCount int
+
+// dname creates a reflect.name for a struct field or method.
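+// When pkg is nil the symbol name is derived from name and tag, so
+// identical names deduplicate across packages via DUPOK; when pkg is
+// non-nil the data embeds a package-path reference, so a counter
+// (dnameCount) is used to keep the symbol name unique instead.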
+func dname(name, tag string, pkg *types.Pkg, exported bool) *obj.LSym {
+ // Write out data as "type.." to signal two things to the
+ // linker, first that when dynamically linking, the symbol
+ // should be moved to a relro section, and second that the
+ // contents should not be decoded as a type.
+ sname := "type..namedata."
+ if pkg == nil {
+ // In the common case, share data with other packages.
+ if name == "" {
+ if exported {
+ sname += "-noname-exported." + tag
+ } else {
+ sname += "-noname-unexported." + tag
+ }
+ } else {
+ if exported {
+ sname += name + "." + tag
+ } else {
+ sname += name + "-" + tag
+ }
+ }
+ } else {
+ sname = fmt.Sprintf(`%s"".%d`, sname, dnameCount)
+ dnameCount++
+ }
+ s := base.Ctxt.Lookup(sname)
+ if len(s.P) > 0 {
+ return s
+ }
+ ot := dnameData(s, 0, name, tag, pkg, exported)
+ objw.Global(s, int32(ot), obj.DUPOK|obj.RODATA)
+ s.Set(obj.AttrContentAddressable, true)
+ return s
+}
+
+// dextratype dumps the fields of a runtime.uncommontype.
+// dataAdd is the offset in bytes after the header where the
+// backing array of the []method field is written (by dextratypeData).
+func dextratype(lsym *obj.LSym, ot int, t *types.Type, dataAdd int) int {
+ m := methods(t)
+ if t.Sym() == nil && len(m) == 0 {
+ return ot
+ }
+ noff := int(types.Rnd(int64(ot), int64(types.PtrSize)))
+ if noff != ot {
+ base.Fatalf("unexpected alignment in dextratype for %v", t)
+ }
+
+ for _, a := range m {
+ writeType(a.type_)
+ }
+
+ ot = dgopkgpathOff(lsym, ot, typePkg(t))
+
+ dataAdd += uncommonSize(t)
+ mcount := len(m)
+ if mcount != int(uint16(mcount)) {
+ base.Fatalf("too many methods on %v: %d", t, mcount)
+ }
+ xcount := sort.Search(mcount, func(i int) bool { return !types.IsExported(m[i].name.Name) })
+ if dataAdd != int(uint32(dataAdd)) {
+ base.Fatalf("methods are too far away on %v: %d", t, dataAdd)
+ }
+
+ ot = objw.Uint16(lsym, ot, uint16(mcount))
+ ot = objw.Uint16(lsym, ot, uint16(xcount))
+ ot = objw.Uint32(lsym, ot, uint32(dataAdd))
+ ot = objw.Uint32(lsym, ot, 0)
+ return ot
+}
+
+func typePkg(t *types.Type) *types.Pkg {
+ tsym := t.Sym()
+ if tsym == nil {
+ switch t.Kind() {
+ case types.TARRAY, types.TSLICE, types.TPTR, types.TCHAN:
+ if t.Elem() != nil {
+ tsym = t.Elem().Sym()
+ }
+ }
+ }
+ if tsym != nil && tsym.Pkg != types.BuiltinPkg {
+ return tsym.Pkg
+ }
+ return nil
+}
+
+// dextratypeData dumps the backing array for the []method field of
+// runtime.uncommontype.
+func dextratypeData(lsym *obj.LSym, ot int, t *types.Type) int {
+ for _, a := range methods(t) {
+ // ../../../../runtime/type.go:/method
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != typePkg(t) {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported)
+
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = dmethodptrOff(lsym, ot, writeType(a.mtype))
+ ot = dmethodptrOff(lsym, ot, a.isym)
+ ot = dmethodptrOff(lsym, ot, a.tsym)
+ }
+ return ot
+}
+
+func dmethodptrOff(s *obj.LSym, ot int, x *obj.LSym) int {
+ objw.Uint32(s, ot, 0)
+ r := obj.Addrel(s)
+ r.Off = int32(ot)
+ r.Siz = 4
+ r.Sym = x
+ r.Type = objabi.R_METHODOFF
+ return ot + 4
+}
+
+var kinds = []int{
+ types.TINT: objabi.KindInt,
+ types.TUINT: objabi.KindUint,
+ types.TINT8: objabi.KindInt8,
+ types.TUINT8: objabi.KindUint8,
+ types.TINT16: objabi.KindInt16,
+ types.TUINT16: objabi.KindUint16,
+ types.TINT32: objabi.KindInt32,
+ types.TUINT32: objabi.KindUint32,
+ types.TINT64: objabi.KindInt64,
+ types.TUINT64: objabi.KindUint64,
+ types.TUINTPTR: objabi.KindUintptr,
+ types.TFLOAT32: objabi.KindFloat32,
+ types.TFLOAT64: objabi.KindFloat64,
+ types.TBOOL: objabi.KindBool,
+ types.TSTRING: objabi.KindString,
+ types.TPTR: objabi.KindPtr,
+ types.TSTRUCT: objabi.KindStruct,
+ types.TINTER: objabi.KindInterface,
+ types.TCHAN: objabi.KindChan,
+ types.TMAP: objabi.KindMap,
+ types.TARRAY: objabi.KindArray,
+ types.TSLICE: objabi.KindSlice,
+ types.TFUNC: objabi.KindFunc,
+ types.TCOMPLEX64: objabi.KindComplex64,
+ types.TCOMPLEX128: objabi.KindComplex128,
+ types.TUNSAFEPTR: objabi.KindUnsafePointer,
+}
+
+// tflag is documented in reflect/type.go.
+//
+// tflag values must be kept in sync with copies in:
+// cmd/compile/internal/reflectdata/reflect.go
+// cmd/link/internal/ld/decodesym.go
+// reflect/type.go
+// runtime/type.go
+const (
+ tflagUncommon = 1 << 0
+ tflagExtraStar = 1 << 1
+ tflagNamed = 1 << 2
+ tflagRegularMemory = 1 << 3
+)
+
+var (
+ memhashvarlen *obj.LSym
+ memequalvarlen *obj.LSym
+)
+
+// dcommontype dumps the contents of a reflect.rtype (runtime._type).
+func dcommontype(lsym *obj.LSym, t *types.Type) int {
+ types.CalcSize(t)
+ eqfunc := geneq(t)
+
+ sptrWeak := true
+ var sptr *obj.LSym
+ if !t.IsPtr() || t.IsPtrElem() {
+ tptr := types.NewPtr(t)
+ if t.Sym() != nil || methods(tptr) != nil {
+ sptrWeak = false
+ }
+ sptr = writeType(tptr)
+ }
+
+ gcsym, useGCProg, ptrdata := dgcsym(t, true)
+ delete(gcsymset, t)
+
+ // ../../../../reflect/type.go:/^type.rtype
+ // actual type structure
+ // type rtype struct {
+ // size uintptr
+ // ptrdata uintptr
+ // hash uint32
+ // tflag tflag
+ // align uint8
+ // fieldAlign uint8
+ // kind uint8
+ // equal func(unsafe.Pointer, unsafe.Pointer) bool
+ // gcdata *byte
+ // str nameOff
+ // ptrToThis typeOff
+ // }
+ ot := 0
+ ot = objw.Uintptr(lsym, ot, uint64(t.Size()))
+ ot = objw.Uintptr(lsym, ot, uint64(ptrdata))
+ ot = objw.Uint32(lsym, ot, types.TypeHash(t))
+
+ var tflag uint8
+ if uncommonSize(t) != 0 {
+ tflag |= tflagUncommon
+ }
+ if t.Sym() != nil && t.Sym().Name != "" {
+ tflag |= tflagNamed
+ }
+ if isRegularMemory(t) {
+ tflag |= tflagRegularMemory
+ }
+
+ exported := false
+ p := t.NameString()
+ // If we're writing out type T,
+ // we are very likely to write out type *T as well.
+ // Use the string "*T"[1:] for "T", so that the two
+ // share storage. This is a cheap way to reduce the
+ // amount of space taken up by reflect strings.
+ if !strings.HasPrefix(p, "*") {
+ p = "*" + p
+ tflag |= tflagExtraStar
+ if t.Sym() != nil {
+ exported = types.IsExported(t.Sym().Name)
+ }
+ } else {
+ if t.Elem() != nil && t.Elem().Sym() != nil {
+ exported = types.IsExported(t.Elem().Sym().Name)
+ }
+ }
+
+ ot = objw.Uint8(lsym, ot, tflag)
+
+ // runtime (and common sense) expects alignment to be a power of two.
+ i := int(uint8(t.Alignment()))
+
+ if i == 0 {
+ i = 1
+ }
+ if i&(i-1) != 0 {
+ base.Fatalf("invalid alignment %d for %v", uint8(t.Alignment()), t)
+ }
+ ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // align
+ ot = objw.Uint8(lsym, ot, uint8(t.Alignment())) // fieldAlign
+
+ i = kinds[t.Kind()]
+ if types.IsDirectIface(t) {
+ i |= objabi.KindDirectIface
+ }
+ if useGCProg {
+ i |= objabi.KindGCProg
+ }
+ ot = objw.Uint8(lsym, ot, uint8(i)) // kind
+ if eqfunc != nil {
+ ot = objw.SymPtr(lsym, ot, eqfunc, 0) // equality function
+ } else {
+ ot = objw.Uintptr(lsym, ot, 0) // type we can't do == with
+ }
+ ot = objw.SymPtr(lsym, ot, gcsym, 0) // gcdata
+
+ nsym := dname(p, "", nil, exported)
+ ot = objw.SymPtrOff(lsym, ot, nsym) // str
+ // ptrToThis
+ if sptr == nil {
+ ot = objw.Uint32(lsym, ot, 0)
+ } else if sptrWeak {
+ ot = objw.SymPtrWeakOff(lsym, ot, sptr)
+ } else {
+ ot = objw.SymPtrOff(lsym, ot, sptr)
+ }
+
+ return ot
+}
+
+// TrackSym returns the symbol for tracking use of field/method f, assumed
+// to be a member of struct/interface type t.
+func TrackSym(t *types.Type, f *types.Field) *obj.LSym {
+ return base.PkgLinksym("go.track", t.LinkString()+"."+f.Sym.Name, obj.ABI0)
+}
+
+func TypeSymPrefix(prefix string, t *types.Type) *types.Sym {
+ p := prefix + "." + t.LinkString()
+ s := types.TypeSymLookup(p)
+
+ // This function is for looking up type-related generated functions
+ // (e.g. eq and hash). Make sure they are indeed generated.
+ signatmu.Lock()
+ NeedRuntimeType(t)
+ signatmu.Unlock()
+
+ //print("algsym: %s -> %+S\n", p, s);
+
+ return s
+}
+
+func TypeSym(t *types.Type) *types.Sym {
+ if t == nil || (t.IsPtr() && t.Elem() == nil) || t.IsUntyped() {
+ base.Fatalf("TypeSym %v", t)
+ }
+ if t.Kind() == types.TFUNC && t.Recv() != nil {
+ base.Fatalf("misuse of method type: %v", t)
+ }
+ s := types.TypeSym(t)
+ signatmu.Lock()
+ NeedRuntimeType(t)
+ signatmu.Unlock()
+ return s
+}
+
+func TypeLinksymPrefix(prefix string, t *types.Type) *obj.LSym {
+ return TypeSymPrefix(prefix, t).Linksym()
+}
+
+func TypeLinksymLookup(name string) *obj.LSym {
+ return types.TypeSymLookup(name).Linksym()
+}
+
+func TypeLinksym(t *types.Type) *obj.LSym {
+ return TypeSym(t).Linksym()
+}
+
+func TypePtr(t *types.Type) *ir.AddrExpr {
+ n := ir.NewLinksymExpr(base.Pos, TypeLinksym(t), types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
+}
+
+// ITabLsym returns the LSym representing the itab for concrete type typ implementing
+// interface iface. A dummy tab will be created in the unusual case where typ doesn't
+// implement iface. Normally, this wouldn't happen, because the typechecker would
+// have reported a compile-time error. This situation can only happen when the
+// destination type of a type assert or a type in a type switch is parameterized, so
+// it may sometimes, but not always, be a type that can't implement the specified
+// interface.
+func ITabLsym(typ, iface *types.Type) *obj.LSym {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
+ if !existed {
+ writeITab(lsym, typ, iface, true)
+ }
+ return lsym
+}
+
+// ITabAddr returns an expression representing a pointer to the itab
+// for concrete type typ implementing interface iface.
+func ITabAddr(typ, iface *types.Type) *ir.AddrExpr {
+ s, existed := ir.Pkgs.Itab.LookupOK(typ.LinkString() + "," + iface.LinkString())
+ lsym := s.Linksym()
+
+ if !existed {
+ writeITab(lsym, typ, iface, false)
+ }
+
+ n := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(n)).(*ir.AddrExpr)
+}
+
+// needkeyupdate reports whether map updates with t as a key
+// need the key to be updated.
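+// For example, map[float64]T needs key updates because +0.0 and -0.0
+// compare equal but have different bit patterns, while map[int]T does
+// not.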
+func needkeyupdate(t *types.Type) bool {
+ switch t.Kind() {
+ case types.TBOOL, types.TINT, types.TUINT, types.TINT8, types.TUINT8, types.TINT16, types.TUINT16, types.TINT32, types.TUINT32,
+ types.TINT64, types.TUINT64, types.TUINTPTR, types.TPTR, types.TUNSAFEPTR, types.TCHAN:
+ return false
+
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, // floats and complex can be +0/-0
+ types.TINTER,
+ types.TSTRING: // strings might have smaller backing stores
+ return true
+
+ case types.TARRAY:
+ return needkeyupdate(t.Elem())
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if needkeyupdate(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ base.Fatalf("bad type for map key: %v", t)
+ return true
+ }
+}
+
+// hashMightPanic reports whether the hash of a map key of type t might panic.
+func hashMightPanic(t *types.Type) bool {
+ switch t.Kind() {
+ case types.TINTER:
+ return true
+
+ case types.TARRAY:
+ return hashMightPanic(t.Elem())
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if hashMightPanic(t1.Type) {
+ return true
+ }
+ }
+ return false
+
+ default:
+ return false
+ }
+}
+
+// formalType replaces predeclared aliases with real types.
+// They've been separate internally to make error messages
+// better, but we have to merge them in the reflect tables.
+func formalType(t *types.Type) *types.Type {
+ switch t {
+ case types.AnyType, types.ByteType, types.RuneType:
+ return types.Types[t.Kind()]
+ }
+ return t
+}
+
+func writeType(t *types.Type) *obj.LSym {
+ t = formalType(t)
+ if t.IsUntyped() || t.HasTParam() {
+ base.Fatalf("writeType %v", t)
+ }
+
+ s := types.TypeSym(t)
+ lsym := s.Linksym()
+ if s.Siggen() {
+ return lsym
+ }
+ s.SetSiggen(true)
+
+ // special case (look for runtime below):
+ // when compiling package runtime,
+ // emit the type structures for int, float, etc.
+ tbase := t
+
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+ tbase = t.Elem()
+ }
+ if tbase.Kind() == types.TFORW {
+ base.Fatalf("unresolved defined type: %v", tbase)
+ }
+
+ if !NeedEmit(tbase) {
+ if i := typecheck.BaseTypeIndex(t); i >= 0 {
+ lsym.Pkg = tbase.Sym().Pkg.Prefix
+ lsym.SymIdx = int32(i)
+ lsym.Set(obj.AttrIndexed, true)
+ }
+
+ // TODO(mdempsky): Investigate whether this still happens.
+ // If we know we don't need to emit code for a type,
+ // we should have a link-symbol index for it.
+ // See also TODO in NeedEmit.
+ return lsym
+ }
+
+ ot := 0
+ switch t.Kind() {
+ default:
+ ot = dcommontype(lsym, t)
+ ot = dextratype(lsym, ot, t, 0)
+
+ case types.TARRAY:
+ // ../../../../runtime/type.go:/arrayType
+ s1 := writeType(t.Elem())
+ t2 := types.NewSlice(t.Elem())
+ s2 := writeType(t2)
+ ot = dcommontype(lsym, t)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.NumElem()))
+ ot = dextratype(lsym, ot, t, 0)
+
+ case types.TSLICE:
+ // ../../../../runtime/type.go:/sliceType
+ s1 := writeType(t.Elem())
+ ot = dcommontype(lsym, t)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = dextratype(lsym, ot, t, 0)
+
+ case types.TCHAN:
+ // ../../../../runtime/type.go:/chanType
+ s1 := writeType(t.Elem())
+ ot = dcommontype(lsym, t)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.Uintptr(lsym, ot, uint64(t.ChanDir()))
+ ot = dextratype(lsym, ot, t, 0)
+
+ case types.TFUNC:
+ for _, t1 := range t.Recvs().Fields().Slice() {
+ writeType(t1.Type)
+ }
+ isddd := false
+ for _, t1 := range t.Params().Fields().Slice() {
+ isddd = t1.IsDDD()
+ writeType(t1.Type)
+ }
+ for _, t1 := range t.Results().Fields().Slice() {
+ writeType(t1.Type)
+ }
+
+ ot = dcommontype(lsym, t)
+ inCount := t.NumRecvs() + t.NumParams()
+ outCount := t.NumResults()
+ if isddd {
+ outCount |= 1 << 15
+ }
+ ot = objw.Uint16(lsym, ot, uint16(inCount))
+ ot = objw.Uint16(lsym, ot, uint16(outCount))
+ if types.PtrSize == 8 {
+ ot += 4 // align for *rtype
+ }
+
+ dataAdd := (inCount + t.NumResults()) * types.PtrSize
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ // Array of rtype pointers follows funcType.
+ for _, t1 := range t.Recvs().Fields().Slice() {
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+ }
+ for _, t1 := range t.Params().Fields().Slice() {
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+ }
+ for _, t1 := range t.Results().Fields().Slice() {
+ ot = objw.SymPtr(lsym, ot, writeType(t1.Type), 0)
+ }
+
+ case types.TINTER:
+ m := imethods(t)
+ n := len(m)
+ for _, a := range m {
+ writeType(a.type_)
+ }
+
+ // ../../../../runtime/type.go:/interfaceType
+ ot = dcommontype(lsym, t)
+
+ var tpkg *types.Pkg
+ if t.Sym() != nil && t != types.Types[t.Kind()] && t != types.ErrorType {
+ tpkg = t.Sym().Pkg
+ }
+ ot = dgopkgpath(lsym, ot, tpkg)
+
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
+ ot = objw.Uintptr(lsym, ot, uint64(n))
+ dataAdd := imethodSize() * n
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ for _, a := range m {
+ // ../../../../runtime/type.go:/imethod
+ exported := types.IsExported(a.name.Name)
+ var pkg *types.Pkg
+ if !exported && a.name.Pkg != tpkg {
+ pkg = a.name.Pkg
+ }
+ nsym := dname(a.name.Name, "", pkg, exported)
+
+ ot = objw.SymPtrOff(lsym, ot, nsym)
+ ot = objw.SymPtrOff(lsym, ot, writeType(a.type_))
+ }
+
+ // ../../../../runtime/type.go:/mapType
+ case types.TMAP:
+ s1 := writeType(t.Key())
+ s2 := writeType(t.Elem())
+ s3 := writeType(MapBucketType(t))
+ hasher := genhash(t.Key())
+
+ ot = dcommontype(lsym, t)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = objw.SymPtr(lsym, ot, s2, 0)
+ ot = objw.SymPtr(lsym, ot, s3, 0)
+ ot = objw.SymPtr(lsym, ot, hasher, 0)
+ var flags uint32
+ // Note: flags must match maptype accessors in ../../../../runtime/type.go
+ // and maptype builder in ../../../../reflect/type.go:MapOf.
+ if t.Key().Size() > MAXKEYSIZE {
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+ flags |= 1 // indirect key
+ } else {
+ ot = objw.Uint8(lsym, ot, uint8(t.Key().Size()))
+ }
+
+ if t.Elem().Size() > MAXELEMSIZE {
+ ot = objw.Uint8(lsym, ot, uint8(types.PtrSize))
+ flags |= 2 // indirect value
+ } else {
+ ot = objw.Uint8(lsym, ot, uint8(t.Elem().Size()))
+ }
+ ot = objw.Uint16(lsym, ot, uint16(MapBucketType(t).Size()))
+ if types.IsReflexive(t.Key()) {
+ flags |= 4 // reflexive key
+ }
+ if needkeyupdate(t.Key()) {
+ flags |= 8 // need key update
+ }
+ if hashMightPanic(t.Key()) {
+ flags |= 16 // hash might panic
+ }
+ ot = objw.Uint32(lsym, ot, flags)
+ ot = dextratype(lsym, ot, t, 0)
+ if u := t.Underlying(); u != t {
+ // If t is a named map type, also keep the underlying map
+ // type live in the binary. This is important to make sure that
+ // a named map and that same map cast to its underlying type via
+ // reflection, use the same hash function. See issue 37716.
+ r := obj.Addrel(lsym)
+ r.Sym = writeType(u)
+ r.Type = objabi.R_KEEP
+ }
+
+ case types.TPTR:
+ if t.Elem().Kind() == types.TANY {
+ // ../../../../runtime/type.go:/UnsafePointerType
+ ot = dcommontype(lsym, t)
+ ot = dextratype(lsym, ot, t, 0)
+
+ break
+ }
+
+ // ../../../../runtime/type.go:/ptrType
+ s1 := writeType(t.Elem())
+
+ ot = dcommontype(lsym, t)
+ ot = objw.SymPtr(lsym, ot, s1, 0)
+ ot = dextratype(lsym, ot, t, 0)
+
+ // ../../../../runtime/type.go:/structType
+ // for security, only the exported fields.
+ case types.TSTRUCT:
+ fields := t.Fields().Slice()
+ for _, t1 := range fields {
+ writeType(t1.Type)
+ }
+
+ // All non-exported struct field names within a struct
+ // type must originate from a single package. By
+ // identifying and recording that package within the
+ // struct type descriptor, we can omit that
+ // information from the field descriptors.
+ var spkg *types.Pkg
+ for _, f := range fields {
+ if !types.IsExported(f.Sym.Name) {
+ spkg = f.Sym.Pkg
+ break
+ }
+ }
+
+ ot = dcommontype(lsym, t)
+ ot = dgopkgpath(lsym, ot, spkg)
+ ot = objw.SymPtr(lsym, ot, lsym, ot+3*types.PtrSize+uncommonSize(t))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+ ot = objw.Uintptr(lsym, ot, uint64(len(fields)))
+
+ dataAdd := len(fields) * structfieldSize()
+ ot = dextratype(lsym, ot, t, dataAdd)
+
+ for _, f := range fields {
+ // ../../../../runtime/type.go:/structField
+ ot = dnameField(lsym, ot, spkg, f)
+ ot = objw.SymPtr(lsym, ot, writeType(f.Type), 0)
+ offsetAnon := uint64(f.Offset) << 1
+ if offsetAnon>>1 != uint64(f.Offset) {
+ base.Fatalf("%v: bad field offset for %s", t, f.Sym.Name)
+ }
+ if f.Embedded != 0 {
+ offsetAnon |= 1
+ }
+ ot = objw.Uintptr(lsym, ot, offsetAnon)
+ }
+ }
+
+ ot = dextratypeData(lsym, ot, t)
+ objw.Global(lsym, int32(ot), int16(obj.DUPOK|obj.RODATA))
+ // Note: DUPOK is required to ensure that we don't end up with more
+ // than one type descriptor for a given type.
+
+ // The linker will leave a table of all the typelinks for
+ // types in the binary, so the runtime can find them.
+ //
+ // When buildmode=shared, all types are in typelinks so the
+ // runtime can deduplicate type pointers.
+ keep := base.Ctxt.Flag_dynlink
+ if !keep && t.Sym() == nil {
+ // For an unnamed type, we only need the link if the type can
+ // be created at run time by reflect.PtrTo and similar
+ // functions. If the type exists in the program, those
+ // functions must return the existing type structure rather
+ // than creating a new one.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TCHAN, types.TFUNC, types.TMAP, types.TSLICE, types.TSTRUCT:
+ keep = true
+ }
+ }
+ // Do not put Noalg types in typelinks. See issue #22605.
+ if types.TypeHasNoAlg(t) {
+ keep = false
+ }
+ lsym.Set(obj.AttrMakeTypelink, keep)
+
+ return lsym
+}
+
+// InterfaceMethodOffset returns the offset of the i-th method in the interface
+// type descriptor, ityp.
+func InterfaceMethodOffset(ityp *types.Type, i int64) int64 {
+ // interface type descriptor layout is struct {
+ // _type // commonSize
+ // pkgpath // 1 word
+ // []imethod // 3 words (pointing to [...]imethod below)
+ // uncommontype // uncommonSize
+ // [...]imethod
+ // }
+ // The size of imethod is 8.
+ return int64(commonSize()+4*types.PtrSize+uncommonSize(ityp)) + i*8
+}
+
+// NeedRuntimeType ensures that a runtime type descriptor is emitted for t.
+func NeedRuntimeType(t *types.Type) {
+ if t.HasTParam() {
+ // Generic types don't really exist at run-time and have no runtime
+ // type descriptor. But we do write out shape types.
+ return
+ }
+ if _, ok := signatset[t]; !ok {
+ signatset[t] = struct{}{}
+ signatslice = append(signatslice, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
+ }
+}
+
+func WriteRuntimeTypes() {
+ // Process signatslice. Use a loop, as writeType adds
+ // entries to signatslice while it is being processed.
+ for len(signatslice) > 0 {
+ signats := signatslice
+ // Sort for reproducible builds.
+ sort.Sort(typesByString(signats))
+ for _, ts := range signats {
+ t := ts.t
+ writeType(t)
+ if t.Sym() != nil {
+ writeType(types.NewPtr(t))
+ }
+ }
+ signatslice = signatslice[len(signats):]
+ }
+
+ // Emit GC data symbols.
+ gcsyms := make([]typeAndStr, 0, len(gcsymset))
+ for t := range gcsymset {
+ gcsyms = append(gcsyms, typeAndStr{t: t, short: types.TypeSymName(t), regular: t.String()})
+ }
+ sort.Sort(typesByString(gcsyms))
+ for _, ts := range gcsyms {
+ dgcsym(ts.t, true)
+ }
+}
+
+// writeITab writes the itab for concrete type typ implementing interface iface. If
+// allowNonImplement is true, allow the case where typ does not implement iface, and just
+// create a dummy itab with zeroed-out method entries.
+func writeITab(lsym *obj.LSym, typ, iface *types.Type, allowNonImplement bool) {
+ // TODO(mdempsky): Fix methodWrapper, geneq, and genhash (and maybe
+ // others) to stop clobbering these.
+ oldpos, oldfn := base.Pos, ir.CurFunc
+ defer func() { base.Pos, ir.CurFunc = oldpos, oldfn }()
+
+ if typ == nil || (typ.IsPtr() && typ.Elem() == nil) || typ.IsUntyped() || iface == nil || !iface.IsInterface() || iface.IsEmptyInterface() {
+ base.Fatalf("writeITab(%v, %v)", typ, iface)
+ }
+
+ sigs := iface.AllMethods().Slice()
+ entries := make([]*obj.LSym, 0, len(sigs))
+
+ // both sigs and methods are sorted by name,
+ // so we can find the intersection in a single pass
+ for _, m := range methods(typ) {
+ if m.name == sigs[0].Sym {
+ entries = append(entries, m.isym)
+ if m.isym == nil {
+ panic("NO ISYM")
+ }
+ sigs = sigs[1:]
+ if len(sigs) == 0 {
+ break
+ }
+ }
+ }
+ completeItab := len(sigs) == 0
+ if !allowNonImplement && !completeItab {
+ base.Fatalf("incomplete itab")
+ }
+
+ // dump empty itab symbol into i.sym
+ // type itab struct {
+ // inter *interfacetype
+ // _type *_type
+ // hash uint32 // copy of _type.hash. Used for type switches.
+ // _ [4]byte
+ // fun [1]uintptr // variable sized. fun[0]==0 means _type does not implement inter.
+ // }
+ o := objw.SymPtr(lsym, 0, writeType(iface), 0)
+ o = objw.SymPtr(lsym, o, writeType(typ), 0)
+ o = objw.Uint32(lsym, o, types.TypeHash(typ)) // copy of type hash
+ o += 4 // skip unused field
+ if !completeItab {
+ // If typ doesn't implement iface, make method entries be zero.
+ o = objw.Uintptr(lsym, o, 0)
+ entries = entries[:0]
+ }
+ for _, fn := range entries {
+ o = objw.SymPtrWeak(lsym, o, fn, 0) // method pointer for each method
+ }
+ // Nothing writes static itabs, so they are read only.
+ objw.Global(lsym, int32(o), int16(obj.DUPOK|obj.RODATA))
+ lsym.Set(obj.AttrContentAddressable, true)
+}
+
+func WriteTabs() {
+ // process ptabs
+ if types.LocalPkg.Name == "main" && len(ptabs) > 0 {
+ ot := 0
+ s := base.Ctxt.Lookup("go.plugin.tabs")
+ for _, p := range ptabs {
+ // Dump ptab symbol into go.pluginsym package.
+ //
+ // type ptab struct {
+ // name nameOff
+ // typ typeOff // pointer to symbol
+ // }
+ nsym := dname(p.Sym().Name, "", nil, true)
+ t := p.Type()
+ if p.Class != ir.PFUNC {
+ t = types.NewPtr(t)
+ }
+ tsym := writeType(t)
+ ot = objw.SymPtrOff(s, ot, nsym)
+ ot = objw.SymPtrOff(s, ot, tsym)
+ // Plugin exports symbols as interfaces. Mark their types
+ // as UsedInIface.
+ tsym.Set(obj.AttrUsedInIface, true)
+ }
+ objw.Global(s, int32(ot), int16(obj.RODATA))
+
+ ot = 0
+ s = base.Ctxt.Lookup("go.plugin.exports")
+ for _, p := range ptabs {
+ ot = objw.SymPtr(s, ot, p.Linksym(), 0)
+ }
+ objw.Global(s, int32(ot), int16(obj.RODATA))
+ }
+}
+
+func WriteImportStrings() {
+ // generate import strings for imported packages
+ for _, p := range types.ImportedPkgList() {
+ dimportpath(p)
+ }
+}
+
+func WriteBasicTypes() {
+ // do basic types if compiling package runtime.
+ // they have to be in at least one package,
+ // and runtime is always loaded implicitly,
+ // so this is as good as any.
+ // another possible choice would be package main,
+ // but using runtime means fewer copies in object files.
+ if base.Ctxt.Pkgpath == "runtime" {
+ for i := types.Kind(1); i <= types.TBOOL; i++ {
+ writeType(types.NewPtr(types.Types[i]))
+ }
+ writeType(types.NewPtr(types.Types[types.TSTRING]))
+ writeType(types.NewPtr(types.Types[types.TUNSAFEPTR]))
+ if base.Flag.G > 0 {
+ writeType(types.AnyType)
+ }
+
+ // emit type structs for error and func(error) string.
+ // The latter is the type of an auto-generated wrapper.
+ writeType(types.NewPtr(types.ErrorType))
+
+ writeType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.ErrorType),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TSTRING]),
+ }))
+
+ // add paths for runtime and main, which 6l imports implicitly.
+ dimportpath(ir.Pkgs.Runtime)
+
+ if base.Flag.Race {
+ dimportpath(types.NewPkg("runtime/race", ""))
+ }
+ if base.Flag.MSan {
+ dimportpath(types.NewPkg("runtime/msan", ""))
+ }
+ if base.Flag.ASan {
+ dimportpath(types.NewPkg("runtime/asan", ""))
+ }
+
+ dimportpath(types.NewPkg("main", ""))
+ }
+}
+
+type typeAndStr struct {
+ t *types.Type
+ short string // "short" here means TypeSymName
+ regular string
+}
+
+type typesByString []typeAndStr
+
+func (a typesByString) Len() int { return len(a) }
+func (a typesByString) Less(i, j int) bool {
+ if a[i].short != a[j].short {
+ return a[i].short < a[j].short
+ }
+ // When the only difference between the types is whether
+ // they refer to byte or uint8, such as **byte vs **uint8,
+ // the types' NameStrings can be identical.
+ // To preserve deterministic sort ordering, sort these by String().
+ //
+ // TODO(mdempsky): This all seems suspect. Using LinkString would
+ // avoid naming collisions, and there shouldn't be a reason to care
+ // about "byte" vs "uint8": they share the same runtime type
+ // descriptor anyway.
+ if a[i].regular != a[j].regular {
+ return a[i].regular < a[j].regular
+ }
+ // Identical anonymous interfaces defined in different locations
+ // will be equal for the above checks, but different in DWARF output.
+ // Sort by source position to ensure deterministic order.
+ // See issues 27013 and 30202.
+ if a[i].t.Kind() == types.TINTER && a[i].t.AllMethods().Len() > 0 {
+ return a[i].t.AllMethods().Index(0).Pos.Before(a[j].t.AllMethods().Index(0).Pos)
+ }
+ return false
+}
+func (a typesByString) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// maxPtrmaskBytes is the maximum length of a GC ptrmask bitmap,
+// which holds 1-bit entries describing where pointers are in a given type.
+// Above this length, the GC information is recorded as a GC program,
+// which can express repetition compactly. In either form, the
+// information is used by the runtime to initialize the heap bitmap,
+// and for large types (like 128 or more words), they are roughly the
+// same speed. GC programs are never much larger and often more
+// compact. (If large arrays are involved, they can be arbitrarily
+// more compact.)
+//
+// The cutoff must be large enough that any allocation large enough to
+// use a GC program is large enough that it does not share heap bitmap
+// bytes with any other objects, allowing the GC program execution to
+// assume an aligned start and not use atomic operations. In the current
+// runtime, this means all malloc size classes larger than the cutoff must
+// be multiples of four words. On 32-bit systems that's 16 bytes, and
+// all size classes >= 16 bytes are 16-byte aligned, so no real constraint.
+// On 64-bit systems, that's 32 bytes, and 32-byte alignment is guaranteed
+// for size classes >= 256 bytes. On a 64-bit system, 256 bytes allocated
+// is 32 pointers, the bits for which fit in 4 bytes. So maxPtrmaskBytes
+// must be >= 4.
+//
+// We used to use 16 because the GC programs do have some constant overhead
+// to get started, and processing 128 pointers seems to be enough to
+// amortize that overhead well.
+//
+// To make sure that the runtime's chansend can call typeBitsBulkBarrier,
+// we raised the limit to 2048, so that even 32-bit systems are guaranteed to
+// use bitmaps for objects up to 64 kB in size.
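+// (2048 mask bytes describe 2048*8 = 16384 pointer words, i.e. 64 kB of
+// data on 32-bit targets and 128 kB on 64-bit targets.)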
+//
+// Also known to reflect/type.go.
+//
+const maxPtrmaskBytes = 2048
+
+// GCSym returns a data symbol containing GC information for type t, along
+// with a boolean reporting whether the UseGCProg bit should be set in the
+// type kind, and the ptrdata field to record in the reflect type information.
+// GCSym may be called in concurrent backend, so it does not emit the symbol
+// content.
+func GCSym(t *types.Type) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+ // Record that we need to emit the GC symbol.
+ gcsymmu.Lock()
+ if _, ok := gcsymset[t]; !ok {
+ gcsymset[t] = struct{}{}
+ }
+ gcsymmu.Unlock()
+
+ return dgcsym(t, false)
+}
+
+// dgcsym returns a data symbol containing GC information for type t, along
+// with a boolean reporting whether the UseGCProg bit should be set in the
+// type kind, and the ptrdata field to record in the reflect type information.
+// When write is true, it writes the symbol data.
+func dgcsym(t *types.Type, write bool) (lsym *obj.LSym, useGCProg bool, ptrdata int64) {
+ ptrdata = types.PtrDataSize(t)
+ if ptrdata/int64(types.PtrSize) <= maxPtrmaskBytes*8 {
+ lsym = dgcptrmask(t, write)
+ return
+ }
+
+ useGCProg = true
+ lsym, ptrdata = dgcprog(t, write)
+ return
+}
+
+// dgcptrmask emits and returns the symbol containing a pointer mask for type t.
+func dgcptrmask(t *types.Type, write bool) *obj.LSym {
+ ptrmask := make([]byte, (types.PtrDataSize(t)/int64(types.PtrSize)+7)/8)
+ fillptrmask(t, ptrmask)
+ p := fmt.Sprintf("runtime.gcbits.%x", ptrmask)
+
+ lsym := base.Ctxt.Lookup(p)
+ if write && !lsym.OnList() {
+ for i, x := range ptrmask {
+ objw.Uint8(lsym, i, x)
+ }
+ objw.Global(lsym, int32(len(ptrmask)), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ lsym.Set(obj.AttrContentAddressable, true)
+ }
+ return lsym
+}
+
+// fillptrmask fills in ptrmask with 1s corresponding to the
+// word offsets in t that hold pointers.
+// ptrmask is assumed to fit at least types.PtrDataSize(t)/PtrSize bits.
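+// For example, a struct whose pointer words are at word offsets 0 and 2
+// (say struct{ p *int; n uintptr; q *int }) produces a first mask byte
+// with bits 0 and 2 set (0x5).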
+func fillptrmask(t *types.Type, ptrmask []byte) {
+ for i := range ptrmask {
+ ptrmask[i] = 0
+ }
+ if !t.HasPointers() {
+ return
+ }
+
+ vec := bitvec.New(8 * int32(len(ptrmask)))
+ typebits.Set(t, 0, vec)
+
+ nptr := types.PtrDataSize(t) / int64(types.PtrSize)
+ for i := int64(0); i < nptr; i++ {
+ if vec.Get(int32(i)) {
+ ptrmask[i/8] |= 1 << (uint(i) % 8)
+ }
+ }
+}
+
+// dgcprog emits and returns the symbol containing a GC program for type t
+// along with the size of the data described by the program (in the range
+// [types.PtrDataSize(t), t.Width]).
+// In practice, the size is types.PtrDataSize(t) except for non-trivial arrays.
+// For non-trivial arrays, the program describes the full t.Width size.
+func dgcprog(t *types.Type, write bool) (*obj.LSym, int64) {
+ types.CalcSize(t)
+ if t.Size() == types.BADWIDTH {
+ base.Fatalf("dgcprog: %v badwidth", t)
+ }
+ lsym := TypeLinksymPrefix(".gcprog", t)
+ var p gcProg
+ p.init(lsym, write)
+ p.emit(t, 0)
+ offset := p.w.BitIndex() * int64(types.PtrSize)
+ p.end()
+ if ptrdata := types.PtrDataSize(t); offset < ptrdata || offset > t.Size() {
+ base.Fatalf("dgcprog: %v: offset=%d but ptrdata=%d size=%d", t, offset, ptrdata, t.Size())
+ }
+ return lsym, offset
+}
+
+type gcProg struct {
+ lsym *obj.LSym
+ symoff int
+ w gcprog.Writer
+ write bool
+}
+
+func (p *gcProg) init(lsym *obj.LSym, write bool) {
+ p.lsym = lsym
+ p.write = write && !lsym.OnList()
+ p.symoff = 4 // first 4 bytes hold program length
+ if !write {
+ p.w.Init(func(byte) {})
+ return
+ }
+ p.w.Init(p.writeByte)
+ if base.Debug.GCProg > 0 {
+ fmt.Fprintf(os.Stderr, "compile: start GCProg for %v\n", lsym)
+ p.w.Debug(os.Stderr)
+ }
+}
+
+func (p *gcProg) writeByte(x byte) {
+ p.symoff = objw.Uint8(p.lsym, p.symoff, x)
+}
+
+func (p *gcProg) end() {
+ p.w.End()
+ if !p.write {
+ return
+ }
+ objw.Uint32(p.lsym, 0, uint32(p.symoff-4))
+ objw.Global(p.lsym, int32(p.symoff), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ p.lsym.Set(obj.AttrContentAddressable, true)
+ if base.Debug.GCProg > 0 {
+ fmt.Fprintf(os.Stderr, "compile: end GCProg for %v\n", p.lsym)
+ }
+}
+
+func (p *gcProg) emit(t *types.Type, offset int64) {
+ types.CalcSize(t)
+ if !t.HasPointers() {
+ return
+ }
+ if t.Size() == int64(types.PtrSize) {
+ p.w.Ptr(offset / int64(types.PtrSize))
+ return
+ }
+ switch t.Kind() {
+ default:
+ base.Fatalf("gcProg.emit: unexpected type %v", t)
+
+ case types.TSTRING:
+ p.w.Ptr(offset / int64(types.PtrSize))
+
+ case types.TINTER:
+ // Note: the first word isn't a pointer. See comment in typebits.Set
+ p.w.Ptr(offset/int64(types.PtrSize) + 1)
+
+ case types.TSLICE:
+ p.w.Ptr(offset / int64(types.PtrSize))
+
+ case types.TARRAY:
+ if t.NumElem() == 0 {
+ // should have been handled by haspointers check above
+ base.Fatalf("gcProg.emit: empty array")
+ }
+
+ // Flatten array-of-array-of-array to just a big array by multiplying counts.
+ count := t.NumElem()
+ elem := t.Elem()
+ for elem.IsArray() {
+ count *= elem.NumElem()
+ elem = elem.Elem()
+ }
+
+ if !p.w.ShouldRepeat(elem.Size()/int64(types.PtrSize), count) {
+ // Cheaper to just emit the bits.
+ for i := int64(0); i < count; i++ {
+ p.emit(elem, offset+i*elem.Size())
+ }
+ return
+ }
+ p.emit(elem, offset)
+ p.w.ZeroUntil((offset + elem.Size()) / int64(types.PtrSize))
+ p.w.Repeat(elem.Size()/int64(types.PtrSize), count-1)
+
+ case types.TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ p.emit(t1.Type, offset+t1.Offset)
+ }
+ }
+}
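+
+// As a sketch of the array case above (assuming 8-byte pointers), emitting
+// a [1000]*int at offset 0 flattens to elem=*int, count=1000; if the
+// writer reports that repeating is worthwhile, the program is roughly
+//
+//	p.emit(*int, 0)     // one pointer bit at word 0
+//	p.w.ZeroUntil(1)    // pad out to the end of the first element
+//	p.w.Repeat(1, 999)  // repeat the one-word pattern 999 more times
+//
+// instead of 1000 individually emitted pointer bits.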
+
+// ZeroAddr returns the address of a symbol with at least
+// size bytes of zeros.
+func ZeroAddr(size int64) ir.Node {
+ if size >= 1<<31 {
+ base.Fatalf("map elem too big %d", size)
+ }
+ if ZeroSize < size {
+ ZeroSize = size
+ }
+ lsym := base.PkgLinksym("go.map", "zero", obj.ABI0)
+ x := ir.NewLinksymExpr(base.Pos, lsym, types.Types[types.TUINT8])
+ return typecheck.Expr(typecheck.NodAddr(x))
+}
+
+func CollectPTabs() {
+ if !base.Ctxt.Flag_dynlink || types.LocalPkg.Name != "main" {
+ return
+ }
+ for _, exportn := range typecheck.Target.Exports {
+ s := exportn.Sym()
+ nn := ir.AsNode(s.Def)
+ if nn == nil {
+ continue
+ }
+ if nn.Op() != ir.ONAME {
+ continue
+ }
+ n := nn.(*ir.Name)
+ if !types.IsExported(s.Name) {
+ continue
+ }
+ if s.Pkg.Name != "main" {
+ continue
+ }
+ ptabs = append(ptabs, n)
+ }
+}
+
+// NeedEmit reports whether typ is a type that we need to emit code
+// for (e.g., runtime type descriptors, method wrappers).
+func NeedEmit(typ *types.Type) bool {
+ // TODO(mdempsky): Export data should keep track of which anonymous
+ // and instantiated types were emitted, so at least downstream
+ // packages can skip re-emitting them.
+ //
+ // Perhaps we can just generalize the linker-symbol indexing to
+ // track the index of arbitrary types, not just defined types, and
+ // use its presence to detect this. The same idea would work for
+ // instantiated generic functions too.
+
+ switch sym := typ.Sym(); {
+ case sym == nil:
+ // Anonymous type; possibly never seen before or ever again.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case sym.Pkg == types.LocalPkg:
+ // Local defined type; our responsibility.
+ return true
+
+ case base.Ctxt.Pkgpath == "runtime" && (sym.Pkg == types.BuiltinPkg || sym.Pkg == types.UnsafePkg):
+ // Package runtime is responsible for including code for builtin
+ // types (predeclared and package unsafe).
+ return true
+
+ case typ.IsFullyInstantiated():
+ // Instantiated type; possibly instantiated with unique type arguments.
+ // Need to emit to be safe (however, see TODO above).
+ return true
+
+ case typ.HasShape():
+ // Shape type; need to emit even though it lives in the .shape package.
+ // TODO: make sure the linker deduplicates them (see dupok in writeType above).
+ return true
+
+ default:
+ // Should have been emitted by an imported package.
+ return false
+ }
+}
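+
+// For example, a package that declares type Celsius float64 must emit the
+// descriptor for Celsius itself, a use of an imported defined type such as
+// bytes.Buffer relies on the defining package having emitted one, and an
+// anonymous struct{ X, Y int } is emitted defensively by every package
+// that mentions it.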
+
+// Generate a wrapper function to convert from
+// a receiver of type T to a receiver of type U.
+// That is,
+//
+// func (t T) M() {
+// ...
+// }
+//
+// already exists; this function generates
+//
+// func (u U) M() {
+// u.M()
+// }
+//
+// where the types T and U are such that u.M() is valid
+// and calls the T.M method.
+// The resulting function is for use in method tables.
+//
+// rcvr - U
+// method - M func (t T)(), a TFIELD type struct
+//
+// Also wraps methods on instantiated generic types for use in itab entries.
+// For an instantiated generic type G[int], we generate wrappers like:
+// G[int] pointer shaped:
+// func (x G[int]) f(arg) {
+// .inst.G[int].f(dictionary, x, arg)
+// }
+// G[int] not pointer shaped:
+// func (x *G[int]) f(arg) {
+// .inst.G[int].f(dictionary, *x, arg)
+// }
+// These wrappers are always fully stenciled.
+func methodWrapper(rcvr *types.Type, method *types.Field, forItab bool) *obj.LSym {
+ orig := rcvr
+ if forItab && !types.IsDirectIface(rcvr) {
+ rcvr = rcvr.PtrTo()
+ }
+
+ generic := false
+ // We don't need a dictionary if we are reaching a method (possibly via an
+ // embedded field) which is an interface method.
+ if !types.IsInterfaceMethod(method.Type) {
+ rcvr1 := deref(rcvr)
+ if len(rcvr1.RParams()) > 0 {
+ // If rcvr has rparams, remember method as generic, which
+ // means we need to add a dictionary to the wrapper.
+ generic = true
+ if rcvr.HasShape() {
+ base.Fatalf("method on type instantiated with shapes, rcvr:%+v", rcvr)
+ }
+ }
+ }
+
+ newnam := ir.MethodSym(rcvr, method.Sym)
+ lsym := newnam.Linksym()
+ if newnam.Siggen() {
+ return lsym
+ }
+ newnam.SetSiggen(true)
+
+ // Except in quirks mode, unified IR creates its own wrappers.
+ if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
+ return lsym
+ }
+
+ methodrcvr := method.Type.Recv().Type
+ // For generic methods, we need to generate the wrapper even if the receiver
+ // types are identical, because we want to add the dictionary.
+ if !generic && types.Identical(rcvr, methodrcvr) {
+ return lsym
+ }
+
+ if !NeedEmit(rcvr) || rcvr.IsPtr() && !NeedEmit(rcvr.Elem()) {
+ return lsym
+ }
+
+ base.Pos = base.AutogeneratedPos
+ typecheck.DeclContext = ir.PEXTERN
+
+ tfn := ir.NewFuncType(base.Pos,
+ ir.NewField(base.Pos, typecheck.Lookup(".this"), nil, rcvr),
+ typecheck.NewFuncParams(method.Type.Params(), true),
+ typecheck.NewFuncParams(method.Type.Results(), false))
+
+ // TODO(austin): SelectorExpr may have created one or more
+ // ir.Names for these already with a nil Func field. We should
+ // consolidate these and always attach a Func to the Name.
+ fn := typecheck.DeclFunc(newnam, tfn)
+ fn.SetDupok(true)
+
+ nthis := ir.AsNode(tfn.Type().Recv().Nname)
+
+ indirect := rcvr.IsPtr() && rcvr.Elem() == methodrcvr
+
+ // generate nil pointer check for better error
+ if indirect {
+ // generating wrapper from *T to T.
+ n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ n.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, nthis, typecheck.NodNil())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, typecheck.LookupRuntime("panicwrap"), nil)
+ n.Body = []ir.Node{call}
+ fn.Body.Append(n)
+ }
+
+ dot := typecheck.AddImplicitDots(ir.NewSelectorExpr(base.Pos, ir.OXDOT, nthis, method.Sym))
+ // generate call
+ // It's not possible to use a tail call when dynamic linking on ppc64le. The
+ // bad scenario is when a local call is made to the wrapper: the wrapper will
+ // call the implementation, which might be in a different module and so set
+ // the TOC to the appropriate value for that module. But if it returns
+ // directly to the wrapper's caller, nothing will reset it to the correct
+ // value for that function.
+ var call *ir.CallExpr
+ if !base.Flag.Cfg.Instrumenting && rcvr.IsPtr() && methodrcvr.IsPtr() && method.Embedded != 0 && !types.IsInterfaceMethod(method.Type) && !(base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink) && !generic {
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+ fn.Body.Append(ir.NewTailCallStmt(base.Pos, call))
+ } else {
+ fn.SetWrapper(true) // ignore frame for panic+recover matching
+
+ if generic && dot.X != nthis {
+ // If there is embedding involved, then we should do the
+ // normal non-generic embedding wrapper below, which calls
+ // the wrapper for the real receiver type using dot as an
+ // argument. There is no need for generic processing (adding
+ // a dictionary) for this wrapper.
+ generic = false
+ }
+
+ if generic {
+ targs := deref(rcvr).RParams()
+ // The wrapper for an auto-generated pointer/non-pointer
+ // receiver method should share the same dictionary as the
+ // corresponding original (user-written) method.
+ baseOrig := orig
+ if baseOrig.IsPtr() && !methodrcvr.IsPtr() {
+ baseOrig = baseOrig.Elem()
+ } else if !baseOrig.IsPtr() && methodrcvr.IsPtr() {
+ baseOrig = types.NewPtr(baseOrig)
+ }
+ args := []ir.Node{getDictionary(ir.MethodSym(baseOrig, method.Sym), targs)}
+ if indirect {
+ args = append(args, ir.NewStarExpr(base.Pos, dot.X))
+ } else if methodrcvr.IsPtr() && methodrcvr.Elem() == dot.X.Type() {
+ // Case where method call is via a non-pointer
+ // embedded field with a pointer method.
+ args = append(args, typecheck.NodAddrAt(base.Pos, dot.X))
+ } else {
+ args = append(args, dot.X)
+ }
+ args = append(args, ir.ParamNames(tfn.Type())...)
+
+ // Target method uses shaped names.
+ targs2 := make([]*types.Type, len(targs))
+ origRParams := deref(orig).OrigType().RParams()
+ for i, t := range targs {
+ targs2[i] = typecheck.Shapify(t, i, origRParams[i])
+ }
+ targs = targs2
+
+ sym := typecheck.MakeFuncInstSym(ir.MethodSym(methodrcvr, method.Sym), targs, false, true)
+ if sym.Def == nil {
+ // Currently we make sure that we have all the
+ // instantiations we need by generating them all in
+ // ../noder/stencil.go:instantiateMethods
+ // Extra instantiations because of an inlined function
+ // should have been exported, and so available via
+ // Resolve.
+ in := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ if in.Op() == ir.ONONAME {
+ base.Fatalf("instantiation %s not found", sym.Name)
+ }
+ sym = in.Sym()
+ }
+ target := ir.AsNode(sym.Def)
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, target, args)
+			// Fill in the generic method node that was not filled in
+			// in instantiateMethod.
+ method.Nname = fn.Nname
+ } else {
+ call = ir.NewCallExpr(base.Pos, ir.OCALL, dot, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ }
+ call.IsDDD = tfn.Type().IsVariadic()
+ if method.Type.NumResults() > 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ fn.Body.Append(ret)
+ } else {
+ fn.Body.Append(call)
+ }
+ }
+
+ typecheck.FinishFuncBody()
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ typecheck.Func(fn)
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+
+ if AfterGlobalEscapeAnalysis {
+		// Inlining the method may reveal closures, which require walking all function
+		// bodies to decide whether to capture free variables by value or by reference.
+		// So we only inline if the method does not contain any closures; otherwise,
+		// escape analysis may resurrect dead variables and confuse the liveness
+		// analysis. See issue #53702.
+ var canInline bool
+ switch x := call.X.(type) {
+ case *ir.Name:
+ canInline = len(x.Func.Closures) == 0
+ case *ir.SelectorExpr:
+ if x.Op() == ir.OMETHEXPR {
+ canInline = x.FuncName().Func != nil && len(x.FuncName().Func.Closures) == 0
+ }
+ }
+ if canInline {
+ inline.InlineCalls(fn)
+ }
+ escape.Batch([]*ir.Func{fn}, false)
+ }
+
+ ir.CurFunc = nil
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+ return lsym
+}
+
+// AfterGlobalEscapeAnalysis tracks whether package gc has already
+// performed the main, global escape analysis pass. If so,
+// methodWrapper takes responsibility for escape analyzing any
+// generated wrappers.
+var AfterGlobalEscapeAnalysis bool
+
+var ZeroSize int64
+
+// MarkTypeUsedInInterface marks that type t is converted to an interface.
+// This information is used in the linker in dead method elimination.
+func MarkTypeUsedInInterface(t *types.Type, from *obj.LSym) {
+ if t.HasShape() {
+ // Shape types shouldn't be put in interfaces, so we shouldn't ever get here.
+ base.Fatalf("shape types have no methods %+v", t)
+ }
+ tsym := TypeLinksym(t)
+ // Emit a marker relocation. The linker will know the type is converted
+ // to an interface if "from" is reachable.
+ r := obj.Addrel(from)
+ r.Sym = tsym
+ r.Type = objabi.R_USEIFACE
+}
+
+// MarkUsedIfaceMethod marks that an interface method is used in the current
+// function. n is OCALLINTER node.
+func MarkUsedIfaceMethod(n *ir.CallExpr) {
+ // skip unnamed functions (func _())
+ if ir.CurFunc.LSym == nil {
+ return
+ }
+ dot := n.X.(*ir.SelectorExpr)
+ ityp := dot.X.Type()
+ if ityp.HasShape() {
+ // Here we're calling a method on a generic interface. Something like:
+ //
+ // type I[T any] interface { foo() T }
+ // func f[T any](x I[T]) {
+ // ... = x.foo()
+ // }
+ // f[int](...)
+ // f[string](...)
+ //
+ // In this case, in f we're calling foo on a generic interface.
+ // Which method could that be? Normally we could match the method
+ // both by name and by type. But in this case we don't really know
+ // the type of the method we're calling. It could be func()int
+ // or func()string. So we match on just the function name, instead
+ // of both the name and the type used for the non-generic case below.
+ // TODO: instantiations at least know the shape of the instantiated
+ // type, and the linker could do more complicated matching using
+ // some sort of fuzzy shape matching. For now, only use the name
+ // of the method for matching.
+ r := obj.Addrel(ir.CurFunc.LSym)
+ // We use a separate symbol just to tell the linker the method name.
+ // (The symbol itself is not needed in the final binary.)
+ r.Sym = staticdata.StringSym(src.NoXPos, dot.Sel.Name)
+ r.Type = objabi.R_USEGENERICIFACEMETHOD
+ return
+ }
+
+ tsym := TypeLinksym(ityp)
+ r := obj.Addrel(ir.CurFunc.LSym)
+ r.Sym = tsym
+ // dot.Offset() is the method index * PtrSize (the offset of code pointer
+ // in itab).
+ midx := dot.Offset() / int64(types.PtrSize)
+ r.Add = InterfaceMethodOffset(ityp, midx)
+ r.Type = objabi.R_USEIFACEMETHOD
+}
+
+// getDictionary returns the dictionary for the given named generic function
+// or method, with the given type arguments.
+func getDictionary(gf *types.Sym, targs []*types.Type) ir.Node {
+ if len(targs) == 0 {
+ base.Fatalf("%s should have type arguments", gf.Name)
+ }
+ for _, t := range targs {
+ if t.HasShape() {
+ base.Fatalf("dictionary for %s should only use concrete types: %+v", gf.Name, t)
+ }
+ }
+
+ sym := typecheck.MakeDictSym(gf, targs, true)
+
+ // Dictionary should already have been generated by instantiateMethods().
+ // Extra dictionaries needed because of an inlined function should have been
+ // exported, and so available via Resolve.
+ if lsym := sym.Linksym(); len(lsym.P) == 0 {
+ in := typecheck.Resolve(ir.NewIdent(src.NoXPos, sym))
+ if in.Op() == ir.ONONAME {
+ base.Fatalf("Dictionary should have already been generated: %s.%s", sym.Pkg.Path, sym.Name)
+ }
+ sym = in.Sym()
+ }
+
+ // Make (or reuse) a node referencing the dictionary symbol.
+ var n *ir.Name
+ if sym.Def != nil {
+ n = sym.Def.(*ir.Name)
+ } else {
+ n = typecheck.NewName(sym)
+ n.SetType(types.Types[types.TUINTPTR]) // should probably be [...]uintptr, but doesn't really matter
+ n.SetTypecheck(1)
+ n.Class = ir.PEXTERN
+ sym.Def = n
+ }
+
+ // Return the address of the dictionary.
+ np := typecheck.NodAddr(n)
+ // Note: treat dictionary pointers as uintptrs, so they aren't pointers
+ // with respect to GC. That saves on stack scanning work, write barriers, etc.
+ // We can get away with it because dictionaries are global variables.
+ np.SetType(types.Types[types.TUINTPTR])
+ np.SetTypecheck(1)
+ return np
+}
+
+func deref(t *types.Type) *types.Type {
+ if t.IsPtr() {
+ return t.Elem()
+ }
+ return t
+}
diff --git a/src/cmd/compile/internal/riscv64/galign.go b/src/cmd/compile/internal/riscv64/galign.go
new file mode 100644
index 0000000..846ed8f
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/galign.go
@@ -0,0 +1,24 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/riscv"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &riscv.LinkRISCV64
+
+ arch.REGSP = riscv.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.Ginsnop = ginsnop
+ arch.ZeroRange = zeroRange
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/riscv64/ggen.go b/src/cmd/compile/internal/riscv64/ggen.go
new file mode 100644
index 0000000..9df7394
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ggen.go
@@ -0,0 +1,59 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
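+// zeroRange zeroes cnt bytes of stack starting at offset off: small
+// ranges use direct stores of ZERO, medium ranges call into Duffzero,
+// and large ranges use an explicit loop. For example (with 8-byte
+// pointers), cnt=16 becomes two stores, cnt=256 a Duffzero call, and
+// cnt=2048 the loop.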
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.FixedFrameSize()
+
+ if cnt < int64(4*types.PtrSize) {
+ for i := int64(0); i < cnt; i += int64(types.PtrSize) {
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_SP, off+i)
+ }
+ return p
+ }
+
+ if cnt <= int64(128*types.PtrSize) {
+ p = pp.Append(p, riscv.AADDI, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_A0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_MEM, 0, 0)
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = 8 * (128 - cnt/int64(types.PtrSize))
+ return p
+ }
+
+ // Loop, zeroing pointer width bytes at a time.
+ // ADD $(off), SP, T0
+ // ADD $(cnt), T0, T1
+ // loop:
+ // MOV ZERO, (T0)
+ // ADD $Widthptr, T0
+ // BNE T0, T1, loop
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, riscv.REG_T0, 0)
+ p.Reg = riscv.REG_SP
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, cnt, obj.TYPE_REG, riscv.REG_T1, 0)
+ p.Reg = riscv.REG_T0
+ p = pp.Append(p, riscv.AMOV, obj.TYPE_REG, riscv.REG_ZERO, 0, obj.TYPE_MEM, riscv.REG_T0, 0)
+ loop := p
+ p = pp.Append(p, riscv.AADD, obj.TYPE_CONST, 0, int64(types.PtrSize), obj.TYPE_REG, riscv.REG_T0, 0)
+ p = pp.Append(p, riscv.ABNE, obj.TYPE_REG, riscv.REG_T0, 0, obj.TYPE_BRANCH, 0, 0)
+ p.Reg = riscv.REG_T1
+ p.To.SetTarget(loop)
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/gsubr.go b/src/cmd/compile/internal/riscv64/gsubr.go
new file mode 100644
index 0000000..74bccf8
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/gsubr.go
@@ -0,0 +1,20 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/objw"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // Hardware nop is ADD $0, ZERO
+ p := pp.Prog(riscv.AADD)
+ p.From.Type = obj.TYPE_CONST
+ p.Reg = riscv.REG_ZERO
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: riscv.REG_ZERO}
+ return p
+}
diff --git a/src/cmd/compile/internal/riscv64/ssa.go b/src/cmd/compile/internal/riscv64/ssa.go
new file mode 100644
index 0000000..1359b6a
--- /dev/null
+++ b/src/cmd/compile/internal/riscv64/ssa.go
@@ -0,0 +1,775 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package riscv64
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/riscv"
+)
+
+// ssaRegToReg maps ssa register numbers to obj register numbers.
+var ssaRegToReg = []int16{
+ riscv.REG_X0,
+ // X1 (LR): unused
+ riscv.REG_X2,
+ riscv.REG_X3,
+ riscv.REG_X4,
+ riscv.REG_X5,
+ riscv.REG_X6,
+ riscv.REG_X7,
+ riscv.REG_X8,
+ riscv.REG_X9,
+ riscv.REG_X10,
+ riscv.REG_X11,
+ riscv.REG_X12,
+ riscv.REG_X13,
+ riscv.REG_X14,
+ riscv.REG_X15,
+ riscv.REG_X16,
+ riscv.REG_X17,
+ riscv.REG_X18,
+ riscv.REG_X19,
+ riscv.REG_X20,
+ riscv.REG_X21,
+ riscv.REG_X22,
+ riscv.REG_X23,
+ riscv.REG_X24,
+ riscv.REG_X25,
+ riscv.REG_X26,
+ riscv.REG_X27,
+ riscv.REG_X28,
+ riscv.REG_X29,
+ riscv.REG_X30,
+ riscv.REG_X31,
+ riscv.REG_F0,
+ riscv.REG_F1,
+ riscv.REG_F2,
+ riscv.REG_F3,
+ riscv.REG_F4,
+ riscv.REG_F5,
+ riscv.REG_F6,
+ riscv.REG_F7,
+ riscv.REG_F8,
+ riscv.REG_F9,
+ riscv.REG_F10,
+ riscv.REG_F11,
+ riscv.REG_F12,
+ riscv.REG_F13,
+ riscv.REG_F14,
+ riscv.REG_F15,
+ riscv.REG_F16,
+ riscv.REG_F17,
+ riscv.REG_F18,
+ riscv.REG_F19,
+ riscv.REG_F20,
+ riscv.REG_F21,
+ riscv.REG_F22,
+ riscv.REG_F23,
+ riscv.REG_F24,
+ riscv.REG_F25,
+ riscv.REG_F26,
+ riscv.REG_F27,
+ riscv.REG_F28,
+ riscv.REG_F29,
+ riscv.REG_F30,
+ riscv.REG_F31,
+ 0, // SB isn't a real register. We fill an Addr.Reg field with 0 in this case.
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ base.Fatalf("unknown float width for load %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ if t.IsSigned() {
+ return riscv.AMOVB
+ } else {
+ return riscv.AMOVBU
+ }
+ case 2:
+ if t.IsSigned() {
+ return riscv.AMOVH
+ } else {
+ return riscv.AMOVHU
+ }
+ case 4:
+ if t.IsSigned() {
+ return riscv.AMOVW
+ } else {
+ return riscv.AMOVWU
+ }
+ case 8:
+ return riscv.AMOV
+ default:
+ base.Fatalf("unknown width for load %d in type %v", width, t)
+ return 0
+ }
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return riscv.AMOVF
+ case 8:
+ return riscv.AMOVD
+ default:
+ base.Fatalf("unknown float width for store %d in type %v", width, t)
+ return 0
+ }
+ }
+
+ switch width {
+ case 1:
+ return riscv.AMOVB
+ case 2:
+ return riscv.AMOVH
+ case 4:
+ return riscv.AMOVW
+ case 8:
+ return riscv.AMOV
+ default:
+ base.Fatalf("unknown width for store %d in type %v", width, t)
+ return 0
+ }
+}
+
+// largestMove returns the largest move instruction possible and its size,
+// given the alignment of the total size of the move.
+//
+// e.g., a 16-byte move may use MOV, but an 11-byte move must use MOVB.
+//
+// Note that the moves may not be on naturally aligned addresses depending on
+// the source and destination.
+//
+// This matches the calculation in ssa.moveSize.
+func largestMove(alignment int64) (obj.As, int64) {
+ switch {
+ case alignment%8 == 0:
+ return riscv.AMOV, 8
+ case alignment%4 == 0:
+ return riscv.AMOVW, 4
+ case alignment%2 == 0:
+ return riscv.AMOVH, 2
+ default:
+ return riscv.AMOVB, 1
+ }
+}
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+// RISC-V has no flags, so this is a no-op.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ s.SetPos(v.Pos)
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpPhi:
+ ssagen.CheckLoweredPhi(v)
+ case ssa.OpCopy, ssa.OpRISCV64MOVconvert, ssa.OpRISCV64MOVDreg:
+ if v.Type.IsMemory() {
+ return
+ }
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if rs == rd {
+ return
+ }
+ as := riscv.AMOV
+ if v.Type.IsFloat() {
+ as = riscv.AMOVD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64MOVDnop:
+ // nothing to do
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpSP, ssa.OpSB, ssa.OpGetG:
+ // nothing to do
+ case ssa.OpRISCV64MOVBreg, ssa.OpRISCV64MOVHreg, ssa.OpRISCV64MOVWreg,
+ ssa.OpRISCV64MOVBUreg, ssa.OpRISCV64MOVHUreg, ssa.OpRISCV64MOVWUreg:
+ a := v.Args[0]
+ for a.Op == ssa.OpCopy || a.Op == ssa.OpRISCV64MOVDreg {
+ a = a.Args[0]
+ }
+ as := v.Op.Asm()
+ rs := v.Args[0].Reg()
+ rd := v.Reg()
+ if a.Op == ssa.OpLoadReg {
+ t := a.Type
+ switch {
+ case v.Op == ssa.OpRISCV64MOVBreg && t.Size() == 1 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHreg && t.Size() == 2 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWreg && t.Size() == 4 && t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVBUreg && t.Size() == 1 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVHUreg && t.Size() == 2 && !t.IsSigned(),
+ v.Op == ssa.OpRISCV64MOVWUreg && t.Size() == 4 && !t.IsSigned():
+ // arg is a proper-typed load and already sign/zero-extended
+ if rs == rd {
+ return
+ }
+ as = riscv.AMOV
+ default:
+ }
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = rs
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = rd
+ case ssa.OpRISCV64ADD, ssa.OpRISCV64SUB, ssa.OpRISCV64SUBW, ssa.OpRISCV64XOR, ssa.OpRISCV64OR, ssa.OpRISCV64AND,
+ ssa.OpRISCV64SLL, ssa.OpRISCV64SRA, ssa.OpRISCV64SRL,
+ ssa.OpRISCV64SLT, ssa.OpRISCV64SLTU, ssa.OpRISCV64MUL, ssa.OpRISCV64MULW, ssa.OpRISCV64MULH,
+ ssa.OpRISCV64MULHU, ssa.OpRISCV64DIV, ssa.OpRISCV64DIVU, ssa.OpRISCV64DIVW,
+ ssa.OpRISCV64DIVUW, ssa.OpRISCV64REM, ssa.OpRISCV64REMU, ssa.OpRISCV64REMW,
+ ssa.OpRISCV64REMUW,
+ ssa.OpRISCV64FADDS, ssa.OpRISCV64FSUBS, ssa.OpRISCV64FMULS, ssa.OpRISCV64FDIVS,
+ ssa.OpRISCV64FEQS, ssa.OpRISCV64FNES, ssa.OpRISCV64FLTS, ssa.OpRISCV64FLES,
+ ssa.OpRISCV64FADDD, ssa.OpRISCV64FSUBD, ssa.OpRISCV64FMULD, ssa.OpRISCV64FDIVD,
+ ssa.OpRISCV64FEQD, ssa.OpRISCV64FNED, ssa.OpRISCV64FLTD, ssa.OpRISCV64FLED,
+ ssa.OpRISCV64FSGNJD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64LoweredMuluhilo:
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(riscv.AMULHU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ p1 := s.Prog(riscv.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg1()
+ case ssa.OpRISCV64LoweredMuluover:
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ p := s.Prog(riscv.AMULHU)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg1()
+ p1 := s.Prog(riscv.AMUL)
+ p1.From.Type = obj.TYPE_REG
+ p1.From.Reg = r1
+ p1.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = v.Reg0()
+ p2 := s.Prog(riscv.ASNEZ)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = v.Reg1()
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg1()
+ case ssa.OpRISCV64FMADDD, ssa.OpRISCV64FMSUBD, ssa.OpRISCV64FNMADDD, ssa.OpRISCV64FNMSUBD:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ r3 := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.Reg = r1
+ p.SetRestArgs([]obj.Addr{{Type: obj.TYPE_REG, Reg: r3}})
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpRISCV64FSQRTS, ssa.OpRISCV64FNEGS, ssa.OpRISCV64FABSD, ssa.OpRISCV64FSQRTD, ssa.OpRISCV64FNEGD,
+ ssa.OpRISCV64FMVSX, ssa.OpRISCV64FMVDX,
+ ssa.OpRISCV64FCVTSW, ssa.OpRISCV64FCVTSL, ssa.OpRISCV64FCVTWS, ssa.OpRISCV64FCVTLS,
+ ssa.OpRISCV64FCVTDW, ssa.OpRISCV64FCVTDL, ssa.OpRISCV64FCVTWD, ssa.OpRISCV64FCVTLD, ssa.OpRISCV64FCVTDS, ssa.OpRISCV64FCVTSD,
+ ssa.OpRISCV64NOT, ssa.OpRISCV64NEG, ssa.OpRISCV64NEGW:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64ADDI, ssa.OpRISCV64ADDIW, ssa.OpRISCV64XORI, ssa.OpRISCV64ORI, ssa.OpRISCV64ANDI,
+ ssa.OpRISCV64SLLI, ssa.OpRISCV64SRAI, ssa.OpRISCV64SRLI, ssa.OpRISCV64SLTI,
+ ssa.OpRISCV64SLTIU:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVDconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVaddr:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_ADDR
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ var wantreg string
+ // MOVW $sym+off(base), R
+ switch v.Aux.(type) {
+ default:
+ v.Fatalf("aux is of unknown type %T", v.Aux)
+ case *obj.LSym:
+ wantreg = "SB"
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ wantreg = "SP"
+ ssagen.AddAux(&p.From, v)
+ case nil:
+ // No sym, just MOVW $off(SP), R
+ wantreg = "SP"
+ p.From.Reg = riscv.REG_SP
+ p.From.Offset = v.AuxInt
+ }
+ if reg := v.Args[0].RegName(); reg != wantreg {
+ v.Fatalf("bad reg %s for symbol type %T, want %s", reg, v.Aux, wantreg)
+ }
+ case ssa.OpRISCV64MOVBload, ssa.OpRISCV64MOVHload, ssa.OpRISCV64MOVWload, ssa.OpRISCV64MOVDload,
+ ssa.OpRISCV64MOVBUload, ssa.OpRISCV64MOVHUload, ssa.OpRISCV64MOVWUload,
+ ssa.OpRISCV64FMOVWload, ssa.OpRISCV64FMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64MOVBstore, ssa.OpRISCV64MOVHstore, ssa.OpRISCV64MOVWstore, ssa.OpRISCV64MOVDstore,
+ ssa.OpRISCV64FMOVWstore, ssa.OpRISCV64FMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpRISCV64MOVBstorezero, ssa.OpRISCV64MOVHstorezero, ssa.OpRISCV64MOVWstorezero, ssa.OpRISCV64MOVDstorezero:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpRISCV64SEQZ, ssa.OpRISCV64SNEZ:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpRISCV64CALLstatic, ssa.OpRISCV64CALLclosure, ssa.OpRISCV64CALLinter:
+ s.Call(v)
+ case ssa.OpRISCV64CALLtail:
+ s.TailCall(v)
+ case ssa.OpRISCV64LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpRISCV64LoweredPanicBoundsA, ssa.OpRISCV64LoweredPanicBoundsB, ssa.OpRISCV64LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+
+ case ssa.OpRISCV64LoweredAtomicLoad8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVBU)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicLoad32, ssa.OpRISCV64LoweredAtomicLoad64:
+ as := riscv.ALRW
+ if v.Op == ssa.OpRISCV64LoweredAtomicLoad64 {
+ as = riscv.ALRD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicStore8:
+ s.Prog(riscv.AFENCE)
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ s.Prog(riscv.AFENCE)
+
+ case ssa.OpRISCV64LoweredAtomicStore32, ssa.OpRISCV64LoweredAtomicStore64:
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicStore64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
+ case ssa.OpRISCV64LoweredAtomicAdd32, ssa.OpRISCV64LoweredAtomicAdd64:
+ as := riscv.AAMOADDW
+ if v.Op == ssa.OpRISCV64LoweredAtomicAdd64 {
+ as = riscv.AAMOADDD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_TMP
+ p2.Reg = v.Args[1].Reg()
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicExchange32, ssa.OpRISCV64LoweredAtomicExchange64:
+ as := riscv.AAMOSWAPW
+ if v.Op == ssa.OpRISCV64LoweredAtomicExchange64 {
+ as = riscv.AAMOSWAPD
+ }
+ p := s.Prog(as)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = v.Reg0()
+
+ case ssa.OpRISCV64LoweredAtomicCas32, ssa.OpRISCV64LoweredAtomicCas64:
+ // MOV ZERO, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+
+ lr := riscv.ALRW
+ sc := riscv.ASCW
+ if v.Op == ssa.OpRISCV64LoweredAtomicCas64 {
+ lr = riscv.ALRD
+ sc = riscv.ASCD
+ }
+
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ out := v.Reg0()
+
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = out
+
+ p1 := s.Prog(lr)
+ p1.From.Type = obj.TYPE_MEM
+ p1.From.Reg = r0
+ p1.To.Type = obj.TYPE_REG
+ p1.To.Reg = riscv.REG_TMP
+
+ p2 := s.Prog(riscv.ABNE)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = r1
+ p2.Reg = riscv.REG_TMP
+ p2.To.Type = obj.TYPE_BRANCH
+
+ p3 := s.Prog(sc)
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = r2
+ p3.To.Type = obj.TYPE_MEM
+ p3.To.Reg = r0
+ p3.RegTo2 = riscv.REG_TMP
+
+ p4 := s.Prog(riscv.ABNE)
+ p4.From.Type = obj.TYPE_REG
+ p4.From.Reg = riscv.REG_TMP
+ p4.Reg = riscv.REG_ZERO
+ p4.To.Type = obj.TYPE_BRANCH
+ p4.To.SetTarget(p1)
+
+ p5 := s.Prog(riscv.AMOV)
+ p5.From.Type = obj.TYPE_CONST
+ p5.From.Offset = 1
+ p5.To.Type = obj.TYPE_REG
+ p5.To.Reg = out
+
+ p6 := s.Prog(obj.ANOP)
+ p2.To.SetTarget(p6)
+
+ case ssa.OpRISCV64LoweredAtomicAnd32, ssa.OpRISCV64LoweredAtomicOr32:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.RegTo2 = riscv.REG_ZERO
+
+ case ssa.OpRISCV64LoweredZero:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov ZERO, (Rarg0)
+ // ADD $sz, Rarg0
+ // BGEU Rarg1, Rarg0, -2(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+
+ p2 := s.Prog(riscv.AADD)
+ p2.From.Type = obj.TYPE_CONST
+ p2.From.Offset = sz
+ p2.To.Type = obj.TYPE_REG
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.ABGEU)
+ p3.To.Type = obj.TYPE_BRANCH
+ p3.Reg = v.Args[0].Reg()
+ p3.From.Type = obj.TYPE_REG
+ p3.From.Reg = v.Args[1].Reg()
+ p3.To.SetTarget(p)
+
+ case ssa.OpRISCV64LoweredMove:
+ mov, sz := largestMove(v.AuxInt)
+
+ // mov (Rarg1), T2
+ // mov T2, (Rarg0)
+ // ADD $sz, Rarg0
+ // ADD $sz, Rarg1
+ // BGEU Rarg2, Rarg0, -4(PC)
+
+ p := s.Prog(mov)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_T2
+
+ p2 := s.Prog(mov)
+ p2.From.Type = obj.TYPE_REG
+ p2.From.Reg = riscv.REG_T2
+ p2.To.Type = obj.TYPE_MEM
+ p2.To.Reg = v.Args[0].Reg()
+
+ p3 := s.Prog(riscv.AADD)
+ p3.From.Type = obj.TYPE_CONST
+ p3.From.Offset = sz
+ p3.To.Type = obj.TYPE_REG
+ p3.To.Reg = v.Args[0].Reg()
+
+ p4 := s.Prog(riscv.AADD)
+ p4.From.Type = obj.TYPE_CONST
+ p4.From.Offset = sz
+ p4.To.Type = obj.TYPE_REG
+ p4.To.Reg = v.Args[1].Reg()
+
+ p5 := s.Prog(riscv.ABGEU)
+ p5.To.Type = obj.TYPE_BRANCH
+ p5.Reg = v.Args[1].Reg()
+ p5.From.Type = obj.TYPE_REG
+ p5.From.Reg = v.Args[2].Reg()
+ p5.To.SetTarget(p)
+
+ case ssa.OpRISCV64LoweredNilCheck:
+ // Issue a load which will fault if arg is nil.
+ // TODO: optimizations. See arm and amd64 LoweredNilCheck.
+ p := s.Prog(riscv.AMOVB)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = riscv.REG_ZERO
+		if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line() == 1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpRISCV64LoweredGetClosurePtr:
+ // Closure pointer is S4 (riscv.REG_CTXT).
+ ssagen.CheckLoweredGetClosurePtr(v)
+
+ case ssa.OpRISCV64LoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(riscv.AMOV)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64LoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpRISCV64DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpRISCV64DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+
+ default:
+ v.Fatalf("Unhandled op %v", v.Op)
+ }
+}
+
+var blockBranch = [...]obj.As{
+ ssa.BlockRISCV64BEQ: riscv.ABEQ,
+ ssa.BlockRISCV64BEQZ: riscv.ABEQZ,
+ ssa.BlockRISCV64BGE: riscv.ABGE,
+ ssa.BlockRISCV64BGEU: riscv.ABGEU,
+ ssa.BlockRISCV64BGEZ: riscv.ABGEZ,
+ ssa.BlockRISCV64BGTZ: riscv.ABGTZ,
+ ssa.BlockRISCV64BLEZ: riscv.ABLEZ,
+ ssa.BlockRISCV64BLT: riscv.ABLT,
+ ssa.BlockRISCV64BLTU: riscv.ABLTU,
+ ssa.BlockRISCV64BLTZ: riscv.ABLTZ,
+ ssa.BlockRISCV64BNE: riscv.ABNE,
+ ssa.BlockRISCV64BNEZ: riscv.ABNEZ,
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ s.SetPos(b.Pos)
+
+ switch b.Kind {
+ case ssa.BlockDefer:
+ // defer returns in A0:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(riscv.ABNE)
+ p.To.Type = obj.TYPE_BRANCH
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = riscv.REG_ZERO
+ p.Reg = riscv.REG_A0
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BNEZ,
+ ssa.BlockRISCV64BLT, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BGEZ,
+ ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+
+ as := blockBranch[b.Kind]
+ invAs := riscv.InvertBranch(as)
+
+ var p *obj.Prog
+ switch next {
+ case b.Succs[0].Block():
+ p = s.Br(invAs, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ p = s.Br(as, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ p = s.Br(as, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ p = s.Br(invAs, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+
+ p.From.Type = obj.TYPE_REG
+ switch b.Kind {
+ case ssa.BlockRISCV64BEQ, ssa.BlockRISCV64BNE, ssa.BlockRISCV64BLT, ssa.BlockRISCV64BGE, ssa.BlockRISCV64BLTU, ssa.BlockRISCV64BGEU:
+ if b.NumControls() != 2 {
+ b.Fatalf("Unexpected number of controls (%d != 2): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ p.Reg = b.Controls[1].Reg()
+
+ case ssa.BlockRISCV64BEQZ, ssa.BlockRISCV64BNEZ, ssa.BlockRISCV64BGEZ, ssa.BlockRISCV64BLEZ, ssa.BlockRISCV64BLTZ, ssa.BlockRISCV64BGTZ:
+ if b.NumControls() != 1 {
+ b.Fatalf("Unexpected number of controls (%d != 1): %s", b.NumControls(), b.LongString())
+ }
+ p.From.Reg = b.Controls[0].Reg()
+ }
+
+ default:
+ b.Fatalf("Unhandled block: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/internal/s390x/galign.go b/src/cmd/compile/internal/s390x/galign.go
new file mode 100644
index 0000000..d880834
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/galign.go
@@ -0,0 +1,23 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/s390x"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &s390x.Links390x
+ arch.REGSP = s390x.REGSP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
diff --git a/src/cmd/compile/internal/s390x/ggen.go b/src/cmd/compile/internal/s390x/ggen.go
new file mode 100644
index 0000000..488a080
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ggen.go
@@ -0,0 +1,89 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/objw"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// clearLoopCutoff is the (somewhat arbitrary) value above which it is better
+// to have a loop of clear instructions (e.g. XCs) rather than just generating
+// multiple instructions (i.e. loop unrolling).
+// Must be between 256 and 4096.
+const clearLoopCutoff = 1024
+
+// zerorange clears the stack in the given range.
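+// For example, with clearLoopCutoff = 1024, a 600-byte range is cleared
+// without a loop as CLEAR 256, CLEAR 256, CLEAR 88, while a 2600-byte
+// range uses a 10-iteration loop of 256-byte CLEARs followed by a single
+// 40-byte CLEAR.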
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, _ *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+
+ // Adjust the frame to account for LR.
+ off += base.Ctxt.FixedFrameSize()
+ reg := int16(s390x.REGSP)
+
+	// If the offset cannot fit in a 12-bit unsigned displacement then we
+ // need to create a copy of the stack pointer that we can adjust.
+ // We also need to do this if we are going to loop.
+ if off < 0 || off > 4096-clearLoopCutoff || cnt > clearLoopCutoff {
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, off, obj.TYPE_REG, s390x.REGRT1, 0)
+ p.Reg = int16(s390x.REGSP)
+ reg = s390x.REGRT1
+ off = 0
+ }
+
+ // Generate a loop of large clears.
+ if cnt > clearLoopCutoff {
+ ireg := int16(s390x.REGRT2) // register holds number of remaining loop iterations
+ p = pp.Append(p, s390x.AMOVD, obj.TYPE_CONST, 0, cnt/256, obj.TYPE_REG, ireg, 0)
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, 256, obj.TYPE_MEM, reg, off)
+ pl := p
+ p = pp.Append(p, s390x.AADD, obj.TYPE_CONST, 0, 256, obj.TYPE_REG, reg, 0)
+ p = pp.Append(p, s390x.ABRCTG, obj.TYPE_REG, ireg, 0, obj.TYPE_BRANCH, 0, 0)
+ p.To.SetTarget(pl)
+ cnt = cnt % 256
+ }
+
+ // Generate remaining clear instructions without a loop.
+ for cnt > 0 {
+ n := cnt
+
+ // Can clear at most 256 bytes per instruction.
+ if n > 256 {
+ n = 256
+ }
+
+ switch n {
+ // Handle very small clears with move instructions.
+ case 8, 4, 2, 1:
+ ins := s390x.AMOVB
+ switch n {
+ case 8:
+ ins = s390x.AMOVD
+ case 4:
+ ins = s390x.AMOVW
+ case 2:
+ ins = s390x.AMOVH
+ }
+ p = pp.Append(p, ins, obj.TYPE_CONST, 0, 0, obj.TYPE_MEM, reg, off)
+
+ // Handle clears that would require multiple move instructions with CLEAR (assembled as XC).
+ default:
+ p = pp.Append(p, s390x.ACLEAR, obj.TYPE_CONST, 0, n, obj.TYPE_MEM, reg, off)
+ }
+
+ cnt -= n
+ off += n
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ return pp.Prog(s390x.ANOPH)
+}
diff --git a/src/cmd/compile/internal/s390x/ssa.go b/src/cmd/compile/internal/s390x/ssa.go
new file mode 100644
index 0000000..deb6c79
--- /dev/null
+++ b/src/cmd/compile/internal/s390x/ssa.go
@@ -0,0 +1,953 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package s390x
+
+import (
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+)
+
+// markMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.OpS390XMOVDconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad load type")
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return s390x.AFMOVS
+ case 8:
+ return s390x.AFMOVD
+ }
+ } else {
+ switch width {
+ case 1:
+ return s390x.AMOVB
+ case 2:
+ return s390x.AMOVH
+ case 4:
+ return s390x.AMOVW
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ return s390x.AFMOVD
+ } else {
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return s390x.AMOVB
+ } else {
+ return s390x.AMOVBZ
+ }
+ case 2:
+ if t.IsSigned() {
+ return s390x.AMOVH
+ } else {
+ return s390x.AMOVHZ
+ }
+ case 4:
+ if t.IsSigned() {
+ return s390x.AMOVW
+ } else {
+ return s390x.AMOVWZ
+ }
+ case 8:
+ return s390x.AMOVD
+ }
+ }
+	panic("bad move type")
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+// opregregimm emits instructions for
+// dest := src(From) op off
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregregimm(s *ssagen.State, op obj.As, dest, src int16, off int64) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = off
+ p.Reg = src
+ p.To.Reg = dest
+ p.To.Type = obj.TYPE_REG
+ return p
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpS390XSLD, ssa.OpS390XSLW,
+ ssa.OpS390XSRD, ssa.OpS390XSRW,
+ ssa.OpS390XSRAD, ssa.OpS390XSRAW,
+ ssa.OpS390XRLLG, ssa.OpS390XRLL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ if r2 == s390x.REG_R0 {
+ v.Fatalf("cannot use R0 as shift value %s", v.LongString())
+ }
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XRXSBG:
+ r2 := v.Args[1].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.SetRestArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: v.Reg()}
+ case ssa.OpS390XRISBGZ:
+ r1 := v.Reg()
+ r2 := v.Args[0].Reg()
+ i := v.Aux.(s390x.RotateParams)
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(i.Start)}
+ p.SetRestArgs([]obj.Addr{
+ {Type: obj.TYPE_CONST, Offset: int64(i.End)},
+ {Type: obj.TYPE_CONST, Offset: int64(i.Amount)},
+ {Type: obj.TYPE_REG, Reg: r2},
+ })
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: r1}
+ case ssa.OpS390XADD, ssa.OpS390XADDW,
+ ssa.OpS390XSUB, ssa.OpS390XSUBW,
+ ssa.OpS390XAND, ssa.OpS390XANDW,
+ ssa.OpS390XOR, ssa.OpS390XORW,
+ ssa.OpS390XXOR, ssa.OpS390XXORW:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r, r2)
+ if r != r1 {
+ p.Reg = r1
+ }
+ case ssa.OpS390XADDC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ if r1 == r2 {
+ r2, r3 = r3, r2
+ }
+ p := opregreg(s, v.Op.Asm(), r1, r2)
+ if r3 != r1 {
+ p.Reg = r3
+ }
+ case ssa.OpS390XSUBC:
+ r1 := v.Reg0()
+ r2 := v.Args[0].Reg()
+ r3 := v.Args[1].Reg()
+ p := opregreg(s, v.Op.Asm(), r1, r3)
+ if r1 != r2 {
+ p.Reg = r2
+ }
+ case ssa.OpS390XADDE, ssa.OpS390XSUBE:
+ r2 := v.Args[1].Reg()
+ opregreg(s, v.Op.Asm(), v.Reg0(), r2)
+ case ssa.OpS390XADDCconst:
+ r1 := v.Reg0()
+ r3 := v.Args[0].Reg()
+ i2 := int64(int16(v.AuxInt))
+ opregregimm(s, v.Op.Asm(), r1, r3, i2)
+ // 2-address opcode arithmetic
+ case ssa.OpS390XMULLD, ssa.OpS390XMULLW,
+ ssa.OpS390XMULHD, ssa.OpS390XMULHDU,
+ ssa.OpS390XFMULS, ssa.OpS390XFMUL, ssa.OpS390XFDIVS, ssa.OpS390XFDIV:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ case ssa.OpS390XFSUBS, ssa.OpS390XFSUB,
+ ssa.OpS390XFADDS, ssa.OpS390XFADD:
+ opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg())
+ case ssa.OpS390XMLGR:
+ // MLGR Rx R3 -> R2:R3
+ r0 := v.Args[0].Reg()
+ r1 := v.Args[1].Reg()
+ if r1 != s390x.REG_R3 {
+			v.Fatalf("We require the multiplicand to be stored in R3 for MLGR %s", v.LongString())
+ }
+ p := s.Prog(s390x.AMLGR)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r0
+ p.To.Reg = s390x.REG_R2
+ p.To.Type = obj.TYPE_REG
+ case ssa.OpS390XFMADD, ssa.OpS390XFMADDS,
+ ssa.OpS390XFMSUB, ssa.OpS390XFMSUBS:
+ r1 := v.Args[1].Reg()
+ r2 := v.Args[2].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XFIDBR:
+ switch v.AuxInt {
+ case 0, 1, 3, 4, 5, 6, 7:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ default:
+ v.Fatalf("invalid FIDBR mask: %v", v.AuxInt)
+ }
+ case ssa.OpS390XCPSDR:
+ p := opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+ p.Reg = v.Args[0].Reg()
+ case ssa.OpS390XDIVD, ssa.OpS390XDIVW,
+ ssa.OpS390XDIVDU, ssa.OpS390XDIVWU,
+ ssa.OpS390XMODD, ssa.OpS390XMODW,
+ ssa.OpS390XMODDU, ssa.OpS390XMODWU:
+
+ // TODO(mundaym): use the temp registers every time like x86 does with AX?
+ dividend := v.Args[0].Reg()
+ divisor := v.Args[1].Reg()
+
+		// The CPU faults upon signed overflow, which occurs when the most
+		// negative integer is divided by -1 (e.g., math.MinInt64 / -1).
+ var j *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW ||
+ v.Op == ssa.OpS390XMODD || v.Op == ssa.OpS390XMODW {
+
+ var c *obj.Prog
+ c = s.Prog(s390x.ACMP)
+ j = s.Prog(s390x.ABEQ)
+
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = divisor
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = divisor
+ p.Reg = 0
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dividend
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(s390x.ABR)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.OpS390XDIVD || v.Op == ssa.OpS390XDIVW {
+ // n * -1 = -n
+ n = s.Prog(s390x.ANEG)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ } else {
+ // n % -1 == 0
+ n = s.Prog(s390x.AXOR)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = dividend
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = dividend
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+ case ssa.OpS390XADDconst, ssa.OpS390XADDWconst:
+ opregregimm(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg(), v.AuxInt)
+ case ssa.OpS390XMULLDconst, ssa.OpS390XMULLWconst,
+ ssa.OpS390XSUBconst, ssa.OpS390XSUBWconst,
+ ssa.OpS390XANDconst, ssa.OpS390XANDWconst,
+ ssa.OpS390XORconst, ssa.OpS390XORWconst,
+ ssa.OpS390XXORconst, ssa.OpS390XXORWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XSLDconst, ssa.OpS390XSLWconst,
+ ssa.OpS390XSRDconst, ssa.OpS390XSRWconst,
+ ssa.OpS390XSRADconst, ssa.OpS390XSRAWconst,
+ ssa.OpS390XRLLconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ if r != r1 {
+ p.Reg = r1
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XMOVDaddridx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Scale = 1
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVDaddr:
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCMP, ssa.OpS390XCMPW, ssa.OpS390XCMPU, ssa.OpS390XCMPWU:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XFCMPS, ssa.OpS390XFCMP:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCMPconst, ssa.OpS390XCMPWconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.OpS390XCMPUconst, ssa.OpS390XCMPWUconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = int64(uint32(v.AuxInt))
+ case ssa.OpS390XMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XFMOVSconst, ssa.OpS390XFMOVDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.OpS390XADDWload, ssa.OpS390XADDload,
+ ssa.OpS390XMULLWload, ssa.OpS390XMULLDload,
+ ssa.OpS390XSUBWload, ssa.OpS390XSUBload,
+ ssa.OpS390XANDWload, ssa.OpS390XANDload,
+ ssa.OpS390XORWload, ssa.OpS390XORload,
+ ssa.OpS390XXORWload, ssa.OpS390XXORload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVDload,
+ ssa.OpS390XMOVWZload, ssa.OpS390XMOVHZload, ssa.OpS390XMOVBZload,
+ ssa.OpS390XMOVDBRload, ssa.OpS390XMOVWBRload, ssa.OpS390XMOVHBRload,
+ ssa.OpS390XMOVBload, ssa.OpS390XMOVHload, ssa.OpS390XMOVWload,
+ ssa.OpS390XFMOVSload, ssa.OpS390XFMOVDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBZloadidx, ssa.OpS390XMOVHZloadidx, ssa.OpS390XMOVWZloadidx,
+ ssa.OpS390XMOVBloadidx, ssa.OpS390XMOVHloadidx, ssa.OpS390XMOVWloadidx, ssa.OpS390XMOVDloadidx,
+ ssa.OpS390XMOVHBRloadidx, ssa.OpS390XMOVWBRloadidx, ssa.OpS390XMOVDBRloadidx,
+ ssa.OpS390XFMOVSloadidx, ssa.OpS390XFMOVDloadidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Scale = 1
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XMOVBstore, ssa.OpS390XMOVHstore, ssa.OpS390XMOVWstore, ssa.OpS390XMOVDstore,
+ ssa.OpS390XMOVHBRstore, ssa.OpS390XMOVWBRstore, ssa.OpS390XMOVDBRstore,
+ ssa.OpS390XFMOVSstore, ssa.OpS390XFMOVDstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XMOVBstoreidx, ssa.OpS390XMOVHstoreidx, ssa.OpS390XMOVWstoreidx, ssa.OpS390XMOVDstoreidx,
+ ssa.OpS390XMOVHBRstoreidx, ssa.OpS390XMOVWBRstoreidx, ssa.OpS390XMOVDBRstoreidx,
+ ssa.OpS390XFMOVSstoreidx, ssa.OpS390XFMOVDstoreidx:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ if i == s390x.REGSP {
+ r, i = i, r
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Scale = 1
+ p.To.Index = i
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XMOVDstoreconst, ssa.OpS390XMOVWstoreconst, ssa.OpS390XMOVHstoreconst, ssa.OpS390XMOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpS390XMOVBreg, ssa.OpS390XMOVHreg, ssa.OpS390XMOVWreg,
+ ssa.OpS390XMOVBZreg, ssa.OpS390XMOVHZreg, ssa.OpS390XMOVWZreg,
+ ssa.OpS390XLDGR, ssa.OpS390XLGDR,
+ ssa.OpS390XCEFBRA, ssa.OpS390XCDFBRA, ssa.OpS390XCEGBRA, ssa.OpS390XCDGBRA,
+ ssa.OpS390XCFEBRA, ssa.OpS390XCFDBRA, ssa.OpS390XCGEBRA, ssa.OpS390XCGDBRA,
+ ssa.OpS390XCELFBR, ssa.OpS390XCDLFBR, ssa.OpS390XCELGBR, ssa.OpS390XCDLGBR,
+ ssa.OpS390XCLFEBR, ssa.OpS390XCLFDBR, ssa.OpS390XCLGEBR, ssa.OpS390XCLGDBR,
+ ssa.OpS390XLDEBR, ssa.OpS390XLEDBR,
+ ssa.OpS390XFNEG, ssa.OpS390XFNEGS,
+ ssa.OpS390XLPDFR, ssa.OpS390XLNDFR:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.OpS390XCLEAR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.OpCopy:
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.OpS390XLoweredGetClosurePtr:
+ // Closure pointer is R12 (already)
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.OpS390XLoweredRound32F, ssa.OpS390XLoweredRound64F:
+ // input is already rounded
+ case ssa.OpS390XLoweredGetG:
+ r := v.Reg()
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = s390x.REGG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.OpS390XLoweredGetCallerSP:
+ // caller's SP is FixedFrameSize below the address of the first arg
+ p := s.Prog(s390x.AMOVD)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize()
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLoweredGetCallerPC:
+ p := s.Prog(obj.AGETCALLERPC)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XCALLstatic, ssa.OpS390XCALLclosure, ssa.OpS390XCALLinter:
+ s.Call(v)
+ case ssa.OpS390XCALLtail:
+ s.TailCall(v)
+ case ssa.OpS390XLoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+ case ssa.OpS390XLoweredPanicBoundsA, ssa.OpS390XLoweredPanicBoundsB, ssa.OpS390XLoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(16) // space used in callee args area by assembly stubs
+ case ssa.OpS390XFLOGR, ssa.OpS390XPOPCNT,
+ ssa.OpS390XNEG, ssa.OpS390XNEGW,
+ ssa.OpS390XMOVWBR, ssa.OpS390XMOVDBR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XNOT, ssa.OpS390XNOTW:
+ v.Fatalf("NOT/NOTW generated %s", v.LongString())
+ case ssa.OpS390XSumBytes2, ssa.OpS390XSumBytes4, ssa.OpS390XSumBytes8:
+ v.Fatalf("SumBytes generated %s", v.LongString())
+ case ssa.OpS390XLOCGR:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(v.Aux.(s390x.CCMask))
+ p.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XFSQRTS, ssa.OpS390XFSQRT:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.OpS390XLTDBR, ssa.OpS390XLTEBR:
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[0].Reg())
+ case ssa.OpS390XInvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XFlagEQ, ssa.OpS390XFlagLT, ssa.OpS390XFlagGT, ssa.OpS390XFlagOV:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XAddTupleFirst32, ssa.OpS390XAddTupleFirst64:
+ v.Fatalf("AddTupleFirst* should never make it to codegen %v", v.LongString())
+ case ssa.OpS390XLoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ p := s.Prog(s390x.AMOVBZ)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = s390x.REGTMP
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpS390XMVC:
+ vo := v.AuxValAndOff()
+ p := s.Prog(s390x.AMVC)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = vo.Val64()
+ p.SetFrom3(obj.Addr{
+ Type: obj.TYPE_MEM,
+ Reg: v.Args[1].Reg(),
+ Offset: vo.Off64(),
+ })
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Offset = vo.Off64()
+ case ssa.OpS390XSTMG2, ssa.OpS390XSTMG3, ssa.OpS390XSTMG4,
+ ssa.OpS390XSTM2, ssa.OpS390XSTM3, ssa.OpS390XSTM4:
+ for i := 2; i < len(v.Args)-1; i++ {
+ if v.Args[i].Reg() != v.Args[i-1].Reg()+1 {
+ v.Fatalf("invalid store multiple %s", v.LongString())
+ }
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.Reg = v.Args[len(v.Args)-2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredMove:
+ // Inputs must be valid pointers to memory,
+ // so adjust arg0 and arg1 as part of the expansion.
+ // arg2 should be src+size,
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+		//      CMPU R2, Rarg2
+		//      BLT  mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ // arg2 is the last address to move in the loop + 256
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = 256
+ mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+
+ for i := 0; i < 2; i++ {
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[i].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[i].Reg()
+ }
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[1].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[2].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(mvc)
+
+ if v.AuxInt > 0 {
+ mvc := s.Prog(s390x.AMVC)
+ mvc.From.Type = obj.TYPE_CONST
+ mvc.From.Offset = v.AuxInt
+ mvc.SetFrom3(obj.Addr{Type: obj.TYPE_MEM, Reg: v.Args[1].Reg()})
+ mvc.To.Type = obj.TYPE_MEM
+ mvc.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XLoweredZero:
+		// Input must be a valid pointer to memory,
+ // so adjust arg0 as part of the expansion.
+ // arg1 should be src+size,
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+		//        CMPU R1, Rarg1
+		//        BLT  clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ // arg1 is the last address to zero in the loop + 256
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = 256
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_ADDR
+ movd.From.Reg = v.Args[0].Reg()
+ movd.From.Offset = 256
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Args[0].Reg()
+
+ cmpu := s.Prog(s390x.ACMPU)
+ cmpu.From.Reg = v.Args[0].Reg()
+ cmpu.From.Type = obj.TYPE_REG
+ cmpu.To.Reg = v.Args[1].Reg()
+ cmpu.To.Type = obj.TYPE_REG
+
+ bne := s.Prog(s390x.ABLT)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(clear)
+
+ if v.AuxInt > 0 {
+ clear := s.Prog(s390x.ACLEAR)
+ clear.From.Type = obj.TYPE_CONST
+ clear.From.Offset = v.AuxInt
+ clear.To.Type = obj.TYPE_MEM
+ clear.To.Reg = v.Args[0].Reg()
+ }
+ case ssa.OpS390XMOVBZatomicload, ssa.OpS390XMOVWZatomicload, ssa.OpS390XMOVDatomicload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+ case ssa.OpS390XMOVBatomicstore, ssa.OpS390XMOVWatomicstore, ssa.OpS390XMOVDatomicstore:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLAN, ssa.OpS390XLAO:
+ // LA(N|O) Ry, TMP, 0(Rx)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = s390x.REGTMP
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = v.Args[0].Reg()
+ case ssa.OpS390XLANfloor, ssa.OpS390XLAOfloor:
+ r := v.Args[0].Reg() // clobbered, assumed R1 in comments
+
+ // Round ptr down to nearest multiple of 4.
+ // ANDW $~3, R1
+ ptr := s.Prog(s390x.AANDW)
+ ptr.From.Type = obj.TYPE_CONST
+ ptr.From.Offset = 0xfffffffc
+ ptr.To.Type = obj.TYPE_REG
+ ptr.To.Reg = r
+
+ // Redirect output of LA(N|O) into R1 since it is clobbered anyway.
+ // LA(N|O) Rx, R1, 0(R1)
+ op := s.Prog(v.Op.Asm())
+ op.From.Type = obj.TYPE_REG
+ op.From.Reg = v.Args[1].Reg()
+ op.Reg = r
+ op.To.Type = obj.TYPE_MEM
+ op.To.Reg = r
+ case ssa.OpS390XLAA, ssa.OpS390XLAAG:
+ p := s.Prog(v.Op.Asm())
+ p.Reg = v.Reg0()
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpS390XLoweredAtomicCas32, ssa.OpS390XLoweredAtomicCas64:
+ // Convert the flags output of CS{,G} into a bool.
+ // CS{,G} arg1, arg2, arg0
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // NOP (so the BNE has somewhere to land)
+
+ // CS{,G} arg1, arg2, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Args[1].Reg() // old
+ cs.Reg = v.Args[2].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&cs.To, v)
+
+ // MOVD $0, ret
+ movd := s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 0
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // BNE 2(PC)
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+
+ // MOVD $1, ret
+ movd = s.Prog(s390x.AMOVD)
+ movd.From.Type = obj.TYPE_CONST
+ movd.From.Offset = 1
+ movd.To.Type = obj.TYPE_REG
+ movd.To.Reg = v.Reg0()
+
+ // NOP (so the BNE has somewhere to land)
+ nop := s.Prog(obj.ANOP)
+ bne.To.SetTarget(nop)
+ case ssa.OpS390XLoweredAtomicExchange32, ssa.OpS390XLoweredAtomicExchange64:
+ // Loop until the CS{,G} succeeds.
+ // MOV{WZ,D} arg0, ret
+ // cs: CS{,G} ret, arg1, arg0
+ // BNE cs
+
+ // MOV{WZ,D} arg0, ret
+ load := s.Prog(loadByType(v.Type.FieldType(0)))
+ load.From.Type = obj.TYPE_MEM
+ load.From.Reg = v.Args[0].Reg()
+ load.To.Type = obj.TYPE_REG
+ load.To.Reg = v.Reg0()
+ ssagen.AddAux(&load.From, v)
+
+ // CS{,G} ret, arg1, arg0
+ cs := s.Prog(v.Op.Asm())
+ cs.From.Type = obj.TYPE_REG
+ cs.From.Reg = v.Reg0() // old
+ cs.Reg = v.Args[1].Reg() // new
+ cs.To.Type = obj.TYPE_MEM
+ cs.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&cs.To, v)
+
+ // BNE cs
+ bne := s.Prog(s390x.ABNE)
+ bne.To.Type = obj.TYPE_BRANCH
+ bne.To.SetTarget(cs)
+ case ssa.OpS390XSYNC:
+ s.Prog(s390x.ASYNC)
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
+func blockAsm(b *ssa.Block) obj.As {
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ return s390x.ABRC
+ case ssa.BlockS390XCRJ:
+ return s390x.ACRJ
+ case ssa.BlockS390XCGRJ:
+ return s390x.ACGRJ
+ case ssa.BlockS390XCLRJ:
+ return s390x.ACLRJ
+ case ssa.BlockS390XCLGRJ:
+ return s390x.ACLGRJ
+ case ssa.BlockS390XCIJ:
+ return s390x.ACIJ
+ case ssa.BlockS390XCGIJ:
+ return s390x.ACGIJ
+ case ssa.BlockS390XCLIJ:
+ return s390x.ACLIJ
+ case ssa.BlockS390XCLGIJ:
+ return s390x.ACLGIJ
+ }
+ b.Fatalf("blockAsm not implemented: %s", b.LongString())
+ panic("unreachable")
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ // Handle generic blocks first.
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(s390x.ABR)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ return
+ case ssa.BlockDefer:
+ // defer returns in R3:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Br(s390x.ACIJ, b.Succs[1].Block())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(s390x.NotEqual & s390x.NotUnordered) // unordered is not possible
+ p.Reg = s390x.REG_R3
+ p.SetFrom3Const(0)
+ if b.Succs[0].Block() != next {
+ s.Br(s390x.ABR, b.Succs[0].Block())
+ }
+ return
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ return
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+ return
+ }
+
+ // Handle s390x-specific blocks. These blocks all have a
+ // condition code mask in the Aux value and 2 successors.
+ succs := [...]*ssa.Block{b.Succs[0].Block(), b.Succs[1].Block()}
+ mask := b.Aux.(s390x.CCMask)
+
+ // TODO: take into account Likely property for forward/backward
+ // branches. We currently can't do this because we don't know
+ // whether a block has already been emitted. In general forward
+ // branches are assumed 'not taken' and backward branches are
+ // assumed 'taken'.
+ if next == succs[0] {
+ succs[0], succs[1] = succs[1], succs[0]
+ mask = mask.Inverse()
+ }
+
+ p := s.Br(blockAsm(b), succs[0])
+ switch b.Kind {
+ case ssa.BlockS390XBRC:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask)
+ case ssa.BlockS390XCGRJ, ssa.BlockS390XCRJ,
+ ssa.BlockS390XCLGRJ, ssa.BlockS390XCLRJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3Reg(b.Controls[1].Reg())
+ case ssa.BlockS390XCGIJ, ssa.BlockS390XCIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3Const(int64(int8(b.AuxInt)))
+ case ssa.BlockS390XCLGIJ, ssa.BlockS390XCLIJ:
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = int64(mask & s390x.NotUnordered) // unordered is not possible
+ p.Reg = b.Controls[0].Reg()
+ p.SetFrom3Const(int64(uint8(b.AuxInt)))
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+ if next != succs[1] {
+ s.Br(s390x.ABR, succs[1])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/README.md b/src/cmd/compile/internal/ssa/README.md
new file mode 100644
index 0000000..833bf1d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/README.md
@@ -0,0 +1,222 @@
+<!---
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+-->
+
+## Introduction to the Go compiler's SSA backend
+
+This package contains the compiler's Static Single Assignment form component. If
+you're not familiar with SSA, its [Wikipedia
+article](https://en.wikipedia.org/wiki/Static_single_assignment_form) is a good
+starting point.
+
+It is recommended that you first read [cmd/compile/README.md](../../README.md)
+if you are not already familiar with the Go compiler. That document gives an
+overview of the compiler and explains SSA's role and purpose within it.
+
+### Key concepts
+
+The names described below may be loosely related to their Go counterparts, but
+note that they are not equivalent. For example, a Go block statement has a
+variable scope, yet SSA has no notion of variables or variable scopes.
+
+It may also be surprising that values and blocks are named after their unique
+sequential IDs. They rarely correspond to named entities in the original code,
+such as variables or function parameters. The sequential IDs also allow the
+compiler to avoid maps, and it is always possible to trace the values back to
+the original Go code using debug and position information.
+
+#### Values
+
+Values are the basic building blocks of SSA. Per SSA's very definition, a
+value is defined exactly once, but it may be used any number of times. A value
+mainly consists of a unique identifier, an operator, a type, and some arguments.
+
+An operator or `Op` describes the operation that computes the value. The
+semantics of each operator can be found in `gen/*Ops.go`. For example, `OpAdd8`
+takes two value arguments holding 8-bit integers and results in their addition.
+Here is a possible SSA representation of the addition of two `uint8` values:
+
+ // var c uint8 = a + b
+ v4 = Add8 <uint8> v2 v3
+
+A value's type will usually be a Go type. For example, the value in the example
+above has a `uint8` type, and a constant boolean value will have a `bool` type.
+However, certain types don't come from Go and are special; below we will cover
+`memory`, the most common of them.
+
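+As an abridged illustration only (the real declaration has more fields), a value
+carries roughly the following:
+
+    // illustrative sketch; not the full declaration
+    type Value struct {
+        ID     ID          // unique identifier
+        Op     Op          // operation that computes this value
+        Type   *types.Type // type of the value, usually a Go type
+        AuxInt int64       // auxiliary integer info; meaning depends on Op
+        Aux    Aux         // auxiliary info; meaning depends on Op
+        Args   []*Value    // arguments, which are themselves values
+        Block  *Block      // basic block this value belongs to
+    }
+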
+See [value.go](value.go) for more information.
+
+#### Memory types
+
+`memory` represents the global memory state. An `Op` that takes a memory
+argument depends on that memory state, and an `Op` that produces a value of
+memory type updates that state. This ensures that memory operations are kept in the
+right order. For example:
+
+ // *a = 3
+ // *b = *a
+ v10 = Store <mem> {int} v6 v8 v1
+ v14 = Store <mem> {int} v7 v8 v10
+
+Here, `Store` stores its second argument (of type `int`) into the first argument
+(of type `*int`). The last argument is the memory state; since the second store
+depends on the memory value defined by the first store, the two stores cannot be
+reordered.
+
+See [cmd/compile/internal/types/type.go](../types/type.go) for more information.
+
+#### Blocks
+
+A block represents a basic block in the control flow graph of a function. It is,
+essentially, a list of values that define the operation of this block. Besides
+the list of values, blocks mainly consist of a unique identifier, a kind, and a
+list of successor blocks.
+
+The simplest kind is a `plain` block; it simply hands control flow to another
+block, so its successors list contains exactly one block.
+
+Another common block kind is the `exit` block. These have a final value, called
+the control value, which must be a memory state. This is necessary, for example,
+for functions to return values: the caller needs some memory state to depend on,
+to ensure that it receives those return values correctly.
+
+The last important block kind we will mention is the `if` block. It has a single
+control value that must be a boolean value, and it has exactly two successor
+blocks. The control flow is handed to the first successor if the bool is true,
+and to the second otherwise.
+
+Here is a sample if-else control flow represented with basic blocks:
+
+ // func(b bool) int {
+ // if b {
+ // return 2
+ // }
+ // return 3
+ // }
+ b1:
+ v1 = InitMem <mem>
+ v2 = SP <uintptr>
+ v5 = Addr <*int> {~r1} v2
+ v6 = Arg <bool> {b}
+ v8 = Const64 <int> [2]
+ v12 = Const64 <int> [3]
+ If v6 -> b2 b3
+ b2: <- b1
+ v10 = VarDef <mem> {~r1} v1
+ v11 = Store <mem> {int} v5 v8 v10
+ Ret v11
+ b3: <- b1
+ v14 = VarDef <mem> {~r1} v1
+ v15 = Store <mem> {int} v5 v12 v14
+ Ret v15
+
+<!---
+TODO: can we come up with a shorter example that still shows the control flow?
+-->
+
+See [block.go](block.go) for more information.
+
+#### Functions
+
+A function represents a function declaration along with its body. It mainly
+consists of a name, a type (its signature), a list of blocks that form its body,
+and the entry block within said list.
+
+When a function is called, the control flow is handed to its entry block. If the
+function terminates, the control flow will eventually reach an exit block, thus
+ending the function call.
+
+Note that a function may have zero or more exit blocks, just like a Go function
+can have any number of return points, but it must have exactly one entry block.
+
+Also note that some SSA functions are autogenerated, such as the hash functions
+for each type used as a map key.
+
+For example, this is what an empty function can look like in SSA, with a single
+exit block that returns an uninteresting memory state:
+
+ foo func()
+ b1:
+ v1 = InitMem <mem>
+ Ret v1
+
+See [func.go](func.go) for more information.
+
+### Compiler passes
+
+Having a program in SSA form is not very useful on its own. Its advantage lies
+in how easy it is to write optimizations that modify the program to make it
+better. The way the Go compiler accomplishes this is via a list of passes.
+
+Each pass transforms an SSA function in some way. For example, a dead code
+elimination pass will remove blocks and values that it can prove will never be
+executed, and a nil check elimination pass will remove nil checks which it can
+prove to be redundant.
+
+Compiler passes work on one function at a time, and by default run sequentially
+and exactly once.
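+
+As a rough illustration only (this is not actual compiler source), a pass is
+typically a function that takes a `*Func` and walks its blocks and values,
+inspecting or rewriting them in place:
+
+    // hypothetical pass sketch; Func, Block and Value are this package's types
+    func countValues(f *Func) int {
+        n := 0
+        for _, b := range f.Blocks {
+            for _, v := range b.Values {
+                _ = v.Op // a real pass would inspect or rewrite v here
+                n++
+            }
+        }
+        return n
+    }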
+
+The `lower` pass is special; it converts the SSA representation from being
+machine-independent to being machine-dependent. That is, some abstract operators
+are replaced with their non-generic counterparts, potentially reducing or
+increasing the final number of values.
+
+<!---
+TODO: Probably explain here why the ordering of the passes matters, and why some
+passes like deadstore have multiple variants at different stages.
+-->
+
+See the `passes` list defined in [compile.go](compile.go) for more information.
+
+### Playing with SSA
+
+A good way to see and get used to the compiler's SSA in action is via
+`GOSSAFUNC`. For example, to see func `Foo`'s initial SSA form and final
+generated assembly, one can run:
+
+ GOSSAFUNC=Foo go build
+
+The generated `ssa.html` file will also contain the SSA form of the function
+after each of the compile passes, making it easy to see what each pass does to a
+particular program. You can also click on values and blocks to highlight them,
+which helps to follow the control flow and values.
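+
+For instance, assuming a trivial package like the following (the function name
+and body are arbitrary, chosen only for illustration):
+
+    // foo.go - a hypothetical example to inspect with GOSSAFUNC
+    package main
+
+    import "fmt"
+
+    func Foo(a, b int) int {
+        return a*2 + b
+    }
+
+    func main() {
+        fmt.Println(Foo(1, 2))
+    }
+
+Running `GOSSAFUNC=Foo go build` in that directory then produces the `ssa.html`
+described above for `Foo`.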
+
+The value specified in GOSSAFUNC can also be a package-qualified function
+name, e.g.
+
+ GOSSAFUNC=blah.Foo go build
+
+This will match any function named "Foo" within a package whose final
+suffix is "blah" (e.g. something/blah.Foo, anotherthing/extra/blah.Foo).
+
+If non-HTML dumps are needed, append a "+" to the GOSSAFUNC value
+and dumps will be written to stdout:
+
+ GOSSAFUNC=Bar+ go build
+
+<!---
+TODO: need more ideas for this section
+-->
+
+### Hacking on SSA
+
+While most compiler passes are implemented directly in Go code, some others are
+code generated. This is currently done via rewrite rules, which have their own
+syntax and are maintained in `gen/*.rules`. Simpler optimizations can be written
+easily and quickly this way, but rewrite rules are not suitable for more complex
+optimizations.
+
+To read more on rewrite rules, have a look at the top comments in
+[gen/generic.rules](gen/generic.rules) and [gen/rulegen.go](gen/rulegen.go).
+
+Similarly, the code to manage operators is also code generated from
+`gen/*Ops.go`, as it is easier to maintain a few tables than a lot of code.
+After changing the rules or operators, see [gen/README](gen/README) for
+instructions on how to generate the Go code again.
+
+<!---
+TODO: more tips and info could likely go here
+-->
diff --git a/src/cmd/compile/internal/ssa/TODO b/src/cmd/compile/internal/ssa/TODO
new file mode 100644
index 0000000..f4e4382
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/TODO
@@ -0,0 +1,24 @@
+This is a list of possible improvements to the SSA pass of the compiler.
+
+Optimizations (better compiled code)
+------------------------------------
+- Reduce register pressure in scheduler
+- Make dead store pass inter-block
+- If there are a lot of MOVQ $0, ..., then load
+ 0 into a register and use the register as the source instead.
+- Allow large structs to be SSAable (issue 24416)
+- Allow arrays of length >1 to be SSAable
+- If strings are being passed around without being interpreted (ptr
+ and len fields being accessed) pass them in xmm registers?
+ Same for interfaces?
+- any pointer generated by unsafe arithmetic must be non-nil?
+ (Of course that may not be true in general, but it is for all uses
+ in the runtime, and we can play games with unsafe.)
+
+Optimizations (better compiler)
+-------------------------------
+- Handle signed division overflow and sign extension earlier
+
+Regalloc
+--------
+- Make liveness analysis non-quadratic
diff --git a/src/cmd/compile/internal/ssa/addressingmodes.go b/src/cmd/compile/internal/ssa/addressingmodes.go
new file mode 100644
index 0000000..1baf143
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/addressingmodes.go
@@ -0,0 +1,460 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// addressingModes combines address calculations into memory operations
+// that can perform complicated addressing modes.
+func addressingModes(f *Func) {
+ isInImmediateRange := is32Bit
+ switch f.Config.arch {
+ default:
+ // Most architectures can't do this.
+ return
+ case "amd64", "386":
+ case "s390x":
+ isInImmediateRange = is20Bit
+ }
+
+ var tmp []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !combineFirst[v.Op] {
+ continue
+ }
+ // All matched operations have the pointer in arg[0].
+ // All results have the pointer in arg[0] and the index in arg[1].
+ // *Except* for operations which update a register,
+ // which are marked with resultInArg0. Those have
+ // the pointer in arg[1], and the corresponding result op
+ // has the pointer in arg[1] and the index in arg[2].
+ ptrIndex := 0
+ if opcodeTable[v.Op].resultInArg0 {
+ ptrIndex = 1
+ }
+ p := v.Args[ptrIndex]
+ c, ok := combine[[2]Op{v.Op, p.Op}]
+ if !ok {
+ continue
+ }
+ // See if we can combine the Aux/AuxInt values.
+ switch [2]auxType{opcodeTable[v.Op].auxType, opcodeTable[p.Op].auxType} {
+ case [2]auxType{auxSymOff, auxInt32}:
+ // TODO: introduce auxSymOff32
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymOff, auxSymOff}:
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !isInImmediateRange(v.AuxInt + p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt += p.AuxInt
+ case [2]auxType{auxSymValAndOff, auxInt32}:
+ vo := ValAndOff(v.AuxInt)
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymValAndOff, auxSymOff}:
+ vo := ValAndOff(v.AuxInt)
+ if v.Aux != nil && p.Aux != nil {
+ continue
+ }
+ if !vo.canAdd64(p.AuxInt) {
+ continue
+ }
+ if p.Aux != nil {
+ v.Aux = p.Aux
+ }
+ v.AuxInt = int64(vo.addOffset64(p.AuxInt))
+ case [2]auxType{auxSymOff, auxNone}:
+ // nothing to do
+ case [2]auxType{auxSymValAndOff, auxNone}:
+ // nothing to do
+ default:
+ f.Fatalf("unknown aux combining for %s and %s\n", v.Op, p.Op)
+ }
+ // Combine the operations.
+ tmp = append(tmp[:0], v.Args[:ptrIndex]...)
+ tmp = append(tmp, p.Args...)
+ tmp = append(tmp, v.Args[ptrIndex+1:]...)
+ v.resetArgs()
+ v.Op = c
+ v.AddArgs(tmp...)
+ if needSplit[c] {
+ // It turns out that some of the combined instructions have faster two-instruction equivalents,
+ // but not the two instructions that led to them being combined here. For example
+ // (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+ // The final pair of instructions turns out to be notably faster, at least in some benchmarks.
+ f.Config.splitLoad(v)
+ }
+ }
+ }
+}
+
+// combineFirst contains ops which appear in combine as the
+// first part of the key.
+var combineFirst = map[Op]bool{}
+
+func init() {
+ for k := range combine {
+ combineFirst[k[0]] = true
+ }
+}
+
+// needSplit contains instructions that should be postprocessed by splitLoad
+// into a more-efficient two-instruction form.
+var needSplit = map[Op]bool{
+ OpAMD64CMPBloadidx1: true,
+ OpAMD64CMPWloadidx1: true,
+ OpAMD64CMPLloadidx1: true,
+ OpAMD64CMPQloadidx1: true,
+ OpAMD64CMPWloadidx2: true,
+ OpAMD64CMPLloadidx4: true,
+ OpAMD64CMPQloadidx8: true,
+
+ OpAMD64CMPBconstloadidx1: true,
+ OpAMD64CMPWconstloadidx1: true,
+ OpAMD64CMPLconstloadidx1: true,
+ OpAMD64CMPQconstloadidx1: true,
+ OpAMD64CMPWconstloadidx2: true,
+ OpAMD64CMPLconstloadidx4: true,
+ OpAMD64CMPQconstloadidx8: true,
+}
+
+// For each entry k, v in this map, if we have a value x with:
+// x.Op == k[0]
+// x.Args[0].Op == k[1]
+// then we can set x.Op to v and set x.Args like this:
+// x.Args[0].Args + x.Args[1:]
+// Additionally, the Aux/AuxInt from x.Args[0] is merged into x.
+var combine = map[[2]Op]Op{
+ // amd64
+ [2]Op{OpAMD64MOVBload, OpAMD64ADDQ}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64ADDQ}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64ADDQ}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64ADDQ}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64ADDQ}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64ADDQ}: OpAMD64MOVSDloadidx1,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64ADDQ}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64ADDQ}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64ADDQ}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64ADDQ}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64ADDQ}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64ADDQ}: OpAMD64MOVSDstoreidx1,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64ADDQ}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64ADDQ}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64ADDQ}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64ADDQ}: OpAMD64MOVQstoreconstidx1,
+
+ [2]Op{OpAMD64MOVBload, OpAMD64LEAQ1}: OpAMD64MOVBloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ1}: OpAMD64MOVWloadidx1,
+ [2]Op{OpAMD64MOVWload, OpAMD64LEAQ2}: OpAMD64MOVWloadidx2,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ1}: OpAMD64MOVLloadidx1,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ4}: OpAMD64MOVLloadidx4,
+ [2]Op{OpAMD64MOVLload, OpAMD64LEAQ8}: OpAMD64MOVLloadidx8,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ1}: OpAMD64MOVQloadidx1,
+ [2]Op{OpAMD64MOVQload, OpAMD64LEAQ8}: OpAMD64MOVQloadidx8,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ1}: OpAMD64MOVSSloadidx1,
+ [2]Op{OpAMD64MOVSSload, OpAMD64LEAQ4}: OpAMD64MOVSSloadidx4,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ1}: OpAMD64MOVSDloadidx1,
+ [2]Op{OpAMD64MOVSDload, OpAMD64LEAQ8}: OpAMD64MOVSDloadidx8,
+
+ [2]Op{OpAMD64MOVBstore, OpAMD64LEAQ1}: OpAMD64MOVBstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ1}: OpAMD64MOVWstoreidx1,
+ [2]Op{OpAMD64MOVWstore, OpAMD64LEAQ2}: OpAMD64MOVWstoreidx2,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ1}: OpAMD64MOVLstoreidx1,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ4}: OpAMD64MOVLstoreidx4,
+ [2]Op{OpAMD64MOVLstore, OpAMD64LEAQ8}: OpAMD64MOVLstoreidx8,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ1}: OpAMD64MOVQstoreidx1,
+ [2]Op{OpAMD64MOVQstore, OpAMD64LEAQ8}: OpAMD64MOVQstoreidx8,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ1}: OpAMD64MOVSSstoreidx1,
+ [2]Op{OpAMD64MOVSSstore, OpAMD64LEAQ4}: OpAMD64MOVSSstoreidx4,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ1}: OpAMD64MOVSDstoreidx1,
+ [2]Op{OpAMD64MOVSDstore, OpAMD64LEAQ8}: OpAMD64MOVSDstoreidx8,
+
+ [2]Op{OpAMD64MOVBstoreconst, OpAMD64LEAQ1}: OpAMD64MOVBstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ1}: OpAMD64MOVWstoreconstidx1,
+ [2]Op{OpAMD64MOVWstoreconst, OpAMD64LEAQ2}: OpAMD64MOVWstoreconstidx2,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ1}: OpAMD64MOVLstoreconstidx1,
+ [2]Op{OpAMD64MOVLstoreconst, OpAMD64LEAQ4}: OpAMD64MOVLstoreconstidx4,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ1}: OpAMD64MOVQstoreconstidx1,
+ [2]Op{OpAMD64MOVQstoreconst, OpAMD64LEAQ8}: OpAMD64MOVQstoreconstidx8,
+
+ // These instructions are re-split differently for performance, see needSplit above.
+ // TODO if 386 versions are created, also update needSplit and gen/386splitload.rules
+ [2]Op{OpAMD64CMPBload, OpAMD64ADDQ}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64ADDQ}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64ADDQ}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64ADDQ}: OpAMD64CMPQloadidx1,
+
+ [2]Op{OpAMD64CMPBload, OpAMD64LEAQ1}: OpAMD64CMPBloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ1}: OpAMD64CMPWloadidx1,
+ [2]Op{OpAMD64CMPWload, OpAMD64LEAQ2}: OpAMD64CMPWloadidx2,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ1}: OpAMD64CMPLloadidx1,
+ [2]Op{OpAMD64CMPLload, OpAMD64LEAQ4}: OpAMD64CMPLloadidx4,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ1}: OpAMD64CMPQloadidx1,
+ [2]Op{OpAMD64CMPQload, OpAMD64LEAQ8}: OpAMD64CMPQloadidx8,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64ADDQ}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64ADDQ}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64ADDQ}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64ADDQ}: OpAMD64CMPQconstloadidx1,
+
+ [2]Op{OpAMD64CMPBconstload, OpAMD64LEAQ1}: OpAMD64CMPBconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ1}: OpAMD64CMPWconstloadidx1,
+ [2]Op{OpAMD64CMPWconstload, OpAMD64LEAQ2}: OpAMD64CMPWconstloadidx2,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ1}: OpAMD64CMPLconstloadidx1,
+ [2]Op{OpAMD64CMPLconstload, OpAMD64LEAQ4}: OpAMD64CMPLconstloadidx4,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ1}: OpAMD64CMPQconstloadidx1,
+ [2]Op{OpAMD64CMPQconstload, OpAMD64LEAQ8}: OpAMD64CMPQconstloadidx8,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64ADDQ}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64ADDQ}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64ADDQ}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64ADDQ}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64ADDQ}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64ADDQ}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64ADDQ}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64ADDQ}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64ADDQ}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64ADDQ}: OpAMD64XORQloadidx1,
+
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ1}: OpAMD64ADDLloadidx1,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ4}: OpAMD64ADDLloadidx4,
+ [2]Op{OpAMD64ADDLload, OpAMD64LEAQ8}: OpAMD64ADDLloadidx8,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ1}: OpAMD64ADDQloadidx1,
+ [2]Op{OpAMD64ADDQload, OpAMD64LEAQ8}: OpAMD64ADDQloadidx8,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ1}: OpAMD64SUBLloadidx1,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ4}: OpAMD64SUBLloadidx4,
+ [2]Op{OpAMD64SUBLload, OpAMD64LEAQ8}: OpAMD64SUBLloadidx8,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ1}: OpAMD64SUBQloadidx1,
+ [2]Op{OpAMD64SUBQload, OpAMD64LEAQ8}: OpAMD64SUBQloadidx8,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ1}: OpAMD64ANDLloadidx1,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ4}: OpAMD64ANDLloadidx4,
+ [2]Op{OpAMD64ANDLload, OpAMD64LEAQ8}: OpAMD64ANDLloadidx8,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ1}: OpAMD64ANDQloadidx1,
+ [2]Op{OpAMD64ANDQload, OpAMD64LEAQ8}: OpAMD64ANDQloadidx8,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ1}: OpAMD64ORLloadidx1,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ4}: OpAMD64ORLloadidx4,
+ [2]Op{OpAMD64ORLload, OpAMD64LEAQ8}: OpAMD64ORLloadidx8,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ1}: OpAMD64ORQloadidx1,
+ [2]Op{OpAMD64ORQload, OpAMD64LEAQ8}: OpAMD64ORQloadidx8,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ1}: OpAMD64XORLloadidx1,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ4}: OpAMD64XORLloadidx4,
+ [2]Op{OpAMD64XORLload, OpAMD64LEAQ8}: OpAMD64XORLloadidx8,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ1}: OpAMD64XORQloadidx1,
+ [2]Op{OpAMD64XORQload, OpAMD64LEAQ8}: OpAMD64XORQloadidx8,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64ADDQ}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64ADDQ}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64ADDQ}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64ADDQ}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64ADDQ}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64ADDQ}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64ADDQ}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64ADDQ}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64ADDQ}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64ADDQ}: OpAMD64XORQmodifyidx1,
+
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ1}: OpAMD64ADDLmodifyidx1,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ4}: OpAMD64ADDLmodifyidx4,
+ [2]Op{OpAMD64ADDLmodify, OpAMD64LEAQ8}: OpAMD64ADDLmodifyidx8,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ1}: OpAMD64ADDQmodifyidx1,
+ [2]Op{OpAMD64ADDQmodify, OpAMD64LEAQ8}: OpAMD64ADDQmodifyidx8,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ1}: OpAMD64SUBLmodifyidx1,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ4}: OpAMD64SUBLmodifyidx4,
+ [2]Op{OpAMD64SUBLmodify, OpAMD64LEAQ8}: OpAMD64SUBLmodifyidx8,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ1}: OpAMD64SUBQmodifyidx1,
+ [2]Op{OpAMD64SUBQmodify, OpAMD64LEAQ8}: OpAMD64SUBQmodifyidx8,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ1}: OpAMD64ANDLmodifyidx1,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ4}: OpAMD64ANDLmodifyidx4,
+ [2]Op{OpAMD64ANDLmodify, OpAMD64LEAQ8}: OpAMD64ANDLmodifyidx8,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ1}: OpAMD64ANDQmodifyidx1,
+ [2]Op{OpAMD64ANDQmodify, OpAMD64LEAQ8}: OpAMD64ANDQmodifyidx8,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ1}: OpAMD64ORLmodifyidx1,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ4}: OpAMD64ORLmodifyidx4,
+ [2]Op{OpAMD64ORLmodify, OpAMD64LEAQ8}: OpAMD64ORLmodifyidx8,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ1}: OpAMD64ORQmodifyidx1,
+ [2]Op{OpAMD64ORQmodify, OpAMD64LEAQ8}: OpAMD64ORQmodifyidx8,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ1}: OpAMD64XORLmodifyidx1,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ4}: OpAMD64XORLmodifyidx4,
+ [2]Op{OpAMD64XORLmodify, OpAMD64LEAQ8}: OpAMD64XORLmodifyidx8,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ1}: OpAMD64XORQmodifyidx1,
+ [2]Op{OpAMD64XORQmodify, OpAMD64LEAQ8}: OpAMD64XORQmodifyidx8,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64ADDQ}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64ADDQ}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64ADDQ}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64ADDQ}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64ADDQ}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64ADDQ}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64ADDQ}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64ADDQ}: OpAMD64XORQconstmodifyidx1,
+
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ1}: OpAMD64ADDLconstmodifyidx1,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ4}: OpAMD64ADDLconstmodifyidx4,
+ [2]Op{OpAMD64ADDLconstmodify, OpAMD64LEAQ8}: OpAMD64ADDLconstmodifyidx8,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ1}: OpAMD64ADDQconstmodifyidx1,
+ [2]Op{OpAMD64ADDQconstmodify, OpAMD64LEAQ8}: OpAMD64ADDQconstmodifyidx8,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ1}: OpAMD64ANDLconstmodifyidx1,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ4}: OpAMD64ANDLconstmodifyidx4,
+ [2]Op{OpAMD64ANDLconstmodify, OpAMD64LEAQ8}: OpAMD64ANDLconstmodifyidx8,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ1}: OpAMD64ANDQconstmodifyidx1,
+ [2]Op{OpAMD64ANDQconstmodify, OpAMD64LEAQ8}: OpAMD64ANDQconstmodifyidx8,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ1}: OpAMD64ORLconstmodifyidx1,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ4}: OpAMD64ORLconstmodifyidx4,
+ [2]Op{OpAMD64ORLconstmodify, OpAMD64LEAQ8}: OpAMD64ORLconstmodifyidx8,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ1}: OpAMD64ORQconstmodifyidx1,
+ [2]Op{OpAMD64ORQconstmodify, OpAMD64LEAQ8}: OpAMD64ORQconstmodifyidx8,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ1}: OpAMD64XORLconstmodifyidx1,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ4}: OpAMD64XORLconstmodifyidx4,
+ [2]Op{OpAMD64XORLconstmodify, OpAMD64LEAQ8}: OpAMD64XORLconstmodifyidx8,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ1}: OpAMD64XORQconstmodifyidx1,
+ [2]Op{OpAMD64XORQconstmodify, OpAMD64LEAQ8}: OpAMD64XORQconstmodifyidx8,
+
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ1}: OpAMD64ADDSSloadidx1,
+ [2]Op{OpAMD64ADDSSload, OpAMD64LEAQ4}: OpAMD64ADDSSloadidx4,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ1}: OpAMD64ADDSDloadidx1,
+ [2]Op{OpAMD64ADDSDload, OpAMD64LEAQ8}: OpAMD64ADDSDloadidx8,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ1}: OpAMD64SUBSSloadidx1,
+ [2]Op{OpAMD64SUBSSload, OpAMD64LEAQ4}: OpAMD64SUBSSloadidx4,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ1}: OpAMD64SUBSDloadidx1,
+ [2]Op{OpAMD64SUBSDload, OpAMD64LEAQ8}: OpAMD64SUBSDloadidx8,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ1}: OpAMD64MULSSloadidx1,
+ [2]Op{OpAMD64MULSSload, OpAMD64LEAQ4}: OpAMD64MULSSloadidx4,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ1}: OpAMD64MULSDloadidx1,
+ [2]Op{OpAMD64MULSDload, OpAMD64LEAQ8}: OpAMD64MULSDloadidx8,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ1}: OpAMD64DIVSSloadidx1,
+ [2]Op{OpAMD64DIVSSload, OpAMD64LEAQ4}: OpAMD64DIVSSloadidx4,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ1}: OpAMD64DIVSDloadidx1,
+ [2]Op{OpAMD64DIVSDload, OpAMD64LEAQ8}: OpAMD64DIVSDloadidx8,
+
+ // 386
+ [2]Op{Op386MOVBload, Op386ADDL}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386ADDL}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVLload, Op386ADDL}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVSSload, Op386ADDL}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSDload, Op386ADDL}: Op386MOVSDloadidx1,
+
+ [2]Op{Op386MOVBstore, Op386ADDL}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386ADDL}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVLstore, Op386ADDL}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386ADDL}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386ADDL}: Op386MOVSDstoreidx1,
+
+ [2]Op{Op386MOVBstoreconst, Op386ADDL}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386ADDL}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386ADDL}: Op386MOVLstoreconstidx1,
+
+ [2]Op{Op386MOVBload, Op386LEAL1}: Op386MOVBloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL1}: Op386MOVWloadidx1,
+ [2]Op{Op386MOVWload, Op386LEAL2}: Op386MOVWloadidx2,
+ [2]Op{Op386MOVLload, Op386LEAL1}: Op386MOVLloadidx1,
+ [2]Op{Op386MOVLload, Op386LEAL4}: Op386MOVLloadidx4,
+ [2]Op{Op386MOVSSload, Op386LEAL1}: Op386MOVSSloadidx1,
+ [2]Op{Op386MOVSSload, Op386LEAL4}: Op386MOVSSloadidx4,
+ [2]Op{Op386MOVSDload, Op386LEAL1}: Op386MOVSDloadidx1,
+ [2]Op{Op386MOVSDload, Op386LEAL8}: Op386MOVSDloadidx8,
+
+ [2]Op{Op386MOVBstore, Op386LEAL1}: Op386MOVBstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL1}: Op386MOVWstoreidx1,
+ [2]Op{Op386MOVWstore, Op386LEAL2}: Op386MOVWstoreidx2,
+ [2]Op{Op386MOVLstore, Op386LEAL1}: Op386MOVLstoreidx1,
+ [2]Op{Op386MOVLstore, Op386LEAL4}: Op386MOVLstoreidx4,
+ [2]Op{Op386MOVSSstore, Op386LEAL1}: Op386MOVSSstoreidx1,
+ [2]Op{Op386MOVSSstore, Op386LEAL4}: Op386MOVSSstoreidx4,
+ [2]Op{Op386MOVSDstore, Op386LEAL1}: Op386MOVSDstoreidx1,
+ [2]Op{Op386MOVSDstore, Op386LEAL8}: Op386MOVSDstoreidx8,
+
+ [2]Op{Op386MOVBstoreconst, Op386LEAL1}: Op386MOVBstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL1}: Op386MOVWstoreconstidx1,
+ [2]Op{Op386MOVWstoreconst, Op386LEAL2}: Op386MOVWstoreconstidx2,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL1}: Op386MOVLstoreconstidx1,
+ [2]Op{Op386MOVLstoreconst, Op386LEAL4}: Op386MOVLstoreconstidx4,
+
+ [2]Op{Op386ADDLload, Op386LEAL4}: Op386ADDLloadidx4,
+ [2]Op{Op386SUBLload, Op386LEAL4}: Op386SUBLloadidx4,
+ [2]Op{Op386MULLload, Op386LEAL4}: Op386MULLloadidx4,
+ [2]Op{Op386ANDLload, Op386LEAL4}: Op386ANDLloadidx4,
+ [2]Op{Op386ORLload, Op386LEAL4}: Op386ORLloadidx4,
+ [2]Op{Op386XORLload, Op386LEAL4}: Op386XORLloadidx4,
+
+ [2]Op{Op386ADDLmodify, Op386LEAL4}: Op386ADDLmodifyidx4,
+ [2]Op{Op386SUBLmodify, Op386LEAL4}: Op386SUBLmodifyidx4,
+ [2]Op{Op386ANDLmodify, Op386LEAL4}: Op386ANDLmodifyidx4,
+ [2]Op{Op386ORLmodify, Op386LEAL4}: Op386ORLmodifyidx4,
+ [2]Op{Op386XORLmodify, Op386LEAL4}: Op386XORLmodifyidx4,
+
+ [2]Op{Op386ADDLconstmodify, Op386LEAL4}: Op386ADDLconstmodifyidx4,
+ [2]Op{Op386ANDLconstmodify, Op386LEAL4}: Op386ANDLconstmodifyidx4,
+ [2]Op{Op386ORLconstmodify, Op386LEAL4}: Op386ORLconstmodifyidx4,
+ [2]Op{Op386XORLconstmodify, Op386LEAL4}: Op386XORLconstmodifyidx4,
+
+ // s390x
+ [2]Op{OpS390XMOVDload, OpS390XADD}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XADD}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XADD}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XADD}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XADD}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XADD}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XADD}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XADD}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XADD}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XADD}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XADD}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XADD}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XADD}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XADD}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XADD}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XADD}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XADD}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XADD}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XADD}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XADD}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XADD}: OpS390XFMOVSstoreidx,
+
+ [2]Op{OpS390XMOVDload, OpS390XMOVDaddridx}: OpS390XMOVDloadidx,
+ [2]Op{OpS390XMOVWload, OpS390XMOVDaddridx}: OpS390XMOVWloadidx,
+ [2]Op{OpS390XMOVHload, OpS390XMOVDaddridx}: OpS390XMOVHloadidx,
+ [2]Op{OpS390XMOVBload, OpS390XMOVDaddridx}: OpS390XMOVBloadidx,
+
+ [2]Op{OpS390XMOVWZload, OpS390XMOVDaddridx}: OpS390XMOVWZloadidx,
+ [2]Op{OpS390XMOVHZload, OpS390XMOVDaddridx}: OpS390XMOVHZloadidx,
+ [2]Op{OpS390XMOVBZload, OpS390XMOVDaddridx}: OpS390XMOVBZloadidx,
+
+ [2]Op{OpS390XMOVDBRload, OpS390XMOVDaddridx}: OpS390XMOVDBRloadidx,
+ [2]Op{OpS390XMOVWBRload, OpS390XMOVDaddridx}: OpS390XMOVWBRloadidx,
+ [2]Op{OpS390XMOVHBRload, OpS390XMOVDaddridx}: OpS390XMOVHBRloadidx,
+
+ [2]Op{OpS390XFMOVDload, OpS390XMOVDaddridx}: OpS390XFMOVDloadidx,
+ [2]Op{OpS390XFMOVSload, OpS390XMOVDaddridx}: OpS390XFMOVSloadidx,
+
+ [2]Op{OpS390XMOVDstore, OpS390XMOVDaddridx}: OpS390XMOVDstoreidx,
+ [2]Op{OpS390XMOVWstore, OpS390XMOVDaddridx}: OpS390XMOVWstoreidx,
+ [2]Op{OpS390XMOVHstore, OpS390XMOVDaddridx}: OpS390XMOVHstoreidx,
+ [2]Op{OpS390XMOVBstore, OpS390XMOVDaddridx}: OpS390XMOVBstoreidx,
+
+ [2]Op{OpS390XMOVDBRstore, OpS390XMOVDaddridx}: OpS390XMOVDBRstoreidx,
+ [2]Op{OpS390XMOVWBRstore, OpS390XMOVDaddridx}: OpS390XMOVWBRstoreidx,
+ [2]Op{OpS390XMOVHBRstore, OpS390XMOVDaddridx}: OpS390XMOVHBRstoreidx,
+
+ [2]Op{OpS390XFMOVDstore, OpS390XMOVDaddridx}: OpS390XFMOVDstoreidx,
+ [2]Op{OpS390XFMOVSstore, OpS390XMOVDaddridx}: OpS390XFMOVSstoreidx,
+}
diff --git a/src/cmd/compile/internal/ssa/bench_test.go b/src/cmd/compile/internal/ssa/bench_test.go
new file mode 100644
index 0000000..0971667
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/bench_test.go
@@ -0,0 +1,32 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+package ssa
+
+import (
+ "math/rand"
+ "testing"
+)
+
+var d int
+
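+// fn conditionally assigns a boolean, the control-flow pattern that the phiopt
+// pass is intended to optimize; the benchmark below calls it repeatedly.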
+//go:noinline
+func fn(a, b int) bool {
+ c := false
+ if a > 0 {
+ if b < 0 {
+ d = d + 1
+ }
+ c = true
+ }
+ return c
+}
+
+func BenchmarkPhioptPass(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ a := rand.Perm(i/10 + 10)
+ for i := 1; i < len(a)/2; i++ {
+ fn(a[i]-a[i-1], a[i+len(a)/2-2]-a[i+len(a)/2-1])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/biasedsparsemap.go b/src/cmd/compile/internal/ssa/biasedsparsemap.go
new file mode 100644
index 0000000..0d35154
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/biasedsparsemap.go
@@ -0,0 +1,112 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "math"
+)
+
+// A biasedSparseMap is a sparseMap for integers between J and K inclusive,
+// where J might be somewhat larger than zero (and K-J is probably much smaller than J).
+// (The motivating use case is the line numbers of statements for a single function.)
+// Not all features of a SparseMap are exported, and it is also easy to treat a
+// biasedSparseMap like a SparseSet.
+type biasedSparseMap struct {
+ s *sparseMap
+ first int
+}
+
+// newBiasedSparseMap returns a new biasedSparseMap for values between first and last, inclusive.
+func newBiasedSparseMap(first, last int) *biasedSparseMap {
+ if first > last {
+ return &biasedSparseMap{first: math.MaxInt32, s: nil}
+ }
+ return &biasedSparseMap{first: first, s: newSparseMap(1 + last - first)}
+}
+
+// cap returns one more than the largest key valid for s
+func (s *biasedSparseMap) cap() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.cap() + int(s.first)
+}
+
+// size returns the number of entries stored in s
+func (s *biasedSparseMap) size() int {
+ if s == nil || s.s == nil {
+ return 0
+ }
+ return s.s.size()
+}
+
+// contains reports whether x is a key in s
+func (s *biasedSparseMap) contains(x uint) bool {
+ if s == nil || s.s == nil {
+ return false
+ }
+ if int(x) < s.first {
+ return false
+ }
+ if int(x) >= s.cap() {
+ return false
+ }
+ return s.s.contains(ID(int(x) - s.first))
+}
+
+// get returns the value s maps for key x, or -1 if
+// x is not mapped or is out of range for s.
+func (s *biasedSparseMap) get(x uint) int32 {
+ if s == nil || s.s == nil {
+ return -1
+ }
+ if int(x) < s.first {
+ return -1
+ }
+ if int(x) >= s.cap() {
+ return -1
+ }
+ return s.s.get(ID(int(x) - s.first))
+}
+
+// getEntry returns the i'th key and value stored in s,
+// where 0 <= i < s.size()
+func (s *biasedSparseMap) getEntry(i int) (x uint, v int32) {
+ e := s.s.contents()[i]
+ x = uint(int(e.key) + s.first)
+ v = e.val
+ return
+}
+
+// add inserts x->0 into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) add(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), 0, src.NoXPos)
+}
+
+// set inserts x->v into s, provided that x is in the range of keys stored in s.
+func (s *biasedSparseMap) set(x uint, v int32) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.set(ID(int(x)-s.first), v, src.NoXPos)
+}
+
+// remove removes key x from s.
+func (s *biasedSparseMap) remove(x uint) {
+ if int(x) < s.first || int(x) >= s.cap() {
+ return
+ }
+ s.s.remove(ID(int(x) - s.first))
+}
+
+func (s *biasedSparseMap) clear() {
+ if s.s != nil {
+ s.s.clear()
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/block.go b/src/cmd/compile/internal/ssa/block.go
new file mode 100644
index 0000000..6ff3188
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/block.go
@@ -0,0 +1,410 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+// Block represents a basic block in the control flow graph of a function.
+type Block struct {
+	// A unique identifier for the block. The system will attempt to allocate
+	// these IDs densely, but makes no guarantees.
+ ID ID
+
+ // Source position for block's control operation
+ Pos src.XPos
+
+ // The kind of block this is.
+ Kind BlockKind
+
+ // Likely direction for branches.
+ // If BranchLikely, Succs[0] is the most likely branch taken.
+ // If BranchUnlikely, Succs[1] is the most likely branch taken.
+ // Ignored if len(Succs) < 2.
+ // Fatal if not BranchUnknown and len(Succs) > 2.
+ Likely BranchPrediction
+
+ // After flagalloc, records whether flags are live at the end of the block.
+ FlagsLiveAtEnd bool
+
+ // Subsequent blocks, if any. The number and order depend on the block kind.
+ Succs []Edge
+
+ // Inverse of successors.
+ // The order is significant to Phi nodes in the block.
+ // TODO: predecessors is a pain to maintain. Can we somehow order phi
+ // arguments by block id and have this field computed explicitly when needed?
+ Preds []Edge
+
+ // A list of values that determine how the block is exited. The number
+ // and type of control values depends on the Kind of the block. For
+ // instance, a BlockIf has a single boolean control value and BlockExit
+ // has a single memory control value.
+ //
+ // The ControlValues() method may be used to get a slice with the non-nil
+ // control values that can be ranged over.
+ //
+ // Controls[1] must be nil if Controls[0] is nil.
+ Controls [2]*Value
+
+ // Auxiliary info for the block. Its value depends on the Kind.
+ Aux Aux
+ AuxInt int64
+
+ // The unordered set of Values that define the operation of this block.
+ // After the scheduling pass, this list is ordered.
+ Values []*Value
+
+ // The containing function
+ Func *Func
+
+ // Storage for Succs, Preds and Values.
+ succstorage [2]Edge
+ predstorage [4]Edge
+ valstorage [9]*Value
+}
+
+// Edge represents a CFG edge.
+// Example edges for b branching to either c or d.
+// (c and d have other predecessors.)
+// b.Succs = [{c,3}, {d,1}]
+// c.Preds = [?, ?, ?, {b,0}]
+// d.Preds = [?, {b,1}, ?]
+// These indexes allow us to edit the CFG in constant time.
+// In addition, it informs phi ops in degenerate cases like:
+// b:
+// if k then c else c
+// c:
+// v = Phi(x, y)
+// Then the indexes tell you whether x is chosen from
+// the if or else branch from b.
+// b.Succs = [{c,0},{c,1}]
+// c.Preds = [{b,0},{b,1}]
+// means x is chosen if k is true.
+type Edge struct {
+ // block edge goes to (in a Succs list) or from (in a Preds list)
+ b *Block
+ // index of reverse edge. Invariant:
+ // e := x.Succs[idx]
+ // e.b.Preds[e.i] = Edge{x,idx}
+ // and similarly for predecessors.
+ i int
+}
+
+func (e Edge) Block() *Block {
+ return e.b
+}
+func (e Edge) Index() int {
+ return e.i
+}
+func (e Edge) String() string {
+ return fmt.Sprintf("{%v,%d}", e.b, e.i)
+}
+
+// kind controls successors
+// ------------------------------------------
+// Exit [return mem] []
+// Plain [] [next]
+// If [boolean Value] [then, else]
+// Defer [mem] [nopanic, panic] (control opcode should be OpStaticCall to runtime.deferproc)
+type BlockKind int8
+
+// short form print
+func (b *Block) String() string {
+ return fmt.Sprintf("b%d", b.ID)
+}
+
+// long form print
+func (b *Block) LongString() string {
+ s := b.Kind.String()
+ if b.Aux != nil {
+ s += fmt.Sprintf(" {%s}", b.Aux)
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += fmt.Sprintf(" [%s]", t)
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c)
+ }
+ if len(b.Succs) > 0 {
+ s += " ->"
+ for _, c := range b.Succs {
+ s += " " + c.b.String()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ return s
+}
+
+// NumControls returns the number of non-nil control values the
+// block has.
+func (b *Block) NumControls() int {
+ if b.Controls[0] == nil {
+ return 0
+ }
+ if b.Controls[1] == nil {
+ return 1
+ }
+ return 2
+}
+
+// ControlValues returns a slice containing the non-nil control
+// values of the block. The index of each control value will be
+// the same as it is in the Controls property and can be used
+// in ReplaceControl calls.
+func (b *Block) ControlValues() []*Value {
+ if b.Controls[0] == nil {
+ return b.Controls[:0]
+ }
+ if b.Controls[1] == nil {
+ return b.Controls[:1]
+ }
+ return b.Controls[:2]
+}
+
+// SetControl removes all existing control values and then adds
+// the control value provided. The number of control values after
+// a call to SetControl will always be 1.
+func (b *Block) SetControl(v *Value) {
+ b.ResetControls()
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// ResetControls sets the number of controls for the block to 0.
+func (b *Block) ResetControls() {
+ if b.Controls[0] != nil {
+ b.Controls[0].Uses--
+ }
+ if b.Controls[1] != nil {
+ b.Controls[1].Uses--
+ }
+ b.Controls = [2]*Value{} // reset both controls to nil
+}
+
+// AddControl appends a control value to the existing list of control values.
+func (b *Block) AddControl(v *Value) {
+ i := b.NumControls()
+ b.Controls[i] = v // panics if array is full
+ v.Uses++
+}
+
+// ReplaceControl exchanges the existing control value at the index provided
+// for the new value. The index must refer to a valid control value.
+func (b *Block) ReplaceControl(i int, v *Value) {
+ b.Controls[i].Uses--
+ b.Controls[i] = v
+ v.Uses++
+}
+
+// CopyControls replaces the controls for this block with those from the
+// provided block. The provided block is not modified.
+func (b *Block) CopyControls(from *Block) {
+ if b == from {
+ return
+ }
+ b.ResetControls()
+ for _, c := range from.ControlValues() {
+ b.AddControl(c)
+ }
+}
+
+// Reset sets the block to the provided kind and clears all the blocks control
+// and auxiliary values. Other properties of the block, such as its successors,
+// predecessors and values are left unmodified.
+func (b *Block) Reset(kind BlockKind) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+}
+
+// resetWithControl resets b and adds control v.
+// It is equivalent to b.Reset(kind); b.AddControl(v),
+// except that it is one call instead of two and avoids a bounds check.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl(kind BlockKind, v *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ v.Uses++
+}
+
+// resetWithControl2 resets b and adds controls v and w.
+// It is equivalent to b.Reset(kind); b.AddControl(v); b.AddControl(w),
+// except that it is one call instead of three and avoids two bounds checks.
+// It is intended for use by rewrite rules, where this matters.
+func (b *Block) resetWithControl2(kind BlockKind, v, w *Value) {
+ b.Kind = kind
+ b.ResetControls()
+ b.Aux = nil
+ b.AuxInt = 0
+ b.Controls[0] = v
+ b.Controls[1] = w
+ v.Uses++
+ w.Uses++
+}
+
+// truncateValues truncates b.Values at the ith element, zeroing subsequent elements.
+// The values in b.Values after i must already have had their args reset,
+// to maintain correct value uses counts.
+func (b *Block) truncateValues(i int) {
+ tail := b.Values[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ b.Values = b.Values[:i]
+}
+
+// AddEdgeTo adds an edge from block b to block c. Used during building of the
+// SSA graph; do not use on an already-completed SSA graph.
+func (b *Block) AddEdgeTo(c *Block) {
+ i := len(b.Succs)
+ j := len(c.Preds)
+ b.Succs = append(b.Succs, Edge{c, j})
+ c.Preds = append(c.Preds, Edge{b, i})
+ b.Func.invalidateCFG()
+}
+
+// removePred removes the ith input edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding successor edge, and adjust any
+// phi values by calling b.removePhiArg(v, i).
+func (b *Block) removePred(i int) {
+ n := len(b.Preds) - 1
+ if i != n {
+ e := b.Preds[n]
+ b.Preds[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Succs[e.i].i = i
+ }
+ b.Preds[n] = Edge{}
+ b.Preds = b.Preds[:n]
+ b.Func.invalidateCFG()
+}
+
+// removeSucc removes the ith output edge from b.
+// It is the responsibility of the caller to remove
+// the corresponding predecessor edge.
+func (b *Block) removeSucc(i int) {
+ n := len(b.Succs) - 1
+ if i != n {
+ e := b.Succs[n]
+ b.Succs[i] = e
+ // Update the other end of the edge we moved.
+ e.b.Preds[e.i].i = i
+ }
+ b.Succs[n] = Edge{}
+ b.Succs = b.Succs[:n]
+ b.Func.invalidateCFG()
+}
+
+func (b *Block) swapSuccessors() {
+ if len(b.Succs) != 2 {
+ b.Fatalf("swapSuccessors with len(Succs)=%d", len(b.Succs))
+ }
+ e0 := b.Succs[0]
+ e1 := b.Succs[1]
+ b.Succs[0] = e1
+ b.Succs[1] = e0
+ e0.b.Preds[e0.i].i = 1
+ e1.b.Preds[e1.i].i = 0
+ b.Likely *= -1
+}
+
+// removePhiArg removes the ith arg from phi.
+// It must be called after calling b.removePred(i) to
+// adjust the corresponding phi value of the block:
+//
+//	b.removePred(i)
+//	for _, v := range b.Values {
+//		if v.Op != OpPhi {
+//			continue
+//		}
+//		b.removePhiArg(v, i)
+//	}
+func (b *Block) removePhiArg(phi *Value, i int) {
+ n := len(b.Preds)
+ if numPhiArgs := len(phi.Args); numPhiArgs-1 != n {
+ b.Fatalf("inconsistent state, num predecessors: %d, num phi args: %d", n, numPhiArgs)
+ }
+ phi.Args[i].Uses--
+ phi.Args[i] = phi.Args[n]
+ phi.Args[n] = nil
+ phi.Args = phi.Args[:n]
+}
+
+// LackingPos indicates whether b is a block whose position should be inherited
+// from its successors. This is true if all the values within it have unreliable positions
+// and if it is "plain", meaning that there is no control flow that is also very likely
+// to correspond to a well-understood source position.
+func (b *Block) LackingPos() bool {
+ // Non-plain blocks are If or Defer, which both (1) have two successors,
+ // which might have different line numbers and (2) correspond to statements
+ // in the source code that have positions, so this case ought not occur anyway.
+ if b.Kind != BlockPlain {
+ return false
+ }
+ if b.Pos != src.NoXPos {
+ return false
+ }
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func (b *Block) AuxIntString() string {
+ switch b.Kind.AuxIntType() {
+ case "int8":
+ return fmt.Sprintf("%v", int8(b.AuxInt))
+ case "uint8":
+ return fmt.Sprintf("%v", uint8(b.AuxInt))
+ default: // type specified but not implemented - print as int64
+ return fmt.Sprintf("%v", b.AuxInt)
+ case "": // no aux int type
+ return ""
+ }
+}
+
+// likelyBranch reports whether block b is the likely branch of all of its predecessors.
+func (b *Block) likelyBranch() bool {
+ if len(b.Preds) == 0 {
+ return false
+ }
+ for _, e := range b.Preds {
+ p := e.b
+ if len(p.Succs) == 1 || len(p.Succs) == 2 && (p.Likely == BranchLikely && p.Succs[0].b == b ||
+ p.Likely == BranchUnlikely && p.Succs[1].b == b) {
+ continue
+ }
+ return false
+ }
+ return true
+}
+
+func (b *Block) Logf(msg string, args ...interface{}) { b.Func.Logf(msg, args...) }
+func (b *Block) Log() bool { return b.Func.Log() }
+func (b *Block) Fatalf(msg string, args ...interface{}) { b.Func.Fatalf(msg, args...) }
+
+type BranchPrediction int8
+
+const (
+ BranchUnlikely = BranchPrediction(-1)
+ BranchUnknown = BranchPrediction(0)
+ BranchLikely = BranchPrediction(+1)
+)
diff --git a/src/cmd/compile/internal/ssa/branchelim.go b/src/cmd/compile/internal/ssa/branchelim.go
new file mode 100644
index 0000000..be5f9e0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim.go
@@ -0,0 +1,449 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// branchelim tries to eliminate branches by
+// generating CondSelect instructions.
+//
+// Search for basic blocks that look like
+//
+// bb0            bb0
+//  | \          /   \
+//  | bb1   or  bb1   bb2    <- trivial if/else blocks
+//  | /          \   /
+// bb2            bb3
+//
+// where the intermediate blocks are mostly empty (with no side-effects);
+// rewrite Phis in the postdominator as CondSelects.
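+//
+// As a rough source-level sketch (illustrative only, not the exact SSA), code
+// such as
+//
+//	x := a
+//	if cond {
+//		x = b
+//	}
+//
+// has a Phi merging the two candidate values of x in the join block;
+// branchelim rewrites that Phi into a CondSelect over cond and the two
+// candidates, which later lowers to a conditional-move instruction on the
+// supported architectures.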
+func branchelim(f *Func) {
+ // FIXME: add support for lowering CondSelects on more architectures
+ switch f.Config.arch {
+ case "arm64", "ppc64le", "ppc64", "amd64", "wasm":
+ // implemented
+ default:
+ return
+ }
+
+ // Find all the values used in computing the address of any load.
+ // Typically these values have operations like AddPtr, Lsh64x64, etc.
+ loadAddr := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadAddr)
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpLoad, OpAtomicLoad8, OpAtomicLoad32, OpAtomicLoad64, OpAtomicLoadPtr, OpAtomicLoadAcq32, OpAtomicLoadAcq64:
+ loadAddr.add(v.Args[0].ID)
+ case OpMove:
+ loadAddr.add(v.Args[1].ID)
+ }
+ }
+ }
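+ // Grow the set to a fixed point: any integer- or pointer-typed argument of a
+ // value already in the set is itself treated as part of an address
+ // computation. Blocks are visited in postorder and values in reverse so that
+ // most def-use chains resolve in a single sweep.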
+ po := f.postorder()
+ for {
+ n := loadAddr.size()
+ for _, b := range po {
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if !loadAddr.contains(v.ID) {
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Type.IsInteger() || a.Type.IsPtr() || a.Type.IsUnsafePtr() {
+ loadAddr.add(a.ID)
+ }
+ }
+ }
+ }
+ if loadAddr.size() == n {
+ break
+ }
+ }
+
+ change := true
+ for change {
+ change = false
+ for _, b := range f.Blocks {
+ change = elimIf(f, loadAddr, b) || elimIfElse(f, loadAddr, b) || change
+ }
+ }
+}
+
+func canCondSelect(v *Value, arch string, loadAddr *sparseSet) bool {
+ if loadAddr.contains(v.ID) {
+ // The result of the soon-to-be conditional move is used to compute a load address.
+ // We want to avoid generating a conditional move in this case
+ // because the load address would now be data-dependent on the condition.
+ // Previously it would only be control-dependent on the condition, which is faster
+ // if the branch predicts well (or possibly even if it doesn't, if the load will
+ // be an expensive cache miss).
+ // See issue #26306.
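+ // As an illustrative source-level example (names are made up):
+ //
+ //	if cond { p = &a } else { p = &b }
+ //	x := *p
+ //
+ // turning p into a conditional move would make the load *p wait for cond to
+ // be computed, rather than proceed speculatively down the predicted path.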
+ return false
+ }
+ // For now, stick to simple scalars that fit in registers
+ switch {
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ return false
+ case v.Type.IsPtrShaped():
+ return true
+ case v.Type.IsInteger():
+ if arch == "amd64" && v.Type.Size() < 2 {
+ // amd64 doesn't support CMOV with byte registers
+ return false
+ }
+ return true
+ default:
+ return false
+ }
+}
+
+// elimIf converts the one-way branch starting at dom in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIf(f *Func, loadAddr *sparseSet, dom *Block) bool {
+ // See if dom is an If with one arm that
+ // is trivial and succeeded by the other
+ // successor of dom.
+ if dom.Kind != BlockIf || dom.Likely != BranchUnknown {
+ return false
+ }
+ var simple, post *Block
+ for i := range dom.Succs {
+ bb, other := dom.Succs[i].Block(), dom.Succs[i^1].Block()
+ if isLeafPlain(bb) && bb.Succs[0].Block() == other {
+ simple = bb
+ post = other
+ break
+ }
+ }
+ if simple == nil || len(post.Preds) != 2 || post == dom {
+ return false
+ }
+
+ // We've found our diamond CFG of blocks.
+ // Now decide if fusing 'simple' into dom+post
+ // looks profitable.
+
+ // Check that there are Phis, and that all of them
+ // can be safely rewritten to CondSelect.
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Pick some upper bound for the number of instructions
+ // we'd be willing to execute just to generate a dead
+ // argument to CondSelect. In the worst case, this is
+ // the number of useless instructions executed.
+ const maxfuseinsts = 2
+
+ if len(simple.Values) > maxfuseinsts || !canSpeculativelyExecute(simple) {
+ return false
+ }
+
+ // Replace Phi instructions in b with CondSelect instructions
+ swap := (post.Preds[0].Block() == dom) != (dom.Succs[0].Block() == post)
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(dom.Controls[0])
+ }
+
+ // Put all of the instructions into 'dom'
+ // and update the CFG appropriately.
+ dom.Kind = post.Kind
+ dom.CopyControls(post)
+ dom.Aux = post.Aux
+ dom.Succs = append(dom.Succs[:0], post.Succs...)
+ for i := range dom.Succs {
+ e := dom.Succs[i]
+ e.b.Preds[e.i].b = dom
+ }
+
+ // Try really hard to preserve statement marks attached to blocks.
+ simplePos := simple.Pos
+ postPos := post.Pos
+ simpleStmt := simplePos.IsStmt() == src.PosIsStmt
+ postStmt := postPos.IsStmt() == src.PosIsStmt
+
+ for _, v := range simple.Values {
+ v.Block = dom
+ }
+ for _, v := range post.Values {
+ v.Block = dom
+ }
+
+ // findBlockPos determines if b contains a stmt-marked value
+ // that has the same line number as the Pos for b itself.
+ // (i.e. is the position on b actually redundant?)
+ findBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ // See if there is a stmt-marked value already that matches simple.Pos (and perhaps post.Pos)
+ if pos.SameFileAndLine(v.Pos) && v.Pos.IsStmt() == src.PosIsStmt {
+ return true
+ }
+ }
+ return false
+ }
+ if simpleStmt {
+ simpleStmt = !findBlockPos(simple)
+ if !simpleStmt && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+
+ }
+ if postStmt {
+ postStmt = !findBlockPos(post)
+ }
+
+ // If simpleStmt and/or postStmt are still true, then try harder
+ // to find new homes for the corresponding statement marks.
+
+ // setBlockPos determines if b contains a can-be-statement value
+ // that has the same line number as the Pos for b itself, and
+ // puts a statement mark on it, and returns whether it succeeded
+ // in this operation.
+ setBlockPos := func(b *Block) bool {
+ pos := b.Pos
+ for _, v := range b.Values {
+ if pos.SameFileAndLine(v.Pos) && !isPoorStatementOp(v.Op) {
+ v.Pos = v.Pos.WithIsStmt()
+ return true
+ }
+ }
+ return false
+ }
+ // If necessary and possible, add a mark to a value in simple
+ if simpleStmt {
+ if setBlockPos(simple) && simplePos.SameFileAndLine(postPos) {
+ postStmt = false
+ }
+ }
+ // If necessary and possible, add a mark to a value in post
+ if postStmt {
+ postStmt = !setBlockPos(post)
+ }
+
+ // Before giving up (this was added because it helps), try the end of "dom", and if that is not available,
+ // try the values in the successor block if it is uncomplicated.
+ if postStmt {
+ if dom.Pos.IsStmt() != src.PosIsStmt {
+ dom.Pos = postPos
+ } else {
+ // Try the successor block
+ if len(dom.Succs) == 1 && len(dom.Succs[0].Block().Preds) == 1 {
+ succ := dom.Succs[0].Block()
+ for _, v := range succ.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if postPos.SameFileAndLine(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ postStmt = false
+ break
+ }
+ // If postStmt still true, tag the block itself if possible
+ if postStmt && succ.Pos.IsStmt() != src.PosIsStmt {
+ succ.Pos = postPos
+ }
+ }
+ }
+ }
+
+ dom.Values = append(dom.Values, simple.Values...)
+ dom.Values = append(dom.Values, post.Values...)
+
+ // Trash 'post' and 'simple'
+ clobberBlock(post)
+ clobberBlock(simple)
+
+ f.invalidateCFG()
+ return true
+}
+
+// isLeafPlain reports whether b is a BlockPlain with exactly one predecessor.
+func isLeafPlain(b *Block) bool {
+ return b.Kind == BlockPlain && len(b.Preds) == 1
+}
+
+func clobberBlock(b *Block) {
+ b.Values = nil
+ b.Preds = nil
+ b.Succs = nil
+ b.Aux = nil
+ b.ResetControls()
+ b.Likely = BranchUnknown
+ b.Kind = BlockInvalid
+}
+
+// elimIfElse converts the two-way branch starting at b in f to a conditional move if possible.
+// loadAddr is a set of values which are used to compute the address of a load.
+// Those values are exempt from CMOV generation.
+func elimIfElse(f *Func, loadAddr *sparseSet, b *Block) bool {
+ // See if 'b' ends in an if/else: it should
+ // have two successors, both of which are BlockPlain
+ // and succeeded by the same block.
+ if b.Kind != BlockIf || b.Likely != BranchUnknown {
+ return false
+ }
+ yes, no := b.Succs[0].Block(), b.Succs[1].Block()
+ if !isLeafPlain(yes) || len(yes.Values) > 1 || !canSpeculativelyExecute(yes) {
+ return false
+ }
+ if !isLeafPlain(no) || len(no.Values) > 1 || !canSpeculativelyExecute(no) {
+ return false
+ }
+ if b.Succs[0].Block().Succs[0].Block() != b.Succs[1].Block().Succs[0].Block() {
+ return false
+ }
+ // block that postdominates the if/else
+ post := b.Succs[0].Block().Succs[0].Block()
+ if len(post.Preds) != 2 || post == b {
+ return false
+ }
+ hasphis := false
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ hasphis = true
+ if !canCondSelect(v, f.Config.arch, loadAddr) {
+ return false
+ }
+ }
+ }
+ if !hasphis {
+ return false
+ }
+
+ // Don't generate CondSelects if branch is cheaper.
+ if !shouldElimIfElse(no, yes, post, f.Config.arch) {
+ return false
+ }
+
+ // now we're committed: rewrite each Phi as a CondSelect
+ swap := post.Preds[0].Block() != b.Succs[0].Block()
+ for _, v := range post.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.Op = OpCondSelect
+ if swap {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ v.AddArg(b.Controls[0])
+ }
+
+ // Move the contents of all of these
+ // blocks into 'b' and update CFG edges accordingly
+ b.Kind = post.Kind
+ b.CopyControls(post)
+ b.Aux = post.Aux
+ b.Succs = append(b.Succs[:0], post.Succs...)
+ for i := range b.Succs {
+ e := b.Succs[i]
+ e.b.Preds[e.i].b = b
+ }
+ for i := range post.Values {
+ post.Values[i].Block = b
+ }
+ for i := range yes.Values {
+ yes.Values[i].Block = b
+ }
+ for i := range no.Values {
+ no.Values[i].Block = b
+ }
+ b.Values = append(b.Values, yes.Values...)
+ b.Values = append(b.Values, no.Values...)
+ b.Values = append(b.Values, post.Values...)
+
+ // trash post, yes, and no
+ clobberBlock(yes)
+ clobberBlock(no)
+ clobberBlock(post)
+
+ f.invalidateCFG()
+ return true
+}
+
+// shouldElimIfElse reports whether the estimated cost of eliminating the
+// branch is lower than the threshold.
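+//
+// For the amd64 cost model below, an illustrative calculation: one phi and no
+// extra values computed in the yes/no blocks give cost 1 < 2, so the branch
+// is eliminated; two phis plus one such value give cost 2+1 = 3 >= 2, so the
+// branch is kept.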
+func shouldElimIfElse(no, yes, post *Block, arch string) bool {
+ switch arch {
+ default:
+ return true
+ case "amd64":
+ const maxcost = 2
+ phi := 0
+ other := 0
+ for _, v := range post.Values {
+ if v.Op == OpPhi {
+ // Each phi results in a CondSelect, which lowers to a CMOV;
+ // CMOV has latency >1 on most CPUs.
+ phi++
+ }
+ for _, x := range v.Args {
+ if x.Block == no || x.Block == yes {
+ other++
+ }
+ }
+ }
+ cost := phi * 1
+ if phi > 1 {
+ // If we have more than 1 phi and some values in post have args
+ // in yes or no blocks, we may have to recalculate the condition,
+ // because those args may clobber flags. For now assume that all
+ // operations clobber flags.
+ cost += other * 1
+ }
+ return cost < maxcost
+ }
+}
+
+// canSpeculativelyExecute reports whether every value in the block can
+// be evaluated without causing any observable side effects (memory
+// accesses, panics and so on) except for execution time changes. It
+// also ensures that the block does not contain any phis which we can't
+// speculatively execute.
+// Warning: this function cannot currently detect values that represent
+// instructions whose execution must be guarded by CPU hardware feature
+// checks. See issue #34950.
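+//
+// For example (source-level, illustrative only), a branch arm computing
+// x = a + b can be executed speculatively, while one computing x = a / b
+// (which may panic when b == 0) or x = *p (a memory access) cannot.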
+func canSpeculativelyExecute(b *Block) bool {
+ // don't fuse memory ops, Phi ops, divides (can panic),
+ // or anything else with side-effects
+ for _, v := range b.Values {
+ if v.Op == OpPhi || isDivMod(v.Op) || v.Type.IsMemory() ||
+ v.MemoryArg() != nil || opcodeTable[v.Op].hasSideEffects {
+ return false
+ }
+ }
+ return true
+}
+
+func isDivMod(op Op) bool {
+ switch op {
+ case OpDiv8, OpDiv8u, OpDiv16, OpDiv16u,
+ OpDiv32, OpDiv32u, OpDiv64, OpDiv64u, OpDiv128u,
+ OpDiv32F, OpDiv64F,
+ OpMod8, OpMod8u, OpMod16, OpMod16u,
+ OpMod32, OpMod32u, OpMod64, OpMod64u:
+ return true
+ default:
+ return false
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/branchelim_test.go b/src/cmd/compile/internal/ssa/branchelim_test.go
new file mode 100644
index 0000000..20fa84d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/branchelim_test.go
@@ -0,0 +1,172 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+// Test that a trivial 'if' is eliminated
+func TestBranchElimIf(t *testing.T) {
+ var testData = []struct {
+ arch string
+ intType string
+ ok bool
+ }{
+ {"arm64", "int32", true},
+ {"amd64", "int32", true},
+ {"amd64", "int8", false},
+ }
+
+ for _, data := range testData {
+ t.Run(data.arch+"/"+data.intType, func(t *testing.T) {
+ c := testConfigArch(t, data.arch)
+ boolType := c.config.Types.Bool
+ var intType *types.Type
+ switch data.intType {
+ case "int32":
+ intType = c.config.Types.Int32
+ case "int8":
+ intType = c.config.Types.Int8
+ default:
+ t.Fatal("invalid integer type:", data.intType)
+ }
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if data.ok {
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ } else {
+ if len(fun.f.Blocks) != 3 {
+ t.Fatalf("expected 3 block after branchelim and deadcode; found %d", len(fun.f.Blocks))
+ }
+ }
+ })
+ }
+}
+
+// Test that a trivial if/else is eliminated
+func TestBranchElimIfElse(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const1", OpConst32, intType, 1, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ If("cond", "b2", "b3")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b3",
+ Goto("b4")),
+ Bloc("b4",
+ Valu("phi", OpPhi, intType, 0, nil, "const1", "const2"),
+ Valu("retstore", OpStore, types.TypeMem, 0, nil, "phi", "sb", "start"),
+ Exit("retstore")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 1 {
+ t.Fatalf("expected 1 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpCondSelect {
+ t.Fatalf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ if fun.values["phi"].Args[2] != fun.values["cond"] {
+ t.Errorf("expected CondSelect condition to be %s; found %s", fun.values["cond"], fun.values["phi"].Args[2])
+ }
+ if fun.blocks["entry"].Kind != BlockExit {
+ t.Errorf("expected entry to be BlockExit; found kind %s", fun.blocks["entry"].Kind.String())
+ }
+ })
+ }
+}
+
+// Test that an if/else CFG that loops back
+// into itself does *not* get eliminated.
+func TestNoBranchElimLoop(t *testing.T) {
+ for _, arch := range []string{"arm64", "amd64"} {
+ t.Run(arch, func(t *testing.T) {
+ c := testConfigArch(t, arch)
+ boolType := c.config.Types.Bool
+ intType := c.config.Types.Int32
+
+ // The control flow here is totally bogus,
+ // but a dead cycle seems like the only plausible
+ // way to arrive at a diamond CFG that is also a loop.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("const2", OpConst32, intType, 2, nil),
+ Valu("const3", OpConst32, intType, 3, nil),
+ Goto("b5")),
+ Bloc("b2",
+ Valu("addr", OpAddr, boolType.PtrTo(), 0, nil, "sb"),
+ Valu("cond", OpLoad, boolType, 0, nil, "addr", "start"),
+ Valu("phi", OpPhi, intType, 0, nil, "const2", "const3"),
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ Goto("b2")),
+ Bloc("b4",
+ Goto("b2")),
+ Bloc("b5",
+ Exit("start")))
+
+ CheckFunc(fun.f)
+ branchelim(fun.f)
+ CheckFunc(fun.f)
+
+ if len(fun.f.Blocks) != 5 {
+ t.Errorf("expected 5 block after branchelim; found %d", len(fun.f.Blocks))
+ }
+ if fun.values["phi"].Op != OpPhi {
+ t.Errorf("expected phi op to be CondSelect; found op %s", fun.values["phi"].Op)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/cache.go b/src/cmd/compile/internal/ssa/cache.go
new file mode 100644
index 0000000..dbec2e1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cache.go
@@ -0,0 +1,81 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "sort"
+)
+
+// A Cache holds reusable compiler state.
+// It is intended to be re-used for multiple Func compilations.
+type Cache struct {
+ // Storage for low-numbered values and blocks.
+ values [2000]Value
+ blocks [200]Block
+ locs [2000]Location
+
+ // Reusable stackAllocState.
+ // See stackalloc.go's {new,put}StackAllocState.
+ stackAllocState *stackAllocState
+
+ domblockstore []ID // scratch space for computing dominators
+ scrSparseSet []*sparseSet // scratch sparse sets to be re-used.
+ scrSparseMap []*sparseMap // scratch sparse maps to be re-used.
+ scrPoset []*poset // scratch poset to be reused
+ // deadcode contains reusable slices specifically for the deadcode pass.
+ // It gets special treatment because of the frequency with which it is run.
+ deadcode struct {
+ liveOrderStmts []*Value
+ live []bool
+ q []*Value
+ }
+ // Reusable regalloc state.
+ regallocValues []valState
+
+ ValueToProgAfter []*obj.Prog
+ debugState debugState
+
+ Liveness interface{} // *gc.livenessFuncCache
+}
+
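+// Reset clears the used prefix of the cache's storage arrays and slices so
+// that the Cache can be reused for compiling the next function.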
+func (c *Cache) Reset() {
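+ // Values, blocks and locations are handed out from the front of these
+ // arrays, so the first zero/nil entry marks the high-water mark; only the
+ // used prefix needs to be cleared.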
+ nv := sort.Search(len(c.values), func(i int) bool { return c.values[i].ID == 0 })
+ xv := c.values[:nv]
+ for i := range xv {
+ xv[i] = Value{}
+ }
+ nb := sort.Search(len(c.blocks), func(i int) bool { return c.blocks[i].ID == 0 })
+ xb := c.blocks[:nb]
+ for i := range xb {
+ xb[i] = Block{}
+ }
+ nl := sort.Search(len(c.locs), func(i int) bool { return c.locs[i] == nil })
+ xl := c.locs[:nl]
+ for i := range xl {
+ xl[i] = nil
+ }
+
+ // regalloc sets the length of c.regallocValues to whatever it may use,
+ // so clear according to length.
+ for i := range c.regallocValues {
+ c.regallocValues[i] = valState{}
+ }
+
+ // liveOrderStmts gets used multiple times during compilation of a function.
+ // We don't know where the high water mark was, so reslice to cap and search.
+ c.deadcode.liveOrderStmts = c.deadcode.liveOrderStmts[:cap(c.deadcode.liveOrderStmts)]
+ no := sort.Search(len(c.deadcode.liveOrderStmts), func(i int) bool { return c.deadcode.liveOrderStmts[i] == nil })
+ xo := c.deadcode.liveOrderStmts[:no]
+ for i := range xo {
+ xo[i] = nil
+ }
+ c.deadcode.q = c.deadcode.q[:cap(c.deadcode.q)]
+ nq := sort.Search(len(c.deadcode.q), func(i int) bool { return c.deadcode.q[i] == nil })
+ xq := c.deadcode.q[:nq]
+ for i := range xq {
+ xq[i] = nil
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/check.go b/src/cmd/compile/internal/ssa/check.go
new file mode 100644
index 0000000..28edfd2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/check.go
@@ -0,0 +1,600 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/obj/s390x"
+ "math"
+ "math/bits"
+)
+
+// checkFunc checks invariants of f.
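+// The checks include: each block and value appears exactly once, pred/succ
+// edges are cross-linked, each block kind has the required successors and
+// control values, Aux/AuxInt match the opcode's aux type, args dominate their
+// uses, the loop nest is consistent, and recorded use counts match actual uses.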
+func checkFunc(f *Func) {
+ blockMark := make([]bool, f.NumBlocks())
+ valueMark := make([]bool, f.NumValues())
+
+ for _, b := range f.Blocks {
+ if blockMark[b.ID] {
+ f.Fatalf("block %s appears twice in %s!", b, f.Name)
+ }
+ blockMark[b.ID] = true
+ if b.Func != f {
+ f.Fatalf("%s.Func=%s, want %s", b, b.Func.Name, f.Name)
+ }
+
+ for i, e := range b.Preds {
+ if se := e.b.Succs[e.i]; se.b != b || se.i != i {
+ f.Fatalf("block pred/succ not crosslinked correctly %d:%s %d:%s", i, b, se.i, se.b)
+ }
+ }
+ for i, e := range b.Succs {
+ if pe := e.b.Preds[e.i]; pe.b != b || pe.i != i {
+ f.Fatalf("block succ/pred not crosslinked correctly %d:%s %d:%s", i, b, pe.i, pe.b)
+ }
+ }
+
+ switch b.Kind {
+ case BlockExit:
+ if len(b.Succs) != 0 {
+ f.Fatalf("exit block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("exit block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("exit block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRet:
+ if len(b.Succs) != 0 {
+ f.Fatalf("ret block %s has successors", b)
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("ret block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("ret block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockRetJmp:
+ if len(b.Succs) != 0 {
+ f.Fatalf("retjmp block %s len(Succs)==%d, want 0", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("retjmp block %s has nil control", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("retjmp block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockPlain:
+ if len(b.Succs) != 1 {
+ f.Fatalf("plain block %s len(Succs)==%d, want 1", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain block %s has non-nil control %s", b, b.Controls[0].LongString())
+ }
+ case BlockIf:
+ if len(b.Succs) != 2 {
+ f.Fatalf("if block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("if block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsBoolean() {
+ f.Fatalf("if block %s has non-bool control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockDefer:
+ if len(b.Succs) != 2 {
+ f.Fatalf("defer block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 1 {
+ f.Fatalf("defer block %s has no control value", b)
+ }
+ if !b.Controls[0].Type.IsMemory() {
+ f.Fatalf("defer block %s has non-memory control value %s", b, b.Controls[0].LongString())
+ }
+ case BlockFirst:
+ if len(b.Succs) != 2 {
+ f.Fatalf("plain/dead block %s len(Succs)==%d, want 2", b, len(b.Succs))
+ }
+ if b.NumControls() != 0 {
+ f.Fatalf("plain/dead block %s has a control value", b)
+ }
+ }
+ if len(b.Succs) != 2 && b.Likely != BranchUnknown {
+ f.Fatalf("likeliness prediction %d for block %s with %d successors", b.Likely, b, len(b.Succs))
+ }
+
+ for _, v := range b.Values {
+ // Check to make sure argument count makes sense (argLen of -1 indicates
+ // variable length args)
+ nArgs := opcodeTable[v.Op].argLen
+ if nArgs != -1 && int32(len(v.Args)) != nArgs {
+ f.Fatalf("value %s has %d args, expected %d", v.LongString(),
+ len(v.Args), nArgs)
+ }
+
+ // Check to make sure aux values make sense.
+ canHaveAux := false
+ canHaveAuxInt := false
+ // TODO: enforce types of Aux in this switch (like auxString does below)
+ switch opcodeTable[v.Op].auxType {
+ case auxNone:
+ case auxBool:
+ if v.AuxInt < 0 || v.AuxInt > 1 {
+ f.Fatalf("bad bool AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt8:
+ if v.AuxInt != int64(int8(v.AuxInt)) {
+ f.Fatalf("bad int8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt16:
+ if v.AuxInt != int64(int16(v.AuxInt)) {
+ f.Fatalf("bad int16 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt32:
+ if v.AuxInt != int64(int32(v.AuxInt)) {
+ f.Fatalf("bad int32 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxInt64, auxARM64BitField:
+ canHaveAuxInt = true
+ case auxInt128:
+ // AuxInt must be zero, so leave canHaveAuxInt set to false.
+ case auxUInt8:
+ if v.AuxInt != int64(uint8(v.AuxInt)) {
+ f.Fatalf("bad uint8 AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ case auxFloat32:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ if !isExactFloat32(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt value that is not an exact float32", v)
+ }
+ case auxFloat64:
+ canHaveAuxInt = true
+ if math.IsNaN(v.AuxFloat()) {
+ f.Fatalf("value %v has an AuxInt that encodes a NaN", v)
+ }
+ case auxString:
+ if _, ok := v.Aux.(stringAux); !ok {
+ f.Fatalf("value %v has Aux type %T, want string", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxCallOff:
+ canHaveAuxInt = true
+ fallthrough
+ case auxCall:
+ if ac, ok := v.Aux.(*AuxCall); ok {
+ if v.Op == OpStaticCall && ac.Fn == nil {
+ f.Fatalf("value %v has *AuxCall with nil Fn", v)
+ }
+ } else {
+ f.Fatalf("value %v has Aux type %T, want *AuxCall", v, v.Aux)
+ }
+ canHaveAux = true
+ case auxNameOffsetInt8:
+ if _, ok := v.Aux.(*AuxNameOffset); !ok {
+ f.Fatalf("value %v has Aux type %T, want *AuxNameOffset", v, v.Aux)
+ }
+ canHaveAux = true
+ canHaveAuxInt = true
+ case auxSym, auxTyp:
+ canHaveAux = true
+ case auxSymOff, auxSymValAndOff, auxTypSize:
+ canHaveAuxInt = true
+ canHaveAux = true
+ case auxCCop:
+ if opcodeTable[Op(v.AuxInt)].name == "OpInvalid" {
+ f.Fatalf("value %v has an AuxInt value that is a valid opcode", v)
+ }
+ canHaveAuxInt = true
+ case auxS390XCCMask:
+ if _, ok := v.Aux.(s390x.CCMask); !ok {
+ f.Fatalf("bad type %T for S390XCCMask in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxS390XRotateParams:
+ if _, ok := v.Aux.(s390x.RotateParams); !ok {
+ f.Fatalf("bad type %T for S390XRotateParams in %v", v.Aux, v)
+ }
+ canHaveAux = true
+ case auxFlagConstant:
+ if v.AuxInt < 0 || v.AuxInt > 15 {
+ f.Fatalf("bad FlagConstant AuxInt value for %v", v)
+ }
+ canHaveAuxInt = true
+ default:
+ f.Fatalf("unknown aux type for %s", v.Op)
+ }
+ if !canHaveAux && v.Aux != nil {
+ f.Fatalf("value %s has an Aux value %v but shouldn't", v.LongString(), v.Aux)
+ }
+ if !canHaveAuxInt && v.AuxInt != 0 {
+ f.Fatalf("value %s has an AuxInt value %d but shouldn't", v.LongString(), v.AuxInt)
+ }
+
+ for i, arg := range v.Args {
+ if arg == nil {
+ f.Fatalf("value %s has nil arg", v.LongString())
+ }
+ if v.Op != OpPhi {
+ // For non-Phi ops, memory args must be last, if present
+ if arg.Type.IsMemory() && i != len(v.Args)-1 {
+ f.Fatalf("value %s has non-final memory arg (%d < %d)", v.LongString(), i, len(v.Args)-1)
+ }
+ }
+ }
+
+ if valueMark[v.ID] {
+ f.Fatalf("value %s appears twice!", v.LongString())
+ }
+ valueMark[v.ID] = true
+
+ if v.Block != b {
+ f.Fatalf("%s.block != %s", v, b)
+ }
+ if v.Op == OpPhi && len(v.Args) != len(b.Preds) {
+ f.Fatalf("phi length %s does not match pred length %d for block %s", v.LongString(), len(b.Preds), b)
+ }
+
+ if v.Op == OpAddr {
+ if len(v.Args) == 0 {
+ f.Fatalf("no args for OpAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSB {
+ f.Fatalf("bad arg to OpAddr %v", v)
+ }
+ }
+
+ if v.Op == OpLocalAddr {
+ if len(v.Args) != 2 {
+ f.Fatalf("wrong # of args for OpLocalAddr %s", v.LongString())
+ }
+ if v.Args[0].Op != OpSP {
+ f.Fatalf("bad arg 0 to OpLocalAddr %v", v)
+ }
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 to OpLocalAddr %v", v)
+ }
+ }
+
+ if f.RegAlloc != nil && f.Config.SoftFloat && v.Type.IsFloat() {
+ f.Fatalf("unexpected floating-point type %v", v.LongString())
+ }
+
+ // Check types.
+ // TODO: more type checks?
+ switch c := f.Config; v.Op {
+ case OpSP, OpSB:
+ if v.Type != c.Types.Uintptr {
+ f.Fatalf("bad %s type: want uintptr, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpStringLen:
+ if v.Type != c.Types.Int {
+ f.Fatalf("bad %s type: want int, have %s",
+ v.Op, v.Type.String())
+ }
+ case OpLoad:
+ if !v.Args[1].Type.IsMemory() {
+ f.Fatalf("bad arg 1 type to %s: want mem, have %s",
+ v.Op, v.Args[1].Type.String())
+ }
+ case OpStore:
+ if !v.Type.IsMemory() {
+ f.Fatalf("bad %s type: want mem, have %s",
+ v.Op, v.Type.String())
+ }
+ if !v.Args[2].Type.IsMemory() {
+ f.Fatalf("bad arg 2 type to %s: want mem, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpCondSelect:
+ if !v.Args[2].Type.IsBoolean() {
+ f.Fatalf("bad arg 2 type to %s: want boolean, have %s",
+ v.Op, v.Args[2].Type.String())
+ }
+ case OpAddPtr:
+ if !v.Args[0].Type.IsPtrShaped() && v.Args[0].Type != c.Types.Uintptr {
+ f.Fatalf("bad arg 0 type to %s: want ptr, have %s", v.Op, v.Args[0].LongString())
+ }
+ if !v.Args[1].Type.IsInteger() {
+ f.Fatalf("bad arg 1 type to %s: want integer, have %s", v.Op, v.Args[1].LongString())
+ }
+
+ }
+
+ // TODO: check for cycles in values
+ }
+ }
+
+ // Check to make sure all Blocks referenced are in the function.
+ if !blockMark[f.Entry.ID] {
+ f.Fatalf("entry block %v is missing", f.Entry)
+ }
+ for _, b := range f.Blocks {
+ for _, c := range b.Preds {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("predecessor block %v for %v is missing", c, b)
+ }
+ }
+ for _, c := range b.Succs {
+ if !blockMark[c.b.ID] {
+ f.Fatalf("successor block %v for %v is missing", c, b)
+ }
+ }
+ }
+
+ if len(f.Entry.Preds) > 0 {
+ f.Fatalf("entry block %s of %s has predecessor(s) %v", f.Entry, f.Name, f.Entry.Preds)
+ }
+
+ // Check to make sure all Values referenced are in the function.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !valueMark[a.ID] {
+ f.Fatalf("%v, arg %d of %s, is missing", a, i, v.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !valueMark[c.ID] {
+ f.Fatalf("control value for %s is missing: %v", b, c)
+ }
+ }
+ }
+ for b := f.freeBlocks; b != nil; b = b.succstorage[0].b {
+ if blockMark[b.ID] {
+ f.Fatalf("used block b%d in free list", b.ID)
+ }
+ }
+ for v := f.freeValues; v != nil; v = v.argstorage[0] {
+ if valueMark[v.ID] {
+ f.Fatalf("used value v%d in free list", v.ID)
+ }
+ }
+
+ // Check to make sure all args dominate uses.
+ if f.RegAlloc == nil {
+ // Note: regalloc introduces non-dominating args.
+ // See TODO in regalloc.go.
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, arg := range v.Args {
+ x := arg.Block
+ y := b
+ if v.Op == OpPhi {
+ y = b.Preds[i].b
+ }
+ if !domCheck(f, sdom, x, y) {
+ f.Fatalf("arg %d of value %s does not dominate, arg=%s", i, v.LongString(), arg.LongString())
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !domCheck(f, sdom, c.Block, b) {
+ f.Fatalf("control value %s for %s doesn't dominate", c, b)
+ }
+ }
+ }
+ }
+
+ // Check loop construction
+ if f.RegAlloc == nil && f.pass != nil { // non-nil pass allows better-targeted debug printing
+ ln := f.loopnest()
+ if !ln.hasIrreducible {
+ po := f.postorder() // use po to avoid unreachable blocks.
+ for _, b := range po {
+ for _, s := range b.Succs {
+ bb := s.Block()
+ if ln.b2l[b.ID] == nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header {
+ f.Fatalf("block %s not in loop branches to non-header block %s in loop", b.String(), bb.String())
+ }
+ if ln.b2l[b.ID] != nil && ln.b2l[bb.ID] != nil && bb != ln.b2l[bb.ID].header && !ln.b2l[b.ID].isWithinOrEq(ln.b2l[bb.ID]) {
+ f.Fatalf("block %s in loop branches to non-header block %s in non-containing loop", b.String(), bb.String())
+ }
+ }
+ }
+ }
+ }
+
+ // Check use counts
+ uses := make([]int32, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ uses[a.ID]++
+ }
+ }
+ for _, c := range b.ControlValues() {
+ uses[c.ID]++
+ }
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses != uses[v.ID] {
+ f.Fatalf("%s has %d uses, but has Uses=%d", v, uses[v.ID], v.Uses)
+ }
+ }
+ }
+
+ memCheck(f)
+}
+
+func memCheck(f *Func) {
+ // Check that if a tuple has a memory type, it is second.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsTuple() && v.Type.FieldType(0).IsMemory() {
+ f.Fatalf("memory is first in a tuple: %s\n", v.LongString())
+ }
+ }
+ }
+
+ // Single live memory checks.
+ // These checks only work if there are no memory copies.
+ // (Memory copies introduce ambiguity about which mem value is really live;
+ // probably fixable, but it's easier to avoid the problem.)
+ // For the same reason, disable this check if some memory ops are unused.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if (v.Op == OpCopy || v.Uses == 0) && v.Type.IsMemory() {
+ return
+ }
+ }
+ if b != f.Entry && len(b.Preds) == 0 {
+ return
+ }
+ }
+
+ // Compute live memory at the end of each block.
+ lastmem := make([]*Value, f.NumBlocks())
+ ss := newSparseSet(f.NumValues())
+ for _, b := range f.Blocks {
+ // Mark overwritten memory values. Those are args of other
+ // ops that generate memory values.
+ ss.clear()
+ for _, v := range b.Values {
+ if v.Op == OpPhi || !v.Type.IsMemory() {
+ continue
+ }
+ if m := v.MemoryArg(); m != nil {
+ ss.add(m.ID)
+ }
+ }
+ // There should be at most one remaining unoverwritten memory value.
+ for _, v := range b.Values {
+ if !v.Type.IsMemory() {
+ continue
+ }
+ if ss.contains(v.ID) {
+ continue
+ }
+ if lastmem[b.ID] != nil {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], v)
+ }
+ lastmem[b.ID] = v
+ }
+ // If there is no remaining memory value, that means there was no memory update.
+ // Take any memory arg.
+ if lastmem[b.ID] == nil {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ continue
+ }
+ m := v.MemoryArg()
+ if m == nil {
+ continue
+ }
+ if lastmem[b.ID] != nil && lastmem[b.ID] != m {
+ f.Fatalf("two live memory values in %s: %s and %s", b, lastmem[b.ID], m)
+ }
+ lastmem[b.ID] = m
+ }
+ }
+ }
+ // Propagate last live memory through storeless blocks.
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ if lastmem[b.ID] != nil {
+ continue
+ }
+ for _, e := range b.Preds {
+ p := e.b
+ if lastmem[p.ID] != nil {
+ lastmem[b.ID] = lastmem[p.ID]
+ changed = true
+ break
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Check merge points.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ for i, a := range v.Args {
+ if a != lastmem[b.Preds[i].b.ID] {
+ f.Fatalf("inconsistent memory phi %s %d %s %s", v.LongString(), i, a, lastmem[b.Preds[i].b.ID])
+ }
+ }
+ }
+ }
+ }
+
+ // Check that only one memory is live at any point.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ var mem *Value // the current live memory in the block
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ continue
+ }
+ if mem == nil && len(b.Preds) > 0 {
+ // If no mem phi, take mem of any predecessor.
+ mem = lastmem[b.Preds[0].b.ID]
+ }
+ for _, a := range v.Args {
+ if a.Type.IsMemory() && a != mem {
+ f.Fatalf("two live mems @ %s: %s and %s", v, mem, a)
+ }
+ }
+ if v.Type.IsMemory() {
+ mem = v
+ }
+ }
+ }
+ }
+
+ // Check that after scheduling, phis are always first in the block.
+ if f.scheduled {
+ for _, b := range f.Blocks {
+ seenNonPhi := false
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpPhi:
+ if seenNonPhi {
+ f.Fatalf("phi after non-phi @ %s: %s", b, v)
+ }
+ default:
+ seenNonPhi = true
+ }
+ }
+ }
+ }
+}
+
+// domCheck reports whether x dominates y (including x==y).
+func domCheck(f *Func, sdom SparseTree, x, y *Block) bool {
+ if !sdom.IsAncestorEq(f.Entry, y) {
+ // unreachable - ignore
+ return true
+ }
+ return sdom.IsAncestorEq(x, y)
+}
+
+// isExactFloat32 reports whether x can be exactly represented as a float32.
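+// For example, 0.5 and 1.25 are exact float32 values, while 0.1 (needs more
+// mantissa bits than float32 has) and 1e300 (outside the float32 exponent
+// range) are not.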
+func isExactFloat32(x float64) bool {
+ // Check the mantissa is in range.
+ if bits.TrailingZeros64(math.Float64bits(x)) < 52-23 {
+ return false
+ }
+ // Check the exponent is in range. The mantissa check above is sufficient for NaN values.
+ return math.IsNaN(x) || x == float64(float32(x))
+}
diff --git a/src/cmd/compile/internal/ssa/checkbce.go b/src/cmd/compile/internal/ssa/checkbce.go
new file mode 100644
index 0000000..6a9ce2b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/checkbce.go
@@ -0,0 +1,35 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/logopt"
+
+// checkbce prints all bounds checks that are present in the function.
+// Useful to find regressions. checkbce is only activated with the
+// corresponding debug options, so it's off by default.
+// See test/checkbce.go
+func checkbce(f *Func) {
+ if f.pass.debug <= 0 && !logopt.Enabled() {
+ return
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpIsInBounds || v.Op == OpIsSliceInBounds {
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "Found %v", v.Op)
+ }
+ if logopt.Enabled() {
+ if v.Op == OpIsInBounds {
+ logopt.LogOpt(v.Pos, "isInBounds", "checkbce", f.Name)
+ }
+ if v.Op == OpIsSliceInBounds {
+ logopt.LogOpt(v.Pos, "isSliceInBounds", "checkbce", f.Name)
+ }
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/compile.go b/src/cmd/compile/internal/ssa/compile.go
new file mode 100644
index 0000000..f87ea5b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/compile.go
@@ -0,0 +1,604 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/src"
+ "fmt"
+ "hash/crc32"
+ "internal/buildcfg"
+ "io"
+ "log"
+ "math/rand"
+ "os"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "sort"
+ "strings"
+ "time"
+)
+
+// Compile is the main entry point for this package.
+// Compile modifies f so that on return:
+// · all Values in f map to 0 or 1 assembly instructions of the target architecture
+// · the order of f.Blocks is the order to emit the Blocks
+// · the order of b.Values is the order to emit the Values in each Block
+// · f has a non-nil regAlloc field
+func Compile(f *Func) {
+ // TODO: debugging - set flags to control verbosity of compiler,
+ // which phases to dump IR before/after, etc.
+ if f.Log() {
+ f.Logf("compiling %s\n", f.Name)
+ }
+
+ var rnd *rand.Rand
+ if checkEnabled {
+ seed := int64(crc32.ChecksumIEEE(([]byte)(f.Name))) ^ int64(checkRandSeed)
+ rnd = rand.New(rand.NewSource(seed))
+ }
+
+ // hook to print function & phase if panic happens
+ phaseName := "init"
+ defer func() {
+ if phaseName != "" {
+ err := recover()
+ stack := make([]byte, 16384)
+ n := runtime.Stack(stack, false)
+ stack = stack[:n]
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.flushPhases()
+ }
+ f.Fatalf("panic during %s while compiling %s:\n\n%v\n\n%s\n", phaseName, f.Name, err, stack)
+ }
+ }()
+
+ // Run all the passes
+ if f.Log() {
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase("start", "start")
+ if BuildDump[f.Name] {
+ f.dumpFile("build")
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ const logMemStats = false
+ for _, p := range passes {
+ if !f.Config.optimize && !p.required || p.disabled {
+ continue
+ }
+ f.pass = &p
+ phaseName = p.name
+ if f.Log() {
+ f.Logf(" pass %s begin\n", p.name)
+ }
+ // TODO: capture logging during this pass, add it to the HTML
+ var mStart runtime.MemStats
+ if logMemStats || p.mem {
+ runtime.ReadMemStats(&mStart)
+ }
+
+ if checkEnabled && !f.scheduled {
+ // Test that we don't depend on the value order, by randomizing
+ // the order of values in each block. See issue 18169.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values)-1; i++ {
+ j := i + rnd.Intn(len(b.Values)-i)
+ b.Values[i], b.Values[j] = b.Values[j], b.Values[i]
+ }
+ }
+ }
+
+ tStart := time.Now()
+ p.fn(f)
+ tEnd := time.Now()
+
+ // Need something less crude than "Log the whole intermediate result".
+ if f.Log() || f.HTMLWriter != nil {
+ time := tEnd.Sub(tStart).Nanoseconds()
+ var stats string
+ if logMemStats {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ stats = fmt.Sprintf("[%d ns %d allocs %d bytes]", time, nAllocs, nBytes)
+ } else {
+ stats = fmt.Sprintf("[%d ns]", time)
+ }
+
+ if f.Log() {
+ f.Logf(" pass %s end %s\n", p.name, stats)
+ printFunc(f)
+ }
+ f.HTMLWriter.WritePhase(phaseName, fmt.Sprintf("%s <span class=\"stats\">%s</span>", phaseName, stats))
+ }
+ if p.time || p.mem {
+ // Surround timing information w/ enough context to allow comparisons.
+ time := tEnd.Sub(tStart).Nanoseconds()
+ if p.time {
+ f.LogStat("TIME(ns)", time)
+ }
+ if p.mem {
+ var mEnd runtime.MemStats
+ runtime.ReadMemStats(&mEnd)
+ nBytes := mEnd.TotalAlloc - mStart.TotalAlloc
+ nAllocs := mEnd.Mallocs - mStart.Mallocs
+ f.LogStat("TIME(ns):BYTES:ALLOCS", time, nBytes, nAllocs)
+ }
+ }
+ if p.dump != nil && p.dump[f.Name] {
+ // Dump function to appropriately named file
+ f.dumpFile(phaseName)
+ }
+ if checkEnabled {
+ checkFunc(f)
+ }
+ }
+
+ if f.HTMLWriter != nil {
+ // Ensure we write any pending phases to the html
+ f.HTMLWriter.flushPhases()
+ }
+
+ if f.ruleMatches != nil {
+ var keys []string
+ for key := range f.ruleMatches {
+ keys = append(keys, key)
+ }
+ sort.Strings(keys)
+ buf := new(bytes.Buffer)
+ fmt.Fprintf(buf, "%s: ", f.Name)
+ for _, key := range keys {
+ fmt.Fprintf(buf, "%s=%d ", key, f.ruleMatches[key])
+ }
+ fmt.Fprint(buf, "\n")
+ fmt.Print(buf.String())
+ }
+
+ // Squash error printing defer
+ phaseName = ""
+}
+
+// DumpFileForPhase creates a file from the function name and phase name,
+// warning and returning nil if this is not possible.
+func (f *Func) DumpFileForPhase(phaseName string) io.WriteCloser {
+ f.dumpFileSeq++
+ fname := fmt.Sprintf("%s_%02d__%s.dump", f.Name, int(f.dumpFileSeq), phaseName)
+ fname = strings.Replace(fname, " ", "_", -1)
+ fname = strings.Replace(fname, "/", "_", -1)
+ fname = strings.Replace(fname, ":", "_", -1)
+
+ if ssaDir := os.Getenv("GOSSADIR"); ssaDir != "" {
+ fname = filepath.Join(ssaDir, fname)
+ }
+
+ fi, err := os.Create(fname)
+ if err != nil {
+ f.Warnl(src.NoXPos, "Unable to create after-phase dump file %s", fname)
+ return nil
+ }
+ return fi
+}
+
+// dumpFile creates a file from the phase name and function name.
+// Dumping is done to files to avoid buffering huge strings before
+// output.
+func (f *Func) dumpFile(phaseName string) {
+ fi := f.DumpFileForPhase(phaseName)
+ if fi != nil {
+ p := stringFuncPrinter{w: fi}
+ fprintFunc(p, f)
+ fi.Close()
+ }
+}
+
+type pass struct {
+ name string
+ fn func(*Func)
+ required bool
+ disabled bool
+ time bool // report time to run pass
+ mem bool // report mem stats to run pass
+ stats int // pass reports own "stats" (e.g., branches removed)
+ debug int // pass performs some debugging. =1 should be in error-testing-friendly Warnl format.
+ test int // pass-specific ad-hoc option, perhaps useful in development
+ dump map[string]bool // dump if function name matches
+}
+
+func (p *pass) addDump(s string) {
+ if p.dump == nil {
+ p.dump = make(map[string]bool)
+ }
+ p.dump[s] = true
+}
+
+func (p *pass) String() string {
+ if p == nil {
+ return "nil pass"
+ }
+ return p.name
+}
+
+// Run consistency checker between each phase
+var (
+ checkEnabled = false
+ checkRandSeed = 0
+)
+
+// Debug output
+var IntrinsicsDebug int
+var IntrinsicsDisable bool
+
+var BuildDebug int
+var BuildTest int
+var BuildStats int
+var BuildDump map[string]bool = make(map[string]bool) // names of functions to dump after initial build of ssa
+
+var GenssaDump map[string]bool = make(map[string]bool) // names of functions to dump after ssa has been converted to asm
+
+// PhaseOption sets the specified flag in the specified ssa phase,
+// returning empty string if this was successful or a string explaining
+// the error if it was not.
+// A version of the phase name with "_" replaced by " " is also checked for a match.
+// If the phase name begins with a '~' then the rest of the underscores-replaced-with-blanks
+// version is used as a regular expression to match the phase name(s).
+//
+// Special cases that have turned out to be useful:
+// ssa/check/on enables checking after each phase
+// ssa/all/time enables time reporting for all phases
+//
+// See gc/lex.go for dissection of the option string.
+// Example uses:
+//
+// GO_GCFLAGS=-d=ssa/generic_cse/time,ssa/generic_cse/stats,ssa/generic_cse/debug=3 ./make.bash
+//
+// BOOT_GO_GCFLAGS=-d='ssa/~^.*scc$/off' GO_GCFLAGS='-d=ssa/~^.*scc$/off' ./make.bash
+//
+func PhaseOption(phase, flag string, val int, valString string) string {
+ switch phase {
+ case "", "help":
+ lastcr := 0
+ phasenames := " check, all, build, intrinsics, genssa"
+ for _, p := range passes {
+ pn := strings.Replace(p.name, " ", "_", -1)
+ if len(pn)+len(phasenames)-lastcr > 70 {
+ phasenames += "\n "
+ lastcr = len(phasenames)
+ phasenames += pn
+ } else {
+ phasenames += ", " + pn
+ }
+ }
+ return `PhaseOptions usage:
+
+ go tool compile -d=ssa/<phase>/<flag>[=<value>|<function_name>]
+
+where:
+
+- <phase> is one of:
+` + phasenames + `
+
+- <flag> is one of:
+ on, off, debug, mem, time, test, stats, dump, seed
+
+- <value> defaults to 1
+
+- <function_name> is required for the "dump" flag, and specifies the
+ name of function to dump after <phase>
+
+Phase "all" supports flags "time", "mem", and "dump".
+Phase "intrinsics" supports flags "on", "off", and "debug".
+Phase "genssa" (assembly generation) supports the flag "dump".
+
+If the "dump" flag is specified, the output is written on a file named
+<phase>__<function_name>_<seq>.dump; otherwise it is directed to stdout.
+
+Examples:
+
+ -d=ssa/check/on
+enables checking after each phase
+
+ -d=ssa/check/seed=1234
+enables checking after each phase, using 1234 to seed the PRNG
+used for value order randomization
+
+ -d=ssa/all/time
+enables time reporting for all phases
+
+ -d=ssa/prove/debug=2
+sets debugging level to 2 in the prove pass
+
+Be aware that when "/debug=X" is applied to a pass, some passes
+will emit debug output for all functions, and other passes will
+only emit debug output for functions that match the current
+GOSSAFUNC value.
+
+Multiple flags can be passed at once, by separating them with
+commas. For example:
+
+ -d=ssa/check/on,ssa/all/time
+`
+ }
+
+ if phase == "check" {
+ switch flag {
+ case "on":
+ checkEnabled = val != 0
+ debugPoset = checkEnabled // also turn on advanced self-checking in prove's datastructure
+ return ""
+ case "off":
+ checkEnabled = val == 0
+ debugPoset = checkEnabled
+ return ""
+ case "seed":
+ checkEnabled = true
+ checkRandSeed = val
+ debugPoset = checkEnabled
+ return ""
+ }
+ }
+
+ alltime := false
+ allmem := false
+ alldump := false
+ if phase == "all" {
+ switch flag {
+ case "time":
+ alltime = val != 0
+ case "mem":
+ allmem = val != 0
+ case "dump":
+ alldump = val != 0
+ if alldump {
+ BuildDump[valString] = true
+ GenssaDump[valString] = true
+ }
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/all/{time,mem,dump=function_name})", flag, phase)
+ }
+ }
+
+ if phase == "intrinsics" {
+ switch flag {
+ case "on":
+ IntrinsicsDisable = val == 0
+ case "off":
+ IntrinsicsDisable = val != 0
+ case "debug":
+ IntrinsicsDebug = val
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/intrinsics/{on,off,debug})", flag, phase)
+ }
+ return ""
+ }
+ if phase == "build" {
+ switch flag {
+ case "debug":
+ BuildDebug = val
+ case "test":
+ BuildTest = val
+ case "stats":
+ BuildStats = val
+ case "dump":
+ BuildDump[valString] = true
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/build/{debug,test,stats,dump=function_name})", flag, phase)
+ }
+ return ""
+ }
+ if phase == "genssa" {
+ switch flag {
+ case "dump":
+ GenssaDump[valString] = true
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option (expected ssa/genssa/dump=function_name)", flag, phase)
+ }
+ return ""
+ }
+
+ underphase := strings.Replace(phase, "_", " ", -1)
+ var re *regexp.Regexp
+ if phase[0] == '~' {
+ r, ok := regexp.Compile(underphase[1:])
+ if ok != nil {
+ return fmt.Sprintf("Error %s in regexp for phase %s, flag %s", ok.Error(), phase, flag)
+ }
+ re = r
+ }
+ matchedOne := false
+ for i, p := range passes {
+ if phase == "all" {
+ p.time = alltime
+ p.mem = allmem
+ if alldump {
+ p.addDump(valString)
+ }
+ passes[i] = p
+ matchedOne = true
+ } else if p.name == phase || p.name == underphase || re != nil && re.MatchString(p.name) {
+ switch flag {
+ case "on":
+ p.disabled = val == 0
+ case "off":
+ p.disabled = val != 0
+ case "time":
+ p.time = val != 0
+ case "mem":
+ p.mem = val != 0
+ case "debug":
+ p.debug = val
+ case "stats":
+ p.stats = val
+ case "test":
+ p.test = val
+ case "dump":
+ p.addDump(valString)
+ default:
+ return fmt.Sprintf("Did not find a flag matching %s in -d=ssa/%s debug option", flag, phase)
+ }
+ if p.disabled && p.required {
+ return fmt.Sprintf("Cannot disable required SSA phase %s using -d=ssa/%s debug option", phase, phase)
+ }
+ passes[i] = p
+ matchedOne = true
+ }
+ }
+ if matchedOne {
+ return ""
+ }
+ return fmt.Sprintf("Did not find a phase matching %s in -d=ssa/... debug option", phase)
+}
+
+// list of passes for the compiler
+var passes = [...]pass{
+ // TODO: combine phielim and copyelim into a single pass?
+ {name: "number lines", fn: numberLines, required: true},
+ {name: "early phielim", fn: phielim},
+ {name: "early copyelim", fn: copyelim},
+ {name: "early deadcode", fn: deadcode}, // remove generated dead code to avoid doing pointless work during opt
+ {name: "short circuit", fn: shortcircuit},
+ {name: "decompose user", fn: decomposeUser, required: true},
+ {name: "pre-opt deadcode", fn: deadcode},
+ {name: "opt", fn: opt, required: true}, // NB: some generic rules know the name of the opt pass. TODO: split required rules and optimizing rules
+ {name: "zero arg cse", fn: zcse, required: true}, // required to merge OpSB values
+ {name: "opt deadcode", fn: deadcode, required: true}, // remove any blocks orphaned during opt
+ {name: "generic cse", fn: cse},
+ {name: "phiopt", fn: phiopt},
+ {name: "gcse deadcode", fn: deadcode, required: true}, // clean out after cse and phiopt
+ {name: "nilcheckelim", fn: nilcheckelim},
+ {name: "prove", fn: prove},
+ {name: "early fuse", fn: fuseEarly},
+ {name: "decompose builtin", fn: decomposeBuiltIn, required: true},
+ {name: "expand calls", fn: expandCalls, required: true},
+ {name: "softfloat", fn: softfloat, required: true},
+ {name: "late opt", fn: opt, required: true}, // TODO: split required rules and optimizing rules
+ {name: "dead auto elim", fn: elimDeadAutosGeneric},
+ {name: "generic deadcode", fn: deadcode, required: true}, // remove dead stores, which otherwise mess up store chain
+ {name: "check bce", fn: checkbce},
+ {name: "branchelim", fn: branchelim},
+ {name: "late fuse", fn: fuseLate},
+ {name: "dse", fn: dse},
+ {name: "writebarrier", fn: writebarrier, required: true}, // expand write barrier ops
+ {name: "insert resched checks", fn: insertLoopReschedChecks,
+ disabled: !buildcfg.Experiment.PreemptibleLoops}, // insert resched checks in loops.
+ {name: "lower", fn: lower, required: true},
+ {name: "addressing modes", fn: addressingModes, required: false},
+ {name: "lowered deadcode for cse", fn: deadcode}, // deadcode immediately before CSE avoids CSE making dead values live again
+ {name: "lowered cse", fn: cse},
+ {name: "elim unread autos", fn: elimUnreadAutos},
+ {name: "tighten tuple selectors", fn: tightenTupleSelectors, required: true},
+ {name: "lowered deadcode", fn: deadcode, required: true},
+ {name: "checkLower", fn: checkLower, required: true},
+ {name: "late phielim", fn: phielim},
+ {name: "late copyelim", fn: copyelim},
+ {name: "tighten", fn: tighten}, // move values closer to their uses
+ {name: "late deadcode", fn: deadcode},
+ {name: "critical", fn: critical, required: true}, // remove critical edges
+ {name: "phi tighten", fn: phiTighten}, // place rematerializable phi args near uses to reduce value lifetimes
+ {name: "likelyadjust", fn: likelyadjust},
+ {name: "layout", fn: layout, required: true}, // schedule blocks
+ {name: "schedule", fn: schedule, required: true}, // schedule values
+ {name: "late nilcheck", fn: nilcheckelim2},
+ {name: "flagalloc", fn: flagalloc, required: true}, // allocate flags register
+ {name: "regalloc", fn: regalloc, required: true}, // allocate int & float registers + stack slots
+ {name: "loop rotate", fn: loopRotate},
+ {name: "stackframe", fn: stackframe, required: true},
+ {name: "trim", fn: trim}, // remove empty blocks
+}
+
+// Double-check phase ordering constraints.
+// This code is intended to document the ordering requirements
+// between different phases. It does not override the passes
+// list above.
+type constraint struct {
+ a, b string // a must come before b
+}
+
+var passOrder = [...]constraint{
+ // "insert resched checks" uses mem, better to clean out stores first.
+ {"dse", "insert resched checks"},
+ // insert resched checks adds new blocks containing generic instructions
+ {"insert resched checks", "lower"},
+ {"insert resched checks", "tighten"},
+
+ // prove relies on common-subexpression elimination for maximum benefits.
+ {"generic cse", "prove"},
+ // deadcode after prove to eliminate all new dead blocks.
+ {"prove", "generic deadcode"},
+ // common-subexpression before dead-store elim, so that we recognize
+ // when two address expressions are the same.
+ {"generic cse", "dse"},
+ // cse substantially improves nilcheckelim efficacy
+ {"generic cse", "nilcheckelim"},
+ // allow deadcode to clean up after nilcheckelim
+ {"nilcheckelim", "generic deadcode"},
+ // nilcheckelim generates sequences of plain basic blocks
+ {"nilcheckelim", "late fuse"},
+ // nilcheckelim relies on opt to rewrite user nil checks
+ {"opt", "nilcheckelim"},
+ // tighten will be most effective when as many values have been removed as possible
+ {"generic deadcode", "tighten"},
+ {"generic cse", "tighten"},
+ // checkbce needs the values removed
+ {"generic deadcode", "check bce"},
+ // don't run optimization pass until we've decomposed builtin objects
+ {"decompose builtin", "late opt"},
+ // decompose builtin is the last pass that may introduce new float ops, so run softfloat after it
+ {"decompose builtin", "softfloat"},
+ // tuple selectors must be tightened to generators and de-duplicated before scheduling
+ {"tighten tuple selectors", "schedule"},
+ // remove critical edges before phi tighten, so that phi args get better placement
+ {"critical", "phi tighten"},
+ // don't layout blocks until critical edges have been removed
+ {"critical", "layout"},
+ // regalloc requires the removal of all critical edges
+ {"critical", "regalloc"},
+ // regalloc requires all the values in a block to be scheduled
+ {"schedule", "regalloc"},
+ // checkLower must run after lowering & subsequent dead code elim
+ {"lower", "checkLower"},
+ {"lowered deadcode", "checkLower"},
+ // late nilcheck needs instructions to be scheduled.
+ {"schedule", "late nilcheck"},
+ // flagalloc needs instructions to be scheduled.
+ {"schedule", "flagalloc"},
+ // regalloc needs flags to be allocated first.
+ {"flagalloc", "regalloc"},
+ // loopRotate will confuse regalloc.
+ {"regalloc", "loop rotate"},
+ // stackframe needs to know about spilled registers.
+ {"regalloc", "stackframe"},
+ // trim needs regalloc to be done first.
+ {"regalloc", "trim"},
+}
+
+func init() {
+ for _, c := range passOrder {
+ a, b := c.a, c.b
+ i := -1
+ j := -1
+ for k, p := range passes {
+ if p.name == a {
+ i = k
+ }
+ if p.name == b {
+ j = k
+ }
+ }
+ if i < 0 {
+ log.Panicf("pass %s not found", a)
+ }
+ if j < 0 {
+ log.Panicf("pass %s not found", b)
+ }
+ if i >= j {
+ log.Panicf("passes %s and %s out of order", a, b)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/config.go b/src/cmd/compile/internal/ssa/config.go
new file mode 100644
index 0000000..5ab7240
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/config.go
@@ -0,0 +1,369 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "internal/buildcfg"
+)
+
+// A Config holds readonly compilation information.
+// It is created once, early during compilation,
+// and shared across all compilations.
+type Config struct {
+ arch string // "amd64", etc.
+ PtrSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.PtrSize
+ RegSize int64 // 4 or 8; copy of cmd/internal/sys.Arch.RegSize
+ Types Types
+ lowerBlock blockRewriter // lowering function
+ lowerValue valueRewriter // lowering function
+ splitLoad valueRewriter // function for splitting merged load ops; only used on some architectures
+ registers []Register // machine registers
+ gpRegMask regMask // general purpose integer register mask
+ fpRegMask regMask // floating point register mask
+ fp32RegMask regMask // floating point register mask
+ fp64RegMask regMask // floating point register mask
+ specialRegMask regMask // special register mask
+ intParamRegs []int8 // register numbers of integer param (in/out) registers
+ floatParamRegs []int8 // register numbers of floating param (in/out) registers
+ ABI1 *abi.ABIConfig // "ABIInternal" under development // TODO change comment when this becomes current
+ ABI0 *abi.ABIConfig
+ GCRegMap []*Register // garbage collector register map, by GC register index
+ FPReg int8 // register number of frame pointer, -1 if not used
+ LinkReg int8 // register number of link register if it is a general purpose register, -1 if not used
+ hasGReg bool // has hardware g register
+ ctxt *obj.Link // Generic arch information
+ optimize bool // Do optimization
+ noDuffDevice bool // Don't use Duff's device
+ useSSE bool // Use SSE for non-float operations
+ useAvg bool // Use optimizations that need Avg* operations
+ useHmul bool // Use optimizations that need Hmul* operations
+ SoftFloat bool //
+ Race bool // race detector enabled
+ BigEndian bool //
+ UseFMA bool // Use hardware FMA operation
+}
+
+type (
+ blockRewriter func(*Block) bool
+ valueRewriter func(*Value) bool
+)
+
+type Types struct {
+ Bool *types.Type
+ Int8 *types.Type
+ Int16 *types.Type
+ Int32 *types.Type
+ Int64 *types.Type
+ UInt8 *types.Type
+ UInt16 *types.Type
+ UInt32 *types.Type
+ UInt64 *types.Type
+ Int *types.Type
+ Float32 *types.Type
+ Float64 *types.Type
+ UInt *types.Type
+ Uintptr *types.Type
+ String *types.Type
+ BytePtr *types.Type // TODO: use unsafe.Pointer instead?
+ Int32Ptr *types.Type
+ UInt32Ptr *types.Type
+ IntPtr *types.Type
+ UintptrPtr *types.Type
+ Float32Ptr *types.Type
+ Float64Ptr *types.Type
+ BytePtrPtr *types.Type
+}
+
+// NewTypes creates and populates a Types.
+func NewTypes() *Types {
+ t := new(Types)
+ t.SetTypPtrs()
+ return t
+}
+
+// SetTypPtrs populates t.
+func (t *Types) SetTypPtrs() {
+ t.Bool = types.Types[types.TBOOL]
+ t.Int8 = types.Types[types.TINT8]
+ t.Int16 = types.Types[types.TINT16]
+ t.Int32 = types.Types[types.TINT32]
+ t.Int64 = types.Types[types.TINT64]
+ t.UInt8 = types.Types[types.TUINT8]
+ t.UInt16 = types.Types[types.TUINT16]
+ t.UInt32 = types.Types[types.TUINT32]
+ t.UInt64 = types.Types[types.TUINT64]
+ t.Int = types.Types[types.TINT]
+ t.Float32 = types.Types[types.TFLOAT32]
+ t.Float64 = types.Types[types.TFLOAT64]
+ t.UInt = types.Types[types.TUINT]
+ t.Uintptr = types.Types[types.TUINTPTR]
+ t.String = types.Types[types.TSTRING]
+ t.BytePtr = types.NewPtr(types.Types[types.TUINT8])
+ t.Int32Ptr = types.NewPtr(types.Types[types.TINT32])
+ t.UInt32Ptr = types.NewPtr(types.Types[types.TUINT32])
+ t.IntPtr = types.NewPtr(types.Types[types.TINT])
+ t.UintptrPtr = types.NewPtr(types.Types[types.TUINTPTR])
+ t.Float32Ptr = types.NewPtr(types.Types[types.TFLOAT32])
+ t.Float64Ptr = types.NewPtr(types.Types[types.TFLOAT64])
+ t.BytePtrPtr = types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))
+}
+
+type Logger interface {
+ // Logf logs a message from the compiler.
+ Logf(string, ...interface{})
+
+	// Log reports whether logging is not a no-op;
+	// some logging calls account for more than a few heap allocations.
+ Log() bool
+
+	// Fatalf reports a compiler error and exits.
+ Fatalf(pos src.XPos, msg string, args ...interface{})
+
+ // Warnl writes compiler messages in the form expected by "errorcheck" tests
+ Warnl(pos src.XPos, fmt_ string, args ...interface{})
+
+ // Forwards the Debug flags from gc
+ Debug_checknil() bool
+}
+
+type Frontend interface {
+ CanSSA(t *types.Type) bool
+
+ Logger
+
+ // StringData returns a symbol pointing to the given string's contents.
+ StringData(string) *obj.LSym
+
+ // Auto returns a Node for an auto variable of the given type.
+ // The SSA compiler uses this function to allocate space for spills.
+ Auto(src.XPos, *types.Type) *ir.Name
+
+ // Given the name for a compound type, returns the name we should use
+ // for the parts of that compound type.
+ SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot
+
+ // Line returns a string describing the given position.
+ Line(src.XPos) string
+
+ // AllocFrame assigns frame offsets to all live auto variables.
+ AllocFrame(f *Func)
+
+ // Syslook returns a symbol of the runtime function/variable with the
+ // given name.
+ Syslook(string) *obj.LSym
+
+	// UseWriteBarrier reports whether the write barrier is enabled
+ UseWriteBarrier() bool
+
+ // SetWBPos indicates that a write barrier has been inserted
+ // in this function at position pos.
+ SetWBPos(pos src.XPos)
+
+ // MyImportPath provides the import name (roughly, the package) for the function being compiled.
+ MyImportPath() string
+}
+
+// NewConfig returns a new configuration object for the given architecture.
+func NewConfig(arch string, types Types, ctxt *obj.Link, optimize, softfloat bool) *Config {
+ c := &Config{arch: arch, Types: types}
+ c.useAvg = true
+ c.useHmul = true
+ switch arch {
+ case "amd64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockAMD64
+ c.lowerValue = rewriteValueAMD64
+ c.splitLoad = rewriteValueAMD64splitload
+ c.registers = registersAMD64[:]
+ c.gpRegMask = gpRegMaskAMD64
+ c.fpRegMask = fpRegMaskAMD64
+ c.specialRegMask = specialRegMaskAMD64
+ c.intParamRegs = paramIntRegAMD64
+ c.floatParamRegs = paramFloatRegAMD64
+ c.FPReg = framepointerRegAMD64
+ c.LinkReg = linkRegAMD64
+ c.hasGReg = true
+ case "386":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlock386
+ c.lowerValue = rewriteValue386
+ c.splitLoad = rewriteValue386splitload
+ c.registers = registers386[:]
+ c.gpRegMask = gpRegMask386
+ c.fpRegMask = fpRegMask386
+ c.FPReg = framepointerReg386
+ c.LinkReg = linkReg386
+ c.hasGReg = false
+ case "arm":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockARM
+ c.lowerValue = rewriteValueARM
+ c.registers = registersARM[:]
+ c.gpRegMask = gpRegMaskARM
+ c.fpRegMask = fpRegMaskARM
+ c.FPReg = framepointerRegARM
+ c.LinkReg = linkRegARM
+ c.hasGReg = true
+ case "arm64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockARM64
+ c.lowerValue = rewriteValueARM64
+ c.registers = registersARM64[:]
+ c.gpRegMask = gpRegMaskARM64
+ c.fpRegMask = fpRegMaskARM64
+ c.intParamRegs = paramIntRegARM64
+ c.floatParamRegs = paramFloatRegARM64
+ c.FPReg = framepointerRegARM64
+ c.LinkReg = linkRegARM64
+ c.hasGReg = true
+ c.noDuffDevice = buildcfg.GOOS == "darwin" || buildcfg.GOOS == "ios" // darwin linker cannot handle BR26 reloc with non-zero addend
+ case "ppc64":
+ c.BigEndian = true
+ fallthrough
+ case "ppc64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockPPC64
+ c.lowerValue = rewriteValuePPC64
+ c.registers = registersPPC64[:]
+ c.gpRegMask = gpRegMaskPPC64
+ c.fpRegMask = fpRegMaskPPC64
+ c.intParamRegs = paramIntRegPPC64
+ c.floatParamRegs = paramFloatRegPPC64
+ c.FPReg = framepointerRegPPC64
+ c.LinkReg = linkRegPPC64
+ c.hasGReg = true
+ case "mips64":
+ c.BigEndian = true
+ fallthrough
+ case "mips64le":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockMIPS64
+ c.lowerValue = rewriteValueMIPS64
+ c.registers = registersMIPS64[:]
+ c.gpRegMask = gpRegMaskMIPS64
+ c.fpRegMask = fpRegMaskMIPS64
+ c.specialRegMask = specialRegMaskMIPS64
+ c.FPReg = framepointerRegMIPS64
+ c.LinkReg = linkRegMIPS64
+ c.hasGReg = true
+ case "s390x":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockS390X
+ c.lowerValue = rewriteValueS390X
+ c.registers = registersS390X[:]
+ c.gpRegMask = gpRegMaskS390X
+ c.fpRegMask = fpRegMaskS390X
+ c.FPReg = framepointerRegS390X
+ c.LinkReg = linkRegS390X
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.BigEndian = true
+ case "mips":
+ c.BigEndian = true
+ fallthrough
+ case "mipsle":
+ c.PtrSize = 4
+ c.RegSize = 4
+ c.lowerBlock = rewriteBlockMIPS
+ c.lowerValue = rewriteValueMIPS
+ c.registers = registersMIPS[:]
+ c.gpRegMask = gpRegMaskMIPS
+ c.fpRegMask = fpRegMaskMIPS
+ c.specialRegMask = specialRegMaskMIPS
+ c.FPReg = framepointerRegMIPS
+ c.LinkReg = linkRegMIPS
+ c.hasGReg = true
+ c.noDuffDevice = true
+ case "riscv64":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockRISCV64
+ c.lowerValue = rewriteValueRISCV64
+ c.registers = registersRISCV64[:]
+ c.gpRegMask = gpRegMaskRISCV64
+ c.fpRegMask = fpRegMaskRISCV64
+ c.FPReg = framepointerRegRISCV64
+ c.hasGReg = true
+ case "wasm":
+ c.PtrSize = 8
+ c.RegSize = 8
+ c.lowerBlock = rewriteBlockWasm
+ c.lowerValue = rewriteValueWasm
+ c.registers = registersWasm[:]
+ c.gpRegMask = gpRegMaskWasm
+ c.fpRegMask = fpRegMaskWasm
+ c.fp32RegMask = fp32RegMaskWasm
+ c.fp64RegMask = fp64RegMaskWasm
+ c.FPReg = framepointerRegWasm
+ c.LinkReg = linkRegWasm
+ c.hasGReg = true
+ c.noDuffDevice = true
+ c.useAvg = false
+ c.useHmul = false
+ default:
+ ctxt.Diag("arch %s not implemented", arch)
+ }
+ c.ctxt = ctxt
+ c.optimize = optimize
+ c.useSSE = true
+ c.UseFMA = true
+ c.SoftFloat = softfloat
+ if softfloat {
+ c.floatParamRegs = nil // no FP registers in softfloat mode
+ }
+
+ c.ABI0 = abi.NewABIConfig(0, 0, ctxt.FixedFrameSize())
+ c.ABI1 = abi.NewABIConfig(len(c.intParamRegs), len(c.floatParamRegs), ctxt.FixedFrameSize())
+
+ // On Plan 9, floating point operations are not allowed in note handler.
+ if buildcfg.GOOS == "plan9" {
+ // Don't use FMA on Plan 9
+ c.UseFMA = false
+
+ // Don't use Duff's device and SSE on Plan 9 AMD64.
+ if arch == "amd64" {
+ c.noDuffDevice = true
+ c.useSSE = false
+ }
+ }
+
+ if ctxt.Flag_shared {
+ // LoweredWB is secretly a CALL and CALLs on 386 in
+ // shared mode get rewritten by obj6.go to go through
+ // the GOT, which clobbers BX.
+ opcodeTable[Op386LoweredWB].reg.clobbers |= 1 << 3 // BX
+ }
+
+ // Create the GC register map index.
+ // TODO: This is only used for debug printing. Maybe export config.registers?
+ gcRegMapSize := int16(0)
+ for _, r := range c.registers {
+ if r.gcNum+1 > gcRegMapSize {
+ gcRegMapSize = r.gcNum + 1
+ }
+ }
+ c.GCRegMap = make([]*Register, gcRegMapSize)
+ for i, r := range c.registers {
+ if r.gcNum != -1 {
+ c.GCRegMap[r.gcNum] = &c.registers[i]
+ }
+ }
+
+ return c
+}
+
+func (c *Config) Ctxt() *obj.Link { return c.ctxt }
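+
+// A minimal sketch of how a Config is typically constructed (the ctxt value is
+// a placeholder here; in the compiler it is the frontend's *obj.Link):
+//
+//	types := NewTypes()
+//	conf := NewConfig("amd64", *types, ctxt, true /* optimize */, false /* softfloat */)
+//	_ = conf.Ctxt() // the same *obj.Link passed in above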
diff --git a/src/cmd/compile/internal/ssa/copyelim.go b/src/cmd/compile/internal/ssa/copyelim.go
new file mode 100644
index 0000000..17f6512
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim.go
@@ -0,0 +1,84 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// copyelim removes all uses of OpCopy values from f.
+// A subsequent deadcode pass is needed to actually remove the copies.
+func copyelim(f *Func) {
+ // Modify all values so no arg (including args
+ // of OpCopy) is a copy.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ }
+ }
+
+ // Update block control values.
+ for _, b := range f.Blocks {
+ for i, v := range b.ControlValues() {
+ if v.Op == OpCopy {
+ b.ReplaceControl(i, v.Args[0])
+ }
+ }
+ }
+
+ // Update named values.
+ for _, name := range f.Names {
+ values := f.NamedValues[*name]
+ for i, v := range values {
+ if v.Op == OpCopy {
+ values[i] = v.Args[0]
+ }
+ }
+ }
+}
+
+// copySource returns the (non-copy) op which is the
+// ultimate source of v. v must be a copy op.
+func copySource(v *Value) *Value {
+ w := v.Args[0]
+
+ // This loop is just:
+ // for w.Op == OpCopy {
+ // w = w.Args[0]
+ // }
+ // but we take some extra care to make sure we
+ // don't get stuck in an infinite loop.
+ // Infinite copy loops may happen in unreachable code.
+ // (TODO: or can they? Needs a test.)
+ slow := w
+ var advance bool
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ if w == slow {
+ w.reset(OpUnknown)
+ break
+ }
+ if advance {
+ slow = slow.Args[0]
+ }
+ advance = !advance
+ }
+
+ // The answer is w. Update all the copies we saw
+ // to point directly to w. Doing this update makes
+ // sure that we don't end up doing O(n^2) work
+ // for a chain of n copies.
+ for v != w {
+ x := v.Args[0]
+ v.SetArg(0, w)
+ v = x
+ }
+ return w
+}
+
+// copyelimValue ensures that no args of v are copies.
+func copyelimValue(v *Value) {
+ for i, a := range v.Args {
+ if a.Op == OpCopy {
+ v.SetArg(i, copySource(a))
+ }
+ }
+}
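+
+// For illustration (schematic SSA notation): a chain like
+//
+//	v1 = Copy <t> v0
+//	v2 = Copy <t> v1
+//	v3 = Add64 <t> v2 x
+//
+// becomes, after copyelim,
+//
+//	v3 = Add64 <t> v0 x
+//
+// The Copy values themselves are left in place for a later deadcode pass.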
diff --git a/src/cmd/compile/internal/ssa/copyelim_test.go b/src/cmd/compile/internal/ssa/copyelim_test.go
new file mode 100644
index 0000000..fe31b12
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/copyelim_test.go
@@ -0,0 +1,41 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+func BenchmarkCopyElim1(b *testing.B) { benchmarkCopyElim(b, 1) }
+func BenchmarkCopyElim10(b *testing.B) { benchmarkCopyElim(b, 10) }
+func BenchmarkCopyElim100(b *testing.B) { benchmarkCopyElim(b, 100) }
+func BenchmarkCopyElim1000(b *testing.B) { benchmarkCopyElim(b, 1000) }
+func BenchmarkCopyElim10000(b *testing.B) { benchmarkCopyElim(b, 10000) }
+func BenchmarkCopyElim100000(b *testing.B) { benchmarkCopyElim(b, 100000) }
+
+func benchmarkCopyElim(b *testing.B, n int) {
+ c := testConfig(b)
+
+ values := make([]interface{}, 0, n+2)
+ values = append(values, Valu("mem", OpInitMem, types.TypeMem, 0, nil))
+ last := "mem"
+ for i := 0; i < n; i++ {
+ name := fmt.Sprintf("copy%d", i)
+ values = append(values, Valu(name, OpCopy, types.TypeMem, 0, nil, last))
+ last = name
+ }
+ values = append(values, Exit(last))
+	// Reverse the values array to make it harder for copyelim.
+ for i := 0; i < len(values)/2; i++ {
+ values[i], values[len(values)-1-i] = values[len(values)-1-i], values[i]
+ }
+
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", Bloc("entry", values...))
+ Copyelim(fun.f)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/critical.go b/src/cmd/compile/internal/ssa/critical.go
new file mode 100644
index 0000000..500ce3a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/critical.go
@@ -0,0 +1,115 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// critical splits critical edges (those that go from a block with
+// more than one outedge to a block with more than one inedge).
+// Regalloc wants a critical-edge-free CFG so it can implement phi values.
+func critical(f *Func) {
+ // maps from phi arg ID to the new block created for that argument
+ blocks := make([]*Block, f.NumValues())
+ // need to iterate over f.Blocks without range, as we might
+ // need to split critical edges on newly constructed blocks
+ for j := 0; j < len(f.Blocks); j++ {
+ b := f.Blocks[j]
+ if len(b.Preds) <= 1 {
+ continue
+ }
+
+ var phi *Value
+		// Determine whether we've got only a single phi in this
+		// block; that is easier to handle than the general
+		// case of a block with multiple phi values.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if phi != nil {
+ phi = nil
+ break
+ }
+ phi = v
+ }
+ }
+
+ // reset our block map
+ if phi != nil {
+ for _, v := range phi.Args {
+ blocks[v.ID] = nil
+ }
+ }
+
+ // split input edges coming from multi-output blocks.
+ for i := 0; i < len(b.Preds); {
+ e := b.Preds[i]
+ p := e.b
+ pi := e.i
+ if p.Kind == BlockPlain {
+ i++
+ continue // only single output block
+ }
+
+ var d *Block // new block used to remove critical edge
+ reusedBlock := false // if true, then this is not the first use of this block
+ if phi != nil {
+ argID := phi.Args[i].ID
+ // find or record the block that we used to split
+ // critical edges for this argument
+ if d = blocks[argID]; d == nil {
+					// splitting doesn't necessarily remove the critical edge;
+					// since we're iterating over len(f.Blocks) above, the new
+					// blocks will be re-examined.
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ blocks[argID] = d
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ } else {
+ reusedBlock = true
+ }
+ } else {
+ // no existing block, so allocate a new block
+ // to place on the edge
+ d = f.NewBlock(BlockPlain)
+ d.Pos = p.Pos
+ if f.pass.debug > 0 {
+ f.Warnl(p.Pos, "split critical edge")
+ }
+ }
+
+			// if this is not the first argument for the
+			// block, then we need to remove the
+			// corresponding elements from the block
+			// predecessors and phi args
+ if reusedBlock {
+ // Add p->d edge
+ p.Succs[pi] = Edge{d, len(d.Preds)}
+ d.Preds = append(d.Preds, Edge{p, pi})
+
+ // Remove p as a predecessor from b.
+ b.removePred(i)
+
+ // Update corresponding phi args
+ b.removePhiArg(phi, i)
+
+ // splitting occasionally leads to a phi having
+ // a single argument (occurs with -N)
+				// TODO(cuonglm,khr): replace this with phielimValue, and
+				// make removePhiArg incorporate that.
+ if len(b.Preds) == 1 {
+ phi.Op = OpCopy
+ }
+ // Don't increment i in this case because we moved
+ // an unprocessed predecessor down into slot i.
+ } else {
+ // splice it in
+ p.Succs[pi] = Edge{d, 0}
+ b.Preds[i] = Edge{d, 0}
+ d.Preds = append(d.Preds, Edge{p, pi})
+ d.Succs = append(d.Succs, Edge{b, i})
+ i++
+ }
+ }
+ }
+}
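+
+// For illustration: if a block p ends in a conditional branch (two successors)
+// and one of its targets b also has another predecessor, the edge p->b is
+// critical. Splitting it inserts an empty BlockPlain d so the path becomes
+// p->d->b; regalloc can then place phi-resolving moves in d without disturbing
+// p's other successor.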
diff --git a/src/cmd/compile/internal/ssa/cse.go b/src/cmd/compile/internal/ssa/cse.go
new file mode 100644
index 0000000..ade5e06
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse.go
@@ -0,0 +1,373 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+// cse does common-subexpression elimination on the Function.
+// Values are just relinked, nothing is deleted. A subsequent deadcode
+// pass is required to actually remove duplicate expressions.
+func cse(f *Func) {
+ // Two values are equivalent if they satisfy the following definition:
+ // equivalent(v, w):
+ // v.op == w.op
+ // v.type == w.type
+ // v.aux == w.aux
+ // v.auxint == w.auxint
+ // len(v.args) == len(w.args)
+ // v.block == w.block if v.op == OpPhi
+ // equivalent(v.args[i], w.args[i]) for i in 0..len(v.args)-1
+
+ // The algorithm searches for a partition of f's values into
+ // equivalence classes using the above definition.
+ // It starts with a coarse partition and iteratively refines it
+ // until it reaches a fixed point.
+
+ // Make initial coarse partitions by using a subset of the conditions above.
+ a := make([]*Value, 0, f.NumValues())
+ if f.auxmap == nil {
+ f.auxmap = auxmap{}
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsMemory() {
+ continue // memory values can never cse
+ }
+ if f.auxmap[v.Aux] == 0 {
+ f.auxmap[v.Aux] = int32(len(f.auxmap)) + 1
+ }
+ a = append(a, v)
+ }
+ }
+ partition := partitionValues(a, f.auxmap)
+
+ // map from value id back to eqclass id
+ valueEqClass := make([]ID, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // Use negative equivalence class #s for unique values.
+ valueEqClass[v.ID] = -v.ID
+ }
+ }
+ var pNum ID = 1
+ for _, e := range partition {
+ if f.pass.debug > 1 && len(e) > 500 {
+ fmt.Printf("CSE.large partition (%d): ", len(e))
+ for j := 0; j < 3; j++ {
+ fmt.Printf("%s ", e[j].LongString())
+ }
+ fmt.Println()
+ }
+
+ for _, v := range e {
+ valueEqClass[v.ID] = pNum
+ }
+ if f.pass.debug > 2 && len(e) > 1 {
+ fmt.Printf("CSE.partition #%d:", pNum)
+ for _, v := range e {
+ fmt.Printf(" %s", v.String())
+ }
+ fmt.Printf("\n")
+ }
+ pNum++
+ }
+
+ // Split equivalence classes at points where they have
+ // non-equivalent arguments. Repeat until we can't find any
+ // more splits.
+ var splitPoints []int
+	byArgClass := new(partitionByArgClass) // reusable partitionByArgClass to reduce allocations
+ for {
+ changed := false
+
+ // partition can grow in the loop. By not using a range loop here,
+ // we process new additions as they arrive, avoiding O(n^2) behavior.
+ for i := 0; i < len(partition); i++ {
+ e := partition[i]
+
+ if opcodeTable[e[0].Op].commutative {
+ // Order the first two args before comparison.
+ for _, v := range e {
+ if valueEqClass[v.Args[0].ID] > valueEqClass[v.Args[1].ID] {
+ v.Args[0], v.Args[1] = v.Args[1], v.Args[0]
+ }
+ }
+ }
+
+ // Sort by eq class of arguments.
+ byArgClass.a = e
+ byArgClass.eqClass = valueEqClass
+ sort.Sort(byArgClass)
+
+ // Find split points.
+ splitPoints = append(splitPoints[:0], 0)
+ for j := 1; j < len(e); j++ {
+ v, w := e[j-1], e[j]
+ // Note: commutative args already correctly ordered by byArgClass.
+ eqArgs := true
+ for k, a := range v.Args {
+ b := w.Args[k]
+ if valueEqClass[a.ID] != valueEqClass[b.ID] {
+ eqArgs = false
+ break
+ }
+ }
+ if !eqArgs {
+ splitPoints = append(splitPoints, j)
+ }
+ }
+ if len(splitPoints) == 1 {
+ continue // no splits, leave equivalence class alone.
+ }
+
+ // Move another equivalence class down in place of e.
+ partition[i] = partition[len(partition)-1]
+ partition = partition[:len(partition)-1]
+ i--
+
+ // Add new equivalence classes for the parts of e we found.
+ splitPoints = append(splitPoints, len(e))
+ for j := 0; j < len(splitPoints)-1; j++ {
+ f := e[splitPoints[j]:splitPoints[j+1]]
+ if len(f) == 1 {
+ // Don't add singletons.
+ valueEqClass[f[0].ID] = -f[0].ID
+ continue
+ }
+ for _, v := range f {
+ valueEqClass[v.ID] = pNum
+ }
+ pNum++
+ partition = append(partition, f)
+ }
+ changed = true
+ }
+
+ if !changed {
+ break
+ }
+ }
+
+ sdom := f.Sdom()
+
+ // Compute substitutions we would like to do. We substitute v for w
+ // if v and w are in the same equivalence class and v dominates w.
+ rewrite := make([]*Value, f.NumValues())
+ byDom := new(partitionByDom) // reusable partitionByDom to reduce allocs
+ for _, e := range partition {
+ byDom.a = e
+ byDom.sdom = sdom
+ sort.Sort(byDom)
+ for i := 0; i < len(e)-1; i++ {
+ // e is sorted by domorder, so a maximal dominant element is first in the slice
+ v := e[i]
+ if v == nil {
+ continue
+ }
+
+ e[i] = nil
+ // Replace all elements of e which v dominates
+ for j := i + 1; j < len(e); j++ {
+ w := e[j]
+ if w == nil {
+ continue
+ }
+ if sdom.IsAncestorEq(v.Block, w.Block) {
+ rewrite[w.ID] = v
+ e[j] = nil
+ } else {
+ // e is sorted by domorder, so v.Block doesn't dominate any subsequent blocks in e
+ break
+ }
+ }
+ }
+ }
+
+ rewrites := int64(0)
+
+ // Apply substitutions
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, w := range v.Args {
+ if x := rewrite[w.ID]; x != nil {
+ if w.Pos.IsStmt() == src.PosIsStmt {
+					// We are about to lose a statement marker on w.
+					// w is an input to v; if they're in the same block
+					// and on the same line, v is a good-enough new statement boundary.
+ if w.Block == v.Block && w.Pos.Line() == v.Pos.Line() {
+ v.Pos = v.Pos.WithIsStmt()
+ w.Pos = w.Pos.WithNotStmt()
+ } // TODO and if this fails?
+ }
+ v.SetArg(i, x)
+ rewrites++
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if x := rewrite[v.ID]; x != nil {
+ if v.Op == OpNilCheck {
+ // nilcheck pass will remove the nil checks and log
+ // them appropriately, so don't mess with them here.
+ continue
+ }
+ b.ReplaceControl(i, x)
+ }
+ }
+ }
+
+ if f.pass.stats > 0 {
+ f.LogStat("CSE REWRITES", rewrites)
+ }
+}
+
+// An eqclass approximates an equivalence class. During the
+// algorithm it may represent the union of several of the
+// final equivalence classes.
+type eqclass []*Value
+
+// partitionValues partitions the values into equivalence classes
+// based on having all the following features match:
+// - opcode
+// - type
+// - auxint
+// - aux
+// - nargs
+// - block # if a phi op
+// - first two args' opcodes and auxint
+// - NOT the first two args' aux; that can break CSE.
+// partitionValues returns a list of equivalence classes, each
+// being a list of *Values sorted by ID. The eqclass slices are
+// backed by the same storage as the input slice.
+// Equivalence classes of size 1 are ignored.
+func partitionValues(a []*Value, auxIDs auxmap) []eqclass {
+ sort.Sort(sortvalues{a, auxIDs})
+
+ var partition []eqclass
+ for len(a) > 0 {
+ v := a[0]
+ j := 1
+ for ; j < len(a); j++ {
+ w := a[j]
+ if cmpVal(v, w, auxIDs) != types.CMPeq {
+ break
+ }
+ }
+ if j > 1 {
+ partition = append(partition, a[:j])
+ }
+ a = a[j:]
+ }
+
+ return partition
+}
+func lt2Cmp(isLt bool) types.Cmp {
+ if isLt {
+ return types.CMPlt
+ }
+ return types.CMPgt
+}
+
+type auxmap map[Aux]int32
+
+func cmpVal(v, w *Value, auxIDs auxmap) types.Cmp {
+	// Try to order these comparisons by cost (cheaper first).
+ if v.Op != w.Op {
+ return lt2Cmp(v.Op < w.Op)
+ }
+ if v.AuxInt != w.AuxInt {
+ return lt2Cmp(v.AuxInt < w.AuxInt)
+ }
+ if len(v.Args) != len(w.Args) {
+ return lt2Cmp(len(v.Args) < len(w.Args))
+ }
+ if v.Op == OpPhi && v.Block != w.Block {
+ return lt2Cmp(v.Block.ID < w.Block.ID)
+ }
+ if v.Type.IsMemory() {
+ // We will never be able to CSE two values
+ // that generate memory.
+ return lt2Cmp(v.ID < w.ID)
+ }
+	// OpSelect is a pseudo-op. We need to be more aggressive
+	// regarding CSE to keep multiple OpSelect values of the same
+	// argument from existing.
+ if v.Op != OpSelect0 && v.Op != OpSelect1 && v.Op != OpSelectN {
+ if tc := v.Type.Compare(w.Type); tc != types.CMPeq {
+ return tc
+ }
+ }
+
+ if v.Aux != w.Aux {
+ if v.Aux == nil {
+ return types.CMPlt
+ }
+ if w.Aux == nil {
+ return types.CMPgt
+ }
+ return lt2Cmp(auxIDs[v.Aux] < auxIDs[w.Aux])
+ }
+
+ return types.CMPeq
+}
+
+// Sort values to make the initial partition.
+type sortvalues struct {
+ a []*Value // array of values
+ auxIDs auxmap // aux -> aux ID map
+}
+
+func (sv sortvalues) Len() int { return len(sv.a) }
+func (sv sortvalues) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv sortvalues) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ if cmp := cmpVal(v, w, sv.auxIDs); cmp != types.CMPeq {
+ return cmp == types.CMPlt
+ }
+
+ // Sort by value ID last to keep the sort result deterministic.
+ return v.ID < w.ID
+}
+
+type partitionByDom struct {
+ a []*Value // array of values
+ sdom SparseTree
+}
+
+func (sv partitionByDom) Len() int { return len(sv.a) }
+func (sv partitionByDom) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByDom) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ return sv.sdom.domorder(v.Block) < sv.sdom.domorder(w.Block)
+}
+
+type partitionByArgClass struct {
+ a []*Value // array of values
+ eqClass []ID // equivalence class IDs of values
+}
+
+func (sv partitionByArgClass) Len() int { return len(sv.a) }
+func (sv partitionByArgClass) Swap(i, j int) { sv.a[i], sv.a[j] = sv.a[j], sv.a[i] }
+func (sv partitionByArgClass) Less(i, j int) bool {
+ v := sv.a[i]
+ w := sv.a[j]
+ for i, a := range v.Args {
+ b := w.Args[i]
+ if sv.eqClass[a.ID] < sv.eqClass[b.ID] {
+ return true
+ }
+ if sv.eqClass[a.ID] > sv.eqClass[b.ID] {
+ return false
+ }
+ }
+ return false
+}
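+
+// For illustration (schematic SSA): given
+//
+//	v1 = Add64 <int64> x y
+//	v2 = Add64 <int64> x y
+//	v3 = Mul64 <int64> v2 z
+//
+// v1 and v2 satisfy the equivalence definition above. If v1's block dominates
+// v2's, cse rewrites v3 to use v1; v2 becomes dead and is removed by a later
+// deadcode pass.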
diff --git a/src/cmd/compile/internal/ssa/cse_test.go b/src/cmd/compile/internal/ssa/cse_test.go
new file mode 100644
index 0000000..8052016
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/cse_test.go
@@ -0,0 +1,131 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "testing"
+)
+
+type tstAux struct {
+ s string
+}
+
+func (*tstAux) CanBeAnSSAAux() {}
+
+// This tests for a bug found when partitioning by, but not sorting by, the Aux value.
+func TestCSEAuxPartitionBug(t *testing.T) {
+ c := testConfig(t)
+ arg1Aux := &tstAux{"arg1-aux"}
+ arg2Aux := &tstAux{"arg2-aux"}
+ arg3Aux := &tstAux{"arg3-aux"}
+ a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8)
+
+ // construct lots of values with args that have aux values and place
+ // them in an order that triggers the bug
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("r7", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg1"),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, arg1Aux),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, arg2Aux),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, arg3Aux),
+ Valu("r9", OpAdd64, c.config.Types.Int64, 0, nil, "r7", "r8"),
+ Valu("r4", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("r8", OpAdd64, c.config.Types.Int64, 0, nil, "arg3", "arg2"),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("r6", OpAdd64, c.config.Types.Int64, 0, nil, "r4", "r5"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "arg1", "arg2"),
+ Valu("r5", OpAdd64, c.config.Types.Int64, 0, nil, "r2", "r3"),
+ Valu("r10", OpAdd64, c.config.Types.Int64, 0, nil, "r6", "r9"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r10", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ s1Cnt := 2
+ // r1 == r2 == r3, needs to remove two of this set
+ s2Cnt := 1
+ // r4 == r5, needs to remove one of these
+ for k, v := range fun.values {
+ if v.Op == OpInvalid {
+ switch k {
+ case "r1":
+ fallthrough
+ case "r2":
+ fallthrough
+ case "r3":
+ if s1Cnt == 0 {
+ t.Errorf("cse removed all of r1,r2,r3")
+ }
+ s1Cnt--
+
+ case "r4":
+ fallthrough
+ case "r5":
+ if s2Cnt == 0 {
+ t.Errorf("cse removed all of r4,r5")
+ }
+ s2Cnt--
+ default:
+ t.Errorf("cse removed %s, but shouldn't have", k)
+ }
+ }
+ }
+
+ if s1Cnt != 0 || s2Cnt != 0 {
+ t.Errorf("%d values missed during cse", s1Cnt+s2Cnt)
+ }
+}
+
+// TestZCSE tests the zero arg cse.
+func TestZCSE(t *testing.T) {
+ c := testConfig(t)
+ a := c.Frontend().Auto(src.NoXPos, c.config.Types.Int8)
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("sb1", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sb2", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("addr1", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb1"),
+ Valu("addr2", OpAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sb2"),
+ Valu("a1ld", OpLoad, c.config.Types.Int64, 0, nil, "addr1", "start"),
+ Valu("a2ld", OpLoad, c.config.Types.Int64, 0, nil, "addr2", "start"),
+ Valu("c1", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r1", OpAdd64, c.config.Types.Int64, 0, nil, "a1ld", "c1"),
+ Valu("c2", OpConst64, c.config.Types.Int64, 1, nil),
+ Valu("r2", OpAdd64, c.config.Types.Int64, 0, nil, "a2ld", "c2"),
+ Valu("r3", OpAdd64, c.config.Types.Int64, 0, nil, "r1", "r2"),
+ Valu("raddr", OpLocalAddr, c.config.Types.Int64.PtrTo(), 0, nil, "sp", "start"),
+ Valu("raddrdef", OpVarDef, types.TypeMem, 0, a, "start"),
+ Valu("rstore", OpStore, types.TypeMem, 0, c.config.Types.Int64, "raddr", "r3", "raddrdef"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("rstore")))
+
+ CheckFunc(fun.f)
+ zcse(fun.f)
+ deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.values["c1"].Op != OpInvalid && fun.values["c2"].Op != OpInvalid {
+		t.Errorf("zcse should have removed c1 or c2")
+ }
+ if fun.values["sb1"].Op != OpInvalid && fun.values["sb2"].Op != OpInvalid {
+		t.Errorf("zcse should have removed sb1 or sb2")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadcode.go b/src/cmd/compile/internal/ssa/deadcode.go
new file mode 100644
index 0000000..b47b106
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode.go
@@ -0,0 +1,389 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+)
+
+// findlive returns the reachable blocks and live values in f.
+// The caller should call f.retDeadcodeLive(live) when it is done with it.
+func findlive(f *Func) (reachable []bool, live []bool) {
+ reachable = ReachableBlocks(f)
+ var order []*Value
+ live, order = liveValues(f, reachable)
+ f.retDeadcodeLiveOrderStmts(order)
+ return
+}
+
+// ReachableBlocks returns the reachable blocks in f.
+func ReachableBlocks(f *Func) []bool {
+ reachable := make([]bool, f.NumBlocks())
+ reachable[f.Entry.ID] = true
+ p := make([]*Block, 0, 64) // stack-like worklist
+ p = append(p, f.Entry)
+ for len(p) > 0 {
+ // Pop a reachable block
+ b := p[len(p)-1]
+ p = p[:len(p)-1]
+ // Mark successors as reachable
+ s := b.Succs
+ if b.Kind == BlockFirst {
+ s = s[:1]
+ }
+ for _, e := range s {
+ c := e.b
+ if int(c.ID) >= len(reachable) {
+ f.Fatalf("block %s >= f.NumBlocks()=%d?", c, len(reachable))
+ }
+ if !reachable[c.ID] {
+ reachable[c.ID] = true
+ p = append(p, c) // push
+ }
+ }
+ }
+ return reachable
+}
+
+// liveValues returns the live values in f and a list of values that are eligible
+// to be statements in reversed data flow order.
+// The second result is used to help conserve statement boundaries for debugging.
+// reachable is a map from block ID to whether the block is reachable.
+// The caller should call f.retDeadcodeLive(live) and f.retDeadcodeLiveOrderStmts(liveOrderStmts)
+// when they are done with the return values.
+func liveValues(f *Func, reachable []bool) (live []bool, liveOrderStmts []*Value) {
+ live = f.newDeadcodeLive()
+ if cap(live) < f.NumValues() {
+ live = make([]bool, f.NumValues())
+ } else {
+ live = live[:f.NumValues()]
+ for i := range live {
+ live[i] = false
+ }
+ }
+
+ liveOrderStmts = f.newDeadcodeLiveOrderStmts()
+ liveOrderStmts = liveOrderStmts[:0]
+
+ // After regalloc, consider all values to be live.
+ // See the comment at the top of regalloc.go and in deadcode for details.
+ if f.RegAlloc != nil {
+ for i := range live {
+ live[i] = true
+ }
+ return
+ }
+
+ // Record all the inline indexes we need
+ var liveInlIdx map[int]bool
+ pt := f.Config.ctxt.PosTable
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ i := pt.Pos(v.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+ i := pt.Pos(b.Pos).Base().InliningIndex()
+ if i < 0 {
+ continue
+ }
+ if liveInlIdx == nil {
+ liveInlIdx = map[int]bool{}
+ }
+ liveInlIdx[i] = true
+ }
+
+ // Find all live values
+ q := f.Cache.deadcode.q[:0]
+ defer func() { f.Cache.deadcode.q = q }()
+
+ // Starting set: all control values of reachable blocks are live.
+ // Calls are live (because callee can observe the memory state).
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ for _, v := range b.Values {
+ if (opcodeTable[v.Op].call || opcodeTable[v.Op].hasSideEffects) && !live[v.ID] {
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ if v.Type.IsVoid() && !live[v.ID] {
+ // The only Void ops are nil checks and inline marks. We must keep these.
+ if v.Op == OpInlMark && !liveInlIdx[int(v.AuxInt)] {
+ // We don't need marks for bodies that
+ // have been completely optimized away.
+ // TODO: save marks only for bodies which
+ // have a faulting instruction or a call?
+ continue
+ }
+ live[v.ID] = true
+ q = append(q, v)
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, v)
+ }
+ }
+ }
+ }
+
+ // Compute transitive closure of live values.
+ for len(q) > 0 {
+ // pop a reachable value
+ v := q[len(q)-1]
+ q = q[:len(q)-1]
+ for i, x := range v.Args {
+ if v.Op == OpPhi && !reachable[v.Block.Preds[i].b.ID] {
+ continue
+ }
+ if !live[x.ID] {
+ live[x.ID] = true
+ q = append(q, x) // push
+ if x.Pos.IsStmt() != src.PosNotStmt {
+ liveOrderStmts = append(liveOrderStmts, x)
+ }
+ }
+ }
+ }
+
+ return
+}
+
+// deadcode removes dead code from f.
+func deadcode(f *Func) {
+ // deadcode after regalloc is forbidden for now. Regalloc
+ // doesn't quite generate legal SSA which will lead to some
+ // required moves being eliminated. See the comment at the
+ // top of regalloc.go for details.
+ if f.RegAlloc != nil {
+ f.Fatalf("deadcode after regalloc")
+ }
+
+ // Find reachable blocks.
+ reachable := ReachableBlocks(f)
+
+ // Get rid of edges from dead to live code.
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ continue
+ }
+ for i := 0; i < len(b.Succs); {
+ e := b.Succs[i]
+ if reachable[e.b.ID] {
+ b.removeEdge(i)
+ } else {
+ i++
+ }
+ }
+ }
+
+ // Get rid of dead edges from live code.
+ for _, b := range f.Blocks {
+ if !reachable[b.ID] {
+ continue
+ }
+ if b.Kind != BlockFirst {
+ continue
+ }
+ b.removeEdge(1)
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ }
+
+ // Splice out any copies introduced during dead block removal.
+ copyelim(f)
+
+ // Find live values.
+ live, order := liveValues(f, reachable)
+ defer f.retDeadcodeLive(live)
+ defer f.retDeadcodeLiveOrderStmts(order)
+
+ // Remove dead & duplicate entries from namedValues map.
+ s := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(s)
+ i := 0
+ for _, name := range f.Names {
+ j := 0
+ s.clear()
+ values := f.NamedValues[*name]
+ for _, v := range values {
+ if live[v.ID] && !s.contains(v.ID) {
+ values[j] = v
+ j++
+ s.add(v.ID)
+ }
+ }
+ if j == 0 {
+ delete(f.NamedValues, *name)
+ } else {
+ f.Names[i] = name
+ i++
+ for k := len(values) - 1; k >= j; k-- {
+ values[k] = nil
+ }
+ f.NamedValues[*name] = values[:j]
+ }
+ }
+ clearNames := f.Names[i:]
+ for j := range clearNames {
+ clearNames[j] = nil
+ }
+ f.Names = f.Names[:i]
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Unlink values and conserve statement boundaries
+ for i, b := range f.Blocks {
+ if !reachable[b.ID] {
+ // TODO what if control is statement boundary? Too late here.
+ b.ResetControls()
+ }
+ for _, v := range b.Values {
+ if !live[v.ID] {
+ v.resetArgs()
+ if v.Pos.IsStmt() == src.PosIsStmt && reachable[b.ID] {
+ pendingLines.set(v.Pos, int32(i)) // TODO could be more than one pos for a line
+ }
+ }
+ }
+ }
+
+	// Find new homes for lost lines -- require the earliest value in data flow order with the same line that is also in the same block.
+ for i := len(order) - 1; i >= 0; i-- {
+ w := order[i]
+ if j := pendingLines.get(w.Pos); j > -1 && f.Blocks[j] == w.Block {
+ w.Pos = w.Pos.WithIsStmt()
+ pendingLines.remove(w.Pos)
+ }
+ }
+
+ // Any boundary that failed to match a live value can move to a block end
+ pendingLines.foreachEntry(func(j int32, l uint, bi int32) {
+ b := f.Blocks[bi]
+ if b.Pos.Line() == l && b.Pos.FileIndex() == j {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ })
+
+ // Remove dead values from blocks' value list. Return dead
+ // values to the allocator.
+ for _, b := range f.Blocks {
+ i := 0
+ for _, v := range b.Values {
+ if live[v.ID] {
+ b.Values[i] = v
+ i++
+ } else {
+ f.freeValue(v)
+ }
+ }
+ b.truncateValues(i)
+ }
+
+ // Remove dead blocks from WBLoads list.
+ i = 0
+ for _, b := range f.WBLoads {
+ if reachable[b.ID] {
+ f.WBLoads[i] = b
+ i++
+ }
+ }
+ clearWBLoads := f.WBLoads[i:]
+ for j := range clearWBLoads {
+ clearWBLoads[j] = nil
+ }
+ f.WBLoads = f.WBLoads[:i]
+
+ // Remove unreachable blocks. Return dead blocks to allocator.
+ i = 0
+ for _, b := range f.Blocks {
+ if reachable[b.ID] {
+ f.Blocks[i] = b
+ i++
+ } else {
+ if len(b.Values) > 0 {
+ b.Fatalf("live values in unreachable block %v: %v", b, b.Values)
+ }
+ f.freeBlock(b)
+ }
+ }
+ // zero remainder to help GC
+ tail := f.Blocks[i:]
+ for j := range tail {
+ tail[j] = nil
+ }
+ f.Blocks = f.Blocks[:i]
+}
+
+// removeEdge removes the i'th outgoing edge from b (and
+// the corresponding incoming edge from b.Succs[i].b).
+func (b *Block) removeEdge(i int) {
+ e := b.Succs[i]
+ c := e.b
+ j := e.i
+
+ // Adjust b.Succs
+ b.removeSucc(i)
+
+ // Adjust c.Preds
+ c.removePred(j)
+
+ // Remove phi args from c's phis.
+ for _, v := range c.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ c.removePhiArg(v, j)
+ phielimValue(v)
+ // Note: this is trickier than it looks. Replacing
+ // a Phi with a Copy can in general cause problems because
+ // Phi and Copy don't have exactly the same semantics.
+ // Phi arguments always come from a predecessor block,
+ // whereas copies don't. This matters in loops like:
+ // 1: x = (Phi y)
+ // y = (Add x 1)
+ // goto 1
+ // If we replace Phi->Copy, we get
+ // 1: x = (Copy y)
+ // y = (Add x 1)
+ // goto 1
+ // (Phi y) refers to the *previous* value of y, whereas
+ // (Copy y) refers to the *current* value of y.
+ // The modified code has a cycle and the scheduler
+ // will barf on it.
+ //
+ // Fortunately, this situation can only happen for dead
+ // code loops. We know the code we're working with is
+ // not dead, so we're ok.
+ // Proof: If we have a potential bad cycle, we have a
+ // situation like this:
+ // x = (Phi z)
+ // y = (op1 x ...)
+ // z = (op2 y ...)
+ // Where opX are not Phi ops. But such a situation
+ // implies a cycle in the dominator graph. In the
+ // example, x.Block dominates y.Block, y.Block dominates
+ // z.Block, and z.Block dominates x.Block (treating
+ // "dominates" as reflexive). Cycles in the dominator
+ // graph can only happen in an unreachable cycle.
+ }
+}
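+
+// For illustration (schematic SSA): in
+//
+//	v1 = Const64 <int64> [37]
+//	v2 = Add64 <int64> x y
+//	Exit mem
+//
+// neither v1 nor v2 is reachable from a control value, a call, or an op with
+// side effects, so both are dead and removed (see TestDeadValue in
+// deadcode_test.go).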
diff --git a/src/cmd/compile/internal/ssa/deadcode_test.go b/src/cmd/compile/internal/ssa/deadcode_test.go
new file mode 100644
index 0000000..5777b84
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadcode_test.go
@@ -0,0 +1,161 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestDeadLoop(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")),
+ // dead loop
+ Bloc("deadblock",
+ // dead value in dead block
+ Valu("deadval", OpConstBool, c.config.Types.Bool, 1, nil),
+ If("deadval", "deadblock", "exit")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["deadblock"] {
+ t.Errorf("dead block not removed")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("control value of dead block not removed")
+ }
+ }
+ }
+}
+
+func TestDeadValue(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("deadval", OpConst64, c.config.Types.Int64, 37, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v == fun.values["deadval"] {
+ t.Errorf("dead value not removed")
+ }
+ }
+ }
+}
+
+func TestNeverTaken(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ If("cond", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] {
+ t.Errorf("then block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+
+}
+
+func TestNestedDeadBlocks(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpConstBool, c.config.Types.Bool, 0, nil),
+ If("cond", "b2", "b4")),
+ Bloc("b2",
+ If("cond", "b3", "b4")),
+ Bloc("b3",
+ If("cond", "b3", "b4")),
+ Bloc("b4",
+ If("cond", "b3", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ Opt(fun.f)
+ CheckFunc(fun.f)
+ Deadcode(fun.f)
+ CheckFunc(fun.f)
+ if fun.blocks["entry"].Kind != BlockPlain {
+ t.Errorf("if(false) not simplified")
+ }
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["b2"] {
+ t.Errorf("b2 block still present")
+ }
+ if b == fun.blocks["b3"] {
+ t.Errorf("b3 block still present")
+ }
+ for _, v := range b.Values {
+ if v == fun.values["cond"] {
+ t.Errorf("constant condition still present")
+ }
+ }
+ }
+}
+
+func BenchmarkDeadCode(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000, 100000, 200000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+ blocks := make([]bloc, 0, n+2)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")))
+ blocks = append(blocks, Bloc("exit", Exit("mem")))
+ for i := 0; i < n; i++ {
+ blocks = append(blocks, Bloc(fmt.Sprintf("dead%d", i), Goto("exit")))
+ }
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ Deadcode(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
new file mode 100644
index 0000000..d694133
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore.go
@@ -0,0 +1,350 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// dse does dead-store elimination on the Function.
+// Dead stores are those which are unconditionally followed by
+// another store to the same location, with no intervening load.
+// This implementation only works within a basic block. TODO: use something more global.
+func dse(f *Func) {
+ var stores []*Value
+ loadUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(loadUse)
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ shadowed := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(shadowed)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // loadUse contains stores which are used by a subsequent load.
+ // storeUse contains stores which are used by a subsequent store.
+ loadUse.clear()
+ storeUse.clear()
+ stores = stores[:0]
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // Ignore phis - they will always be first and can't be eliminated
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ if v.Op != OpStore && v.Op != OpZero && v.Op != OpVarDef && v.Op != OpVarKill {
+ // CALL, DUFFCOPY, etc. are both
+ // reads and writes.
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ } else {
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ loadUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last.LongString(), v.LongString())
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+
+ // Walk backwards looking for dead stores. Keep track of shadowed addresses.
+ // A "shadowed address" is a pointer and a size describing a memory region that
+ // is known to be written. We keep track of shadowed addresses in the shadowed
+ // map, mapping the ID of the address to the size of the shadowed region.
+ // Since we're walking backwards, writes to a shadowed region are useless,
+ // as they will be immediately overwritten.
+ shadowed.clear()
+ v := last
+
+ walkloop:
+ if loadUse.contains(v.ID) {
+ // Someone might be reading this memory state.
+ // Clear all shadowed addresses.
+ shadowed.clear()
+ }
+ if v.Op == OpStore || v.Op == OpZero {
+ var sz int64
+ if v.Op == OpStore {
+ sz = v.Aux.(*types.Type).Size()
+ } else { // OpZero
+ sz = v.AuxInt
+ }
+ if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
+ // Modify the store/zero into a copy of the memory state,
+ // effectively eliding the store operation.
+ if v.Op == OpStore {
+ // store addr value mem
+ v.SetArgs1(v.Args[2])
+ } else {
+ // zero addr mem
+ v.SetArgs1(v.Args[1])
+ }
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ } else {
+ if sz > 0x7fffffff { // work around sparseMap's int32 value type
+ sz = 0x7fffffff
+ }
+ shadowed.set(v.Args[0].ID, int32(sz), src.NoXPos)
+ }
+ }
+ // walk to previous store
+ if v.Op == OpPhi {
+ // At start of block. Move on to next block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ // (Even if it isn't the first in the current b.Values order.)
+ continue
+ }
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ v = a
+ goto walkloop
+ }
+ }
+ }
+}
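+
+// For illustration (schematic SSA, within a single block):
+//
+//	m1 = Store {int64} ptr a m0
+//	m2 = Store {int64} ptr b m1
+//
+// With no load between the two stores, the first store is shadowed; dse
+// rewrites it into a Copy of m0, and a later deadcode pass removes it.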
+
+// elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
+// we track the operations that the address of each auto reaches and if it only
+// reaches stores then we delete all the stores. The other operations will then
+// be eliminated by the dead code elimination pass.
+func elimDeadAutosGeneric(f *Func) {
+ addr := make(map[*Value]*ir.Name) // values that the address of the auto reaches
+ elim := make(map[*Value]*ir.Name) // values that could be eliminated if the auto is
+ var used ir.NameSet // used autos that must be kept
+
+ // visit the value and report whether any of the maps are updated
+ visit := func(v *Value) (changed bool) {
+ args := v.Args
+ switch v.Op {
+ case OpAddr, OpLocalAddr:
+ // Propagate the address if it points to an auto.
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if addr[v] == nil {
+ addr[v] = n
+ changed = true
+ }
+ return
+ case OpVarDef, OpVarKill:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ return
+ case OpVarLive:
+ // Don't delete the auto if it needs to be kept alive.
+
+ // We depend on this check to keep the autotmp stack slots
+ // for open-coded defers from being removed (since they
+ // may not be used by the inline code, but will be used by
+ // panic processing).
+ n, ok := v.Aux.(*ir.Name)
+ if !ok || n.Class != ir.PAUTO {
+ return
+ }
+ if !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ return
+ case OpStore, OpMove, OpZero:
+ // v should be eliminated if we eliminate the auto.
+ n, ok := addr[args[0]]
+ if ok && elim[v] == nil {
+ elim[v] = n
+ changed = true
+ }
+ // Other args might hold pointers to autos.
+ args = args[1:]
+ }
+
+ // The code below assumes that we have handled all the ops
+ // with sym effects already. Sanity check that here.
+ // Ignore Args since they can't be autos.
+ if v.Op.SymEffect() != SymNone && v.Op != OpArg {
+ panic("unhandled op with sym effect")
+ }
+
+ if v.Uses == 0 && v.Op != OpNilCheck && !v.Op.IsCall() && !v.Op.HasSideEffects() || len(args) == 0 {
+ // Nil check has no use, but we need to keep it.
+ // Also keep calls and values that have side effects.
+ return
+ }
+
+ // If the address of the auto reaches a memory or control
+ // operation not covered above then we probably need to keep it.
+ // We also need to keep autos if they reach Phis (issue #26153).
+ if v.Type.IsMemory() || v.Type.IsFlags() || v.Op == OpPhi || v.MemoryArg() != nil {
+ for _, a := range args {
+ if n, ok := addr[a]; ok {
+ if !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ return
+ }
+
+ // Propagate any auto addresses through v.
+ var node *ir.Name
+ for _, a := range args {
+ if n, ok := addr[a]; ok && !used.Has(n) {
+ if node == nil {
+ node = n
+ } else if node != n {
+ // Most of the time we only see one pointer
+ // reaching an op, but some ops can take
+ // multiple pointers (e.g. NeqPtr, Phi etc.).
+ // This is rare, so just propagate the first
+ // value to keep things simple.
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ if node == nil {
+ return
+ }
+ if addr[v] == nil {
+ // The address of an auto reaches this op.
+ addr[v] = node
+ changed = true
+ return
+ }
+ if addr[v] != node {
+ // This doesn't happen in practice, but catch it just in case.
+ used.Add(node)
+ changed = true
+ }
+ return
+ }
+
+ iterations := 0
+ for {
+ if iterations == 4 {
+ // give up
+ return
+ }
+ iterations++
+ changed := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ changed = visit(v) || changed
+ }
+ // keep the auto if its address reaches a control value
+ for _, c := range b.ControlValues() {
+ if n, ok := addr[c]; ok && !used.Has(n) {
+ used.Add(n)
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for v, n := range elim {
+ if used.Has(n) {
+ continue
+ }
+ // replace with OpCopy
+ v.SetArgs1(v.MemoryArg())
+ v.Aux = nil
+ v.AuxInt = 0
+ v.Op = OpCopy
+ }
+}
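As a hedged illustration (hypothetical function, not from the patch), this is the shape of code the pass above removes: the address of the auto reaches only write operations, so the VarDef and the store can be turned into copies of their memory argument and the slot cleaned up by later dead-code elimination.

func scratch() {
	var tmp [16]byte // stack auto whose address reaches only stores
	tmp[0] = 1       // write-only use: candidate for elimination
}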
+
+// elimUnreadAutos deletes stores (and associated bookkeeping ops VarDef and VarKill)
+// to autos that are never read from.
+func elimUnreadAutos(f *Func) {
+ // Loop over all ops that affect autos taking note of which
+ // autos we need and also stores that we might be able to
+ // eliminate.
+ var seen ir.NameSet
+ var stores []*Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ n, ok := v.Aux.(*ir.Name)
+ if !ok {
+ continue
+ }
+ if n.Class != ir.PAUTO {
+ continue
+ }
+
+ effect := v.Op.SymEffect()
+ switch effect {
+ case SymNone, SymWrite:
+ // If we haven't seen the auto yet
+ // then this might be a store we can
+ // eliminate.
+ if !seen.Has(n) {
+ stores = append(stores, v)
+ }
+ default:
+ // Assume the auto is needed (loaded,
+ // has its address taken, etc.).
+ // Note we have to check the uses
+ // because dead loads haven't been
+ // eliminated yet.
+ if v.Uses > 0 {
+ seen.Add(n)
+ }
+ }
+ }
+ }
+
+ // Eliminate stores to unread autos.
+ for _, store := range stores {
+ n, _ := store.Aux.(*ir.Name)
+ if seen.Has(n) {
+ continue
+ }
+
+ // replace store with OpCopy
+ store.SetArgs1(store.MemoryArg())
+ store.Aux = nil
+ store.AuxInt = 0
+ store.Op = OpCopy
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/deadstore_test.go b/src/cmd/compile/internal/ssa/deadstore_test.go
new file mode 100644
index 0000000..33cb4b9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/deadstore_test.go
@@ -0,0 +1,129 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestDeadStore(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ t.Logf("PTRTYPE %v", ptrType)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr2", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("addr3", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("zero1", OpZero, types.TypeMem, 1, c.config.Types.Bool, "addr3", "start"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "zero1"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Valu("store3", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store2"),
+ Valu("store4", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr3", "v", "store3"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store3")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v1 := fun.values["store1"]
+ if v1.Op != OpCopy {
+ t.Errorf("dead store not removed")
+ }
+
+ v2 := fun.values["zero1"]
+ if v2.Op != OpCopy {
+ t.Errorf("dead store (zero) not removed")
+ }
+}
+func TestDeadStorePhi(t *testing.T) {
+ // make sure we don't get into an infinite loop with phi values.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "store"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr", "v", "phi"),
+ If("v", "loop", "exit")),
+ Bloc("exit",
+ Exit("store")))
+
+ CheckFunc(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestDeadStoreTypes(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. We test an even
+ // stronger restriction, that one store can't shadow another unless the
+ // types of the address fields are identical (where identicalness is
+ // decided by the CSE pass).
+ c := testConfig(t)
+ t1 := c.config.Types.UInt64.PtrTo()
+ t2 := c.config.Types.UInt32.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, t1, 0, nil, "sb"),
+ Valu("addr2", OpAddr, t2, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "start"),
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr2", "v", "store1"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
+
+func TestDeadStoreUnsafe(t *testing.T) {
+ // Make sure a narrow store can't shadow a wider one. The test above
+ // covers the case of two different types, but unsafe pointer casting
+ // can get to a point where the size is changed but type unchanged.
+ c := testConfig(t)
+ ptrType := c.config.Types.UInt64.PtrTo()
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstBool, c.config.Types.Bool, 1, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("store1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "addr1", "v", "start"), // store 8 bytes
+ Valu("store2", OpStore, types.TypeMem, 0, c.config.Types.Bool, "addr1", "v", "store1"), // store 1 byte
+ Goto("exit")),
+ Bloc("exit",
+ Exit("store2")))
+
+ CheckFunc(fun.f)
+ cse(fun.f)
+ dse(fun.f)
+ CheckFunc(fun.f)
+
+ v := fun.values["store1"]
+ if v.Op == OpCopy {
+ t.Errorf("store %s incorrectly removed", v)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug.go b/src/cmd/compile/internal/ssa/debug.go
new file mode 100644
index 0000000..aad59fa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug.go
@@ -0,0 +1,1734 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/dwarf"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "encoding/hex"
+ "fmt"
+ "internal/buildcfg"
+ "math/bits"
+ "sort"
+ "strings"
+)
+
+type SlotID int32
+type VarID int32
+
+// A FuncDebug contains all the debug information for the variables in a
+// function. Variables are identified by their LocalSlot, which may be the
+// result of decomposing a larger variable.
+type FuncDebug struct {
+ // Slots is all the slots used in the debug info, indexed by their SlotID.
+ Slots []LocalSlot
+ // The user variables, indexed by VarID.
+ Vars []*ir.Name
+ // The slots that make up each variable, indexed by VarID.
+ VarSlots [][]SlotID
+ // The location list data, indexed by VarID. Must be processed by PutLocationList.
+ LocationLists [][]byte
+ // Register-resident output parameters for the function. This is filled in at
+ // SSA generation time.
+ RegOutputParams []*ir.Name
+
+ // Filled in by the user. Translates Block and Value ID to PC.
+ GetPC func(ID, ID) int64
+}
+
+type BlockDebug struct {
+ // Whether the block had any changes to user variables at all.
+ relevant bool
+ // State at the end of the block if it's fully processed. Immutable once initialized.
+ endState []liveSlot
+}
+
+// A liveSlot records the location of a slot that is live at entry/exit of a block.
+type liveSlot struct {
+ // An inlined VarLoc, so it packs into 16 bytes instead of 20.
+ Registers RegisterSet
+ StackOffset
+
+ slot SlotID
+}
+
+func (loc liveSlot) absent() bool {
+ return loc.Registers == 0 && !loc.onStack()
+}
+
+// StackOffset encodes whether a value is on the stack and, if so, where. The
+// offset occupies the upper 31 bits, with a presence flag in the low-order bit.
+type StackOffset int32
+
+func (s StackOffset) onStack() bool {
+ return s != 0
+}
+
+func (s StackOffset) stackOffsetValue() int32 {
+ return int32(s) >> 1
+}
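A small sketch of the encoding implied here, mirroring the "<<1 | 1" pattern used later in processValue; the helper name is illustrative and not part of this file.

// encodeStackOffset packs a frame offset into a StackOffset, setting the
// presence flag in the low bit so that onStack reports true and
// stackOffsetValue recovers off.
func encodeStackOffset(off int32) StackOffset {
	return StackOffset(off<<1 | 1)
}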
+
+// stateAtPC is the current state of all variables at some point.
+type stateAtPC struct {
+ // The location of each known slot, indexed by SlotID.
+ slots []VarLoc
+ // The slots present in each register, indexed by register number.
+ registers [][]SlotID
+}
+
+// reset fills state with the live variables from live.
+func (state *stateAtPC) reset(live []liveSlot) {
+ slots, registers := state.slots, state.registers
+ for i := range slots {
+ slots[i] = VarLoc{}
+ }
+ for i := range registers {
+ registers[i] = registers[i][:0]
+ }
+ for _, live := range live {
+ slots[live.slot] = VarLoc{live.Registers, live.StackOffset}
+ if live.Registers == 0 {
+ continue
+ }
+
+ mask := uint64(live.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ registers[reg] = append(registers[reg], live.slot)
+ }
+ }
+ state.slots, state.registers = slots, registers
+}
+
+func (s *debugState) LocString(loc VarLoc) string {
+ if loc.absent() {
+ return "<nil>"
+ }
+
+ var storage []string
+ if loc.onStack() {
+ storage = append(storage, "stack")
+ }
+
+ mask := uint64(loc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+
+ storage = append(storage, s.registers[reg].String())
+ }
+ return strings.Join(storage, ",")
+}
+
+// A VarLoc describes the storage for part of a user variable.
+type VarLoc struct {
+ // The registers this variable is available in. There can be more than
+ // one in various situations, e.g. it's being moved between registers.
+ Registers RegisterSet
+
+ StackOffset
+}
+
+func (loc VarLoc) absent() bool {
+ return loc.Registers == 0 && !loc.onStack()
+}
+
+var BlockStart = &Value{
+ ID: -10000,
+ Op: OpInvalid,
+ Aux: StringToAux("BlockStart"),
+}
+
+var BlockEnd = &Value{
+ ID: -20000,
+ Op: OpInvalid,
+ Aux: StringToAux("BlockEnd"),
+}
+
+var FuncEnd = &Value{
+ ID: -30000,
+ Op: OpInvalid,
+ Aux: StringToAux("FuncEnd"),
+}
+
+// RegisterSet is a bitmap of registers, indexed by Register.num.
+type RegisterSet uint64
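The rest of this file walks a RegisterSet by repeatedly clearing the lowest set bit, as reset and processValue do; a sketch of that idiom (the helper name is illustrative, and it assumes the math/bits import already present in this file).

func forEachReg(set RegisterSet, fn func(reg uint8)) {
	mask := uint64(set)
	for mask != 0 {
		reg := uint8(bits.TrailingZeros64(mask))
		mask &^= 1 << reg
		fn(reg)
	}
}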
+
+// logf prints debug-specific logging to stdout (always stdout) if the current
+// function is tagged by GOSSAFUNC (for ssa output directed either to stdout or html).
+func (s *debugState) logf(msg string, args ...interface{}) {
+ if s.f.PrintOrHtmlSSA {
+ fmt.Printf(msg, args...)
+ }
+}
+
+type debugState struct {
+ // See FuncDebug.
+ slots []LocalSlot
+ vars []*ir.Name
+ varSlots [][]SlotID
+ lists [][]byte
+
+ // The user variable that each slot rolls up to, indexed by SlotID.
+ slotVars []VarID
+
+ f *Func
+ loggingEnabled bool
+ registers []Register
+ stackOffset func(LocalSlot) int32
+ ctxt *obj.Link
+
+ // The names (slots) associated with each value, indexed by Value ID.
+ valueNames [][]SlotID
+
+ // The current state of whatever analysis is running.
+ currentState stateAtPC
+ liveCount []int
+ changedVars *sparseSet
+
+ // The pending location list entry for each user variable, indexed by VarID.
+ pendingEntries []pendingEntry
+
+ varParts map[*ir.Name][]SlotID
+ blockDebug []BlockDebug
+ pendingSlotLocs []VarLoc
+ liveSlots []liveSlot
+ liveSlotSliceBegin int
+ partsByVarOffset sort.Interface
+}
+
+func (state *debugState) initializeCache(f *Func, numVars, numSlots int) {
+ // One blockDebug per block. Initialized in allocBlock.
+ if cap(state.blockDebug) < f.NumBlocks() {
+ state.blockDebug = make([]BlockDebug, f.NumBlocks())
+ } else {
+ // This local variable, and the ones like it below, enable compiler
+ // optimizations. Don't inline them.
+ b := state.blockDebug[:f.NumBlocks()]
+ for i := range b {
+ b[i] = BlockDebug{}
+ }
+ }
+
+ // A list of slots per Value. Reuse the previous child slices.
+ if cap(state.valueNames) < f.NumValues() {
+ old := state.valueNames
+ state.valueNames = make([][]SlotID, f.NumValues())
+ copy(state.valueNames, old)
+ }
+ vn := state.valueNames[:f.NumValues()]
+ for i := range vn {
+ vn[i] = vn[i][:0]
+ }
+
+ // Slot and register contents for currentState. Cleared by reset().
+ if cap(state.currentState.slots) < numSlots {
+ state.currentState.slots = make([]VarLoc, numSlots)
+ } else {
+ state.currentState.slots = state.currentState.slots[:numSlots]
+ }
+ if cap(state.currentState.registers) < len(state.registers) {
+ state.currentState.registers = make([][]SlotID, len(state.registers))
+ } else {
+ state.currentState.registers = state.currentState.registers[:len(state.registers)]
+ }
+
+ // Used many times by mergePredecessors.
+ if cap(state.liveCount) < numSlots {
+ state.liveCount = make([]int, numSlots)
+ } else {
+ state.liveCount = state.liveCount[:numSlots]
+ }
+
+ // A relatively small slice, but used many times as the return from processValue.
+ state.changedVars = newSparseSet(numVars)
+
+ // A pending entry per user variable, with space to track each of its pieces.
+ numPieces := 0
+ for i := range state.varSlots {
+ numPieces += len(state.varSlots[i])
+ }
+ if cap(state.pendingSlotLocs) < numPieces {
+ state.pendingSlotLocs = make([]VarLoc, numPieces)
+ } else {
+ psl := state.pendingSlotLocs[:numPieces]
+ for i := range psl {
+ psl[i] = VarLoc{}
+ }
+ }
+ if cap(state.pendingEntries) < numVars {
+ state.pendingEntries = make([]pendingEntry, numVars)
+ }
+ pe := state.pendingEntries[:numVars]
+ freePieceIdx := 0
+ for varID, slots := range state.varSlots {
+ pe[varID] = pendingEntry{
+ pieces: state.pendingSlotLocs[freePieceIdx : freePieceIdx+len(slots)],
+ }
+ freePieceIdx += len(slots)
+ }
+ state.pendingEntries = pe
+
+ if cap(state.lists) < numVars {
+ state.lists = make([][]byte, numVars)
+ } else {
+ state.lists = state.lists[:numVars]
+ for i := range state.lists {
+ state.lists[i] = nil
+ }
+ }
+
+ state.liveSlots = state.liveSlots[:0]
+ state.liveSlotSliceBegin = 0
+}
+
+func (state *debugState) allocBlock(b *Block) *BlockDebug {
+ return &state.blockDebug[b.ID]
+}
+
+func (state *debugState) appendLiveSlot(ls liveSlot) {
+ state.liveSlots = append(state.liveSlots, ls)
+}
+
+func (state *debugState) getLiveSlotSlice() []liveSlot {
+ s := state.liveSlots[state.liveSlotSliceBegin:]
+ state.liveSlotSliceBegin = len(state.liveSlots)
+ return s
+}
+
+func (s *debugState) blockEndStateString(b *BlockDebug) string {
+ endState := stateAtPC{slots: make([]VarLoc, len(s.slots)), registers: make([][]SlotID, len(s.registers))}
+ endState.reset(b.endState)
+ return s.stateString(endState)
+}
+
+func (s *debugState) stateString(state stateAtPC) string {
+ var strs []string
+ for slotID, loc := range state.slots {
+ if !loc.absent() {
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", s.slots[slotID], s.LocString(loc)))
+ }
+ }
+
+ strs = append(strs, "\n")
+ for reg, slots := range state.registers {
+ if len(slots) != 0 {
+ var slotStrs []string
+ for _, slot := range slots {
+ slotStrs = append(slotStrs, s.slots[slot].String())
+ }
+ strs = append(strs, fmt.Sprintf("\t%v = %v\n", &s.registers[reg], slotStrs))
+ }
+ }
+
+ if len(strs) == 1 {
+ return "(no vars)\n"
+ }
+ return strings.Join(strs, "")
+}
+
+// slotCanonicalizer is a table used to look up and canonicalize
+// LocalSlots in a type-insensitive way (e.g. taking into account the
+// base name, offset, and width of the slot, but ignoring the slot
+// type).
+type slotCanonicalizer struct {
+ slmap map[slotKey]SlKeyIdx
+ slkeys []LocalSlot
+}
+
+func newSlotCanonicalizer() *slotCanonicalizer {
+ return &slotCanonicalizer{
+ slmap: make(map[slotKey]SlKeyIdx),
+ slkeys: []LocalSlot{LocalSlot{N: nil}},
+ }
+}
+
+type SlKeyIdx uint32
+
+const noSlot = SlKeyIdx(0)
+
+// slotKey is a type-insensitive encapsulation of a LocalSlot; it
+// is used to key a map within slotCanonicalizer.
+type slotKey struct {
+ name *ir.Name
+ offset int64
+ width int64
+ splitOf SlKeyIdx // idx in slkeys slice in slotCanonicalizer
+ splitOffset int64
+}
+
+// lookup looks up a LocalSlot in the slot canonicalizer "sc", adding it
+// to the table if need be. It returns the canonical slot index and a
+// boolean indicating whether the slot was already present in the table
+// (TRUE => found).
+func (sc *slotCanonicalizer) lookup(ls LocalSlot) (SlKeyIdx, bool) {
+ split := noSlot
+ if ls.SplitOf != nil {
+ split, _ = sc.lookup(*ls.SplitOf)
+ }
+ k := slotKey{
+ name: ls.N, offset: ls.Off, width: ls.Type.Size(),
+ splitOf: split, splitOffset: ls.SplitOffset,
+ }
+ if idx, ok := sc.slmap[k]; ok {
+ return idx, true
+ }
+ rv := SlKeyIdx(len(sc.slkeys))
+ sc.slkeys = append(sc.slkeys, ls)
+ sc.slmap[k] = rv
+ return rv, false
+}
+
+func (sc *slotCanonicalizer) canonSlot(idx SlKeyIdx) LocalSlot {
+ return sc.slkeys[idx]
+}
+
+// PopulateABIInRegArgOps examines the entry block of the function
+// and looks for incoming parameters that have missing or partial
+// OpArg{Int,Float}Reg values, inserting additional values in
+// cases where they are missing. Example:
+//
+// func foo(s string, used int, notused int) int {
+// return len(s) + used
+// }
+//
+// In the function above, the incoming parameter "used" is fully live,
+// "notused" is not live, and "s" is partially live (only the length
+// field of the string is used). At the point where debug value
+// analysis runs, we might expect to see an entry block with:
+//
+// b1:
+// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+// v5 = ArgIntReg <int> {used} [0] : CX
+//
+// While this is an accurate picture of the live incoming params,
+// we also want to have debug locations for non-live params (or
+// their non-live pieces), e.g. something like
+//
+// b1:
+// v9 = ArgIntReg <*uint8> {s+0} [0] : AX
+// v4 = ArgIntReg <uintptr> {s+8} [0] : BX
+// v5 = ArgIntReg <int> {used} [0] : CX
+// v10 = ArgIntReg <int> {notused} [0] : DI
+//
+// This function examines the live OpArg{Int,Float}Reg values and
+// synthesizes new (dead) values for the non-live params or the
+// non-live pieces of partially live params.
+//
+func PopulateABIInRegArgOps(f *Func) {
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+
+ // When manufacturing new slots that correspond to splits of
+ // composite parameters, we want to avoid creating a new sub-slot
+ // that differs from some existing sub-slot only by type, since
+ // the debug location analysis will treat that slot as a separate
+ // entity. To achieve this, create a lookup table of existing
+// slots that is type-insensitive.
+ sc := newSlotCanonicalizer()
+ for _, sl := range f.Names {
+ sc.lookup(*sl)
+ }
+
+ // Add slot -> value entry to f.NamedValues if not already present.
+ addToNV := func(v *Value, sl LocalSlot) {
+ values, ok := f.NamedValues[sl]
+ if !ok {
+ // Haven't seen this slot yet.
+ sla := f.localSlotAddr(sl)
+ f.Names = append(f.Names, sla)
+ } else {
+ for _, ev := range values {
+ if v == ev {
+ return
+ }
+ }
+ }
+ values = append(values, v)
+ f.NamedValues[sl] = values
+ }
+
+ newValues := []*Value{}
+
+ abiRegIndexToRegister := func(reg abi.RegIndex) int8 {
+ i := f.ABISelf.FloatIndexFor(reg)
+ if i >= 0 { // float PR
+ return f.Config.floatParamRegs[i]
+ } else {
+ return f.Config.intParamRegs[reg]
+ }
+ }
+
+ // Helper to construct a new OpArg{Float,Int}Reg op value.
+ var pos src.XPos
+ if len(f.Entry.Values) != 0 {
+ pos = f.Entry.Values[0].Pos
+ }
+ synthesizeOpIntFloatArg := func(n *ir.Name, t *types.Type, reg abi.RegIndex, sl LocalSlot) *Value {
+ aux := &AuxNameOffset{n, sl.Off}
+ op, auxInt := ArgOpAndRegisterFor(reg, f.ABISelf)
+ v := f.newValueNoBlock(op, t, pos)
+ v.AuxInt = auxInt
+ v.Aux = aux
+ v.Args = nil
+ v.Block = f.Entry
+ newValues = append(newValues, v)
+ addToNV(v, sl)
+ f.setHome(v, &f.Config.registers[abiRegIndexToRegister(reg)])
+ return v
+ }
+
+ // Make a pass through the entry block looking for
+ // OpArg{Int,Float}Reg ops. Record the slots they use in a table
+ // ("sc"). We use a type-insensitive lookup for the slot table,
+ // since the type we get from the ABI analyzer won't always match
+ // what the compiler uses when creating OpArg{Int,Float}Reg ops.
+ for _, v := range f.Entry.Values {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ aux := v.Aux.(*AuxNameOffset)
+ sl := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
+ // install slot in lookup table
+ idx, _ := sc.lookup(sl)
+ // add to f.NamedValues if not already present
+ addToNV(v, sc.canonSlot(idx))
+ } else if v.Op.IsCall() {
+ // if we hit a call, we've gone too far.
+ break
+ }
+ }
+
+ // Now make a pass through the ABI in-params, looking for params
+ // or pieces of params that we didn't encounter in the loop above.
+ for _, inp := range pri.InParams() {
+ if !isNamedRegParam(inp) {
+ continue
+ }
+ n := inp.Name.(*ir.Name)
+
+ // Param is spread across one or more registers. Walk through
+ // each piece to see whether we've seen an arg reg op for it.
+ types, offsets := inp.RegisterTypesAndOffsets()
+ for k, t := range types {
+ // Note: this recipe for creating a LocalSlot is designed
+ // to be compatible with the one used in expand_calls.go
+ // as opposed to decompose.go. The expand calls code just
+ // takes the base name and creates an offset into it,
+ // without using the SplitOf/SplitOffset fields. The code
+ // in decompose.go does the opposite -- it creates a
+ // LocalSlot object with "Off" set to zero, but with
+ // SplitOf pointing to a parent slot, and SplitOffset
+ // holding the offset into the parent object.
+ pieceSlot := LocalSlot{N: n, Type: t, Off: offsets[k]}
+
+ // Look up this piece to see if we've seen a reg op
+ // for it. If not, create one.
+ _, found := sc.lookup(pieceSlot)
+ if !found {
+ // This slot doesn't appear in the map, meaning it
+ // corresponds to an in-param that is not live, or
+ // a portion of an in-param that is not live/used.
+ // Add a new dummy OpArg{Int,Float}Reg for it.
+ synthesizeOpIntFloatArg(n, t, inp.Registers[k],
+ pieceSlot)
+ }
+ }
+ }
+
+ // Insert the new values into the head of the block.
+ f.Entry.Values = append(newValues, f.Entry.Values...)
+}
+
+// BuildFuncDebug builds debug information for f, placing the results in "rval".
+// f must be fully processed, so that each Value is where it will be when
+// machine code is emitted.
+func BuildFuncDebug(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
+ if f.RegAlloc == nil {
+ f.Fatalf("BuildFuncDebug on func %v that has not been fully processed", f)
+ }
+ state := &f.Cache.debugState
+ state.loggingEnabled = loggingEnabled
+ state.f = f
+ state.registers = f.Config.registers
+ state.stackOffset = stackOffset
+ state.ctxt = ctxt
+
+ if buildcfg.Experiment.RegabiArgs {
+ PopulateABIInRegArgOps(f)
+ }
+
+ if state.loggingEnabled {
+ state.logf("Generating location lists for function %q\n", f.Name)
+ }
+
+ if state.varParts == nil {
+ state.varParts = make(map[*ir.Name][]SlotID)
+ } else {
+ for n := range state.varParts {
+ delete(state.varParts, n)
+ }
+ }
+
+ // Recompose any decomposed variables, and establish the canonical
+ // IDs for each var and slot by filling out state.vars and state.slots.
+
+ state.slots = state.slots[:0]
+ state.vars = state.vars[:0]
+ for i, slot := range f.Names {
+ state.slots = append(state.slots, *slot)
+ if ir.IsSynthetic(slot.N) {
+ continue
+ }
+
+ topSlot := slot
+ for topSlot.SplitOf != nil {
+ topSlot = topSlot.SplitOf
+ }
+ if _, ok := state.varParts[topSlot.N]; !ok {
+ state.vars = append(state.vars, topSlot.N)
+ }
+ state.varParts[topSlot.N] = append(state.varParts[topSlot.N], SlotID(i))
+ }
+
+ // Recreate the LocalSlot for each stack-only variable.
+ // This would probably be better as an output from stackframe.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpVarDef || v.Op == OpVarKill {
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
+ continue
+ }
+
+ if _, ok := state.varParts[n]; !ok {
+ slot := LocalSlot{N: n, Type: v.Type, Off: 0}
+ state.slots = append(state.slots, slot)
+ state.varParts[n] = []SlotID{SlotID(len(state.slots) - 1)}
+ state.vars = append(state.vars, n)
+ }
+ }
+ }
+ }
+
+ // Fill in the var<->slot mappings.
+ if cap(state.varSlots) < len(state.vars) {
+ state.varSlots = make([][]SlotID, len(state.vars))
+ } else {
+ state.varSlots = state.varSlots[:len(state.vars)]
+ for i := range state.varSlots {
+ state.varSlots[i] = state.varSlots[i][:0]
+ }
+ }
+ if cap(state.slotVars) < len(state.slots) {
+ state.slotVars = make([]VarID, len(state.slots))
+ } else {
+ state.slotVars = state.slotVars[:len(state.slots)]
+ }
+
+ if state.partsByVarOffset == nil {
+ state.partsByVarOffset = &partsByVarOffset{}
+ }
+ for varID, n := range state.vars {
+ parts := state.varParts[n]
+ state.varSlots[varID] = parts
+ for _, slotID := range parts {
+ state.slotVars[slotID] = VarID(varID)
+ }
+ *state.partsByVarOffset.(*partsByVarOffset) = partsByVarOffset{parts, state.slots}
+ sort.Sort(state.partsByVarOffset)
+ }
+
+ state.initializeCache(f, len(state.varParts), len(state.slots))
+
+ for i, slot := range f.Names {
+ if ir.IsSynthetic(slot.N) {
+ continue
+ }
+ for _, value := range f.NamedValues[*slot] {
+ state.valueNames[value.ID] = append(state.valueNames[value.ID], SlotID(i))
+ }
+ }
+
+ blockLocs := state.liveness()
+ state.buildLocationLists(blockLocs)
+
+ // Populate "rval" with what we've computed.
+ rval.Slots = state.slots
+ rval.VarSlots = state.varSlots
+ rval.Vars = state.vars
+ rval.LocationLists = state.lists
+}
+
+// liveness walks the function in control flow order, calculating the start
+// and end state of each block.
+func (state *debugState) liveness() []*BlockDebug {
+ blockLocs := make([]*BlockDebug, state.f.NumBlocks())
+
+ // Reverse postorder: visit a block after as many as possible of its
+ // predecessors have been visited.
+ po := state.f.Postorder()
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+
+ // Build the starting state for the block from the final
+ // state of its predecessors.
+ startState, startValid := state.mergePredecessors(b, blockLocs, nil)
+ changed := false
+ if state.loggingEnabled {
+ state.logf("Processing %v, initial state:\n%v", b, state.stateString(state.currentState))
+ }
+
+ // Update locs/registers with the effects of each Value.
+ for _, v := range b.Values {
+ slots := state.valueNames[v.ID]
+
+ // Loads and stores inherit the names of their sources.
+ var source *Value
+ switch v.Op {
+ case OpStoreReg:
+ source = v.Args[0]
+ case OpLoadReg:
+ switch a := v.Args[0]; a.Op {
+ case OpArg, OpPhi:
+ source = a
+ case OpStoreReg:
+ source = a.Args[0]
+ default:
+ if state.loggingEnabled {
+ state.logf("at %v: load with unexpected source op: %v (%v)\n", v, a.Op, a)
+ }
+ }
+ }
+ // Update valueNames with the source so that later steps
+ // don't need special handling.
+ if source != nil {
+ slots = append(slots, state.valueNames[source.ID]...)
+ state.valueNames[v.ID] = slots
+ }
+
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ c := state.processValue(v, slots, reg)
+ changed = changed || c
+ }
+
+ if state.loggingEnabled {
+ state.f.Logf("Block %v done, locs:\n%v", b, state.stateString(state.currentState))
+ }
+
+ locs := state.allocBlock(b)
+ locs.relevant = changed
+ if !changed && startValid {
+ locs.endState = startState
+ } else {
+ for slotID, slotLoc := range state.currentState.slots {
+ if slotLoc.absent() {
+ continue
+ }
+ state.appendLiveSlot(liveSlot{slot: SlotID(slotID), Registers: slotLoc.Registers, StackOffset: slotLoc.StackOffset})
+ }
+ locs.endState = state.getLiveSlotSlice()
+ }
+ blockLocs[b.ID] = locs
+ }
+ return blockLocs
+}
+
+// mergePredecessors takes the end state of each of b's predecessors and
+// intersects them to form the starting state for b. It puts that state in
+// blockLocs, and fills state.currentState with it. If convenient, it returns
+// a reused []liveSlot (paired with true) that represents the starting state.
+// If previousBlock is non-nil, it registers changes vs. that block's end
+// state in state.changedVars. Note that previousBlock will often not be a
+// predecessor.
+func (state *debugState) mergePredecessors(b *Block, blockLocs []*BlockDebug, previousBlock *Block) ([]liveSlot, bool) {
+ // Filter out back branches.
+ var predsBuf [10]*Block
+ preds := predsBuf[:0]
+ for _, pred := range b.Preds {
+ if blockLocs[pred.b.ID] != nil {
+ preds = append(preds, pred.b)
+ }
+ }
+
+ if state.loggingEnabled {
+ // The logf below would cause preds to be heap-allocated if
+ // it were passed directly.
+ preds2 := make([]*Block, len(preds))
+ copy(preds2, preds)
+ state.logf("Merging %v into %v\n", preds2, b)
+ }
+
+ // TODO all the calls to this are overkill; only need to do this for slots that are not present in the merge.
+ markChangedVars := func(slots []liveSlot) {
+ for _, live := range slots {
+ state.changedVars.add(ID(state.slotVars[live.slot]))
+ }
+ }
+
+ if len(preds) == 0 {
+ if previousBlock != nil {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(nil)
+ return nil, true
+ }
+
+ p0 := blockLocs[preds[0].ID].endState
+ if len(preds) == 1 {
+ if previousBlock != nil && preds[0].ID != previousBlock.ID {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(p0)
+ return p0, true
+ }
+
+ baseID := preds[0].ID
+ baseState := p0
+
+ // If previous block is not a predecessor, its location information changes at boundary with this block.
+ previousBlockIsNotPredecessor := previousBlock != nil // If it's nil, no info to change.
+
+ if previousBlock != nil {
+ // Try to use previousBlock as the base state
+ // if possible.
+ for _, pred := range preds[1:] {
+ if pred.ID == previousBlock.ID {
+ baseID = pred.ID
+ baseState = blockLocs[pred.ID].endState
+ previousBlockIsNotPredecessor = false
+ break
+ }
+ }
+ }
+
+ if state.loggingEnabled {
+ state.logf("Starting %v with state from b%v:\n%v", b, baseID, state.blockEndStateString(blockLocs[baseID]))
+ }
+
+ slotLocs := state.currentState.slots
+ for _, predSlot := range baseState {
+ slotLocs[predSlot.slot] = VarLoc{predSlot.Registers, predSlot.StackOffset}
+ state.liveCount[predSlot.slot] = 1
+ }
+ for _, pred := range preds {
+ if pred.ID == baseID {
+ continue
+ }
+ if state.loggingEnabled {
+ state.logf("Merging in state from %v:\n%v", pred, state.blockEndStateString(blockLocs[pred.ID]))
+ }
+ for _, predSlot := range blockLocs[pred.ID].endState {
+ state.liveCount[predSlot.slot]++
+ liveLoc := slotLocs[predSlot.slot]
+ if !liveLoc.onStack() || !predSlot.onStack() || liveLoc.StackOffset != predSlot.StackOffset {
+ liveLoc.StackOffset = 0
+ }
+ liveLoc.Registers &= predSlot.Registers
+ slotLocs[predSlot.slot] = liveLoc
+ }
+ }
+
+ // Check if the final state is the same as the first predecessor's
+ // final state, and reuse it if so. In principle it could match any,
+ // but it's probably not worth checking more than the first.
+ unchanged := true
+ for _, predSlot := range baseState {
+ if state.liveCount[predSlot.slot] != len(preds) ||
+ slotLocs[predSlot.slot].Registers != predSlot.Registers ||
+ slotLocs[predSlot.slot].StackOffset != predSlot.StackOffset {
+ unchanged = false
+ break
+ }
+ }
+ if unchanged {
+ if state.loggingEnabled {
+ state.logf("After merge, %v matches b%v exactly.\n", b, baseID)
+ }
+ if previousBlockIsNotPredecessor {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+ }
+ state.currentState.reset(baseState)
+ return baseState, true
+ }
+
+ for reg := range state.currentState.registers {
+ state.currentState.registers[reg] = state.currentState.registers[reg][:0]
+ }
+
+ // A slot is live if it was seen in all predecessors, and they all had
+ // some storage in common.
+ for _, predSlot := range baseState {
+ slotLoc := slotLocs[predSlot.slot]
+
+ if state.liveCount[predSlot.slot] != len(preds) {
+ // Seen in only some predecessors. Clear it out.
+ slotLocs[predSlot.slot] = VarLoc{}
+ continue
+ }
+
+ // Present in all predecessors.
+ mask := uint64(slotLoc.Registers)
+ for {
+ if mask == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(mask))
+ mask &^= 1 << reg
+ state.currentState.registers[reg] = append(state.currentState.registers[reg], predSlot.slot)
+ }
+ }
+
+ if previousBlockIsNotPredecessor {
+ // Mark everything in previous block as changed because it is not a predecessor.
+ markChangedVars(blockLocs[previousBlock.ID].endState)
+
+ }
+ return nil, false
+}
+
+// processValue updates the current location state (state.currentState) to reflect
+// v, a value with the names in vSlots and homed in vReg. "v" becomes visible after
+// execution of the instructions evaluating it. It reports whether any variable
+// locations changed; the affected variables are recorded in state.changedVars.
+func (state *debugState) processValue(v *Value, vSlots []SlotID, vReg *Register) bool {
+ locs := state.currentState
+ changed := false
+ setSlot := func(slot SlotID, loc VarLoc) {
+ changed = true
+ state.changedVars.add(ID(state.slotVars[slot]))
+ state.currentState.slots[slot] = loc
+ }
+
+ // Handle any register clobbering. Call operations, for example,
+ // clobber all registers even though they don't explicitly write to
+ // them.
+ clobbers := uint64(opcodeTable[v.Op].reg.clobbers)
+ for {
+ if clobbers == 0 {
+ break
+ }
+ reg := uint8(bits.TrailingZeros64(clobbers))
+ clobbers &^= 1 << reg
+
+ for _, slot := range locs.registers[reg] {
+ if state.loggingEnabled {
+ state.logf("at %v: %v clobbered out of %v\n", v, state.slots[slot], &state.registers[reg])
+ }
+
+ last := locs.slots[slot]
+ if last.absent() {
+ state.f.Fatalf("at %v: slot %v in register %v with no location entry", v, state.slots[slot], &state.registers[reg])
+ continue
+ }
+ regs := last.Registers &^ (1 << reg)
+ setSlot(slot, VarLoc{regs, last.StackOffset})
+ }
+
+ locs.registers[reg] = locs.registers[reg][:0]
+ }
+
+ switch {
+ case v.Op == OpVarDef, v.Op == OpVarKill:
+ n := v.Aux.(*ir.Name)
+ if ir.IsSynthetic(n) {
+ break
+ }
+
+ slotID := state.varParts[n][0]
+ var stackOffset StackOffset
+ if v.Op == OpVarDef {
+ stackOffset = StackOffset(state.stackOffset(state.slots[slotID])<<1 | 1)
+ }
+ setSlot(slotID, VarLoc{0, stackOffset})
+ if state.loggingEnabled {
+ if v.Op == OpVarDef {
+ state.logf("at %v: stack-only var %v now live\n", v, state.slots[slotID])
+ } else {
+ state.logf("at %v: stack-only var %v now dead\n", v, state.slots[slotID])
+ }
+ }
+
+ case v.Op == OpArg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ if state.loggingEnabled {
+ state.logf("at %v: arg %v now on stack in location %v\n", v, state.slots[slot], home)
+ if last := locs.slots[slot]; !last.absent() {
+ state.logf("at %v: unexpected arg op on already-live slot %v\n", v, state.slots[slot])
+ }
+ }
+
+ setSlot(slot, VarLoc{0, StackOffset(stackOffset)})
+ }
+
+ case v.Op == OpStoreReg:
+ home := state.f.getHome(v.ID).(LocalSlot)
+ stackOffset := state.stackOffset(home)<<1 | 1
+ for _, slot := range vSlots {
+ last := locs.slots[slot]
+ if last.absent() {
+ if state.loggingEnabled {
+ state.logf("at %v: unexpected spill of unnamed register %s\n", v, vReg)
+ }
+ break
+ }
+
+ setSlot(slot, VarLoc{last.Registers, StackOffset(stackOffset)})
+ if state.loggingEnabled {
+ state.logf("at %v: %v spilled to stack location %v\n", v, state.slots[slot], home)
+ }
+ }
+
+ case vReg != nil:
+ if state.loggingEnabled {
+ newSlots := make([]bool, len(state.slots))
+ for _, slot := range vSlots {
+ newSlots[slot] = true
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ if !newSlots[slot] {
+ state.logf("at %v: overwrote %v in register %v\n", v, state.slots[slot], vReg)
+ }
+ }
+ }
+
+ for _, slot := range locs.registers[vReg.num] {
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{last.Registers &^ (1 << uint8(vReg.num)), last.StackOffset})
+ }
+ locs.registers[vReg.num] = locs.registers[vReg.num][:0]
+ locs.registers[vReg.num] = append(locs.registers[vReg.num], vSlots...)
+ for _, slot := range vSlots {
+ if state.loggingEnabled {
+ state.logf("at %v: %v now in %s\n", v, state.slots[slot], vReg)
+ }
+
+ last := locs.slots[slot]
+ setSlot(slot, VarLoc{1<<uint8(vReg.num) | last.Registers, last.StackOffset})
+ }
+ }
+ return changed
+}
+
+// varOffset returns the offset of slot within the user variable it was
+// decomposed from. This has nothing to do with its stack offset.
+func varOffset(slot LocalSlot) int64 {
+ offset := slot.Off
+ s := &slot
+ for ; s.SplitOf != nil; s = s.SplitOf {
+ offset += s.SplitOffset
+ }
+ return offset
+}
+
+type partsByVarOffset struct {
+ slotIDs []SlotID
+ slots []LocalSlot
+}
+
+func (a partsByVarOffset) Len() int { return len(a.slotIDs) }
+func (a partsByVarOffset) Less(i, j int) bool {
+ return varOffset(a.slots[a.slotIDs[i]]) < varOffset(a.slots[a.slotIDs[j]])
+}
+func (a partsByVarOffset) Swap(i, j int) { a.slotIDs[i], a.slotIDs[j] = a.slotIDs[j], a.slotIDs[i] }
+
+// A pendingEntry represents the beginning of a location list entry, missing
+// only its end coordinate.
+type pendingEntry struct {
+ present bool
+ startBlock, startValue ID
+ // The location of each piece of the variable, in the same order as the
+ // SlotIDs in varParts.
+ pieces []VarLoc
+}
+
+func (e *pendingEntry) clear() {
+ e.present = false
+ e.startBlock = 0
+ e.startValue = 0
+ for i := range e.pieces {
+ e.pieces[i] = VarLoc{}
+ }
+}
+
+// canMerge reports whether the location description for new is the same as
+// pending.
+func canMerge(pending, new VarLoc) bool {
+ if pending.absent() && new.absent() {
+ return true
+ }
+ if pending.absent() || new.absent() {
+ return false
+ }
+ if pending.onStack() {
+ return pending.StackOffset == new.StackOffset
+ }
+ if pending.Registers != 0 && new.Registers != 0 {
+ return firstReg(pending.Registers) == firstReg(new.Registers)
+ }
+ return false
+}
+
+// firstReg returns the first register in set that is present.
+func firstReg(set RegisterSet) uint8 {
+ if set == 0 {
+ // This is wrong, but there seem to be some situations where we
+ // produce locations with no storage.
+ return 0
+ }
+ return uint8(bits.TrailingZeros64(uint64(set)))
+}
+
+// buildLocationLists builds location lists for all the user variables in
+// state.f, using the information about block state in blockLocs.
+// The returned location lists are not fully complete. They are in terms of
+// SSA values rather than PCs, and have no base address/end entries. They will
+// be finished by PutLocationList.
+func (state *debugState) buildLocationLists(blockLocs []*BlockDebug) {
+ // Run through the function in program text order, building up location
+ // lists as we go. The heavy lifting has mostly already been done.
+
+ var prevBlock *Block
+ for _, b := range state.f.Blocks {
+ state.mergePredecessors(b, blockLocs, prevBlock)
+
+ if !blockLocs[b.ID].relevant {
+ // Handle any differences among predecessor blocks and previous block (perhaps not a predecessor)
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockStart)
+ }
+ continue
+ }
+
+ mustBeFirst := func(v *Value) bool {
+ return v.Op == OpPhi || v.Op.isLoweredGetClosurePtr() ||
+ v.Op == OpArgIntReg || v.Op == OpArgFloatReg
+ }
+
+ blockPrologComplete := func(v *Value) bool {
+ if b.ID != state.f.Entry.ID {
+ return !opcodeTable[v.Op].zeroWidth
+ } else {
+ return v.Op == OpInitMem
+ }
+ }
+
+ // Examine the prolog portion of the block to process special
+ // zero-width ops such as Arg, Phi, LoweredGetClosurePtr (etc)
+ // whose lifetimes begin at the block starting point. In an
+ // entry block, allow for the possibility that we may see Arg
+ // ops that appear _after_ other non-zero-width operations.
+ // Example:
+ //
+ // v33 = ArgIntReg <uintptr> {foo+0} [0] : AX (foo)
+ // v34 = ArgIntReg <uintptr> {bar+0} [0] : BX (bar)
+ // ...
+ // v77 = StoreReg <unsafe.Pointer> v67 : ctx+8[unsafe.Pointer]
+ // v78 = StoreReg <unsafe.Pointer> v68 : ctx[unsafe.Pointer]
+ // v79 = Arg <*uint8> {args} : args[*uint8] (args[*uint8])
+ // v80 = Arg <int> {args} [8] : args+8[int] (args+8[int])
+ // ...
+ // v1 = InitMem <mem>
+ //
+ // We can stop scanning the initial portion of the block when
+ // we either see the InitMem op (for entry blocks) or the
+ // first non-zero-width op (for other blocks).
+ for idx := 0; idx < len(b.Values); idx++ {
+ v := b.Values[idx]
+ if blockPrologComplete(v) {
+ break
+ }
+ // Consider only "lifetime begins at block start" ops.
+ if !mustBeFirst(v) && v.Op != OpArg {
+ continue
+ }
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+ if changed {
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, BlockStart)
+ }
+ state.changedVars.clear()
+ }
+ }
+
+ // Now examine the block again, handling things other than the
+ // "begins at block start" lifetimes.
+ zeroWidthPending := false
+ prologComplete := false
+ // expect to see values in pattern (apc)* (zerowidth|real)*
+ for _, v := range b.Values {
+ if blockPrologComplete(v) {
+ prologComplete = true
+ }
+ slots := state.valueNames[v.ID]
+ reg, _ := state.f.getHome(v.ID).(*Register)
+ changed := state.processValue(v, slots, reg) // changed == added to state.changedVars
+
+ if opcodeTable[v.Op].zeroWidth {
+ if prologComplete && mustBeFirst(v) {
+ panic(fmt.Errorf("Unexpected placement of op '%s' appearing after non-pseudo-op at beginning of block %s in %s\n%s", v.LongString(), b, b.Func.Name, b.Func))
+ }
+ if changed {
+ if mustBeFirst(v) || v.Op == OpArg {
+ // already taken care of above
+ continue
+ }
+ zeroWidthPending = true
+ }
+ continue
+ }
+ if !changed && !zeroWidthPending {
+ continue
+ }
+
+ // Not zero-width; i.e., a "real" instruction.
+ zeroWidthPending = false
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), v.Block, v)
+ }
+ state.changedVars.clear()
+ }
+ for _, varID := range state.changedVars.contents() {
+ state.updateVar(VarID(varID), b, BlockEnd)
+ }
+
+ prevBlock = b
+ }
+
+ if state.loggingEnabled {
+ state.logf("location lists:\n")
+ }
+
+ // Flush any leftover entries live at the end of the last block.
+ for varID := range state.lists {
+ state.writePendingEntry(VarID(varID), state.f.Blocks[len(state.f.Blocks)-1].ID, FuncEnd.ID)
+ list := state.lists[varID]
+ if state.loggingEnabled {
+ if len(list) == 0 {
+ state.logf("\t%v : empty list\n", state.vars[varID])
+ } else {
+ state.logf("\t%v : %q\n", state.vars[varID], hex.EncodeToString(state.lists[varID]))
+ }
+ }
+ }
+}
+
+// updateVar updates the pending location list entry for varID to
+// reflect the new locations in curLoc, beginning at v in block b.
+// v may be one of the special values indicating block start or end.
+func (state *debugState) updateVar(varID VarID, b *Block, v *Value) {
+ curLoc := state.currentState.slots
+ // Assemble the location list entry with whatever's live.
+ empty := true
+ for _, slotID := range state.varSlots[varID] {
+ if !curLoc[slotID].absent() {
+ empty = false
+ break
+ }
+ }
+ pending := &state.pendingEntries[varID]
+ if empty {
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.clear()
+ return
+ }
+
+ // Extend the previous entry if possible.
+ if pending.present {
+ merge := true
+ for i, slotID := range state.varSlots[varID] {
+ if !canMerge(pending.pieces[i], curLoc[slotID]) {
+ merge = false
+ break
+ }
+ }
+ if merge {
+ return
+ }
+ }
+
+ state.writePendingEntry(varID, b.ID, v.ID)
+ pending.present = true
+ pending.startBlock = b.ID
+ pending.startValue = v.ID
+ for i, slot := range state.varSlots[varID] {
+ pending.pieces[i] = curLoc[slot]
+ }
+}
+
+// writePendingEntry writes out the pending entry for varID, if any,
+// terminated at endBlock/Value.
+func (state *debugState) writePendingEntry(varID VarID, endBlock, endValue ID) {
+ pending := state.pendingEntries[varID]
+ if !pending.present {
+ return
+ }
+
+ // Pack the start/end coordinates into the start/end addresses
+ // of the entry, for decoding by PutLocationList.
+ start, startOK := encodeValue(state.ctxt, pending.startBlock, pending.startValue)
+ end, endOK := encodeValue(state.ctxt, endBlock, endValue)
+ if !startOK || !endOK {
+ // If someone writes a function that uses >65K values,
+ // they get incomplete debug info on 32-bit platforms.
+ return
+ }
+ if start == end {
+ if state.loggingEnabled {
+ // Printf not logf so not gated by GOSSAFUNC; this should fire very rarely.
+ fmt.Printf("Skipping empty location list for %v in %s\n", state.vars[varID], state.f.Name)
+ }
+ return
+ }
+
+ list := state.lists[varID]
+ list = appendPtr(state.ctxt, list, start)
+ list = appendPtr(state.ctxt, list, end)
+ // Where to write the length of the location description once
+ // we know how big it is.
+ sizeIdx := len(list)
+ list = list[:len(list)+2]
+
+ if state.loggingEnabled {
+ var partStrs []string
+ for i, slot := range state.varSlots[varID] {
+ partStrs = append(partStrs, fmt.Sprintf("%v@%v", state.slots[slot], state.LocString(pending.pieces[i])))
+ }
+ state.logf("Add entry for %v: \tb%vv%v-b%vv%v = \t%v\n", state.vars[varID], pending.startBlock, pending.startValue, endBlock, endValue, strings.Join(partStrs, " "))
+ }
+
+ for i, slotID := range state.varSlots[varID] {
+ loc := pending.pieces[i]
+ slot := state.slots[slotID]
+
+ if !loc.absent() {
+ if loc.onStack() {
+ if loc.stackOffsetValue() == 0 {
+ list = append(list, dwarf.DW_OP_call_frame_cfa)
+ } else {
+ list = append(list, dwarf.DW_OP_fbreg)
+ list = dwarf.AppendSleb128(list, int64(loc.stackOffsetValue()))
+ }
+ } else {
+ regnum := state.ctxt.Arch.DWARFRegisters[state.registers[firstReg(loc.Registers)].ObjNum()]
+ if regnum < 32 {
+ list = append(list, dwarf.DW_OP_reg0+byte(regnum))
+ } else {
+ list = append(list, dwarf.DW_OP_regx)
+ list = dwarf.AppendUleb128(list, uint64(regnum))
+ }
+ }
+ }
+
+ if len(state.varSlots[varID]) > 1 {
+ list = append(list, dwarf.DW_OP_piece)
+ list = dwarf.AppendUleb128(list, uint64(slot.Type.Size()))
+ }
+ }
+ state.ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+ state.lists[varID] = list
+}
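For reference, each intermediate entry appended above and later rewritten by PutLocationList has roughly this layout (a descriptive sketch, not a struct in the code):

// One intermediate location-list entry, before PC translation:
//   [ptr-size bytes] start = encodeValue(startBlock, startValue)
//   [ptr-size bytes] end   = encodeValue(endBlock, endValue)
//   [2 bytes]        length of the DWARF expression that follows
//   [length bytes]   DW_OP_call_frame_cfa / DW_OP_fbreg / DW_OP_reg* / DW_OP_regx
//                    per piece, plus DW_OP_piece when the variable has several slots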
+
+// PutLocationList adds list (a location list in its intermediate representation) to listSym.
+func (debugInfo *FuncDebug) PutLocationList(list []byte, ctxt *obj.Link, listSym, startPC *obj.LSym) {
+ getPC := debugInfo.GetPC
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, ^0)
+ listSym.WriteAddr(ctxt, listSym.Size, ctxt.Arch.PtrSize, startPC, 0)
+ }
+
+ // Re-read list, translating its address from block/value ID to PC.
+ for i := 0; i < len(list); {
+ begin := getPC(decodeValue(ctxt, readPtr(ctxt, list[i:])))
+ end := getPC(decodeValue(ctxt, readPtr(ctxt, list[i+ctxt.Arch.PtrSize:])))
+
+ // Horrible hack. If a range contains only zero-width
+ // instructions, e.g. an Arg, and it's at the beginning of the
+ // function, this would be indistinguishable from an
+ // end entry. Fudge it.
+ if begin == 0 && end == 0 {
+ end = 1
+ }
+
+ if ctxt.UseBASEntries {
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(begin))
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, int64(end))
+ } else {
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(begin))
+ listSym.WriteCURelativeAddr(ctxt, listSym.Size, startPC, int64(end))
+ }
+
+ i += 2 * ctxt.Arch.PtrSize
+ datalen := 2 + int(ctxt.Arch.ByteOrder.Uint16(list[i:]))
+ listSym.WriteBytes(ctxt, listSym.Size, list[i:i+datalen]) // copy datalen and location encoding
+ i += datalen
+ }
+
+ // Location list contents, now with real PCs.
+ // End entry.
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+ listSym.WriteInt(ctxt, listSym.Size, ctxt.Arch.PtrSize, 0)
+}
+
+// Pack a value and block ID into an address-sized uint, returning encoded
+// value and boolean indicating whether the encoding succeeded. For
+// 32-bit architectures the process may fail for very large procedures
+// (the theory being that it's ok to have degraded debug quality in
+// this case).
+func encodeValue(ctxt *obj.Link, b, v ID) (uint64, bool) {
+ if ctxt.Arch.PtrSize == 8 {
+ result := uint64(b)<<32 | uint64(uint32(v))
+ //ctxt.Logf("b %#x (%d) v %#x (%d) -> %#x\n", b, b, v, v, result)
+ return result, true
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ if ID(int16(b)) != b || ID(int16(v)) != v {
+ return 0, false
+ }
+ return uint64(b)<<16 | uint64(uint16(v)), true
+}
+
+// Unpack a value and block ID encoded by encodeValue.
+func decodeValue(ctxt *obj.Link, word uint64) (ID, ID) {
+ if ctxt.Arch.PtrSize == 8 {
+ b, v := ID(word>>32), ID(word)
+ //ctxt.Logf("%#x -> b %#x (%d) v %#x (%d)\n", word, b, b, v, v)
+ return b, v
+ }
+ if ctxt.Arch.PtrSize != 4 {
+ panic("unexpected pointer size")
+ }
+ return ID(word >> 16), ID(int16(word))
+}
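A self-contained sketch of the 64-bit packing round trip performed by encodeValue and decodeValue above (block ID in the high 32 bits, value ID in the low 32):

package main

import "fmt"

func main() {
	b, v := uint64(7), uint64(42)
	word := b<<32 | v                   // encode, as done when PtrSize == 8
	fmt.Println(word>>32, uint32(word)) // decode: prints "7 42"
}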
+
+// Append a pointer-sized uint to buf.
+func appendPtr(ctxt *obj.Link, buf []byte, word uint64) []byte {
+ if cap(buf) < len(buf)+20 {
+ b := make([]byte, len(buf), 20+cap(buf)*2)
+ copy(b, buf)
+ buf = b
+ }
+ writeAt := len(buf)
+ buf = buf[0 : len(buf)+ctxt.Arch.PtrSize]
+ writePtr(ctxt, buf[writeAt:], word)
+ return buf
+}
+
+// Write a pointer-sized uint to the beginning of buf.
+func writePtr(ctxt *obj.Link, buf []byte, word uint64) {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ ctxt.Arch.ByteOrder.PutUint32(buf, uint32(word))
+ case 8:
+ ctxt.Arch.ByteOrder.PutUint64(buf, word)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
+
+// Read a pointer-sized uint from the beginning of buf.
+func readPtr(ctxt *obj.Link, buf []byte) uint64 {
+ switch ctxt.Arch.PtrSize {
+ case 4:
+ return uint64(ctxt.Arch.ByteOrder.Uint32(buf))
+ case 8:
+ return ctxt.Arch.ByteOrder.Uint64(buf)
+ default:
+ panic("unexpected pointer size")
+ }
+
+}
+
+// setupLocList creates the initial portion of a location list for a
+// user variable. It emits the encoded start/end of the range and a
+// placeholder for the size. Return value is the new list plus the
+// slot in the list holding the size (to be updated later).
+func setupLocList(ctxt *obj.Link, f *Func, list []byte, st, en ID) ([]byte, int) {
+ start, startOK := encodeValue(ctxt, f.Entry.ID, st)
+ end, endOK := encodeValue(ctxt, f.Entry.ID, en)
+ if !startOK || !endOK {
+ // This could happen if someone writes a function that uses
+ // >65K values on a 32-bit platform. Hopefully a degraded debugging
+ // experience is ok in that case.
+ return nil, 0
+ }
+ list = appendPtr(ctxt, list, start)
+ list = appendPtr(ctxt, list, end)
+
+ // Where to write the length of the location description once
+ // we know how big it is.
+ sizeIdx := len(list)
+ list = list[:len(list)+2]
+ return list, sizeIdx
+}
+
+// locatePrologEnd walks the entry block of a function with incoming
+// register arguments and locates the last instruction in the prolog
+// that spills a register arg. It returns the ID of that instruction.
+// Example:
+//
+// b1:
+// v3 = ArgIntReg <int> {p1+0} [0] : AX
+// ... more arg regs ..
+// v4 = ArgFloatReg <float32> {f1+0} [0] : X0
+// v52 = MOVQstore <mem> {p1} v2 v3 v1
+// ... more stores ...
+// v68 = MOVSSstore <mem> {f4} v2 v67 v66
+// v38 = MOVQstoreconst <mem> {blob} [val=0,off=0] v2 v32
+//
+// Important: locatePrologEnd is expected to work properly only with
+// optimization turned off (e.g. "-N"). If optimization is enabled
+// we can't be assured of finding all input arguments spilled in the
+// entry block prolog.
+func locatePrologEnd(f *Func) ID {
+
+ // returns true if this instruction looks like it moves an ABI
+ // register to the stack, along with the value being stored.
+ isRegMoveLike := func(v *Value) (bool, ID) {
+ n, ok := v.Aux.(*ir.Name)
+ var r ID
+ if !ok || n.Class != ir.PPARAM {
+ return false, r
+ }
+ regInputs, memInputs, spInputs := 0, 0, 0
+ for _, a := range v.Args {
+ if a.Op == OpArgIntReg || a.Op == OpArgFloatReg {
+ regInputs++
+ r = a.ID
+ } else if a.Type.IsMemory() {
+ memInputs++
+ } else if a.Op == OpSP {
+ spInputs++
+ } else {
+ return false, r
+ }
+ }
+ return v.Type.IsMemory() && memInputs == 1 &&
+ regInputs == 1 && spInputs == 1, r
+ }
+
+ // OpArg*Reg values we've seen so far on our forward walk,
+ // for which we have not yet seen a corresponding spill.
+ regArgs := make([]ID, 0, 32)
+
+ // removeReg tries to remove a value from regArgs, returning true
+ // if found and removed, or false otherwise.
+ removeReg := func(r ID) bool {
+ for i := 0; i < len(regArgs); i++ {
+ if regArgs[i] == r {
+ regArgs = append(regArgs[:i], regArgs[i+1:]...)
+ return true
+ }
+ }
+ return false
+ }
+
+ // Walk forwards through the block. When we see OpArg*Reg, record
+// the value it produces in the regArgs list. When we see a store that uses
+ // the value, remove the entry. When we hit the last store (use)
+ // then we've arrived at the end of the prolog.
+ for k, v := range f.Entry.Values {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ regArgs = append(regArgs, v.ID)
+ continue
+ }
+ if ok, r := isRegMoveLike(v); ok {
+ if removed := removeReg(r); removed {
+ if len(regArgs) == 0 {
+ // Found our last spill; return the value after
+ // it. Note that it is possible that this spill is
+ // the last instruction in the block. If so, then
+ // return the "end of block" sentinel.
+ if k < len(f.Entry.Values)-1 {
+ return f.Entry.Values[k+1].ID
+ }
+ return BlockEnd.ID
+ }
+ }
+ }
+ if v.Op.IsCall() {
+ // if we hit a call, we've gone too far.
+ return v.ID
+ }
+ }
+ // nothing found
+ return ID(-1)
+}
+
+// isNamedRegParam returns true if the param corresponding to "p"
+// is a named, non-blank input parameter assigned to one or more
+// registers.
+func isNamedRegParam(p abi.ABIParamAssignment) bool {
+ if p.Name == nil {
+ return false
+ }
+ n := p.Name.(*ir.Name)
+ if n.Sym() == nil || n.Sym().IsBlank() {
+ return false
+ }
+ if len(p.Registers) == 0 {
+ return false
+ }
+ return true
+}
+
+// BuildFuncDebugNoOptimized populates a FuncDebug object "rval" with
+// entries corresponding to the register-resident input parameters for
+// the function "f"; it is used when we are compiling without
+// optimization but the register ABI is enabled. For each reg param,
+// it constructs a 2-element location list: the first element holds
+// the input register, and the second element holds the stack location
+// of the param (the assumption being that when optimization is off,
+// each input param reg will be spilled in the prolog).
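+//
+// As an illustration (not exact output), the list built for a parameter
+// passed in a single register might look like:
+//
+//	[block start, end of prolog):  DW_OP_reg<N>          (param lives in its arg register)
+//	[end of prolog, end of func):  DW_OP_fbreg <offset>  (param lives in its stack spill slot)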
+func BuildFuncDebugNoOptimized(ctxt *obj.Link, f *Func, loggingEnabled bool, stackOffset func(LocalSlot) int32, rval *FuncDebug) {
+
+ pri := f.ABISelf.ABIAnalyzeFuncType(f.Type.FuncType())
+
+ // Look to see if we have any named register-promoted parameters.
+ // If there are none, bail early and let the caller sort things
+ // out for the remainder of the params/locals.
+ numRegParams := 0
+ for _, inp := range pri.InParams() {
+ if isNamedRegParam(inp) {
+ numRegParams++
+ }
+ }
+ if numRegParams == 0 {
+ return
+ }
+
+ state := debugState{f: f}
+
+ if loggingEnabled {
+ state.logf("generating -N reg param loc lists for func %q\n", f.Name)
+ }
+
+ // Allocate location lists.
+ rval.LocationLists = make([][]byte, numRegParams)
+
+ // Locate the value corresponding to the last spill of
+ // an input register.
+ afterPrologVal := locatePrologEnd(f)
+
+ // Walk the input params again and process the register-resident elements.
+ pidx := 0
+ for _, inp := range pri.InParams() {
+ if !isNamedRegParam(inp) {
+ // will be sorted out elsewhere
+ continue
+ }
+
+ n := inp.Name.(*ir.Name)
+ sl := LocalSlot{N: n, Type: inp.Type, Off: 0}
+ rval.Vars = append(rval.Vars, n)
+ rval.Slots = append(rval.Slots, sl)
+ slid := len(rval.VarSlots)
+ rval.VarSlots = append(rval.VarSlots, []SlotID{SlotID(slid)})
+
+ if afterPrologVal == ID(-1) {
+ // This can happen for degenerate functions with infinite
+ // loops such as that in issue 45948. In such cases, leave
+ // the var/slot set up for the param, but don't try to
+ // emit a location list.
+ if loggingEnabled {
+ state.logf("locatePrologEnd failed, skipping %v\n", n)
+ }
+ pidx++
+ continue
+ }
+
+ // Param is arriving in one or more registers. We need a 2-element
+ // location expression for it. First entry in location list
+ // will correspond to lifetime in input registers.
+ list, sizeIdx := setupLocList(ctxt, f, rval.LocationLists[pidx],
+ BlockStart.ID, afterPrologVal)
+ if list == nil {
+ pidx++
+ continue
+ }
+ if loggingEnabled {
+ state.logf("param %v:\n [<entry>, %d]:\n", n, afterPrologVal)
+ }
+ rtypes, _ := inp.RegisterTypesAndOffsets()
+ padding := make([]uint64, 0, 32)
+ padding = inp.ComputePadding(padding)
+ for k, r := range inp.Registers {
+ reg := ObjRegForAbiReg(r, f.Config)
+ dwreg := ctxt.Arch.DWARFRegisters[reg]
+ if dwreg < 32 {
+ list = append(list, dwarf.DW_OP_reg0+byte(dwreg))
+ } else {
+ list = append(list, dwarf.DW_OP_regx)
+ list = dwarf.AppendUleb128(list, uint64(dwreg))
+ }
+ if loggingEnabled {
+ state.logf(" piece %d -> dwreg %d", k, dwreg)
+ }
+ if len(inp.Registers) > 1 {
+ list = append(list, dwarf.DW_OP_piece)
+ ts := rtypes[k].Size()
+ list = dwarf.AppendUleb128(list, uint64(ts))
+ if padding[k] > 0 {
+ if loggingEnabled {
+ state.logf(" [pad %d bytes]", padding[k])
+ }
+ list = append(list, dwarf.DW_OP_piece)
+ list = dwarf.AppendUleb128(list, padding[k])
+ }
+ }
+ if loggingEnabled {
+ state.logf("\n")
+ }
+ }
+ // fill in length of location expression element
+ ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+
+ // Second entry in the location list will be the stack home
+ // of the param, once it has been spilled. Emit that now.
+ list, sizeIdx = setupLocList(ctxt, f, list,
+ afterPrologVal, FuncEnd.ID)
+ if list == nil {
+ pidx++
+ continue
+ }
+ soff := stackOffset(sl)
+ if soff == 0 {
+ list = append(list, dwarf.DW_OP_call_frame_cfa)
+ } else {
+ list = append(list, dwarf.DW_OP_fbreg)
+ list = dwarf.AppendSleb128(list, int64(soff))
+ }
+ if loggingEnabled {
+ state.logf(" [%d, <end>): stackOffset=%d\n", afterPrologVal, soff)
+ }
+
+ // fill in size
+ ctxt.Arch.ByteOrder.PutUint16(list[sizeIdx:], uint16(len(list)-sizeIdx-2))
+
+ rval.LocationLists[pidx] = list
+ pidx++
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug_lines_test.go b/src/cmd/compile/internal/ssa/debug_lines_test.go
new file mode 100644
index 0000000..c0ccdb1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_lines_test.go
@@ -0,0 +1,258 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "internal/buildcfg"
+ "runtime"
+ "sort"
+
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "reflect"
+ "regexp"
+ "strconv"
+ "testing"
+)
+
+// Matches lines in genssa output that are marked "isstmt", and the parenthesized plus-prefixed line number is a submatch
+var asmLine *regexp.Regexp = regexp.MustCompile(`^\s[vb][0-9]+\s+[0-9]+\s\(\+([0-9]+)\)`)
+
+// this matches e.g. ` v123456789 000007 (+9876654310) MOVUPS X15, ""..autotmp_2-32(SP)`
+
+// Matches lines in genssa output that describe an inlined file.
+// Note it expects an unadventurous choice of basename.
+var sepRE = regexp.QuoteMeta(string(filepath.Separator))
+var inlineLine *regexp.Regexp = regexp.MustCompile(`^#\s.*` + sepRE + `[-a-zA-Z0-9_]+\.go:([0-9]+)`)
+
+// this matches e.g. # /pa/inline-dumpxxxx.go:6
+
+var testGoArchFlag = flag.String("arch", "", "run test for specified architecture")
+
+func testGoArch() string {
+ if *testGoArchFlag == "" {
+ return runtime.GOARCH
+ }
+ return *testGoArchFlag
+}
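+
+// For example, to exercise these tests for a non-host architecture, an
+// invocation along the following lines might be used (illustrative only):
+//
+//	go test -run TestDebugLines -args -arch=arm64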
+
+func TestDebugLinesSayHi(t *testing.T) {
+	// This test is potentially fragile; the goal is that debugging should step properly through "sayhi".
+	// If the blocks are reordered in a way that changes the statement order but execution flows correctly,
+	// then rearrange the expected numbers. Register-ABI and non-register-ABI builds also have different
+	// sequences, at least for now.
+
+ switch testGoArch() {
+ case "arm64", "amd64": // register ABI
+ testDebugLines(t, "-N -l", "sayhi.go", "sayhi", []int{8, 9, 10, 11}, false)
+
+ case "arm", "386": // probably not register ABI for a while
+ testDebugLines(t, "-N -l", "sayhi.go", "sayhi", []int{9, 10, 11}, false)
+
+ default: // expect ppc64le and riscv will pick up register ABI soonish, not sure about others
+ t.Skip("skipped for many architectures, also changes w/ register ABI")
+ }
+}
+
+func TestDebugLinesPushback(t *testing.T) {
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows.
+		t.Skip("this test depends on creating a file with a wonky name; it only works for sure on Linux and Darwin")
+ }
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "(*List[go.shape.int_0]).PushBack"
+ if buildcfg.Experiment.Unified {
+ // Unified mangles differently
+ fn = "(*List[int]).PushBack"
+ }
+ testDebugLines(t, "-N -l -G=3", "pushback.go", fn, []int{17, 18, 19, 20, 21, 22, 24}, true)
+ }
+}
+
+func TestDebugLinesConvert(t *testing.T) {
+ if runtime.GOOS != "linux" && runtime.GOOS != "darwin" { // in particular, it could be windows.
+		t.Skip("this test depends on creating a file with a wonky name; it only works for sure on Linux and Darwin")
+ }
+
+ switch testGoArch() {
+ default:
+ t.Skip("skipped for many architectures")
+
+ case "arm64", "amd64": // register ABI
+ fn := "G[go.shape.int_0]"
+ if buildcfg.Experiment.Unified {
+ // Unified mangles differently
+ fn = "G[int]"
+ }
+ testDebugLines(t, "-N -l -G=3", "convertline.go", fn, []int{9, 10, 11}, true)
+ }
+}
+
+func TestInlineLines(t *testing.T) {
+ if runtime.GOARCH != "amd64" && *testGoArchFlag == "" {
+		// As of September 2021, this works for everything except mips64, but is still potentially fragile
+ t.Skip("only runs for amd64 unless -arch explicitly supplied")
+ }
+
+ want := [][]int{{3}, {4, 10}, {4, 10, 16}, {4, 10}, {4, 11, 16}, {4, 11}, {4}, {5, 10}, {5, 10, 16}, {5, 10}, {5, 11, 16}, {5, 11}, {5}}
+ testInlineStack(t, "inline-dump.go", "f", want)
+}
+
+func compileAndDump(t *testing.T, file, function, moreGCFlags string) []byte {
+ testenv.MustHaveGoBuild(t)
+
+ tmpdir, err := ioutil.TempDir("", "debug_lines_test")
+ if err != nil {
+ panic(fmt.Sprintf("Problem creating TempDir, error %v", err))
+ }
+ if testing.Verbose() {
+ fmt.Printf("Preserving temporary directory %s\n", tmpdir)
+ } else {
+ defer os.RemoveAll(tmpdir)
+ }
+
+ source, err := filepath.Abs(filepath.Join("testdata", file))
+ if err != nil {
+ panic(fmt.Sprintf("Could not get abspath of testdata directory and file, %v", err))
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", "foo.o", "-gcflags=-d=ssa/genssa/dump="+function+" "+moreGCFlags, source)
+ cmd.Dir = tmpdir
+ cmd.Env = replaceEnv(cmd.Env, "GOSSADIR", tmpdir)
+ testGoos := "linux" // default to linux
+ if testGoArch() == "wasm" {
+ testGoos = "js"
+ }
+ cmd.Env = replaceEnv(cmd.Env, "GOOS", testGoos)
+ cmd.Env = replaceEnv(cmd.Env, "GOARCH", testGoArch())
+
+ if testing.Verbose() {
+ fmt.Printf("About to run %s\n", asCommandLine("", cmd))
+ }
+
+ var stdout, stderr bytes.Buffer
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd %s: %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Wanted empty stderr, instead got:\n%s\n", s)
+ }
+
+ dumpFile := filepath.Join(tmpdir, function+"_01__genssa.dump")
+ dumpBytes, err := os.ReadFile(dumpFile)
+ if err != nil {
+ t.Fatalf("Could not read dump file %s, err=%v", dumpFile, err)
+ }
+ return dumpBytes
+}
+
+func sortInlineStacks(x [][]int) {
+ sort.Slice(x, func(i, j int) bool {
+ if len(x[i]) != len(x[j]) {
+ return len(x[i]) < len(x[j])
+ }
+ for k := range x[i] {
+ if x[i][k] != x[j][k] {
+ return x[i][k] < x[j][k]
+ }
+ }
+ return false
+ })
+}
+
+// testInlineStack ensures that inlining is described properly in the comments in the dump file
+func testInlineStack(t *testing.T, file, function string, wantStacks [][]int) {
+ // this is an inlining reporting test, not an optimization test. -N makes it less fragile
+ dumpBytes := compileAndDump(t, file, function, "-N")
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ dumpLineNum := 0
+ var gotStmts []int
+ var gotStacks [][]int
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := inlineLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ } else if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ }
+ if len(gotStmts) > 0 {
+ gotStacks = append(gotStacks, gotStmts)
+ gotStmts = nil
+ }
+ sortInlineStacks(gotStacks)
+ sortInlineStacks(wantStacks)
+ if !reflect.DeepEqual(wantStacks, gotStacks) {
+ t.Errorf("wanted inlines %+v but got %+v", wantStacks, gotStacks)
+ }
+
+}
+
+// testDebugLines compiles testdata/<file> with the specified gcflags and -d=ssa/genssa/dump=<function>,
+// then verifies that the statement-marked lines in that file are the same as those in wantStmts.
+// These files must all be short because this is super-fragile.
+// "go build" is run in a temporary directory that is normally deleted, unless -test.v is set.
+func testDebugLines(t *testing.T, gcflags, file, function string, wantStmts []int, ignoreRepeats bool) {
+ dumpBytes := compileAndDump(t, file, function, gcflags)
+ dump := bufio.NewScanner(bytes.NewReader(dumpBytes))
+ var gotStmts []int
+ dumpLineNum := 0
+ for dump.Scan() {
+ line := dump.Text()
+ dumpLineNum++
+ matches := asmLine.FindStringSubmatch(line)
+ if len(matches) == 2 {
+ stmt, err := strconv.ParseInt(matches[1], 10, 32)
+ if err != nil {
+ t.Fatalf("Expected to parse a line number but saw %s instead on dump line #%d, error %v", matches[1], dumpLineNum, err)
+ }
+ if testing.Verbose() {
+ fmt.Printf("Saw stmt# %d for submatch '%s' on dump line #%d = '%s'\n", stmt, matches[1], dumpLineNum, line)
+ }
+ gotStmts = append(gotStmts, int(stmt))
+ }
+ }
+ if ignoreRepeats { // remove repeats from gotStmts
+ newGotStmts := []int{gotStmts[0]}
+ for _, x := range gotStmts {
+ if x != newGotStmts[len(newGotStmts)-1] {
+ newGotStmts = append(newGotStmts, x)
+ }
+ }
+ if !reflect.DeepEqual(wantStmts, newGotStmts) {
+ t.Errorf("wanted stmts %v but got %v (with repeats still in: %v)", wantStmts, newGotStmts, gotStmts)
+ }
+
+ } else {
+ if !reflect.DeepEqual(wantStmts, gotStmts) {
+ t.Errorf("wanted stmts %v but got %v", wantStmts, gotStmts)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/debug_test.go b/src/cmd/compile/internal/ssa/debug_test.go
new file mode 100644
index 0000000..b20041c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/debug_test.go
@@ -0,0 +1,1023 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa_test
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strconv"
+ "strings"
+ "testing"
+ "time"
+)
+
+var (
+ update = flag.Bool("u", false, "update test reference files")
+ verbose = flag.Bool("v", false, "print debugger interactions (very verbose)")
+ dryrun = flag.Bool("n", false, "just print the command line and first debugging bits")
+ useGdb = flag.Bool("g", false, "use Gdb instead of Delve (dlv), use gdb reference files")
+ force = flag.Bool("f", false, "force run under not linux-amd64; also do not use tempdir")
+ repeats = flag.Bool("r", false, "detect repeats in debug steps and don't ignore them")
+ inlines = flag.Bool("i", false, "do inlining for gdb (makes testing flaky till inlining info is correct)")
+)
+
+var (
+ hexRe = regexp.MustCompile("0x[a-zA-Z0-9]+")
+ numRe = regexp.MustCompile("-?[0-9]+")
+ stringRe = regexp.MustCompile("\"([^\\\"]|(\\.))*\"")
+ leadingDollarNumberRe = regexp.MustCompile("^[$][0-9]+")
+ optOutGdbRe = regexp.MustCompile("[<]optimized out[>]")
+ numberColonRe = regexp.MustCompile("^ *[0-9]+:")
+)
+
+var gdb = "gdb" // Might be "ggdb" on Darwin, because gdb is no longer part of Xcode
+var debugger = "dlv" // For naming files, etc.
+
+var gogcflags = os.Getenv("GO_GCFLAGS")
+
+// optimizedLibs usually means "not running in a noopt test builder".
+var optimizedLibs = (!strings.Contains(gogcflags, "-N") && !strings.Contains(gogcflags, "-l"))
+
+// TestNexting go-builds a file, then uses a debugger (default delve, optionally gdb)
+// to next through the generated executable, recording each line landed at, and
+// then compares those lines with reference file(s).
+// Flag -u updates the reference file(s).
+// Flag -g changes the debugger to gdb (and uses gdb-specific reference files)
+// Flag -v is ever-so-slightly verbose.
+// Flag -n is for dry-run, and prints the shell and first debug commands.
+//
+// Because this test (combined with existing compiler deficiencies) is flaky,
+// for gdb-based testing by default inlining is disabled
+// (otherwise output depends on library internals)
+// and for both gdb and dlv by default repeated lines in the next stream are ignored
+// (because this appears to be timing-dependent in gdb, and the cleanest fix is in code common to gdb and dlv).
+//
+// Also by default, any source code outside of .../testdata/ is not mentioned
+// in the debugging histories. This deals both with inlined library code once
+// the compiler is generating clean inline records, and also deals with
+// runtime code between return from main and process exit. This is hidden
+// so that those files (in the runtime/library) can change without affecting
+// this test.
+//
+// These choices can be reversed with -i (inlining on) and -r (repeats detected) which
+// will also cause their own failures against the expected outputs. Note that if the compiler
+// and debugger were behaving properly, the inlined code and repeated lines would not appear,
+// so the expected output is closer to what we hope to see, though it also encodes all our
+// current bugs.
+//
+// The file being tested may contain comments of the form
+// //DBG-TAG=(v1,v2,v3)
+// where DBG = {gdb,dlv} and TAG={dbg,opt}
+// each variable may optionally be followed by a / and one or more of S,A,N,O
+// to indicate normalization of Strings, (hex) addresses, and numbers.
+// "O" is an explicit indication that we expect it to be optimized out.
+// For example:
+//
+// if len(os.Args) > 1 { //gdb-dbg=(hist/A,cannedInput/A) //dlv-dbg=(hist/A,cannedInput/A)
+//
+// TODO: not implemented for Delve yet, but this is the plan
+//
+// After a compiler change that causes a difference in the debug behavior, check
+// to see if it is sensible or not, and if it is, update the reference files with
+// go test debug_test.go -args -u
+// (for Delve), or, for gdb,
+// go test debug_test.go -args -u -g
+//
+func TestNexting(t *testing.T) {
+ testenv.SkipFlaky(t, 37404)
+
+ skipReasons := "" // Many possible skip reasons, list all that apply
+ if testing.Short() {
+ skipReasons = "not run in short mode; "
+ }
+ testenv.MustHaveGoBuild(t)
+
+ if *useGdb && !*force && !(runtime.GOOS == "linux" && runtime.GOARCH == "amd64") {
+ // Running gdb on OSX/darwin is very flaky.
+ // Sometimes it is called ggdb, depending on how it is installed.
+ // It also sometimes requires an admin password typed into a dialog box.
+ // Various architectures tend to differ slightly sometimes, and keeping them
+ // all in sync is a pain for people who don't have them all at hand,
+ // so limit testing to amd64 (for now)
+ skipReasons += "not run when testing gdb (-g) unless forced (-f) or linux-amd64; "
+ }
+
+ if !*useGdb && !*force && testenv.Builder() == "linux-386-longtest" {
+ // The latest version of Delve does support linux/386. However, the version currently
+ // installed in the linux-386-longtest builder does not. See golang.org/issue/39309.
+ skipReasons += "not run when testing delve on linux-386-longtest builder unless forced (-f); "
+ }
+
+ if *useGdb {
+ debugger = "gdb"
+ _, err := exec.LookPath(gdb)
+ if err != nil {
+ if runtime.GOOS != "darwin" {
+ skipReasons += "not run because gdb not on path; "
+ } else {
+ // On Darwin, MacPorts installs gdb as "ggdb".
+ _, err = exec.LookPath("ggdb")
+ if err != nil {
+					skipReasons += "not run because gdb (and also ggdb) requested by -g option not on path; "
+ } else {
+ gdb = "ggdb"
+ }
+ }
+ }
+ } else { // Delve
+ debugger = "dlv"
+ _, err := exec.LookPath("dlv")
+ if err != nil {
+ skipReasons += "not run because dlv not on path; "
+ }
+ }
+
+ if skipReasons != "" {
+ t.Skip(skipReasons[:len(skipReasons)-2])
+ }
+
+ optFlags := "" // Whatever flags are needed to test debugging of optimized code.
+ dbgFlags := "-N -l"
+ if *useGdb && !*inlines {
+		// For gdb, by default disable inlining so that a compiler test does not depend on library code.
+ // TODO: Technically not necessary in 1.10 and later, but it causes a largish regression that needs investigation.
+ optFlags += " -l"
+ }
+
+ moreargs := []string{}
+ if *useGdb && (runtime.GOOS == "darwin" || runtime.GOOS == "windows") {
+		// gdb and lldb on Darwin do not deal with compressed DWARF;
+		// neither does Windows.
+ moreargs = append(moreargs, "-ldflags=-compressdwarf=false")
+ }
+
+ subTest(t, debugger+"-dbg", "hist", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "scopes", dbgFlags, moreargs...)
+ subTest(t, debugger+"-dbg", "i22558", dbgFlags, moreargs...)
+
+ subTest(t, debugger+"-dbg-race", "i22600", dbgFlags, append(moreargs, "-race")...)
+
+ optSubTest(t, debugger+"-opt", "hist", optFlags, 1000, moreargs...)
+ optSubTest(t, debugger+"-opt", "scopes", optFlags, 1000, moreargs...)
+
+	// This was optSubTest; the test is observed to be flaky on Linux in Docker on a (busy) macOS host,
+	// probably because of timing glitches in this harness.
+ // TODO get rid of timing glitches in this harness.
+ skipSubTest(t, debugger+"-opt", "infloop", optFlags, 10, moreargs...)
+
+}
+
+// subTest creates a subtest that compiles basename.go with the specified gcflags and additional compiler arguments,
+// then runs the debugger on the resulting binary, with any comment-specified actions matching tag triggered.
+func subTest(t *testing.T, tag string, basename string, gcflags string, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if t.Name() == "TestNexting/gdb-dbg-i22558" {
+ testenv.SkipFlaky(t, 31263)
+ }
+ testNexting(t, basename, tag, gcflags, 1000, moreargs...)
+ })
+}
+
+// skipSubTest is the same as subTest except that it skips the test if execution is not forced (-f)
+func skipSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+			t.Skip("skipping flaky test because not forced (-f)")
+ }
+ })
+}
+
+// optSubTest is the same as subTest except that it skips the test if the runtime and libraries
+// were not compiled with optimization turned on. (The skip may not be necessary with Go 1.10 and later)
+func optSubTest(t *testing.T, tag string, basename string, gcflags string, count int, moreargs ...string) {
+ // If optimized test is run with unoptimized libraries (compiled with -N -l), it is very likely to fail.
+ // This occurs in the noopt builders (for example).
+ t.Run(tag+"-"+basename, func(t *testing.T) {
+ if *force || optimizedLibs {
+ testNexting(t, basename, tag, gcflags, count, moreargs...)
+ } else {
+ t.Skip("skipping for unoptimized stdlib/runtime")
+ }
+ })
+}
+
+func testNexting(t *testing.T, base, tag, gcflags string, count int, moreArgs ...string) {
+ // (1) In testdata, build sample.go into test-sample.<tag>
+ // (2) Run debugger gathering a history
+ // (3) Read expected history from testdata/sample.<tag>.nexts
+ // optionally, write out testdata/sample.<tag>.nexts
+
+ testbase := filepath.Join("testdata", base) + "." + tag
+ tmpbase := filepath.Join("testdata", "test-"+base+"."+tag)
+
+ // Use a temporary directory unless -f is specified
+ if !*force {
+ tmpdir, err := ioutil.TempDir("", "debug_test")
+ if err != nil {
+ panic(fmt.Sprintf("Problem creating TempDir, error %v\n", err))
+ }
+ tmpbase = filepath.Join(tmpdir, "test-"+base+"."+tag)
+ if *verbose {
+ fmt.Printf("Tempdir is %s\n", tmpdir)
+ }
+ defer os.RemoveAll(tmpdir)
+ }
+ exe := tmpbase
+
+ runGoArgs := []string{"build", "-o", exe, "-gcflags=all=" + gcflags}
+ runGoArgs = append(runGoArgs, moreArgs...)
+ runGoArgs = append(runGoArgs, filepath.Join("testdata", base+".go"))
+
+ runGo(t, "", runGoArgs...)
+
+ nextlog := testbase + ".nexts"
+ tmplog := tmpbase + ".nexts"
+ var dbg dbgr
+ if *useGdb {
+ dbg = newGdb(tag, exe)
+ } else {
+ dbg = newDelve(tag, exe)
+ }
+ h1 := runDbgr(dbg, count)
+ if *dryrun {
+ fmt.Printf("# Tag for above is %s\n", dbg.tag())
+ return
+ }
+ if *update {
+ h1.write(nextlog)
+ } else {
+ h0 := &nextHist{}
+ h0.read(nextlog)
+ if !h0.equals(h1) {
+ // Be very noisy about exactly what's wrong to simplify debugging.
+ h1.write(tmplog)
+ cmd := exec.Command("diff", "-u", nextlog, tmplog)
+ line := asCommandLine("", cmd)
+ bytes, err := cmd.CombinedOutput()
+ if err != nil && len(bytes) == 0 {
+ t.Fatalf("step/next histories differ, diff command %s failed with error=%v", line, err)
+ }
+ t.Fatalf("step/next histories differ, diff=\n%s", string(bytes))
+ }
+ }
+}
+
+type dbgr interface {
+ start()
+	stepnext(s string) bool // step or next, possibly with a parameter; gets line etc. Returns true for success, false for an unsure response
+ quit()
+ hist() *nextHist
+ tag() string
+}
+
+func runDbgr(dbg dbgr, maxNext int) *nextHist {
+ dbg.start()
+ if *dryrun {
+ return nil
+ }
+ for i := 0; i < maxNext; i++ {
+ if !dbg.stepnext("n") {
+ break
+ }
+ }
+ dbg.quit()
+ h := dbg.hist()
+ return h
+}
+
+func runGo(t *testing.T, dir string, args ...string) string {
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Command(testenv.GoToolPath(t), args...)
+ cmd.Dir = dir
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", cmd))
+ return ""
+ }
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("error running cmd (%s): %v\nstdout:\n%sstderr:\n%s\n", asCommandLine("", cmd), err, stdout.String(), stderr.String())
+ }
+
+ if s := stderr.String(); s != "" {
+ t.Fatalf("Stderr = %s\nWant empty", s)
+ }
+
+ return stdout.String()
+}
+
+// tstring provides two strings, o (stdout) and e (stderr)
+type tstring struct {
+ o string
+ e string
+}
+
+func (t tstring) String() string {
+ return t.o + t.e
+}
+
+type pos struct {
+ line uint32
+ file uint8 // Artifact of plans to implement differencing instead of calling out to diff.
+}
+
+type nextHist struct {
+ f2i map[string]uint8
+ fs []string
+ ps []pos
+ texts []string
+ vars [][]string
+}
+
+func (h *nextHist) write(filename string) {
+ file, err := os.Create(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem opening %s, error %v\n", filename, err))
+ }
+ defer file.Close()
+ var lastfile uint8
+ for i, x := range h.texts {
+ p := h.ps[i]
+ if lastfile != p.file {
+ fmt.Fprintf(file, " %s\n", h.fs[p.file-1])
+ lastfile = p.file
+ }
+ fmt.Fprintf(file, "%d:%s\n", p.line, x)
+ // TODO, normalize between gdb and dlv into a common, comparable format.
+ for _, y := range h.vars[i] {
+ y = strings.TrimSpace(y)
+ fmt.Fprintf(file, "%s\n", y)
+ }
+ }
+ file.Close()
+}
+
+func (h *nextHist) read(filename string) {
+ h.f2i = make(map[string]uint8)
+ bytes, err := ioutil.ReadFile(filename)
+ if err != nil {
+ panic(fmt.Sprintf("Problem reading %s, error %v\n", filename, err))
+ }
+ var lastfile string
+ lines := strings.Split(string(bytes), "\n")
+ for i, l := range lines {
+ if len(l) > 0 && l[0] != '#' {
+ if l[0] == ' ' {
+ // file -- first two characters expected to be " "
+ lastfile = strings.TrimSpace(l)
+ } else if numberColonRe.MatchString(l) {
+ // line number -- <number>:<line>
+ colonPos := strings.Index(l, ":")
+ if colonPos == -1 {
+ panic(fmt.Sprintf("Line %d (%s) in file %s expected to contain '<number>:' but does not.\n", i+1, l, filename))
+ }
+ h.add(lastfile, l[0:colonPos], l[colonPos+1:])
+ } else {
+ h.addVar(l)
+ }
+ }
+ }
+}
+
+// add appends file (name), line (number) and text (string) to the history,
+// provided that the file+line combo does not repeat the previous position,
+// and provided that the file is within the testdata directory. The return
+// value indicates whether the append occurred.
+func (h *nextHist) add(file, line, text string) bool {
+ // Only record source code in testdata unless the inlines flag is set
+ if !*inlines && !strings.Contains(file, "/testdata/") {
+ return false
+ }
+ fi := h.f2i[file]
+ if fi == 0 {
+ h.fs = append(h.fs, file)
+ fi = uint8(len(h.fs))
+ h.f2i[file] = fi
+ }
+
+ line = strings.TrimSpace(line)
+ var li int
+ var err error
+ if line != "" {
+ li, err = strconv.Atoi(line)
+ if err != nil {
+ panic(fmt.Sprintf("Non-numeric line: %s, error %v\n", line, err))
+ }
+ }
+ l := len(h.ps)
+ p := pos{line: uint32(li), file: fi}
+
+ if l == 0 || *repeats || h.ps[l-1] != p {
+ h.ps = append(h.ps, p)
+ h.texts = append(h.texts, text)
+ h.vars = append(h.vars, []string{})
+ return true
+ }
+ return false
+}
+
+func (h *nextHist) addVar(text string) {
+ l := len(h.texts)
+ h.vars[l-1] = append(h.vars[l-1], text)
+}
+
+func invertMapSU8(hf2i map[string]uint8) map[uint8]string {
+ hi2f := make(map[uint8]string)
+ for hs, i := range hf2i {
+ hi2f[i] = hs
+ }
+ return hi2f
+}
+
+func (h *nextHist) equals(k *nextHist) bool {
+ if len(h.f2i) != len(k.f2i) {
+ return false
+ }
+ if len(h.ps) != len(k.ps) {
+ return false
+ }
+ hi2f := invertMapSU8(h.f2i)
+ ki2f := invertMapSU8(k.f2i)
+
+ for i, hs := range hi2f {
+ if hs != ki2f[i] {
+ return false
+ }
+ }
+
+ for i, x := range h.ps {
+ if k.ps[i] != x {
+ return false
+ }
+ }
+
+ for i, hv := range h.vars {
+ kv := k.vars[i]
+ if len(hv) != len(kv) {
+ return false
+ }
+ for j, hvt := range hv {
+ if hvt != kv[j] {
+ return false
+ }
+ }
+ }
+
+ return true
+}
+
+// canonFileName strips everything before "/src/" from a filename.
+// This makes file names portable across different machines,
+// home directories, and temporary directories.
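+// For example, "/tmp/work/go/src/runtime/proc.go" canonicalizes to
+// "src/runtime/proc.go".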
+func canonFileName(f string) string {
+ i := strings.Index(f, "/src/")
+ if i != -1 {
+ f = f[i+1:]
+ }
+ return f
+}
+
+/* Delve */
+
+type delveState struct {
+ cmd *exec.Cmd
+ tagg string
+ *ioState
+ atLineRe *regexp.Regexp // "\n =>"
+ funcFileLinePCre *regexp.Regexp // "^> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)"
+ line string
+ file string
+ function string
+}
+
+func newDelve(tag, executable string, args ...string) dbgr {
+ cmd := exec.Command("dlv", "exec", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ if len(args) > 0 {
+ cmd.Args = append(cmd.Args, "--")
+ cmd.Args = append(cmd.Args, args...)
+ }
+ s := &delveState{tagg: tag, cmd: cmd}
+	// Delve embeds control characters to change the color of the "=>" marker and the line number;
+	// matching them would require '(\\x1b\\[[0-9;]+m)?' in the regexp, so instead we set TERM=dumb above.
+ s.atLineRe = regexp.MustCompile("\n=>[[:space:]]+[0-9]+:(.*)")
+ s.funcFileLinePCre = regexp.MustCompile("> ([^ ]+) ([^:]+):([0-9]+) .*[(]PC: (0x[a-z0-9]+)[)]\n")
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *delveState) tag() string {
+ return s.tagg
+}
+
+func (s *delveState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]dlv[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ if len(excerpts) > 1 {
+ excerpt = excerpts[1]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+				fmt.Printf("%s\n", locations[2]) // don't canonicalize verbose logging
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ s.ioState.history.add(s.file, s.line, excerpt)
+ // TODO: here is where variable processing will be added. See gdbState.stepnext as a guide.
+ // Adding this may require some amount of normalization so that logs are comparable.
+ return true
+ }
+ if *verbose {
+ fmt.Printf("DID NOT MATCH EXPECTED NEXT OUTPUT\nO='%s'\nE='%s'\n", x.o, x.e)
+ }
+ return false
+}
+
+func (s *delveState) start() {
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("b main.test\n")
+ fmt.Printf("c\n")
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readExpecting(-1, 5000, "Type 'help' for list of commands.")
+ s.ioState.writeReadExpect("b main.test\n", "[(]dlv[)] ")
+ s.stepnext("c")
+}
+
+func (s *delveState) quit() {
+ expect("", s.ioState.writeRead("q\n"))
+}
+
+/* Gdb */
+
+type gdbState struct {
+ cmd *exec.Cmd
+ tagg string
+ args []string
+ *ioState
+ atLineRe *regexp.Regexp
+ funcFileLinePCre *regexp.Regexp
+ line string
+ file string
+ function string
+}
+
+func newGdb(tag, executable string, args ...string) dbgr {
+ // Turn off shell, necessary for Darwin apparently
+ cmd := exec.Command(gdb, "-nx",
+ "-iex", fmt.Sprintf("add-auto-load-safe-path %s/src/runtime", runtime.GOROOT()),
+ "-ex", "set startup-with-shell off", executable)
+ cmd.Env = replaceEnv(cmd.Env, "TERM", "dumb")
+ s := &gdbState{tagg: tag, cmd: cmd, args: args}
+ s.atLineRe = regexp.MustCompile("(^|\n)([0-9]+)(.*)")
+ s.funcFileLinePCre = regexp.MustCompile(
+ "([^ ]+) [(][^)]*[)][ \\t\\n]+at ([^:]+):([0-9]+)")
+ // runtime.main () at /Users/drchase/GoogleDrive/work/go/src/runtime/proc.go:201
+ // function file line
+ // Thread 2 hit Breakpoint 1, main.main () at /Users/drchase/GoogleDrive/work/debug/hist.go:18
+ s.ioState = newIoState(s.cmd)
+ return s
+}
+
+func (s *gdbState) tag() string {
+ return s.tagg
+}
+
+func (s *gdbState) start() {
+ run := "run"
+ for _, a := range s.args {
+ run += " " + a // Can't quote args for gdb, it will pass them through including the quotes
+ }
+ if *dryrun {
+ fmt.Printf("%s\n", asCommandLine("", s.cmd))
+ fmt.Printf("tbreak main.test\n")
+ fmt.Printf("%s\n", run)
+ return
+ }
+ err := s.cmd.Start()
+ if err != nil {
+ line := asCommandLine("", s.cmd)
+ panic(fmt.Sprintf("There was an error [start] running '%s', %v\n", line, err))
+ }
+ s.ioState.readSimpleExpecting("[(]gdb[)] ")
+ x := s.ioState.writeReadExpect("b main.test\n", "[(]gdb[)] ")
+ expect("Breakpoint [0-9]+ at", x)
+ s.stepnext(run)
+}
+
+func (s *gdbState) stepnext(ss string) bool {
+ x := s.ioState.writeReadExpect(ss+"\n", "[(]gdb[)] ")
+ excerpts := s.atLineRe.FindStringSubmatch(x.o)
+ locations := s.funcFileLinePCre.FindStringSubmatch(x.o)
+ excerpt := ""
+ addedLine := false
+ if len(excerpts) == 0 && len(locations) == 0 {
+ if *verbose {
+ fmt.Printf("DID NOT MATCH %s", x.o)
+ }
+ return false
+ }
+ if len(excerpts) > 0 {
+ excerpt = excerpts[3]
+ }
+ if len(locations) > 0 {
+ fn := canonFileName(locations[2])
+ if *verbose {
+ if s.file != fn {
+ fmt.Printf("%s\n", locations[2])
+ }
+ fmt.Printf(" %s\n", locations[3])
+ }
+ s.line = locations[3]
+ s.file = fn
+ s.function = locations[1]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+ if len(excerpts) > 0 {
+ if *verbose {
+ fmt.Printf(" %s\n", excerpts[2])
+ }
+ s.line = excerpts[2]
+ addedLine = s.ioState.history.add(s.file, s.line, excerpt)
+ }
+
+ if !addedLine {
+		// The line was not recorded (e.g. a repeat or a filtered file); still report a successful step.
+ return true
+ }
+ // Look for //gdb-<tag>=(v1,v2,v3) and print v1, v2, v3
+ vars := varsToPrint(excerpt, "//"+s.tag()+"=(")
+ for _, v := range vars {
+ response := printVariableAndNormalize(v, func(v string) string {
+ return s.ioState.writeReadExpect("p "+v+"\n", "[(]gdb[)] ").String()
+ })
+ s.ioState.history.addVar(response)
+ }
+ return true
+}
+
+// printVariableAndNormalize extracts any slash-indicated normalizing requests from the variable
+// name, then uses printer to get the value of the variable from the debugger, and then
+// normalizes and returns the response.
+func printVariableAndNormalize(v string, printer func(v string) string) string {
+ slashIndex := strings.Index(v, "/")
+ substitutions := ""
+ if slashIndex != -1 {
+ substitutions = v[slashIndex:]
+ v = v[:slashIndex]
+ }
+ response := printer(v)
+ // expect something like "$1 = ..."
+ dollar := strings.Index(response, "$")
+ cr := strings.Index(response, "\n")
+
+ if dollar == -1 { // some not entirely expected response, whine and carry on.
+ if cr == -1 {
+ response = strings.TrimSpace(response) // discards trailing newline
+ response = strings.Replace(response, "\n", "<BR>", -1)
+ return "$ Malformed response " + response
+ }
+ response = strings.TrimSpace(response[:cr])
+ return "$ " + response
+ }
+ if cr == -1 {
+ cr = len(response)
+ }
+ // Convert the leading $<number> into the variable name to enhance readability
+ // and reduce scope of diffs if an earlier print-variable is added.
+ response = strings.TrimSpace(response[dollar:cr])
+ response = leadingDollarNumberRe.ReplaceAllString(response, v)
+
+ // Normalize value as requested.
+ if strings.Contains(substitutions, "A") {
+ response = hexRe.ReplaceAllString(response, "<A>")
+ }
+ if strings.Contains(substitutions, "N") {
+ response = numRe.ReplaceAllString(response, "<N>")
+ }
+ if strings.Contains(substitutions, "S") {
+ response = stringRe.ReplaceAllString(response, "<S>")
+ }
+ if strings.Contains(substitutions, "O") {
+ response = optOutGdbRe.ReplaceAllString(response, "<Optimized out, as expected>")
+ }
+ return response
+}
+
+// varsToPrint takes a source code line, and extracts the comma-separated variable names
+// found between lookfor and the next ")".
+// For example, if line includes "... //gdb-foo=(v1,v2,v3)" and
+// lookfor="//gdb-foo=(", then varsToPrint returns ["v1", "v2", "v3"]
+func varsToPrint(line, lookfor string) []string {
+ var vars []string
+ if strings.Contains(line, lookfor) {
+ x := line[strings.Index(line, lookfor)+len(lookfor):]
+ end := strings.Index(x, ")")
+ if end == -1 {
+ panic(fmt.Sprintf("Saw variable list begin %s in %s but no closing ')'", lookfor, line))
+ }
+ vars = strings.Split(x[:end], ",")
+ for i, y := range vars {
+ vars[i] = strings.TrimSpace(y)
+ }
+ }
+ return vars
+}
+
+func (s *gdbState) quit() {
+ response := s.ioState.writeRead("q\n")
+ if strings.Contains(response.o, "Quit anyway? (y or n)") {
+ defer func() {
+ if r := recover(); r != nil {
+ if s, ok := r.(string); !(ok && strings.Contains(s, "'Y\n'")) {
+ // Not the panic that was expected.
+ fmt.Printf("Expected a broken pipe panic, but saw the following panic instead")
+ panic(r)
+ }
+ }
+ }()
+ s.ioState.writeRead("Y\n")
+ }
+}
+
+type ioState struct {
+ stdout io.ReadCloser
+ stderr io.ReadCloser
+ stdin io.WriteCloser
+ outChan chan string
+ errChan chan string
+ last tstring // Output of previous step
+ history *nextHist
+}
+
+func newIoState(cmd *exec.Cmd) *ioState {
+ var err error
+ s := &ioState{}
+ s.history = &nextHist{}
+ s.history.f2i = make(map[string]uint8)
+ s.stdout, err = cmd.StdoutPipe()
+ line := asCommandLine("", cmd)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdoutpipe] running '%s', %v\n", line, err))
+ }
+ s.stderr, err = cmd.StderrPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdouterr] running '%s', %v\n", line, err))
+ }
+ s.stdin, err = cmd.StdinPipe()
+ if err != nil {
+ panic(fmt.Sprintf("There was an error [stdinpipe] running '%s', %v\n", line, err))
+ }
+
+ s.outChan = make(chan string, 1)
+ s.errChan = make(chan string, 1)
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stdout.Read(buffer)
+ if n > 0 {
+ s.outChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stdout")
+ break
+ }
+ }
+ close(s.outChan)
+ s.stdout.Close()
+ }()
+
+ go func() {
+ buffer := make([]byte, 4096)
+ for {
+ n, err := s.stderr.Read(buffer)
+ if n > 0 {
+ s.errChan <- string(buffer[0:n])
+ }
+ if err == io.EOF || n == 0 {
+ break
+ }
+ if err != nil {
+ fmt.Printf("Saw an error forwarding stderr")
+ break
+ }
+ }
+ close(s.errChan)
+ s.stderr.Close()
+ }()
+ return s
+}
+
+func (s *ioState) hist() *nextHist {
+ return s.history
+}
+
+// writeRead writes ss, then reads stdout and stderr, waiting 500ms to
+// be sure all the output has appeared.
+func (s *ioState) writeRead(ss string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readExpecting(-1, 500, "")
+}
+
+// writeReadExpect writes ss, then reads stdout and stderr until something
+// that matches expectRE appears. expectRE should not be ""
+func (s *ioState) writeReadExpect(ss, expectRE string) tstring {
+ if *verbose {
+ fmt.Printf("=> %s", ss)
+ }
+ if expectRE == "" {
+ panic("expectRE should not be empty; use .* instead")
+ }
+ _, err := io.WriteString(s.stdin, ss)
+ if err != nil {
+ panic(fmt.Sprintf("There was an error writing '%s', %v\n", ss, err))
+ }
+ return s.readSimpleExpecting(expectRE)
+}
+
+func (s *ioState) readExpecting(millis, interlineTimeout int, expectedRE string) tstring {
+ timeout := time.Millisecond * time.Duration(millis)
+ interline := time.Millisecond * time.Duration(interlineTimeout)
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+loop:
+ for {
+ var timer <-chan time.Time
+ if timeout > 0 {
+ timer = time.After(timeout)
+ }
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ case <-timer:
+ break loop
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ timeout = interline
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+func (s *ioState) readSimpleExpecting(expectedRE string) tstring {
+ s.last = tstring{}
+ var re *regexp.Regexp
+ if expectedRE != "" {
+ re = regexp.MustCompile(expectedRE)
+ }
+ for {
+ select {
+ case x, ok := <-s.outChan:
+ if !ok {
+ s.outChan = nil
+ }
+ s.last.o += x
+ case x, ok := <-s.errChan:
+ if !ok {
+ s.errChan = nil
+ }
+ s.last.e += x
+ }
+ if re != nil {
+ if re.MatchString(s.last.o) {
+ break
+ }
+ if re.MatchString(s.last.e) {
+ break
+ }
+ }
+ }
+ if *verbose {
+ fmt.Printf("<= %s%s", s.last.o, s.last.e)
+ }
+ return s.last
+}
+
+// replaceEnv returns a new environment derived from env
+// by removing any existing definition of ev and adding ev=evv.
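+// For example, replaceEnv(cmd.Env, "TERM", "dumb") yields an environment in
+// which TERM is set to "dumb", as done when launching the debuggers in this file.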
+func replaceEnv(env []string, ev string, evv string) []string {
+ if env == nil {
+ env = os.Environ()
+ }
+ evplus := ev + "="
+ var found bool
+ for i, v := range env {
+ if strings.HasPrefix(v, evplus) {
+ found = true
+ env[i] = evplus + evv
+ }
+ }
+ if !found {
+ env = append(env, evplus+evv)
+ }
+ return env
+}
+
+// asCommandLine renders cmd as something that could be copy-and-pasted into a command line
+// If cwd is not empty and different from the command's directory, prepend an appropriate "cd"
+func asCommandLine(cwd string, cmd *exec.Cmd) string {
+ s := "("
+ if cmd.Dir != "" && cmd.Dir != cwd {
+ s += "cd" + escape(cmd.Dir) + ";"
+ }
+ for _, e := range cmd.Env {
+ if !strings.HasPrefix(e, "PATH=") &&
+ !strings.HasPrefix(e, "HOME=") &&
+ !strings.HasPrefix(e, "USER=") &&
+ !strings.HasPrefix(e, "SHELL=") {
+ s += escape(e)
+ }
+ }
+ for _, a := range cmd.Args {
+ s += escape(a)
+ }
+ s += " )"
+ return s
+}
+
+// escape inserts escapes appropriate for use in a shell command line
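+// For example, escape("hello world") returns " 'hello world'" (quoted, with a
+// leading space), while escape("hello") returns " hello".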
+func escape(s string) string {
+ s = strings.Replace(s, "\\", "\\\\", -1)
+ s = strings.Replace(s, "'", "\\'", -1)
+ // Conservative guess at characters that will force quoting
+ if strings.ContainsAny(s, "\\ ;#*&$~?!|[]()<>{}`") {
+ s = " '" + s + "'"
+ } else {
+ s = " " + s
+ }
+ return s
+}
+
+func expect(want string, got tstring) {
+ if want != "" {
+ match, err := regexp.MatchString(want, got.o)
+ if err != nil {
+ panic(fmt.Sprintf("Error for regexp %s, %v\n", want, err))
+ }
+ if match {
+ return
+ }
+ // Ignore error as we have already checked for it before
+ match, _ = regexp.MatchString(want, got.e)
+ if match {
+ return
+ }
+ fmt.Printf("EXPECTED '%s'\n GOT O='%s'\nAND E='%s'\n", want, got.o, got.e)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/decompose.go b/src/cmd/compile/internal/ssa/decompose.go
new file mode 100644
index 0000000..753d69c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/decompose.go
@@ -0,0 +1,479 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "sort"
+)
+
+// decomposeBuiltIn converts phi ops on compound builtin types into phi
+// ops on simple types, then invokes rewrite rules to decompose
+// other ops on those types.
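+// For example, a phi of string type becomes an OpStringMake whose arguments
+// are a pointer phi and a length phi (see decomposeStringPhi below).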
+func decomposeBuiltIn(f *Func) {
+ // Decompose phis
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeBuiltInPhi(v)
+ }
+ }
+
+ // Decompose other values
+ // Note: Leave dead values because we need to keep the original
+ // values around so the name component resolution below can still work.
+ applyRewrite(f, rewriteBlockdec, rewriteValuedec, leaveDeadValues)
+ if f.Config.RegSize == 4 {
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, leaveDeadValues)
+ }
+
+ // Split up named values into their components.
+ // accumulate old names for aggregates (that are decomposed) in toDelete for efficient bulk deletion,
+ // accumulate new LocalSlots in newNames for addition after the iteration. This decomposition is for
+	// builtin types with leaf components, and thus there is no need to reprocess the newly created LocalSlots.
+ var toDelete []namedVal
+ var newNames []*LocalSlot
+ for i, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsInteger() && t.Size() > f.Config.RegSize:
+ hiName, loName := f.SplitInt64(name)
+ newNames = maybeAppend2(f, newNames, hiName, loName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpInt64Make {
+ continue
+ }
+ f.NamedValues[*hiName] = append(f.NamedValues[*hiName], v.Args[0])
+ f.NamedValues[*loName] = append(f.NamedValues[*loName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsComplex():
+ rName, iName := f.SplitComplex(name)
+ newNames = maybeAppend2(f, newNames, rName, iName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpComplexMake {
+ continue
+ }
+ f.NamedValues[*rName] = append(f.NamedValues[*rName], v.Args[0])
+ f.NamedValues[*iName] = append(f.NamedValues[*iName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsString():
+ ptrName, lenName := f.SplitString(name)
+ newNames = maybeAppend2(f, newNames, ptrName, lenName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpStringMake {
+ continue
+ }
+ f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0])
+ f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsSlice():
+ ptrName, lenName, capName := f.SplitSlice(name)
+ newNames = maybeAppend2(f, newNames, ptrName, lenName)
+ newNames = maybeAppend(f, newNames, capName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpSliceMake {
+ continue
+ }
+ f.NamedValues[*ptrName] = append(f.NamedValues[*ptrName], v.Args[0])
+ f.NamedValues[*lenName] = append(f.NamedValues[*lenName], v.Args[1])
+ f.NamedValues[*capName] = append(f.NamedValues[*capName], v.Args[2])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsInterface():
+ typeName, dataName := f.SplitInterface(name)
+ newNames = maybeAppend2(f, newNames, typeName, dataName)
+ for j, v := range f.NamedValues[*name] {
+ if v.Op != OpIMake {
+ continue
+ }
+ f.NamedValues[*typeName] = append(f.NamedValues[*typeName], v.Args[0])
+ f.NamedValues[*dataName] = append(f.NamedValues[*dataName], v.Args[1])
+ toDelete = append(toDelete, namedVal{i, j})
+ }
+ case t.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case t.Size() > f.Config.RegSize:
+ f.Fatalf("undecomposed named type %s %v", name, t)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+ f.Names = append(f.Names, newNames...)
+}
+
+func maybeAppend(f *Func, ss []*LocalSlot, s *LocalSlot) []*LocalSlot {
+ if _, ok := f.NamedValues[*s]; !ok {
+ f.NamedValues[*s] = nil
+ return append(ss, s)
+ }
+ return ss
+}
+
+func maybeAppend2(f *Func, ss []*LocalSlot, s1, s2 *LocalSlot) []*LocalSlot {
+ return maybeAppend(f, maybeAppend(f, ss, s1), s2)
+}
+
+func decomposeBuiltInPhi(v *Value) {
+ switch {
+ case v.Type.IsInteger() && v.Type.Size() > v.Block.Func.Config.RegSize:
+ decomposeInt64Phi(v)
+ case v.Type.IsComplex():
+ decomposeComplexPhi(v)
+ case v.Type.IsString():
+ decomposeStringPhi(v)
+ case v.Type.IsSlice():
+ decomposeSlicePhi(v)
+ case v.Type.IsInterface():
+ decomposeInterfacePhi(v)
+ case v.Type.IsFloat():
+ // floats are never decomposed, even ones bigger than RegSize
+ case v.Type.Size() > v.Block.Func.Config.RegSize:
+ v.Fatalf("undecomposed type %s", v.Type)
+ }
+}
+
+func decomposeStringPhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := types.BytePtr
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpStringPtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpStringLen, lenType, a))
+ }
+ v.reset(OpStringMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+}
+
+func decomposeSlicePhi(v *Value) {
+ types := &v.Block.Func.Config.Types
+ ptrType := v.Type.Elem().PtrTo()
+ lenType := types.Int
+
+ ptr := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ len := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ cap := v.Block.NewValue0(v.Pos, OpPhi, lenType)
+ for _, a := range v.Args {
+ ptr.AddArg(a.Block.NewValue1(v.Pos, OpSlicePtr, ptrType, a))
+ len.AddArg(a.Block.NewValue1(v.Pos, OpSliceLen, lenType, a))
+ cap.AddArg(a.Block.NewValue1(v.Pos, OpSliceCap, lenType, a))
+ }
+ v.reset(OpSliceMake)
+ v.AddArg(ptr)
+ v.AddArg(len)
+ v.AddArg(cap)
+}
+
+func decomposeInt64Phi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ if v.Type.IsSigned() {
+ partType = cfgtypes.Int32
+ } else {
+ partType = cfgtypes.UInt32
+ }
+
+ hi := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ lo := v.Block.NewValue0(v.Pos, OpPhi, cfgtypes.UInt32)
+ for _, a := range v.Args {
+ hi.AddArg(a.Block.NewValue1(v.Pos, OpInt64Hi, partType, a))
+ lo.AddArg(a.Block.NewValue1(v.Pos, OpInt64Lo, cfgtypes.UInt32, a))
+ }
+ v.reset(OpInt64Make)
+ v.AddArg(hi)
+ v.AddArg(lo)
+}
+
+func decomposeComplexPhi(v *Value) {
+ cfgtypes := &v.Block.Func.Config.Types
+ var partType *types.Type
+ switch z := v.Type.Size(); z {
+ case 8:
+ partType = cfgtypes.Float32
+ case 16:
+ partType = cfgtypes.Float64
+ default:
+ v.Fatalf("decomposeComplexPhi: bad complex size %d", z)
+ }
+
+ real := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ imag := v.Block.NewValue0(v.Pos, OpPhi, partType)
+ for _, a := range v.Args {
+ real.AddArg(a.Block.NewValue1(v.Pos, OpComplexReal, partType, a))
+ imag.AddArg(a.Block.NewValue1(v.Pos, OpComplexImag, partType, a))
+ }
+ v.reset(OpComplexMake)
+ v.AddArg(real)
+ v.AddArg(imag)
+}
+
+func decomposeInterfacePhi(v *Value) {
+ uintptrType := v.Block.Func.Config.Types.Uintptr
+ ptrType := v.Block.Func.Config.Types.BytePtr
+
+ itab := v.Block.NewValue0(v.Pos, OpPhi, uintptrType)
+ data := v.Block.NewValue0(v.Pos, OpPhi, ptrType)
+ for _, a := range v.Args {
+ itab.AddArg(a.Block.NewValue1(v.Pos, OpITab, uintptrType, a))
+ data.AddArg(a.Block.NewValue1(v.Pos, OpIData, ptrType, a))
+ }
+ v.reset(OpIMake)
+ v.AddArg(itab)
+ v.AddArg(data)
+}
+
+func decomposeUser(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ decomposeUserPhi(v)
+ }
+ }
+ // Split up named values into their components.
+ i := 0
+ var newNames []*LocalSlot
+ for _, name := range f.Names {
+ t := name.Type
+ switch {
+ case t.IsStruct():
+ newNames = decomposeUserStructInto(f, name, newNames)
+ case t.IsArray():
+ newNames = decomposeUserArrayInto(f, name, newNames)
+ default:
+ f.Names[i] = name
+ i++
+ }
+ }
+ f.Names = f.Names[:i]
+ f.Names = append(f.Names, newNames...)
+}
+
+// decomposeUserArrayInto creates names for the element(s) of arrays referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserArrayInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot {
+ t := name.Type
+ if t.NumElem() == 0 {
+ // TODO(khr): Not sure what to do here. Probably nothing.
+ // Names for empty arrays aren't important.
+ return slots
+ }
+ if t.NumElem() != 1 {
+ // shouldn't get here due to CanSSA
+ f.Fatalf("array not of size 1")
+ }
+ elemName := f.SplitArray(name)
+ var keep []*Value
+ for _, v := range f.NamedValues[*name] {
+ if v.Op != OpArrayMake1 {
+ keep = append(keep, v)
+ continue
+ }
+ f.NamedValues[*elemName] = append(f.NamedValues[*elemName], v.Args[0])
+ }
+ if len(keep) == 0 {
+ // delete the name for the array as a whole
+ delete(f.NamedValues, *name)
+ } else {
+ f.NamedValues[*name] = keep
+ }
+
+ if t.Elem().IsArray() {
+ return decomposeUserArrayInto(f, elemName, slots)
+ } else if t.Elem().IsStruct() {
+ return decomposeUserStructInto(f, elemName, slots)
+ }
+
+ return append(slots, elemName)
+}
+
+// decomposeUserStructInto creates names for the field(s) of structs referenced
+// by name where possible, and appends those new names to slots, which is then
+// returned.
+func decomposeUserStructInto(f *Func, name *LocalSlot, slots []*LocalSlot) []*LocalSlot {
+ fnames := []*LocalSlot{} // slots for struct in name
+ t := name.Type
+ n := t.NumFields()
+
+ for i := 0; i < n; i++ {
+ fs := f.SplitStruct(name, i)
+ fnames = append(fnames, fs)
+ // arrays and structs will be decomposed further, so
+ // there's no need to record a name
+ if !fs.Type.IsArray() && !fs.Type.IsStruct() {
+ slots = maybeAppend(f, slots, fs)
+ }
+ }
+
+ makeOp := StructMakeOp(n)
+ var keep []*Value
+ // create named values for each struct field
+ for _, v := range f.NamedValues[*name] {
+ if v.Op != makeOp {
+ keep = append(keep, v)
+ continue
+ }
+ for i := 0; i < len(fnames); i++ {
+ f.NamedValues[*fnames[i]] = append(f.NamedValues[*fnames[i]], v.Args[i])
+ }
+ }
+ if len(keep) == 0 {
+ // delete the name for the struct as a whole
+ delete(f.NamedValues, *name)
+ } else {
+ f.NamedValues[*name] = keep
+ }
+
+	// now that f.NamedValues contains values for the struct
+ // fields, recurse into nested structs
+ for i := 0; i < n; i++ {
+ if name.Type.FieldType(i).IsStruct() {
+ slots = decomposeUserStructInto(f, fnames[i], slots)
+ delete(f.NamedValues, *fnames[i])
+ } else if name.Type.FieldType(i).IsArray() {
+ slots = decomposeUserArrayInto(f, fnames[i], slots)
+ delete(f.NamedValues, *fnames[i])
+ }
+ }
+ return slots
+}
+
+func decomposeUserPhi(v *Value) {
+ switch {
+ case v.Type.IsStruct():
+ decomposeStructPhi(v)
+ case v.Type.IsArray():
+ decomposeArrayPhi(v)
+ }
+}
+
+// decomposeStructPhi replaces phi-of-struct with structmake(phi-for-each-field),
+// and then recursively decomposes the phis for each field.
+func decomposeStructPhi(v *Value) {
+ t := v.Type
+ n := t.NumFields()
+ var fields [MaxStruct]*Value
+ for i := 0; i < n; i++ {
+ fields[i] = v.Block.NewValue0(v.Pos, OpPhi, t.FieldType(i))
+ }
+ for _, a := range v.Args {
+ for i := 0; i < n; i++ {
+ fields[i].AddArg(a.Block.NewValue1I(v.Pos, OpStructSelect, t.FieldType(i), int64(i), a))
+ }
+ }
+ v.reset(StructMakeOp(n))
+ v.AddArgs(fields[:n]...)
+
+ // Recursively decompose phis for each field.
+ for _, f := range fields[:n] {
+ decomposeUserPhi(f)
+ }
+}
+
+// decomposeArrayPhi replaces phi-of-array with arraymake(phi-of-array-element),
+// and then recursively decomposes the element phi.
+func decomposeArrayPhi(v *Value) {
+ t := v.Type
+ if t.NumElem() == 0 {
+ v.reset(OpArrayMake0)
+ return
+ }
+ if t.NumElem() != 1 {
+ v.Fatalf("SSAable array must have no more than 1 element")
+ }
+ elem := v.Block.NewValue0(v.Pos, OpPhi, t.Elem())
+ for _, a := range v.Args {
+ elem.AddArg(a.Block.NewValue1I(v.Pos, OpArraySelect, t.Elem(), 0, a))
+ }
+ v.reset(OpArrayMake1)
+ v.AddArg(elem)
+
+ // Recursively decompose elem phi.
+ decomposeUserPhi(elem)
+}
+
+// MaxStruct is the maximum number of fields a struct
+// can have and still be SSAable.
+const MaxStruct = 4
+
+// StructMakeOp returns the opcode to construct a struct with the
+// given number of fields.
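+// For example, StructMakeOp(2) returns OpStructMake2; values of nf greater
+// than MaxStruct cause a panic.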
+func StructMakeOp(nf int) Op {
+ switch nf {
+ case 0:
+ return OpStructMake0
+ case 1:
+ return OpStructMake1
+ case 2:
+ return OpStructMake2
+ case 3:
+ return OpStructMake3
+ case 4:
+ return OpStructMake4
+ }
+ panic("too many fields in an SSAable struct")
+}
+
+type namedVal struct {
+ locIndex, valIndex int // f.NamedValues[f.Names[locIndex]][valIndex] = key
+}
+
+// deleteNamedVals removes particular values with debugger names from f's naming data structures,
+// removes all values with OpInvalid, and re-sorts the list of Names.
+func deleteNamedVals(f *Func, toDelete []namedVal) {
+ // Arrange to delete from larger indices to smaller, to ensure swap-with-end deletion does not invalidate pending indices.
+ sort.Slice(toDelete, func(i, j int) bool {
+ if toDelete[i].locIndex != toDelete[j].locIndex {
+ return toDelete[i].locIndex > toDelete[j].locIndex
+ }
+ return toDelete[i].valIndex > toDelete[j].valIndex
+
+ })
+
+ // Get rid of obsolete names
+ for _, d := range toDelete {
+ loc := f.Names[d.locIndex]
+ vals := f.NamedValues[*loc]
+ l := len(vals) - 1
+ if l > 0 {
+ vals[d.valIndex] = vals[l]
+ }
+ vals[l] = nil
+ f.NamedValues[*loc] = vals[:l]
+ }
+ // Delete locations with no values attached.
+ end := len(f.Names)
+ for i := len(f.Names) - 1; i >= 0; i-- {
+ loc := f.Names[i]
+ vals := f.NamedValues[*loc]
+ last := len(vals)
+ for j := len(vals) - 1; j >= 0; j-- {
+ if vals[j].Op == OpInvalid {
+ last--
+ vals[j] = vals[last]
+ vals[last] = nil
+ }
+ }
+ if last < len(vals) {
+ f.NamedValues[*loc] = vals[:last]
+ }
+ if len(vals) == 0 {
+ delete(f.NamedValues, *loc)
+ end--
+ f.Names[i] = f.Names[end]
+ f.Names[end] = nil
+ }
+ }
+ f.Names = f.Names[:end]
+}
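
The swap-with-end deletion used here works on any slice; a minimal sketch over plain ints (hypothetical helper, not part of the compiler) shows why the larger indices must be processed first: deleting from the end never moves an element that a smaller pending index still refers to.

import "sort"

// deleteIndices removes the distinct indices in idx from s by swapping each
// doomed element with the current last element and truncating.
func deleteIndices(s []int, idx []int) []int {
	sort.Sort(sort.Reverse(sort.IntSlice(idx)))
	for _, i := range idx {
		last := len(s) - 1
		s[i] = s[last]
		s = s[:last]
	}
	return s
}
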
diff --git a/src/cmd/compile/internal/ssa/dom.go b/src/cmd/compile/internal/ssa/dom.go
new file mode 100644
index 0000000..f31e7df
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom.go
@@ -0,0 +1,302 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// This file contains code to compute the dominator tree
+// of a control-flow graph.
+
+// postorder computes a postorder traversal ordering for the
+// basic blocks in f. Unreachable blocks will not appear.
+func postorder(f *Func) []*Block {
+ return postorderWithNumbering(f, nil)
+}
+
+type blockAndIndex struct {
+ b *Block
+ index int // index is the number of successor edges of b that have already been explored.
+}
+
+// postorderWithNumbering provides a DFS postordering.
+// This seems to make loop-finding more robust.
+func postorderWithNumbering(f *Func, ponums []int32) []*Block {
+ seen := make([]bool, f.NumBlocks())
+
+ // result ordering
+ order := make([]*Block, 0, len(f.Blocks))
+
+ // stack of blocks and next child to visit
+ // A constant bound allows this to be stack-allocated. 32 is
+ // enough to cover almost every postorderWithNumbering call.
+ s := make([]blockAndIndex, 0, 32)
+ s = append(s, blockAndIndex{b: f.Entry})
+ seen[f.Entry.ID] = true
+ for len(s) > 0 {
+ tos := len(s) - 1
+ x := s[tos]
+ b := x.b
+ if i := x.index; i < len(b.Succs) {
+ s[tos].index++
+ bb := b.Succs[i].Block()
+ if !seen[bb.ID] {
+ seen[bb.ID] = true
+ s = append(s, blockAndIndex{b: bb})
+ }
+ continue
+ }
+ s = s[:tos]
+ if ponums != nil {
+ ponums[b.ID] = int32(len(order))
+ }
+ order = append(order, b)
+ }
+ return order
+}
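
The same explicit-stack scheme works on any adjacency-list graph; a minimal sketch with hypothetical names (ints instead of *Block):

// postorderInts returns a DFS postorder of the nodes reachable from root.
// Each stack frame remembers how many successors have been explored, just
// as blockAndIndex does above.
func postorderInts(succ [][]int, root int) []int {
	type frame struct{ n, i int }
	seen := make([]bool, len(succ))
	stack := []frame{{n: root}}
	seen[root] = true
	var order []int
	for len(stack) > 0 {
		tos := len(stack) - 1
		x := stack[tos]
		if x.i < len(succ[x.n]) {
			stack[tos].i++
			next := succ[x.n][x.i]
			if !seen[next] {
				seen[next] = true
				stack = append(stack, frame{n: next})
			}
			continue
		}
		stack = stack[:tos]
		order = append(order, x.n)
	}
	return order
}
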
+
+type linkedBlocks func(*Block) []Edge
+
+const nscratchslices = 7
+
+// experimentally, functions with 512 or fewer blocks account
+// for 75% of memory (size) allocation for dominator computation
+// in make.bash.
+const minscratchblocks = 512
+
+func (cache *Cache) scratchBlocksForDom(maxBlockID int) (a, b, c, d, e, f, g []ID) {
+ tot := maxBlockID * nscratchslices
+ scratch := cache.domblockstore
+ if len(scratch) < tot {
+ // req = min(1.5*tot, nscratchslices*minscratchblocks)
+ // 50% padding allows for graph growth in later phases.
+ req := (tot * 3) >> 1
+ if req < nscratchslices*minscratchblocks {
+ req = nscratchslices * minscratchblocks
+ }
+ scratch = make([]ID, req)
+ cache.domblockstore = scratch
+ } else {
+ // Clear as much of scratch as we will (re)use
+ scratch = scratch[0:tot]
+ for i := range scratch {
+ scratch[i] = 0
+ }
+ }
+
+ a = scratch[0*maxBlockID : 1*maxBlockID]
+ b = scratch[1*maxBlockID : 2*maxBlockID]
+ c = scratch[2*maxBlockID : 3*maxBlockID]
+ d = scratch[3*maxBlockID : 4*maxBlockID]
+ e = scratch[4*maxBlockID : 5*maxBlockID]
+ f = scratch[5*maxBlockID : 6*maxBlockID]
+ g = scratch[6*maxBlockID : 7*maxBlockID]
+
+ return
+}
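
Carving several equal-length scratch slices out of one cached allocation, with 50% headroom when it has to grow, is a general pattern; a minimal sketch with hypothetical names:

// scratchSlices returns k int slices of length n each, all backed by a single
// allocation cached in *store and zeroed when reused.
func scratchSlices(store *[]int, k, n int) [][]int {
	tot := k * n
	buf := *store
	if len(buf) < tot {
		buf = make([]int, tot+tot/2) // 50% padding for later growth
		*store = buf
	} else {
		buf = buf[:tot]
		for i := range buf {
			buf[i] = 0
		}
	}
	out := make([][]int, k)
	for i := 0; i < k; i++ {
		out[i] = buf[i*n : (i+1)*n]
	}
	return out
}
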
+
+func dominators(f *Func) []*Block {
+ preds := func(b *Block) []Edge { return b.Preds }
+ succs := func(b *Block) []Edge { return b.Succs }
+
+ //TODO: benchmark and try to find criteria for swapping between
+ // dominatorsSimple and dominatorsLT
+ return f.dominatorsLTOrig(f.Entry, preds, succs)
+}
+
+// dominatorsLTOrig runs Lengauer-Tarjan to compute a dominator tree starting at
+// entry and using predFn/succFn to find predecessors/successors to allow
+// computing both dominator and post-dominator trees.
+func (f *Func) dominatorsLTOrig(entry *Block, predFn linkedBlocks, succFn linkedBlocks) []*Block {
+ // Adapted directly from the original TOPLAS article's "simple" algorithm
+
+ maxBlockID := entry.Func.NumBlocks()
+ semi, vertex, label, parent, ancestor, bucketHead, bucketLink := f.Cache.scratchBlocksForDom(maxBlockID)
+
+ // This version uses integers for most of the computation,
+ // to make the work arrays smaller and pointer-free.
+ // fromID translates from ID to *Block where that is needed.
+ fromID := make([]*Block, maxBlockID)
+ for _, v := range f.Blocks {
+ fromID[v.ID] = v
+ }
+ idom := make([]*Block, maxBlockID)
+
+ // Step 1. Carry out a depth first search of the problem graph. Number
+ // the vertices from 1 to n as they are reached during the search.
+ n := f.dfsOrig(entry, succFn, semi, vertex, label, parent)
+
+ for i := n; i >= 2; i-- {
+ w := vertex[i]
+
+ // step 2 in the TOPLAS paper
+ for _, e := range predFn(fromID[w]) {
+ v := e.b
+ if semi[v.ID] == 0 {
+ // skip unreachable predecessor
+ // not in original, but we're using existing pred instead of building one.
+ continue
+ }
+ u := evalOrig(v.ID, ancestor, semi, label)
+ if semi[u] < semi[w] {
+ semi[w] = semi[u]
+ }
+ }
+
+ // add w to bucket[vertex[semi[w]]]
+ // implement bucket as a linked list implemented
+ // in a pair of arrays.
+ vsw := vertex[semi[w]]
+ bucketLink[w] = bucketHead[vsw]
+ bucketHead[vsw] = w
+
+ linkOrig(parent[w], w, ancestor)
+
+ // step 3 in the TOPLAS paper
+ for v := bucketHead[parent[w]]; v != 0; v = bucketLink[v] {
+ u := evalOrig(v, ancestor, semi, label)
+ if semi[u] < semi[v] {
+ idom[v] = fromID[u]
+ } else {
+ idom[v] = fromID[parent[w]]
+ }
+ }
+ }
+ // step 4 in the TOPLAS paper
+ for i := ID(2); i <= n; i++ {
+ w := vertex[i]
+ if idom[w].ID != vertex[semi[w]] {
+ idom[w] = idom[idom[w].ID]
+ }
+ }
+
+ return idom
+}
+
+// dfs performs a depth first search over the blocks starting at entry block
+// (in arbitrary order). This is a de-recursed version of dfs from the
+// original Tarjan-Lengauer TOPLAS article. It's important to return the
+// same values for parent as the original algorithm.
+func (f *Func) dfsOrig(entry *Block, succFn linkedBlocks, semi, vertex, label, parent []ID) ID {
+ n := ID(0)
+ s := make([]*Block, 0, 256)
+ s = append(s, entry)
+
+ for len(s) > 0 {
+ v := s[len(s)-1]
+ s = s[:len(s)-1]
+ // recursing on v
+
+ if semi[v.ID] != 0 {
+ continue // already visited
+ }
+ n++
+ semi[v.ID] = n
+ vertex[n] = v.ID
+ label[v.ID] = v.ID
+ // ancestor[v] already zero
+ for _, e := range succFn(v) {
+ w := e.b
+ // if it has a dfnum, we've already visited it
+ if semi[w.ID] == 0 {
+ // yes, w can be pushed multiple times.
+ s = append(s, w)
+ parent[w.ID] = v.ID // keep overwriting this till it is visited.
+ }
+ }
+ }
+ return n
+}
+
+// compressOrig is the "simple" compress function from the LT paper.
+func compressOrig(v ID, ancestor, semi, label []ID) {
+ if ancestor[ancestor[v]] != 0 {
+ compressOrig(ancestor[v], ancestor, semi, label)
+ if semi[label[ancestor[v]]] < semi[label[v]] {
+ label[v] = label[ancestor[v]]
+ }
+ ancestor[v] = ancestor[ancestor[v]]
+ }
+}
+
+// evalOrig is the "simple" eval function from the LT paper.
+func evalOrig(v ID, ancestor, semi, label []ID) ID {
+ if ancestor[v] == 0 {
+ return v
+ }
+ compressOrig(v, ancestor, semi, label)
+ return label[v]
+}
+
+func linkOrig(v, w ID, ancestor []ID) {
+ ancestor[w] = v
+}
+
+// dominators computes the dominator tree for f. It returns a slice
+// which maps block ID to the immediate dominator of that block.
+// Unreachable blocks map to nil. The entry block maps to nil.
+func dominatorsSimple(f *Func) []*Block {
+ // A simple algorithm for now
+ // Cooper, Harvey, Kennedy
+ idom := make([]*Block, f.NumBlocks())
+
+ // Compute postorder walk
+ post := f.postorder()
+
+ // Make map from block id to order index (for intersect call)
+ postnum := make([]int, f.NumBlocks())
+ for i, b := range post {
+ postnum[b.ID] = i
+ }
+
+ // Make the entry block a self-loop
+ idom[f.Entry.ID] = f.Entry
+ if postnum[f.Entry.ID] != len(post)-1 {
+ f.Fatalf("entry block %v not last in postorder", f.Entry)
+ }
+
+ // Compute relaxation of idom entries
+ for {
+ changed := false
+
+ for i := len(post) - 2; i >= 0; i-- {
+ b := post[i]
+ var d *Block
+ for _, e := range b.Preds {
+ p := e.b
+ if idom[p.ID] == nil {
+ continue
+ }
+ if d == nil {
+ d = p
+ continue
+ }
+ d = intersect(d, p, postnum, idom)
+ }
+ if d != idom[b.ID] {
+ idom[b.ID] = d
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ // Set idom of entry block to nil instead of itself.
+ idom[f.Entry.ID] = nil
+ return idom
+}
+
+// intersect finds the closest dominator of both b and c.
+// It requires a postorder numbering of all the blocks.
+func intersect(b, c *Block, postnum []int, idom []*Block) *Block {
+ // TODO: This loop is O(n^2). It used to be used in nilcheck,
+ // see BenchmarkNilCheckDeep*.
+ for b != c {
+ if postnum[b.ID] < postnum[c.ID] {
+ b = idom[b.ID]
+ } else {
+ c = idom[c.ID]
+ }
+ }
+ return b
+}
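
For a self-contained picture of the Cooper-Harvey-Kennedy iteration that dominatorsSimple and intersect implement, here is a sketch over a plain adjacency-list CFG; the names and the reverse-postorder input are illustrative assumptions, not compiler API.

// simpleDoms computes immediate dominators given predecessor lists and a
// reverse postorder rpo of the reachable blocks (entry first). idom[entry]
// is temporarily the entry itself, mirroring the self-loop trick above.
func simpleDoms(preds [][]int, rpo []int, entry int) []int {
	pos := make([]int, len(preds)) // pos[v] = position of v in rpo
	for i, v := range rpo {
		pos[v] = i
	}
	idom := make([]int, len(preds))
	for i := range idom {
		idom[i] = -1
	}
	idom[entry] = entry
	intersect := func(a, b int) int {
		for a != b {
			for pos[a] > pos[b] {
				a = idom[a]
			}
			for pos[b] > pos[a] {
				b = idom[b]
			}
		}
		return a
	}
	for changed := true; changed; {
		changed = false
		for _, v := range rpo[1:] { // entry is fixed
			d := -1
			for _, p := range preds[v] {
				if idom[p] == -1 {
					continue // predecessor not processed yet
				}
				if d == -1 {
					d = p
				} else {
					d = intersect(d, p)
				}
			}
			if d != idom[v] {
				idom[v] = d
				changed = true
			}
		}
	}
	return idom
}
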
diff --git a/src/cmd/compile/internal/ssa/dom_test.go b/src/cmd/compile/internal/ssa/dom_test.go
new file mode 100644
index 0000000..fa51718
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/dom_test.go
@@ -0,0 +1,608 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func BenchmarkDominatorsLinear(b *testing.B) { benchmarkDominators(b, 10000, genLinear) }
+func BenchmarkDominatorsFwdBack(b *testing.B) { benchmarkDominators(b, 10000, genFwdBack) }
+func BenchmarkDominatorsManyPred(b *testing.B) { benchmarkDominators(b, 10000, genManyPred) }
+func BenchmarkDominatorsMaxPred(b *testing.B) { benchmarkDominators(b, 10000, genMaxPred) }
+func BenchmarkDominatorsMaxPredVal(b *testing.B) { benchmarkDominators(b, 10000, genMaxPredValue) }
+
+type blockGen func(size int) []bloc
+
+// genLinear creates an array of blocks that succeed one another
+// b_n -> [b_n+1].
+func genLinear(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
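
For instance, genLinear(2) yields the chain entry -> blockn(0) -> blockn(1) -> blockn(2) -> exit, so every block dominates all the blocks after it; the generators below perturb this chain with back edges and extra predecessors.
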
+
+// genFwdBack creates an array of blocks that alternate between
+// b_n -> [b_n+1, b_n+2] and b_n -> [b_n+1, b_n-1].
+func genFwdBack(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < size; i++ {
+ switch i % 2 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i+2))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), blockn(i-1))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genManyPred creates an array of blocks where 1/3rd have a successor of the
+// first block, 1/3rd the last block, and the remaining third are plain.
+func genManyPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ // We want predecessor lists to be long, so 2/3rds of the blocks have a
+ // successor of the first or last block.
+ for i := 0; i < size; i++ {
+ switch i % 3 {
+ case 0:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(i+1))))
+ case 1:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(0))))
+ case 2:
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), blockn(size))))
+ }
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPred maximizes the size of the 'exit' predecessor list.
+func genMaxPred(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// genMaxPredValue is identical to genMaxPred but contains an
+// additional value.
+func genMaxPredValue(size int) []bloc {
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto(blockn(0)),
+ ),
+ )
+
+ for i := 0; i < size; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu("a", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", blockn(i+1), "exit")))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ return blocs
+}
+
+// sink for benchmark
+var domBenchRes []*Block
+
+func benchmarkDominators(b *testing.B, size int, bg blockGen) {
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(size))
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ domBenchRes = dominators(fun.f)
+ }
+}
+
+type domFunc func(f *Func) []*Block
+
+// verifyDominators verifies that the dominators of fut (the function under test),
+// as determined by domFn, match the map node->dominator.
+func verifyDominators(t *testing.T, fut fun, domFn domFunc, doms map[string]string) {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+
+ calcDom := domFn(fut.f)
+
+ for n, d := range doms {
+ nblk, ok := fut.blocks[n]
+ if !ok {
+ t.Errorf("invalid block name %s", n)
+ }
+ dblk, ok := fut.blocks[d]
+ if !ok {
+ t.Errorf("invalid block name %s", d)
+ }
+
+ domNode := calcDom[nblk.ID]
+ switch {
+ case calcDom[nblk.ID] == dblk:
+ calcDom[nblk.ID] = nil
+ continue
+ case calcDom[nblk.ID] != dblk:
+ t.Errorf("expected %s as dominator of %s, found %s", d, n, blockNames[domNode])
+ default:
+ t.Fatal("unexpected dominator condition")
+ }
+ }
+
+ for id, d := range calcDom {
+ // If nil, we've already verified it
+ if d == nil {
+ continue
+ }
+ for _, b := range fut.blocks {
+ if int(b.ID) == id {
+ t.Errorf("unexpected dominator of %s for %s", blockNames[d], blockNames[b])
+ }
+ }
+ }
+
+}
+
+func TestDominatorsSingleBlock(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+
+ doms := map[string]string{}
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsSimple(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+
+}
+
+func TestDominatorsMultPredFwd(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "a",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsDeadCode(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 0, nil),
+ If("p", "b3", "b5")),
+ Bloc("b2", Exit("mem")),
+ Bloc("b3", Goto("b2")),
+ Bloc("b4", Goto("b2")),
+ Bloc("b5", Goto("b2")))
+
+ doms := map[string]string{
+ "b2": "entry",
+ "b3": "entry",
+ "b5": "entry",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPredRev(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Goto("first")),
+ Bloc("first",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ If("p", "b", "first")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "exit", "b")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "first": "entry",
+ "a": "first",
+ "b": "a",
+ "c": "b",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestDominatorsMultPred(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "a", "c")),
+ Bloc("a",
+ If("p", "b", "c")),
+ Bloc("b",
+ Goto("c")),
+ Bloc("c",
+ If("p", "b", "exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ doms := map[string]string{
+ "a": "entry",
+ "b": "entry",
+ "c": "entry",
+ "exit": "c",
+ }
+
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+}
+
+func TestInfiniteLoop(t *testing.T) {
+ c := testConfig(t)
+ // note lack of an exit block
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("a")),
+ Bloc("a",
+ Goto("b")),
+ Bloc("b",
+ Goto("a")))
+
+ CheckFunc(fun.f)
+ doms := map[string]string{"a": "entry",
+ "b": "a"}
+ verifyDominators(t, fun, dominators, doms)
+}
+
+func TestDomTricky(t *testing.T) {
+ doms := map[string]string{
+ "4": "1",
+ "2": "4",
+ "5": "4",
+ "11": "4",
+ "15": "4", // the incorrect answer is "5"
+ "10": "15",
+ "19": "15",
+ }
+
+ if4 := [2]string{"2", "5"}
+ if5 := [2]string{"15", "11"}
+ if15 := [2]string{"19", "10"}
+
+ for i := 0; i < 8; i++ {
+ a := 1 & i
+ b := 1 & i >> 1
+ c := 1 & i >> 2
+
+ cfg := testConfig(t)
+ fun := cfg.Fun("1",
+ Bloc("1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Goto("4")),
+ Bloc("2",
+ Goto("11")),
+ Bloc("4",
+ If("p", if4[a], if4[1-a])), // 2, 5
+ Bloc("5",
+ If("p", if5[b], if5[1-b])), //15, 11
+ Bloc("10",
+ Exit("mem")),
+ Bloc("11",
+ Goto("15")),
+ Bloc("15",
+ If("p", if15[c], if15[1-c])), //19, 10
+ Bloc("19",
+ Goto("10")))
+ CheckFunc(fun.f)
+ verifyDominators(t, fun, dominators, doms)
+ verifyDominators(t, fun, dominatorsSimple, doms)
+ }
+}
+
+// generateDominatorMap uses dominatorsSimple to obtain a
+// reference dominator tree for testing faster algorithms.
+func generateDominatorMap(fut fun) map[string]string {
+ blockNames := map[*Block]string{}
+ for n, b := range fut.blocks {
+ blockNames[b] = n
+ }
+ referenceDom := dominatorsSimple(fut.f)
+ doms := make(map[string]string)
+ for _, b := range fut.f.Blocks {
+ if d := referenceDom[b.ID]; d != nil {
+ doms[blockNames[b]] = blockNames[d]
+ }
+ }
+ return doms
+}
+
+func TestDominatorsPostTrickyA(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyB(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyC(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyD(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b14", "b15")
+}
+
+func TestDominatorsPostTrickyE(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyF(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b10", "b8", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyG(t *testing.T) {
+ testDominatorsPostTricky(t, "b8", "b11", "b8", "b10", "b15", "b14")
+}
+
+func TestDominatorsPostTrickyH(t *testing.T) {
+ testDominatorsPostTricky(t, "b11", "b8", "b8", "b10", "b15", "b14")
+}
+
+func testDominatorsPostTricky(t *testing.T, b7then, b7else, b12then, b12else, b13then, b13else string) {
+ c := testConfig(t)
+ fun := c.Fun("b1",
+ Bloc("b1",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("p", OpConstBool, types.Types[types.TBOOL], 1, nil),
+ If("p", "b3", "b2")),
+ Bloc("b3",
+ If("p", "b5", "b6")),
+ Bloc("b5",
+ Goto("b7")),
+ Bloc("b7",
+ If("p", b7then, b7else)),
+ Bloc("b8",
+ Goto("b13")),
+ Bloc("b13",
+ If("p", b13then, b13else)),
+ Bloc("b14",
+ Goto("b10")),
+ Bloc("b15",
+ Goto("b16")),
+ Bloc("b16",
+ Goto("b9")),
+ Bloc("b9",
+ Goto("b7")),
+ Bloc("b11",
+ Goto("b12")),
+ Bloc("b12",
+ If("p", b12then, b12else)),
+ Bloc("b10",
+ Goto("b6")),
+ Bloc("b6",
+ Goto("b17")),
+ Bloc("b17",
+ Goto("b18")),
+ Bloc("b18",
+ If("p", "b22", "b19")),
+ Bloc("b22",
+ Goto("b23")),
+ Bloc("b23",
+ If("p", "b21", "b19")),
+ Bloc("b19",
+ If("p", "b24", "b25")),
+ Bloc("b24",
+ Goto("b26")),
+ Bloc("b26",
+ Goto("b25")),
+ Bloc("b25",
+ If("p", "b27", "b29")),
+ Bloc("b27",
+ Goto("b30")),
+ Bloc("b30",
+ Goto("b28")),
+ Bloc("b29",
+ Goto("b31")),
+ Bloc("b31",
+ Goto("b28")),
+ Bloc("b28",
+ If("p", "b32", "b33")),
+ Bloc("b32",
+ Goto("b21")),
+ Bloc("b21",
+ Goto("b47")),
+ Bloc("b47",
+ If("p", "b45", "b46")),
+ Bloc("b45",
+ Goto("b48")),
+ Bloc("b48",
+ Goto("b49")),
+ Bloc("b49",
+ If("p", "b50", "b51")),
+ Bloc("b50",
+ Goto("b52")),
+ Bloc("b52",
+ Goto("b53")),
+ Bloc("b53",
+ Goto("b51")),
+ Bloc("b51",
+ Goto("b54")),
+ Bloc("b54",
+ Goto("b46")),
+ Bloc("b46",
+ Exit("mem")),
+ Bloc("b33",
+ Goto("b34")),
+ Bloc("b34",
+ Goto("b37")),
+ Bloc("b37",
+ If("p", "b35", "b36")),
+ Bloc("b35",
+ Goto("b38")),
+ Bloc("b38",
+ Goto("b39")),
+ Bloc("b39",
+ If("p", "b40", "b41")),
+ Bloc("b40",
+ Goto("b42")),
+ Bloc("b42",
+ Goto("b43")),
+ Bloc("b43",
+ Goto("b41")),
+ Bloc("b41",
+ Goto("b44")),
+ Bloc("b44",
+ Goto("b36")),
+ Bloc("b36",
+ Goto("b20")),
+ Bloc("b20",
+ Goto("b18")),
+ Bloc("b2",
+ Goto("b4")),
+ Bloc("b4",
+ Exit("mem")))
+ CheckFunc(fun.f)
+ doms := generateDominatorMap(fun)
+ verifyDominators(t, fun, dominators, doms)
+}
diff --git a/src/cmd/compile/internal/ssa/expand_calls.go b/src/cmd/compile/internal/ssa/expand_calls.go
new file mode 100644
index 0000000..a3cea85
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/expand_calls.go
@@ -0,0 +1,1795 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+type selKey struct {
+ from *Value // what is selected from
+ offsetOrIndex int64 // whatever is appropriate for the selector
+ size int64
+ typ *types.Type
+}
+
+type Abi1RO uint8 // An offset within a parameter's slice of register indices, for abi1.
+
+func isBlockMultiValueExit(b *Block) bool {
+ return (b.Kind == BlockRet || b.Kind == BlockRetJmp) && b.Controls[0] != nil && b.Controls[0].Op == OpMakeResult
+}
+
+func badVal(s string, v *Value) error {
+ return fmt.Errorf("%s %s", s, v.LongString())
+}
+
+// removeTrivialWrapperTypes unwraps layers of
+// struct { singleField SomeType } and [1]SomeType
+// until a non-wrapper type is reached. This is useful
+// for working with assignments to/from interface data
+// fields (either second operand to OpIMake or OpIData)
+// where the wrapping or type conversion can be elided
+// because of type conversions/assertions in source code
+// that do not appear in SSA.
+func removeTrivialWrapperTypes(t *types.Type) *types.Type {
+ for {
+ if t.IsStruct() && t.NumFields() == 1 {
+ t = t.Field(0).Type
+ continue
+ }
+ if t.IsArray() && t.NumElem() == 1 {
+ t = t.Elem()
+ continue
+ }
+ break
+ }
+ return t
+}
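
The same unwrapping rule can be phrased over reflect types; a minimal sketch (standard-library reflect, not the compiler's types package):

import "reflect"

// unwrapTrivial strips layers of struct{ oneField T } and [1]T, so, for
// example, struct{ v [1]int64 } unwraps to int64.
func unwrapTrivial(t reflect.Type) reflect.Type {
	for {
		switch {
		case t.Kind() == reflect.Struct && t.NumField() == 1:
			t = t.Field(0).Type
		case t.Kind() == reflect.Array && t.Len() == 1:
			t = t.Elem()
		default:
			return t
		}
	}
}
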
+
+// A registerCursor tracks which register is used for an Arg or regValues, or a piece of such.
+type registerCursor struct {
+ // TODO(register args) convert this to a generalized target cursor.
+ storeDest *Value // if there are no register targets, then this is the base of the store.
+ regsLen int // the number of registers available for this Arg/result (which is all in registers or not at all)
+ nextSlice Abi1RO // the next register/register-slice offset
+ config *abi.ABIConfig
+ regValues *[]*Value // values assigned to registers accumulate here
+}
+
+func (rc *registerCursor) String() string {
+ dest := "<none>"
+ if rc.storeDest != nil {
+ dest = rc.storeDest.String()
+ }
+ regs := "<none>"
+ if rc.regValues != nil {
+ regs = ""
+ for i, x := range *rc.regValues {
+ if i > 0 {
+ regs = regs + "; "
+ }
+ regs = regs + x.LongString()
+ }
+ }
+ // not printing the config because that has not been useful
+ return fmt.Sprintf("RCSR{storeDest=%v, regsLen=%d, nextSlice=%d, regValues=[%s]}", dest, rc.regsLen, rc.nextSlice, regs)
+}
+
+// next effectively post-increments the register cursor; the receiver is advanced,
+// the old value is returned.
+func (c *registerCursor) next(t *types.Type) registerCursor {
+ rc := *c
+ if int(c.nextSlice) < c.regsLen {
+ w := c.config.NumParamRegs(t)
+ c.nextSlice += Abi1RO(w)
+ }
+ return rc
+}
+
+// plus returns a register cursor offset from the original, without modifying the original.
+func (c *registerCursor) plus(regWidth Abi1RO) registerCursor {
+ rc := *c
+ rc.nextSlice += regWidth
+ return rc
+}
+
+const (
+ // Register offsets for fields of built-in aggregate types; the ones not listed are zero.
+ RO_complex_imag = 1
+ RO_string_len = 1
+ RO_slice_len = 1
+ RO_slice_cap = 2
+ RO_iface_data = 1
+)
+
+func (x *expandState) regWidth(t *types.Type) Abi1RO {
+ return Abi1RO(x.abi1.NumParamRegs(t))
+}
+
+// regOffset returns the register offset of the i'th element of type t
+func (x *expandState) regOffset(t *types.Type, i int) Abi1RO {
+ // TODO maybe cache this in a map if profiling recommends.
+ if i == 0 {
+ return 0
+ }
+ if t.IsArray() {
+ return Abi1RO(i) * x.regWidth(t.Elem())
+ }
+ if t.IsStruct() {
+ k := Abi1RO(0)
+ for j := 0; j < i; j++ {
+ k += x.regWidth(t.FieldType(j))
+ }
+ return k
+ }
+ panic("Haven't implemented this case yet, do I need to?")
+}
+
+// at returns the register cursor for component i of t, where the first
+// component is numbered 0.
+func (c *registerCursor) at(t *types.Type, i int) registerCursor {
+ rc := *c
+ if i == 0 || c.regsLen == 0 {
+ return rc
+ }
+ if t.IsArray() {
+ w := c.config.NumParamRegs(t.Elem())
+ rc.nextSlice += Abi1RO(i * w)
+ return rc
+ }
+ if t.IsStruct() {
+ for j := 0; j < i; j++ {
+ rc.next(t.FieldType(j))
+ }
+ return rc
+ }
+ panic("Haven't implemented this case yet, do I need to?")
+}
+
+func (c *registerCursor) init(regs []abi.RegIndex, info *abi.ABIParamResultInfo, result *[]*Value, storeDest *Value) {
+ c.regsLen = len(regs)
+ c.nextSlice = 0
+ if len(regs) == 0 {
+ c.storeDest = storeDest // only save this if there are no registers, will explode if misused.
+ return
+ }
+ c.config = info.Config()
+ c.regValues = result
+}
+
+func (c *registerCursor) addArg(v *Value) {
+ *c.regValues = append(*c.regValues, v)
+}
+
+func (c *registerCursor) hasRegs() bool {
+ return c.regsLen > 0
+}
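
A stripped-down model of the cursor (hypothetical type, with register widths supplied directly instead of coming from the ABI config) makes the next/plus distinction concrete: next advances the receiver and returns the old position, while plus only returns an offset copy.

type miniCursor struct {
	pos, limit int // next register index, and total registers available
}

// next returns the current cursor and advances the receiver by width.
func (c *miniCursor) next(width int) miniCursor {
	old := *c
	if c.pos < c.limit {
		c.pos += width
	}
	return old
}

// plus returns a cursor width registers further along, leaving c unchanged.
func (c miniCursor) plus(width int) miniCursor {
	c.pos += width
	return c
}
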
+
+type expandState struct {
+ f *Func
+ abi1 *abi.ABIConfig
+ debug int // odd values log lost statement markers, so likely settings are 1 (stmts), 2 (expansion), and 3 (both)
+ canSSAType func(*types.Type) bool
+ regSize int64
+ sp *Value
+ typs *Types
+ ptrSize int64
+ hiOffset int64
+ lowOffset int64
+ hiRo Abi1RO
+ loRo Abi1RO
+ namedSelects map[*Value][]namedVal
+ sdom SparseTree
+ commonSelectors map[selKey]*Value // used to de-dupe selectors
+ commonArgs map[selKey]*Value // used to de-dupe OpArg/OpArgIntReg/OpArgFloatReg
+ memForCall map[ID]*Value // For a call, need to know the unique selector that gets the mem.
+ transformedSelects map[ID]bool // OpSelectN after rewriting, either created or renumbered.
+ indentLevel int // Indentation for debugging recursion
+}
+
+// intPairTypes returns the pair of 32-bit int types needed to encode a 64-bit integer type on a target
+// that has no 64-bit integer registers.
+func (x *expandState) intPairTypes(et types.Kind) (tHi, tLo *types.Type) {
+ tHi = x.typs.UInt32
+ if et == types.TINT64 {
+ tHi = x.typs.Int32
+ }
+ tLo = x.typs.UInt32
+ return
+}
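
On such a target the two halves land at hiOffset and lowOffset, which depend on endianness; a sketch of the value-level split, assuming a little-endian layout where the low word sits at offset 0 and the high word at offset 4:

// splitUint64 returns the 32-bit words of v as they would sit in
// little-endian memory: lo at offset 0, hi at offset 4.
func splitUint64(v uint64) (lo, hi uint32) {
	lo = uint32(v)
	hi = uint32(v >> 32)
	return
}
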
+
+// isAlreadyExpandedAggregateType returns whether a type is an SSA-able "aggregate" (multiple register) type
+// that was expanded in an earlier phase (currently, expand_calls is intended to run after decomposeBuiltin,
+// so this is all aggregate types -- small struct and array, complex, interface, string, slice, and 64-bit
+// integer on 32-bit).
+func (x *expandState) isAlreadyExpandedAggregateType(t *types.Type) bool {
+ if !x.canSSAType(t) {
+ return false
+ }
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice() ||
+ (t.Size() > x.regSize && (t.IsInteger() || (x.f.Config.SoftFloat && t.IsFloat())))
+}
+
+// offsetFrom creates an offset from a pointer, simplifying chained offsets and offsets from SP
+// TODO should also optimize offsets from SB?
+func (x *expandState) offsetFrom(b *Block, from *Value, offset int64, pt *types.Type) *Value {
+ ft := from.Type
+ if offset == 0 {
+ if ft == pt {
+ return from
+ }
+ // This captures common, (apparently) safe cases. The unsafe cases involve ft == uintptr
+ if (ft.IsPtr() || ft.IsUnsafePtr()) && pt.IsPtr() {
+ return from
+ }
+ }
+ // Simplify, canonicalize
+ for from.Op == OpOffPtr {
+ offset += from.AuxInt
+ from = from.Args[0]
+ }
+ if from == x.sp {
+ return x.f.ConstOffPtrSP(pt, offset, x.sp)
+ }
+ return b.NewValue1I(from.Pos.WithNotStmt(), OpOffPtr, pt, offset, from)
+}
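
The canonicalization loop above folds a chain of OpOffPtr values into a single offset from the underlying pointer; the same folding in miniature, with a hypothetical node type standing in for *Value:

// offNode models a pointer derived from base by adding off; base == nil
// marks an underlying pointer such as SP.
type offNode struct {
	base *offNode
	off  int64
}

// collapse returns the underlying pointer and the total folded offset,
// starting from an extra offset the caller wants to add.
func collapse(p *offNode, extra int64) (*offNode, int64) {
	off := extra
	for p.base != nil {
		off += p.off
		p = p.base
	}
	return p, off
}
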
+
+// splitSlots splits one "field" (specified by sfx, offset, and ty) out of the LocalSlots in ls and returns the new LocalSlots this generates.
+func (x *expandState) splitSlots(ls []*LocalSlot, sfx string, offset int64, ty *types.Type) []*LocalSlot {
+ var locs []*LocalSlot
+ for i := range ls {
+ locs = append(locs, x.f.SplitSlot(ls[i], sfx, offset, ty))
+ }
+ return locs
+}
+
+// prAssignForArg returns the ABIParamAssignment for v, assumed to be an OpArg.
+func (x *expandState) prAssignForArg(v *Value) *abi.ABIParamAssignment {
+ if v.Op != OpArg {
+ panic(badVal("Wanted OpArg, instead saw", v))
+ }
+ return ParamAssignmentForArgName(x.f, v.Aux.(*ir.Name))
+}
+
+// ParamAssignmentForArgName returns the ABIParamAssignment for f's arg with matching name.
+func ParamAssignmentForArgName(f *Func, name *ir.Name) *abi.ABIParamAssignment {
+ abiInfo := f.OwnAux.abiInfo
+ ip := abiInfo.InParams()
+ for i, a := range ip {
+ if a.Name == name {
+ return &ip[i]
+ }
+ }
+ panic(fmt.Errorf("Did not match param %v in prInfo %+v", name, abiInfo.InParams()))
+}
+
+// indent increments (or decrements) the indentation.
+func (x *expandState) indent(n int) {
+ x.indentLevel += n
+}
+
+// Printf does an indented fmt.Printf on the format and args.
+func (x *expandState) Printf(format string, a ...interface{}) (n int, err error) {
+ if x.indentLevel > 0 {
+ fmt.Printf("%[1]*s", x.indentLevel, "")
+ }
+ return fmt.Printf(format, a...)
+}
+
+// Calls that need lowering have some number of inputs, including a memory input,
+// and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+// With the current ABI those inputs need to be converted into stores to memory;
+// the call's original memory input is rethreaded into the first store, and the new
+// call receives the memory produced by the last store.
+
+// With the current ABI, the outputs need to be converted to loads, which will all use the call's
+// memory output as their input.
+
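
As a source-level illustration (a hypothetical example program, not compiler code): the struct result of callee below starts out as a single aggregate value produced by the call; after expansion, each selector on r becomes either a load from the call's result area or a selection of a result register, all threaded through the call's memory output.

type pair struct{ a, b int64 }

func callee() pair { return pair{1, 2} }

func caller() int64 {
	r := callee() // r.a and r.b are rewritten to loads or register selects
	return r.a + r.b
}
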
+// rewriteSelect recursively walks from leaf selector to a root (OpSelectN, OpLoad, OpArg)
+// through a chain of Struct/Array/builtin Select operations. If the chain of selectors does not
+// end in an expected root, it does nothing (this can happen depending on compiler phase ordering).
+// The "leaf" provides the type, the root supplies the container, and the leaf-to-root path
+// accumulates the offset.
+// It emits the code necessary to implement the leaf select operation that leads to the root.
+//
+// TODO when registers really arrive, must also decompose anything split across two registers or registers and memory.
+func (x *expandState) rewriteSelect(leaf *Value, selector *Value, offset int64, regOffset Abi1RO) []*LocalSlot {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("rewriteSelect(%s; %s; memOff=%d; regOff=%d)\n", leaf.LongString(), selector.LongString(), offset, regOffset)
+ }
+ var locs []*LocalSlot
+ leafType := leaf.Type
+ if len(selector.Args) > 0 {
+ w := selector.Args[0]
+ if w.Op == OpCopy {
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ selector.SetArg(0, w)
+ }
+ }
+ switch selector.Op {
+ case OpArgIntReg, OpArgFloatReg:
+ if leafType == selector.Type { // OpIData leads us here, sometimes.
+ leaf.copyOf(selector)
+ } else {
+ x.f.Fatalf("Unexpected %s type, selector=%s, leaf=%s\n", selector.Op.String(), selector.LongString(), leaf.LongString())
+ }
+ if x.debug > 1 {
+ x.Printf("---%s, break\n", selector.Op.String())
+ }
+ case OpArg:
+ if !x.isAlreadyExpandedAggregateType(selector.Type) {
+ if leafType == selector.Type { // OpIData leads us here, sometimes.
+ x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
+ } else {
+ x.f.Fatalf("Unexpected OpArg type, selector=%s, leaf=%s\n", selector.LongString(), leaf.LongString())
+ }
+ if x.debug > 1 {
+ x.Printf("---OpArg, break\n")
+ }
+ break
+ }
+ switch leaf.Op {
+ case OpIData, OpStructSelect, OpArraySelect:
+ leafType = removeTrivialWrapperTypes(leaf.Type)
+ }
+ x.newArgToMemOrRegs(selector, leaf, offset, regOffset, leafType, leaf.Pos)
+
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
+ }
+
+ case OpLoad: // We end up here because of IData of immediate structures.
+ // Failure case:
+ // (note the failure case is very rare; w/o this case, make.bash and run.bash both pass, as well as
+ // the hard cases of building {syscall,math,math/cmplx,math/bits,go/constant} on ppc64le and mips-softfloat).
+ //
+ // GOSSAFUNC='(*dumper).dump' go build -gcflags=-l -tags=math_big_pure_go cmd/compile/internal/gc
+ // cmd/compile/internal/gc/dump.go:136:14: internal compiler error: '(*dumper).dump': not lowered: v827, StructSelect PTR PTR
+ // b2: ← b1
+ // v20 (+142) = StaticLECall <interface {},mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v8 v1
+ // v21 (142) = SelectN <mem> [1] v20
+ // v22 (142) = SelectN <interface {}> [0] v20
+ // b15: ← b8
+ // v71 (+143) = IData <Nodes> v22 (v[Nodes])
+ // v73 (+146) = StaticLECall <[]*Node,mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v71 v21
+ //
+ // translates (w/o the "case OpLoad:" above) to:
+ //
+ // b2: ← b1
+ // v20 (+142) = StaticCall <mem> {AuxCall{reflect.Value.Interface([reflect.Value,0])[interface {},24]}} [40] v715
+ // v23 (142) = Load <*uintptr> v19 v20
+ // v823 (142) = IsNonNil <bool> v23
+ // v67 (+143) = Load <*[]*Node> v880 v20
+ // b15: ← b8
+ // v827 (146) = StructSelect <*[]*Node> [0] v67
+ // v846 (146) = Store <mem> {*[]*Node} v769 v827 v20
+ // v73 (+146) = StaticCall <mem> {AuxCall{"".Nodes.Slice([Nodes,0])[[]*Node,8]}} [32] v846
+ // i.e., the struct select is generated and remains in because it is not applied to an actual structure.
+ // The OpLoad was created to load the single field of the IData
+ // This case removes that StructSelect.
+ if leafType != selector.Type {
+ if x.f.Config.SoftFloat && selector.Type.IsFloat() {
+ if x.debug > 1 {
+ x.Printf("---OpLoad, break\n")
+ }
+ break // softfloat pass will take care of that
+ }
+ x.f.Fatalf("Unexpected Load as selector, leaf=%s, selector=%s\n", leaf.LongString(), selector.LongString())
+ }
+ leaf.copyOf(selector)
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
+ }
+
+ case OpSelectN:
+ // TODO(register args) result case
+ // if applied to Op-mumble-call, the Aux tells us which result, regOffset specifies offset within result. If a register, should rewrite to OpSelectN for new call.
+ // TODO these may be duplicated. Should memoize. Intermediate selectors will go dead, no worries there.
+ call := selector.Args[0]
+ call0 := call
+ aux := call.Aux.(*AuxCall)
+ which := selector.AuxInt
+ if x.transformedSelects[selector.ID] {
+ // This is a minor hack. Either this select has had its operand adjusted (mem) or
+ // it is some other intermediate node that was rewritten to reference a register (not a generic arg).
+ // This can occur with chains of selection/indexing from single field/element aggregates.
+ leaf.copyOf(selector)
+ break
+ }
+ if which == aux.NResults() { // mem is after the results.
+ // rewrite v as a Copy of call -- the replacement call will produce a mem.
+ if leaf != selector {
+ panic(fmt.Errorf("Unexpected selector of memory, selector=%s, call=%s, leaf=%s", selector.LongString(), call.LongString(), leaf.LongString()))
+ }
+ if aux.abiInfo == nil {
+ panic(badVal("aux.abiInfo nil for call", call))
+ }
+ if existing := x.memForCall[call.ID]; existing == nil {
+ selector.AuxInt = int64(aux.abiInfo.OutRegistersUsed())
+ x.memForCall[call.ID] = selector
+ x.transformedSelects[selector.ID] = true // operand adjusted
+ } else {
+ selector.copyOf(existing)
+ }
+
+ } else {
+ leafType := removeTrivialWrapperTypes(leaf.Type)
+ if x.canSSAType(leafType) {
+ pt := types.NewPtr(leafType)
+ // Any selection right out of the arg area/registers has to be same Block as call, use call as mem input.
+ // Create a "mem" for any loads that need to occur.
+ if mem := x.memForCall[call.ID]; mem != nil {
+ if mem.Block != call.Block {
+ panic(fmt.Errorf("selector and call need to be in same block, selector=%s; call=%s", selector.LongString(), call.LongString()))
+ }
+ call = mem
+ } else {
+ mem = call.Block.NewValue1I(call.Pos.WithNotStmt(), OpSelectN, types.TypeMem, int64(aux.abiInfo.OutRegistersUsed()), call)
+ x.transformedSelects[mem.ID] = true // select uses post-expansion indexing
+ x.memForCall[call.ID] = mem
+ call = mem
+ }
+ outParam := aux.abiInfo.OutParam(int(which))
+ if len(outParam.Registers) > 0 {
+ firstReg := uint32(0)
+ for i := 0; i < int(which); i++ {
+ firstReg += uint32(len(aux.abiInfo.OutParam(i).Registers))
+ }
+ reg := int64(regOffset + Abi1RO(firstReg))
+ if leaf.Block == call.Block {
+ leaf.reset(OpSelectN)
+ leaf.SetArgs1(call0)
+ leaf.Type = leafType
+ leaf.AuxInt = reg
+ x.transformedSelects[leaf.ID] = true // leaf, rewritten to use post-expansion indexing.
+ } else {
+ w := call.Block.NewValue1I(leaf.Pos, OpSelectN, leafType, reg, call0)
+ x.transformedSelects[w.ID] = true // select, using post-expansion indexing.
+ leaf.copyOf(w)
+ }
+ } else {
+ off := x.offsetFrom(x.f.Entry, x.sp, offset+aux.OffsetOfResult(which), pt)
+ if leaf.Block == call.Block {
+ leaf.reset(OpLoad)
+ leaf.SetArgs2(off, call)
+ leaf.Type = leafType
+ } else {
+ w := call.Block.NewValue2(leaf.Pos, OpLoad, leafType, off, call)
+ leaf.copyOf(w)
+ if x.debug > 1 {
+ x.Printf("---new %s\n", w.LongString())
+ }
+ }
+ }
+ for _, s := range x.namedSelects[selector] {
+ locs = append(locs, x.f.Names[s.locIndex])
+ }
+ } else {
+ x.f.Fatalf("Should not have non-SSA-able OpSelectN, selector=%s", selector.LongString())
+ }
+ }
+
+ case OpStructSelect:
+ w := selector.Args[0]
+ var ls []*LocalSlot
+ if w.Type.Kind() != types.TSTRUCT { // IData artifact
+ ls = x.rewriteSelect(leaf, w, offset, regOffset)
+ } else {
+ fldi := int(selector.AuxInt)
+ ls = x.rewriteSelect(leaf, w, offset+w.Type.FieldOff(fldi), regOffset+x.regOffset(w.Type, fldi))
+ if w.Op != OpIData {
+ for _, l := range ls {
+ locs = append(locs, x.f.SplitStruct(l, int(selector.AuxInt)))
+ }
+ }
+ }
+
+ case OpArraySelect:
+ w := selector.Args[0]
+ index := selector.AuxInt
+ x.rewriteSelect(leaf, w, offset+selector.Type.Size()*index, regOffset+x.regOffset(w.Type, int(index)))
+
+ case OpInt64Hi:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset+x.hiOffset, regOffset+x.hiRo)
+ locs = x.splitSlots(ls, ".hi", x.hiOffset, leafType)
+
+ case OpInt64Lo:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset+x.lowOffset, regOffset+x.loRo)
+ locs = x.splitSlots(ls, ".lo", x.lowOffset, leafType)
+
+ case OpStringPtr:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
+ locs = x.splitSlots(ls, ".ptr", 0, x.typs.BytePtr)
+
+ case OpSlicePtr, OpSlicePtrUnchecked:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset, regOffset)
+ locs = x.splitSlots(ls, ".ptr", 0, types.NewPtr(w.Type.Elem()))
+
+ case OpITab:
+ w := selector.Args[0]
+ ls := x.rewriteSelect(leaf, w, offset, regOffset)
+ sfx := ".itab"
+ if w.Type.IsEmptyInterface() {
+ sfx = ".type"
+ }
+ locs = x.splitSlots(ls, sfx, 0, x.typs.Uintptr)
+
+ case OpComplexReal:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
+ locs = x.splitSlots(ls, ".real", 0, selector.Type)
+
+ case OpComplexImag:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+selector.Type.Size(), regOffset+RO_complex_imag) // result is FloatNN, width of result is offset of imaginary part.
+ locs = x.splitSlots(ls, ".imag", selector.Type.Size(), selector.Type)
+
+ case OpStringLen, OpSliceLen:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_slice_len)
+ locs = x.splitSlots(ls, ".len", x.ptrSize, leafType)
+
+ case OpIData:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+x.ptrSize, regOffset+RO_iface_data)
+ locs = x.splitSlots(ls, ".data", x.ptrSize, leafType)
+
+ case OpSliceCap:
+ ls := x.rewriteSelect(leaf, selector.Args[0], offset+2*x.ptrSize, regOffset+RO_slice_cap)
+ locs = x.splitSlots(ls, ".cap", 2*x.ptrSize, leafType)
+
+ case OpCopy: // If it's an intermediate result, recurse
+ locs = x.rewriteSelect(leaf, selector.Args[0], offset, regOffset)
+ for _, s := range x.namedSelects[selector] {
+ // this copy may have had its own name, preserve that, too.
+ locs = append(locs, x.f.Names[s.locIndex])
+ }
+
+ default:
+ // Ignore dead ends. These can occur if this phase is run before decompose builtin (which is not intended, but allowed).
+ }
+
+ return locs
+}
+
+func (x *expandState) rewriteDereference(b *Block, base, a, mem *Value, offset, size int64, typ *types.Type, pos src.XPos) *Value {
+ source := a.Args[0]
+ dst := x.offsetFrom(b, base, offset, source.Type)
+ if a.Uses == 1 && a.Block == b {
+ a.reset(OpMove)
+ a.Pos = pos
+ a.Type = types.TypeMem
+ a.Aux = typ
+ a.AuxInt = size
+ a.SetArgs3(dst, source, mem)
+ mem = a
+ } else {
+ mem = b.NewValue3A(pos, OpMove, types.TypeMem, typ, dst, source, mem)
+ mem.AuxInt = size
+ }
+ return mem
+}
+
+var indexNames [1]string = [1]string{"[0]"}
+
+// pathTo returns the selection path to the leaf type at offset within container.
+// e.g. len(thing.field[0]) => ".field[0].len"
+// This is for the purpose of generating names ultimately fed to a debugger.
+func (x *expandState) pathTo(container, leaf *types.Type, offset int64) string {
+ if container == leaf || offset == 0 && container.Size() == leaf.Size() {
+ return ""
+ }
+ path := ""
+outer:
+ for {
+ switch container.Kind() {
+ case types.TARRAY:
+ container = container.Elem()
+ if container.Size() == 0 {
+ return path
+ }
+ i := offset / container.Size()
+ offset = offset % container.Size()
+ // If a future compiler/ABI supports larger SSA/Arg-able arrays, expand indexNames.
+ path = path + indexNames[i]
+ continue
+ case types.TSTRUCT:
+ for i := 0; i < container.NumFields(); i++ {
+ fld := container.Field(i)
+ if fld.Offset+fld.Type.Size() > offset {
+ offset -= fld.Offset
+ path += "." + fld.Sym.Name
+ container = fld.Type
+ continue outer
+ }
+ }
+ return path
+ case types.TINT64, types.TUINT64:
+ if container.Size() == x.regSize {
+ return path
+ }
+ if offset == x.hiOffset {
+ return path + ".hi"
+ }
+ return path + ".lo"
+ case types.TINTER:
+ if offset != 0 {
+ return path + ".data"
+ }
+ if container.IsEmptyInterface() {
+ return path + ".type"
+ }
+ return path + ".itab"
+
+ case types.TSLICE:
+ if offset == 2*x.regSize {
+ return path + ".cap"
+ }
+ fallthrough
+ case types.TSTRING:
+ if offset == 0 {
+ return path + ".ptr"
+ }
+ return path + ".len"
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ if offset == 0 {
+ return path + ".real"
+ }
+ return path + ".imag"
+ }
+ return path
+ }
+}
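
As a worked example of the traversal above (assuming a 64-bit target): for a container type struct{ s []byte } and offset 8, the struct case appends ".s" and descends into the slice with offset 8 still to account for; the slice case then falls through to the string case, and since the offset is nonzero the result is ".s.len".
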
+
+// decomposeArg is a helper for storeArgOrLoad.
+// It decomposes a Load or an Arg into smaller parts and returns the new mem.
+// If the type does not match one of the expected aggregate types, it returns nil instead.
+// Parameters:
+// pos -- the location of any generated code.
+// b -- the block into which any generated code should normally be placed
+// source -- the value, possibly an aggregate, to be stored.
+// mem -- the mem flowing into this decomposition (loads depend on it, stores update it)
+// t -- the type of the value to be stored
+// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + storeOffset
+// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+func (x *expandState) decomposeArg(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+
+ pa := x.prAssignForArg(source)
+ var locs []*LocalSlot
+ for _, s := range x.namedSelects[source] {
+ locs = append(locs, x.f.Names[s.locIndex])
+ }
+
+ if len(pa.Registers) > 0 {
+ // Handle the in-registers case directly
+ rts, offs := pa.RegisterTypesAndOffsets()
+ last := loadRegOffset + x.regWidth(t)
+ if offs[loadRegOffset] != 0 {
+ // Document the problem before panicking.
+ for i := 0; i < len(rts); i++ {
+ rt := rts[i]
+ off := offs[i]
+ fmt.Printf("rt=%s, off=%d, rt.Width=%d, rt.Align=%d\n", rt.String(), off, rt.Size(), uint8(rt.Alignment()))
+ }
+ panic(fmt.Errorf("offset %d of requested register %d should be zero, source=%s", offs[loadRegOffset], loadRegOffset, source.LongString()))
+ }
+
+ if x.debug > 1 {
+ x.Printf("decompose arg %s has %d locs\n", source.LongString(), len(locs))
+ }
+
+ for i := loadRegOffset; i < last; i++ {
+ rt := rts[i]
+ off := offs[i]
+ w := x.commonArgs[selKey{source, off, rt.Size(), rt}]
+ if w == nil {
+ w = x.newArgToMemOrRegs(source, w, off, i, rt, pos)
+ suffix := x.pathTo(source.Type, rt, off)
+ if suffix != "" {
+ x.splitSlotsIntoNames(locs, suffix, off, rt, w)
+ }
+ }
+ if t.IsPtrShaped() {
+ // Preserve the original store type. This ensures pointer type
+ // properties aren't discarded (e.g, notinheap).
+ if rt.Size() != t.Size() || len(pa.Registers) != 1 || i != loadRegOffset {
+ b.Func.Fatalf("incompatible store type %v and %v, i=%d", t, rt, i)
+ }
+ rt = t
+ }
+ mem = x.storeArgOrLoad(pos, b, w, mem, rt, storeOffset+off, i, storeRc.next(rt))
+ }
+ return mem
+ }
+
+ u := source.Type
+ switch u.Kind() {
+ case types.TARRAY:
+ elem := u.Elem()
+ elemRO := x.regWidth(elem)
+ for i := int64(0); i < u.NumElem(); i++ {
+ elemOff := i * elem.Size()
+ mem = storeOneArg(x, pos, b, locs, indexNames[i], source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
+ loadRegOffset += elemRO
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TSTRUCT:
+ for i := 0; i < u.NumFields(); i++ {
+ fld := u.Field(i)
+ mem = storeOneArg(x, pos, b, locs, "."+fld.Sym.Name, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
+ loadRegOffset += x.regWidth(fld.Type)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TINT64, types.TUINT64:
+ if t.Size() == x.regSize {
+ break
+ }
+ tHi, tLo := x.intPairTypes(t.Kind())
+ mem = storeOneArg(x, pos, b, locs, ".hi", source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
+ pos = pos.WithNotStmt()
+ return storeOneArg(x, pos, b, locs, ".lo", source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
+ case types.TINTER:
+ sfx := ".itab"
+ if u.IsEmptyInterface() {
+ sfx = ".type"
+ }
+ return storeTwoArg(x, pos, b, locs, sfx, ".idata", source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TSTRING:
+ return storeTwoArg(x, pos, b, locs, ".ptr", ".len", source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TCOMPLEX64:
+ return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TCOMPLEX128:
+ return storeTwoArg(x, pos, b, locs, ".real", ".imag", source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TSLICE:
+ mem = storeOneArg(x, pos, b, locs, ".ptr", source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ return storeTwoArg(x, pos, b, locs, ".len", ".cap", source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
+ }
+ return nil
+}
+
+func (x *expandState) splitSlotsIntoNames(locs []*LocalSlot, suffix string, off int64, rt *types.Type, w *Value) {
+ wlocs := x.splitSlots(locs, suffix, off, rt)
+ for _, l := range wlocs {
+ old, ok := x.f.NamedValues[*l]
+ x.f.NamedValues[*l] = append(old, w)
+ if !ok {
+ x.f.Names = append(x.f.Names, l)
+ }
+ }
+}
+
+// decomposeLoad is a helper for storeArgOrLoad.
+// It decomposes a Load into smaller parts and returns the new mem.
+// If the type does not match one of the expected aggregate types, it returns nil instead.
+// Parameters:
+// pos -- the location of any generated code.
+// b -- the block into which any generated code should normally be placed
+// source -- the value, possibly an aggregate, to be stored.
+// mem -- the mem flowing into this decomposition (loads depend on it, stores update it)
+// t -- the type of the value to be stored
+// storeOffset -- if the value is stored in memory, it is stored at base (see storeRc) + offset
+// loadRegOffset -- regarding source as a value in registers, the register offset in ABI1. Meaningful only if source is OpArg.
+// storeRc -- storeRC; if the value is stored in registers, this specifies the registers.
+// StoreRc also identifies whether the target is registers or memory, and has the base for the store operation.
+//
+// TODO -- this needs cleanup; it just works for SSA-able aggregates, and won't fully generalize to register-args aggregates.
+func (x *expandState) decomposeLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ u := source.Type
+ switch u.Kind() {
+ case types.TARRAY:
+ elem := u.Elem()
+ elemRO := x.regWidth(elem)
+ for i := int64(0); i < u.NumElem(); i++ {
+ elemOff := i * elem.Size()
+ mem = storeOneLoad(x, pos, b, source, mem, elem, elemOff, storeOffset+elemOff, loadRegOffset, storeRc.next(elem))
+ loadRegOffset += elemRO
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TSTRUCT:
+ for i := 0; i < u.NumFields(); i++ {
+ fld := u.Field(i)
+ mem = storeOneLoad(x, pos, b, source, mem, fld.Type, fld.Offset, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
+ loadRegOffset += x.regWidth(fld.Type)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+ case types.TINT64, types.TUINT64:
+ if t.Size() == x.regSize {
+ break
+ }
+ tHi, tLo := x.intPairTypes(t.Kind())
+ mem = storeOneLoad(x, pos, b, source, mem, tHi, x.hiOffset, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
+ pos = pos.WithNotStmt()
+ return storeOneLoad(x, pos, b, source, mem, tLo, x.lowOffset, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.loRo))
+ case types.TINTER:
+ return storeTwoLoad(x, pos, b, source, mem, x.typs.Uintptr, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TSTRING:
+ return storeTwoLoad(x, pos, b, source, mem, x.typs.BytePtr, x.typs.Int, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TCOMPLEX64:
+ return storeTwoLoad(x, pos, b, source, mem, x.typs.Float32, x.typs.Float32, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TCOMPLEX128:
+ return storeTwoLoad(x, pos, b, source, mem, x.typs.Float64, x.typs.Float64, 0, storeOffset, loadRegOffset, storeRc)
+ case types.TSLICE:
+ mem = storeOneLoad(x, pos, b, source, mem, x.typs.BytePtr, 0, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ return storeTwoLoad(x, pos, b, source, mem, x.typs.Int, x.typs.Int, x.ptrSize, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc)
+ }
+ return nil
+}
+
+// storeOneArg creates a decomposed (one step) arg that is then stored.
+// pos and b locate the store instruction, source is the "base" of the value input,
+// mem is the input mem, t is the type in question, and offArg and offStore are the offsets from the respective bases.
+func storeOneArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix string, source, mem *Value, t *types.Type, argOffset, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("storeOneArg(%s; %s; %s; aO=%d; sO=%d; lrO=%d; %s)\n", source.LongString(), mem.String(), t.String(), argOffset, storeOffset, loadRegOffset, storeRc.String())
+ }
+
+ w := x.commonArgs[selKey{source, argOffset, t.Size(), t}]
+ if w == nil {
+ w = x.newArgToMemOrRegs(source, w, argOffset, loadRegOffset, t, pos)
+ x.splitSlotsIntoNames(locs, suffix, argOffset, t, w)
+ }
+ return x.storeArgOrLoad(pos, b, w, mem, t, storeOffset, loadRegOffset, storeRc)
+}
+
+// storeOneLoad creates a decomposed (one step) load that is then stored.
+func storeOneLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ from := x.offsetFrom(source.Block, source.Args[0], offArg, types.NewPtr(t))
+ w := source.Block.NewValue2(source.Pos, OpLoad, t, from, mem)
+ return x.storeArgOrLoad(pos, b, w, mem, t, offStore, loadRegOffset, storeRc)
+}
+
+func storeTwoArg(x *expandState, pos src.XPos, b *Block, locs []*LocalSlot, suffix1 string, suffix2 string, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ mem = storeOneArg(x, pos, b, locs, suffix1, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneArg(x, pos, b, locs, suffix2, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
+}
+
+// storeTwoLoad creates a pair of decomposed (one step) loads that are then stored.
+// The elements of the pair must not require any additional alignment.
+func storeTwoLoad(x *expandState, pos src.XPos, b *Block, source, mem *Value, t1, t2 *types.Type, offArg, offStore int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ mem = storeOneLoad(x, pos, b, source, mem, t1, offArg, offStore, loadRegOffset, storeRc.next(t1))
+ pos = pos.WithNotStmt()
+ t1Size := t1.Size()
+ return storeOneLoad(x, pos, b, source, mem, t2, offArg+t1Size, offStore+t1Size, loadRegOffset+1, storeRc)
+}
+
+// storeArgOrLoad converts stores of SSA-able potentially aggregatable arguments (passed to a call) into a series of primitive-typed
+// stores of non-aggregate types. It recursively walks up a chain of selectors until it reaches a Load or an Arg.
+// If it does not reach a Load or an Arg, nothing happens; this allows a little freedom in phase ordering.
+func (x *expandState) storeArgOrLoad(pos src.XPos, b *Block, source, mem *Value, t *types.Type, storeOffset int64, loadRegOffset Abi1RO, storeRc registerCursor) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("storeArgOrLoad(%s; %s; %s; %d; %s)\n", source.LongString(), mem.String(), t.String(), storeOffset, storeRc.String())
+ }
+
+ // Start with Opcodes that can be disassembled
+ switch source.Op {
+ case OpCopy:
+ return x.storeArgOrLoad(pos, b, source.Args[0], mem, t, storeOffset, loadRegOffset, storeRc)
+
+ case OpLoad, OpDereference:
+ ret := x.decomposeLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
+ if ret != nil {
+ return ret
+ }
+
+ case OpArg:
+ ret := x.decomposeArg(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
+ if ret != nil {
+ return ret
+ }
+
+ case OpArrayMake0, OpStructMake0:
+ // TODO(register args) is this correct for registers?
+ return mem
+
+ case OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4:
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ mem = x.storeArgOrLoad(pos, b, source.Args[i], mem, fld.Type, storeOffset+fld.Offset, 0, storeRc.next(fld.Type))
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case OpArrayMake1:
+ return x.storeArgOrLoad(pos, b, source.Args[0], mem, t.Elem(), storeOffset, 0, storeRc.at(t, 0))
+
+ case OpInt64Make:
+ tHi, tLo := x.intPairTypes(t.Kind())
+ mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tHi, storeOffset+x.hiOffset, 0, storeRc.next(tHi))
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, source.Args[1], mem, tLo, storeOffset+x.lowOffset, 0, storeRc)
+
+ case OpComplexMake:
+ tPart := x.typs.Float32
+ wPart := t.Size() / 2
+ if wPart == 8 {
+ tPart = x.typs.Float64
+ }
+ mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, tPart, storeOffset, 0, storeRc.next(tPart))
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, source.Args[1], mem, tPart, storeOffset+wPart, 0, storeRc)
+
+ case OpIMake:
+ mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.Uintptr, storeOffset, 0, storeRc.next(x.typs.Uintptr))
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.BytePtr, storeOffset+x.ptrSize, 0, storeRc)
+
+ case OpStringMake:
+ mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ return x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc)
+
+ case OpSliceMake:
+ mem = x.storeArgOrLoad(pos, b, source.Args[0], mem, x.typs.BytePtr, storeOffset, 0, storeRc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ mem = x.storeArgOrLoad(pos, b, source.Args[1], mem, x.typs.Int, storeOffset+x.ptrSize, 0, storeRc.next(x.typs.Int))
+ return x.storeArgOrLoad(pos, b, source.Args[2], mem, x.typs.Int, storeOffset+2*x.ptrSize, 0, storeRc)
+ }
+
+ // For nodes that cannot be taken apart -- OpSelectN, other structure selectors.
+ switch t.Kind() {
+ case types.TARRAY:
+ elt := t.Elem()
+ if source.Type != t && t.NumElem() == 1 && elt.Size() == t.Size() && t.Size() == x.regSize {
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
+ }
+ eltRO := x.regWidth(elt)
+ source.Type = t
+ for i := int64(0); i < t.NumElem(); i++ {
+ sel := source.Block.NewValue1I(pos, OpArraySelect, elt, i, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, elt, storeOffset+i*elt.Size(), loadRegOffset, storeRc.at(t, 0))
+ loadRegOffset += eltRO
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case types.TSTRUCT:
+ if source.Type != t && t.NumFields() == 1 && t.Field(0).Type.Size() == t.Size() && t.Size() == x.regSize {
+ // This peculiar test deals with accesses to immediate interface data.
+ // It works okay because everything is the same size.
+ // Example code that triggers this can be found in go/constant/value.go, function ToComplex
+ // v119 (+881) = IData <intVal> v6
+ // v121 (+882) = StaticLECall <floatVal,mem> {AuxCall{"".itof([intVal,0])[floatVal,8]}} [16] v119 v1
+ // This corresponds to the generic rewrite rule "(StructSelect [0] (IData x)) => (IData x)"
+ // Guard against "struct{struct{*foo}}"
+ // Other rewriting phases create minor glitches when they transform IData, for instance the
+ // interface-typed Arg "x" of ToFloat in go/constant/value.go
+ // v6 (858) = Arg <Value> {x} (x[Value], x[Value])
+ // is rewritten by decomposeArgs into
+ // v141 (858) = Arg <uintptr> {x}
+ // v139 (858) = Arg <*uint8> {x} [8]
+ // because of a type case clause on line 862 of go/constant/value.go
+ // case intVal:
+ // return itof(x)
+ // v139 is later stored as an intVal == struct{val *big.Int} which naively requires the fields
+ // of a *uint8, which does not succeed.
+ t = removeTrivialWrapperTypes(t)
+ // it could be a leaf type, but the "leaf" could be complex64 (for example)
+ return x.storeArgOrLoad(pos, b, source, mem, t, storeOffset, loadRegOffset, storeRc)
+ }
+
+ source.Type = t
+ for i := 0; i < t.NumFields(); i++ {
+ fld := t.Field(i)
+ sel := source.Block.NewValue1I(pos, OpStructSelect, fld.Type, int64(i), source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, fld.Type, storeOffset+fld.Offset, loadRegOffset, storeRc.next(fld.Type))
+ loadRegOffset += x.regWidth(fld.Type)
+ pos = pos.WithNotStmt()
+ }
+ return mem
+
+ case types.TINT64, types.TUINT64:
+ if t.Size() == x.regSize {
+ break
+ }
+ tHi, tLo := x.intPairTypes(t.Kind())
+ sel := source.Block.NewValue1(pos, OpInt64Hi, tHi, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, tHi, storeOffset+x.hiOffset, loadRegOffset+x.hiRo, storeRc.plus(x.hiRo))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpInt64Lo, tLo, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, tLo, storeOffset+x.lowOffset, loadRegOffset+x.loRo, storeRc.plus(x.hiRo))
+
+ case types.TINTER:
+ sel := source.Block.NewValue1(pos, OpITab, x.typs.BytePtr, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpIData, x.typs.BytePtr, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset+x.ptrSize, loadRegOffset+RO_iface_data, storeRc)
+
+ case types.TSTRING:
+ sel := source.Block.NewValue1(pos, OpStringPtr, x.typs.BytePtr, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.BytePtr, storeOffset, loadRegOffset, storeRc.next(x.typs.BytePtr))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpStringLen, x.typs.Int, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_string_len, storeRc)
+
+ case types.TSLICE:
+ et := types.NewPtr(t.Elem())
+ sel := source.Block.NewValue1(pos, OpSlicePtr, et, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, et, storeOffset, loadRegOffset, storeRc.next(et))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpSliceLen, x.typs.Int, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+x.ptrSize, loadRegOffset+RO_slice_len, storeRc.next(x.typs.Int))
+ sel = source.Block.NewValue1(pos, OpSliceCap, x.typs.Int, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Int, storeOffset+2*x.ptrSize, loadRegOffset+RO_slice_cap, storeRc)
+
+ case types.TCOMPLEX64:
+ sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float32, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset, loadRegOffset, storeRc.next(x.typs.Float32))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float32, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float32, storeOffset+4, loadRegOffset+RO_complex_imag, storeRc)
+
+ case types.TCOMPLEX128:
+ sel := source.Block.NewValue1(pos, OpComplexReal, x.typs.Float64, source)
+ mem = x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset, loadRegOffset, storeRc.next(x.typs.Float64))
+ pos = pos.WithNotStmt()
+ sel = source.Block.NewValue1(pos, OpComplexImag, x.typs.Float64, source)
+ return x.storeArgOrLoad(pos, b, sel, mem, x.typs.Float64, storeOffset+8, loadRegOffset+RO_complex_imag, storeRc)
+ }
+
+ s := mem
+ if source.Op == OpDereference {
+ source.Op = OpLoad // For purposes of parameter passing expansion, a Dereference is a Load.
+ }
+ if storeRc.hasRegs() {
+ storeRc.addArg(source)
+ } else {
+ dst := x.offsetFrom(b, storeRc.storeDest, storeOffset, types.NewPtr(t))
+ s = b.NewValue3A(pos, OpStore, types.TypeMem, t, dst, source, mem)
+ }
+ if x.debug > 1 {
+ x.Printf("-->storeArg returns %s, storeRc=%s\n", s.LongString(), storeRc.String())
+ }
+ return s
+}
+
+// rewriteArgs replaces all the call-parameter Args to a call with their register translation (if any).
+// Preceding parameters (code pointers, closure pointer) are preserved, and the memory input is modified
+// to account for any parameter stores required.
+// Any of the old Args that have their use count fall to zero are marked OpInvalid.
+func (x *expandState) rewriteArgs(v *Value, firstArg int) {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("rewriteArgs(%s; %d)\n", v.LongString(), firstArg)
+ }
+ // Thread the stores on the memory arg
+ aux := v.Aux.(*AuxCall)
+ m0 := v.MemoryArg()
+ mem := m0
+ newArgs := []*Value{}
+ oldArgs := []*Value{}
+ sp := x.sp
+ if v.Op == OpTailLECall {
+ // For tail call, we unwind the frame before the call so we'll use the caller's
+ // SP.
+ sp = x.f.Entry.NewValue0(src.NoXPos, OpGetCallerSP, x.typs.Uintptr)
+ }
+ for i, a := range v.Args[firstArg : len(v.Args)-1] { // skip leading non-parameter SSA Args and trailing mem SSA Arg.
+ oldArgs = append(oldArgs, a)
+ auxI := int64(i)
+ aRegs := aux.RegsOfArg(auxI)
+ aType := aux.TypeOfArg(auxI)
+ if len(aRegs) == 0 && a.Op == OpDereference {
+ aOffset := aux.OffsetOfArg(auxI)
+ if a.MemoryArg() != m0 {
+ x.f.Fatalf("Op...LECall and OpDereference have mismatched mem, %s and %s", v.LongString(), a.LongString())
+ }
+ if v.Op == OpTailLECall {
+ // It's common for a tail call to pass the same arguments (e.g. a method wrapper),
+ // in which case this would be a self copy. Detect that and optimize it out.
+ a0 := a.Args[0]
+ if a0.Op == OpLocalAddr {
+ n := a0.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ continue
+ }
+ }
+ }
+ // "Dereference" of addressed (probably not-SSA-eligible) value becomes Move
+ // TODO(register args) this will be more complicated with registers in the picture.
+ mem = x.rewriteDereference(v.Block, sp, a, mem, aOffset, aux.SizeOfArg(auxI), aType, a.Pos)
+ } else {
+ var rc registerCursor
+ var result *[]*Value
+ var aOffset int64
+ if len(aRegs) > 0 {
+ result = &newArgs
+ } else {
+ aOffset = aux.OffsetOfArg(auxI)
+ }
+ if v.Op == OpTailLECall && a.Op == OpArg && a.AuxInt == 0 {
+ // It's common for a tail call to pass the same arguments (e.g. a method wrapper),
+ // in which case this would be a self copy. Detect that and optimize it out.
+ n := a.Aux.(*ir.Name)
+ if n.Class == ir.PPARAM && n.FrameOffset()+x.f.Config.ctxt.FixedFrameSize() == aOffset {
+ continue
+ }
+ }
+ if x.debug > 1 {
+ x.Printf("...storeArg %s, %v, %d\n", a.LongString(), aType, aOffset)
+ }
+ rc.init(aRegs, aux.abiInfo, result, sp)
+ mem = x.storeArgOrLoad(a.Pos, v.Block, a, mem, aType, aOffset, 0, rc)
+ }
+ }
+ var preArgStore [2]*Value
+ preArgs := append(preArgStore[:0], v.Args[0:firstArg]...)
+ v.resetArgs()
+ v.AddArgs(preArgs...)
+ v.AddArgs(newArgs...)
+ v.AddArg(mem)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
+ x.invalidateRecursively(a)
+ }
+ }
+
+ return
+}
+
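+// invalidateRecursively marks a (and, transitively, any of its arguments whose use counts
+// drop to zero) as invalid, logging any lost statement markers when debugging is enabled.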
+func (x *expandState) invalidateRecursively(a *Value) {
+ var s string
+ if x.debug > 0 {
+ plus := " "
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ plus = " +"
+ }
+ s = a.String() + plus + a.Pos.LineNumber() + " " + a.LongString()
+ if x.debug > 1 {
+ x.Printf("...marking %v unused\n", s)
+ }
+ }
+ lost := a.invalidateRecursively()
+ if x.debug&1 != 0 && lost { // For odd values of x.debug, do this.
+ x.Printf("Lost statement marker in %s on former %s\n", base.Ctxt.Pkgpath+"."+x.f.Name, s)
+ }
+}
+
+// expandCalls converts LE (Late Expansion) calls that act like they receive value args into a lower-level form
+// that is more oriented to a platform's ABI. The SelectN operations that extract results are rewritten into
+// more appropriate forms, and any StructMake or ArrayMake inputs are decomposed until non-struct values are
+// reached. On the callee side, OpArg nodes are not decomposed until this phase is run.
+// TODO results should not be lowered until this phase.
+func expandCalls(f *Func) {
+ // Calls that need lowering have some number of inputs, including a memory input,
+ // and produce a tuple of (value1, value2, ..., mem) where valueK may or may not be SSA-able.
+
+ // With the current ABI those inputs need to be converted into stores to memory,
+ // rethreading the call's memory input to the first, and the new call now receiving the last.
+
+ // With the current ABI, the outputs need to be converted to loads, which will all use the call's
+ // memory output as their input.
+ sp, _ := f.spSb()
+ x := &expandState{
+ f: f,
+ abi1: f.ABI1,
+ debug: f.pass.debug,
+ canSSAType: f.fe.CanSSA,
+ regSize: f.Config.RegSize,
+ sp: sp,
+ typs: &f.Config.Types,
+ ptrSize: f.Config.PtrSize,
+ namedSelects: make(map[*Value][]namedVal),
+ sdom: f.Sdom(),
+ commonArgs: make(map[selKey]*Value),
+ memForCall: make(map[ID]*Value),
+ transformedSelects: make(map[ID]bool),
+ }
+
+ // For 32-bit, need to deal with decomposition of 64-bit integers, which depends on endianness.
+ if f.Config.BigEndian {
+ x.lowOffset, x.hiOffset = 4, 0
+ x.loRo, x.hiRo = 1, 0
+ } else {
+ x.lowOffset, x.hiOffset = 0, 4
+ x.loRo, x.hiRo = 0, 1
+ }
+
+ if x.debug > 1 {
+ x.Printf("\nexpandsCalls(%s)\n", f.Name)
+ }
+
+ for i, name := range f.Names {
+ t := name.Type
+ if x.isAlreadyExpandedAggregateType(t) {
+ for j, v := range f.NamedValues[*name] {
+ if v.Op == OpSelectN || v.Op == OpArg && x.isAlreadyExpandedAggregateType(v.Type) {
+ ns := x.namedSelects[v]
+ x.namedSelects[v] = append(ns, namedVal{locIndex: i, valIndex: j})
+ }
+ }
+ }
+ }
+
+ // TODO if too slow, whole program iteration can be replaced w/ slices of appropriate values, accumulated in first loop here.
+
+ // Step 0: rewrite the calls to convert args to calls into stores/register movement.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ firstArg := 0
+ switch v.Op {
+ case OpStaticLECall, OpTailLECall:
+ case OpInterLECall:
+ firstArg = 1
+ case OpClosureLECall:
+ firstArg = 2
+ default:
+ continue
+ }
+ x.rewriteArgs(v, firstArg)
+ }
+ if isBlockMultiValueExit(b) {
+ x.indent(3)
+ // Very similar to code in rewriteArgs, but results instead of args.
+ v := b.Controls[0]
+ m0 := v.MemoryArg()
+ mem := m0
+ aux := f.OwnAux
+ allResults := []*Value{}
+ if x.debug > 1 {
+ x.Printf("multiValueExit rewriting %s\n", v.LongString())
+ }
+ var oldArgs []*Value
+ for j, a := range v.Args[:len(v.Args)-1] {
+ oldArgs = append(oldArgs, a)
+ i := int64(j)
+ auxType := aux.TypeOfResult(i)
+ auxBase := b.NewValue2A(v.Pos, OpLocalAddr, types.NewPtr(auxType), aux.NameOfResult(i), x.sp, mem)
+ auxOffset := int64(0)
+ auxSize := aux.SizeOfResult(i)
+ aRegs := aux.RegsOfResult(int64(j))
+ if len(aRegs) == 0 && a.Op == OpDereference {
+ // Avoid a self-move, and if one is detected try to remove the already-inserted VarDef for the assignment that won't happen.
+ if dAddr, dMem := a.Args[0], a.Args[1]; dAddr.Op == OpLocalAddr && dAddr.Args[0].Op == OpSP &&
+ dAddr.Args[1] == dMem && dAddr.Aux == aux.NameOfResult(i) {
+ if dMem.Op == OpVarDef && dMem.Aux == dAddr.Aux {
+ dMem.copyOf(dMem.MemoryArg()) // elide the VarDef
+ }
+ continue
+ }
+ mem = x.rewriteDereference(v.Block, auxBase, a, mem, auxOffset, auxSize, auxType, a.Pos)
+ } else {
+ if a.Op == OpLoad && a.Args[0].Op == OpLocalAddr {
+ addr := a.Args[0] // This is a self-move. // TODO(register args) do what here for registers?
+ if addr.MemoryArg() == a.MemoryArg() && addr.Aux == aux.NameOfResult(i) {
+ continue
+ }
+ }
+ var rc registerCursor
+ var result *[]*Value
+ if len(aRegs) > 0 {
+ result = &allResults
+ }
+ rc.init(aRegs, aux.abiInfo, result, auxBase)
+ mem = x.storeArgOrLoad(v.Pos, b, a, mem, aux.TypeOfResult(i), auxOffset, 0, rc)
+ }
+ }
+ v.resetArgs()
+ v.AddArgs(allResults...)
+ v.AddArg(mem)
+ v.Type = types.NewResults(append(abi.RegisterTypes(aux.abiInfo.OutParams()), types.TypeMem))
+ b.SetControl(v)
+ for _, a := range oldArgs {
+ if a.Uses == 0 {
+ if x.debug > 1 {
+ x.Printf("...marking %v unused\n", a.LongString())
+ }
+ x.invalidateRecursively(a)
+ }
+ }
+ if x.debug > 1 {
+ x.Printf("...multiValueExit new result %s\n", v.LongString())
+ }
+ x.indent(-3)
+ }
+ }
+
+ // Step 1: any stores of aggregates remaining are believed to be sourced from call results or args.
+ // Decompose those stores into a series of smaller stores, adding selection ops as necessary.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpStore {
+ t := v.Aux.(*types.Type)
+ source := v.Args[1]
+ tSrc := source.Type
+ iAEATt := x.isAlreadyExpandedAggregateType(t)
+
+ if !iAEATt {
+ // guarding against store immediate struct into interface data field -- store type is *uint8
+ // TODO can this happen recursively?
+ iAEATt = x.isAlreadyExpandedAggregateType(tSrc)
+ if iAEATt {
+ t = tSrc
+ }
+ }
+ dst, mem := v.Args[0], v.Args[2]
+ mem = x.storeArgOrLoad(v.Pos, b, source, mem, t, 0, 0, registerCursor{storeDest: dst})
+ v.copyOf(mem)
+ }
+ }
+ }
+
+ val2Preds := make(map[*Value]int32) // Used to accumulate dependency graph of selection operations for topological ordering.
+
+ // Step 2: transform or accumulate selection operations for rewrite in topological order.
+ //
+ // Aggregate types that have already (in earlier phases) been transformed must be lowered comprehensively to finish
+ // the transformation (user-defined structs and arrays, slices, strings, interfaces, complex, 64-bit on 32-bit architectures).
+ //
+ // Any select-for-addressing applied to call results can be transformed directly.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // Accumulate chains of selectors for processing in topological order
+ switch v.Op {
+ case OpStructSelect, OpArraySelect,
+ OpIData, OpITab,
+ OpStringPtr, OpStringLen,
+ OpSlicePtr, OpSliceLen, OpSliceCap, OpSlicePtrUnchecked,
+ OpComplexReal, OpComplexImag,
+ OpInt64Hi, OpInt64Lo:
+ w := v.Args[0]
+ switch w.Op {
+ case OpStructSelect, OpArraySelect, OpSelectN, OpArg:
+ val2Preds[w] += 1
+ if x.debug > 1 {
+ x.Printf("v2p[%s] = %d\n", w.LongString(), val2Preds[w])
+ }
+ }
+ fallthrough
+
+ case OpSelectN:
+ if _, ok := val2Preds[v]; !ok {
+ val2Preds[v] = 0
+ if x.debug > 1 {
+ x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
+ }
+ }
+
+ case OpArg:
+ if !x.isAlreadyExpandedAggregateType(v.Type) {
+ continue
+ }
+ if _, ok := val2Preds[v]; !ok {
+ val2Preds[v] = 0
+ if x.debug > 1 {
+ x.Printf("v2p[%s] = %d\n", v.LongString(), val2Preds[v])
+ }
+ }
+
+ case OpSelectNAddr:
+ // Do these directly, there are no chains of selectors.
+ call := v.Args[0]
+ which := v.AuxInt
+ aux := call.Aux.(*AuxCall)
+ pt := v.Type
+ off := x.offsetFrom(x.f.Entry, x.sp, aux.OffsetOfResult(which), pt)
+ v.copyOf(off)
+ }
+ }
+ }
+
+ // Step 3: Compute topological order of selectors,
+ // then process it in reverse to eliminate duplicates,
+ // then forwards to rewrite selectors.
+ //
+ // All chains of selectors end up in same block as the call.
+
+ // Compilation must be deterministic, so sort after extracting first zeroes from map.
+ // Sorting allows dominators-last order within each batch,
+ // so that the backwards scan for duplicates will most often find copies from dominating blocks (it is best-effort).
+ var toProcess []*Value
+ less := func(i, j int) bool {
+ vi, vj := toProcess[i], toProcess[j]
+ bi, bj := vi.Block, vj.Block
+ if bi == bj {
+ return vi.ID < vj.ID
+ }
+ return x.sdom.domorder(bi) > x.sdom.domorder(bj) // reverse the order to put dominators last.
+ }
+
+ // Accumulate order in allOrdered
+ var allOrdered []*Value
+ for v, n := range val2Preds {
+ if n == 0 {
+ allOrdered = append(allOrdered, v)
+ }
+ }
+ last := 0 // allOrdered[0:last] has been top-sorted and processed
+ for len(val2Preds) > 0 {
+ toProcess = allOrdered[last:]
+ last = len(allOrdered)
+ sort.SliceStable(toProcess, less)
+ for _, v := range toProcess {
+ delete(val2Preds, v)
+ if v.Op == OpArg {
+ continue // no Args[0], hence done.
+ }
+ w := v.Args[0]
+ n, ok := val2Preds[w]
+ if !ok {
+ continue
+ }
+ if n == 1 {
+ allOrdered = append(allOrdered, w)
+ delete(val2Preds, w)
+ continue
+ }
+ val2Preds[w] = n - 1
+ }
+ }
+
+ x.commonSelectors = make(map[selKey]*Value)
+ // Rewrite duplicate selectors as copies where possible.
+ for i := len(allOrdered) - 1; i >= 0; i-- {
+ v := allOrdered[i]
+ if v.Op == OpArg {
+ continue
+ }
+ w := v.Args[0]
+ if w.Op == OpCopy {
+ for w.Op == OpCopy {
+ w = w.Args[0]
+ }
+ v.SetArg(0, w)
+ }
+ typ := v.Type
+ if typ.IsMemory() {
+ continue // handled elsewhere, not an indexable result
+ }
+ size := typ.Size()
+ offset := int64(0)
+ switch v.Op {
+ case OpStructSelect:
+ if w.Type.Kind() == types.TSTRUCT {
+ offset = w.Type.FieldOff(int(v.AuxInt))
+ } else { // Immediate interface data artifact, offset is zero.
+ f.Fatalf("Expand calls interface data problem, func %s, v=%s, w=%s\n", f.Name, v.LongString(), w.LongString())
+ }
+ case OpArraySelect:
+ offset = size * v.AuxInt
+ case OpSelectN:
+ offset = v.AuxInt // offset is just a key, really.
+ case OpInt64Hi:
+ offset = x.hiOffset
+ case OpInt64Lo:
+ offset = x.lowOffset
+ case OpStringLen, OpSliceLen, OpIData:
+ offset = x.ptrSize
+ case OpSliceCap:
+ offset = 2 * x.ptrSize
+ case OpComplexImag:
+ offset = size
+ }
+ sk := selKey{from: w, size: size, offsetOrIndex: offset, typ: typ}
+ dupe := x.commonSelectors[sk]
+ if dupe == nil {
+ x.commonSelectors[sk] = v
+ } else if x.sdom.IsAncestorEq(dupe.Block, v.Block) {
+ if x.debug > 1 {
+ x.Printf("Duplicate, make %s copy of %s\n", v, dupe)
+ }
+ v.copyOf(dupe)
+ } else {
+ // Because values are processed in dominator order, the old common[s] will never dominate after a miss is seen.
+ // Installing the new value might match some future values.
+ x.commonSelectors[sk] = v
+ }
+ }
+
+ // Indices of entries in f.Names that need to be deleted.
+ var toDelete []namedVal
+
+ // Rewrite selectors.
+ for i, v := range allOrdered {
+ if x.debug > 1 {
+ b := v.Block
+ x.Printf("allOrdered[%d] = b%d, %s, uses=%d\n", i, b.ID, v.LongString(), v.Uses)
+ }
+ if v.Uses == 0 {
+ x.invalidateRecursively(v)
+ continue
+ }
+ if v.Op == OpCopy {
+ continue
+ }
+ locs := x.rewriteSelect(v, v, 0, 0)
+ // Install new names.
+ if v.Type.IsMemory() {
+ continue
+ }
+ // Leaf types may have debug locations
+ if !x.isAlreadyExpandedAggregateType(v.Type) {
+ for _, l := range locs {
+ if _, ok := f.NamedValues[*l]; !ok {
+ f.Names = append(f.Names, l)
+ }
+ f.NamedValues[*l] = append(f.NamedValues[*l], v)
+ }
+ continue
+ }
+ if ns, ok := x.namedSelects[v]; ok {
+ // Not-leaf types that had debug locations need to lose them.
+
+ toDelete = append(toDelete, ns...)
+ }
+ }
+
+ deleteNamedVals(f, toDelete)
+
+ // Step 4: rewrite the calls themselves, correcting the type.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpArg:
+ x.rewriteArgToMemOrRegs(v)
+ case OpStaticLECall:
+ v.Op = OpStaticCall
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
+ case OpTailLECall:
+ v.Op = OpTailCall
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
+ case OpClosureLECall:
+ v.Op = OpClosureCall
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
+ case OpInterLECall:
+ v.Op = OpInterCall
+ rts := abi.RegisterTypes(v.Aux.(*AuxCall).abiInfo.OutParams())
+ v.Type = types.NewResults(append(rts, types.TypeMem))
+ }
+ }
+ }
+
+ // Step 5: dedup OpArgXXXReg values. Mostly it is already dedup'd by commonArgs,
+ // but there are cases where we have the same OpArgXXXReg values with different types.
+ // E.g. string is sometimes decomposed as { *int8, int }, sometimes as { unsafe.Pointer, uintptr }.
+ // (Can we avoid that?)
+ var IArg, FArg [32]*Value
+ for _, v := range f.Entry.Values {
+ switch v.Op {
+ case OpArgIntReg:
+ i := v.AuxInt
+ if w := IArg[i]; w != nil {
+ if w.Type.Size() != v.Type.Size() {
+ f.Fatalf("incompatible OpArgIntReg [%d]: %s and %s", i, v.LongString(), w.LongString())
+ }
+ if w.Type.IsUnsafePtr() && !v.Type.IsUnsafePtr() {
+ // Update unsafe.Pointer type if we know the actual pointer type.
+ w.Type = v.Type
+ }
+ // TODO: don't dedup pointer and scalar? Rewrite to OpConvert? Can it happen?
+ v.copyOf(w)
+ } else {
+ IArg[i] = v
+ }
+ case OpArgFloatReg:
+ i := v.AuxInt
+ if w := FArg[i]; w != nil {
+ if w.Type.Size() != v.Type.Size() {
+ f.Fatalf("incompatible OpArgFloatReg [%d]: %v and %v", i, v, w)
+ }
+ v.copyOf(w)
+ } else {
+ FArg[i] = v
+ }
+ }
+ }
+
+ // Step 6: elide any copies introduced.
+ // Update named values.
+ for _, name := range f.Names {
+ values := f.NamedValues[*name]
+ for i, v := range values {
+ if v.Op == OpCopy {
+ a := v.Args[0]
+ for a.Op == OpCopy {
+ a = a.Args[0]
+ }
+ values[i] = a
+ }
+ }
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ for a.Uses == 0 {
+ b := a.Args[0]
+ x.invalidateRecursively(a)
+ a = b
+ }
+ }
+ }
+ }
+
+ // Rewriting can attach lines to values that are unlikely to survive code generation, so move them to a use.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ if a.Pos.IsStmt() != src.PosIsStmt {
+ continue
+ }
+ if a.Type.IsMemory() {
+ continue
+ }
+ if a.Pos.Line() != v.Pos.Line() {
+ continue
+ }
+ if !a.Pos.SameFile(v.Pos) {
+ continue
+ }
+ switch a.Op {
+ case OpArgIntReg, OpArgFloatReg, OpSelectN:
+ v.Pos = v.Pos.WithIsStmt()
+ a.Pos = a.Pos.WithDefaultStmt()
+ }
+ }
+ }
+ }
+}
+
+// rewriteArgToMemOrRegs converts OpArg v in-place into the register version of v,
+// if that is appropriate.
+func (x *expandState) rewriteArgToMemOrRegs(v *Value) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("rewriteArgToMemOrRegs(%s)\n", v.LongString())
+ }
+ pa := x.prAssignForArg(v)
+ switch len(pa.Registers) {
+ case 0:
+ frameOff := v.Aux.(*ir.Name).FrameOffset()
+ if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
+ panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
+ pa.Offset(), frameOff, v.LongString()))
+ }
+ case 1:
+ t := v.Type
+ key := selKey{v, 0, t.Size(), t}
+ w := x.commonArgs[key]
+ if w != nil && w.Uses != 0 { // do not reuse dead value
+ v.copyOf(w)
+ break
+ }
+ r := pa.Registers[0]
+ var i int64
+ v.Op, i = ArgOpAndRegisterFor(r, x.f.ABISelf)
+ v.Aux = &AuxNameOffset{v.Aux.(*ir.Name), 0}
+ v.AuxInt = i
+ x.commonArgs[key] = v
+
+ default:
+ panic(badVal("Saw unexpanded OpArg", v))
+ }
+ if x.debug > 1 {
+ x.Printf("-->%s\n", v.LongString())
+ }
+ return v
+}
+
+// newArgToMemOrRegs either rewrites toReplace into an OpArg referencing memory or into an OpArgXXXReg to a register,
+// or rewrites it into a copy of the appropriate OpArgXXX. The actual OpArgXXX is determined by combining baseArg (an OpArg)
+// with offset, regOffset, and t to determine which portion of it to reference (either all or a part, in memory or in registers).
+func (x *expandState) newArgToMemOrRegs(baseArg, toReplace *Value, offset int64, regOffset Abi1RO, t *types.Type, pos src.XPos) *Value {
+ if x.debug > 1 {
+ x.indent(3)
+ defer x.indent(-3)
+ x.Printf("newArgToMemOrRegs(base=%s; toReplace=%s; t=%s; memOff=%d; regOff=%d)\n", baseArg.String(), toReplace.LongString(), t.String(), offset, regOffset)
+ }
+ key := selKey{baseArg, offset, t.Size(), t}
+ w := x.commonArgs[key]
+ if w != nil && w.Uses != 0 { // do not reuse dead value
+ if toReplace != nil {
+ toReplace.copyOf(w)
+ if x.debug > 1 {
+ x.Printf("...replace %s\n", toReplace.LongString())
+ }
+ }
+ if x.debug > 1 {
+ x.Printf("-->%s\n", w.LongString())
+ }
+ return w
+ }
+
+ pa := x.prAssignForArg(baseArg)
+ if len(pa.Registers) == 0 { // Arg is on stack
+ frameOff := baseArg.Aux.(*ir.Name).FrameOffset()
+ if pa.Offset() != int32(frameOff+x.f.ABISelf.LocalsOffset()) {
+ panic(fmt.Errorf("Parameter assignment %d and OpArg.Aux frameOffset %d disagree, op=%s",
+ pa.Offset(), frameOff, baseArg.LongString()))
+ }
+ aux := baseArg.Aux
+ auxInt := baseArg.AuxInt + offset
+ if toReplace != nil && toReplace.Block == baseArg.Block {
+ toReplace.reset(OpArg)
+ toReplace.Aux = aux
+ toReplace.AuxInt = auxInt
+ toReplace.Type = t
+ w = toReplace
+ } else {
+ w = baseArg.Block.NewValue0IA(pos, OpArg, t, auxInt, aux)
+ }
+ x.commonArgs[key] = w
+ if toReplace != nil {
+ toReplace.copyOf(w)
+ }
+ if x.debug > 1 {
+ x.Printf("-->%s\n", w.LongString())
+ }
+ return w
+ }
+ // Arg is in registers
+ r := pa.Registers[regOffset]
+ op, auxInt := ArgOpAndRegisterFor(r, x.f.ABISelf)
+ if op == OpArgIntReg && t.IsFloat() || op == OpArgFloatReg && t.IsInteger() {
+ fmt.Printf("pa=%v\nx.f.OwnAux.abiInfo=%s\n",
+ pa.ToString(x.f.ABISelf, true),
+ x.f.OwnAux.abiInfo.String())
+ panic(fmt.Errorf("Op/Type mismatch, op=%s, type=%s", op.String(), t.String()))
+ }
+ if baseArg.AuxInt != 0 {
+ base.Fatalf("BaseArg %s bound to registers has non-zero AuxInt", baseArg.LongString())
+ }
+ aux := &AuxNameOffset{baseArg.Aux.(*ir.Name), offset}
+ if toReplace != nil && toReplace.Block == baseArg.Block {
+ toReplace.reset(op)
+ toReplace.Aux = aux
+ toReplace.AuxInt = auxInt
+ toReplace.Type = t
+ w = toReplace
+ } else {
+ w = baseArg.Block.NewValue0IA(pos, op, t, auxInt, aux)
+ }
+ x.commonArgs[key] = w
+ if toReplace != nil {
+ toReplace.copyOf(w)
+ }
+ if x.debug > 1 {
+ x.Printf("-->%s\n", w.LongString())
+ }
+ return w
+}
+
+// ArgOpAndRegisterFor converts an abi register index into an ssa Op and corresponding
+// arg register index.
+func ArgOpAndRegisterFor(r abi.RegIndex, abiConfig *abi.ABIConfig) (Op, int64) {
+ i := abiConfig.FloatIndexFor(r)
+ if i >= 0 { // float parameter register
+ return OpArgFloatReg, i
+ }
+ return OpArgIntReg, int64(r)
+}
diff --git a/src/cmd/compile/internal/ssa/export_test.go b/src/cmd/compile/internal/ssa/export_test.go
new file mode 100644
index 0000000..c4e87ec
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/export_test.go
@@ -0,0 +1,123 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "testing"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+)
+
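+// Export internal entry points and passes so that tests can exercise them directly.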
+var CheckFunc = checkFunc
+var Opt = opt
+var Deadcode = deadcode
+var Copyelim = copyelim
+
+var testCtxts = map[string]*obj.Link{
+ "amd64": obj.Linknew(&x86.Linkamd64),
+ "s390x": obj.Linknew(&s390x.Links390x),
+ "arm64": obj.Linknew(&arm64.Linkarm64),
+}
+
+func testConfig(tb testing.TB) *Conf { return testConfigArch(tb, "amd64") }
+func testConfigS390X(tb testing.TB) *Conf { return testConfigArch(tb, "s390x") }
+func testConfigARM64(tb testing.TB) *Conf { return testConfigArch(tb, "arm64") }
+
+func testConfigArch(tb testing.TB, arch string) *Conf {
+ ctxt, ok := testCtxts[arch]
+ if !ok {
+ tb.Fatalf("unknown arch %s", arch)
+ }
+ if ctxt.Arch.PtrSize != 8 {
+ tb.Fatal("testTypes is 64-bit only")
+ }
+ c := &Conf{
+ config: NewConfig(arch, testTypes, ctxt, true, false),
+ tb: tb,
+ }
+ return c
+}
+
+type Conf struct {
+ config *Config
+ tb testing.TB
+ fe Frontend
+}
+
+func (c *Conf) Frontend() Frontend {
+ if c.fe == nil {
+ c.fe = TestFrontend{t: c.tb, ctxt: c.config.ctxt}
+ }
+ return c.fe
+}
+
+// TestFrontend is a test-only frontend.
+// It assumes 64-bit integers and pointers.
+type TestFrontend struct {
+ t testing.TB
+ ctxt *obj.Link
+}
+
+func (TestFrontend) StringData(s string) *obj.LSym {
+ return nil
+}
+func (TestFrontend) Auto(pos src.XPos, t *types.Type) *ir.Name {
+ n := ir.NewNameAt(pos, &types.Sym{Name: "aFakeAuto"})
+ n.Class = ir.PAUTO
+ return n
+}
+func (d TestFrontend) SplitSlot(parent *LocalSlot, suffix string, offset int64, t *types.Type) LocalSlot {
+ return LocalSlot{N: parent.N, Type: t, Off: offset}
+}
+func (TestFrontend) Line(_ src.XPos) string {
+ return "unknown.go:0"
+}
+func (TestFrontend) AllocFrame(f *Func) {
+}
+func (d TestFrontend) Syslook(s string) *obj.LSym {
+ return d.ctxt.Lookup(s)
+}
+func (TestFrontend) UseWriteBarrier() bool {
+ return true // only writebarrier_test cares
+}
+func (TestFrontend) SetWBPos(pos src.XPos) {
+}
+
+func (d TestFrontend) Logf(msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Log() bool { return true }
+
+func (d TestFrontend) Fatalf(_ src.XPos, msg string, args ...interface{}) { d.t.Fatalf(msg, args...) }
+func (d TestFrontend) Warnl(_ src.XPos, msg string, args ...interface{}) { d.t.Logf(msg, args...) }
+func (d TestFrontend) Debug_checknil() bool { return false }
+
+func (d TestFrontend) MyImportPath() string {
+ return "my/import/path"
+}
+
+var testTypes Types
+
+func init() {
+ // TODO(mdempsky): Push into types.InitUniverse or typecheck.InitUniverse.
+ types.PtrSize = 8
+ types.RegSize = 8
+ types.MaxWidth = 1 << 50
+
+ typecheck.InitUniverse()
+ testTypes.SetTypPtrs()
+}
+
+func (d TestFrontend) DerefItab(sym *obj.LSym, off int64) *obj.LSym { return nil }
+
+func (d TestFrontend) CanSSA(t *types.Type) bool {
+ // There are no un-SSAable types in test land.
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/flagalloc.go b/src/cmd/compile/internal/ssa/flagalloc.go
new file mode 100644
index 0000000..61c45a6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flagalloc.go
@@ -0,0 +1,269 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// flagalloc allocates the flag register among all the flag-generating
+// instructions. Flag values are recomputed if they need to be
+// spilled/restored.
+func flagalloc(f *Func) {
+ // Compute the in-register flag value we want at the end of
+ // each block. This is basically a best-effort live variable
+ // analysis, so it can be much simpler than a full analysis.
+ end := make([]*Value, f.NumBlocks())
+ po := f.postorder()
+ for n := 0; n < 2; n++ {
+ for _, b := range po {
+ // Walk values backwards to figure out what flag
+ // value we want in the flag register at the start
+ // of the block.
+ var flag *Value
+ for _, c := range b.ControlValues() {
+ if c.Type.IsFlags() {
+ if flag != nil {
+ panic("cannot have multiple controls using flags")
+ }
+ flag = c
+ }
+ }
+ if flag == nil {
+ flag = end[b.ID]
+ }
+ for j := len(b.Values) - 1; j >= 0; j-- {
+ v := b.Values[j]
+ if v == flag {
+ flag = nil
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flag = a
+ }
+ }
+ }
+ if flag != nil {
+ for _, e := range b.Preds {
+ p := e.b
+ end[p.ID] = flag
+ }
+ }
+ }
+ }
+
+ // For blocks which have a flags control value, that's the only value
+ // we can leave in the flags register at the end of the block. (There
+ // is no place to put a flag regeneration instruction.)
+ for _, b := range f.Blocks {
+ if b.Kind == BlockDefer {
+ // Defer blocks internally use/clobber the flags value.
+ end[b.ID] = nil
+ continue
+ }
+ for _, v := range b.ControlValues() {
+ if v.Type.IsFlags() && end[b.ID] != v {
+ end[b.ID] = nil
+ }
+ }
+ }
+
+ // Compute which flags values will need to be spilled.
+ spill := map[ID]bool{}
+ for _, b := range f.Blocks {
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ }
+ for _, v := range b.Values {
+ for _, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // a will need to be restored here.
+ spill[a.ID] = true
+ flag = a
+ }
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for _, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ spill[v.ID] = true
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ spill[v.ID] = true
+ }
+ }
+
+ // Add flag spill and recomputation where they are needed.
+ var remove []*Value // values that should be checked for possible removal
+ var oldSched []*Value
+ for _, b := range f.Blocks {
+ oldSched = append(oldSched[:0], b.Values...)
+ b.Values = b.Values[:0]
+ // The current live flag value (the pre-flagalloc copy).
+ var flag *Value
+ if len(b.Preds) > 0 {
+ flag = end[b.Preds[0].b.ID]
+ // Note: the following condition depends on the lack of critical edges.
+ for _, e := range b.Preds[1:] {
+ p := e.b
+ if end[p.ID] != flag {
+ f.Fatalf("live flag in %s's predecessors not consistent", b)
+ }
+ }
+ }
+ for _, v := range oldSched {
+ if v.Op == OpPhi && v.Type.IsFlags() {
+ f.Fatalf("phi of flags not supported: %s", v.LongString())
+ }
+
+ // If v will be spilled, and v uses memory, then we must split it
+ // into a load + a flag generator.
+ if spill[v.ID] && v.MemoryArg() != nil {
+ remove = append(remove, v)
+ if !f.Config.splitLoad(v) {
+ f.Fatalf("can't split flag generator: %s", v.LongString())
+ }
+ }
+
+ // Make sure any flag arg of v is in the flags register.
+ // If not, recompute it.
+ for i, a := range v.Args {
+ if !a.Type.IsFlags() {
+ continue
+ }
+ if a == flag {
+ continue
+ }
+ // Recalculate a
+ c := copyFlags(a, b)
+ // Update v.
+ v.SetArg(i, c)
+ // Remember the most-recently computed flag value.
+ flag = a
+ }
+ // Issue v.
+ b.Values = append(b.Values, v)
+ if v.clobbersFlags() {
+ flag = nil
+ }
+ if v.Type.IsFlags() {
+ flag = v
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v != flag && v.Type.IsFlags() {
+ // Recalculate control value.
+ remove = append(remove, v)
+ c := copyFlags(v, b)
+ b.ReplaceControl(i, c)
+ flag = v
+ }
+ }
+ if v := end[b.ID]; v != nil && v != flag {
+ // Need to reissue flag generator for use by
+ // subsequent blocks.
+ remove = append(remove, v)
+ copyFlags(v, b)
+ // Note: this flag generator is not properly linked up
+ // with the flag users. This breaks the SSA representation.
+ // We could fix up the users with another pass, but for now
+ // we'll just leave it. (Regalloc has the same issue for
+ // standard regs, and it runs next.)
+ // For this reason, take care not to add this flag
+ // generator to the remove list.
+ }
+ }
+
+ // Save live flag state for later.
+ for _, b := range f.Blocks {
+ b.FlagsLiveAtEnd = end[b.ID] != nil
+ }
+
+ // Remove any now-dead values.
+ // The number of values to remove is likely small,
+ // and removing them requires processing all values in a block,
+ // so minimize the number of blocks that we touch.
+
+ // Shrink remove to contain only dead values, and clobber those dead values.
+ for i := 0; i < len(remove); i++ {
+ v := remove[i]
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ continue
+ }
+ // Remove v.
+ last := len(remove) - 1
+ remove[i] = remove[last]
+ remove[last] = nil
+ remove = remove[:last]
+ i-- // reprocess value at i
+ }
+
+ if len(remove) == 0 {
+ return
+ }
+
+ removeBlocks := f.newSparseSet(f.NumBlocks())
+ defer f.retSparseSet(removeBlocks)
+ for _, v := range remove {
+ removeBlocks.add(v.Block.ID)
+ }
+
+ // Process affected blocks, preserving value order.
+ for _, b := range f.Blocks {
+ if !removeBlocks.contains(b.ID) {
+ continue
+ }
+ i := 0
+ for j := 0; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.truncateValues(i)
+ }
+}
+
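+// clobbersFlags reports whether v overwrites the flags register, either because its
+// opcode is marked as clobbering flags or because it produces a flags value as part of a tuple.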
+func (v *Value) clobbersFlags() bool {
+ if opcodeTable[v.Op].clobberFlags {
+ return true
+ }
+ if v.Type.IsTuple() && (v.Type.FieldType(0).IsFlags() || v.Type.FieldType(1).IsFlags()) {
+ // This case handles the possibility where a flag value is generated but never used.
+ // In that case, there's no corresponding Select to overwrite the flags value,
+ // so we must consider flags clobbered by the tuple-generating instruction.
+ return true
+ }
+ return false
+}
+
+// copyFlags copies v (flag generator) into b, returns the copy.
+// If v's arg is also flags, copy recursively.
+func copyFlags(v *Value, b *Block) *Value {
+ flagsArgs := make(map[int]*Value)
+ for i, a := range v.Args {
+ if a.Type.IsFlags() || a.Type.IsTuple() {
+ flagsArgs[i] = copyFlags(a, b)
+ }
+ }
+ c := v.copyInto(b)
+ for i, a := range flagsArgs {
+ c.SetArg(i, a)
+ }
+ return c
+}
diff --git a/src/cmd/compile/internal/ssa/flags_amd64_test.s b/src/cmd/compile/internal/ssa/flags_amd64_test.s
new file mode 100644
index 0000000..7402f6b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_amd64_test.s
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ADDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ SUBQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVQ x+0(FP), AX
+ ANDQ y+8(FP), AX
+ PUSHFQ
+ POPQ AX
+ MOVQ AX, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_arm64_test.s b/src/cmd/compile/internal/ssa/flags_arm64_test.s
new file mode 100644
index 0000000..639d7e3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_arm64_test.s
@@ -0,0 +1,30 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+#include "textflag.h"
+
+TEXT ·asmAddFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMN R0, R1
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmSubFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ CMP R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ MOVD R0, ret+16(FP)
+ RET
+
+TEXT ·asmAndFlags(SB),NOSPLIT,$0-24
+ MOVD x+0(FP), R0
+ MOVD y+8(FP), R1
+ TST R1, R0
+ WORD $0xd53b4200 // MOVD NZCV, R0
+ BIC $0x30000000, R0 // clear C, V bits, as TST does not change those flags
+ MOVD R0, ret+16(FP)
+ RET
diff --git a/src/cmd/compile/internal/ssa/flags_test.go b/src/cmd/compile/internal/ssa/flags_test.go
new file mode 100644
index 0000000..0bc1097
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/flags_test.go
@@ -0,0 +1,109 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build amd64 || arm64
+// +build amd64 arm64
+
+package ssa
+
+// This file tests the functions addFlags64, subFlags64, and logicFlags64 by comparing
+// their results to what the chip calculates.
+
+import (
+ "runtime"
+ "testing"
+)
+
+func TestAddFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := addFlags64(x, y)
+ b := flagRegister2flagConstant(asmAddFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAdd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 9 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 9", len(coverage))
+ }
+}
+
+func TestSubFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := subFlags64(x, y)
+ b := flagRegister2flagConstant(asmSubFlags(x, y), true)
+ if a != b {
+ t.Errorf("asmSub diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 7 { // TODO: can we cover all outputs?
+ t.Errorf("coverage too small, got %d want 7", len(coverage))
+ }
+}
+
+func TestAndFlagsNative(t *testing.T) {
+ var numbers = []int64{
+ 1, 0, -1,
+ 2, -2,
+ 1<<63 - 1, -1 << 63,
+ }
+ coverage := map[flagConstant]bool{}
+ for _, x := range numbers {
+ for _, y := range numbers {
+ a := logicFlags64(x & y)
+ b := flagRegister2flagConstant(asmAndFlags(x, y), false)
+ if a != b {
+ t.Errorf("asmAnd diff: x=%x y=%x got=%s want=%s\n", x, y, a, b)
+ }
+ coverage[a] = true
+ }
+ }
+ if len(coverage) != 3 {
+ t.Errorf("coverage too small, got %d want 3", len(coverage))
+ }
+}
+
+func asmAddFlags(x, y int64) int
+func asmSubFlags(x, y int64) int
+func asmAndFlags(x, y int64) int
+
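+// flagRegister2flagConstant converts a raw flags-register value returned by the asm
+// helpers into a flagConstant, flipping the carry sense for amd64 subtraction so that
+// the result matches the arm-style convention used elsewhere in this package.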
+func flagRegister2flagConstant(x int, sub bool) flagConstant {
+ var fcb flagConstantBuilder
+ switch runtime.GOARCH {
+ case "amd64":
+ fcb.Z = x>>6&1 != 0
+ fcb.N = x>>7&1 != 0
+ fcb.C = x>>0&1 != 0
+ if sub {
+ // Convert from amd64-sense to arm-sense
+ fcb.C = !fcb.C
+ }
+ fcb.V = x>>11&1 != 0
+ case "arm64":
+ fcb.Z = x>>30&1 != 0
+ fcb.N = x>>31&1 != 0
+ fcb.C = x>>29&1 != 0
+ fcb.V = x>>28&1 != 0
+ default:
+ panic("unsupported architecture: " + runtime.GOARCH)
+ }
+ return fcb.encode()
+}
diff --git a/src/cmd/compile/internal/ssa/func.go b/src/cmd/compile/internal/ssa/func.go
new file mode 100644
index 0000000..7728a39
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func.go
@@ -0,0 +1,921 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "crypto/sha1"
+ "fmt"
+ "io"
+ "math"
+ "os"
+ "strings"
+)
+
+type writeSyncer interface {
+ io.Writer
+ Sync() error
+}
+
+// A Func represents a Go func declaration (or function literal) and its body.
+// This package compiles each Func independently.
+// Funcs are single-use; a new Func must be created for every compiled function.
+type Func struct {
+ Config *Config // architecture information
+ Cache *Cache // re-usable cache
+ fe Frontend // frontend state associated with this Func, callbacks into compiler frontend
+ pass *pass // current pass information (name, options, etc.)
+ Name string // e.g. NewFunc or (*Func).NumBlocks (no package prefix)
+ Type *types.Type // type signature of the function.
+ Blocks []*Block // unordered set of all basic blocks (note: not indexable by ID)
+ Entry *Block // the entry basic block
+
+ bid idAlloc // block ID allocator
+ vid idAlloc // value ID allocator
+
+ // Given an environment variable used for debug hash match,
+ // what file (if any) receives the yes/no logging?
+ logfiles map[string]writeSyncer
+ HTMLWriter *HTMLWriter // html writer, for debugging
+ DebugTest bool // default true unless $GOSSAHASH != ""; as a debugging aid, make new code conditional on this and use GOSSAHASH to binary search for failing cases
+ PrintOrHtmlSSA bool // true if GOSSAFUNC matches, true even if fe.Log() (spew phase results to stdout) is false. There's an odd dependence on this in debug.go for method logf.
+ ruleMatches map[string]int // number of times countRule was called during compilation for any given string
+ ABI0 *abi.ABIConfig // A copy, for no-sync access
+ ABI1 *abi.ABIConfig // A copy, for no-sync access
+ ABISelf *abi.ABIConfig // ABI for function being compiled
+ ABIDefault *abi.ABIConfig // ABI for rtcall and other no-parsed-signature/pragma functions.
+
+ scheduled bool // Values in Blocks are in final order
+ laidout bool // Blocks are ordered
+ NoSplit bool // true if function is marked as nosplit. Used by schedule check pass.
+ dumpFileSeq uint8 // the sequence numbers of dump file. (%s_%02d__%s.dump", funcname, dumpFileSeq, phaseName)
+
+ // when register allocation is done, maps value ids to locations
+ RegAlloc []Location
+
+ // map from LocalSlot to set of Values that we want to store in that slot.
+ NamedValues map[LocalSlot][]*Value
+ // Names is a copy of NamedValues.Keys. We keep a separate list
+ // of keys to make iteration order deterministic.
+ Names []*LocalSlot
+ // Canonicalize root/top-level local slots, and canonicalize their pieces.
+ // Because LocalSlot pieces refer to their parents with a pointer, this ensures that equivalent slots really are equal.
+ CanonicalLocalSlots map[LocalSlot]*LocalSlot
+ CanonicalLocalSplits map[LocalSlotSplitKey]*LocalSlot
+
+ // RegArgs is a slice of register-memory pairs that must be spilled and unspilled in the uncommon path of function entry.
+ RegArgs []Spill
+ // AuxCall describing parameters and results for this function.
+ OwnAux *AuxCall
+
+ // WBLoads is a list of Blocks that branch on the write
+ // barrier flag. Safe-points are disabled from the OpLoad that
+ // reads the write-barrier flag until the control flow rejoins
+ // below the two successors of this block.
+ WBLoads []*Block
+
+ freeValues *Value // free Values linked by argstorage[0]. All other fields except ID are 0/nil.
+ freeBlocks *Block // free Blocks linked by succstorage[0].b. All other fields except ID are 0/nil.
+
+ cachedPostorder []*Block // cached postorder traversal
+ cachedIdom []*Block // cached immediate dominators
+ cachedSdom SparseTree // cached dominator tree
+ cachedLoopnest *loopnest // cached loop nest information
+ cachedLineStarts *xposmap // cached map/set of xpos to integers
+
+ auxmap auxmap // map from aux values to opaque ids used by CSE
+ constants map[int64][]*Value // constants cache, keyed by constant value; users must check value's Op and Type
+}
+
+type LocalSlotSplitKey struct {
+ parent *LocalSlot
+ Off int64 // offset of slot in N
+ Type *types.Type // type of slot
+}
+
+// NewFunc returns a new, empty function object.
+// Caller must set f.Config and f.Cache before using f.
+func NewFunc(fe Frontend) *Func {
+ return &Func{fe: fe, NamedValues: make(map[LocalSlot][]*Value), CanonicalLocalSlots: make(map[LocalSlot]*LocalSlot), CanonicalLocalSplits: make(map[LocalSlotSplitKey]*LocalSlot)}
+}
+
+// NumBlocks returns an integer larger than the id of any Block in the Func.
+func (f *Func) NumBlocks() int {
+ return f.bid.num()
+}
+
+// NumValues returns an integer larger than the id of any Value in the Func.
+func (f *Func) NumValues() int {
+ return f.vid.num()
+}
+
+// newSparseSet returns a sparse set that can store at least up to n integers.
+func (f *Func) newSparseSet(n int) *sparseSet {
+ for i, scr := range f.Cache.scrSparseSet {
+ if scr != nil && scr.cap() >= n {
+ f.Cache.scrSparseSet[i] = nil
+ scr.clear()
+ return scr
+ }
+ }
+ return newSparseSet(n)
+}
+
+// retSparseSet returns a sparse set to the func's cache of sparse
+// sets to be reused by f.newSparseSet.
+func (f *Func) retSparseSet(ss *sparseSet) {
+ for i, scr := range f.Cache.scrSparseSet {
+ if scr == nil {
+ f.Cache.scrSparseSet[i] = ss
+ return
+ }
+ }
+ f.Cache.scrSparseSet = append(f.Cache.scrSparseSet, ss)
+}
+
+// newSparseMap returns a sparse map that can store at least up to n integers.
+func (f *Func) newSparseMap(n int) *sparseMap {
+ for i, scr := range f.Cache.scrSparseMap {
+ if scr != nil && scr.cap() >= n {
+ f.Cache.scrSparseMap[i] = nil
+ scr.clear()
+ return scr
+ }
+ }
+ return newSparseMap(n)
+}
+
+// retSparseMap returns a sparse map to the func's cache of sparse
+// maps to be reused by f.newSparseMap.
+func (f *Func) retSparseMap(ss *sparseMap) {
+ for i, scr := range f.Cache.scrSparseMap {
+ if scr == nil {
+ f.Cache.scrSparseMap[i] = ss
+ return
+ }
+ }
+ f.Cache.scrSparseMap = append(f.Cache.scrSparseMap, ss)
+}
+
+// newPoset returns a new poset from the internal cache
+func (f *Func) newPoset() *poset {
+ if len(f.Cache.scrPoset) > 0 {
+ po := f.Cache.scrPoset[len(f.Cache.scrPoset)-1]
+ f.Cache.scrPoset = f.Cache.scrPoset[:len(f.Cache.scrPoset)-1]
+ return po
+ }
+ return newPoset()
+}
+
+// retPoset returns a poset to the internal cache
+func (f *Func) retPoset(po *poset) {
+ f.Cache.scrPoset = append(f.Cache.scrPoset, po)
+}
+
+// newDeadcodeLive returns a slice for the
+// deadcode pass to use to indicate which values are live.
+func (f *Func) newDeadcodeLive() []bool {
+ r := f.Cache.deadcode.live
+ f.Cache.deadcode.live = nil
+ return r
+}
+
+// retDeadcodeLive returns a deadcode live value slice for re-use.
+func (f *Func) retDeadcodeLive(live []bool) {
+ f.Cache.deadcode.live = live
+}
+
+// newDeadcodeLiveOrderStmts returns a slice for the
+// deadcode pass to use to indicate which values
+// need special treatment for statement boundaries.
+func (f *Func) newDeadcodeLiveOrderStmts() []*Value {
+ r := f.Cache.deadcode.liveOrderStmts
+ f.Cache.deadcode.liveOrderStmts = nil
+ return r
+}
+
+// retDeadcodeLiveOrderStmts returns a deadcode liveOrderStmts slice for re-use.
+func (f *Func) retDeadcodeLiveOrderStmts(liveOrderStmts []*Value) {
+ f.Cache.deadcode.liveOrderStmts = liveOrderStmts
+}
+
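+// localSlotAddr returns the canonical *LocalSlot for slot, creating and caching one if needed.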
+func (f *Func) localSlotAddr(slot LocalSlot) *LocalSlot {
+ a, ok := f.CanonicalLocalSlots[slot]
+ if !ok {
+ a = new(LocalSlot)
+ *a = slot // don't escape slot
+ f.CanonicalLocalSlots[slot] = a
+ }
+ return a
+}
+
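+// SplitString returns the canonical .ptr and .len slots for a string-typed local slot.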
+func (f *Func) SplitString(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ ptrType := types.NewPtr(types.Types[types.TUINT8])
+ lenType := types.Types[types.TINT]
+ // Split this string up into two separate variables.
+ p := f.SplitSlot(name, ".ptr", 0, ptrType)
+ l := f.SplitSlot(name, ".len", ptrType.Size(), lenType)
+ return p, l
+}
+
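+// SplitInterface returns the canonical .itab (or .type, for empty interfaces) and .data
+// slots for an interface-typed local slot.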
+func (f *Func) SplitInterface(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ n := name.N
+ u := types.Types[types.TUINTPTR]
+ t := types.NewPtr(types.Types[types.TUINT8])
+ // Split this interface up into two separate variables.
+ sfx := ".itab"
+ if n.Type().IsEmptyInterface() {
+ sfx = ".type"
+ }
+ c := f.SplitSlot(name, sfx, 0, u) // see comment in typebits.Set
+ d := f.SplitSlot(name, ".data", u.Size(), t)
+ return c, d
+}
+
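+// SplitSlice returns the canonical .ptr, .len, and .cap slots for a slice-typed local slot.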
+func (f *Func) SplitSlice(name *LocalSlot) (*LocalSlot, *LocalSlot, *LocalSlot) {
+ ptrType := types.NewPtr(name.Type.Elem())
+ lenType := types.Types[types.TINT]
+ p := f.SplitSlot(name, ".ptr", 0, ptrType)
+ l := f.SplitSlot(name, ".len", ptrType.Size(), lenType)
+ c := f.SplitSlot(name, ".cap", ptrType.Size()+lenType.Size(), lenType)
+ return p, l, c
+}
+
+func (f *Func) SplitComplex(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ s := name.Type.Size() / 2
+ var t *types.Type
+ if s == 8 {
+ t = types.Types[types.TFLOAT64]
+ } else {
+ t = types.Types[types.TFLOAT32]
+ }
+ r := f.SplitSlot(name, ".real", 0, t)
+ i := f.SplitSlot(name, ".imag", t.Size(), t)
+ return r, i
+}
+
+func (f *Func) SplitInt64(name *LocalSlot) (*LocalSlot, *LocalSlot) {
+ var t *types.Type
+ if name.Type.IsSigned() {
+ t = types.Types[types.TINT32]
+ } else {
+ t = types.Types[types.TUINT32]
+ }
+ if f.Config.BigEndian {
+ return f.SplitSlot(name, ".hi", 0, t), f.SplitSlot(name, ".lo", t.Size(), types.Types[types.TUINT32])
+ }
+ return f.SplitSlot(name, ".hi", t.Size(), t), f.SplitSlot(name, ".lo", 0, types.Types[types.TUINT32])
+}
+
+func (f *Func) SplitStruct(name *LocalSlot, i int) *LocalSlot {
+ st := name.Type
+ return f.SplitSlot(name, st.FieldName(i), st.FieldOff(i), st.FieldType(i))
+}
+func (f *Func) SplitArray(name *LocalSlot) *LocalSlot {
+ n := name.N
+ at := name.Type
+ if at.NumElem() != 1 {
+ base.FatalfAt(n.Pos(), "bad array size")
+ }
+ et := at.Elem()
+ return f.SplitSlot(name, "[0]", 0, et)
+}
+
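+// SplitSlot returns a canonical LocalSlot for the piece of name of type t at offset,
+// asking the frontend to create the split the first time it is requested.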
+func (f *Func) SplitSlot(name *LocalSlot, sfx string, offset int64, t *types.Type) *LocalSlot {
+ lssk := LocalSlotSplitKey{name, offset, t}
+ if als, ok := f.CanonicalLocalSplits[lssk]; ok {
+ return als
+ }
+ // Note: the _ field may appear several times. But
+ // have no fear, identically-named but distinct Autos are
+ // ok, albeit maybe confusing for a debugger.
+ ls := f.fe.SplitSlot(name, sfx, offset, t)
+ f.CanonicalLocalSplits[lssk] = &ls
+ return &ls
+}
+
+// newValue allocates a new Value with the given fields and places it at the end of b.Values.
+func (f *Func) newValue(op Op, t *types.Type, b *Block, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = b
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ b.Values = append(b.Values, v)
+ return v
+}
+
+// newValueNoBlock allocates a new Value with the given fields.
+// The returned value is not placed in any block. Once the caller
+// decides on a block b, it must set v.Block to b and append
+// the returned value to b.Values.
+func (f *Func) newValueNoBlock(op Op, t *types.Type, pos src.XPos) *Value {
+ var v *Value
+ if f.freeValues != nil {
+ v = f.freeValues
+ f.freeValues = v.argstorage[0]
+ v.argstorage[0] = nil
+ } else {
+ ID := f.vid.get()
+ if int(ID) < len(f.Cache.values) {
+ v = &f.Cache.values[ID]
+ v.ID = ID
+ } else {
+ v = &Value{ID: ID}
+ }
+ }
+ v.Op = op
+ v.Type = t
+ v.Block = nil // caller must fix this.
+ if notStmtBoundary(op) {
+ pos = pos.WithNotStmt()
+ }
+ v.Pos = pos
+ return v
+}
+
+// LogStat writes a string key and int value as a warning in a
+// tab-separated format easily handled by spreadsheets or awk.
+// File names, lines, and function names are included to provide enough
+// context to allow item-by-item comparisons across runs.
+// For example:
+// awk 'BEGIN {FS="\t"} $3~/TIME/{sum+=$4} END{print "t(ns)=",sum}' t.log
+func (f *Func) LogStat(key string, args ...interface{}) {
+ value := ""
+ for _, a := range args {
+ value += fmt.Sprintf("\t%v", a)
+ }
+ n := "missing_pass"
+ if f.pass != nil {
+ n = strings.Replace(f.pass.name, " ", "_", -1)
+ }
+ f.Warnl(f.Entry.Pos, "\t%s\t%s%s\t%s", n, key, value, f.Name)
+}
+
+// unCacheLine removes v from f's constant cache "line" for aux,
+// resets v.InCache when it is found (and removed),
+// and returns whether v was found in that line.
+func (f *Func) unCacheLine(v *Value, aux int64) bool {
+ vv := f.constants[aux]
+ for i, cv := range vv {
+ if v == cv {
+ vv[i] = vv[len(vv)-1]
+ vv[len(vv)-1] = nil
+ f.constants[aux] = vv[0 : len(vv)-1]
+ v.InCache = false
+ return true
+ }
+ }
+ return false
+}
+
+// unCache removes v from f's constant cache.
+func (f *Func) unCache(v *Value) {
+ if v.InCache {
+ aux := v.AuxInt
+ if f.unCacheLine(v, aux) {
+ return
+ }
+ if aux == 0 {
+ switch v.Op {
+ case OpConstNil:
+ aux = constNilMagic
+ case OpConstSlice:
+ aux = constSliceMagic
+ case OpConstString:
+ aux = constEmptyStringMagic
+ case OpConstInterface:
+ aux = constInterfaceMagic
+ }
+ if aux != 0 && f.unCacheLine(v, aux) {
+ return
+ }
+ }
+ f.Fatalf("unCached value %s not found in cache, auxInt=0x%x, adjusted aux=0x%x", v.LongString(), v.AuxInt, aux)
+ }
+}
+
+// freeValue frees a value. It must no longer be referenced or have any args.
+func (f *Func) freeValue(v *Value) {
+ if v.Block == nil {
+ f.Fatalf("trying to free an already freed value")
+ }
+ if v.Uses != 0 {
+ f.Fatalf("value %s still has %d uses", v, v.Uses)
+ }
+ if len(v.Args) != 0 {
+ f.Fatalf("value %s still has %d args", v, len(v.Args))
+ }
+ // Clear everything but ID (which we reuse).
+ id := v.ID
+ if v.InCache {
+ f.unCache(v)
+ }
+ *v = Value{}
+ v.ID = id
+ v.argstorage[0] = f.freeValues
+ f.freeValues = v
+}
+
+// NewBlock allocates a new Block of the given kind and places it at the end of f.Blocks.
+func (f *Func) NewBlock(kind BlockKind) *Block {
+ var b *Block
+ if f.freeBlocks != nil {
+ b = f.freeBlocks
+ f.freeBlocks = b.succstorage[0].b
+ b.succstorage[0].b = nil
+ } else {
+ ID := f.bid.get()
+ if int(ID) < len(f.Cache.blocks) {
+ b = &f.Cache.blocks[ID]
+ b.ID = ID
+ } else {
+ b = &Block{ID: ID}
+ }
+ }
+ b.Kind = kind
+ b.Func = f
+ b.Preds = b.predstorage[:0]
+ b.Succs = b.succstorage[:0]
+ b.Values = b.valstorage[:0]
+ f.Blocks = append(f.Blocks, b)
+ f.invalidateCFG()
+ return b
+}
+
+func (f *Func) freeBlock(b *Block) {
+ if b.Func == nil {
+ f.Fatalf("trying to free an already freed block")
+ }
+ // Clear everything but ID (which we reuse).
+ id := b.ID
+ *b = Block{}
+ b.ID = id
+ b.succstorage[0].b = f.freeBlocks
+ f.freeBlocks = b
+}
+
+// NewValue0 returns a new value in the block with no arguments and zero aux values.
+func (b *Block) NewValue0(pos src.XPos, op Op, t *types.Type) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0I returns a new value in the block with no arguments and an auxint value.
+func (b *Block) NewValue0I(pos src.XPos, op Op, t *types.Type, auxint int64) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0A returns a new value in the block with no arguments and an aux value.
+func (b *Block) NewValue0A(pos src.XPos, op Op, t *types.Type, aux Aux) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue0IA returns a new value in the block with no arguments and both an auxint and an aux value.
+func (b *Block) NewValue0IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:0]
+ return v
+}
+
+// NewValue1 returns a new value in the block with one argument and zero aux values.
+func (b *Block) NewValue1(pos src.XPos, op Op, t *types.Type, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1I returns a new value in the block with one argument and an auxint value.
+func (b *Block) NewValue1I(pos src.XPos, op Op, t *types.Type, auxint int64, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1A returns a new value in the block with one argument and an aux value.
+func (b *Block) NewValue1A(pos src.XPos, op Op, t *types.Type, aux Aux, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue1IA returns a new value in the block with one argument and both an auxint and aux values.
+func (b *Block) NewValue1IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:1]
+ v.argstorage[0] = arg
+ arg.Uses++
+ return v
+}
+
+// NewValue2 returns a new value in the block with two arguments and zero aux values.
+func (b *Block) NewValue2(pos src.XPos, op Op, t *types.Type, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2A returns a new value in the block with two arguments and one aux value.
+func (b *Block) NewValue2A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2I returns a new value in the block with two arguments and an auxint value.
+func (b *Block) NewValue2I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue2IA returns a new value in the block with two arguments and both an auxint and aux values.
+func (b *Block) NewValue2IA(pos src.XPos, op Op, t *types.Type, auxint int64, aux Aux, arg0, arg1 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Aux = aux
+ v.Args = v.argstorage[:2]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ arg0.Uses++
+ arg1.Uses++
+ return v
+}
+
+// NewValue3 returns a new value in the block with three arguments and zero aux values.
+func (b *Block) NewValue3(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3I returns a new value in the block with three arguments and an auxint value.
+func (b *Block) NewValue3I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue3A returns a new value in the block with three arguments and an aux value.
+func (b *Block) NewValue3A(pos src.XPos, op Op, t *types.Type, aux Aux, arg0, arg1, arg2 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Aux = aux
+ v.Args = v.argstorage[:3]
+ v.argstorage[0] = arg0
+ v.argstorage[1] = arg1
+ v.argstorage[2] = arg2
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ return v
+}
+
+// NewValue4 returns a new value in the block with four arguments and zero aux values.
+func (b *Block) NewValue4(pos src.XPos, op Op, t *types.Type, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = 0
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// NewValue4I returns a new value in the block with four arguments and an auxint value.
+func (b *Block) NewValue4I(pos src.XPos, op Op, t *types.Type, auxint int64, arg0, arg1, arg2, arg3 *Value) *Value {
+ v := b.Func.newValue(op, t, b, pos)
+ v.AuxInt = auxint
+ v.Args = []*Value{arg0, arg1, arg2, arg3}
+ arg0.Uses++
+ arg1.Uses++
+ arg2.Uses++
+ arg3.Uses++
+ return v
+}
+
+// constVal returns a constant value for c.
+func (f *Func) constVal(op Op, t *types.Type, c int64, setAuxInt bool) *Value {
+ if f.constants == nil {
+ f.constants = make(map[int64][]*Value)
+ }
+ vv := f.constants[c]
+ for _, v := range vv {
+ if v.Op == op && v.Type.Compare(t) == types.CMPeq {
+ if setAuxInt && v.AuxInt != c {
+ panic(fmt.Sprintf("cached const %s should have AuxInt of %d", v.LongString(), c))
+ }
+ return v
+ }
+ }
+ var v *Value
+ if setAuxInt {
+ v = f.Entry.NewValue0I(src.NoXPos, op, t, c)
+ } else {
+ v = f.Entry.NewValue0(src.NoXPos, op, t)
+ }
+ f.constants[c] = append(vv, v)
+ v.InCache = true
+ return v
+}
+
+// These magic auxint values let us easily cache non-numeric constants
+// using the same constants map while making collisions unlikely.
+// These values are unlikely to occur in regular code and
+// are easy to grep for in case of bugs.
+const (
+ constSliceMagic = 1122334455
+ constInterfaceMagic = 2233445566
+ constNilMagic = 3344556677
+ constEmptyStringMagic = 4455667788
+)
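+
+// For example, f.ConstInt64(t, 7) files its value under map key 7 with
+// AuxInt set to 7, while f.ConstNil(t) files its value under constNilMagic
+// with AuxInt left at zero (constVal is called with setAuxInt=false).
+// A key collision with a real integer constant is harmless, because constVal
+// also matches on Op and Type before reusing a cached value.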
+
+// ConstBool returns a bool constant representing its argument.
+func (f *Func) ConstBool(t *types.Type, c bool) *Value {
+ i := int64(0)
+ if c {
+ i = 1
+ }
+ return f.constVal(OpConstBool, t, i, true)
+}
+func (f *Func) ConstInt8(t *types.Type, c int8) *Value {
+ return f.constVal(OpConst8, t, int64(c), true)
+}
+func (f *Func) ConstInt16(t *types.Type, c int16) *Value {
+ return f.constVal(OpConst16, t, int64(c), true)
+}
+func (f *Func) ConstInt32(t *types.Type, c int32) *Value {
+ return f.constVal(OpConst32, t, int64(c), true)
+}
+func (f *Func) ConstInt64(t *types.Type, c int64) *Value {
+ return f.constVal(OpConst64, t, c, true)
+}
+func (f *Func) ConstFloat32(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst32F, t, int64(math.Float64bits(float64(float32(c)))), true)
+}
+func (f *Func) ConstFloat64(t *types.Type, c float64) *Value {
+ return f.constVal(OpConst64F, t, int64(math.Float64bits(c)), true)
+}
+
+func (f *Func) ConstSlice(t *types.Type) *Value {
+ return f.constVal(OpConstSlice, t, constSliceMagic, false)
+}
+func (f *Func) ConstInterface(t *types.Type) *Value {
+ return f.constVal(OpConstInterface, t, constInterfaceMagic, false)
+}
+func (f *Func) ConstNil(t *types.Type) *Value {
+ return f.constVal(OpConstNil, t, constNilMagic, false)
+}
+func (f *Func) ConstEmptyString(t *types.Type) *Value {
+ v := f.constVal(OpConstString, t, constEmptyStringMagic, false)
+ v.Aux = StringToAux("")
+ return v
+}
+func (f *Func) ConstOffPtrSP(t *types.Type, c int64, sp *Value) *Value {
+ v := f.constVal(OpOffPtr, t, c, true)
+ if len(v.Args) == 0 {
+ v.AddArg(sp)
+ }
+ return v
+}
+
+func (f *Func) Frontend() Frontend { return f.fe }
+func (f *Func) Warnl(pos src.XPos, msg string, args ...interface{}) { f.fe.Warnl(pos, msg, args...) }
+func (f *Func) Logf(msg string, args ...interface{}) { f.fe.Logf(msg, args...) }
+func (f *Func) Log() bool { return f.fe.Log() }
+
+func (f *Func) Fatalf(msg string, args ...interface{}) {
+ stats := "crashed"
+ if f.Log() {
+ f.Logf(" pass %s end %s\n", f.pass.name, stats)
+ printFunc(f)
+ }
+ if f.HTMLWriter != nil {
+ f.HTMLWriter.WritePhase(f.pass.name, fmt.Sprintf("%s <span class=\"stats\">%s</span>", f.pass.name, stats))
+ f.HTMLWriter.flushPhases()
+ }
+ f.fe.Fatalf(f.Entry.Pos, msg, args...)
+}
+
+// postorder returns the reachable blocks in f in a postorder traversal.
+func (f *Func) postorder() []*Block {
+ if f.cachedPostorder == nil {
+ f.cachedPostorder = postorder(f)
+ }
+ return f.cachedPostorder
+}
+
+func (f *Func) Postorder() []*Block {
+ return f.postorder()
+}
+
+// Idom returns a map from block ID to the immediate dominator of that block.
+// f.Entry.ID maps to nil. Unreachable blocks map to nil as well.
+func (f *Func) Idom() []*Block {
+ if f.cachedIdom == nil {
+ f.cachedIdom = dominators(f)
+ }
+ return f.cachedIdom
+}
+
+// Sdom returns a sparse tree representing the dominator relationships
+// among the blocks of f.
+func (f *Func) Sdom() SparseTree {
+ if f.cachedSdom == nil {
+ f.cachedSdom = newSparseTree(f, f.Idom())
+ }
+ return f.cachedSdom
+}
+
+// loopnest returns the loop nest information for f.
+func (f *Func) loopnest() *loopnest {
+ if f.cachedLoopnest == nil {
+ f.cachedLoopnest = loopnestfor(f)
+ }
+ return f.cachedLoopnest
+}
+
+// invalidateCFG tells f that its CFG has changed.
+func (f *Func) invalidateCFG() {
+ f.cachedPostorder = nil
+ f.cachedIdom = nil
+ f.cachedSdom = nil
+ f.cachedLoopnest = nil
+}
+
+// DebugHashMatch reports whether environment variable evname
+// 1) is empty (this is a special, more quickly implemented case of 3)
+// 2) is "y" or "Y" (while "n" or "N" forces a false result)
+// 3) is a suffix of the sha1 hash of name
+// 4) is a suffix of the environment variable
+// fmt.Sprintf("%s%d", evname, i) for some i >= 0,
+// provided that all such variables are nonempty for 0 <= j <= i.
+// Otherwise it returns false.
+// When true is returned the message
+// "%s triggered %s\n", evname, name
+// is printed to the file named in environment variable
+// GSHS_LOGFILE,
+// or to standard out if that is empty or there is an error
+// opening the file.
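+// For example, to hunt for a miscompiled function, a pass guarded by
+// DebugHashMatch("GOSSAHASH") (the variable name is only illustrative here)
+// can be bisected by supplying successively longer binary suffixes:
+//
+//	GOSSAHASH=0110 GSHS_LOGFILE=/tmp/gossahash.log go build ./pkg
+//
+// Each additional suffix bit roughly halves the set of functions that match,
+// and every matching function is recorded in the log file.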
+func (f *Func) DebugHashMatch(evname string) bool {
+ name := f.fe.MyImportPath() + "." + f.Name
+ evhash := os.Getenv(evname)
+ switch evhash {
+ case "":
+ return true // default behavior with no EV is "on"
+ case "y", "Y":
+ f.logDebugHashMatch(evname, name)
+ return true
+ case "n", "N":
+ return false
+ }
+ // Check the hash of the name against a partial input hash.
+ // We use this feature to do a binary search to
+ // find a function that is incorrectly compiled.
+ hstr := ""
+ for _, b := range sha1.Sum([]byte(name)) {
+ hstr += fmt.Sprintf("%08b", b)
+ }
+
+ if strings.HasSuffix(hstr, evhash) {
+ f.logDebugHashMatch(evname, name)
+ return true
+ }
+
+ // Iteratively try additional hashes to allow tests for multi-point
+ // failure.
+ for i := 0; true; i++ {
+ ev := fmt.Sprintf("%s%d", evname, i)
+ evv := os.Getenv(ev)
+ if evv == "" {
+ break
+ }
+ if strings.HasSuffix(hstr, evv) {
+ f.logDebugHashMatch(ev, name)
+ return true
+ }
+ }
+ return false
+}
+
+func (f *Func) logDebugHashMatch(evname, name string) {
+ if f.logfiles == nil {
+ f.logfiles = make(map[string]writeSyncer)
+ }
+ file := f.logfiles[evname]
+ if file == nil {
+ file = os.Stdout
+ if tmpfile := os.Getenv("GSHS_LOGFILE"); tmpfile != "" {
+ var err error
+ file, err = os.OpenFile(tmpfile, os.O_RDWR|os.O_CREATE|os.O_APPEND, 0666)
+ if err != nil {
+ f.Fatalf("could not open hash-testing logfile %s", tmpfile)
+ }
+ }
+ f.logfiles[evname] = file
+ }
+ fmt.Fprintf(file, "%s triggered %s\n", evname, name)
+ file.Sync()
+}
+
+func DebugNameMatch(evname, name string) bool {
+ return os.Getenv(evname) == name
+}
+
+func (f *Func) spSb() (sp, sb *Value) {
+	initpos := src.NoXPos // These are originally created with no position in ssa.go; if they are optimized out and then recreated, they should be the same.
+ for _, v := range f.Entry.Values {
+ if v.Op == OpSB {
+ sb = v
+ }
+ if v.Op == OpSP {
+ sp = v
+ }
+ if sb != nil && sp != nil {
+ return
+ }
+ }
+ if sb == nil {
+ sb = f.Entry.NewValue0(initpos.WithNotStmt(), OpSB, f.Config.Types.Uintptr)
+ }
+ if sp == nil {
+ sp = f.Entry.NewValue0(initpos.WithNotStmt(), OpSP, f.Config.Types.Uintptr)
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/ssa/func_test.go b/src/cmd/compile/internal/ssa/func_test.go
new file mode 100644
index 0000000..276c444
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/func_test.go
@@ -0,0 +1,484 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains some utility functions to help define Funcs for testing.
+// As an example, the following func
+//
+// b1:
+// v1 = InitMem <mem>
+// Plain -> b2
+// b2:
+// Exit v1
+// b3:
+// v2 = Const <bool> [true]
+// If v2 -> b3 b2
+//
+// can be defined as
+//
+// fun := Fun("entry",
+// Bloc("entry",
+// Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+// Goto("exit")),
+// Bloc("exit",
+// Exit("mem")),
+// Bloc("deadblock",
+// Valu("deadval", OpConstBool, c.config.Types.Bool, 0, true),
+// If("deadval", "deadblock", "exit")))
+//
+// and the Blocks or Values used in the Func can be accessed
+// like this:
+// fun.blocks["entry"] or fun.values["deadval"]
+
+package ssa
+
+// TODO(matloob): Choose better names for Fun, Bloc, Goto, etc.
+// TODO(matloob): Write a parser for the Func disassembly. Maybe
+// the parser can be used instead of Fun.
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "fmt"
+ "reflect"
+ "testing"
+)
+
+// Equiv compares two Funcs for equivalence. Their CFGs must be isomorphic,
+// and their values must correspond.
+// Requires that values and predecessors are in the same order, even
+// though Funcs could be equivalent when they are not.
+// TODO(matloob): Allow values and predecessors to be in different
+// orders if the CFGs are otherwise equivalent.
+func Equiv(f, g *Func) bool {
+ valcor := make(map[*Value]*Value)
+ var checkVal func(fv, gv *Value) bool
+ checkVal = func(fv, gv *Value) bool {
+ if fv == nil && gv == nil {
+ return true
+ }
+ if valcor[fv] == nil && valcor[gv] == nil {
+ valcor[fv] = gv
+ valcor[gv] = fv
+ // Ignore ids. Ops and Types are compared for equality.
+ // TODO(matloob): Make sure types are canonical and can
+ // be compared for equality.
+ if fv.Op != gv.Op || fv.Type != gv.Type || fv.AuxInt != gv.AuxInt {
+ return false
+ }
+ if !reflect.DeepEqual(fv.Aux, gv.Aux) {
+ // This makes the assumption that aux values can be compared
+ // using DeepEqual.
+ // TODO(matloob): Aux values may be *gc.Sym pointers in the near
+ // future. Make sure they are canonical.
+ return false
+ }
+ if len(fv.Args) != len(gv.Args) {
+ return false
+ }
+ for i := range fv.Args {
+ if !checkVal(fv.Args[i], gv.Args[i]) {
+ return false
+ }
+ }
+ }
+ return valcor[fv] == gv && valcor[gv] == fv
+ }
+ blkcor := make(map[*Block]*Block)
+ var checkBlk func(fb, gb *Block) bool
+ checkBlk = func(fb, gb *Block) bool {
+ if blkcor[fb] == nil && blkcor[gb] == nil {
+ blkcor[fb] = gb
+ blkcor[gb] = fb
+ // ignore ids
+ if fb.Kind != gb.Kind {
+ return false
+ }
+ if len(fb.Values) != len(gb.Values) {
+ return false
+ }
+ for i := range fb.Values {
+ if !checkVal(fb.Values[i], gb.Values[i]) {
+ return false
+ }
+ }
+ if len(fb.Succs) != len(gb.Succs) {
+ return false
+ }
+ for i := range fb.Succs {
+ if !checkBlk(fb.Succs[i].b, gb.Succs[i].b) {
+ return false
+ }
+ }
+ if len(fb.Preds) != len(gb.Preds) {
+ return false
+ }
+ for i := range fb.Preds {
+ if !checkBlk(fb.Preds[i].b, gb.Preds[i].b) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return blkcor[fb] == gb && blkcor[gb] == fb
+ }
+
+ return checkBlk(f.Entry, g.Entry)
+}
+
+// fun is the return type of Fun. It contains the created func
+// itself as well as indexes from block and value names into the
+// corresponding Blocks and Values.
+type fun struct {
+ f *Func
+ blocks map[string]*Block
+ values map[string]*Value
+}
+
+var emptyPass pass = pass{
+ name: "empty pass",
+}
+
+// AuxCallLSym returns an AuxCall initialized with an LSym that should pass "check"
+// as the Aux of a static call.
+func AuxCallLSym(name string) *AuxCall {
+ return &AuxCall{Fn: &obj.LSym{}}
+}
+
+// Fun takes the name of an entry bloc and a series of Bloc calls, and
+// returns a fun containing the composed Func. entry must be a name
+// supplied to one of the Bloc functions. Each of the bloc names and
+// valu names should be unique across the Fun.
+func (c *Conf) Fun(entry string, blocs ...bloc) fun {
+ f := NewFunc(c.Frontend())
+ f.Config = c.config
+ // TODO: Either mark some SSA tests as t.Parallel,
+ // or set up a shared Cache and Reset it between tests.
+ // But not both.
+ f.Cache = new(Cache)
+ f.pass = &emptyPass
+ f.cachedLineStarts = newXposmap(map[int]lineRange{0: {0, 100}, 1: {0, 100}, 2: {0, 100}, 3: {0, 100}, 4: {0, 100}})
+
+ blocks := make(map[string]*Block)
+ values := make(map[string]*Value)
+ // Create all the blocks and values.
+ for _, bloc := range blocs {
+ b := f.NewBlock(bloc.control.kind)
+ blocks[bloc.name] = b
+ for _, valu := range bloc.valus {
+ // args are filled in the second pass.
+ values[valu.name] = b.NewValue0IA(src.NoXPos, valu.op, valu.t, valu.auxint, valu.aux)
+ }
+ }
+ // Connect the blocks together and specify control values.
+ f.Entry = blocks[entry]
+ for _, bloc := range blocs {
+ b := blocks[bloc.name]
+ c := bloc.control
+ // Specify control values.
+ if c.control != "" {
+ cval, ok := values[c.control]
+ if !ok {
+ f.Fatalf("control value for block %s missing", bloc.name)
+ }
+ b.SetControl(cval)
+ }
+ // Fill in args.
+ for _, valu := range bloc.valus {
+ v := values[valu.name]
+ for _, arg := range valu.args {
+ a, ok := values[arg]
+ if !ok {
+ b.Fatalf("arg %s missing for value %s in block %s",
+ arg, valu.name, bloc.name)
+ }
+ v.AddArg(a)
+ }
+ }
+ // Connect to successors.
+ for _, succ := range c.succs {
+ b.AddEdgeTo(blocks[succ])
+ }
+ }
+ return fun{f, blocks, values}
+}
+
+// Bloc defines a block for Fun. The bloc name should be unique
+// across the containing Fun. entries should consist of calls to Valu,
+// as well as one call to Goto, If, or Exit to specify the block kind.
+func Bloc(name string, entries ...interface{}) bloc {
+ b := bloc{}
+ b.name = name
+ seenCtrl := false
+ for _, e := range entries {
+ switch v := e.(type) {
+ case ctrl:
+ // there should be exactly one Ctrl entry.
+ if seenCtrl {
+ panic(fmt.Sprintf("already seen control for block %s", name))
+ }
+ b.control = v
+ seenCtrl = true
+ case valu:
+ b.valus = append(b.valus, v)
+ }
+ }
+ if !seenCtrl {
+ panic(fmt.Sprintf("block %s doesn't have control", b.name))
+ }
+ return b
+}
+
+// Valu defines a value in a block.
+func Valu(name string, op Op, t *types.Type, auxint int64, aux Aux, args ...string) valu {
+ return valu{name, op, t, auxint, aux, args}
+}
+
+// Goto specifies that this is a BlockPlain and names the single successor.
+// TODO(matloob): choose a better name.
+func Goto(succ string) ctrl {
+ return ctrl{BlockPlain, "", []string{succ}}
+}
+
+// If specifies a BlockIf.
+func If(cond, sub, alt string) ctrl {
+ return ctrl{BlockIf, cond, []string{sub, alt}}
+}
+
+// Exit specifies a BlockExit.
+func Exit(arg string) ctrl {
+ return ctrl{BlockExit, arg, []string{}}
+}
+
+// Eq specifies a BlockAMD64EQ.
+func Eq(cond, sub, alt string) ctrl {
+ return ctrl{BlockAMD64EQ, cond, []string{sub, alt}}
+}
+
+// bloc, ctrl, and valu are internal structures used by Bloc, Valu, Goto,
+// If, and Exit to help define blocks.
+
+type bloc struct {
+ name string
+ control ctrl
+ valus []valu
+}
+
+type ctrl struct {
+ kind BlockKind
+ control string
+ succs []string
+}
+
+type valu struct {
+ name string
+ op Op
+ t *types.Type
+ auxint int64
+ aux Aux
+ args []string
+}
+
+func TestArgs(t *testing.T) {
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, c.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, c.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+ sum := fun.values["sum"]
+ for i, name := range []string{"a", "b"} {
+ if sum.Args[i] != fun.values[name] {
+ t.Errorf("arg %d for sum is incorrect: want %s, got %s",
+ i, sum.Args[i], fun.values[name])
+ }
+ }
+}
+
+func TestEquiv(t *testing.T) {
+ cfg := testConfig(t)
+ equivalentCases := []struct{ f, g fun }{
+ // simple case
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ },
+ // block order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("entry",
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit"))),
+ },
+ }
+ for _, c := range equivalentCases {
+ if !Equiv(c.f.f, c.g.f) {
+ t.Error("expected equivalence. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+
+ differentCases := []struct{ f, g fun }{
+ // different shape
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem"))),
+ },
+ // value order changed
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value auxint different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Exit("mem"))),
+ },
+ // value aux different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("foo")),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConstString, cfg.config.Types.String, 0, StringToAux("bar")),
+ Exit("mem"))),
+ },
+ // value args different
+ {
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 26, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "a", "b"),
+ Exit("mem"))),
+ cfg.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpConst64, cfg.config.Types.Int64, 0, nil),
+ Valu("b", OpConst64, cfg.config.Types.Int64, 14, nil),
+ Valu("sum", OpAdd64, cfg.config.Types.Int64, 0, nil, "b", "a"),
+ Exit("mem"))),
+ },
+ }
+ for _, c := range differentCases {
+ if Equiv(c.f.f, c.g.f) {
+ t.Error("expected difference. Func definitions:")
+ t.Error(c.f.f)
+ t.Error(c.g.f)
+ }
+ }
+}
+
+// TestConstCache ensures that the cache will not return
+// reused freed values with a non-matching AuxInt.
+func TestConstCache(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Exit("mem")))
+ v1 := f.f.ConstBool(c.config.Types.Bool, false)
+ v2 := f.f.ConstBool(c.config.Types.Bool, true)
+ f.f.freeValue(v1)
+ f.f.freeValue(v2)
+ v3 := f.f.ConstBool(c.config.Types.Bool, false)
+ v4 := f.f.ConstBool(c.config.Types.Bool, true)
+ if v3.AuxInt != 0 {
+ t.Errorf("expected %s to have auxint of 0\n", v3.LongString())
+ }
+ if v4.AuxInt != 1 {
+ t.Errorf("expected %s to have auxint of 1\n", v4.LongString())
+ }
+}
+
+// opcodeMap returns a map from opcode to the number of times that opcode
+// appears in the function.
+func opcodeMap(f *Func) map[Op]int {
+ m := map[Op]int{}
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ m[v.Op]++
+ }
+ }
+ return m
+}
+
+// checkOpcodeCounts checks that the opcode counts listed in m agree with the
+// counts of those opcodes as they appear in the function.
+func checkOpcodeCounts(t *testing.T, f *Func, m map[Op]int) {
+ n := opcodeMap(f)
+ for op, cnt := range m {
+ if n[op] != cnt {
+ t.Errorf("%s appears %d times, want %d times", op, n[op], cnt)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/fuse.go b/src/cmd/compile/internal/ssa/fuse.go
new file mode 100644
index 0000000..fec2ba8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse.go
@@ -0,0 +1,254 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+)
+
+// fuseEarly runs fuse(f, fuseTypePlain|fuseTypeIntInRange).
+func fuseEarly(f *Func) { fuse(f, fuseTypePlain|fuseTypeIntInRange) }
+
+// fuseLate runs fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect).
+func fuseLate(f *Func) { fuse(f, fuseTypePlain|fuseTypeIf|fuseTypeBranchRedirect) }
+
+type fuseType uint8
+
+const (
+ fuseTypePlain fuseType = 1 << iota
+ fuseTypeIf
+ fuseTypeIntInRange
+ fuseTypeBranchRedirect
+ fuseTypeShortCircuit
+)
+
+// fuse simplifies control flow by joining basic blocks.
+func fuse(f *Func, typ fuseType) {
+ for changed := true; changed; {
+ changed = false
+ // Fuse from end to beginning, to avoid quadratic behavior in fuseBlockPlain. See issue 13554.
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ b := f.Blocks[i]
+ if typ&fuseTypeIf != 0 {
+ changed = fuseBlockIf(b) || changed
+ }
+ if typ&fuseTypeIntInRange != 0 {
+ changed = fuseIntegerComparisons(b) || changed
+ }
+ if typ&fuseTypePlain != 0 {
+ changed = fuseBlockPlain(b) || changed
+ }
+ if typ&fuseTypeShortCircuit != 0 {
+ changed = shortcircuitBlock(b) || changed
+ }
+ }
+ if typ&fuseTypeBranchRedirect != 0 {
+ changed = fuseBranchRedirect(f) || changed
+ }
+ if changed {
+ f.invalidateCFG()
+ }
+ }
+}
+
+// fuseBlockIf handles the following cases where s0 and s1 are empty blocks.
+//
+// b b b b
+// \ / \ / | \ / \ / | | |
+// s0 s1 | s1 s0 | | |
+// \ / | / \ | | |
+// ss ss ss ss
+//
+// If all Phi ops in ss have identical variables for slots corresponding to
+// s0, s1 and b then the branch can be dropped.
+// This optimization often comes up in switch statements with multiple
+// expressions in a case clause:
+// switch n {
+// case 1,2,3: return 4
+// }
+// TODO: If ss doesn't contain any OpPhis, are s0 and s1 dead code anyway?
+func fuseBlockIf(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+	// It doesn't matter how many Preds s0 or s1 have.
+ var ss0, ss1 *Block
+ s0 := b.Succs[0].b
+ i0 := b.Succs[0].i
+ if s0.Kind != BlockPlain || !isEmpty(s0) {
+ s0, ss0 = b, s0
+ } else {
+ ss0 = s0.Succs[0].b
+ i0 = s0.Succs[0].i
+ }
+ s1 := b.Succs[1].b
+ i1 := b.Succs[1].i
+ if s1.Kind != BlockPlain || !isEmpty(s1) {
+ s1, ss1 = b, s1
+ } else {
+ ss1 = s1.Succs[0].b
+ i1 = s1.Succs[0].i
+ }
+ if ss0 != ss1 {
+ if s0.Kind == BlockPlain && isEmpty(s0) && s1.Kind == BlockPlain && isEmpty(s1) {
+ // Two special cases where both s0, s1 and ss are empty blocks.
+ if s0 == ss1 {
+ s0, ss0 = b, ss1
+ } else if ss0 == s1 {
+ s1, ss1 = b, ss0
+ } else {
+ return false
+ }
+ } else {
+ return false
+ }
+ }
+ ss := ss0
+
+	// s0 and s1 are equal to b if the corresponding block is missing
+	// (2nd, 3rd, and 4th cases in the figure).
+
+ for _, v := range ss.Values {
+ if v.Op == OpPhi && v.Uses > 0 && v.Args[i0] != v.Args[i1] {
+ return false
+ }
+ }
+
+	// We do not need to redirect the Preds of s0 and s1 to ss;
+	// the following optimization will do this.
+ b.removeEdge(0)
+ if s0 != b && len(s0.Preds) == 0 {
+ s0.removeEdge(0)
+ // Move any (dead) values in s0 to b,
+ // where they will be eliminated by the next deadcode pass.
+ for _, v := range s0.Values {
+ v.Block = b
+ }
+ b.Values = append(b.Values, s0.Values...)
+ // Clear s0.
+ s0.Kind = BlockInvalid
+ s0.Values = nil
+ s0.Succs = nil
+ s0.Preds = nil
+ }
+
+ b.Kind = BlockPlain
+ b.Likely = BranchUnknown
+ b.ResetControls()
+	// The values in b may be dead code; clearing them promptly may
+	// expose new optimization opportunities.
+	// First, put dead values that can be deleted into the slice walkValues.
+	// Then, before resetting each dead value, push its arguments onto walkValues,
+	// because the arguments may become dead values as well.
+ walkValues := []*Value{}
+ for _, v := range b.Values {
+ if v.Uses == 0 && v.removeable() {
+ walkValues = append(walkValues, v)
+ }
+ }
+ for len(walkValues) != 0 {
+ v := walkValues[len(walkValues)-1]
+ walkValues = walkValues[:len(walkValues)-1]
+ if v.Uses == 0 && v.removeable() {
+ walkValues = append(walkValues, v.Args...)
+ v.reset(OpInvalid)
+ }
+ }
+ return true
+}
+
+// isEmpty reports whether b contains no live values.
+// The check is conservative, so it may report false for a block that is in fact empty.
+func isEmpty(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Uses > 0 || v.Op.IsCall() || v.Op.HasSideEffects() || v.Type.IsVoid() {
+ return false
+ }
+ }
+ return true
+}
+
+func fuseBlockPlain(b *Block) bool {
+ if b.Kind != BlockPlain {
+ return false
+ }
+
+ c := b.Succs[0].b
+ if len(c.Preds) != 1 {
+ return false
+ }
+
+ // If a block happened to end in a statement marker,
+ // try to preserve it.
+ if b.Pos.IsStmt() == src.PosIsStmt {
+ l := b.Pos.Line()
+ for _, v := range c.Values {
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ if l == v.Pos.Line() {
+ v.Pos = v.Pos.WithIsStmt()
+ l = 0
+ break
+ }
+ }
+ if l != 0 && c.Pos.Line() == l {
+ c.Pos = c.Pos.WithIsStmt()
+ }
+ }
+
+ // move all of b's values to c.
+ for _, v := range b.Values {
+ v.Block = c
+ }
+ // Use whichever value slice is larger, in the hopes of avoiding growth.
+ // However, take care to avoid c.Values pointing to b.valstorage.
+ // See golang.org/issue/18602.
+ // It's important to keep the elements in the same order; maintenance of
+ // debugging information depends on the order of *Values in Blocks.
+ // This can also cause changes in the order (which may affect other
+ // optimizations and possibly compiler output) for 32-vs-64 bit compilation
+	// platforms (word size affects allocation bucket size, which affects slice capacity).
+ if cap(c.Values) >= cap(b.Values) || len(b.Values) <= len(b.valstorage) {
+ bl := len(b.Values)
+ cl := len(c.Values)
+ var t []*Value // construct t = b.Values followed-by c.Values, but with attention to allocation.
+ if cap(c.Values) < bl+cl {
+ // reallocate
+ t = make([]*Value, bl+cl)
+ } else {
+ // in place.
+ t = c.Values[0 : bl+cl]
+ }
+ copy(t[bl:], c.Values) // possibly in-place
+ c.Values = t
+ copy(c.Values, b.Values)
+ } else {
+ c.Values = append(b.Values, c.Values...)
+ }
+
+ // replace b->c edge with preds(b) -> c
+ c.predstorage[0] = Edge{}
+ if len(b.Preds) > len(b.predstorage) {
+ c.Preds = b.Preds
+ } else {
+ c.Preds = append(c.predstorage[:0], b.Preds...)
+ }
+ for i, e := range c.Preds {
+ p := e.b
+ p.Succs[e.i] = Edge{c, i}
+ }
+ f := b.Func
+ if f.Entry == b {
+ f.Entry = c
+ }
+
+ // trash b, just in case
+ b.Kind = BlockInvalid
+ b.Values = nil
+ b.Preds = nil
+ b.Succs = nil
+ return true
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_branchredirect.go b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
new file mode 100644
index 0000000..751dca7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_branchredirect.go
@@ -0,0 +1,110 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseBranchRedirect checks for a CFG in which the outbound branch
+// of an If block can be derived from its predecessor If block.
+// In some such cases, we can redirect the predecessor If block
+// directly to the corresponding successor block. For example:
+// p:
+// v11 = Less64 <bool> v10 v8
+// If v11 goto b else u
+// b: <- p ...
+// v17 = Leq64 <bool> v10 v8
+// If v17 goto s else o
+// We can redirect p to s directly.
+//
+// The implementation here borrows the framework of the prove pass.
+// 1. Traverse all blocks of function f to find If blocks.
+// 2. For any If block b, traverse all its predecessors to find If blocks.
+// 3. For any If block predecessor p, update the relationship p->b.
+// 4. Traverse all successors of b.
+// 5. For any successor s of b, try to update the relationship b->s; if a
+//    contradiction is found, redirect p to another successor of b.
+func fuseBranchRedirect(f *Func) bool {
+ ft := newFactsTable(f)
+ ft.checkpoint()
+
+ changed := false
+ for i := len(f.Blocks) - 1; i >= 0; i-- {
+ b := f.Blocks[i]
+ if b.Kind != BlockIf {
+ continue
+ }
+ // b is either empty or only contains the control value.
+ // TODO: if b contains only OpCopy or OpNot related to b.Controls,
+ // such as Copy(Not(Copy(Less64(v1, v2)))), perhaps it can be optimized.
+ bCtl := b.Controls[0]
+ if bCtl.Block != b && len(b.Values) != 0 || (len(b.Values) != 1 || bCtl.Uses != 1) && bCtl.Block == b {
+ continue
+ }
+
+ for k := 0; k < len(b.Preds); k++ {
+ pk := b.Preds[k]
+ p := pk.b
+ if p.Kind != BlockIf || p == b {
+ continue
+ }
+ pbranch := positive
+ if pk.i == 1 {
+ pbranch = negative
+ }
+ ft.checkpoint()
+ // Assume branch p->b is taken.
+ addBranchRestrictions(ft, p, pbranch)
+ // Check if any outgoing branch is unreachable based on the above condition.
+ parent := b
+ for j, bbranch := range [...]branch{positive, negative} {
+ ft.checkpoint()
+ // Try to update relationship b->child, and check if the contradiction occurs.
+ addBranchRestrictions(ft, parent, bbranch)
+ unsat := ft.unsat
+ ft.restore()
+ if !unsat {
+ continue
+ }
+				// This branch is impossible, so redirect p directly to another branch.
+ out := 1 ^ j
+ child := parent.Succs[out].b
+ if child == b {
+ continue
+ }
+ b.removePred(k)
+ p.Succs[pk.i] = Edge{child, len(child.Preds)}
+ // Fix up Phi value in b to have one less argument.
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ b.removePhiArg(v, k)
+ phielimValue(v)
+ }
+ // Fix up child to have one more predecessor.
+ child.Preds = append(child.Preds, Edge{p, pk.i})
+ ai := b.Succs[out].i
+ for _, v := range child.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.AddArg(v.Args[ai])
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(b.Controls[0].Pos, "Redirect %s based on %s", b.Controls[0].Op, p.Controls[0].Op)
+ }
+ changed = true
+ k--
+ break
+ }
+ ft.restore()
+ }
+ if len(b.Preds) == 0 && b != f.Entry {
+ // Block is now dead.
+ b.Kind = BlockInvalid
+ }
+ }
+ ft.restore()
+ ft.cleanup(f)
+ return changed
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_comparisons.go b/src/cmd/compile/internal/ssa/fuse_comparisons.go
new file mode 100644
index 0000000..d843fc3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_comparisons.go
@@ -0,0 +1,157 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// fuseIntegerComparisons optimizes inequalities such as '1 <= x && x < 5',
+// which can be optimized to 'unsigned(x-1) < 4'.
+//
+// Look for branch structure like:
+//
+// p
+// |\
+// | b
+// |/ \
+// s0 s1
+//
+// In our example, p has control '1 <= x', b has control 'x < 5',
+// and s0 and s1 are the if and else results of the comparison.
+//
+// This will be optimized into:
+//
+// p
+// \
+// b
+// / \
+// s0 s1
+//
+// where b has the combined control value 'unsigned(x-1) < 4'.
+// Later passes will then fuse p and b.
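+//
+// The combined form is justified as follows: for signed x, '1 <= x && x < 5'
+// holds exactly when x is in [1,5); subtracting 1 maps that interval to
+// [0,4), and any x outside it (x <= 0 or x >= 5) wraps or lands at an
+// unsigned value of at least 4, so the conjunction collapses to the single
+// comparison 'unsigned(x-1) < 4'.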
+func fuseIntegerComparisons(b *Block) bool {
+ if len(b.Preds) != 1 {
+ return false
+ }
+ p := b.Preds[0].Block()
+ if b.Kind != BlockIf || p.Kind != BlockIf {
+ return false
+ }
+
+ // Don't merge control values if b is likely to be bypassed anyway.
+ if p.Likely == BranchLikely && p.Succs[0].Block() != b {
+ return false
+ }
+ if p.Likely == BranchUnlikely && p.Succs[1].Block() != b {
+ return false
+ }
+
+ // Check if the control values combine to make an integer inequality that
+ // can be further optimized later.
+ bc := b.Controls[0]
+ pc := p.Controls[0]
+ if !areMergeableInequalities(bc, pc) {
+ return false
+ }
+
+ // If the first (true) successors match then we have a disjunction (||).
+ // If the second (false) successors match then we have a conjunction (&&).
+ for i, op := range [2]Op{OpOrB, OpAndB} {
+ if p.Succs[i].Block() != b.Succs[i].Block() {
+ continue
+ }
+
+ // TODO(mundaym): should we also check the cost of executing b?
+ // Currently we might speculatively execute b even if b contains
+ // a lot of instructions. We could just check that len(b.Values)
+ // is lower than a fixed amount. Bear in mind however that the
+ // other optimization passes might yet reduce the cost of b
+ // significantly so we shouldn't be overly conservative.
+ if !canSpeculativelyExecute(b) {
+ return false
+ }
+
+ // Logically combine the control values for p and b.
+ v := b.NewValue0(bc.Pos, op, bc.Type)
+ v.AddArg(pc)
+ v.AddArg(bc)
+
+ // Set the combined control value as the control value for b.
+ b.SetControl(v)
+
+ // Modify p so that it jumps directly to b.
+ p.removeEdge(i)
+ p.Kind = BlockPlain
+ p.Likely = BranchUnknown
+ p.ResetControls()
+
+ return true
+ }
+
+ // TODO: could negate condition(s) to merge controls.
+ return false
+}
+
+// getConstIntArgIndex returns the index of the first argument that is a
+// constant integer or -1 if no such argument exists.
+func getConstIntArgIndex(v *Value) int {
+ for i, a := range v.Args {
+ switch a.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ return i
+ }
+ }
+ return -1
+}
+
+// isSignedInequality reports whether v's op represents the inequality < or ≤
+// in the signed domain.
+func isSignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64, OpLess32, OpLess16, OpLess8,
+ OpLeq64, OpLeq32, OpLeq16, OpLeq8:
+ return true
+ }
+ return false
+}
+
+// isUnsignedInequality reports whether v's op represents the inequality < or ≤
+// in the unsigned domain.
+func isUnsignedInequality(v *Value) bool {
+ switch v.Op {
+ case OpLess64U, OpLess32U, OpLess16U, OpLess8U,
+ OpLeq64U, OpLeq32U, OpLeq16U, OpLeq8U:
+ return true
+ }
+ return false
+}
+
+func areMergeableInequalities(x, y *Value) bool {
+ // We need both inequalities to be either in the signed or unsigned domain.
+ // TODO(mundaym): it would also be good to merge when we have an Eq op that
+ // could be transformed into a Less/Leq. For example in the unsigned
+ // domain 'x == 0 || 3 < x' is equivalent to 'x <= 0 || 3 < x'
+ inequalityChecks := [...]func(*Value) bool{
+ isSignedInequality,
+ isUnsignedInequality,
+ }
+ for _, f := range inequalityChecks {
+ if !f(x) || !f(y) {
+ continue
+ }
+
+ // Check that both inequalities are comparisons with constants.
+ xi := getConstIntArgIndex(x)
+ if xi < 0 {
+ return false
+ }
+ yi := getConstIntArgIndex(y)
+ if yi < 0 {
+ return false
+ }
+
+ // Check that the non-constant arguments to the inequalities
+ // are the same.
+ return x.Args[xi^1] == y.Args[yi^1]
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/fuse_test.go b/src/cmd/compile/internal/ssa/fuse_test.go
new file mode 100644
index 0000000..27a14b1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/fuse_test.go
@@ -0,0 +1,301 @@
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "strconv"
+ "testing"
+)
+
+func TestFuseEliminatesOneBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "exit")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesBothBranches(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseHandlesPhis(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "then", "else")),
+ Bloc("then",
+ Goto("exit")),
+ Bloc("else",
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr1"),
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind != BlockInvalid {
+ t.Errorf("then was not eliminated, but should have")
+ }
+ if b == fun.blocks["else"] && b.Kind != BlockInvalid {
+ t.Errorf("else was not eliminated, but should have")
+ }
+ }
+}
+
+func TestFuseEliminatesEmptyBlocks(t *testing.T) {
+ c := testConfig(t)
+ // Case 1, plain type empty blocks z0 ~ z3 will be eliminated.
+ // entry
+ // |
+ // z0
+ // |
+ // z1
+ // |
+ // z2
+ // |
+ // z3
+ // |
+ // exit
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("z0")),
+ Bloc("z1",
+ Goto("z2")),
+ Bloc("z3",
+ Goto("exit")),
+ Bloc("z2",
+ Goto("z3")),
+ Bloc("z0",
+ Goto("z1")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case1 %s was not eliminated, but should have", k)
+ }
+ }
+
+ // Case 2, empty blocks with If branch, z0 and z1 will be eliminated.
+ // entry
+ // / \
+ // z0 z1
+ // \ /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c", OpArg, c.config.Types.Bool, 0, nil),
+ If("c", "z0", "z1")),
+ Bloc("z0",
+ Goto("exit")),
+ Bloc("z1",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case2 %s was not eliminated, but should have", k)
+ }
+ }
+
+ // Case 3, empty blocks with multiple predecessors, z0 and z1 will be eliminated.
+ // entry
+ // | \
+ // | b0
+ // | / \
+ // z0 z1
+ // \ /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c1", OpArg, c.config.Types.Bool, 0, nil),
+ If("c1", "b0", "z0")),
+ Bloc("b0",
+ Valu("c2", OpArg, c.config.Types.Bool, 0, nil),
+ If("c2", "z1", "z0")),
+ Bloc("z0",
+ Goto("exit")),
+ Bloc("z1",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for k, b := range fun.blocks {
+ if k[:1] == "z" && b.Kind != BlockInvalid {
+ t.Errorf("case3 %s was not eliminated, but should have", k)
+ }
+ }
+}
+
+func TestFuseSideEffects(t *testing.T) {
+ c := testConfig(t)
+ // Case1, test that we don't fuse branches that have side effects but
+ // have no use (e.g. followed by infinite loop).
+ // See issue #36005.
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("b", OpArg, c.config.Types.Bool, 0, nil),
+ If("b", "then", "else")),
+ Bloc("then",
+ Valu("call1", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("else",
+ Valu("call2", OpStaticCall, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Goto("empty")),
+ Bloc("empty",
+ Goto("loop")),
+ Bloc("loop",
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["then"] && b.Kind == BlockInvalid {
+ t.Errorf("then is eliminated, but should not")
+ }
+ if b == fun.blocks["else"] && b.Kind == BlockInvalid {
+ t.Errorf("else is eliminated, but should not")
+ }
+ }
+
+ // Case2, z0 contains a value that has side effect, z0 shouldn't be eliminated.
+ // entry
+ // | \
+ // | z0
+ // | /
+ // exit
+ fun = c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("c1", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("p", OpArg, c.config.Types.IntPtr, 0, nil),
+ If("c1", "z0", "exit")),
+ Bloc("z0",
+ Valu("nilcheck", OpNilCheck, types.TypeVoid, 0, nil, "p", "mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem"),
+ ))
+ CheckFunc(fun.f)
+ fuseLate(fun.f)
+ z0, ok := fun.blocks["z0"]
+ if !ok || z0.Kind == BlockInvalid {
+ t.Errorf("case2 z0 is eliminated, but should not")
+ }
+}
+
+func BenchmarkFuse(b *testing.B) {
+ for _, n := range [...]int{1, 10, 100, 1000, 10000} {
+ b.Run(strconv.Itoa(n), func(b *testing.B) {
+ c := testConfig(b)
+
+ blocks := make([]bloc, 0, 2*n+3)
+ blocks = append(blocks,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("exit")))
+
+ phiArgs := make([]string, 0, 2*n)
+ for i := 0; i < n; i++ {
+ cname := fmt.Sprintf("c%d", i)
+ blocks = append(blocks,
+ Bloc(fmt.Sprintf("b%d", i), If("cond", cname, "merge")),
+ Bloc(cname, Goto("merge")))
+ phiArgs = append(phiArgs, "x", "x")
+ }
+ blocks = append(blocks,
+ Bloc("merge",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, phiArgs...),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fun := c.Fun("entry", blocks...)
+ fuseLate(fun.f)
+ }
+ })
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/gen/386.rules b/src/cmd/compile/internal/ssa/gen/386.rules
new file mode 100644
index 0000000..7bdebed
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386.rules
@@ -0,0 +1,1112 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|32|16|8) ...) => (ADDL ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+(Add32carry ...) => (ADDLcarry ...)
+(Add32withcarry ...) => (ADCL ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUBL ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+(Sub32carry ...) => (SUBLcarry ...)
+(Sub32withcarry ...) => (SBBL ...)
+
+(Mul(32|16|8) ...) => (MULL ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+(Mul32uhilo ...) => (MULLQU ...)
+
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul32uover x y)) => (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+
+(Avg32u ...) => (AVGLU ...)
+
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+(Div(32|32u|16|16u) ...) => (DIV(L|LU|W|WU) ...)
+(Div8 x y) => (DIVW (SignExt8to16 x) (SignExt8to16 y))
+(Div8u x y) => (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(Hmul(32|32u) ...) => (HMUL(L|LU) ...)
+
+(Mod(32|32u|16|16u) ...) => (MOD(L|LU|W|WU) ...)
+(Mod8 x y) => (MODW (SignExt8to16 x) (SignExt8to16 y))
+(Mod8u x y) => (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+
+(And(32|16|8) ...) => (ANDL ...)
+(Or(32|16|8) ...) => (ORL ...)
+(Xor(32|16|8) ...) => (XORL ...)
+
+(Neg(32|16|8) ...) => (NEGL ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+(Com(32|16|8) ...) => (NOTL ...)
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) => (ADDLconst [int32(off)] ptr)
+
+(Bswap32 ...) => (BSWAPL ...)
+
+(Sqrt ...) => (SQRTSD ...)
+(Sqrt32 ...) => (SQRTSS ...)
+
+(Ctz16 x) => (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+(Ctz16NonZero ...) => (BSFL ...)
+
+// Lowering extension
+(SignExt8to16 ...) => (MOVBLSX ...)
+(SignExt8to32 ...) => (MOVBLSX ...)
+(SignExt16to32 ...) => (MOVWLSX ...)
+
+(ZeroExt8to16 ...) => (MOVBLZX ...)
+(ZeroExt8to32 ...) => (MOVBLZX ...)
+(ZeroExt16to32 ...) => (MOVWLZX ...)
+
+(Signmask x) => (SARLconst x [31])
+(Zeromask <t> x) => (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+(Slicemask <t> x) => (SARLconst (NEGL <t> x) [31])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Lowering float-int conversions
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+//   result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffff)
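+// The rules below build that mask from the flags of a compare: CMPLconst y [32],
+// for example, sets the carry flag exactly when y < 32 (unsigned); SBBLcarrymask
+// then materializes either all ones (carry set) or zero (carry clear), and ANDing
+// the mask with the shifted value yields the result above without a branch.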
+(Lsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Lsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+
+(Lsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+(Lsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SHLL <t> x y)
+
+(Rsh32Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [32])))
+(Rsh16Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [16])))
+(Rsh8Ux(32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(L|W|B)const y [8])))
+
+(Rsh32Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRL <t> x y)
+(Rsh16Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRW <t> x y)
+(Rsh8Ux(32|16|8) <t> x y) && shiftIsBounded(v) => (SHRB <t> x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
+
+(Rsh32x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [32])))))
+(Rsh16x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [16])))))
+(Rsh8x(32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMP(L|W|B)const y [8])))))
+
+(Rsh32x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(32|16|8) <t> x y) && shiftIsBounded(v) => (SARB x y)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SHLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SARLconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SHRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SHLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SARWconst x [int16(c)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SHRWconst x [int16(c)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SHLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SARBconst x [int8(c)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SHRBconst x [int8(c)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shift: shift by width-1 instead, leaving only the sign (0 or -1)
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SARLconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SARWconst x [15])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SARBconst x [7])
+
+// constant rotates
+(RotateLeft32 x (MOVLconst [c])) => (ROLLconst [c&31] x)
+(RotateLeft16 x (MOVLconst [c])) => (ROLWconst [int16(c&15)] x)
+(RotateLeft8 x (MOVLconst [c])) => (ROLBconst [int8(c&7)] x)
+
+// Lowering comparisons
+(Less32 x y) => (SETL (CMPL x y))
+(Less16 x y) => (SETL (CMPW x y))
+(Less8 x y) => (SETL (CMPB x y))
+(Less32U x y) => (SETB (CMPL x y))
+(Less16U x y) => (SETB (CMPW x y))
+(Less8U x y) => (SETB (CMPB x y))
+// Use SETGF with reversed operands to dodge NaN case
+(Less64F x y) => (SETGF (UCOMISD y x))
+(Less32F x y) => (SETGF (UCOMISS y x))
+
+(Leq32 x y) => (SETLE (CMPL x y))
+(Leq16 x y) => (SETLE (CMPW x y))
+(Leq8 x y) => (SETLE (CMPB x y))
+(Leq32U x y) => (SETBE (CMPL x y))
+(Leq16U x y) => (SETBE (CMPW x y))
+(Leq8U x y) => (SETBE (CMPB x y))
+// Use SETGEF with reversed operands to dodge NaN case
+(Leq64F x y) => (SETGEF (UCOMISD y x))
+(Leq32F x y) => (SETGEF (UCOMISS y x))
+
+(Eq32 x y) => (SETEQ (CMPL x y))
+(Eq16 x y) => (SETEQ (CMPW x y))
+(Eq8 x y) => (SETEQ (CMPB x y))
+(EqB x y) => (SETEQ (CMPB x y))
+(EqPtr x y) => (SETEQ (CMPL x y))
+(Eq64F x y) => (SETEQF (UCOMISD x y))
+(Eq32F x y) => (SETEQF (UCOMISS x y))
+
+(Neq32 x y) => (SETNE (CMPL x y))
+(Neq16 x y) => (SETNE (CMPW x y))
+(Neq8 x y) => (SETNE (CMPB x y))
+(NeqB x y) => (SETNE (CMPB x y))
+(NeqPtr x y) => (SETNE (CMPL x y))
+(Neq64F x y) => (SETNEF (UCOMISD x y))
+(Neq32F x y) => (SETNEF (UCOMISS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of the Store pattern should come first.
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [8] dst src mem) =>
+ (MOVLstore [4] dst (MOVLload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Adjust moves to be a multiple of 4 bytes.
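+// The leading MOVLstore copies 4 bytes at the front to cover the s%4 remainder;
+// the recursive Move then copies the remaining s-s%4 bytes starting s%4 bytes in,
+// overlapping the first store by 4-s%4 bytes.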
+(Move [s] dst src mem)
+ && s > 8 && s%4 != 0 =>
+ (Move [s-s%4]
+ (ADDLconst <dst.Type> dst [int32(s%4)])
+ (ADDLconst <src.Type> src [int32(s%4)])
+ (MOVLstore dst (MOVLload src mem) mem))
+
+// Medium copying uses a duff device.
+(Move [s] dst src mem)
+ && s > 8 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [10*(128-s/4)] dst src mem)
+// 10 and 128 are magic constants. 10 is the number of bytes to encode:
+// MOVL (SI), CX
+// ADDL $4, SI
+// MOVL CX, (DI)
+// ADDL $4, DI
+// and 128 is the number of such blocks. See src/runtime/duff_386.s:duffcopy.
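+// For example, a 64-byte Move needs s/4 = 16 blocks, so it enters duffcopy at
+// byte offset 10*(128-16) = 1120 and executes only the final 16 copy blocks.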
+
+// Large copying uses REP MOVSL.
+(Move [s] dst src mem) && (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [0] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%4 != 0 && s > 4 =>
+ (Zero [s-s%4] (ADDLconst destptr [int32(s%4)])
+ (MOVLstoreconst [0] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [8] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [12] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [16] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,12)] destptr
+ (MOVLstoreconst [makeValAndOff(0,8)] destptr
+ (MOVLstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+// Medium zeroing uses a duff device.
+(Zero [s] destptr mem)
+ && s > 16 && s <= 4*128 && s%4 == 0
+ && !config.noDuffDevice =>
+ (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+// 1 and 128 are magic constants. 1 is the number of bytes to encode STOSL.
+// 128 is the number of STOSL instructions in duffzero.
+// See src/runtime/duff_386.s:duffzero.
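+// For example, a 64-byte Zero needs s/4 = 16 stores, so it enters duffzero at
+// offset 1*(128-16) = 112 and runs only the final 16 STOSL instructions.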
+
+// Large zeroing uses REP STOSL.
+(Zero [s] destptr mem)
+ && (s > 4*128 || (config.noDuffDevice && s > 16))
+ && s%4 == 0 =>
+ (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const(32|64)F ...) => (MOVS(S|D)const ...)
+(ConstNil) => (MOVLconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTL p p))
+(IsInBounds idx len) => (SETB (CMPL idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPL idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LEAL {sym} base)
+(LocalAddr {sym} base _) => (LEAL {sym} base)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// fold constants into instructions
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+(ADDLcarry x (MOVLconst [c])) => (ADDLconstcarry [c] x)
+(ADCL x (MOVLconst [c]) f) => (ADCLconst [c] x f)
+
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+(SUBLcarry x (MOVLconst [c])) => (SUBLconstcarry [c] x)
+(SBBL x (MOVLconst [c]) f) => (SBBLconst [c] x f)
+
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(ANDLconst [c] (ANDLconst [d] x)) => (ANDLconst [c & d] x)
+(XORLconst [c] (XORLconst [d] x)) => (XORLconst [c ^ d] x)
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLL x (MOVLconst [c])) => (SHLLconst [c&31] x)
+(SHRL x (MOVLconst [c])) => (SHRLconst [c&31] x)
+(SHRW x (MOVLconst [c])) && c&31 < 16 => (SHRWconst [int16(c&31)] x)
+(SHRW _ (MOVLconst [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOVLconst [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOVLconst [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARL x (MOVLconst [c])) => (SARLconst [c&31] x)
+(SARW x (MOVLconst [c])) => (SARWconst [int16(min(int64(c&31),15))] x)
+(SARB x (MOVLconst [c])) => (SARBconst [int8(min(int64(c&31),7))] x)
+
+(SARL x (ANDLconst [31] y)) => (SARL x y)
+(SHLL x (ANDLconst [31] y)) => (SHLL x y)
+(SHRL x (ANDLconst [31] y)) => (SHRL x y)
+
+// Rotate instructions
+
+(ADDL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+( ORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+(XORL (SHLLconst [c] x) (SHRLconst [d] x)) && d == 32-c => (ROLLconst [c] x)
+
+(ADDL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+( ORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+(XORL <t> (SHLLconst x [c]) (SHRWconst x [d])) && c < 16 && d == int16(16-c) && t.Size() == 2
+ => (ROLWconst x [int16(c)])
+
+(ADDL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+( ORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+(XORL <t> (SHLLconst x [c]) (SHRBconst x [d])) && c < 8 && d == int8(8-c) && t.Size() == 1
+ => (ROLBconst x [int8(c)])
+
+(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
+
+
+// Constant shift simplifications
+
+(SHLLconst x [0]) => x
+(SHRLconst x [0]) => x
+(SARLconst x [0]) => x
+
+(SHRWconst x [0]) => x
+(SARWconst x [0]) => x
+
+(SHRBconst x [0]) => x
+(SARBconst x [0]) => x
+
+(ROLLconst [0] x) => x
+(ROLWconst [0] x) => x
+(ROLBconst [0] x) => x
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift such as
+// (SHRW x (MOVLconst [24])), but just in case.
+
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(L|W|B) y x))
+
+// strength reduction
+// Assumes the following costs, taken from https://gmplib.org/~tege/x86-timing.pdf:
+// 1 - addl, shll, leal, negl, subl
+// 3 - imull
+// This limits the rewrites to two instructions.
+// Note that negl always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
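+// For example, the [27] rule below expands 27*x as LEAL8 (LEAL2 x x) (LEAL2 x x):
+// LEAL2 x x is x+2*x = 3*x and LEAL8 y y is y+8*y = 9*y, giving 27*x from two
+// cost-1 LEALs instead of one cost-3 IMULL.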
+(MULLconst [-9] x) => (NEGL (LEAL8 <v.Type> x x))
+(MULLconst [-5] x) => (NEGL (LEAL4 <v.Type> x x))
+(MULLconst [-3] x) => (NEGL (LEAL2 <v.Type> x x))
+(MULLconst [-1] x) => (NEGL x)
+(MULLconst [0] _) => (MOVLconst [0])
+(MULLconst [1] x) => x
+(MULLconst [3] x) => (LEAL2 x x)
+(MULLconst [5] x) => (LEAL4 x x)
+(MULLconst [7] x) => (LEAL2 x (LEAL2 <v.Type> x x))
+(MULLconst [9] x) => (LEAL8 x x)
+(MULLconst [11] x) => (LEAL2 x (LEAL4 <v.Type> x x))
+(MULLconst [13] x) => (LEAL4 x (LEAL2 <v.Type> x x))
+(MULLconst [19] x) => (LEAL2 x (LEAL8 <v.Type> x x))
+(MULLconst [21] x) => (LEAL4 x (LEAL4 <v.Type> x x))
+(MULLconst [25] x) => (LEAL8 x (LEAL2 <v.Type> x x))
+(MULLconst [27] x) => (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+(MULLconst [37] x) => (LEAL4 x (LEAL8 <v.Type> x x))
+(MULLconst [41] x) => (LEAL8 x (LEAL4 <v.Type> x x))
+(MULLconst [45] x) => (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+(MULLconst [73] x) => (LEAL8 x (LEAL8 <v.Type> x x))
+(MULLconst [81] x) => (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+
+(MULLconst [c] x) && isPowerOfTwo32(c+1) && c >= 15 => (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+(MULLconst [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+(MULLconst [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+(MULLconst [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+(MULLconst [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+
+// combine add/shift into LEAL
+(ADDL x (SHLLconst [3] y)) => (LEAL8 x y)
+(ADDL x (SHLLconst [2] y)) => (LEAL4 x y)
+(ADDL x (SHLLconst [1] y)) => (LEAL2 x y)
+(ADDL x (ADDL y y)) => (LEAL2 x y)
+(ADDL x (ADDL x y)) => (LEAL2 y x)
+
+// combine ADDL/ADDLconst into LEAL1
+(ADDLconst [c] (ADDL x y)) => (LEAL1 [c] x y)
+(ADDL (ADDLconst [c] x) y) => (LEAL1 [c] x y)
+
+// fold ADDL into LEAL
+(ADDLconst [c] (LEAL [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(LEAL [c] {s} (ADDLconst [d] x)) && is32Bit(int64(c)+int64(d)) => (LEAL [c+d] {s} x)
+(ADDLconst [c] x:(SP)) => (LEAL [c] x) // so it is rematerializeable
+(LEAL [c] {s} (ADDL x y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+(ADDL x (LEAL [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEAL1 [c] {s} x y)
+
+// fold ADDLconst into LEALx
+(ADDLconst [c] (LEAL1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL1 [c+d] {s} x y)
+(ADDLconst [c] (LEAL2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL2 [c+d] {s} x y)
+(ADDLconst [c] (LEAL4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL4 [c+d] {s} x y)
+(ADDLconst [c] (LEAL8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEAL8 [c+d] {s} x y)
+(LEAL1 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL1 [c+d] {s} x y)
+(LEAL2 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL2 [c+d] {s} x y)
+(LEAL2 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEAL2 [c+2*d] {s} x y)
+(LEAL4 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL4 [c+d] {s} x y)
+(LEAL4 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEAL4 [c+4*d] {s} x y)
+(LEAL8 [c] {s} (ADDLconst [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEAL8 [c+d] {s} x y)
+(LEAL8 [c] {s} x (ADDLconst [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEAL8 [c+8*d] {s} x y)
+
+// fold shifts into LEALx
+(LEAL1 [c] {s} x (SHLLconst [1] y)) => (LEAL2 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [2] y)) => (LEAL4 [c] {s} x y)
+(LEAL1 [c] {s} x (SHLLconst [3] y)) => (LEAL8 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [1] y)) => (LEAL4 [c] {s} x y)
+(LEAL2 [c] {s} x (SHLLconst [2] y)) => (LEAL8 [c] {s} x y)
+(LEAL4 [c] {s} x (SHLLconst [1] y)) => (LEAL8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBLSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+(MOVBLZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWLSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+(MOVWLZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBLSX x)
+(MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWLSX x)
+
+// Fold extensions and ANDs together.
+(MOVBLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWLZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVBLSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWLSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWL(S|Z)X x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBL(S|Z)X x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea because if not all the uses of
+// the ADDLconst get eliminated, we still have to compute the ADDLconst and we now
+// have potentially two live values (ptr and (ADDLconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOV(L|W|B|SS|SD)load [off1] {sym} (ADDLconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)load [off1+off2] {sym} ptr mem)
+(MOV(L|W|B|SS|SD)store [off1] {sym} (ADDLconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {sym} ptr val mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDLconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDLconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem) && valoff1.canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+
+// Fold constants into stores.
+(MOVLstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVLconst [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(L|W|B)storeconst [sc] {s} (ADDLconst [off] ptr) mem) && sc.canAdd32(off) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAL into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+// Note: we turn off this merging for operations on globals when building
+// position-independent code (when Flag_shared is set).
+// PIC needs a spare register to load the PC into. Having the LEAL be
+// a separate instruction gives us that register. Having the LEAL be
+// a separate instruction also allows it to be CSEd (which is good because
+// it compiles to a thunk call).
+(MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1] {sym1} (LEAL [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD|BLSX|WLSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOV(L|W|B|SS|SD)store [off1] {sym1} (LEAL [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B|SS|SD)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(MOV(L|W|B)storeconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOV(L|W|B)storeconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+((ADD|SUB|MUL|AND|OR|XOR)Lload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ && valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+// Merge load/store to op
+((ADD|AND|OR|XOR|SUB|MUL)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|AND|OR|XOR|SUB|MUL)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+
+// fold LEALs together
+(LEAL [off1] {sym1} (LEAL [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAL into LEAL1
+(LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL1 into LEAL
+(LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL into LEAL[248]
+(LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[248] into LEAL
+(LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAL[1248] into LEAL[1248]. Only some such merges are possible.
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+2*int64(off2)) =>
+ (LEAL4 [off1+2*off2] {sym} x y)
+(LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y)) && is32Bit(int64(off1)+4*int64(off2)) =>
+ (LEAL8 [off1+4*off2] {sym} x y)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT_ULT) yes no) => (First no yes)
+(EQ (FlagLT_UGT) yes no) => (First no yes)
+(EQ (FlagGT_ULT) yes no) => (First no yes)
+(EQ (FlagGT_UGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT_ULT) yes no) => (First yes no)
+(NE (FlagLT_UGT) yes no) => (First yes no)
+(NE (FlagGT_ULT) yes no) => (First yes no)
+(NE (FlagGT_UGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT_ULT) yes no) => (First yes no)
+(LT (FlagLT_UGT) yes no) => (First yes no)
+(LT (FlagGT_ULT) yes no) => (First no yes)
+(LT (FlagGT_UGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT_ULT) yes no) => (First yes no)
+(LE (FlagLT_UGT) yes no) => (First yes no)
+(LE (FlagGT_ULT) yes no) => (First no yes)
+(LE (FlagGT_UGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT_ULT) yes no) => (First no yes)
+(GT (FlagLT_UGT) yes no) => (First no yes)
+(GT (FlagGT_ULT) yes no) => (First yes no)
+(GT (FlagGT_UGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT_ULT) yes no) => (First no yes)
+(GE (FlagLT_UGT) yes no) => (First no yes)
+(GE (FlagGT_ULT) yes no) => (First yes no)
+(GE (FlagGT_UGT) yes no) => (First yes no)
+
+(ULT (FlagEQ) yes no) => (First no yes)
+(ULT (FlagLT_ULT) yes no) => (First yes no)
+(ULT (FlagLT_UGT) yes no) => (First no yes)
+(ULT (FlagGT_ULT) yes no) => (First yes no)
+(ULT (FlagGT_UGT) yes no) => (First no yes)
+
+(ULE (FlagEQ) yes no) => (First yes no)
+(ULE (FlagLT_ULT) yes no) => (First yes no)
+(ULE (FlagLT_UGT) yes no) => (First no yes)
+(ULE (FlagGT_ULT) yes no) => (First yes no)
+(ULE (FlagGT_UGT) yes no) => (First no yes)
+
+(UGT (FlagEQ) yes no) => (First no yes)
+(UGT (FlagLT_ULT) yes no) => (First no yes)
+(UGT (FlagLT_UGT) yes no) => (First yes no)
+(UGT (FlagGT_ULT) yes no) => (First no yes)
+(UGT (FlagGT_UGT) yes no) => (First yes no)
+
+(UGE (FlagEQ) yes no) => (First yes no)
+(UGE (FlagLT_ULT) yes no) => (First no yes)
+(UGE (FlagLT_UGT) yes no) => (First yes no)
+(UGE (FlagGT_ULT) yes no) => (First no yes)
+(UGE (FlagGT_UGT) yes no) => (First yes no)
+
+// Absorb flag constants into SETxx ops.
+(SETEQ (FlagEQ)) => (MOVLconst [1])
+(SETEQ (FlagLT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagLT_UGT)) => (MOVLconst [0])
+(SETEQ (FlagGT_ULT)) => (MOVLconst [0])
+(SETEQ (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETNE (FlagEQ)) => (MOVLconst [0])
+(SETNE (FlagLT_ULT)) => (MOVLconst [1])
+(SETNE (FlagLT_UGT)) => (MOVLconst [1])
+(SETNE (FlagGT_ULT)) => (MOVLconst [1])
+(SETNE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETL (FlagEQ)) => (MOVLconst [0])
+(SETL (FlagLT_ULT)) => (MOVLconst [1])
+(SETL (FlagLT_UGT)) => (MOVLconst [1])
+(SETL (FlagGT_ULT)) => (MOVLconst [0])
+(SETL (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETLE (FlagEQ)) => (MOVLconst [1])
+(SETLE (FlagLT_ULT)) => (MOVLconst [1])
+(SETLE (FlagLT_UGT)) => (MOVLconst [1])
+(SETLE (FlagGT_ULT)) => (MOVLconst [0])
+(SETLE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETG (FlagEQ)) => (MOVLconst [0])
+(SETG (FlagLT_ULT)) => (MOVLconst [0])
+(SETG (FlagLT_UGT)) => (MOVLconst [0])
+(SETG (FlagGT_ULT)) => (MOVLconst [1])
+(SETG (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETGE (FlagEQ)) => (MOVLconst [1])
+(SETGE (FlagLT_ULT)) => (MOVLconst [0])
+(SETGE (FlagLT_UGT)) => (MOVLconst [0])
+(SETGE (FlagGT_ULT)) => (MOVLconst [1])
+(SETGE (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETB (FlagEQ)) => (MOVLconst [0])
+(SETB (FlagLT_ULT)) => (MOVLconst [1])
+(SETB (FlagLT_UGT)) => (MOVLconst [0])
+(SETB (FlagGT_ULT)) => (MOVLconst [1])
+(SETB (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETBE (FlagEQ)) => (MOVLconst [1])
+(SETBE (FlagLT_ULT)) => (MOVLconst [1])
+(SETBE (FlagLT_UGT)) => (MOVLconst [0])
+(SETBE (FlagGT_ULT)) => (MOVLconst [1])
+(SETBE (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETA (FlagEQ)) => (MOVLconst [0])
+(SETA (FlagLT_ULT)) => (MOVLconst [0])
+(SETA (FlagLT_UGT)) => (MOVLconst [1])
+(SETA (FlagGT_ULT)) => (MOVLconst [0])
+(SETA (FlagGT_UGT)) => (MOVLconst [1])
+
+(SETAE (FlagEQ)) => (MOVLconst [1])
+(SETAE (FlagLT_ULT)) => (MOVLconst [0])
+(SETAE (FlagLT_UGT)) => (MOVLconst [1])
+(SETAE (FlagGT_ULT)) => (MOVLconst [0])
+(SETAE (FlagGT_UGT)) => (MOVLconst [1])
+
+// Remove redundant *const ops
+(ADDLconst [c] x) && c==0 => x
+(SUBLconst [c] x) && c==0 => x
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDLconst [c] x) && c==-1 => x
+(ORLconst [c] x) && c==0 => x
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(c)==0 => (MOVLconst [0])
+
+// Convert constant subtracts to constant adds
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SARLconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARWconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(SARBconst [c] (MOVLconst [d])) => (MOVLconst [d>>uint64(c)])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+
+// generic simplifications
+// TODO: more of this
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBL x x) => (MOVLconst [0])
+(ANDL x x) => x
+(ORL x x) => x
+(XORL x x) => (MOVLconst [0])
+
+// checking AND against 0.
+(CMP(L|W|B)const l:(ANDL x y) [0]) && l.Uses==1 => (TEST(L|W|B) x y)
+(CMPLconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTLconst [c] x)
+(CMPWconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTWconst [int16(c)] x)
+(CMPBconst l:(ANDLconst [c] x) [0]) && l.Uses==1 => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMP(L|W|B)const x [0]) => (TEST(L|W|B) x x)
+
+// Convert LEAL1 back to ADDL if we can
+(LEAL1 [0] {nil} x y) => (ADDL x y)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
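+// For instance, binary.LittleEndian.Uint16(b) is uint16(b[0]) | uint16(b[1])<<8,
+// which the compiler lowers to the ORL-of-MOVBload-and-SHLLconst shape matched below.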
+(ORL x0:(MOVBload [i0] {s} p mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, s0)
+ => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+
+(ORL x0:(MOVBload [i] {s} p0 mem)
+ s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, s0)
+ => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+
+(ORL o0:(ORL
+ x0:(MOVWload [i0] {s} p mem)
+ s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem)))
+ s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
+
+(ORL o0:(ORL
+ x0:(MOVWload [i] {s} p0 mem)
+ s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem)))
+ s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && o0.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && sequentialAddresses(p1, p2, 1)
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
+
+// Combine constant stores into larger (unaligned) stores.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+
+(MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
+(MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
+
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && ValAndOff(a).Off() + 2 == ValAndOff(c).Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+
+(MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
+(MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
+ && x.Uses == 1
+ && a.Off() == c.Off()
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
+
+// Combine stores into larger (unaligned) stores.
+(MOVBstore [i] {s} p (SHR(W|L)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHR(W|L)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i] {s} p w mem)
+(MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w0 mem)
+
+(MOVBstore [i] {s} p1 (SHR(W|L)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHR(W|L)const [8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w0 mem)
+
+(MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w0 mem)
+
+(MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w mem)
+(MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w0 mem)
+
+// For PIC, break floating-point constant loading into two instructions so we have
+// a register to use for holding the address of the constant pool entry.
+(MOVSSconst [c]) && config.ctxt.Flag_shared => (MOVSSconst2 (MOVSSconst1 [c]))
+(MOVSDconst [c]) && config.ctxt.Flag_shared => (MOVSDconst2 (MOVSDconst1 [c]))
+
+(CMP(L|W|B) l:(MOV(L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(L|W|B)load {sym} [off] ptr x mem)
+(CMP(L|W|B) x l:(MOV(L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(L|W|B)const l:(MOV(L|W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+ @l.Block (CMP(L|W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/386Ops.go b/src/cmd/compile/internal/ssa/gen/386Ops.go
new file mode 100644
index 0000000..f4c89b0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386Ops.go
@@ -0,0 +1,588 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use the AH, BH, CH, or DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+
+// Suffixes encode the bit width of various instructions.
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../x86/reg.go
+var regNames386 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNames386) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNames386 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ si = buildReg("SI")
+ gp = buildReg("AX CX DX BX BP SI DI")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ callerSave = gp | fp
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsb}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11carry = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp21carry = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp1carry1 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp2carry1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsb, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax}, clobbers: dx}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp11mod = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{dx}, clobbers: ax}
+ gp21mul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsb, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsb, gpsp, 0}, outputs: gponly}
+
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+ )
+
+ var _386ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
+
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary ops
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", typ: "UInt32", clobberFlags: true}, // arg0 + auxint
+
+ {name: "ADDLcarry", argLength: 2, reg: gp21carry, asm: "ADDL", commutative: true, resultInArg0: true}, // arg0 + arg1, generates <carry,result> pair
+ {name: "ADDLconstcarry", argLength: 1, reg: gp11carry, asm: "ADDL", aux: "Int32", resultInArg0: true}, // arg0 + auxint, generates <carry,result> pair
+ {name: "ADCL", argLength: 3, reg: gp2carry1, asm: "ADCL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0+arg1+carry(arg2), where arg2 is flags
+ {name: "ADCLconst", argLength: 2, reg: gp1carry1, asm: "ADCL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0+auxint+carry(arg1), where arg1 is flags
+
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "SUBLcarry", argLength: 2, reg: gp21carry, asm: "SUBL", resultInArg0: true}, // arg0-arg1, generates <borrow,result> pair
+ {name: "SUBLconstcarry", argLength: 1, reg: gp11carry, asm: "SUBL", aux: "Int32", resultInArg0: true}, // arg0-auxint, generates <borrow,result> pair
+ {name: "SBBL", argLength: 3, reg: gp2carry1, asm: "SBBL", resultInArg0: true, clobberFlags: true}, // arg0-arg1-borrow(arg2), where arg2 is flags
+ {name: "SBBLconst", argLength: 2, reg: gp1carry1, asm: "SBBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0-auxint-borrow(arg1), where arg1 is flags
+
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+
+ {name: "HMULL", argLength: 2, reg: gp21hmul, commutative: true, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, commutative: true, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "MULLQU", argLength: 2, reg: gp21mul, commutative: true, asm: "MULL", clobberFlags: true}, // arg0 * arg1, high 32 in result[0], low 32 in result[1]
+
+ {name: "AVGLU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 32 result bits
+
+ // For DIVL, DIVW, MODL and MODW, AuxInt non-zero means that the divisor has been proved to be not -1.
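+ // (When the divisor is known not to be -1, the back end can skip its usual guard
+ // against the overflowing MinInt32 / -1 case; this is the point of the AuxInt bit.)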
+ {name: "DIVL", argLength: 2, reg: gp11div, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp11div, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVLU", argLength: 2, reg: gp11div, asm: "DIVL", clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp11div, asm: "DIVW", clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODL", argLength: 2, reg: gp11mod, asm: "IDIVL", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp11mod, asm: "IDIVW", aux: "Bool", clobberFlags: true}, // arg0 % arg1
+ {name: "MODLU", argLength: 2, reg: gp11mod, asm: "DIVL", clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp11mod, asm: "DIVW", clobberFlags: true}, // arg0 % arg1
+
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
+
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+ // Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
+
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 32
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-15
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-7
+
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 32
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-15
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-7
+
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int16", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ // binary-op with a memory source operand
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULLload", argLength: 3, reg: gp21load, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ // binary-op with an indexed memory source operand
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "MULLloadidx4", argLength: 4, reg: gp21loadidx, asm: "IMULL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 * tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+arg2*4+auxint+aux, arg3 = mem
+
+ // unary ops
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0
+
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+ {name: "BSFW", argLength: 1, reg: gp11, asm: "BSFW", clobberFlags: true}, // arg0 # of low-order zeroes ; undef if zero
+
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+ {name: "BSRW", argLength: 1, reg: gp11, asm: "BSRW", clobberFlags: true}, // arg0 # of high-order zeroes ; undef if zero
+
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+ {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"}, // sqrt(arg0), float32
+
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBLSX", argLength: 1, reg: gp11, asm: "MOVBLSX"}, // sign extend arg0 from int8 to int32
+ {name: "MOVBLZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int32
+ {name: "MOVWLSX", argLength: 1, reg: gp11, asm: "MOVWLSX"}, // sign extend arg0 from int16 to int32
+ {name: "MOVWLZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int32
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAL", argLength: 1, reg: gp11sb, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAL{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBLSXload", argLength: 2, reg: gpload, asm: "MOVBLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWLSXload", argLength: 2, reg: gpload, asm: "MOVWLSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int32
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // direct binary-op on memory (read-modify-write)
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+
+ // direct binary-op on indexed memory (read-modify-write)
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+arg1*4+auxint+aux) ^= arg2, arg3=mem
+
+ // direct binary-op on memory with a constant (read-modify-write)
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ // direct binary-op on indexed memory with a constant (read-modify-write)
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+arg1*4+ValAndOff(AuxInt).Off()+aux, arg2=mem
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", aux: "SymOff", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", aux: "SymOff", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", aux: "SymOff", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ // TODO: add size-mismatched indexed loads, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
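+ // As an illustrative sketch (see ValAndOff in the ssa package): the value occupies
+ // the high 32 bits of AuxInt and the offset the low 32 bits, roughly
+ //   makeValAndOff(val, off) == int64(val)<<32 | int64(uint32(off))
+ // with Val() and Off() undoing that packing.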
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ...
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = value to store (will always be zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ // Note: CX is only clobbered when dynamically linking.
+ },
+ faultOnNilArg0: true,
+ },
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 4-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = offset from duffcopy symbol to call
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI CX"), // uses CX as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 4-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSL",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // (InvertFlags (CMPL a b)) == (CMPL b a)
+ // So if we want (SETL (CMPL a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPL b a))) instead.
+ // Rewrites will convert this to (SETG (CMPL b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // That is, if f calls g and g in turn "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+ // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary, but may clobber others.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), ax}, clobbers: callerSave &^ gp}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, dx, bx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, cx, dx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{si, ax, cx}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
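+ // For example, a compare of the known constants 1 and 2 can be folded directly to
+ // FlagLT_ULT, since 1 < 2 both as signed and as unsigned values.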
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned <
+ {name: "FlagGT_ULT"}, // signed > and unsigned >
+
+ // Special ops for PIC floating-point constants.
+ // MOVSXconst1 (where X stands for S or D) loads the address of the constant-pool entry into a register.
+ // MOVSXconst2 loads the constant from that address.
+ // MOVSXconst1 returns a pointer, but we type it as uint32 because it can never point to the Go heap.
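+ // As a sketch of the intended use (under the shared/PIC build flag in 386.rules),
+ // a float constant c lowers to roughly (MOVSSconst2 (MOVSSconst1 [c])):
+ // first materialize the pool address, then load the value through it.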
+ {name: "MOVSSconst1", reg: gp01, typ: "UInt32", aux: "Float32"},
+ {name: "MOVSDconst1", reg: gp01, typ: "UInt32", aux: "Float64"},
+ {name: "MOVSSconst2", argLength: 1, reg: gpfp, asm: "MOVSS"},
+ {name: "MOVSDconst2", argLength: 1, reg: gpfp, asm: "MOVSD"},
+ }
+
+ var _386blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "386",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../x86/ssa.go",
+ ops: _386ops,
+ blocks: _386blocks,
+ regnames: regNames386,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/386splitload.rules b/src/cmd/compile/internal/ssa/gen/386splitload.rules
new file mode 100644
index 0000000..29d4f8c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/386splitload.rules
@@ -0,0 +1,11 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// See the top of AMD64splitload.rules for discussion of these rules.
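+// As a rough summary of that discussion: these rules let the compiler split a merged
+// compare-with-load op back into a plain load followed by a register compare when the
+// flags result has to be recomputed away from the original load.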
+
+(CMP(L|W|B)load {sym} [off] ptr x mem) => (CMP(L|W|B) (MOV(L|W|B)load {sym} [off] ptr mem) x)
+
+(CMPLconstload {sym} [vo] ptr mem) => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64.rules b/src/cmd/compile/internal/ssa/gen/AMD64.rules
new file mode 100644
index 0000000..47a6af0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64.rules
@@ -0,0 +1,2247 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8) ...) => (ADD(Q|L|L|L) ...)
+(AddPtr ...) => (ADDQ ...)
+(Add(32|64)F ...) => (ADDS(S|D) ...)
+
+(Sub(64|32|16|8) ...) => (SUB(Q|L|L|L) ...)
+(SubPtr ...) => (SUBQ ...)
+(Sub(32|64)F ...) => (SUBS(S|D) ...)
+
+(Mul(64|32|16|8) ...) => (MUL(Q|L|L|L) ...)
+(Mul(32|64)F ...) => (MULS(S|D) ...)
+
+(Select0 (Mul64uover x y)) => (Select0 <typ.UInt64> (MULQU x y))
+(Select0 (Mul32uover x y)) => (Select0 <typ.UInt32> (MULLU x y))
+(Select1 (Mul(64|32)uover x y)) => (SETO (Select1 <types.TypeFlags> (MUL(Q|L)U x y)))
+
+(Hmul(64|32) ...) => (HMUL(Q|L) ...)
+(Hmul(64|32)u ...) => (HMUL(Q|L)U ...)
+
+(Div(64|32|16) [a] x y) => (Select0 (DIV(Q|L|W) [a] x y))
+(Div8 x y) => (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Div(64|32|16)u x y) => (Select0 (DIV(Q|L|W)U x y))
+(Div8u x y) => (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+(Div(32|64)F ...) => (DIVS(S|D) ...)
+
+(Select0 (Add64carry x y c)) =>
+ (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Add64carry x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+(Select0 (Sub64borrow x y c)) =>
+ (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+(Select1 (Sub64borrow x y c)) =>
+ (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+
+// Optimize ADCQ and friends
+(ADCQ x (MOVQconst [c]) carry) && is32Bit(c) => (ADCQconst x [int32(c)] carry)
+(ADCQ x y (FlagEQ)) => (ADDQcarry x y)
+(ADCQconst x [c] (FlagEQ)) => (ADDQconstcarry x [c])
+(ADDQcarry x (MOVQconst [c])) && is32Bit(c) => (ADDQconstcarry x [int32(c)])
+(SBBQ x (MOVQconst [c]) borrow) && is32Bit(c) => (SBBQconst x [int32(c)] borrow)
+(SBBQ x y (FlagEQ)) => (SUBQborrow x y)
+(SBBQconst x [c] (FlagEQ)) => (SUBQconstborrow x [c])
+(SUBQborrow x (MOVQconst [c])) && is32Bit(c) => (SUBQconstborrow x [int32(c)])
+(Select1 (NEGLflags (MOVQconst [0]))) => (FlagEQ)
+(Select1 (NEGLflags (NEGQ (SBBQcarrymask x)))) => x
+
+
+(Mul64uhilo ...) => (MULQU2 ...)
+(Div128u ...) => (DIVQU2 ...)
+
+(Avg64u ...) => (AVGQU ...)
+
+(Mod(64|32|16) [a] x y) => (Select1 (DIV(Q|L|W) [a] x y))
+(Mod8 x y) => (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+(Mod(64|32|16)u x y) => (Select1 (DIV(Q|L|W)U x y))
+(Mod8u x y) => (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+
+(And(64|32|16|8) ...) => (AND(Q|L|L|L) ...)
+(Or(64|32|16|8) ...) => (OR(Q|L|L|L) ...)
+(Xor(64|32|16|8) ...) => (XOR(Q|L|L|L) ...)
+(Com(64|32|16|8) ...) => (NOT(Q|L|L|L) ...)
+
+(Neg(64|32|16|8) ...) => (NEG(Q|L|L|L) ...)
+(Neg32F x) => (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+(Neg64F x) => (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+
+// Lowering boolean ops
+(AndB ...) => (ANDL ...)
+(OrB ...) => (ORL ...)
+(Not x) => (XORLconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr) && is32Bit(off) => (ADDQconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDQ (MOVQconst [off]) ptr)
+
+// Lowering other arithmetic
+(Ctz64 x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
+(Ctz32 x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz64 <t> x) && buildcfg.GOAMD64 < 3 => (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+(Ctz32 x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+(Ctz16 x) => (BSFL (BTSLconst <typ.UInt32> [16] x))
+(Ctz8 x) => (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+
+(Ctz64NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTQ x)
+(Ctz32NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz16NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz8NonZero x) && buildcfg.GOAMD64 >= 3 => (TZCNTL x)
+(Ctz64NonZero x) && buildcfg.GOAMD64 < 3 => (Select0 (BSFQ x))
+(Ctz32NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+(Ctz16NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+(Ctz8NonZero x) && buildcfg.GOAMD64 < 3 => (BSFL x)
+
+// BitLen64 of a 64 bit value x requires checking whether x == 0, since BSRQ is undefined when x == 0.
+// However, for zero-extended values, we can cheat a bit, and calculate
+// BSR(x<<1 + 1), which is guaranteed to be non-zero, and which conveniently
+// places the index of the highest set bit where we want it.
+(BitLen64 <t> x) => (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+(BitLen32 x) => (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+(BitLen16 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+(BitLen8 x) => (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
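+// A quick worked example of the trick above: for x = 5 (0b101) the BitLen is 3;
+// the LEAQ1/LEAL1 form computes 2*x+1 = 0b1011, and BSR of that value is 3, so no
+// explicit zero check is needed (x = 0 becomes 1, whose BSR is 0).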
+
+(Bswap(64|32) ...) => (BSWAP(Q|L) ...)
+
+(PopCount(64|32) ...) => (POPCNT(Q|L) ...)
+(PopCount16 x) => (POPCNTL (MOVWQZX <typ.UInt32> x))
+(PopCount8 x) => (POPCNTL (MOVBQZX <typ.UInt32> x))
+
+(Sqrt ...) => (SQRTSD ...)
+(Sqrt32 ...) => (SQRTSS ...)
+
+(RoundToEven x) => (ROUNDSD [0] x)
+(Floor x) => (ROUNDSD [1] x)
+(Ceil x) => (ROUNDSD [2] x)
+(Trunc x) => (ROUNDSD [3] x)
+
+(FMA x y z) => (VFMADD231SD z x y)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to16 ...) => (MOVBQSX ...)
+(SignExt8to32 ...) => (MOVBQSX ...)
+(SignExt8to64 ...) => (MOVBQSX ...)
+(SignExt16to32 ...) => (MOVWQSX ...)
+(SignExt16to64 ...) => (MOVWQSX ...)
+(SignExt32to64 ...) => (MOVLQSX ...)
+
+(ZeroExt8to16 ...) => (MOVBQZX ...)
+(ZeroExt8to32 ...) => (MOVBQZX ...)
+(ZeroExt8to64 ...) => (MOVBQZX ...)
+(ZeroExt16to32 ...) => (MOVWQZX ...)
+(ZeroExt16to64 ...) => (MOVWQZX ...)
+(ZeroExt32to64 ...) => (MOVLQZX ...)
+
+(Slicemask <t> x) => (SARQconst (NEGQ <t> x) [63])
+
+(SpectreIndex <t> x y) => (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+(SpectreSliceIndex <t> x y) => (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CVTSL2SS ...)
+(Cvt32to64F ...) => (CVTSL2SD ...)
+(Cvt64to32F ...) => (CVTSQ2SS ...)
+(Cvt64to64F ...) => (CVTSQ2SD ...)
+
+(Cvt32Fto32 ...) => (CVTTSS2SL ...)
+(Cvt32Fto64 ...) => (CVTTSS2SQ ...)
+(Cvt64Fto32 ...) => (CVTTSD2SL ...)
+(Cvt64Fto64 ...) => (CVTTSD2SQ ...)
+
+(Cvt32Fto64F ...) => (CVTSS2SD ...)
+(Cvt64Fto32F ...) => (CVTSD2SS ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = (arg << shift) & (shift >= argbits ? 0 : 0xffffffffffffffff)
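+// The mask comes from the flags: CMPQconst y [64] sets the carry flag exactly when
+// y < 64 (unsigned), and SBBQcarrymask then yields all ones in that case and zero
+// otherwise, so the AND keeps the shifted value only for in-range shift amounts.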
+(Lsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Lsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Lsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLQ x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SHLL x y)
+
+(Rsh64Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMP(Q|L|W|B)const y [64])))
+(Rsh32Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [32])))
+(Rsh16Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [16])))
+(Rsh8Ux(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMP(Q|L|W|B)const y [8])))
+
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRQ x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRL x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRW x y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SHRB x y)
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to -1 (all ones) if the shift value is >= width.
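+// Concretely: when y >= width the SBBcarrymask is zero, its NOT is all ones, and ORing
+// that into y forces an effective shift amount of 63/31, so SAR fills the result with
+// copies of the sign bit, giving the required 0 or -1 answer.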
+(Rsh64x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARQ <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [64])))))
+(Rsh32x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARL <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [32])))))
+(Rsh16x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARW <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [16])))))
+(Rsh8x(64|32|16|8) <t> x y) && !shiftIsBounded(v) => (SARB <t> x (OR(Q|L|L|L) <y.Type> y (NOT(Q|L|L|L) <y.Type> (SBB(Q|L|L|L)carrymask <y.Type> (CMP(Q|L|W|B)const y [8])))))
+
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SARQ x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SARL x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SARW x y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SARB x y)
+
+// Lowering integer comparisons
+(Less(64|32|16|8) x y) => (SETL (CMP(Q|L|W|B) x y))
+(Less(64|32|16|8)U x y) => (SETB (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8) x y) => (SETLE (CMP(Q|L|W|B) x y))
+(Leq(64|32|16|8)U x y) => (SETBE (CMP(Q|L|W|B) x y))
+(Eq(Ptr|64|32|16|8|B) x y) => (SETEQ (CMP(Q|Q|L|W|B|B) x y))
+(Neq(Ptr|64|32|16|8|B) x y) => (SETNE (CMP(Q|Q|L|W|B|B) x y))
+
+// Lowering floating point comparisons
+// Note: the Go assembler gets the UCOMISx operand order wrong, but it is right here,
+// and the operands are reversed when generating assembly language.
+(Eq(32|64)F x y) => (SETEQF (UCOMIS(S|D) x y))
+(Neq(32|64)F x y) => (SETNEF (UCOMIS(S|D) x y))
+// Use SETGF/SETGEF with reversed operands to dodge NaN case.
+(Less(32|64)F x y) => (SETGF (UCOMIS(S|D) y x))
+(Leq(32|64)F x y) => (SETGEF (UCOMIS(S|D) y x))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVQload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) => (MOVLload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || is8BitInt(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVSSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVSDload ptr mem)
+
+// Lowering stores
+// These more-specific FP versions of the Store pattern should come first.
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVSDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVSSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 => (MOVQstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVLstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] dst src mem) => (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] dst src mem) => (MOVLstore dst (MOVLload src mem) mem)
+(Move [8] dst src mem) => (MOVQstore dst (MOVQload src mem) mem)
+(Move [16] dst src mem) && config.useSSE => (MOVOstore dst (MOVOload src mem) mem)
+(Move [16] dst src mem) && !config.useSSE =>
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+
+(Move [32] dst src mem) =>
+ (Move [16]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [48] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [16])
+ (OffPtr <src.Type> src [16])
+ (Move [16] dst src mem))
+
+(Move [64] dst src mem) && config.useSSE =>
+ (Move [32]
+ (OffPtr <dst.Type> dst [32])
+ (OffPtr <src.Type> src [32])
+ (Move [32] dst src mem))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVLstore [3] dst (MOVLload [3] src mem)
+ (MOVLstore dst (MOVLload src mem) mem))
+(Move [9] dst src mem) =>
+ (MOVBstore [8] dst (MOVBload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [10] dst src mem) =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [12] dst src mem) =>
+ (MOVLstore [8] dst (MOVLload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem) && s == 11 || s >= 13 && s <= 15 =>
+ (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem)
+ (MOVQstore dst (MOVQload src mem) mem))
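+// For example, s = 13 is done as two 8-byte copies, one at offset 0 and one at offset 5;
+// the two copies overlap in the middle, which is harmless and avoids a byte-by-byte tail.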
+
+// Adjust moves to be a multiple of 16 bytes.
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore dst (MOVQload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVOstore dst (MOVOload src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (MOVQstore [8] dst (MOVQload [8] src mem)
+ (MOVQstore dst (MOVQload src mem) mem)))
+
+// Medium copying uses Duff's device.
+(Move [s] dst src mem)
+ && s > 64 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [s] dst src mem)
+
+// Large copying uses REP MOVSQ.
+(Move [s] dst src mem) && (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s) =>
+ (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [2] destptr mem) => (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [4] destptr mem) => (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [8] destptr mem) => (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
+
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,4)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVLstoreconst [makeValAndOff(0,3)] destptr
+ (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 && !config.useSSE =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [24] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [32] destptr mem) && !config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,24)] destptr
+ (MOVQstoreconst [makeValAndOff(0,16)] destptr
+ (MOVQstoreconst [makeValAndOff(0,8)] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+(Zero [s] destptr mem) && s > 8 && s < 16 && config.useSSE =>
+ (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr
+ (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+
+// Adjust zeros to be a multiple of 16 bytes.
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [s] destptr mem) && s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE =>
+ (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16])
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+
+(Zero [16] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
+(Zero [32] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+(Zero [48] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,32)] destptr
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
+(Zero [64] destptr mem) && config.useSSE =>
+ (MOVOstoreconst [makeValAndOff(0,48)] destptr
+ (MOVOstoreconst [makeValAndOff(0,32)] destptr
+ (MOVOstoreconst [makeValAndOff(0,16)] destptr
+ (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
+
+// Medium zeroing uses Duff's device.
+(Zero [s] destptr mem)
+ && s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [s] destptr mem)
+
+// Large zeroing uses REP STOSQ.
+(Zero [s] destptr mem)
+ && (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32))
+ && s%8 == 0 =>
+ (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+
+// Lowering constants
+(Const8 [c]) => (MOVLconst [int32(c)])
+(Const16 [c]) => (MOVLconst [int32(c)])
+(Const32 ...) => (MOVLconst ...)
+(Const64 ...) => (MOVQconst ...)
+(Const32F ...) => (MOVSSconst ...)
+(Const64F ...) => (MOVSDconst ...)
+(ConstNil ) => (MOVQconst [0])
+(ConstBool [c]) => (MOVLconst [b2i32(c)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Lowering conditional moves
+// If the condition is a SETxx, we can just run a CMOV from the comparison that was
+// setting the flags.
+// Legend: HI=unsigned ABOVE, CS=unsigned BELOW, CC=unsigned ABOVE EQUAL, LS=unsigned BELOW EQUAL
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && (is64BitInt(t) || isPtr(t))
+ => (CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is32BitInt(t)
+ => (CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+(CondSelect <t> x y (SET(EQ|NE|L|G|LE|GE|A|B|AE|BE|EQF|NEF|GF|GEF) cond)) && is16BitInt(t)
+ => (CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS|EQF|NEF|GTF|GEF) y x cond)
+
+// If the condition does not set the flags, we need to generate a comparison.
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 1
+ => (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 2
+ => (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 4
+ => (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ => (CMOVQNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ => (CMOVLNE y x (CMPQconst [0] check))
+(CondSelect <t> x y check) && !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ => (CMOVWNE y x (CMPQconst [0] check))
+
+// Absorb InvertFlags
+(CMOVQ(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVQ(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVL(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVL(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+(CMOVW(EQ|NE|LT|GT|LE|GE|HI|CS|CC|LS) x y (InvertFlags cond))
+ => (CMOVW(EQ|NE|GT|LT|GE|LE|CS|HI|LS|CC) x y cond)
+
+// Absorb constants generated during lower
+(CMOV(QEQ|QLE|QGE|QCC|QLS|LEQ|LLE|LGE|LCC|LLS|WEQ|WLE|WGE|WCC|WLS) _ x (FlagEQ)) => x
+(CMOV(QNE|QLT|QGT|QCS|QHI|LNE|LLT|LGT|LCS|LHI|WNE|WLT|WGT|WCS|WHI) y _ (FlagEQ)) => y
+(CMOV(QNE|QGT|QGE|QHI|QCC|LNE|LGT|LGE|LHI|LCC|WNE|WGT|WGE|WHI|WCC) _ x (FlagGT_UGT)) => x
+(CMOV(QEQ|QLE|QLT|QLS|QCS|LEQ|LLE|LLT|LLS|LCS|WEQ|WLE|WLT|WLS|WCS) y _ (FlagGT_UGT)) => y
+(CMOV(QNE|QGT|QGE|QLS|QCS|LNE|LGT|LGE|LLS|LCS|WNE|WGT|WGE|WLS|WCS) _ x (FlagGT_ULT)) => x
+(CMOV(QEQ|QLE|QLT|QHI|QCC|LEQ|LLE|LLT|LHI|LCC|WEQ|WLE|WLT|WHI|WCC) y _ (FlagGT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QCS|QLS|LNE|LLT|LLE|LCS|LLS|WNE|WLT|WLE|WCS|WLS) _ x (FlagLT_ULT)) => x
+(CMOV(QEQ|QGT|QGE|QHI|QCC|LEQ|LGT|LGE|LHI|LCC|WEQ|WGT|WGE|WHI|WCC) y _ (FlagLT_ULT)) => y
+(CMOV(QNE|QLT|QLE|QHI|QCC|LNE|LLT|LLE|LHI|LCC|WNE|WLT|WLE|WHI|WCC) _ x (FlagLT_UGT)) => x
+(CMOV(QEQ|QGT|QGE|QCS|QLS|LEQ|LGT|LGE|LCS|LLS|WEQ|WGT|WGE|WCS|WLS) y _ (FlagLT_UGT)) => y
+
+// Miscellaneous
+(IsNonNil p) => (SETNE (TESTQ p p))
+(IsInBounds idx len) => (SETB (CMPQ idx len))
+(IsSliceInBounds idx len) => (SETBE (CMPQ idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG mem) && v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal => (LoweredGetG mem) // Only lower in the old ABI; in the new ABI we have a G register.

+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+
+(HasCPUFeature {s}) => (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
+(Addr {sym} base) => (LEAQ {sym} base)
+(LocalAddr {sym} base _) => (LEAQ {sym} base)
+
+(MOVBstore [off] {sym} ptr y:(SETL x) mem) && y.Uses == 1 => (SETLstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETLE x) mem) && y.Uses == 1 => (SETLEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETG x) mem) && y.Uses == 1 => (SETGstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETGE x) mem) && y.Uses == 1 => (SETGEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETEQ x) mem) && y.Uses == 1 => (SETEQstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETNE x) mem) && y.Uses == 1 => (SETNEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETB x) mem) && y.Uses == 1 => (SETBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETBE x) mem) && y.Uses == 1 => (SETBEstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETA x) mem) && y.Uses == 1 => (SETAstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr y:(SETAE x) mem) && y.Uses == 1 => (SETAEstore [off] {sym} ptr x mem)
+
+// block rewrites
+(If (SETL cmp) yes no) => (LT cmp yes no)
+(If (SETLE cmp) yes no) => (LE cmp yes no)
+(If (SETG cmp) yes no) => (GT cmp yes no)
+(If (SETGE cmp) yes no) => (GE cmp yes no)
+(If (SETEQ cmp) yes no) => (EQ cmp yes no)
+(If (SETNE cmp) yes no) => (NE cmp yes no)
+(If (SETB cmp) yes no) => (ULT cmp yes no)
+(If (SETBE cmp) yes no) => (ULE cmp yes no)
+(If (SETA cmp) yes no) => (UGT cmp yes no)
+(If (SETAE cmp) yes no) => (UGE cmp yes no)
+(If (SETO cmp) yes no) => (OS cmp yes no)
+
+// Special case for floating point - LF/LEF not generated
+(If (SETGF cmp) yes no) => (UGT cmp yes no)
+(If (SETGEF cmp) yes no) => (UGE cmp yes no)
+(If (SETEQF cmp) yes no) => (EQF cmp yes no)
+(If (SETNEF cmp) yes no) => (NEF cmp yes no)
+
+(If cond yes no) => (NE (TESTB cond cond) yes no)
+
+// Atomic loads. Other than preserving their ordering with respect to other loads, nothing special here.
+(AtomicLoad8 ptr mem) => (MOVBatomicload ptr mem)
+(AtomicLoad32 ptr mem) => (MOVLatomicload ptr mem)
+(AtomicLoad64 ptr mem) => (MOVQatomicload ptr mem)
+(AtomicLoadPtr ptr mem) => (MOVQatomicload ptr mem)
+
+// Atomic stores. We use XCHG to prevent the hardware reordering a subsequent load.
+// TODO: most runtime uses of atomic stores don't need that property. Use normal stores for those?
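+// (For context: an XCHG with a memory operand is implicitly LOCKed on x86, so it
+// also acts as a full barrier; that is what makes it usable as an atomic store.)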
+(AtomicStore8 ptr val mem) => (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+(AtomicStore32 ptr val mem) => (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+(AtomicStore64 ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+(AtomicStorePtrNoWB ptr val mem) => (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (XCHGL val ptr mem)
+(AtomicExchange64 ptr val mem) => (XCHGQ val ptr mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (XADDLlock val ptr mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (XADDQlock val ptr mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDL val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADDQ val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (CMPXCHGLlock ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (CMPXCHGQlock ptr old new_ mem)
+
+// Atomic memory updates.
+(AtomicAnd8 ptr val mem) => (ANDBlock ptr val mem)
+(AtomicAnd32 ptr val mem) => (ANDLlock ptr val mem)
+(AtomicOr8 ptr val mem) => (ORBlock ptr val mem)
+(AtomicOr32 ptr val mem) => (ORLlock ptr val mem)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Fold boolean tests into blocks
+(NE (TESTB (SETL cmp) (SETL cmp)) yes no) => (LT cmp yes no)
+(NE (TESTB (SETLE cmp) (SETLE cmp)) yes no) => (LE cmp yes no)
+(NE (TESTB (SETG cmp) (SETG cmp)) yes no) => (GT cmp yes no)
+(NE (TESTB (SETGE cmp) (SETGE cmp)) yes no) => (GE cmp yes no)
+(NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no) => (EQ cmp yes no)
+(NE (TESTB (SETNE cmp) (SETNE cmp)) yes no) => (NE cmp yes no)
+(NE (TESTB (SETB cmp) (SETB cmp)) yes no) => (ULT cmp yes no)
+(NE (TESTB (SETBE cmp) (SETBE cmp)) yes no) => (ULE cmp yes no)
+(NE (TESTB (SETA cmp) (SETA cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETAE cmp) (SETAE cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETO cmp) (SETO cmp)) yes no) => (OS cmp yes no)
+
+// Unsigned comparisons to 0/1
+(ULT (TEST(Q|L|W|B) x x) yes no) => (First no yes)
+(UGE (TEST(Q|L|W|B) x x) yes no) => (First yes no)
+(SETB (TEST(Q|L|W|B) x x)) => (ConstBool [false])
+(SETAE (TEST(Q|L|W|B) x x)) => (ConstBool [true])
+
+// x & 1 != 0 -> x & 1
+(SETNE (TEST(B|W)const [1] x)) => (AND(L|L)const [1] x)
+(SETB (BT(L|Q)const [0] x)) => (AND(L|Q)const [1] x)
+
+// Recognize bit tests: a&(1<<b) != 0 for b suitably bounded
+// Note that BTx instructions use the carry flag, so we need to convert tests of the
+// zero flag into tests of the carry flag.
+// ULT and SETB check the carry flag; they are identical to CS and SETCS. Same, mutatis
+// mutandis, for UGE and SETAE, and CC and SETCC.
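+// For example (illustrative), a Go test such as
+//	if a&(1<<b) != 0 { ... }
+// for 64-bit a matches the TESTQ/SHLQ rule below and becomes a BTQ followed by a
+// branch on the carry flag.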
+((NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => ((ULT|UGE) (BTL x y))
+((NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => ((ULT|UGE) (BTQ x y))
+((NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTLconst [int8(log32(c))] x))
+((NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => ((ULT|UGE) (BTQconst [int8(log32(c))] x))
+((NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => ((ULT|UGE) (BTQconst [int8(log64(c))] x))
+(SET(NE|EQ) (TESTL (SHLL (MOVLconst [1]) x) y)) => (SET(B|AE) (BTL x y))
+(SET(NE|EQ) (TESTQ (SHLQ (MOVQconst [1]) x) y)) => (SET(B|AE) (BTQ x y))
+(SET(NE|EQ) (TESTLconst [c] x)) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTLconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQconst [c] x)) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE) (BTQconst [int8(log32(c))] x))
+(SET(NE|EQ) (TESTQ (MOVQconst [c]) x)) && isUint64PowerOfTwo(c)
+ => (SET(B|AE) (BTQconst [int8(log64(c))] x))
+// SET..store variant
+(SET(NE|EQ)store [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTL x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ => (SET(B|AE)store [off] {sym} ptr (BTQ x y) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTLconst [c] x) mem) && isUint32PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQconst [c] x) mem) && isUint64PowerOfTwo(int64(c))
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem) && isUint64PowerOfTwo(c)
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+
+// Handle bit-testing in the form (a>>b)&1 != 0 by building the above rules
+// and further combining shifts.
+(BT(Q|L)const [c] (SHRQconst [d] x)) && (c+d)<64 => (BTQconst [c+d] x)
+(BT(Q|L)const [c] (SHLQconst [d] x)) && c>d => (BT(Q|L)const [c-d] x)
+(BT(Q|L)const [0] s:(SHRQ x y)) => (BTQ y x)
+(BTLconst [c] (SHRLconst [d] x)) && (c+d)<32 => (BTLconst [c+d] x)
+(BTLconst [c] (SHLLconst [d] x)) && c>d => (BTLconst [c-d] x)
+(BTLconst [0] s:(SHRL x y)) => (BTL y x)
+
+// Rewrite a & 1 != 1 into a & 1 == 0.
+// Among other things, this lets us turn (a>>b)&1 != 1 into a bit test.
+(SET(NE|EQ) (CMPLconst [1] s:(ANDLconst [1] _))) => (SET(EQ|NE) (CMPLconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPLconst [0] s) mem)
+(SET(NE|EQ) (CMPQconst [1] s:(ANDQconst [1] _))) => (SET(EQ|NE) (CMPQconst [0] s))
+(SET(NE|EQ)store [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem) => (SET(EQ|NE)store [off] {sym} ptr (CMPQconst [0] s) mem)
+
+// Recognize bit setting (a |= 1<<b) and toggling (a ^= 1<<b)
+(OR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTS(Q|L) x y)
+(XOR(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y) x) => (BTC(Q|L) x y)
+
+// Convert ORconst into BTS (and XORconst into BTC) when that makes the code smaller;
+// the boundary is a constant of 128 (ORL $40,AX is 3 bytes, ORL $80,AX is 6 bytes).
+((ORQ|XORQ)const [c] x) && isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Qconst [int8(log32(c))] x)
+((ORL|XORL)const [c] x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Lconst [int8(log32(c))] x)
+((ORQ|XORQ) (MOVQconst [c]) x) && isUint64PowerOfTwo(c) && uint64(c) >= 128
+ => (BT(S|C)Qconst [int8(log64(c))] x)
+((ORL|XORL) (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ => (BT(S|C)Lconst [int8(log32(c))] x)
+
+// Recognize bit clearing: a &^= 1<<b
+(AND(Q|L) (NOT(Q|L) (SHL(Q|L) (MOV(Q|L)const [1]) y)) x) => (BTR(Q|L) x y)
+(ANDN(Q|L) x (SHL(Q|L) (MOV(Q|L)const [1]) y)) => (BTR(Q|L) x y)
+(ANDQconst [c] x) && isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRQconst [int8(log32(^c))] x)
+(ANDLconst [c] x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRLconst [int8(log32(^c))] x)
+(ANDQ (MOVQconst [c]) x) && isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+ => (BTRQconst [int8(log64(^c))] x)
+(ANDL (MOVLconst [c]) x) && isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ => (BTRLconst [int8(log32(^c))] x)
+
+// Special-case bit patterns on the first/last bit.
+// generic.rules changes ANDs of high-part/low-part masks into a couple of shifts,
+// for instance:
+//    x & 0xFFFF0000 -> (x >> 16) << 16
+//    x & 0x80000000 -> (x >> 31) << 31
+//
+// When the mask is just one bit (like the second example above), this conflicts
+// with the rules above that detect bit-testing / bit-clearing of the first/last bit.
+// We thus special-case these patterns by matching the shift sequences directly.
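+// For example (illustrative), a 64-bit test x & (1<<63) != 0 reaches this point as
+// a TESTQ of (SHLQconst [63] (SHRQconst [63] x)) against itself; the testing rules
+// below turn it into (BTQconst [63] x) plus a carry-flag check.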
+
+// Special case resetting first/last bit
+(SHL(L|Q)const [1] (SHR(L|Q)const [1] x))
+ => (BTR(L|Q)const [0] x)
+(SHRLconst [1] (SHLLconst [1] x))
+ => (BTRLconst [31] x)
+(SHRQconst [1] (SHLQconst [1] x))
+ => (BTRQconst [63] x)
+
+// Special case testing first/last bit (with double-shift generated by generic.rules)
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [0] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [0] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [0] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [0] x) mem)
+
+// Special-case manually testing last bit with "a>>63 != 0" (without "&1")
+((SETNE|SETEQ|NE|EQ) (TESTQ z1:(SHRQconst [63] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTQconst [63] x))
+((SETNE|SETEQ|NE|EQ) (TESTL z1:(SHRLconst [31] x) z2)) && z1==z2
+ => ((SETB|SETAE|ULT|UGE) (BTLconst [31] x))
+(SET(NE|EQ)store [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTQconst [63] x) mem)
+(SET(NE|EQ)store [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem) && z1==z2
+ => (SET(B|AE)store [off] {sym} ptr (BTLconst [31] x) mem)
+
+// Fold combinations of bit ops on same bit. An example is math.Copysign(c,-1)
+(BTS(Q|L)const [c] (BTR(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
+(BTS(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTS(Q|L)const [c] x)
+(BTR(Q|L)const [c] (BTS(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+(BTR(Q|L)const [c] (BTC(Q|L)const [c] x)) => (BTR(Q|L)const [c] x)
+
+// Fold boolean negation into SETcc.
+(XORLconst [1] (SETNE x)) => (SETEQ x)
+(XORLconst [1] (SETEQ x)) => (SETNE x)
+(XORLconst [1] (SETL x)) => (SETGE x)
+(XORLconst [1] (SETGE x)) => (SETL x)
+(XORLconst [1] (SETLE x)) => (SETG x)
+(XORLconst [1] (SETG x)) => (SETLE x)
+(XORLconst [1] (SETB x)) => (SETAE x)
+(XORLconst [1] (SETAE x)) => (SETB x)
+(XORLconst [1] (SETBE x)) => (SETA x)
+(XORLconst [1] (SETA x)) => (SETBE x)
+
+// Special case for floating point - LF/LEF not generated
+(NE (TESTB (SETGF cmp) (SETGF cmp)) yes no) => (UGT cmp yes no)
+(NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no) => (UGE cmp yes no)
+(NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no) => (EQF cmp yes no)
+(NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no) => (NEF cmp yes no)
+
+// Disabled because it interferes with the pattern match above and makes worse code.
+// (SETNEF x) => (ORQ (SETNE <typ.Int8> x) (SETNAN <typ.Int8> x))
+// (SETEQF x) => (ANDQ (SETEQ <typ.Int8> x) (SETORD <typ.Int8> x))
+
+// fold constants into instructions
+(ADDQ x (MOVQconst [c])) && is32Bit(c) => (ADDQconst [int32(c)] x)
+(ADDQ x (MOVLconst [c])) => (ADDQconst [c] x)
+(ADDL x (MOVLconst [c])) => (ADDLconst [c] x)
+
+(SUBQ x (MOVQconst [c])) && is32Bit(c) => (SUBQconst x [int32(c)])
+(SUBQ (MOVQconst [c]) x) && is32Bit(c) => (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+(SUBL x (MOVLconst [c])) => (SUBLconst x [c])
+(SUBL (MOVLconst [c]) x) => (NEGL (SUBLconst <v.Type> x [c]))
+
+(MULQ x (MOVQconst [c])) && is32Bit(c) => (MULQconst [int32(c)] x)
+(MULL x (MOVLconst [c])) => (MULLconst [c] x)
+
+(ANDQ x (MOVQconst [c])) && is32Bit(c) => (ANDQconst [int32(c)] x)
+(ANDL x (MOVLconst [c])) => (ANDLconst [c] x)
+
+(AND(L|Q)const [c] (AND(L|Q)const [d] x)) => (AND(L|Q)const [c & d] x)
+(XOR(L|Q)const [c] (XOR(L|Q)const [d] x)) => (XOR(L|Q)const [c ^ d] x)
+(OR(L|Q)const [c] (OR(L|Q)const [d] x)) => (OR(L|Q)const [c | d] x)
+
+(BTRLconst [c] (ANDLconst [d] x)) => (ANDLconst [d &^ (1<<uint32(c))] x)
+(ANDLconst [c] (BTRLconst [d] x)) => (ANDLconst [c &^ (1<<uint32(d))] x)
+(BTRLconst [c] (BTRLconst [d] x)) => (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+
+(BTCLconst [c] (XORLconst [d] x)) => (XORLconst [d ^ 1<<uint32(c)] x)
+(XORLconst [c] (BTCLconst [d] x)) => (XORLconst [c ^ 1<<uint32(d)] x)
+(BTCLconst [c] (BTCLconst [d] x)) => (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+(BTSLconst [c] (ORLconst [d] x)) => (ORLconst [d | 1<<uint32(c)] x)
+(ORLconst [c] (BTSLconst [d] x)) => (ORLconst [c | 1<<uint32(d)] x)
+(BTSLconst [c] (BTSLconst [d] x)) => (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+(BTRQconst [c] (ANDQconst [d] x)) && is32Bit(int64(d) &^ (1<<uint32(c))) => (ANDQconst [d &^ (1<<uint32(c))] x)
+(ANDQconst [c] (BTRQconst [d] x)) && is32Bit(int64(c) &^ (1<<uint32(d))) => (ANDQconst [c &^ (1<<uint32(d))] x)
+(BTRQconst [c] (BTRQconst [d] x)) && is32Bit(^(1<<uint32(c) | 1<<uint32(d))) => (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+
+(BTCQconst [c] (XORQconst [d] x)) && is32Bit(int64(d) ^ 1<<uint32(c)) => (XORQconst [d ^ 1<<uint32(c)] x)
+(XORQconst [c] (BTCQconst [d] x)) && is32Bit(int64(c) ^ 1<<uint32(d)) => (XORQconst [c ^ 1<<uint32(d)] x)
+(BTCQconst [c] (BTCQconst [d] x)) && is32Bit(1<<uint32(c) ^ 1<<uint32(d)) => (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+
+(BTSQconst [c] (ORQconst [d] x)) && is32Bit(int64(d) | 1<<uint32(c)) => (ORQconst [d | 1<<uint32(c)] x)
+(ORQconst [c] (BTSQconst [d] x)) && is32Bit(int64(c) | 1<<uint32(d)) => (ORQconst [c | 1<<uint32(d)] x)
+(BTSQconst [c] (BTSQconst [d] x)) && is32Bit(1<<uint32(c) | 1<<uint32(d)) => (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
+
+
+(MULLconst [c] (MULLconst [d] x)) => (MULLconst [c * d] x)
+(MULQconst [c] (MULQconst [d] x)) && is32Bit(int64(c)*int64(d)) => (MULQconst [c * d] x)
+
+(ORQ x (MOVQconst [c])) && is32Bit(c) => (ORQconst [int32(c)] x)
+(ORQ x (MOVLconst [c])) => (ORQconst [c] x)
+(ORL x (MOVLconst [c])) => (ORLconst [c] x)
+
+(XORQ x (MOVQconst [c])) && is32Bit(c) => (XORQconst [int32(c)] x)
+(XORL x (MOVLconst [c])) => (XORLconst [c] x)
+
+(SHLQ x (MOV(Q|L)const [c])) => (SHLQconst [int8(c&63)] x)
+(SHLL x (MOV(Q|L)const [c])) => (SHLLconst [int8(c&31)] x)
+
+(SHRQ x (MOV(Q|L)const [c])) => (SHRQconst [int8(c&63)] x)
+(SHRL x (MOV(Q|L)const [c])) => (SHRLconst [int8(c&31)] x)
+(SHRW x (MOV(Q|L)const [c])) && c&31 < 16 => (SHRWconst [int8(c&31)] x)
+(SHRW _ (MOV(Q|L)const [c])) && c&31 >= 16 => (MOVLconst [0])
+(SHRB x (MOV(Q|L)const [c])) && c&31 < 8 => (SHRBconst [int8(c&31)] x)
+(SHRB _ (MOV(Q|L)const [c])) && c&31 >= 8 => (MOVLconst [0])
+
+(SARQ x (MOV(Q|L)const [c])) => (SARQconst [int8(c&63)] x)
+(SARL x (MOV(Q|L)const [c])) => (SARLconst [int8(c&31)] x)
+(SARW x (MOV(Q|L)const [c])) => (SARWconst [int8(min(int64(c)&31,15))] x)
+(SARB x (MOV(Q|L)const [c])) => (SARBconst [int8(min(int64(c)&31,7))] x)
+
+
+// Operations which don't affect the low 6/5 bits of the shift amount are NOPs.
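+// For example (illustrative), an explicit mask such as x << (y & 63) arrives as
+// (SHLQ x (ANDQconst [63] y)); the ANDQconst rule below drops the AND, since SHLQ
+// only looks at the low 6 bits of its shift count anyway.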
+((SHLQ|SHRQ|SARQ) x (ADDQconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ADDQconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDQconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGQ <t> (ANDQconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGQ <t> y))
+
+((SHLL|SHRL|SARL) x (ADDQconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ADDQconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+((SHLL|SHRL|SARL) x (ANDQconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGQ <t> (ANDQconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGQ <t> y))
+
+((SHLQ|SHRQ|SARQ) x (ADDLconst [c] y)) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ADDLconst [c] y))) && c & 63 == 0 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+((SHLQ|SHRQ|SARQ) x (ANDLconst [c] y)) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x y)
+((SHLQ|SHRQ|SARQ) x (NEGL <t> (ANDLconst [c] y))) && c & 63 == 63 => ((SHLQ|SHRQ|SARQ) x (NEGL <t> y))
+
+((SHLL|SHRL|SARL) x (ADDLconst [c] y)) && c & 31 == 0 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ADDLconst [c] y))) && c & 31 == 0 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+((SHLL|SHRL|SARL) x (ANDLconst [c] y)) && c & 31 == 31 => ((SHLL|SHRL|SARL) x y)
+((SHLL|SHRL|SARL) x (NEGL <t> (ANDLconst [c] y))) && c & 31 == 31 => ((SHLL|SHRL|SARL) x (NEGL <t> y))
+
+// Constant rotate instructions
+((ADDQ|ORQ|XORQ) (SHLQconst x [c]) (SHRQconst x [d])) && d==64-c => (ROLQconst x [c])
+((ADDL|ORL|XORL) (SHLLconst x [c]) (SHRLconst x [d])) && d==32-c => (ROLLconst x [c])
+
+((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRWconst x [d])) && d==16-c && c < 16 && t.Size() == 2 => (ROLWconst x [c])
+((ADDL|ORL|XORL) <t> (SHLLconst x [c]) (SHRBconst x [d])) && d==8-c && c < 8 && t.Size() == 1 => (ROLBconst x [c])
+
+(ROLQconst [c] (ROLQconst [d] x)) => (ROLQconst [(c+d)&63] x)
+(ROLLconst [c] (ROLLconst [d] x)) => (ROLLconst [(c+d)&31] x)
+(ROLWconst [c] (ROLWconst [d] x)) => (ROLWconst [(c+d)&15] x)
+(ROLBconst [c] (ROLBconst [d] x)) => (ROLBconst [(c+d)& 7] x)
+
+(RotateLeft8 ...) => (ROLB ...)
+(RotateLeft16 ...) => (ROLW ...)
+(RotateLeft32 ...) => (ROLL ...)
+(RotateLeft64 ...) => (ROLQ ...)
+
+// Non-constant rotates.
+// We want to issue a rotate when the Go source contains code like
+// y &= 63
+// x << y | x >> (64-y)
+// The shift rules above convert << to SHLx and >> to SHRx.
+// SHRx converts its shift argument from 64-y to -y.
+// A tricky situation occurs when y==0. Then the original code would be:
+// x << 0 | x >> 64
+// But x >> 64 is 0, not x. So there's an additional mask that is ANDed in
+// to force the second term to 0. We don't need that mask, but we must match
+// it in order to strip it out.
+(ORQ (SHLQ x y) (ANDQ (SHRQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (ROLQ x y)
+(ORQ (SHRQ x y) (ANDQ (SHLQ x (NEG(Q|L) y)) (SBBQcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [63]) [-64])) [64])))) => (RORQ x y)
+
+(ORL (SHLL x y) (ANDL (SHRL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (ROLL x y)
+(ORL (SHRL x y) (ANDL (SHLL x (NEG(Q|L) y)) (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [31]) [-32])) [32])))) => (RORL x y)
+
+// Help with rotate detection
+(CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32]) => (FlagLT_ULT)
+(CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32]) => (FlagLT_ULT)
+
+(ORL (SHLL x (AND(Q|L)const y [15]))
+ (ANDL (SHRW x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16])) [16]))))
+ && v.Type.Size() == 2
+ => (ROLW x y)
+(ORL (SHRW x (AND(Q|L)const y [15]))
+ (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [15]) [-16]))))
+ && v.Type.Size() == 2
+ => (RORW x y)
+
+(ORL (SHLL x (AND(Q|L)const y [ 7]))
+ (ANDL (SHRB x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])))
+ (SBBLcarrymask (CMP(Q|L)const (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8])) [ 8]))))
+ && v.Type.Size() == 1
+ => (ROLB x y)
+(ORL (SHRB x (AND(Q|L)const y [ 7]))
+ (SHLL x (NEG(Q|L) (ADD(Q|L)const (AND(Q|L)const y [ 7]) [ -8]))))
+ && v.Type.Size() == 1
+ => (RORB x y)
+
+// rotate left negative = rotate right
+(ROLQ x (NEG(Q|L) y)) => (RORQ x y)
+(ROLL x (NEG(Q|L) y)) => (RORL x y)
+(ROLW x (NEG(Q|L) y)) => (RORW x y)
+(ROLB x (NEG(Q|L) y)) => (RORB x y)
+
+// rotate right negative = rotate left
+(RORQ x (NEG(Q|L) y)) => (ROLQ x y)
+(RORL x (NEG(Q|L) y)) => (ROLL x y)
+(RORW x (NEG(Q|L) y)) => (ROLW x y)
+(RORB x (NEG(Q|L) y)) => (ROLB x y)
+
+// rotate by constants
+(ROLQ x (MOV(Q|L)const [c])) => (ROLQconst [int8(c&63)] x)
+(ROLL x (MOV(Q|L)const [c])) => (ROLLconst [int8(c&31)] x)
+(ROLW x (MOV(Q|L)const [c])) => (ROLWconst [int8(c&15)] x)
+(ROLB x (MOV(Q|L)const [c])) => (ROLBconst [int8(c&7) ] x)
+
+(RORQ x (MOV(Q|L)const [c])) => (ROLQconst [int8((-c)&63)] x)
+(RORL x (MOV(Q|L)const [c])) => (ROLLconst [int8((-c)&31)] x)
+(RORW x (MOV(Q|L)const [c])) => (ROLWconst [int8((-c)&15)] x)
+(RORB x (MOV(Q|L)const [c])) => (ROLBconst [int8((-c)&7) ] x)
+
+// Constant shift simplifications
+((SHLQ|SHRQ|SARQ)const x [0]) => x
+((SHLL|SHRL|SARL)const x [0]) => x
+((SHRW|SARW)const x [0]) => x
+((SHRB|SARB)const x [0]) => x
+((ROLQ|ROLL|ROLW|ROLB)const x [0]) => x
+
+// Multi-register shifts
+(ORQ (SH(R|L)Q lo bits) (SH(L|R)Q hi (NEGQ bits))) => (SH(R|L)DQ lo hi bits)
+
+// Note: the word and byte shifts keep the low 5 bits (not the low 4 or 3 bits)
+// because the x86 instructions are defined to use all 5 bits of the shift even
+// for the small shifts. I don't think we'll ever generate a weird shift (e.g.
+// (SHRW x (MOVLconst [24]))), but just in case.
+
+(CMPQ x (MOVQconst [c])) && is32Bit(c) => (CMPQconst x [int32(c)])
+(CMPQ (MOVQconst [c]) x) && is32Bit(c) => (InvertFlags (CMPQconst x [int32(c)]))
+(CMPL x (MOVLconst [c])) => (CMPLconst x [c])
+(CMPL (MOVLconst [c]) x) => (InvertFlags (CMPLconst x [c]))
+(CMPW x (MOVLconst [c])) => (CMPWconst x [int16(c)])
+(CMPW (MOVLconst [c]) x) => (InvertFlags (CMPWconst x [int16(c)]))
+(CMPB x (MOVLconst [c])) => (CMPBconst x [int8(c)])
+(CMPB (MOVLconst [c]) x) => (InvertFlags (CMPBconst x [int8(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP(Q|L|W|B) x y) && canonLessThan(x,y) => (InvertFlags (CMP(Q|L|W|B) y x))
+
+// Using MOVZX instead of AND is cheaper.
+(AND(Q|L)const [ 0xFF] x) => (MOVBQZX x)
+(AND(Q|L)const [0xFFFF] x) => (MOVWQZX x)
+// This rule is currently invalid because 0xFFFFFFFF is not representable by a signed int32.
+// Commenting it out for now; it also can't trigger, because the is32Bit guard on the
+// ANDQconst lowering rule above prevents 0xFFFFFFFF from matching (for the same reason).
+// Using an alternate form of this rule segfaults some binaries because of
+// adverse interactions with other passes.
+// (ANDQconst [0xFFFFFFFF] x) => (MOVLQZX x)
+
+// strength reduction
+// Assumes the following costs, from https://gmplib.org/~tege/x86-timing.pdf:
+// 1 - addq, shlq, leaq, negq, subq
+// 3 - imulq
+// This limits the rewrites to two instructions.
+// Note that negq always operates in-place,
+// which can require a register-register move
+// to preserve the original value,
+// so it must be used with care.
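+// Worked example (for illustration): the [11] rule below computes 11*x as
+// x + 2*(x + 4*x), i.e. (LEAQ2 x (LEAQ4 x x)), using two cost-1 LEAs in place of
+// a cost-3 IMULQ.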
+(MUL(Q|L)const [-9] x) => (NEG(Q|L) (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [-5] x) => (NEG(Q|L) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [-3] x) => (NEG(Q|L) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [-1] x) => (NEG(Q|L) x)
+(MUL(Q|L)const [ 0] _) => (MOV(Q|L)const [0])
+(MUL(Q|L)const [ 1] x) => x
+(MUL(Q|L)const [ 3] x) => (LEA(Q|L)2 x x)
+(MUL(Q|L)const [ 5] x) => (LEA(Q|L)4 x x)
+(MUL(Q|L)const [ 7] x) => (LEA(Q|L)2 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [ 9] x) => (LEA(Q|L)8 x x)
+(MUL(Q|L)const [11] x) => (LEA(Q|L)2 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [13] x) => (LEA(Q|L)4 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [19] x) => (LEA(Q|L)2 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [21] x) => (LEA(Q|L)4 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [25] x) => (LEA(Q|L)8 x (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [27] x) => (LEA(Q|L)8 (LEA(Q|L)2 <v.Type> x x) (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [37] x) => (LEA(Q|L)4 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [41] x) => (LEA(Q|L)8 x (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [45] x) => (LEA(Q|L)8 (LEA(Q|L)4 <v.Type> x x) (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [73] x) => (LEA(Q|L)8 x (LEA(Q|L)8 <v.Type> x x))
+(MUL(Q|L)const [81] x) => (LEA(Q|L)8 (LEA(Q|L)8 <v.Type> x x) (LEA(Q|L)8 <v.Type> x x))
+
+(MUL(Q|L)const [c] x) && isPowerOfTwo64(int64(c)+1) && c >= 15 => (SUB(Q|L) (SHL(Q|L)const <v.Type> [int8(log64(int64(c)+1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-1) && c >= 17 => (LEA(Q|L)1 (SHL(Q|L)const <v.Type> [int8(log32(c-1))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-2) && c >= 34 => (LEA(Q|L)2 (SHL(Q|L)const <v.Type> [int8(log32(c-2))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-4) && c >= 68 => (LEA(Q|L)4 (SHL(Q|L)const <v.Type> [int8(log32(c-4))] x) x)
+(MUL(Q|L)const [c] x) && isPowerOfTwo32(c-8) && c >= 136 => (LEA(Q|L)8 (SHL(Q|L)const <v.Type> [int8(log32(c-8))] x) x)
+(MUL(Q|L)const [c] x) && c%3 == 0 && isPowerOfTwo32(c/3) => (SHL(Q|L)const [int8(log32(c/3))] (LEA(Q|L)2 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%5 == 0 && isPowerOfTwo32(c/5) => (SHL(Q|L)const [int8(log32(c/5))] (LEA(Q|L)4 <v.Type> x x))
+(MUL(Q|L)const [c] x) && c%9 == 0 && isPowerOfTwo32(c/9) => (SHL(Q|L)const [int8(log32(c/9))] (LEA(Q|L)8 <v.Type> x x))
+
+// combine add/shift into LEAQ/LEAL
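+// For example (illustrative), indexing into a slice of 8-byte elements computes
+// p + 8*i, which arrives as (ADDQ p (SHLQconst [3] i)) and is folded by the
+// first rule below into a single (LEAQ8 p i).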
+(ADD(L|Q) x (SHL(L|Q)const [3] y)) => (LEA(L|Q)8 x y)
+(ADD(L|Q) x (SHL(L|Q)const [2] y)) => (LEA(L|Q)4 x y)
+(ADD(L|Q) x (SHL(L|Q)const [1] y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) y y)) => (LEA(L|Q)2 x y)
+(ADD(L|Q) x (ADD(L|Q) x y)) => (LEA(L|Q)2 y x)
+
+// combine ADDQ/ADDQconst into LEAQ1/LEAL1
+(ADD(Q|L)const [c] (ADD(Q|L) x y)) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L) (ADD(Q|L)const [c] x) y) => (LEA(Q|L)1 [c] x y)
+(ADD(Q|L)const [c] (SHL(Q|L)const [1] x)) => (LEA(Q|L)1 [c] x x)
+
+// fold ADDQ/ADDL into LEAQ/LEAL
+(ADD(Q|L)const [c] (LEA(Q|L) [d] {s} x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L)const [d] x)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L) [c+d] {s} x)
+(LEA(Q|L) [c] {s} (ADD(Q|L) x y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+(ADD(Q|L) x (LEA(Q|L) [c] {s} y)) && x.Op != OpSB && y.Op != OpSB => (LEA(Q|L)1 [c] {s} x y)
+
+// fold ADDQconst/ADDLconst into LEAQx/LEALx
+(ADD(Q|L)const [c] (LEA(Q|L)1 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)1 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)2 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)2 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)4 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)4 [c+d] {s} x y)
+(ADD(Q|L)const [c] (LEA(Q|L)8 [d] {s} x y)) && is32Bit(int64(c)+int64(d)) => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)1 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)1 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)2 [c+d] {s} x y)
+(LEA(Q|L)2 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB => (LEA(Q|L)2 [c+2*d] {s} x y)
+(LEA(Q|L)4 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)4 [c+d] {s} x y)
+(LEA(Q|L)4 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB => (LEA(Q|L)4 [c+4*d] {s} x y)
+(LEA(Q|L)8 [c] {s} (ADD(Q|L)const [d] x) y) && is32Bit(int64(c)+int64(d)) && x.Op != OpSB => (LEA(Q|L)8 [c+d] {s} x y)
+(LEA(Q|L)8 [c] {s} x (ADD(Q|L)const [d] y)) && is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB => (LEA(Q|L)8 [c+8*d] {s} x y)
+
+// fold shifts into LEAQx/LEALx
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)2 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)1 [c] {s} x (SHL(Q|L)const [3] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)4 [c] {s} x y)
+(LEA(Q|L)2 [c] {s} x (SHL(Q|L)const [2] y)) => (LEA(Q|L)8 [c] {s} x y)
+(LEA(Q|L)4 [c] {s} x (SHL(Q|L)const [1] y)) => (LEA(Q|L)8 [c] {s} x y)
+
+// reverse ordering of compare instruction
+(SETL (InvertFlags x)) => (SETG x)
+(SETG (InvertFlags x)) => (SETL x)
+(SETB (InvertFlags x)) => (SETA x)
+(SETA (InvertFlags x)) => (SETB x)
+(SETLE (InvertFlags x)) => (SETGE x)
+(SETGE (InvertFlags x)) => (SETLE x)
+(SETBE (InvertFlags x)) => (SETAE x)
+(SETAE (InvertFlags x)) => (SETBE x)
+(SETEQ (InvertFlags x)) => (SETEQ x)
+(SETNE (InvertFlags x)) => (SETNE x)
+
+(SETLstore [off] {sym} ptr (InvertFlags x) mem) => (SETGstore [off] {sym} ptr x mem)
+(SETGstore [off] {sym} ptr (InvertFlags x) mem) => (SETLstore [off] {sym} ptr x mem)
+(SETBstore [off] {sym} ptr (InvertFlags x) mem) => (SETAstore [off] {sym} ptr x mem)
+(SETAstore [off] {sym} ptr (InvertFlags x) mem) => (SETBstore [off] {sym} ptr x mem)
+(SETLEstore [off] {sym} ptr (InvertFlags x) mem) => (SETGEstore [off] {sym} ptr x mem)
+(SETGEstore [off] {sym} ptr (InvertFlags x) mem) => (SETLEstore [off] {sym} ptr x mem)
+(SETBEstore [off] {sym} ptr (InvertFlags x) mem) => (SETAEstore [off] {sym} ptr x mem)
+(SETAEstore [off] {sym} ptr (InvertFlags x) mem) => (SETBEstore [off] {sym} ptr x mem)
+(SETEQstore [off] {sym} ptr (InvertFlags x) mem) => (SETEQstore [off] {sym} ptr x mem)
+(SETNEstore [off] {sym} ptr (InvertFlags x) mem) => (SETNEstore [off] {sym} ptr x mem)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
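+// For example (illustrative), (MOVBQSX x:(MOVBload [off] {sym} ptr mem)) where the
+// extension is the load's only use becomes a MOVBQSXload, i.e. a single
+// MOVBQSX off(ptr), reg that loads and sign-extends in one instruction.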
+(MOVBQSX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVBQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVWQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQSX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVLload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+(MOVLQZX x:(MOVQload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+
+(MOVLQZX x) && zeroUpper32Bits(x,3) => x
+(MOVWQZX x) && zeroUpper48Bits(x,3) => x
+(MOVBQZX x) && zeroUpper56Bits(x,3) => x
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQZX x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQZX x)
+(MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQZX x)
+(MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBQSX x)
+(MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVWQSX x)
+(MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVLQSX x)
+
+// Fold extensions and ANDs together.
+(MOVBQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xff] x)
+(MOVWQZX (ANDLconst [c] x)) => (ANDLconst [c & 0xffff] x)
+(MOVLQZX (ANDLconst [c] x)) => (ANDLconst [c] x)
+(MOVBQSX (ANDLconst [c] x)) && c & 0x80 == 0 => (ANDLconst [c & 0x7f] x)
+(MOVWQSX (ANDLconst [c] x)) && c & 0x8000 == 0 => (ANDLconst [c & 0x7fff] x)
+(MOVLQSX (ANDLconst [c] x)) && uint32(c) & 0x80000000 == 0 => (ANDLconst [c & 0x7fffffff] x)
+
+// Don't extend before storing
+(MOVLstore [off] {sym} ptr (MOVLQSX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQSX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQSX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVLstore [off] {sym} ptr (MOVLQZX x) mem) => (MOVLstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWQZX x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBQZX x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// fold constants into memory operations
+// Note that this is not always a good idea: if not all of the uses of
+// the ADDQconst get eliminated, we still have to compute the ADDQconst, and we then
+// have potentially two live values (ptr and (ADDQconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
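+// For example (illustrative), loading a field at offset 8 may arrive as
+// (MOVQload (ADDQconst [8] p) mem); the first rule below folds the offset into
+// the load itself, (MOVQload [8] p mem), i.e. a single MOVQ 8(p), reg.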
+(MOV(Q|L|W|B|SS|SD|O)load [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)load [off1+off2] {sym} ptr mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym} (ADDQconst [off2] ptr) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {sym} ptr val mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {sym} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {sym} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMP(Q|L|W|B)load [off1+off2] {sym} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {sym} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym} val (ADDQconst [off2] base) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {sym} val base mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem) && ValAndOff(valoff1).canAdd32(off2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {sym} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym} (ADDQconst [off2] base) val mem) && is32Bit(int64(off1)+int64(off2)) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {sym} base val mem)
+
+// Fold constants into stores.
+(MOVQstore [off] {sym} ptr (MOVQconst [c]) mem) && validVal(c) =>
+ (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVLstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOV(L|Q)const [c]) mem) =>
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOV(Q|L|W|B|O)storeconst [sc] {s} (ADDQconst [off] ptr) mem) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+
+// We need to fold LEAQ into the MOVx ops so that the live variable analysis knows
+// what variables are being read/written by the ops.
+(MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O|BQSX|WQSX|LQSX)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOV(Q|L|W|B|SS|SD|O)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|W|B|SS|SD|O)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOV(Q|L|W|B|O)storeconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem) && canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off) =>
+ (MOV(Q|L|W|B|O)storeconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (SET(L|G|B|A|LE|GE|BE|AE|EQ|NE)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Qload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Qload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|AND|OR|XOR)Lload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Lload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+(CMP(Q|L|W|B)load [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)load [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(CMP(Q|L|W|B)constload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ (CMP(Q|L|W|B)constload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+
+((ADD|SUB|MUL|DIV)SSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|SUB|MUL|DIV)SDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|MUL|DIV)SDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+((ADD|AND|OR|XOR)Qconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR)Qconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|AND|OR|XOR)Lconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ && ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2) =>
+ ((ADD|AND|OR|XOR)Lconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+((ADD|SUB|AND|OR|XOR)Qmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+((ADD|SUB|AND|OR|XOR)Lmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+// fold LEAQs together
+(LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+
+// LEAQ into LEAQ1
+(LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ1 into LEAQ
+(LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ into LEAQ[248]
+(LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[248] into LEAQ
+(LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+(LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// LEAQ[1248] into LEAQ[1248]. Only some such merges are possible.
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+(LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+(LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil =>
+ (LEAQ4 [off1+2*off2] {sym1} x y)
+(LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y)) && is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil =>
+ (LEAQ8 [off1+4*off2] {sym1} x y)
+// TODO: more?
+
+// Lower LEAQ2/4/8 when the offset is a constant
+(LEAQ2 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*2) =>
+ (LEAQ [off+int32(scale)*2] {sym} x)
+(LEAQ4 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*4) =>
+ (LEAQ [off+int32(scale)*4] {sym} x)
+(LEAQ8 [off] {sym} x (MOV(Q|L)const [scale])) && is32Bit(int64(off)+int64(scale)*8) =>
+ (LEAQ [off+int32(scale)*8] {sym} x)
+
+// Absorb InvertFlags into branches.
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// Constant comparisons.
+(CMPQconst (MOVQconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)<uint64(int64(y)) => (FlagLT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x<int64(y) && uint64(x)>uint64(int64(y)) => (FlagLT_UGT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)<uint64(int64(y)) => (FlagGT_ULT)
+(CMPQconst (MOVQconst [x]) [y]) && x>int64(y) && uint64(x)>uint64(int64(y)) => (FlagGT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x==y => (FlagEQ)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)<uint32(y) => (FlagLT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x<y && uint32(x)>uint32(y) => (FlagLT_UGT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)<uint32(y) => (FlagGT_ULT)
+(CMPLconst (MOVLconst [x]) [y]) && x>y && uint32(x)>uint32(y) => (FlagGT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)==y => (FlagEQ)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)<uint16(y) => (FlagLT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)<y && uint16(x)>uint16(y) => (FlagLT_UGT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)<uint16(y) => (FlagGT_ULT)
+(CMPWconst (MOVLconst [x]) [y]) && int16(x)>y && uint16(x)>uint16(y) => (FlagGT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)==y => (FlagEQ)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)<uint8(y) => (FlagLT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)<y && uint8(x)>uint8(y) => (FlagLT_UGT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)<uint8(y) => (FlagGT_ULT)
+(CMPBconst (MOVLconst [x]) [y]) && int8(x)>y && uint8(x)>uint8(y) => (FlagGT_UGT)
+
+// CMPQconst requires a 32 bit const, but we can still constant-fold 64 bit consts.
+// In theory this applies to any of the simplifications above,
+// but CMPQ is the only one I've actually seen occur.
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x==y => (FlagEQ)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)<uint64(y) => (FlagLT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x<y && uint64(x)>uint64(y) => (FlagLT_UGT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)<uint64(y) => (FlagGT_ULT)
+(CMPQ (MOVQconst [x]) (MOVQconst [y])) && x>y && uint64(x)>uint64(y) => (FlagGT_UGT)
+
+// Other known comparisons.
+(CMPQconst (MOVBQZX _) [c]) && 0xFF < c => (FlagLT_ULT)
+(CMPQconst (MOVWQZX _) [c]) && 0xFFFF < c => (FlagLT_ULT)
+(CMPLconst (SHRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (SHRQconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n) => (FlagLT_ULT)
+(CMPQconst (ANDQconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPQconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPLconst (ANDLconst _ [m]) [n]) && 0 <= m && m < n => (FlagLT_ULT)
+(CMPWconst (ANDLconst _ [m]) [n]) && 0 <= int16(m) && int16(m) < n => (FlagLT_ULT)
+(CMPBconst (ANDLconst _ [m]) [n]) && 0 <= int8(m) && int8(m) < n => (FlagLT_ULT)
+
+// TESTQ c c sets flags like CMPQ c 0.
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c == 0 => (FlagEQ)
+(TESTLconst [c] (MOVLconst [c])) && c == 0 => (FlagEQ)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c < 0 => (FlagLT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c < 0 => (FlagLT_UGT)
+(TESTQconst [c] (MOVQconst [d])) && int64(c) == d && c > 0 => (FlagGT_UGT)
+(TESTLconst [c] (MOVLconst [c])) && c > 0 => (FlagGT_UGT)
+
+// TODO: DIVxU also.
+
+// Absorb flag constants into SBB ops.
+(SBBQcarrymask (FlagEQ)) => (MOVQconst [0])
+(SBBQcarrymask (FlagLT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagLT_UGT)) => (MOVQconst [0])
+(SBBQcarrymask (FlagGT_ULT)) => (MOVQconst [-1])
+(SBBQcarrymask (FlagGT_UGT)) => (MOVQconst [0])
+(SBBLcarrymask (FlagEQ)) => (MOVLconst [0])
+(SBBLcarrymask (FlagLT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagLT_UGT)) => (MOVLconst [0])
+(SBBLcarrymask (FlagGT_ULT)) => (MOVLconst [-1])
+(SBBLcarrymask (FlagGT_UGT)) => (MOVLconst [0])
+
+// Absorb flag constants into branches.
+((EQ|LE|GE|ULE|UGE) (FlagEQ) yes no) => (First yes no)
+((NE|LT|GT|ULT|UGT) (FlagEQ) yes no) => (First no yes)
+((NE|LT|LE|ULT|ULE) (FlagLT_ULT) yes no) => (First yes no)
+((EQ|GT|GE|UGT|UGE) (FlagLT_ULT) yes no) => (First no yes)
+((NE|LT|LE|UGT|UGE) (FlagLT_UGT) yes no) => (First yes no)
+((EQ|GT|GE|ULT|ULE) (FlagLT_UGT) yes no) => (First no yes)
+((NE|GT|GE|ULT|ULE) (FlagGT_ULT) yes no) => (First yes no)
+((EQ|LT|LE|UGT|UGE) (FlagGT_ULT) yes no) => (First no yes)
+((NE|GT|GE|UGT|UGE) (FlagGT_UGT) yes no) => (First yes no)
+((EQ|LT|LE|ULT|ULE) (FlagGT_UGT) yes no) => (First no yes)
+
+// Absorb flag constants into SETxx ops.
+((SETEQ|SETLE|SETGE|SETBE|SETAE) (FlagEQ)) => (MOVLconst [1])
+((SETNE|SETL|SETG|SETB|SETA) (FlagEQ)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETB|SETBE) (FlagLT_ULT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETA|SETAE) (FlagLT_ULT)) => (MOVLconst [0])
+((SETNE|SETL|SETLE|SETA|SETAE) (FlagLT_UGT)) => (MOVLconst [1])
+((SETEQ|SETG|SETGE|SETB|SETBE) (FlagLT_UGT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETB|SETBE) (FlagGT_ULT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETA|SETAE) (FlagGT_ULT)) => (MOVLconst [0])
+((SETNE|SETG|SETGE|SETA|SETAE) (FlagGT_UGT)) => (MOVLconst [1])
+((SETEQ|SETL|SETLE|SETB|SETBE) (FlagGT_UGT)) => (MOVLconst [0])
+
+(SETEQstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETEQstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETNEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETNEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETLstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETLEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETLEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETGstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETGEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETGEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETBstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETBEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETBEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+
+(SETAstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+(SETAEstore [off] {sym} ptr (FlagEQ) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagLT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_ULT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+(SETAEstore [off] {sym} ptr (FlagGT_UGT) mem) => (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+
+// Remove redundant *const ops
+(ADDQconst [0] x) => x
+(ADDLconst [c] x) && c==0 => x
+(SUBQconst [0] x) => x
+(SUBLconst [c] x) && c==0 => x
+(ANDQconst [0] _) => (MOVQconst [0])
+(ANDLconst [c] _) && c==0 => (MOVLconst [0])
+(ANDQconst [-1] x) => x
+(ANDLconst [c] x) && c==-1 => x
+(ORQconst [0] x) => x
+(ORLconst [c] x) && c==0 => x
+(ORQconst [-1] _) => (MOVQconst [-1])
+(ORLconst [c] _) && c==-1 => (MOVLconst [-1])
+(XORQconst [0] x) => x
+(XORLconst [c] x) && c==0 => x
+// TODO: since we got rid of the W/B versions, we might miss
+// things like (ANDLconst [0x100] x) which were formerly
+// (ANDBconst [0] x). Probably doesn't happen very often.
+// If we cared, we might do:
+// (ANDLconst <t> [c] x) && t.Size()==1 && int8(x)==0 -> (MOVLconst [0])
+
+// Remove redundant ops
+// Not in generic rules, because they may appear after lowering, e.g. Slicemask
+(NEG(Q|L) (NEG(Q|L) x)) => x
+(NEG(Q|L) s:(SUB(Q|L) x y)) && s.Uses == 1 => (SUB(Q|L) y x)
+
+// Convert constant subtracts to constant adds
+(SUBQconst [c] x) && c != -(1<<31) => (ADDQconst [-c] x)
+(SUBLconst [c] x) => (ADDLconst [-c] x)
+
+// generic constant folding
+// TODO: more of this
+(ADDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)+d])
+(ADDLconst [c] (MOVLconst [d])) => (MOVLconst [c+d])
+(ADDQconst [c] (ADDQconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDQconst [c+d] x)
+(ADDLconst [c] (ADDLconst [d] x)) => (ADDLconst [c+d] x)
+(SUBQconst (MOVQconst [d]) [c]) => (MOVQconst [d-int64(c)])
+(SUBQconst (SUBQconst x [d]) [c]) && is32Bit(int64(-c)-int64(d)) => (ADDQconst [-c-d] x)
+(SARQconst [c] (MOVQconst [d])) => (MOVQconst [d>>uint64(c)])
+(SARLconst [c] (MOVQconst [d])) => (MOVQconst [int64(int32(d))>>uint64(c)])
+(SARWconst [c] (MOVQconst [d])) => (MOVQconst [int64(int16(d))>>uint64(c)])
+(SARBconst [c] (MOVQconst [d])) => (MOVQconst [int64(int8(d))>>uint64(c)])
+(NEGQ (MOVQconst [c])) => (MOVQconst [-c])
+(NEGL (MOVLconst [c])) => (MOVLconst [-c])
+(MULQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)*d])
+(MULLconst [c] (MOVLconst [d])) => (MOVLconst [c*d])
+(ANDQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)&d])
+(ANDLconst [c] (MOVLconst [d])) => (MOVLconst [c&d])
+(ORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)|d])
+(ORLconst [c] (MOVLconst [d])) => (MOVLconst [c|d])
+(XORQconst [c] (MOVQconst [d])) => (MOVQconst [int64(c)^d])
+(XORLconst [c] (MOVLconst [d])) => (MOVLconst [c^d])
+(NOTQ (MOVQconst [c])) => (MOVQconst [^c])
+(NOTL (MOVLconst [c])) => (MOVLconst [^c])
+(BTSQconst [c] (MOVQconst [d])) => (MOVQconst [d|(1<<uint32(c))])
+(BTSLconst [c] (MOVLconst [d])) => (MOVLconst [d|(1<<uint32(c))])
+(BTRQconst [c] (MOVQconst [d])) => (MOVQconst [d&^(1<<uint32(c))])
+(BTRLconst [c] (MOVLconst [d])) => (MOVLconst [d&^(1<<uint32(c))])
+(BTCQconst [c] (MOVQconst [d])) => (MOVQconst [d^(1<<uint32(c))])
+(BTCLconst [c] (MOVLconst [d])) => (MOVLconst [d^(1<<uint32(c))])
+
+// If c or d doesn't fit into 32 bits, then we can't construct ORQconst,
+// but we can still constant-fold.
+// In theory this applies to any of the simplifications above,
+// but ORQ is the only one I've actually seen occur.
+(ORQ (MOVQconst [c]) (MOVQconst [d])) => (MOVQconst [c|d])
+
+// generic simplifications
+// TODO: more of this
+(ADDQ x (NEGQ y)) => (SUBQ x y)
+(ADDL x (NEGL y)) => (SUBL x y)
+(SUBQ x x) => (MOVQconst [0])
+(SUBL x x) => (MOVLconst [0])
+(ANDQ x x) => x
+(ANDL x x) => x
+(ORQ x x) => x
+(ORL x x) => x
+(XORQ x x) => (MOVQconst [0])
+(XORL x x) => (MOVLconst [0])
+
+(SHLLconst [d] (MOVLconst [c])) => (MOVLconst [c << uint64(d)])
+(SHLQconst [d] (MOVQconst [c])) => (MOVQconst [c << uint64(d)])
+(SHLQconst [d] (MOVLconst [c])) => (MOVQconst [int64(c) << uint64(d)])
+
+// Fold NEG into ADDconst/MULconst. Take care to keep c in 32 bit range.
+(NEGQ (ADDQconst [c] (NEGQ x))) && c != -(1<<31) => (ADDQconst [-c] x)
+(MULQconst [c] (NEGQ x)) && c != -(1<<31) => (MULQconst [-c] x)
+
+// checking AND against 0.
+(CMPQconst a:(ANDQ x y) [0]) && a.Uses == 1 => (TESTQ x y)
+(CMPLconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTL x y)
+(CMPWconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTW x y)
+(CMPBconst a:(ANDL x y) [0]) && a.Uses == 1 => (TESTB x y)
+(CMPQconst a:(ANDQconst [c] x) [0]) && a.Uses == 1 => (TESTQconst [c] x)
+(CMPLconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTLconst [c] x)
+(CMPWconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTWconst [int16(c)] x)
+(CMPBconst a:(ANDLconst [c] x) [0]) && a.Uses == 1 => (TESTBconst [int8(c)] x)
+
+// Convert TESTx to TESTxconst if possible.
+(TESTQ (MOVQconst [c]) x) && is32Bit(c) => (TESTQconst [int32(c)] x)
+(TESTL (MOVLconst [c]) x) => (TESTLconst [c] x)
+(TESTW (MOVLconst [c]) x) => (TESTWconst [int16(c)] x)
+(TESTB (MOVLconst [c]) x) => (TESTBconst [int8(c)] x)
+
+// TEST %reg,%reg is shorter than CMP
+(CMPQconst x [0]) => (TESTQ x x)
+(CMPLconst x [0]) => (TESTL x x)
+(CMPWconst x [0]) => (TESTW x x)
+(CMPBconst x [0]) => (TESTB x x)
+(TESTQconst [-1] x) && x.Op != OpAMD64MOVQconst => (TESTQ x x)
+(TESTLconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTL x x)
+(TESTWconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTW x x)
+(TESTBconst [-1] x) && x.Op != OpAMD64MOVLconst => (TESTB x x)
+
+// Convert LEAQ1 back to ADDQ if we can
+(LEAQ1 [0] x y) && v.Aux == nil => (ADDQ x y)
+
+// Combining byte loads into larger (unaligned) loads.
+// There are many ways these combinations could occur. This is
+// designed to match the way encoding/binary.LittleEndian does it.
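+// For reference, the Go-source shape being recognized here is the usual
+// little-endian assembly of a wider value from bytes, roughly
+// uint16(b[0]) | uint16(b[1])<<8 (as in encoding/binary.LittleEndian.Uint16),
+// which reaches this pass as the MOVBload/SHLconst/OR combinations below.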
+
+// Little-endian loads
+
+(OR(L|Q) x0:(MOVBload [i0] {s} p mem)
+ sh:(SHL(L|Q)const [8] x1:(MOVBload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+
+(OR(L|Q) x0:(MOVBload [i] {s} p0 mem)
+ sh:(SHL(L|Q)const [8] x1:(MOVBload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+
+(OR(L|Q) x0:(MOVWload [i0] {s} p mem)
+ sh:(SHL(L|Q)const [16] x1:(MOVWload [i1] {s} p mem)))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+
+(OR(L|Q) x0:(MOVWload [i] {s} p0 mem)
+ sh:(SHL(L|Q)const [16] x1:(MOVWload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+
+(ORQ x0:(MOVLload [i0] {s} p mem)
+ sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
+
+(ORQ x0:(MOVLload [i] {s} p0 mem)
+ sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
+
+(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
+ or:(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+
+(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
+ or:(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
+ y))
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+
+(ORQ
+ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem))
+ or:(ORQ
+ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem))
+ y))
+ && i1 == i0+2
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
+
+(ORQ
+ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem))
+ or:(ORQ
+ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem))
+ y))
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
+
+// Big-endian loads
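+// These mirror the little-endian patterns above with the byte order reversed
+// (e.g. encoding/binary.BigEndian.Uint16 is uint16(b[1]) | uint16(b[0])<<8),
+// so the merged little-endian load is followed by a ROLW/BSWAP to fix up the value.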
+
+(OR(L|Q)
+ x1:(MOVBload [i1] {s} p mem)
+ sh:(SHL(L|Q)const [8] x0:(MOVBload [i0] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+
+(OR(L|Q)
+ x1:(MOVBload [i] {s} p1 mem)
+ sh:(SHL(L|Q)const [8] x0:(MOVBload [i] {s} p0 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+
+(OR(L|Q)
+ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))
+ sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+
+(OR(L|Q)
+ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))
+ sh:(SHL(L|Q)const [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+
+(ORQ
+ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem))
+ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
+
+(ORQ
+ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem))
+ sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
+
+(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i0] {s} p mem))
+ or:(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+
+(OR(L|Q)
+ s0:(SHL(L|Q)const [j0] x0:(MOVBload [i] {s} p0 mem))
+ or:(OR(L|Q)
+ s1:(SHL(L|Q)const [j1] x1:(MOVBload [i] {s} p1 mem))
+ y))
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR(L|Q) <v.Type> (SHL(L|Q)const <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+
+(ORQ
+ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem)))
+ or:(ORQ
+ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)))
+ y))
+ && i1 == i0+2
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
+
+(ORQ
+ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem)))
+ or:(ORQ
+ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)))
+ y))
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
+
+// Combine 2 byte stores + shift into rolw 8 + word store
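+// Roughly the store-side mirror of the big-endian loads above: the two byte
+// stores write byte(w>>8) at the lower address and byte(w) just above it, so
+// rotating w by 8 (swapping its halves) and doing a single little-endian word
+// store produces the same bytes in memory.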
+(MOVBstore [i] {s} p w
+ x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
+ && x0.Uses == 1
+ && clobber(x0)
+ => (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+(MOVBstore [i] {s} p1 w
+ x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
+ && x0.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x0)
+ => (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+
+// Combine stores + shifts into bswap and larger (unaligned) stores
+(MOVBstore [i] {s} p w
+ x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w)
+ x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+(MOVBstore [i] {s} p3 w
+ x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w)
+ x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w)
+ x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && sequentialAddresses(p1, p2, 1)
+ && sequentialAddresses(p2, p3, 1)
+ && clobber(x0, x1, x2)
+ => (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+
+(MOVBstore [i] {s} p w
+ x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w)
+ x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w)
+ x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w)
+ x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w)
+ x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w)
+ x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w)
+ x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+(MOVBstore [i] {s} p7 w
+ x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w)
+ x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w)
+ x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w)
+ x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w)
+ x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w)
+ x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w)
+ x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && sequentialAddresses(p1, p2, 1)
+ && sequentialAddresses(p2, p3, 1)
+ && sequentialAddresses(p3, p4, 1)
+ && sequentialAddresses(p4, p5, 1)
+ && sequentialAddresses(p5, p6, 1)
+ && sequentialAddresses(p6, p7, 1)
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+
+// Combine constant stores into larger (unaligned) stores.
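+// Since x86 stores are little-endian, the constant at the lower offset (a)
+// lands in the low bits of the merged value and the higher-offset constant (c)
+// is shifted above it.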
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+(MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+(MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+(MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
+(MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
+(MOVQstoreconst [c] {s} p x:(MOVQstoreconst [a] {s} p mem))
+ && config.useSSE
+ && x.Uses == 1
+ && a.Off() + 8 == c.Off()
+ && a.Val() == 0
+ && c.Val() == 0
+ && clobber(x)
+ => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
+(MOVQstoreconst [a] {s} p x:(MOVQstoreconst [c] {s} p mem))
+ && config.useSSE
+ && x.Uses == 1
+ && a.Off() + 8 == c.Off()
+ && a.Val() == 0
+ && c.Val() == 0
+ && clobber(x)
+ => (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
+
+// Combine stores into larger (unaligned) stores. Little endian.
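+// These arise from byte- or word-at-a-time stores of pieces of one value
+// (in the style of encoding/binary.LittleEndian.PutUint16 and friends); on a
+// little-endian machine the pieces can be written with a single wider store.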
+(MOVBstore [i] {s} p (SHR(W|L|Q)const [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHR(W|L|Q)const [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i] {s} p w mem)
+(MOVBstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVBstore [i-1] {s} p w0:(SHR(L|Q)const [j-8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHR(W|L|Q)const [8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w mem)
+(MOVBstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVBstore [i] {s} p0 w0:(SHR(L|Q)const [j-8] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 1)
+ && clobber(x)
+ => (MOVWstore [i] {s} p0 w0 mem)
+
+(MOVWstore [i] {s} p (SHR(L|Q)const [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w mem)
+(MOVWstore [i] {s} p (SHR(L|Q)const [j] w) x:(MOVWstore [i-2] {s} p w0:(SHR(L|Q)const [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVLstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p1 (SHR(L|Q)const [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w mem)
+(MOVWstore [i] {s} p1 (SHR(L|Q)const [j] w) x:(MOVWstore [i] {s} p0 w0:(SHR(L|Q)const [j-16] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 2)
+ && clobber(x)
+ => (MOVLstore [i] {s} p0 w0 mem)
+
+(MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVQstore [i-4] {s} p w mem)
+(MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVQstore [i-4] {s} p w0 mem)
+(MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && clobber(x)
+ => (MOVQstore [i] {s} p0 w mem)
+(MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
+ && x.Uses == 1
+ && sequentialAddresses(p0, p1, 4)
+ && clobber(x)
+ => (MOVQstore [i] {s} p0 w0 mem)
+
+(MOVBstore [7] {s} p1 (SHRQconst [56] w)
+ x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w)
+ x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w)
+ x3:(MOVBstore [0] {s} p1 w mem))))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && clobber(x1, x2, x3)
+ => (MOVQstore {s} p1 w mem)
+
+(MOVBstore [i] {s} p
+ x1:(MOVBload [j] {s2} p2 mem)
+ mem2:(MOVBstore [i-1] {s} p
+ x2:(MOVBload [j-1] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
+
+(MOVWstore [i] {s} p
+ x1:(MOVWload [j] {s2} p2 mem)
+ mem2:(MOVWstore [i-2] {s} p
+ x2:(MOVWload [j-2] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
+
+(MOVLstore [i] {s} p
+ x1:(MOVLload [j] {s2} p2 mem)
+ mem2:(MOVLstore [i-4] {s} p
+ x2:(MOVLload [j-4] {s2} p2 mem) mem))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && mem2.Uses == 1
+ && clobber(x1, x2, mem2)
+ => (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
+
+// Merge load and op
+// TODO: add indexed variants?
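+// For example, (ADDQ x (MOVQload ...)) becomes ADDQload, i.e. an ADDQ with a
+// memory operand, when canMergeLoadClobber determines the load can safely be
+// folded into the op.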
+((ADD|SUB|AND|OR|XOR)Q x l:(MOVQload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Qload x [off] {sym} ptr mem)
+((ADD|SUB|AND|OR|XOR)L x l:(MOVLload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|AND|OR|XOR)Lload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SD x l:(MOVSDload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SDload x [off] {sym} ptr mem)
+((ADD|SUB|MUL|DIV)SS x l:(MOVSSload [off] {sym} ptr mem)) && canMergeLoadClobber(v, l, x) && clobber(l) => ((ADD|SUB|MUL|DIV)SSload x [off] {sym} ptr mem)
+(MOVLstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Lload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVLstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)L l:(MOVLload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Lmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|AND|OR|XOR)Qload x [off] {sym} ptr mem) mem) && y.Uses==1 && clobber(y) => ((ADD|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+(MOVQstore {sym} [off] ptr y:((ADD|SUB|AND|OR|XOR)Q l:(MOVQload [off] {sym} ptr mem) x) mem) && y.Uses==1 && l.Uses==1 && clobber(y, l) =>
+ ((ADD|SUB|AND|OR|XOR)Qmodify [off] {sym} ptr x mem)
+
+// Merge ADDQconst and LEAQ into atomic loads.
+(MOV(Q|L|B)atomicload [off1] {sym} (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {sym} ptr mem)
+(MOV(Q|L|B)atomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOV(Q|L|B)atomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+
+// Merge ADDQconst and LEAQ into atomic stores.
+(XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGQ [off1+off2] {sym} val ptr mem)
+(XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+(XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XCHGL [off1+off2] {sym} val ptr mem)
+(XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB =>
+ (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+
+// Merge ADDQconst into atomic adds.
+// TODO: merging LEAQ doesn't work; the assembler doesn't like the resulting instructions.
+(XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDQlock [off1+off2] {sym} val ptr mem)
+(XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (XADDLlock [off1+off2] {sym} val ptr mem)
+
+// Merge ADDQconst into atomic compare and swaps.
+// TODO: merging LEAQ doesn't work; the assembler doesn't like the resulting instructions.
+(CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+(CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem) && is32Bit(int64(off1)+int64(off2)) =>
+ (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+
+// We don't need the conditional move if we know the arg of BSF is not zero.
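+// Rationale: ORing in a nonzero constant guarantees BSF's input is nonzero,
+// so its result is always defined and the CMOV guarding the zero case is dead.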
+(CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _)))) && c != 0 => x
+// Extension is unnecessary for trailing zeros.
+(BSFQ (ORQconst <t> [1<<8] (MOVBQZX x))) => (BSFQ (ORQconst <t> [1<<8] x))
+(BSFQ (ORQconst <t> [1<<16] (MOVWQZX x))) => (BSFQ (ORQconst <t> [1<<16] x))
+
+// Redundant sign/zero extensions
+// Note: see issue 21963. We have to make sure we use the right type on
+// the resulting extension (the outer type, not the inner type).
+(MOVLQSX (MOVLQSX x)) => (MOVLQSX x)
+(MOVLQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVLQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVWQSX (MOVWQSX x)) => (MOVWQSX x)
+(MOVWQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVBQSX (MOVBQSX x)) => (MOVBQSX x)
+(MOVLQZX (MOVLQZX x)) => (MOVLQZX x)
+(MOVLQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVLQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVWQZX (MOVWQZX x)) => (MOVWQZX x)
+(MOVWQZX (MOVBQZX x)) => (MOVBQZX x)
+(MOVBQZX (MOVBQZX x)) => (MOVBQZX x)
+
+(MOVQstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Qconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR)Qconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+(MOVLstore [off] {sym} ptr a:((ADD|AND|OR|XOR)Lconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ && isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a) =>
+ ((ADD|AND|OR|XOR)Lconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+// float <-> int register moves, with no conversion.
+// These come up when compiling math.{Float{32,64}bits,Float{32,64}frombits}.
+(MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _)) => (MOVQf2i val)
+(MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _)) => (MOVLf2i val)
+(MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _)) => (MOVQi2f val)
+(MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _)) => (MOVLi2f val)
+
+// Other load-like ops.
+(ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ADDQ x (MOVQf2i y))
+(ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ADDL x (MOVLf2i y))
+(SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (SUBQ x (MOVQf2i y))
+(SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (SUBL x (MOVLf2i y))
+(ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (ANDQ x (MOVQf2i y))
+(ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (ANDL x (MOVLf2i y))
+( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => ( ORQ x (MOVQf2i y))
+( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => ( ORL x (MOVLf2i y))
+(XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _)) => (XORQ x (MOVQf2i y))
+(XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _)) => (XORL x (MOVLf2i y))
+
+(ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (ADDSD x (MOVQi2f y))
+(ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (ADDSS x (MOVLi2f y))
+(SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (SUBSD x (MOVQi2f y))
+(SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (SUBSS x (MOVLi2f y))
+(MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _)) => (MULSD x (MOVQi2f y))
+(MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _)) => (MULSS x (MOVLi2f y))
+
+// Redirect stores to use the other register set.
+(MOVQstore [off] {sym} ptr (MOVQf2i val) mem) => (MOVSDstore [off] {sym} ptr val mem)
+(MOVLstore [off] {sym} ptr (MOVLf2i val) mem) => (MOVSSstore [off] {sym} ptr val mem)
+(MOVSDstore [off] {sym} ptr (MOVQi2f val) mem) => (MOVQstore [off] {sym} ptr val mem)
+(MOVSSstore [off] {sym} ptr (MOVLi2f val) mem) => (MOVLstore [off] {sym} ptr val mem)
+
+// Load args directly into the register class where they will be used.
+// We do this by just modifying the type of the Arg.
+(MOVQf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLf2i <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVQi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+(MOVLi2f <t> (Arg <u> [off] {sym})) && t.Size() == u.Size() => @b.Func.Entry (Arg <t> [off] {sym})
+
+// LEAQ is rematerializeable, so this helps to avoid register spill.
+// See issue 22947 for details
+(ADD(Q|L)const [off] x:(SP)) => (LEA(Q|L) [off] x)
+
+// HMULx is commutative, but its first argument must go in AX.
+// If possible, put a rematerializeable value in the first argument slot,
+// to reduce the odds that another value will have to be spilled
+// specifically to free up AX.
+(HMUL(Q|L) x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L) y x)
+(HMUL(Q|L)U x y) && !x.rematerializeable() && y.rematerializeable() => (HMUL(Q|L)U y x)
+
+// Fold loads into compares
+// Note: these may be undone by the flagalloc pass.
+(CMP(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) x) && canMergeLoad(v, l) && clobber(l) => (CMP(Q|L|W|B)load {sym} [off] ptr x mem)
+(CMP(Q|L|W|B) x l:(MOV(Q|L|W|B)load {sym} [off] ptr mem)) && canMergeLoad(v, l) && clobber(l) => (InvertFlags (CMP(Q|L|W|B)load {sym} [off] ptr x mem))
+
+(CMP(Q|L)const l:(MOV(Q|L)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(Q|L)constload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMP(W|B)const l:(MOV(W|B)load {sym} [off] ptr mem) [c])
+ && l.Uses == 1
+ && clobber(l) =>
+@l.Block (CMP(W|B)constload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+
+(CMPQload {sym} [off] ptr (MOVQconst [c]) mem) && validVal(c) => (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+(CMPLload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+(CMPWload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+(CMPBload {sym} [off] ptr (MOVLconst [c]) mem) => (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+
+(TEST(Q|L|W|B) l:(MOV(Q|L|W|B)load {sym} [off] ptr mem) l2)
+ && l == l2
+ && l.Uses == 2
+ && clobber(l) =>
+ @l.Block (CMP(Q|L|W|B)constload {sym} [makeValAndOff(0, off)] ptr mem)
+
+// Convert ANDload to MOVload when we can do the AND in a containing TEST op.
+// Only do when it's within the same block, so we don't have flags live across basic block boundaries.
+// See issue 44228.
+(TEST(Q|L) a:(AND(Q|L)load [off] {sym} x ptr mem) a) && a.Uses == 2 && a.Block == v.Block && clobber(a) => (TEST(Q|L) (MOV(Q|L)load <a.Type> [off] {sym} ptr mem) x)
+
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read8(sym, int64(off)))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVLload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVQload [off] {sym} (SB) _) && symIsRO(sym) => (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem) && symIsRO(srcSym) =>
+ (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))])
+ (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+// Match post-lowering calls, memory version.
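+// Here s3 stores dst, s2 stores src, and s1 stores the constant length sc as
+// the call's stack arguments, so the whole call can be replaced by a Move of
+// that length.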
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
+ && sc.Val64() >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sc.Val64(), config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sc.Val64()] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
+
+// Prefetch instructions
+(PrefetchCache ...) => (PrefetchT0 ...)
+(PrefetchCacheStreamed ...) => (PrefetchNTA ...)
+
+// CPUID feature: BMI1.
+(AND(Q|L) x (NOT(Q|L) y)) && buildcfg.GOAMD64 >= 3 => (ANDN(Q|L) x y)
+(AND(Q|L) x (NEG(Q|L) x)) && buildcfg.GOAMD64 >= 3 => (BLSI(Q|L) x)
+(XOR(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSMSK(Q|L) x)
+(AND(Q|L) x (ADD(Q|L)const [-1] x)) && buildcfg.GOAMD64 >= 3 => (BLSR(Q|L) x)
+
+(BSWAP(Q|L) (BSWAP(Q|L) p)) => p
+
+// CPUID feature: MOVBE.
+(MOV(Q|L)store [i] {s} p x:(BSWAP(Q|L) w) mem) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)store [i] {s} p w mem)
+(BSWAP(Q|L) x:(MOV(Q|L)load [i] {s} p mem)) && x.Uses == 1 && buildcfg.GOAMD64 >= 3 => (MOVBE(Q|L)load [i] {s} p mem)
+(BSWAP(Q|L) (MOVBE(Q|L)load [i] {s} p m)) => (MOV(Q|L)load [i] {s} p m)
+(MOVBE(Q|L)store [i] {s} p (BSWAP(Q|L) x) m) => (MOV(Q|L)store [i] {s} p x m)
+
+(ORQ x0:(MOVBELload [i0] {s} p mem)
+ sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem)))
+ && i0 == i1+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem)
+
+(ORQ x0:(MOVBELload [i] {s} p0 mem)
+ sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem)))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && sequentialAddresses(p1, p0, 4)
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64Ops.go b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
new file mode 100644
index 0000000..a6906be
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64Ops.go
@@ -0,0 +1,967 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Floating-point types live in the low natural slot of an sse2 register.
+// Unused portions are junk.
+// - We do not use AH,BH,CH,DH registers.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - All SymOff opcodes require their offset to fit in an int32.
+
+// Suffixes encode the bit width of various instructions.
+// Q (quad word) = 64 bit
+// L (long word) = 32 bit
+// W (word) = 16 bit
+// B (byte) = 8 bit
+
+// copied from ../../amd64/reg.go
+var regNamesAMD64 = []string{
+ "AX",
+ "CX",
+ "DX",
+ "BX",
+ "SP",
+ "BP",
+ "SI",
+ "DI",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "g", // a.k.a. R14
+ "R15",
+ "X0",
+ "X1",
+ "X2",
+ "X3",
+ "X4",
+ "X5",
+ "X6",
+ "X7",
+ "X8",
+ "X9",
+ "X10",
+ "X11",
+ "X12",
+ "X13",
+ "X14",
+ "X15", // constant 0 in ABIInternal
+
+ // If you add registers, update asyncPreempt in runtime
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesAMD64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesAMD64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
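+	// For example, with the ordering of regNamesAMD64 above, buildReg("AX DX")
+	// yields a mask with bits 0 and 2 set.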
+
+ // Common individual register masks
+ var (
+ ax = buildReg("AX")
+ cx = buildReg("CX")
+ dx = buildReg("DX")
+ bx = buildReg("BX")
+ gp = buildReg("AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15")
+ g = buildReg("g")
+ fp = buildReg("X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14")
+ x15 = buildReg("X15")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ gpspsbg = gpspsb | g
+ callerSave = gp | fp | g // runtime.setg (and anything calling it) may clobber g
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp11sb = regInfo{inputs: []regMask{gpspsbg}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21sb = regInfo{inputs: []regMask{gpspsbg, gpsp}, outputs: gponly}
+ gp21shift = regInfo{inputs: []regMask{gp, cx}, outputs: []regMask{gp}}
+ gp31shift = regInfo{inputs: []regMask{gp, gp, cx}, outputs: []regMask{gp}}
+ gp11div = regInfo{inputs: []regMask{ax, gpsp &^ dx}, outputs: []regMask{ax, dx}}
+ gp21hmul = regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx}, clobbers: ax}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp0flagsLoad = regInfo{inputs: []regMask{gpspsbg, 0}}
+ gp1flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gp2flagsLoad = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
+ flagsgp = regInfo{inputs: nil, outputs: gponly}
+
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp, 0}}
+ gp1flags1flags = regInfo{inputs: []regMask{gp, 0}, outputs: []regMask{gp, 0}}
+
+ readflags = regInfo{inputs: nil, outputs: gponly}
+ flagsgpax = regInfo{inputs: nil, clobbers: ax, outputs: []regMask{gp &^ ax}}
+
+ gpload = regInfo{inputs: []regMask{gpspsbg, 0}, outputs: gponly}
+ gp21load = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}, outputs: gponly}
+ gp21loadidx = regInfo{inputs: []regMask{gp, gpspsbg, gpsp, 0}, outputs: gponly}
+ gp21pax = regInfo{inputs: []regMask{gp &^ ax, gp}, outputs: []regMask{gp &^ ax}, clobbers: ax}
+
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{gpspsbg, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{gpspsbg, gpsp, gpsp, 0}}
+ gpstoreconstidx = regInfo{inputs: []regMask{gpspsbg, gpsp, 0}}
+ gpstorexchg = regInfo{inputs: []regMask{gp, gpspsbg, 0}, outputs: []regMask{gp}}
+ cmpxchg = regInfo{inputs: []regMask{gp, ax, gp, 0}, outputs: []regMask{gp, 0}, clobbers: ax}
+
+ fp01 = regInfo{inputs: nil, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21load = regInfo{inputs: []regMask{fp, gpspsbg, 0}, outputs: fponly}
+ fp21loadidx = regInfo{inputs: []regMask{fp, gpspsbg, gpspsb, 0}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{gpspsb, gpsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{gpspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{gpspsb, gpsp, fp, 0}}
+
+ prefreg = regInfo{inputs: []regMask{gpspsbg}}
+ )
+
+ var AMD64ops = []opData{
+ // fp ops
+ {name: "ADDSS", argLength: 2, reg: fp21, asm: "ADDSS", commutative: true, resultInArg0: true}, // fp32 add
+ {name: "ADDSD", argLength: 2, reg: fp21, asm: "ADDSD", commutative: true, resultInArg0: true}, // fp64 add
+ {name: "SUBSS", argLength: 2, reg: fp21, asm: "SUBSS", resultInArg0: true}, // fp32 sub
+ {name: "SUBSD", argLength: 2, reg: fp21, asm: "SUBSD", resultInArg0: true}, // fp64 sub
+ {name: "MULSS", argLength: 2, reg: fp21, asm: "MULSS", commutative: true, resultInArg0: true}, // fp32 mul
+ {name: "MULSD", argLength: 2, reg: fp21, asm: "MULSD", commutative: true, resultInArg0: true}, // fp64 mul
+ {name: "DIVSS", argLength: 2, reg: fp21, asm: "DIVSS", resultInArg0: true}, // fp32 div
+ {name: "DIVSD", argLength: 2, reg: fp21, asm: "DIVSD", resultInArg0: true}, // fp64 div
+
+ {name: "MOVSSload", argLength: 2, reg: fpload, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "MOVSDload", argLength: 2, reg: fpload, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "MOVSSconst", reg: fp01, asm: "MOVSS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "MOVSDconst", reg: fp01, asm: "MOVSD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "MOVSSloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "MOVSSloadidx4", argLength: 3, reg: fploadidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by 4*i
+ {name: "MOVSDloadidx1", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+ {name: "MOVSDloadidx8", argLength: 3, reg: fploadidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by 8*i
+
+ {name: "MOVSSstore", argLength: 3, reg: fpstore, asm: "MOVSS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "MOVSDstore", argLength: 3, reg: fpstore, asm: "MOVSD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "MOVSSstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 1, aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "MOVSSstoreidx4", argLength: 4, reg: fpstoreidx, asm: "MOVSS", scale: 4, aux: "SymOff", symEffect: "Write"}, // fp32 indexed by 4i store
+ {name: "MOVSDstoreidx1", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 1, aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+ {name: "MOVSDstoreidx8", argLength: 4, reg: fpstoreidx, asm: "MOVSD", scale: 8, aux: "SymOff", symEffect: "Write"}, // fp64 indexed by 8i store
+
+ {name: "ADDSSload", argLength: 3, reg: fp21load, asm: "ADDSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDSDload", argLength: 3, reg: fp21load, asm: "ADDSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSSload", argLength: 3, reg: fp21load, asm: "SUBSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBSDload", argLength: 3, reg: fp21load, asm: "SUBSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSSload", argLength: 3, reg: fp21load, asm: "MULSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "MULSDload", argLength: 3, reg: fp21load, asm: "MULSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSSload", argLength: 3, reg: fp21load, asm: "DIVSS", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "DIVSDload", argLength: 3, reg: fp21load, asm: "DIVSD", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ {name: "ADDSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "ADDSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "ADDSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 + tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ADDSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "ADDSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "ADDSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "SUBSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "SUBSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 - tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "SUBSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "SUBSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "SUBSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "MULSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "MULSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "MULSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 * tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "MULSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "MULSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "MULSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 * tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "DIVSSloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "DIVSSloadidx4", argLength: 4, reg: fp21loadidx, asm: "DIVSS", scale: 4, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp32 arg0 / tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "DIVSDloadidx1", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 1, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+arg2+auxint+aux, arg3 = mem
+ {name: "DIVSDloadidx8", argLength: 4, reg: fp21loadidx, asm: "DIVSD", scale: 8, aux: "SymOff", resultInArg0: true, symEffect: "Read"}, // fp64 arg0 / tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+
+ // binary ops
+ {name: "ADDQ", argLength: 2, reg: gp21sp, asm: "ADDQ", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDL", argLength: 2, reg: gp21sp, asm: "ADDL", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDQconst", argLength: 1, reg: gp11sp, asm: "ADDQ", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDLconst", argLength: 1, reg: gp11sp, asm: "ADDL", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ADDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ADDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // add ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "SUBQ", argLength: 2, reg: gp21, asm: "SUBQ", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBL", argLength: 2, reg: gp21, asm: "SUBL", resultInArg0: true, clobberFlags: true}, // arg0 - arg1
+ {name: "SUBQconst", argLength: 1, reg: gp11, asm: "SUBQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBLconst", argLength: 1, reg: gp11, asm: "SUBL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+
+ {name: "MULQ", argLength: 2, reg: gp21, asm: "IMULQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULL", argLength: 2, reg: gp21, asm: "IMULL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULQconst", argLength: 1, reg: gp11, asm: "IMUL3Q", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+ {name: "MULLconst", argLength: 1, reg: gp11, asm: "IMUL3L", aux: "Int32", clobberFlags: true}, // arg0 * auxint
+
+ {name: "MULLU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt32,Flags)", asm: "MULL", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply). Returns uint32(x), and flags set to overflow if uint32(x) != x.
+ {name: "MULQU", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{ax, 0}, clobbers: dx}, typ: "(UInt64,Flags)", asm: "MULQ", commutative: true, clobberFlags: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply). Returns uint64(x), and flags set to overflow if uint64(x) != x.
+
+ // HMULx[U] are intentionally not marked as commutative, even though they are.
+ // This is because they have asymmetric register requirements.
+ // There are rewrite rules to try to place arguments in preferable slots.
+ {name: "HMULQ", argLength: 2, reg: gp21hmul, asm: "IMULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULL", argLength: 2, reg: gp21hmul, asm: "IMULL", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULQU", argLength: 2, reg: gp21hmul, asm: "MULQ", clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "HMULLU", argLength: 2, reg: gp21hmul, asm: "MULL", clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "AVGQU", argLength: 2, reg: gp21, commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 + arg1) / 2 as unsigned, all 64 result bits
+
+ // For DIVQ, DIVL and DIVW, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "DIVQ", argLength: 2, reg: gp11div, typ: "(Int64,Int64)", asm: "IDIVQ", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVL", argLength: 2, reg: gp11div, typ: "(Int32,Int32)", asm: "IDIVL", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVW", argLength: 2, reg: gp11div, typ: "(Int16,Int16)", asm: "IDIVW", aux: "Bool", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
+ {name: "DIVQU", argLength: 2, reg: gp11div, typ: "(UInt64,UInt64)", asm: "DIVQ", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVLU", argLength: 2, reg: gp11div, typ: "(UInt32,UInt32)", asm: "DIVL", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+ {name: "DIVWU", argLength: 2, reg: gp11div, typ: "(UInt16,UInt16)", asm: "DIVW", clobberFlags: true}, // [arg0 / arg1, arg0 % arg1]
+
+ {name: "NEGLflags", argLength: 1, reg: gp11flags, typ: "(UInt32,Flags)", asm: "NEGL", resultInArg0: true}, // -arg0, flags set for 0-arg0.
+ // The following 4 add opcodes return the low 64 bits of the sum in the first result and
+ // the carry (the 65th bit) in the carry flag.
+ {name: "ADDQcarry", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDQ", commutative: true, resultInArg0: true}, // r = arg0+arg1
+ {name: "ADCQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", commutative: true, resultInArg0: true}, // r = arg0+arg1+carry(arg2)
+ {name: "ADDQconstcarry", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint
+ {name: "ADCQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "ADCQ", aux: "Int32", resultInArg0: true}, // r = arg0+auxint+carry(arg1)
+
+	// The following 4 subtract opcodes return the low 64 bits of the difference in the first result and
+ // the borrow (if the result is negative) in the carry flag.
+ {name: "SUBQborrow", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBQ", resultInArg0: true}, // r = arg0-arg1
+ {name: "SBBQ", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", resultInArg0: true}, // r = arg0-(arg1+carry(arg2))
+ {name: "SUBQconstborrow", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "SUBQ", aux: "Int32", resultInArg0: true}, // r = arg0-auxint
+ {name: "SBBQconst", argLength: 2, reg: gp1flags1flags, typ: "(UInt64,Flags)", asm: "SBBQ", aux: "Int32", resultInArg0: true}, // r = arg0-(auxint+carry(arg1))
+
+ {name: "MULQU2", argLength: 2, reg: regInfo{inputs: []regMask{ax, gpsp}, outputs: []regMask{dx, ax}}, commutative: true, asm: "MULQ", clobberFlags: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "DIVQU2", argLength: 3, reg: regInfo{inputs: []regMask{dx, ax, gpsp}, outputs: []regMask{ax, dx}}, asm: "DIVQ", clobberFlags: true}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ {name: "ANDQ", argLength: 2, reg: gp21, asm: "ANDQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDL", argLength: 2, reg: gp21, asm: "ANDL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDQconst", argLength: 1, reg: gp11, asm: "ANDQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDLconst", argLength: 1, reg: gp11, asm: "ANDL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ANDLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ANDL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // and ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "ORQ", argLength: 2, reg: gp21, asm: "ORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORL", argLength: 2, reg: gp21, asm: "ORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORQconst", argLength: 1, reg: gp11, asm: "ORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORLconst", argLength: 1, reg: gp11, asm: "ORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "ORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "ORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // or ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "XORQ", argLength: 2, reg: gp21, asm: "XORQ", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORL", argLength: 2, reg: gp21, asm: "XORL", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORQconst", argLength: 1, reg: gp11, asm: "XORQ", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORLconst", argLength: 1, reg: gp11, asm: "XORL", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORQconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORQ", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+ {name: "XORLconstmodify", argLength: 2, reg: gpstoreconst, asm: "XORL", aux: "SymValAndOff", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // xor ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux, arg1=mem
+
+ {name: "CMPQ", argLength: 2, reg: gp2flags, asm: "CMPQ", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPL", argLength: 2, reg: gp2flags, asm: "CMPL", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPB", argLength: 2, reg: gp2flags, asm: "CMPB", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPQconst", argLength: 1, reg: gp1flags, asm: "CMPQ", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPLconst", argLength: 1, reg: gp1flags, asm: "CMPL", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int16"}, // arg0 compare to auxint
+ {name: "CMPBconst", argLength: 1, reg: gp1flags, asm: "CMPB", typ: "Flags", aux: "Int8"}, // arg0 compare to auxint
+
+ // compare *(arg0+auxint+aux) to arg1 (in that order). arg2=mem.
+ {name: "CMPQload", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLload", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWload", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBload", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", aux: "SymOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg1=mem.
+ {name: "CMPQconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPQ", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPLconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPL", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPWconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPW", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+ {name: "CMPBconstload", argLength: 2, reg: gp0flagsLoad, asm: "CMPB", aux: "SymValAndOff", typ: "Flags", symEffect: "Read", faultOnNilArg0: true},
+
+ // compare *(arg0+N*arg1+auxint+aux) to arg2 (in that order). arg3=mem.
+ {name: "CMPQloadidx8", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 8, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx4", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 4, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx2", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 2, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBloadidx1", argLength: 4, reg: gp2flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymOff", typ: "Flags", symEffect: "Read"},
+
+ // compare *(arg0+N*arg1+ValAndOff(AuxInt).Off()+aux) to ValAndOff(AuxInt).Val() (in that order). arg2=mem.
+ {name: "CMPQconstloadidx8", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 8, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPQconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPQ", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx4", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 4, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPLconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPL", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx2", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 2, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPWconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPW", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+ {name: "CMPBconstloadidx1", argLength: 3, reg: gp1flagsLoad, asm: "CMPB", scale: 1, commutative: true, aux: "SymValAndOff", typ: "Flags", symEffect: "Read"},
+
+ {name: "UCOMISS", argLength: 2, reg: fp2flags, asm: "UCOMISS", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "UCOMISD", argLength: 2, reg: fp2flags, asm: "UCOMISD", typ: "Flags"}, // arg0 compare to arg1, f64
+
+ {name: "BTL", argLength: 2, reg: gp2flags, asm: "BTL", typ: "Flags"}, // test whether bit arg0%32 in arg1 is set
+ {name: "BTQ", argLength: 2, reg: gp2flags, asm: "BTQ", typ: "Flags"}, // test whether bit arg0%64 in arg1 is set
+ {name: "BTCL", argLength: 2, reg: gp21, asm: "BTCL", resultInArg0: true, clobberFlags: true}, // complement bit arg1%32 in arg0
+ {name: "BTCQ", argLength: 2, reg: gp21, asm: "BTCQ", resultInArg0: true, clobberFlags: true}, // complement bit arg1%64 in arg0
+ {name: "BTRL", argLength: 2, reg: gp21, asm: "BTRL", resultInArg0: true, clobberFlags: true}, // reset bit arg1%32 in arg0
+ {name: "BTRQ", argLength: 2, reg: gp21, asm: "BTRQ", resultInArg0: true, clobberFlags: true}, // reset bit arg1%64 in arg0
+ {name: "BTSL", argLength: 2, reg: gp21, asm: "BTSL", resultInArg0: true, clobberFlags: true}, // set bit arg1%32 in arg0
+ {name: "BTSQ", argLength: 2, reg: gp21, asm: "BTSQ", resultInArg0: true, clobberFlags: true}, // set bit arg1%64 in arg0
+ {name: "BTLconst", argLength: 1, reg: gp1flags, asm: "BTL", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 32
+ {name: "BTQconst", argLength: 1, reg: gp1flags, asm: "BTQ", typ: "Flags", aux: "Int8"}, // test whether bit auxint in arg0 is set, 0 <= auxint < 64
+ {name: "BTCLconst", argLength: 1, reg: gp11, asm: "BTCL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTCQconst", argLength: 1, reg: gp11, asm: "BTCQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // complement bit auxint in arg0, 0 <= auxint < 64
+ {name: "BTRLconst", argLength: 1, reg: gp11, asm: "BTRL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTRQconst", argLength: 1, reg: gp11, asm: "BTRQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // reset bit auxint in arg0, 0 <= auxint < 64
+ {name: "BTSLconst", argLength: 1, reg: gp11, asm: "BTSL", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 32
+ {name: "BTSQconst", argLength: 1, reg: gp11, asm: "BTSQ", resultInArg0: true, clobberFlags: true, aux: "Int8"}, // set bit auxint in arg0, 0 <= auxint < 64
+
+ {name: "TESTQ", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTQ", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTL", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTL", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTW", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTW", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTB", argLength: 2, reg: gp2flags, commutative: true, asm: "TESTB", typ: "Flags"}, // (arg0 & arg1) compare to 0
+ {name: "TESTQconst", argLength: 1, reg: gp1flags, asm: "TESTQ", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTLconst", argLength: 1, reg: gp1flags, asm: "TESTL", typ: "Flags", aux: "Int32"}, // (arg0 & auxint) compare to 0
+ {name: "TESTWconst", argLength: 1, reg: gp1flags, asm: "TESTW", typ: "Flags", aux: "Int16"}, // (arg0 & auxint) compare to 0
+ {name: "TESTBconst", argLength: 1, reg: gp1flags, asm: "TESTB", typ: "Flags", aux: "Int8"}, // (arg0 & auxint) compare to 0
+
+ {name: "SHLQ", argLength: 2, reg: gp21shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 64
+ {name: "SHLL", argLength: 2, reg: gp21shift, asm: "SHLL", resultInArg0: true, clobberFlags: true}, // arg0 << arg1, shift amount is mod 32
+ {name: "SHLQconst", argLength: 1, reg: gp11, asm: "SHLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-63
+ {name: "SHLLconst", argLength: 1, reg: gp11, asm: "SHLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 << auxint, shift amount 0-31
+ // Note: x86 is weird, the 16- and 8-bit shifts still use all 5 bits of shift amount!
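+ // In other words, the count is taken mod 32 rather than mod the operand width, so
+ // e.g. SHRW by a count of 16..31 shifts every bit out and yields 0 rather than
+ // behaving like a shift by count mod 16.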
+
+ {name: "SHRQ", argLength: 2, reg: gp21shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SHRL", argLength: 2, reg: gp21shift, asm: "SHRL", resultInArg0: true, clobberFlags: true}, // unsigned uint32(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRW", argLength: 2, reg: gp21shift, asm: "SHRW", resultInArg0: true, clobberFlags: true}, // unsigned uint16(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRB", argLength: 2, reg: gp21shift, asm: "SHRB", resultInArg0: true, clobberFlags: true}, // unsigned uint8(arg0) >> arg1, shift amount is mod 32
+ {name: "SHRQconst", argLength: 1, reg: gp11, asm: "SHRQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SHRLconst", argLength: 1, reg: gp11, asm: "SHRL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
+ {name: "SHRWconst", argLength: 1, reg: gp11, asm: "SHRW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint16(arg0) >> auxint, shift amount 0-15
+ {name: "SHRBconst", argLength: 1, reg: gp11, asm: "SHRB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // unsigned uint8(arg0) >> auxint, shift amount 0-7
+
+ {name: "SARQ", argLength: 2, reg: gp21shift, asm: "SARQ", resultInArg0: true, clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SARL", argLength: 2, reg: gp21shift, asm: "SARL", resultInArg0: true, clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 32
+ {name: "SARW", argLength: 2, reg: gp21shift, asm: "SARW", resultInArg0: true, clobberFlags: true}, // signed int16(arg0) >> arg1, shift amount is mod 32
+ {name: "SARB", argLength: 2, reg: gp21shift, asm: "SARB", resultInArg0: true, clobberFlags: true}, // signed int8(arg0) >> arg1, shift amount is mod 32
+ {name: "SARQconst", argLength: 1, reg: gp11, asm: "SARQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SARLconst", argLength: 1, reg: gp11, asm: "SARL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
+ {name: "SARWconst", argLength: 1, reg: gp11, asm: "SARW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int16(arg0) >> auxint, shift amount 0-15
+ {name: "SARBconst", argLength: 1, reg: gp11, asm: "SARB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // signed int8(arg0) >> auxint, shift amount 0-7
+
+ {name: "SHRDQ", argLength: 3, reg: gp31shift, asm: "SHRQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 >> arg2, shifting in bits from arg1 (==(arg1<<64+arg0)>>arg2, keeping low 64 bits), shift amount is mod 64
+ {name: "SHLDQ", argLength: 3, reg: gp31shift, asm: "SHLQ", resultInArg0: true, clobberFlags: true}, // unsigned arg0 << arg2, shifting in bits from arg1 (==(arg0<<64+arg1)<<arg2, keeping high 64 bits), shift amount is mod 64
+
+ {name: "ROLQ", argLength: 2, reg: gp21shift, asm: "ROLQ", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLL", argLength: 2, reg: gp21shift, asm: "ROLL", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLW", argLength: 2, reg: gp21shift, asm: "ROLW", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "ROLB", argLength: 2, reg: gp21shift, asm: "ROLB", resultInArg0: true, clobberFlags: true}, // arg0 rotate left arg1 bits.
+ {name: "RORQ", argLength: 2, reg: gp21shift, asm: "RORQ", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORL", argLength: 2, reg: gp21shift, asm: "RORL", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORW", argLength: 2, reg: gp21shift, asm: "RORW", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "RORB", argLength: 2, reg: gp21shift, asm: "RORB", resultInArg0: true, clobberFlags: true}, // arg0 rotate right arg1 bits.
+ {name: "ROLQconst", argLength: 1, reg: gp11, asm: "ROLQ", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-63
+ {name: "ROLLconst", argLength: 1, reg: gp11, asm: "ROLL", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-31
+ {name: "ROLWconst", argLength: 1, reg: gp11, asm: "ROLW", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-15
+ {name: "ROLBconst", argLength: 1, reg: gp11, asm: "ROLB", aux: "Int8", resultInArg0: true, clobberFlags: true}, // arg0 rotate left auxint, rotate amount 0-7
+
+ {name: "ADDLload", argLength: 3, reg: gp21load, asm: "ADDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ADDQload", argLength: 3, reg: gp21load, asm: "ADDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBQload", argLength: 3, reg: gp21load, asm: "SUBQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "SUBLload", argLength: 3, reg: gp21load, asm: "SUBL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDLload", argLength: 3, reg: gp21load, asm: "ANDL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ANDQload", argLength: 3, reg: gp21load, asm: "ANDQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORQload", argLength: 3, reg: gp21load, asm: "ORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "ORLload", argLength: 3, reg: gp21load, asm: "ORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORQload", argLength: 3, reg: gp21load, asm: "XORQ", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+ {name: "XORLload", argLength: 3, reg: gp21load, asm: "XORL", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+auxint+aux, arg2 = mem
+
+ {name: "ADDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ADDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ADDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ADDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ADDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ADDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 + tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx4", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "SUBLloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "SUBQloadidx1", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "SUBQloadidx8", argLength: 4, reg: gp21loadidx, asm: "SUBQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 - tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ANDLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ANDQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ANDQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ANDQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 & tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "ORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "ORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "ORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "ORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 | tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx4", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 4, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+4*arg2+auxint+aux, arg3 = mem
+ {name: "XORLloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORL", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+ {name: "XORQloadidx1", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 1, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+ arg2+auxint+aux, arg3 = mem
+ {name: "XORQloadidx8", argLength: 4, reg: gp21loadidx, asm: "XORQ", scale: 8, aux: "SymOff", resultInArg0: true, clobberFlags: true, symEffect: "Read"}, // arg0 ^ tmp, tmp loaded from arg1+8*arg2+auxint+aux, arg3 = mem
+
+ // direct binary-op on memory (read-modify-write)
+ {name: "ADDQmodify", argLength: 3, reg: gpstore, asm: "ADDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBQmodify", argLength: 3, reg: gpstore, asm: "SUBQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDQmodify", argLength: 3, reg: gpstore, asm: "ANDQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORQmodify", argLength: 3, reg: gpstore, asm: "ORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORQmodify", argLength: 3, reg: gpstore, asm: "XORQ", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+ {name: "ADDLmodify", argLength: 3, reg: gpstore, asm: "ADDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) += arg1, arg2=mem
+ {name: "SUBLmodify", argLength: 3, reg: gpstore, asm: "SUBL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) -= arg1, arg2=mem
+ {name: "ANDLmodify", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) &= arg1, arg2=mem
+ {name: "ORLmodify", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) |= arg1, arg2=mem
+ {name: "XORLmodify", argLength: 3, reg: gpstore, asm: "XORL", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Read,Write"}, // *(arg0+auxint+aux) ^= arg1, arg2=mem
+
+ {name: "ADDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "SUBQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "XORQmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORQmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORQ", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "ADDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "ADDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ADDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) += arg2, arg3=mem
+ {name: "SUBLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "SUBLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "SUBL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) -= arg2, arg3=mem
+ {name: "ANDLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ANDLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ANDL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) &= arg2, arg3=mem
+ {name: "ORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "ORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "ORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) |= arg2, arg3=mem
+ {name: "XORLmodifyidx1", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 1, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORLmodifyidx4", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 4, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+auxint+aux) ^= arg2, arg3=mem
+ {name: "XORLmodifyidx8", argLength: 4, reg: gpstoreidx, asm: "XORL", scale: 8, aux: "SymOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+auxint+aux) ^= arg2, arg3=mem
+
+ {name: "ADDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORQconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORQconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORQ", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ADDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ADDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) += ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ANDLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ANDL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) &= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "ORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "ORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) |= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx1", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 1, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+1*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx4", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 4, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+4*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+ {name: "XORLconstmodifyidx8", argLength: 3, reg: gpstoreconstidx, asm: "XORL", scale: 8, aux: "SymValAndOff", typ: "Mem", clobberFlags: true, symEffect: "Read,Write"}, // *(arg0+8*arg1+ValAndOff(AuxInt).Off()+aux) ^= ValAndOff(AuxInt).Val(), arg2=mem
+
+ // unary ops
+ {name: "NEGQ", argLength: 1, reg: gp11, asm: "NEGQ", resultInArg0: true, clobberFlags: true}, // -arg0
+ {name: "NEGL", argLength: 1, reg: gp11, asm: "NEGL", resultInArg0: true, clobberFlags: true}, // -arg0
+
+ {name: "NOTQ", argLength: 1, reg: gp11, asm: "NOTQ", resultInArg0: true}, // ^arg0
+ {name: "NOTL", argLength: 1, reg: gp11, asm: "NOTL", resultInArg0: true}, // ^arg0
+
+ // BS{F,R}Q returns a tuple [result, flags]
+ // result is undefined if the input is zero.
+ // flags are set to "equal" if the input is zero, "not equal" otherwise.
+ // BS{F,R}L returns only the result.
+ {name: "BSFQ", argLength: 1, reg: gp11flags, asm: "BSFQ", typ: "(UInt64,Flags)"}, // # of low-order zeroes in 64-bit arg
+ {name: "BSFL", argLength: 1, reg: gp11, asm: "BSFL", typ: "UInt32", clobberFlags: true}, // # of low-order zeroes in 32-bit arg
+ {name: "BSRQ", argLength: 1, reg: gp11flags, asm: "BSRQ", typ: "(UInt64,Flags)"}, // # of high-order zeroes in 64-bit arg
+ {name: "BSRL", argLength: 1, reg: gp11, asm: "BSRL", typ: "UInt32", clobberFlags: true}, // # of high-order zeroes in 32-bit arg
+
+ // CMOV instructions: 64, 32 and 16-bit sizes.
+ // if arg2 encodes a true result, return arg1, else arg0
+ {name: "CMOVQEQ", argLength: 3, reg: gp21, asm: "CMOVQEQ", resultInArg0: true},
+ {name: "CMOVQNE", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQLT", argLength: 3, reg: gp21, asm: "CMOVQLT", resultInArg0: true},
+ {name: "CMOVQGT", argLength: 3, reg: gp21, asm: "CMOVQGT", resultInArg0: true},
+ {name: "CMOVQLE", argLength: 3, reg: gp21, asm: "CMOVQLE", resultInArg0: true},
+ {name: "CMOVQGE", argLength: 3, reg: gp21, asm: "CMOVQGE", resultInArg0: true},
+ {name: "CMOVQLS", argLength: 3, reg: gp21, asm: "CMOVQLS", resultInArg0: true},
+ {name: "CMOVQHI", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQCC", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVQCS", argLength: 3, reg: gp21, asm: "CMOVQCS", resultInArg0: true},
+
+ {name: "CMOVLEQ", argLength: 3, reg: gp21, asm: "CMOVLEQ", resultInArg0: true},
+ {name: "CMOVLNE", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLLT", argLength: 3, reg: gp21, asm: "CMOVLLT", resultInArg0: true},
+ {name: "CMOVLGT", argLength: 3, reg: gp21, asm: "CMOVLGT", resultInArg0: true},
+ {name: "CMOVLLE", argLength: 3, reg: gp21, asm: "CMOVLLE", resultInArg0: true},
+ {name: "CMOVLGE", argLength: 3, reg: gp21, asm: "CMOVLGE", resultInArg0: true},
+ {name: "CMOVLLS", argLength: 3, reg: gp21, asm: "CMOVLLS", resultInArg0: true},
+ {name: "CMOVLHI", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLCC", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVLCS", argLength: 3, reg: gp21, asm: "CMOVLCS", resultInArg0: true},
+
+ {name: "CMOVWEQ", argLength: 3, reg: gp21, asm: "CMOVWEQ", resultInArg0: true},
+ {name: "CMOVWNE", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWLT", argLength: 3, reg: gp21, asm: "CMOVWLT", resultInArg0: true},
+ {name: "CMOVWGT", argLength: 3, reg: gp21, asm: "CMOVWGT", resultInArg0: true},
+ {name: "CMOVWLE", argLength: 3, reg: gp21, asm: "CMOVWLE", resultInArg0: true},
+ {name: "CMOVWGE", argLength: 3, reg: gp21, asm: "CMOVWGE", resultInArg0: true},
+ {name: "CMOVWLS", argLength: 3, reg: gp21, asm: "CMOVWLS", resultInArg0: true},
+ {name: "CMOVWHI", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWCC", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+ {name: "CMOVWCS", argLength: 3, reg: gp21, asm: "CMOVWCS", resultInArg0: true},
+
+ // CMOV with floating point instructions. We need separate pseudo-ops to handle
+ // InvertFlags correctly, and to generate special code that handles NaN (unordered flag).
+ // NOTE: the fact that CMOV*EQF here is marked to generate CMOV*NE is not a bug. See
+ // code generation in amd64/ssa.go.
+ {name: "CMOVQEQF", argLength: 3, reg: gp21pax, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQNEF", argLength: 3, reg: gp21, asm: "CMOVQNE", resultInArg0: true},
+ {name: "CMOVQGTF", argLength: 3, reg: gp21, asm: "CMOVQHI", resultInArg0: true},
+ {name: "CMOVQGEF", argLength: 3, reg: gp21, asm: "CMOVQCC", resultInArg0: true},
+ {name: "CMOVLEQF", argLength: 3, reg: gp21pax, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLNEF", argLength: 3, reg: gp21, asm: "CMOVLNE", resultInArg0: true},
+ {name: "CMOVLGTF", argLength: 3, reg: gp21, asm: "CMOVLHI", resultInArg0: true},
+ {name: "CMOVLGEF", argLength: 3, reg: gp21, asm: "CMOVLCC", resultInArg0: true},
+ {name: "CMOVWEQF", argLength: 3, reg: gp21pax, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWNEF", argLength: 3, reg: gp21, asm: "CMOVWNE", resultInArg0: true},
+ {name: "CMOVWGTF", argLength: 3, reg: gp21, asm: "CMOVWHI", resultInArg0: true},
+ {name: "CMOVWGEF", argLength: 3, reg: gp21, asm: "CMOVWCC", resultInArg0: true},
+
+ {name: "BSWAPQ", argLength: 1, reg: gp11, asm: "BSWAPQ", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+ {name: "BSWAPL", argLength: 1, reg: gp11, asm: "BSWAPL", resultInArg0: true, clobberFlags: true}, // arg0 swap bytes
+
+ // POPCNT instructions aren't guaranteed to be on the target platform (they are SSE4).
+ // Any use must be preceded by a successful check of runtime.x86HasPOPCNT.
+ {name: "POPCNTQ", argLength: 1, reg: gp11, asm: "POPCNTQ", clobberFlags: true}, // count number of set bits in arg0
+ {name: "POPCNTL", argLength: 1, reg: gp11, asm: "POPCNTL", clobberFlags: true}, // count number of set bits in arg0
+
+ {name: "SQRTSD", argLength: 1, reg: fp11, asm: "SQRTSD"}, // sqrt(arg0)
+ {name: "SQRTSS", argLength: 1, reg: fp11, asm: "SQRTSS"}, // sqrt(arg0), float32
+
+ // ROUNDSD instruction isn't guaranteed to be on the target platform (it is SSE4.1)
+ // Any use must be preceded by a successful check of runtime.x86HasSSE41.
+ {name: "ROUNDSD", argLength: 1, reg: fp11, aux: "Int8", asm: "ROUNDSD"}, // rounds arg0 depending on auxint, 1 means math.Floor, 2 Ceil, 3 Trunc
+
+ // VFMADD231SD only exists on platforms with the FMA3 instruction set.
+ // Any use must be preceded by a successful check of runtime.support_fma.
+ {name: "VFMADD231SD", argLength: 3, reg: fp31, resultInArg0: true, asm: "VFMADD231SD"},
+
+ {name: "SBBQcarrymask", argLength: 1, reg: flagsgp, asm: "SBBQ"}, // (int64)(-1) if carry is set, 0 if carry is clear.
+ {name: "SBBLcarrymask", argLength: 1, reg: flagsgp, asm: "SBBL"}, // (int32)(-1) if carry is set, 0 if carry is clear.
+ // Note: SBBW and SBBB are subsumed by SBBL
+
+ {name: "SETEQ", argLength: 1, reg: readflags, asm: "SETEQ"}, // extract == condition from arg0
+ {name: "SETNE", argLength: 1, reg: readflags, asm: "SETNE"}, // extract != condition from arg0
+ {name: "SETL", argLength: 1, reg: readflags, asm: "SETLT"}, // extract signed < condition from arg0
+ {name: "SETLE", argLength: 1, reg: readflags, asm: "SETLE"}, // extract signed <= condition from arg0
+ {name: "SETG", argLength: 1, reg: readflags, asm: "SETGT"}, // extract signed > condition from arg0
+ {name: "SETGE", argLength: 1, reg: readflags, asm: "SETGE"}, // extract signed >= condition from arg0
+ {name: "SETB", argLength: 1, reg: readflags, asm: "SETCS"}, // extract unsigned < condition from arg0
+ {name: "SETBE", argLength: 1, reg: readflags, asm: "SETLS"}, // extract unsigned <= condition from arg0
+ {name: "SETA", argLength: 1, reg: readflags, asm: "SETHI"}, // extract unsigned > condition from arg0
+ {name: "SETAE", argLength: 1, reg: readflags, asm: "SETCC"}, // extract unsigned >= condition from arg0
+ {name: "SETO", argLength: 1, reg: readflags, asm: "SETOS"}, // extract if overflow flag is set from arg0
+ // Variants that store result to memory
+ {name: "SETEQstore", argLength: 3, reg: gpstoreconst, asm: "SETEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract == condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETNEstore", argLength: 3, reg: gpstoreconst, asm: "SETNE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract != condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLstore", argLength: 3, reg: gpstoreconst, asm: "SETLT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETLEstore", argLength: 3, reg: gpstoreconst, asm: "SETLE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGstore", argLength: 3, reg: gpstoreconst, asm: "SETGT", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETGEstore", argLength: 3, reg: gpstoreconst, asm: "SETGE", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract signed >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBstore", argLength: 3, reg: gpstoreconst, asm: "SETCS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned < condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETBEstore", argLength: 3, reg: gpstoreconst, asm: "SETLS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned <= condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAstore", argLength: 3, reg: gpstoreconst, asm: "SETHI", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned > condition from arg1 to arg0+auxint+aux, arg2=mem
+ {name: "SETAEstore", argLength: 3, reg: gpstoreconst, asm: "SETCC", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // extract unsigned >= condition from arg1 to arg0+auxint+aux, arg2=mem
+ // Need different opcodes for floating point conditions because
+ // any comparison involving a NaN is always FALSE and thus
+ // the patterns for inverting conditions cannot be used.
+ {name: "SETEQF", argLength: 1, reg: flagsgpax, asm: "SETEQ", clobberFlags: true}, // extract == condition from arg0
+ {name: "SETNEF", argLength: 1, reg: flagsgpax, asm: "SETNE", clobberFlags: true}, // extract != condition from arg0
+ {name: "SETORD", argLength: 1, reg: flagsgp, asm: "SETPC"}, // extract "ordered" (No Nan present) condition from arg0
+ {name: "SETNAN", argLength: 1, reg: flagsgp, asm: "SETPS"}, // extract "unordered" (Nan present) condition from arg0
+
+ {name: "SETGF", argLength: 1, reg: flagsgp, asm: "SETHI"}, // extract floating > condition from arg0
+ {name: "SETGEF", argLength: 1, reg: flagsgp, asm: "SETCC"}, // extract floating >= condition from arg0
+
+ {name: "MOVBQSX", argLength: 1, reg: gp11, asm: "MOVBQSX"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBQZX", argLength: 1, reg: gp11, asm: "MOVBLZX"}, // zero extend arg0 from int8 to int64
+ {name: "MOVWQSX", argLength: 1, reg: gp11, asm: "MOVWQSX"}, // sign extend arg0 from int16 to int64
+ {name: "MOVWQZX", argLength: 1, reg: gp11, asm: "MOVWLZX"}, // zero extend arg0 from int16 to int64
+ {name: "MOVLQSX", argLength: 1, reg: gp11, asm: "MOVLQSX"}, // sign extend arg0 from int32 to int64
+ {name: "MOVLQZX", argLength: 1, reg: gp11, asm: "MOVL"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVLconst", reg: gp01, asm: "MOVL", typ: "UInt32", aux: "Int32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVQconst", reg: gp01, asm: "MOVQ", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "CVTTSD2SL", argLength: 1, reg: fpgp, asm: "CVTTSD2SL"}, // convert float64 to int32
+ {name: "CVTTSD2SQ", argLength: 1, reg: fpgp, asm: "CVTTSD2SQ"}, // convert float64 to int64
+ {name: "CVTTSS2SL", argLength: 1, reg: fpgp, asm: "CVTTSS2SL"}, // convert float32 to int32
+ {name: "CVTTSS2SQ", argLength: 1, reg: fpgp, asm: "CVTTSS2SQ"}, // convert float32 to int64
+ {name: "CVTSL2SS", argLength: 1, reg: gpfp, asm: "CVTSL2SS"}, // convert int32 to float32
+ {name: "CVTSL2SD", argLength: 1, reg: gpfp, asm: "CVTSL2SD"}, // convert int32 to float64
+ {name: "CVTSQ2SS", argLength: 1, reg: gpfp, asm: "CVTSQ2SS"}, // convert int64 to float32
+ {name: "CVTSQ2SD", argLength: 1, reg: gpfp, asm: "CVTSQ2SD"}, // convert int64 to float64
+ {name: "CVTSD2SS", argLength: 1, reg: fp11, asm: "CVTSD2SS"}, // convert float64 to float32
+ {name: "CVTSS2SD", argLength: 1, reg: fp11, asm: "CVTSS2SD"}, // convert float32 to float64
+
+ // Move values between int and float registers, with no conversion.
+ // TODO: should we have generic versions of these?
+ {name: "MOVQi2f", argLength: 1, reg: gpfp, typ: "Float64"}, // move 64 bits from int to float reg
+ {name: "MOVQf2i", argLength: 1, reg: fpgp, typ: "UInt64"}, // move 64 bits from float to int reg
+ {name: "MOVLi2f", argLength: 1, reg: gpfp, typ: "Float32"}, // move 32 bits from int to float reg
+ {name: "MOVLf2i", argLength: 1, reg: fpgp, typ: "UInt32"}, // move 32 bits from float to int reg, zero extend
+
+ {name: "PXOR", argLength: 2, reg: fp21, asm: "PXOR", commutative: true, resultInArg0: true}, // exclusive or, applied to X regs for float negation.
+
+ {name: "LEAQ", argLength: 1, reg: gp11sb, asm: "LEAQ", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAL", argLength: 1, reg: gp11sb, asm: "LEAL", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAW", argLength: 1, reg: gp11sb, asm: "LEAW", aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxint + offset encoded in aux
+ {name: "LEAQ1", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAL1", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAW1", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 1, commutative: true, aux: "SymOff", symEffect: "Addr"}, // arg0 + arg1 + auxint + aux
+ {name: "LEAQ2", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAL2", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAW2", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 2, aux: "SymOff", symEffect: "Addr"}, // arg0 + 2*arg1 + auxint + aux
+ {name: "LEAQ4", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAL4", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAW4", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 4, aux: "SymOff", symEffect: "Addr"}, // arg0 + 4*arg1 + auxint + aux
+ {name: "LEAQ8", argLength: 2, reg: gp21sb, asm: "LEAQ", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAL8", argLength: 2, reg: gp21sb, asm: "LEAL", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ {name: "LEAW8", argLength: 2, reg: gp21sb, asm: "LEAW", scale: 8, aux: "SymOff", symEffect: "Addr"}, // arg0 + 8*arg1 + auxint + aux
+ // Note: LEAx{1,2,4,8} must not have OpSB as either argument.
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVBLZX", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBQSXload", argLength: 2, reg: gpload, asm: "MOVBQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVWLZX", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWQSXload", argLength: 2, reg: gpload, asm: "MOVWQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVLload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVLQSXload", argLength: 2, reg: gpload, asm: "MOVLQSX", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVQload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVLstore", argLength: 3, reg: gpstore, asm: "MOVL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVQstore", argLength: 3, reg: gpstore, asm: "MOVQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVOload", argLength: 2, reg: fpload, asm: "MOVUPS", aux: "SymOff", typ: "Int128", faultOnNilArg0: true, symEffect: "Read"}, // load 16 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVOstore", argLength: 3, reg: fpstore, asm: "MOVUPS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes in arg1 to arg0+auxint+aux. arg2=mem
+
+ // indexed loads/stores
+ {name: "MOVBloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBLZX", scale: 1, aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWLZX", scale: 1, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVWloadidx2", argLength: 3, reg: gploadidx, asm: "MOVWLZX", scale: 2, aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+2*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx4", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 4, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+4*arg1+auxint+aux. arg2=mem
+ {name: "MOVLloadidx8", argLength: 3, reg: gploadidx, asm: "MOVL", scale: 8, aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ {name: "MOVQloadidx1", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVQloadidx8", argLength: 3, reg: gploadidx, asm: "MOVQ", scale: 8, aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+8*arg1+auxint+aux. arg2=mem
+ // TODO: sign-extending indexed loads
+ {name: "MOVBstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx2", argLength: 4, reg: gpstoreidx, asm: "MOVW", scale: 2, aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+2*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx4", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 4, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+4*arg1+auxint+aux. arg3=mem
+ {name: "MOVLstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVL", scale: 8, aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ {name: "MOVQstoreidx1", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVQstoreidx8", argLength: 4, reg: gpstoreidx, asm: "MOVQ", scale: 8, aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+8*arg1+auxint+aux. arg3=mem
+ // TODO: add size-mismatched indexed loads/stores, like MOVBstoreidx4.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVLstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVL", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+ {name: "MOVQstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVQ", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ...
+ {name: "MOVOstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVUPS", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of ...
+
+ {name: "MOVBstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVB", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+1*arg1+ValAndOff(AuxInt).Off()+aux. arg2=mem
+ {name: "MOVWstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVW", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... arg1 ...
+ {name: "MOVWstoreconstidx2", argLength: 3, reg: gpstoreconstidx, asm: "MOVW", scale: 2, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 2 bytes of ... 2*arg1 ...
+ {name: "MOVLstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVL", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... arg1 ...
+ {name: "MOVLstoreconstidx4", argLength: 3, reg: gpstoreconstidx, asm: "MOVL", scale: 4, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store low 4 bytes of ... 4*arg1 ...
+ {name: "MOVQstoreconstidx1", argLength: 3, reg: gpstoreconstidx, commutative: true, asm: "MOVQ", scale: 1, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store 8 bytes of ... arg1 ...
+ {name: "MOVQstoreconstidx8", argLength: 3, reg: gpstoreconstidx, asm: "MOVQ", scale: 8, aux: "SymValAndOff", typ: "Mem", symEffect: "Write"}, // store 8 bytes of ... 8*arg1 ...
+
+ // arg0 = pointer to start of memory to zero
+ // arg1 = mem
+ // auxint = # of bytes to zero
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI")},
+ clobbers: buildReg("DI"),
+ },
+ faultOnNilArg0: true,
+ unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ },
+
+ // arg0 = address of memory to zero
+ // arg1 = # of 8-byte words to zero
+ // arg2 = value to store (will always be zero)
+ // arg3 = mem
+ // returns mem
+ {
+ name: "REPSTOSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("CX"), buildReg("AX")},
+ clobbers: buildReg("DI CX"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // With a register ABI, the actual register info for these instructions (i.e., what is used in regalloc) is augmented with per-call-site bindings of additional arguments to specific in and out registers.
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("DX"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = mem
+ // auxint = # of bytes to copy, must be multiple of 16
+ // returns memory
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI")},
+ clobbers: buildReg("DI SI X0"), // uses X0 as a temporary
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // arg0 = destination pointer
+ // arg1 = source pointer
+ // arg2 = # of 8-byte words to copy
+ // arg3 = mem
+ // returns memory
+ {
+ name: "REPMOVSQ",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("DI"), buildReg("SI"), buildReg("CX")},
+ clobbers: buildReg("DI SI CX"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // (InvertFlags (CMPQ a b)) == (CMPQ b a)
+ // So if we want (SETL (CMPQ a b)) but we can't do that because a is a constant,
+ // then we do (SETL (InvertFlags (CMPQ b a))) instead.
+ // Rewrites will convert this to (SETG (CMPQ b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of DX (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("DX")}}, zeroWidth: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+ // arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary, but may clobber others.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("DI"), buildReg("AX CX DX BX BP SI R8 R9")}, clobbers: callerSave &^ (gp | g)}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ {name: "LoweredHasCPUFeature", argLength: 0, reg: gp01, rematerializeable: true, typ: "UInt64", aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{dx, bx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{cx, dx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{ax, cx}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant flag values. For any comparison, there are 5 possible
+ // outcomes: the three from the signed total order (<,==,>) and the
+ // three from the unsigned total order. The == cases overlap.
+ // Note: there's a sixth "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT_ULT"}, // signed < and unsigned <
+ {name: "FlagLT_UGT"}, // signed < and unsigned >
+ {name: "FlagGT_UGT"}, // signed > and unsigned >
+ {name: "FlagGT_ULT"}, // signed > and unsigned <
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBatomicload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVLatomicload", argLength: 2, reg: gpload, asm: "MOVL", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVQatomicload", argLength: 2, reg: gpload, asm: "MOVQ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores and exchanges. Stores use XCHG to get the right memory ordering semantics.
+ // store arg0 to arg1+auxint+aux, arg2=mem.
+ // These ops return a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XCHGB", argLength: 3, reg: gpstorexchg, asm: "XCHGB", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGL", argLength: 3, reg: gpstorexchg, asm: "XCHGL", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XCHGQ", argLength: 3, reg: gpstorexchg, asm: "XCHGQ", aux: "SymOff", resultInArg0: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Atomic adds.
+ // *(arg1+auxint+aux) += arg0. arg2=mem.
+ // Returns a tuple of <old contents of *(arg1+auxint+aux), memory>.
+ // Note: arg0 and arg1 are backwards compared to MOVLstore (to facilitate resultInArg0)!
+ {name: "XADDLlock", argLength: 3, reg: gpstorexchg, asm: "XADDL", typ: "(UInt32,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "XADDQlock", argLength: 3, reg: gpstorexchg, asm: "XADDQ", typ: "(UInt64,Mem)", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in AX, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CMPXCHGQ ...
+ // SETEQ AX
+ // CMPB AX, $0
+ // JNE ...
+ // instead of just
+ // CMPXCHGQ ...
+ // JEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "CMPXCHGLlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "CMPXCHGQlock", argLength: 4, reg: cmpxchg, asm: "CMPXCHGQ", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Atomic memory updates.
+ {name: "ANDBlock", argLength: 3, reg: gpstore, asm: "ANDB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ANDLlock", argLength: 3, reg: gpstore, asm: "ANDL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) &= arg1
+ {name: "ORBlock", argLength: 3, reg: gpstore, asm: "ORB", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+ {name: "ORLlock", argLength: 3, reg: gpstore, asm: "ORL", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"}, // *(arg0+auxint+aux) |= arg1
+
+ // Prefetch instructions
+ // Prefetch the address in arg0. arg0=addr, arg1=memory. The instruction variant selects the locality hint.
+ {name: "PrefetchT0", argLength: 2, reg: prefreg, asm: "PREFETCHT0", hasSideEffects: true},
+ {name: "PrefetchNTA", argLength: 2, reg: prefreg, asm: "PREFETCHNTA", hasSideEffects: true},
+
+ // CPUID feature: BMI1.
+ {name: "ANDNQ", argLength: 2, reg: gp21, asm: "ANDNQ", clobberFlags: true}, // arg0 &^ arg1
+ {name: "ANDNL", argLength: 2, reg: gp21, asm: "ANDNL", clobberFlags: true}, // arg0 &^ arg1
+ {name: "BLSIQ", argLength: 1, reg: gp11, asm: "BLSIQ", clobberFlags: true}, // arg0 & -arg0
+ {name: "BLSIL", argLength: 1, reg: gp11, asm: "BLSIL", clobberFlags: true}, // arg0 & -arg0
+ {name: "BLSMSKQ", argLength: 1, reg: gp11, asm: "BLSMSKQ", clobberFlags: true}, // arg0 ^ (arg0 - 1)
+ {name: "BLSMSKL", argLength: 1, reg: gp11, asm: "BLSMSKL", clobberFlags: true}, // arg0 ^ (arg0 - 1)
+ {name: "BLSRQ", argLength: 1, reg: gp11, asm: "BLSRQ", clobberFlags: true}, // arg0 & (arg0 - 1)
+ {name: "BLSRL", argLength: 1, reg: gp11, asm: "BLSRL", clobberFlags: true}, // arg0 & (arg0 - 1)
+ // count the number of trailing zero bits, prefer TZCNTQ over BSFQ, as TZCNTQ(0)==64
+ // and BSFQ(0) is undefined. Same for TZCNTL(0)==32
+ {name: "TZCNTQ", argLength: 1, reg: gp11, asm: "TZCNTQ", clobberFlags: true},
+ {name: "TZCNTL", argLength: 1, reg: gp11, asm: "TZCNTL", clobberFlags: true},
+
+ // CPUID feature: MOVBE
+ {name: "MOVBELload", argLength: 2, reg: gpload, asm: "MOVBEL", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBELstore", argLength: 3, reg: gpstore, asm: "MOVBEL", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVBEQload", argLength: 2, reg: gpload, asm: "MOVBEQ", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load and swap 8 bytes from arg0+auxint+aux. arg1=mem
+ {name: "MOVBEQstore", argLength: 3, reg: gpstore, asm: "MOVBEQ", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // swap and store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ }
+
+ var AMD64blocks = []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "OS", controls: 1},
+ {name: "OC", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "EQF", controls: 1},
+ {name: "NEF", controls: 1},
+ {name: "ORD", controls: 1}, // FP, ordered comparison (parity zero)
+ {name: "NAN", controls: 1}, // FP, unordered comparison (parity one)
+ }
+
+ archs = append(archs, arch{
+ name: "AMD64",
+ pkg: "cmd/internal/obj/x86",
+ genfile: "../../amd64/ssa.go",
+ ops: AMD64ops,
+ blocks: AMD64blocks,
+ regnames: regNamesAMD64,
+ ParamIntRegNames: "AX BX CX DI SI R8 R9 R10 R11",
+ ParamFloatRegNames: "X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14",
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: x15,
+ framepointerreg: int8(num["BP"]),
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
new file mode 100644
index 0000000..dd8f8ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/AMD64splitload.rules
@@ -0,0 +1,45 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules used by flagalloc and addressingmodes to
+// split a flag-generating merged load op into separate load and op.
+// Unlike with the other rules files, not all of these
+// rules will be applied to all values.
+// Rather, flagalloc will request that rules be applied
+// to a particular problematic value.
+// These are often the exact inverse of rules in AMD64.rules,
+// only with the conditions removed.
+//
+// For addressingmodes, certain single instructions are slower than the two instruction
+// split generated here (which is different from the inputs to addressingmodes).
+// For example:
+// (CMPBconstload c (ADDQ x y)) -> (CMPBconstloadidx1 c x y) -> (CMPB c (MOVBloadidx1 x y))
+
+(CMP(Q|L|W|B)load {sym} [off] ptr x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)load {sym} [off] ptr mem) x)
+
+(CMP(Q|L|W|B)constload {sym} [vo] ptr mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)load {sym} [vo.Off()] ptr mem) x)
+
+(CMPQconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPLconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+(CMPWconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+(CMPBconstload {sym} [vo] ptr mem) && vo.Val() != 0 => (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+
+(CMP(Q|L|W|B)loadidx1 {sym} [off] ptr idx x mem) => (CMP(Q|L|W|B) (MOV(Q|L|W|B)loadidx1 {sym} [off] ptr idx mem) x)
+(CMPQloadidx8 {sym} [off] ptr idx x mem) => (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+(CMPLloadidx4 {sym} [off] ptr idx x mem) => (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+(CMPWloadidx2 {sym} [off] ptr idx x mem) => (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+
+(CMP(Q|L|W|B)constloadidx1 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TEST(Q|L|W|B) x:(MOV(Q|L|W|B)loadidx1 {sym} [vo.Off()] ptr idx mem) x)
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x)
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x)
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() == 0 => (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x)
+
+(CMPQconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+(CMPBconstloadidx1 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()])
+
+(CMPQconstloadidx8 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPLconstloadidx4 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+(CMPWconstloadidx2 {sym} [vo] ptr idx mem) && vo.Val() != 0 => (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM.rules b/src/cmd/compile/internal/ssa/gen/ARM.rules
new file mode 100644
index 0000000..23f1132
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM.rules
@@ -0,0 +1,1482 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+(Add32carry ...) => (ADDS ...)
+(Add32withcarry ...) => (ADC ...)
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+(Sub32carry ...) => (SUBS ...)
+(Sub32withcarry ...) => (SBC ...)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Hmul(32|32u) ...) => (HMU(L|LU) ...)
+(Mul32uhilo ...) => (MULLU ...)
+
+(Div32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if one operand is negative
+ (Select0 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+(Div32u x y) => (Select0 <typ.UInt32> (CALLudiv x y))
+(Div16 x y) => (Div32 (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (Div32 (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div(32|64)F ...) => (DIV(F|D) ...)
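
The Div32 rule above is the usual "divide magnitudes, then fix the sign" identity built from Signmask (x>>31). A rough stand-alone model of that identity, checked against Go's signed division (a sketch, not the compiler's code path):

package main

import "fmt"

// signmask mirrors the Signmask op: 0 for non-negative x, -1 for negative x.
func signmask(x int32) int32 { return x >> 31 }

// div32 follows the rewrite above: strip the signs, divide unsigned,
// then negate the quotient when exactly one operand was negative.
func div32(x, y int32) int32 {
	sx, sy := signmask(x), signmask(y)
	ux := uint32((x ^ sx) - sx) // |x|, correct even for MinInt32
	uy := uint32((y ^ sy) - sy) // |y|
	q := int32(ux / uy)
	s := signmask(x ^ y) // -1 iff the signs differ
	return (q ^ s) - s   // conditionally negate the quotient
}

func main() {
	fmt.Println(div32(-7, 2), -7/2)   // -3 -3
	fmt.Println(div32(7, -2), 7/-2)   // -3 -3
	fmt.Println(div32(-7, -2), -7/-2) // 3 3
}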
+
+(Mod32 x y) =>
+ (SUB (XOR <typ.UInt32> // negate the result if x is negative
+ (Select1 <typ.UInt32> (CALLudiv
+ (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) // negate x if negative
+ (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) // negate y if negative
+ (Signmask x)) (Signmask x))
+(Mod32u x y) => (Select1 <typ.UInt32> (CALLudiv x y))
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
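
The Avg32u rewrite avoids forming the overflowing sum x+y by computing (x-y)/2 + y instead, which is safe because the op is only used with x >= y (per the comment). A small sketch:

package main

import (
	"fmt"
	"math"
)

// avg32u computes the floor of (x+y)/2 without the sum ever being formed,
// valid when x >= y.
func avg32u(x, y uint32) uint32 { return (x-y)/2 + y }

func main() {
	x, y := uint32(math.MaxUint32), uint32(math.MaxUint32-1)
	fmt.Println(avg32u(x, y)) // 4294967294, even though x+y would overflow uint32
}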
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(32|16|8) x) => (RSBconst [0] x)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) ...) => (MVN ...)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+(Abs ...) => (ABSD ...)
+
+// TODO: optimize this for ARMv5 and ARMv6
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz16NonZero ...) => (Ctz32 ...)
+(Ctz8NonZero ...) => (Ctz32 ...)
+
+// count trailing zero for ARMv5 and ARMv6
+// 32 - CLZ(x&-x - 1)
+(Ctz32 <t> x) && buildcfg.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+(Ctz16 <t> x) && buildcfg.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+(Ctz8 <t> x) && buildcfg.GOARM<=6 =>
+ (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
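
The 32 - CLZ(x&-x - 1) identity used above can be checked directly with math/bits:

package main

import (
	"fmt"
	"math/bits"
)

// ctz32 uses the identity from the comment: x & -x isolates the lowest set
// bit, subtracting 1 turns it into a mask of the trailing zeros, and
// 32 - CLZ of that mask counts them (it even yields 32 for x == 0).
func ctz32(x uint32) int { return 32 - bits.LeadingZeros32((x&-x)-1) }

func main() {
	for _, x := range []uint32{1, 8, 0x12340000, 0xffffffff, 0} {
		fmt.Println(ctz32(x), bits.TrailingZeros32(x)) // always equal
	}
}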
+
+// count trailing zero for ARMv7
+(Ctz32 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <t> x))
+(Ctz16 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) && buildcfg.GOARM==7 => (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+
+// bit length
+(BitLen32 <t> x) => (RSBconst [32] (CLZ <t> x))
+
+// byte swap for ARMv5
+// let (a, b, c, d) be the bytes of x from high to low
+// t1 = x right rotate 16 bits -- (c, d, a, b )
+// t2 = x ^ t1 -- (a^c, b^d, a^c, b^d)
+// t3 = t2 &^ 0xff0000 -- (a^c, 0, a^c, b^d)
+// t4 = t3 >> 8 -- (0, a^c, 0, a^c)
+// t5 = x right rotate 8 bits -- (d, a, b, c )
+// result = t4 ^ t5 -- (d, c, b, a )
+// using shifted ops this can be done in 4 instructions.
+(Bswap32 <t> x) && buildcfg.GOARM==5 =>
+ (XOR <t>
+ (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8])
+ (SRRconst <t> x [8]))
+
+// byte swap for ARMv6 and above
+(Bswap32 x) && buildcfg.GOARM>=6 => (REV x)
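
The t1..t5 steps in the ARMv5 comment translate line for line into Go and agree with bits.ReverseBytes32; a sketch for checking the sequence:

package main

import (
	"fmt"
	"math/bits"
)

// bswap32armv5 follows the t1..t5 steps from the comment above.
func bswap32armv5(x uint32) uint32 {
	t1 := bits.RotateLeft32(x, -16) // x rotated right 16: (c, d, a, b)
	t2 := x ^ t1                    // (a^c, b^d, a^c, b^d)
	t3 := t2 &^ 0xff0000            // (a^c, 0,   a^c, b^d)
	t4 := t3 >> 8                   // (0,   a^c, 0,   a^c)
	t5 := bits.RotateLeft32(x, -8)  // x rotated right 8:  (d, a, b, c)
	return t4 ^ t5                  // (d,   c,   b,   a)
}

func main() {
	x := uint32(0x11223344)
	fmt.Printf("%#x %#x\n", bswap32armv5(x), bits.ReverseBytes32(x)) // 0x44332211 0x44332211
}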
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// shifts
+// hardware instruction uses only the low byte of the shift
+// we compare to 256 to ensure Go semantics for large shifts
+(Lsh32x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh32x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh32x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh16x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh16x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh16x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Lsh8x32 x y) => (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+(Lsh8x16 x y) => (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Lsh8x8 x y) => (SLL x (ZeroExt8to32 y))
+
+(Rsh32Ux32 x y) => (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+(Rsh32Ux16 x y) => (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh32Ux8 x y) => (SRL x (ZeroExt8to32 y))
+
+(Rsh16Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+(Rsh16Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh16Ux8 x y) => (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8Ux32 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+(Rsh8Ux16 x y) => (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+(Rsh8Ux8 x y) => (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Rsh32x32 x y) => (SRAcond x y (CMPconst [256] y))
+(Rsh32x16 x y) => (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh32x8 x y) => (SRA x (ZeroExt8to32 y))
+
+(Rsh16x32 x y) => (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+(Rsh16x16 x y) => (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+
+(Rsh8x32 x y) => (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+(Rsh8x16 x y) => (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) (ZeroExt8to32 y))
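
The CMPconst [256] guards above exist because the low-byte behaviour only diverges from Go semantics for counts of 256 and up; for counts in 32..255 both already yield 0 on logical and left shifts. A small model of the difference (a sketch of the semantics, not the generated code):

package main

import "fmt"

// goShift follows Go semantics: any count >= 32 gives 0 for a uint32 operand.
func goShift(x, y uint32) uint32 { return x << y }

// armRegShift models a register-controlled shift that looks only at the low
// byte of the count: counts 32..255 still give 0, but 256 behaves like 0.
func armRegShift(x, y uint32) uint32 { return x << (y & 0xff) }

func main() {
	x := uint32(0xff)
	fmt.Println(goShift(x, 40), armRegShift(x, 40))   // 0 0   (agree)
	fmt.Println(goShift(x, 256), armRegShift(x, 256)) // 0 255 (disagree: hence the CMPconst [256] guard)
}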
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint64(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint64(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint64(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint64(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint64(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint64(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+(Lsh16x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+(Lsh8x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// large constant signed right shift: only the sign bit remains, replicated across the word
+(Rsh32x64 x (Const64 [c])) && uint64(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint64(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint64(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// constants
+(Const(8|16|32) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVWconst [0])
+(ConstBool [t]) => (MOVWconst [b2i32(t)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31]) // sign bit of uint32(x)>>1 - x
+(Slicemask <t> x) => (SRAconst (RSBconst <t> [0] x) [31])
+
+// float <-> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt32Uto32F ...) => (MOVWUF ...)
+(Cvt32Uto64F ...) => (MOVWUD ...)
+(Cvt32Fto32 ...) => (MOVFW ...)
+(Cvt64Fto32 ...) => (MOVDW ...)
+(Cvt32Fto32U ...) => (MOVFWU ...)
+(Cvt64Fto32U ...) => (MOVDWU ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+// fused-multiply-add
+(FMA x y z) => (FMULAD z x y)
+
+// comparisons
+(Eq8 x y) => (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq(32|64)F x y) => (Equal (CMP(F|D) x y))
+
+(Neq8 x y) => (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq(32|64)F x y) => (NotEqual (CMP(F|D) x y))
+
+(Less8 x y) => (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMP x y))
+(Less(32|64)F x y) => (GreaterThan (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMP x y))
+(Leq(32|64)F x y) => (GreaterEqual (CMP(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMP x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+
+// Medium zeroing uses a duff device
+// 4 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
+
+// Large zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ (MOVWconst [0])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+
+// Medium move uses a duff device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%4 == 0 && s > 4 && s <= 512
+ && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (128 - s/4)] dst src mem)
+
+// Large move uses a loop
+(Move [s] {t} dst src mem)
+ && ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+
+(If cond yes no) => (NE (CMPconst [0] cond) yes no)
+
+// Absorb boolean tests into block
+(NE (CMPconst [0] (Equal cc)) yes no) => (EQ cc yes no)
+(NE (CMPconst [0] (NotEqual cc)) yes no) => (NE cc yes no)
+(NE (CMPconst [0] (LessThan cc)) yes no) => (LT cc yes no)
+(NE (CMPconst [0] (LessThanU cc)) yes no) => (ULT cc yes no)
+(NE (CMPconst [0] (LessEqual cc)) yes no) => (LE cc yes no)
+(NE (CMPconst [0] (LessEqualU cc)) yes no) => (ULE cc yes no)
+(NE (CMPconst [0] (GreaterThan cc)) yes no) => (GT cc yes no)
+(NE (CMPconst [0] (GreaterThanU cc)) yes no) => (UGT cc yes no)
+(NE (CMPconst [0] (GreaterEqual cc)) yes no) => (GE cc yes no)
+(NE (CMPconst [0] (GreaterEqualU cc)) yes no) => (UGE cc yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+(SUBconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off2-off1] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBload [off1-off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVBUload [off1-off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHload [off1-off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVHUload [off1-off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVWload [off1-off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVFload [off1-off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} (SUBconst [off2] ptr) mem) => (MOVDload [off1-off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVBstore [off1-off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVHstore [off1-off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVWstore [off1-off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVFstore [off1-off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem) => (MOVDstore [off1-off2] {sym} ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+(MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _)) && c==d && isSamePtr(ptr, ptr2) => x
+(MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _)) && isSamePtr(ptr, ptr2) => (MOVHreg x)
+
+// fold constant into arithmetic ops
+(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(SUB (MOVWconst [c]) x) => (RSBconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(RSB (MOVWconst [c]) x) => (SUBconst [c] x)
+(RSB x (MOVWconst [c])) => (RSBconst [c] x)
+
+(ADDS x (MOVWconst [c])) => (ADDSconst [c] x)
+(SUBS x (MOVWconst [c])) => (SUBSconst [c] x)
+
+(ADC (MOVWconst [c]) x flags) => (ADCconst [c] x flags)
+(SBC (MOVWconst [c]) x flags) => (RSCconst [c] x flags)
+(SBC x (MOVWconst [c]) flags) => (SBCconst [c] x flags)
+
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(BIC x (MOVWconst [c])) => (BICconst [c] x)
+
+(SLL x (MOVWconst [c])) && 0 <= c && c < 32 => (SLLconst x [c])
+(SRL x (MOVWconst [c])) && 0 <= c && c < 32 => (SRLconst x [c])
+(SRA x (MOVWconst [c])) && 0 <= c && c < 32 => (SRAconst x [c])
+
+(CMP x (MOVWconst [c])) => (CMPconst [c] x)
+(CMP (MOVWconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMN x (MOVWconst [c])) => (CMNconst [c] x)
+(TST x (MOVWconst [c])) => (TSTconst [c] x)
+(TEQ x (MOVWconst [c])) => (TEQconst [c] x)
+
+(SRR x (MOVWconst [c])) => (SRRconst x [c&31])
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+(CMP x y) && canonLessThan(x,y) => (InvertFlags (CMP y x))
+
+// don't extend after proper load
+// MOVWreg instruction is not emitted if src and dst registers are same, but it ensures the type.
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVWnop doesn't emit instruction, only for ensuring the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// TODO: we should be able to get rid of MOVWnop all together.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
+// mul by constant
+(MUL x (MOVWconst [c])) && int32(c) == -1 => (RSBconst [0] x)
+(MUL _ (MOVWconst [0])) => (MOVWconst [0])
+(MUL x (MOVWconst [1])) => x
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c) => (SLLconst [int32(log32(c))] x)
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c-1) && c >= 3 => (ADDshiftLL x x [int32(log32(c-1))])
+(MUL x (MOVWconst [c])) && isPowerOfTwo32(c+1) && c >= 7 => (RSBshiftLL x x [int32(log32(c+1))])
+(MUL x (MOVWconst [c])) && c%3 == 0 && isPowerOfTwo32(c/3) => (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVWconst [c])) && c%5 == 0 && isPowerOfTwo32(c/5) => (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVWconst [c])) && c%7 == 0 && isPowerOfTwo32(c/7) => (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+(MUL x (MOVWconst [c])) && c%9 == 0 && isPowerOfTwo32(c/9) => (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
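
The multiply-by-constant rules above are ordinary strength reduction; the shifted-operand forms they target correspond to identities like these:

package main

import "fmt"

func main() {
	x := uint32(1234)
	fmt.Println(x*8 == x<<3)         // c = 2^n           -> SLLconst
	fmt.Println(x*9 == x+x<<3)       // c-1 = 2^n, c >= 3 -> ADDshiftLL: x + (x<<n)
	fmt.Println(x*7 == x<<3-x)       // c+1 = 2^n, c >= 7 -> RSBshiftLL: (x<<n) - x
	fmt.Println(x*12 == (x+x<<1)<<2) // c = 3*2^n         -> ADDshiftLL then SLLconst
}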
+
+(MULA x (MOVWconst [c]) a) && c == -1 => (SUB a x)
+(MULA _ (MOVWconst [0]) a) => a
+(MULA x (MOVWconst [1]) a) => (ADD x a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULA (MOVWconst [c]) x a) && c == -1 => (SUB a x)
+(MULA (MOVWconst [0]) _ a) => a
+(MULA (MOVWconst [1]) x a) => (ADD x a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULA (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULA (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULA (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULA (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULA (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS x (MOVWconst [c]) a) && c == -1 => (ADD a x)
+(MULS _ (MOVWconst [0]) a) => a
+(MULS x (MOVWconst [1]) a) => (RSB x a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS x (MOVWconst [c]) a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS x (MOVWconst [c]) a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS x (MOVWconst [c]) a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS x (MOVWconst [c]) a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS x (MOVWconst [c]) a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+(MULS (MOVWconst [c]) x a) && c == -1 => (ADD a x)
+(MULS (MOVWconst [0]) _ a) => a
+(MULS (MOVWconst [1]) x a) => (RSB x a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c) => (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c-1) && c >= 3 => (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+(MULS (MOVWconst [c]) x a) && isPowerOfTwo32(c+1) && c >= 7 => (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+(MULS (MOVWconst [c]) x a) && c%3 == 0 && isPowerOfTwo32(c/3) => (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+(MULS (MOVWconst [c]) x a) && c%5 == 0 && isPowerOfTwo32(c/5) => (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+(MULS (MOVWconst [c]) x a) && c%7 == 0 && isPowerOfTwo32(c/7) => (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+(MULS (MOVWconst [c]) x a) && c%9 == 0 && isPowerOfTwo32(c/9) => (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+
+// div by constant
+(Select0 (CALLudiv x (MOVWconst [1]))) => x
+(Select1 (CALLudiv _ (MOVWconst [1]))) => (MOVWconst [0])
+(Select0 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (SRLconst [int32(log32(c))] x)
+(Select1 (CALLudiv x (MOVWconst [c]))) && isPowerOfTwo32(c) => (ANDconst [c-1] x)
+
+// constant comparisons
+(CMPconst (MOVWconst [x]) [y]) => (FlagConstant [subFlags32(x,y)])
+(CMNconst (MOVWconst [x]) [y]) => (FlagConstant [addFlags32(x,y)])
+(TSTconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x&y)])
+(TEQconst (MOVWconst [x]) [y]) => (FlagConstant [logicFlags32(x^y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags32(0, 1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagConstant [subFlags32(0, 1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVWconst [b2i32(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVWconst [b2i32(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVWconst [b2i32(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVWconst [b2i32(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+
+// absorb flag constants into conditional instructions
+(CMOVWLSconst _ (FlagConstant [fc]) [c]) && fc.ule() => (MOVWconst [c])
+(CMOVWLSconst x (FlagConstant [fc]) [c]) && fc.ugt() => x
+
+(CMOVWHSconst _ (FlagConstant [fc]) [c]) && fc.uge() => (MOVWconst [c])
+(CMOVWHSconst x (FlagConstant [fc]) [c]) && fc.ult() => x
+
+(CMOVWLSconst x (InvertFlags flags) [c]) => (CMOVWHSconst x flags [c])
+(CMOVWHSconst x (InvertFlags flags) [c]) => (CMOVWLSconst x flags [c])
+
+(SRAcond x _ (FlagConstant [fc])) && fc.uge() => (SRAconst x [31])
+(SRAcond x y (FlagConstant [fc])) && fc.ult() => (SRA x y)
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORconst [c] _) && int32(c)==-1 => (MOVWconst [-1])
+(XORconst [0] x) => x
+(BICconst [0] x) => x
+(BICconst [c] _) && int32(c)==-1 => (MOVWconst [0])
+
+// generic constant folding
+(ADDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (SUBconst [-c] x)
+(SUBconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c)) => (ADDconst [-c] x)
+(ANDconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c)) => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (SUBconst [-c] x)
+(SUBconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff => (ADDconst [-c] x)
+(ANDconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (BICconst [int32(^uint32(c))] x)
+(BICconst [c] x) && buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff => (ANDconst [int32(^uint32(c))] x)
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(ADDconst [c] (RSBconst [d] x)) => (RSBconst [c+d] x)
+(ADCconst [c] (ADDconst [d] x) flags) => (ADCconst [c+d] x flags)
+(ADCconst [c] (SUBconst [d] x) flags) => (ADCconst [c-d] x flags)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SUBconst [c] (RSBconst [d] x)) => (RSBconst [-c+d] x)
+(SBCconst [c] (ADDconst [d] x) flags) => (SBCconst [c-d] x flags)
+(SBCconst [c] (SUBconst [d] x) flags) => (SBCconst [c+d] x flags)
+(RSBconst [c] (MOVWconst [d])) => (MOVWconst [c-d])
+(RSBconst [c] (RSBconst [d] x)) => (ADDconst [c-d] x)
+(RSBconst [c] (ADDconst [d] x)) => (RSBconst [c-d] x)
+(RSBconst [c] (SUBconst [d] x)) => (RSBconst [c+d] x)
+(RSCconst [c] (ADDconst [d] x) flags) => (RSCconst [c-d] x flags)
+(RSCconst [c] (SUBconst [d] x) flags) => (RSCconst [c+d] x flags)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint64(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint64(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint64(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(MULA (MOVWconst [c]) (MOVWconst [d]) a) => (ADDconst [c*d] a)
+(MULS (MOVWconst [c]) (MOVWconst [d]) a) => (SUBconst [c*d] a)
+(Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(BICconst [c] (MOVWconst [d])) => (MOVWconst [d&^c])
+(BICconst [c] (BICconst [d] x)) => (BICconst [c|d] x)
+(MVN (MOVWconst [c])) => (MOVWconst [^c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+// BFX: Width = c >> 8, LSB = c & 0xff, result = d << (32 - Width - LSB) >> (32 - Width)
+(BFX [c] (MOVWconst [d])) => (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+(BFXU [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
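+// Worked example of that encoding: c = 8<<8|4 means Width=8, LSB=4, so BFX of a constant d
+// folds to d<<20>>24, i.e. bits 4..11 of d sign-extended; BFXU performs the same extraction
+// zero-extended.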
+
+// absorb shifts into ops
+(ADD x (SLLconst [c] y)) => (ADDshiftLL x y [c])
+(ADD x (SRLconst [c] y)) => (ADDshiftRL x y [c])
+(ADD x (SRAconst [c] y)) => (ADDshiftRA x y [c])
+(ADD x (SLL y z)) => (ADDshiftLLreg x y z)
+(ADD x (SRL y z)) => (ADDshiftRLreg x y z)
+(ADD x (SRA y z)) => (ADDshiftRAreg x y z)
+(ADC x (SLLconst [c] y) flags) => (ADCshiftLL x y [c] flags)
+(ADC x (SRLconst [c] y) flags) => (ADCshiftRL x y [c] flags)
+(ADC x (SRAconst [c] y) flags) => (ADCshiftRA x y [c] flags)
+(ADC x (SLL y z) flags) => (ADCshiftLLreg x y z flags)
+(ADC x (SRL y z) flags) => (ADCshiftRLreg x y z flags)
+(ADC x (SRA y z) flags) => (ADCshiftRAreg x y z flags)
+(ADDS x (SLLconst [c] y)) => (ADDSshiftLL x y [c])
+(ADDS x (SRLconst [c] y)) => (ADDSshiftRL x y [c])
+(ADDS x (SRAconst [c] y)) => (ADDSshiftRA x y [c])
+(ADDS x (SLL y z)) => (ADDSshiftLLreg x y z)
+(ADDS x (SRL y z)) => (ADDSshiftRLreg x y z)
+(ADDS x (SRA y z)) => (ADDSshiftRAreg x y z)
+(SUB x (SLLconst [c] y)) => (SUBshiftLL x y [c])
+(SUB (SLLconst [c] y) x) => (RSBshiftLL x y [c])
+(SUB x (SRLconst [c] y)) => (SUBshiftRL x y [c])
+(SUB (SRLconst [c] y) x) => (RSBshiftRL x y [c])
+(SUB x (SRAconst [c] y)) => (SUBshiftRA x y [c])
+(SUB (SRAconst [c] y) x) => (RSBshiftRA x y [c])
+(SUB x (SLL y z)) => (SUBshiftLLreg x y z)
+(SUB (SLL y z) x) => (RSBshiftLLreg x y z)
+(SUB x (SRL y z)) => (SUBshiftRLreg x y z)
+(SUB (SRL y z) x) => (RSBshiftRLreg x y z)
+(SUB x (SRA y z)) => (SUBshiftRAreg x y z)
+(SUB (SRA y z) x) => (RSBshiftRAreg x y z)
+(SBC x (SLLconst [c] y) flags) => (SBCshiftLL x y [c] flags)
+(SBC (SLLconst [c] y) x flags) => (RSCshiftLL x y [c] flags)
+(SBC x (SRLconst [c] y) flags) => (SBCshiftRL x y [c] flags)
+(SBC (SRLconst [c] y) x flags) => (RSCshiftRL x y [c] flags)
+(SBC x (SRAconst [c] y) flags) => (SBCshiftRA x y [c] flags)
+(SBC (SRAconst [c] y) x flags) => (RSCshiftRA x y [c] flags)
+(SBC x (SLL y z) flags) => (SBCshiftLLreg x y z flags)
+(SBC (SLL y z) x flags) => (RSCshiftLLreg x y z flags)
+(SBC x (SRL y z) flags) => (SBCshiftRLreg x y z flags)
+(SBC (SRL y z) x flags) => (RSCshiftRLreg x y z flags)
+(SBC x (SRA y z) flags) => (SBCshiftRAreg x y z flags)
+(SBC (SRA y z) x flags) => (RSCshiftRAreg x y z flags)
+(SUBS x (SLLconst [c] y)) => (SUBSshiftLL x y [c])
+(SUBS (SLLconst [c] y) x) => (RSBSshiftLL x y [c])
+(SUBS x (SRLconst [c] y)) => (SUBSshiftRL x y [c])
+(SUBS (SRLconst [c] y) x) => (RSBSshiftRL x y [c])
+(SUBS x (SRAconst [c] y)) => (SUBSshiftRA x y [c])
+(SUBS (SRAconst [c] y) x) => (RSBSshiftRA x y [c])
+(SUBS x (SLL y z)) => (SUBSshiftLLreg x y z)
+(SUBS (SLL y z) x) => (RSBSshiftLLreg x y z)
+(SUBS x (SRL y z)) => (SUBSshiftRLreg x y z)
+(SUBS (SRL y z) x) => (RSBSshiftRLreg x y z)
+(SUBS x (SRA y z)) => (SUBSshiftRAreg x y z)
+(SUBS (SRA y z) x) => (RSBSshiftRAreg x y z)
+(RSB x (SLLconst [c] y)) => (RSBshiftLL x y [c])
+(RSB (SLLconst [c] y) x) => (SUBshiftLL x y [c])
+(RSB x (SRLconst [c] y)) => (RSBshiftRL x y [c])
+(RSB (SRLconst [c] y) x) => (SUBshiftRL x y [c])
+(RSB x (SRAconst [c] y)) => (RSBshiftRA x y [c])
+(RSB (SRAconst [c] y) x) => (SUBshiftRA x y [c])
+(RSB x (SLL y z)) => (RSBshiftLLreg x y z)
+(RSB (SLL y z) x) => (SUBshiftLLreg x y z)
+(RSB x (SRL y z)) => (RSBshiftRLreg x y z)
+(RSB (SRL y z) x) => (SUBshiftRLreg x y z)
+(RSB x (SRA y z)) => (RSBshiftRAreg x y z)
+(RSB (SRA y z) x) => (SUBshiftRAreg x y z)
+(AND x (SLLconst [c] y)) => (ANDshiftLL x y [c])
+(AND x (SRLconst [c] y)) => (ANDshiftRL x y [c])
+(AND x (SRAconst [c] y)) => (ANDshiftRA x y [c])
+(AND x (SLL y z)) => (ANDshiftLLreg x y z)
+(AND x (SRL y z)) => (ANDshiftRLreg x y z)
+(AND x (SRA y z)) => (ANDshiftRAreg x y z)
+(OR x (SLLconst [c] y)) => (ORshiftLL x y [c])
+(OR x (SRLconst [c] y)) => (ORshiftRL x y [c])
+(OR x (SRAconst [c] y)) => (ORshiftRA x y [c])
+(OR x (SLL y z)) => (ORshiftLLreg x y z)
+(OR x (SRL y z)) => (ORshiftRLreg x y z)
+(OR x (SRA y z)) => (ORshiftRAreg x y z)
+(XOR x (SLLconst [c] y)) => (XORshiftLL x y [c])
+(XOR x (SRLconst [c] y)) => (XORshiftRL x y [c])
+(XOR x (SRAconst [c] y)) => (XORshiftRA x y [c])
+(XOR x (SRRconst [c] y)) => (XORshiftRR x y [c])
+(XOR x (SLL y z)) => (XORshiftLLreg x y z)
+(XOR x (SRL y z)) => (XORshiftRLreg x y z)
+(XOR x (SRA y z)) => (XORshiftRAreg x y z)
+(BIC x (SLLconst [c] y)) => (BICshiftLL x y [c])
+(BIC x (SRLconst [c] y)) => (BICshiftRL x y [c])
+(BIC x (SRAconst [c] y)) => (BICshiftRA x y [c])
+(BIC x (SLL y z)) => (BICshiftLLreg x y z)
+(BIC x (SRL y z)) => (BICshiftRLreg x y z)
+(BIC x (SRA y z)) => (BICshiftRAreg x y z)
+(MVN (SLLconst [c] x)) => (MVNshiftLL x [c])
+(MVN (SRLconst [c] x)) => (MVNshiftRL x [c])
+(MVN (SRAconst [c] x)) => (MVNshiftRA x [c])
+(MVN (SLL x y)) => (MVNshiftLLreg x y)
+(MVN (SRL x y)) => (MVNshiftRLreg x y)
+(MVN (SRA x y)) => (MVNshiftRAreg x y)
+
+(CMP x (SLLconst [c] y)) => (CMPshiftLL x y [c])
+(CMP (SLLconst [c] y) x) => (InvertFlags (CMPshiftLL x y [c]))
+(CMP x (SRLconst [c] y)) => (CMPshiftRL x y [c])
+(CMP (SRLconst [c] y) x) => (InvertFlags (CMPshiftRL x y [c]))
+(CMP x (SRAconst [c] y)) => (CMPshiftRA x y [c])
+(CMP (SRAconst [c] y) x) => (InvertFlags (CMPshiftRA x y [c]))
+(CMP x (SLL y z)) => (CMPshiftLLreg x y z)
+(CMP (SLL y z) x) => (InvertFlags (CMPshiftLLreg x y z))
+(CMP x (SRL y z)) => (CMPshiftRLreg x y z)
+(CMP (SRL y z) x) => (InvertFlags (CMPshiftRLreg x y z))
+(CMP x (SRA y z)) => (CMPshiftRAreg x y z)
+(CMP (SRA y z) x) => (InvertFlags (CMPshiftRAreg x y z))
+(TST x (SLLconst [c] y)) => (TSTshiftLL x y [c])
+(TST x (SRLconst [c] y)) => (TSTshiftRL x y [c])
+(TST x (SRAconst [c] y)) => (TSTshiftRA x y [c])
+(TST x (SLL y z)) => (TSTshiftLLreg x y z)
+(TST x (SRL y z)) => (TSTshiftRLreg x y z)
+(TST x (SRA y z)) => (TSTshiftRAreg x y z)
+(TEQ x (SLLconst [c] y)) => (TEQshiftLL x y [c])
+(TEQ x (SRLconst [c] y)) => (TEQshiftRL x y [c])
+(TEQ x (SRAconst [c] y)) => (TEQshiftRA x y [c])
+(TEQ x (SLL y z)) => (TEQshiftLLreg x y z)
+(TEQ x (SRL y z)) => (TEQshiftRLreg x y z)
+(TEQ x (SRA y z)) => (TEQshiftRAreg x y z)
+(CMN x (SLLconst [c] y)) => (CMNshiftLL x y [c])
+(CMN x (SRLconst [c] y)) => (CMNshiftRL x y [c])
+(CMN x (SRAconst [c] y)) => (CMNshiftRA x y [c])
+(CMN x (SLL y z)) => (CMNshiftLLreg x y z)
+(CMN x (SRL y z)) => (CMNshiftRLreg x y z)
+(CMN x (SRA y z)) => (CMNshiftRAreg x y z)
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVWconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVWconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVWconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ADCshiftLL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+(ADCshiftRL (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+(ADCshiftRA (MOVWconst [c]) x [d] flags) => (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+(ADDSshiftLL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SLLconst <x.Type> x [d]))
+(ADDSshiftRL (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRLconst <x.Type> x [d]))
+(ADDSshiftRA (MOVWconst [c]) x [d]) => (ADDSconst [c] (SRAconst <x.Type> x [d]))
+(SUBshiftLL (MOVWconst [c]) x [d]) => (RSBconst [c] (SLLconst <x.Type> x [d]))
+(SUBshiftRL (MOVWconst [c]) x [d]) => (RSBconst [c] (SRLconst <x.Type> x [d]))
+(SUBshiftRA (MOVWconst [c]) x [d]) => (RSBconst [c] (SRAconst <x.Type> x [d]))
+(SBCshiftLL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+(SBCshiftRL (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+(SBCshiftRA (MOVWconst [c]) x [d] flags) => (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+(SUBSshiftLL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SLLconst <x.Type> x [d]))
+(SUBSshiftRL (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRLconst <x.Type> x [d]))
+(SUBSshiftRA (MOVWconst [c]) x [d]) => (RSBSconst [c] (SRAconst <x.Type> x [d]))
+(RSBshiftLL (MOVWconst [c]) x [d]) => (SUBconst [c] (SLLconst <x.Type> x [d]))
+(RSBshiftRL (MOVWconst [c]) x [d]) => (SUBconst [c] (SRLconst <x.Type> x [d]))
+(RSBshiftRA (MOVWconst [c]) x [d]) => (SUBconst [c] (SRAconst <x.Type> x [d]))
+(RSCshiftLL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+(RSCshiftRL (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+(RSCshiftRA (MOVWconst [c]) x [d] flags) => (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+(RSBSshiftLL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SLLconst <x.Type> x [d]))
+(RSBSshiftRL (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRLconst <x.Type> x [d]))
+(RSBSshiftRA (MOVWconst [c]) x [d]) => (SUBSconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVWconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVWconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVWconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftLL (MOVWconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVWconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVWconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftLL (MOVWconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVWconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVWconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRR (MOVWconst [c]) x [d]) => (XORconst [c] (SRRconst <x.Type> x [d]))
+(CMPshiftLL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVWconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(TSTshiftLL (MOVWconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVWconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVWconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TEQshiftLL (MOVWconst [c]) x [d]) => (TEQconst [c] (SLLconst <x.Type> x [d]))
+(TEQshiftRL (MOVWconst [c]) x [d]) => (TEQconst [c] (SRLconst <x.Type> x [d]))
+(TEQshiftRA (MOVWconst [c]) x [d]) => (TEQconst [c] (SRAconst <x.Type> x [d]))
+(CMNshiftLL (MOVWconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVWconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVWconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+
+(ADDshiftLLreg (MOVWconst [c]) x y) => (ADDconst [c] (SLL <x.Type> x y))
+(ADDshiftRLreg (MOVWconst [c]) x y) => (ADDconst [c] (SRL <x.Type> x y))
+(ADDshiftRAreg (MOVWconst [c]) x y) => (ADDconst [c] (SRA <x.Type> x y))
+(ADCshiftLLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SLL <x.Type> x y) flags)
+(ADCshiftRLreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRL <x.Type> x y) flags)
+(ADCshiftRAreg (MOVWconst [c]) x y flags) => (ADCconst [c] (SRA <x.Type> x y) flags)
+(ADDSshiftLLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SLL <x.Type> x y))
+(ADDSshiftRLreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRL <x.Type> x y))
+(ADDSshiftRAreg (MOVWconst [c]) x y) => (ADDSconst [c] (SRA <x.Type> x y))
+(SUBshiftLLreg (MOVWconst [c]) x y) => (RSBconst [c] (SLL <x.Type> x y))
+(SUBshiftRLreg (MOVWconst [c]) x y) => (RSBconst [c] (SRL <x.Type> x y))
+(SUBshiftRAreg (MOVWconst [c]) x y) => (RSBconst [c] (SRA <x.Type> x y))
+(SBCshiftLLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SLL <x.Type> x y) flags)
+(SBCshiftRLreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRL <x.Type> x y) flags)
+(SBCshiftRAreg (MOVWconst [c]) x y flags) => (RSCconst [c] (SRA <x.Type> x y) flags)
+(SUBSshiftLLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SLL <x.Type> x y))
+(SUBSshiftRLreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRL <x.Type> x y))
+(SUBSshiftRAreg (MOVWconst [c]) x y) => (RSBSconst [c] (SRA <x.Type> x y))
+(RSBshiftLLreg (MOVWconst [c]) x y) => (SUBconst [c] (SLL <x.Type> x y))
+(RSBshiftRLreg (MOVWconst [c]) x y) => (SUBconst [c] (SRL <x.Type> x y))
+(RSBshiftRAreg (MOVWconst [c]) x y) => (SUBconst [c] (SRA <x.Type> x y))
+(RSCshiftLLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SLL <x.Type> x y) flags)
+(RSCshiftRLreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRL <x.Type> x y) flags)
+(RSCshiftRAreg (MOVWconst [c]) x y flags) => (SBCconst [c] (SRA <x.Type> x y) flags)
+(RSBSshiftLLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SLL <x.Type> x y))
+(RSBSshiftRLreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRL <x.Type> x y))
+(RSBSshiftRAreg (MOVWconst [c]) x y) => (SUBSconst [c] (SRA <x.Type> x y))
+(ANDshiftLLreg (MOVWconst [c]) x y) => (ANDconst [c] (SLL <x.Type> x y))
+(ANDshiftRLreg (MOVWconst [c]) x y) => (ANDconst [c] (SRL <x.Type> x y))
+(ANDshiftRAreg (MOVWconst [c]) x y) => (ANDconst [c] (SRA <x.Type> x y))
+(ORshiftLLreg (MOVWconst [c]) x y) => (ORconst [c] (SLL <x.Type> x y))
+(ORshiftRLreg (MOVWconst [c]) x y) => (ORconst [c] (SRL <x.Type> x y))
+(ORshiftRAreg (MOVWconst [c]) x y) => (ORconst [c] (SRA <x.Type> x y))
+(XORshiftLLreg (MOVWconst [c]) x y) => (XORconst [c] (SLL <x.Type> x y))
+(XORshiftRLreg (MOVWconst [c]) x y) => (XORconst [c] (SRL <x.Type> x y))
+(XORshiftRAreg (MOVWconst [c]) x y) => (XORconst [c] (SRA <x.Type> x y))
+(CMPshiftLLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+(CMPshiftRLreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+(CMPshiftRAreg (MOVWconst [c]) x y) => (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+(TSTshiftLLreg (MOVWconst [c]) x y) => (TSTconst [c] (SLL <x.Type> x y))
+(TSTshiftRLreg (MOVWconst [c]) x y) => (TSTconst [c] (SRL <x.Type> x y))
+(TSTshiftRAreg (MOVWconst [c]) x y) => (TSTconst [c] (SRA <x.Type> x y))
+(TEQshiftLLreg (MOVWconst [c]) x y) => (TEQconst [c] (SLL <x.Type> x y))
+(TEQshiftRLreg (MOVWconst [c]) x y) => (TEQconst [c] (SRL <x.Type> x y))
+(TEQshiftRAreg (MOVWconst [c]) x y) => (TEQconst [c] (SRA <x.Type> x y))
+(CMNshiftLLreg (MOVWconst [c]) x y) => (CMNconst [c] (SLL <x.Type> x y))
+(CMNshiftRLreg (MOVWconst [c]) x y) => (CMNconst [c] (SRL <x.Type> x y))
+(CMNshiftRAreg (MOVWconst [c]) x y) => (CMNconst [c] (SRA <x.Type> x y))
+
+// constant folding in *shift ops
+(ADDshiftLL x (MOVWconst [c]) [d]) => (ADDconst x [c<<uint64(d)])
+(ADDshiftRL x (MOVWconst [c]) [d]) => (ADDconst x [int32(uint32(c)>>uint64(d))])
+(ADDshiftRA x (MOVWconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(ADCshiftLL x (MOVWconst [c]) [d] flags) => (ADCconst x [c<<uint64(d)] flags)
+(ADCshiftRL x (MOVWconst [c]) [d] flags) => (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+(ADCshiftRA x (MOVWconst [c]) [d] flags) => (ADCconst x [c>>uint64(d)] flags)
+(ADDSshiftLL x (MOVWconst [c]) [d]) => (ADDSconst x [c<<uint64(d)])
+(ADDSshiftRL x (MOVWconst [c]) [d]) => (ADDSconst x [int32(uint32(c)>>uint64(d))])
+(ADDSshiftRA x (MOVWconst [c]) [d]) => (ADDSconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVWconst [c]) [d]) => (SUBconst x [c<<uint64(d)])
+(SUBshiftRL x (MOVWconst [c]) [d]) => (SUBconst x [int32(uint32(c)>>uint64(d))])
+(SUBshiftRA x (MOVWconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(SBCshiftLL x (MOVWconst [c]) [d] flags) => (SBCconst x [c<<uint64(d)] flags)
+(SBCshiftRL x (MOVWconst [c]) [d] flags) => (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+(SBCshiftRA x (MOVWconst [c]) [d] flags) => (SBCconst x [c>>uint64(d)] flags)
+(SUBSshiftLL x (MOVWconst [c]) [d]) => (SUBSconst x [c<<uint64(d)])
+(SUBSshiftRL x (MOVWconst [c]) [d]) => (SUBSconst x [int32(uint32(c)>>uint64(d))])
+(SUBSshiftRA x (MOVWconst [c]) [d]) => (SUBSconst x [c>>uint64(d)])
+(RSBshiftLL x (MOVWconst [c]) [d]) => (RSBconst x [c<<uint64(d)])
+(RSBshiftRL x (MOVWconst [c]) [d]) => (RSBconst x [int32(uint32(c)>>uint64(d))])
+(RSBshiftRA x (MOVWconst [c]) [d]) => (RSBconst x [c>>uint64(d)])
+(RSCshiftLL x (MOVWconst [c]) [d] flags) => (RSCconst x [c<<uint64(d)] flags)
+(RSCshiftRL x (MOVWconst [c]) [d] flags) => (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+(RSCshiftRA x (MOVWconst [c]) [d] flags) => (RSCconst x [c>>uint64(d)] flags)
+(RSBSshiftLL x (MOVWconst [c]) [d]) => (RSBSconst x [c<<uint64(d)])
+(RSBSshiftRL x (MOVWconst [c]) [d]) => (RSBSconst x [int32(uint32(c)>>uint64(d))])
+(RSBSshiftRA x (MOVWconst [c]) [d]) => (RSBSconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVWconst [c]) [d]) => (ANDconst x [c<<uint64(d)])
+(ANDshiftRL x (MOVWconst [c]) [d]) => (ANDconst x [int32(uint32(c)>>uint64(d))])
+(ANDshiftRA x (MOVWconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ORshiftLL x (MOVWconst [c]) [d]) => (ORconst x [c<<uint64(d)])
+(ORshiftRL x (MOVWconst [c]) [d]) => (ORconst x [int32(uint32(c)>>uint64(d))])
+(ORshiftRA x (MOVWconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(XORshiftLL x (MOVWconst [c]) [d]) => (XORconst x [c<<uint64(d)])
+(XORshiftRL x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d))])
+(XORshiftRA x (MOVWconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRR x (MOVWconst [c]) [d]) => (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+(BICshiftLL x (MOVWconst [c]) [d]) => (BICconst x [c<<uint64(d)])
+(BICshiftRL x (MOVWconst [c]) [d]) => (BICconst x [int32(uint32(c)>>uint64(d))])
+(BICshiftRA x (MOVWconst [c]) [d]) => (BICconst x [c>>uint64(d)])
+(MVNshiftLL (MOVWconst [c]) [d]) => (MOVWconst [^(c<<uint64(d))])
+(MVNshiftRL (MOVWconst [c]) [d]) => (MOVWconst [^int32(uint32(c)>>uint64(d))])
+(MVNshiftRA (MOVWconst [c]) [d]) => (MOVWconst [int32(c)>>uint64(d)])
+(CMPshiftLL x (MOVWconst [c]) [d]) => (CMPconst x [c<<uint64(d)])
+(CMPshiftRL x (MOVWconst [c]) [d]) => (CMPconst x [int32(uint32(c)>>uint64(d))])
+(CMPshiftRA x (MOVWconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVWconst [c]) [d]) => (TSTconst x [c<<uint64(d)])
+(TSTshiftRL x (MOVWconst [c]) [d]) => (TSTconst x [int32(uint32(c)>>uint64(d))])
+(TSTshiftRA x (MOVWconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TEQshiftLL x (MOVWconst [c]) [d]) => (TEQconst x [c<<uint64(d)])
+(TEQshiftRL x (MOVWconst [c]) [d]) => (TEQconst x [int32(uint32(c)>>uint64(d))])
+(TEQshiftRA x (MOVWconst [c]) [d]) => (TEQconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVWconst [c]) [d]) => (CMNconst x [c<<uint64(d)])
+(CMNshiftRL x (MOVWconst [c]) [d]) => (CMNconst x [int32(uint32(c)>>uint64(d))])
+(CMNshiftRA x (MOVWconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+
+(ADDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftLL x y [c])
+(ADDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRL x y [c])
+(ADDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDshiftRA x y [c])
+(ADCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftLL x y [c] flags)
+(ADCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRL x y [c] flags)
+(ADCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (ADCshiftRA x y [c] flags)
+(ADDSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftLL x y [c])
+(ADDSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRL x y [c])
+(ADDSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ADDSshiftRA x y [c])
+(SUBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftLL x y [c])
+(SUBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRL x y [c])
+(SUBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBshiftRA x y [c])
+(SBCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftLL x y [c] flags)
+(SBCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRL x y [c] flags)
+(SBCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (SBCshiftRA x y [c] flags)
+(SUBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftLL x y [c])
+(SUBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRL x y [c])
+(SUBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (SUBSshiftRA x y [c])
+(RSBshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftLL x y [c])
+(RSBshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRL x y [c])
+(RSBshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBshiftRA x y [c])
+(RSCshiftLLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftLL x y [c] flags)
+(RSCshiftRLreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRL x y [c] flags)
+(RSCshiftRAreg x y (MOVWconst [c]) flags) && 0 <= c && c < 32 => (RSCshiftRA x y [c] flags)
+(RSBSshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftLL x y [c])
+(RSBSshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRL x y [c])
+(RSBSshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (RSBSshiftRA x y [c])
+(ANDshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftLL x y [c])
+(ANDshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRL x y [c])
+(ANDshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ANDshiftRA x y [c])
+(ORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftLL x y [c])
+(ORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRL x y [c])
+(ORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (ORshiftRA x y [c])
+(XORshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftLL x y [c])
+(XORshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRL x y [c])
+(XORshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (XORshiftRA x y [c])
+(BICshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftLL x y [c])
+(BICshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRL x y [c])
+(BICshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (BICshiftRA x y [c])
+(MVNshiftLLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftLL x [c])
+(MVNshiftRLreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRL x [c])
+(MVNshiftRAreg x (MOVWconst [c])) && 0 <= c && c < 32 => (MVNshiftRA x [c])
+(CMPshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftLL x y [c])
+(CMPshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRL x y [c])
+(CMPshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMPshiftRA x y [c])
+(TSTshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftLL x y [c])
+(TSTshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRL x y [c])
+(TSTshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TSTshiftRA x y [c])
+(TEQshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftLL x y [c])
+(TEQshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRL x y [c])
+(TEQshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (TEQshiftRA x y [c])
+(CMNshiftLLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftLL x y [c])
+(CMNshiftRLreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRL x y [c])
+(CMNshiftRAreg x y (MOVWconst [c])) && 0 <= c && c < 32 => (CMNshiftRA x y [c])
+
+// Generate rotates
+(ADDshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+( ORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+(XORshiftLL [c] (SRLconst x [32-c]) x) => (SRRconst [32-c] x)
+(ADDshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+( ORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+(XORshiftRL [c] (SLLconst x [32-c]) x) => (SRRconst [ c] x)
+
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft32 x y) => (SRR x (RSBconst [0] <y.Type> y))
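+// e.g. a 16-bit rotate left by the constant 4 (as bits.RotateLeft16(x, 4) produces) lowers
+// here to (x<<4)|(x>>12), since c&15 = 4 and -c&15 = 12; the variable 32-bit rotate left by y
+// becomes a rotate right (SRR) by -y, computed with RSBconst [0].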
+
+// ((x>>8) | (x<<8)) -> (REV16 x), where x has type uint16; the "|" can also be "^" or "+".
+// The UBFX instruction is available on ARMv6T2, ARMv7 and later, while REV16 is available on
+// ARMv6 and later. So for ARMv6 we need to match the SLLconst, SRLconst and ORshiftLL form.
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x) => (REV16 x)
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x) && buildcfg.GOARM>=6 => (REV16 x)
+
+// use indexed loads and stores
+(MOVWload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVWstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem) && sym == nil => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem) && sym == nil => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVBUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVBloadidx ptr idx mem)
+(MOVBstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVBstoreidx ptr idx val mem)
+(MOVHUload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [0] {sym} (ADD ptr idx) mem) && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVHstore [0] {sym} (ADD ptr idx) val mem) && sym == nil => (MOVHstoreidx ptr idx val mem)
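+// All of these require sym == nil: with no symbolic offset the second ADD operand can serve
+// as a register (or shifted-register) index in the addressing mode instead of being added in
+// a separate instruction.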
+
+// constant folding in indexed loads and stores
+(MOVWloadidx ptr (MOVWconst [c]) mem) => (MOVWload [c] ptr mem)
+(MOVWloadidx (MOVWconst [c]) ptr mem) => (MOVWload [c] ptr mem)
+(MOVBloadidx ptr (MOVWconst [c]) mem) => (MOVBload [c] ptr mem)
+(MOVBloadidx (MOVWconst [c]) ptr mem) => (MOVBload [c] ptr mem)
+(MOVBUloadidx ptr (MOVWconst [c]) mem) => (MOVBUload [c] ptr mem)
+(MOVBUloadidx (MOVWconst [c]) ptr mem) => (MOVBUload [c] ptr mem)
+(MOVHUloadidx ptr (MOVWconst [c]) mem) => (MOVHUload [c] ptr mem)
+(MOVHUloadidx (MOVWconst [c]) ptr mem) => (MOVHUload [c] ptr mem)
+(MOVHloadidx ptr (MOVWconst [c]) mem) => (MOVHload [c] ptr mem)
+(MOVHloadidx (MOVWconst [c]) ptr mem) => (MOVHload [c] ptr mem)
+
+(MOVWstoreidx ptr (MOVWconst [c]) val mem) => (MOVWstore [c] ptr val mem)
+(MOVWstoreidx (MOVWconst [c]) ptr val mem) => (MOVWstore [c] ptr val mem)
+(MOVBstoreidx ptr (MOVWconst [c]) val mem) => (MOVBstore [c] ptr val mem)
+(MOVBstoreidx (MOVWconst [c]) ptr val mem) => (MOVBstore [c] ptr val mem)
+(MOVHstoreidx ptr (MOVWconst [c]) val mem) => (MOVHstore [c] ptr val mem)
+(MOVHstoreidx (MOVWconst [c]) ptr val mem) => (MOVHstore [c] ptr val mem)
+
+(MOVWloadidx ptr (SLLconst idx [c]) mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx (SLLconst idx [c]) ptr mem) => (MOVWloadshiftLL ptr idx [c] mem)
+(MOVWloadidx ptr (SRLconst idx [c]) mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx (SRLconst idx [c]) ptr mem) => (MOVWloadshiftRL ptr idx [c] mem)
+(MOVWloadidx ptr (SRAconst idx [c]) mem) => (MOVWloadshiftRA ptr idx [c] mem)
+(MOVWloadidx (SRAconst idx [c]) ptr mem) => (MOVWloadshiftRA ptr idx [c] mem)
+
+(MOVWstoreidx ptr (SLLconst idx [c]) val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx (SLLconst idx [c]) ptr val mem) => (MOVWstoreshiftLL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRLconst idx [c]) val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx (SRLconst idx [c]) ptr val mem) => (MOVWstoreshiftRL ptr idx [c] val mem)
+(MOVWstoreidx ptr (SRAconst idx [c]) val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+(MOVWstoreidx (SRAconst idx [c]) ptr val mem) => (MOVWstoreshiftRA ptr idx [c] val mem)
+
+(MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+(MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem) => (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+(MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem) => (MOVWload [c>>uint64(d)] ptr mem)
+
+(MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+(MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+(MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem) => (MOVWstore [c>>uint64(d)] ptr val mem)
+
+// generic simplifications
+(ADD x (RSBconst [0] y)) => (SUB x y)
+(ADD <t> (RSBconst [c] x) (RSBconst [d] y)) => (RSBconst [c+d] (ADD <t> x y))
+(SUB x x) => (MOVWconst [0])
+(RSB x x) => (MOVWconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+(BIC x x) => (MOVWconst [0])
+
+(ADD (MUL x y) a) => (MULA x y a)
+(SUB a (MUL x y)) && buildcfg.GOARM == 7 => (MULS x y a)
+(RSB (MUL x y) a) && buildcfg.GOARM == 7 => (MULS x y a)
+
+(NEGF (MULF x y)) && buildcfg.GOARM >= 6 => (NMULF x y)
+(NEGD (MULD x y)) && buildcfg.GOARM >= 6 => (NMULD x y)
+(MULF (NEGF x) y) && buildcfg.GOARM >= 6 => (NMULF x y)
+(MULD (NEGD x) y) && buildcfg.GOARM >= 6 => (NMULD x y)
+(NMULF (NEGF x) y) => (MULF x y)
+(NMULD (NEGD x) y) => (MULD x y)
+
+// the result will overwrite the addend, since they are in the same register
+(ADDF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
+(ADDF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
+(ADDD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
+(ADDD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
+(SUBF a (MULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSF a x y)
+(SUBF a (NMULF x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAF a x y)
+(SUBD a (MULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULSD a x y)
+(SUBD a (NMULD x y)) && a.Uses == 1 && buildcfg.GOARM >= 6 => (MULAD a x y)
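+// The a.Uses == 1 condition guarantees the addend has no other consumers, which is what makes
+// it safe for the fused multiply-add/subtract to overwrite its register, as noted above.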
+
+(AND x (MVN y)) => (BIC x y)
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(RSBshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVWconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVWconst [0])
+(AND x (MVNshiftLL y [c])) => (BICshiftLL x y [c])
+(AND x (MVNshiftRL y [c])) => (BICshiftRL x y [c])
+(AND x (MVNshiftRA y [c])) => (BICshiftRA x y [c])
+
+// floating point optimizations
+(CMPF x (MOVFconst [0])) => (CMPF0 x)
+(CMPD x (MOVDconst [0])) => (CMPD0 x)
+
+// bit extraction
+(SRAconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFX [(d-c)|(32-d)<<8] x)
+(SRLconst (SLLconst x [c]) [d]) && buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31 => (BFXU [(d-c)|(32-d)<<8] x)
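+// Worked example: (SRAconst (SLLconst x [24]) [24]) gets aux (24-24)|(32-24)<<8, i.e. LSB=0
+// and Width=8 in the BFX encoding described earlier, so the shift pair becomes a signed
+// extraction of the low byte; the SRLconst variant is the zero-extending BFXU counterpart.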
+
+// comparison simplification
+((EQ|NE) (CMP x (RSBconst [0] y))) => ((EQ|NE) (CMN x y)) // sense of carry bit not preserved; see also #50854
+((EQ|NE) (CMN x (RSBconst [0] y))) => ((EQ|NE) (CMP x y)) // sense of carry bit not preserved; see also #50864
+(EQ (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (EQ (CMP x y) yes no)
+(EQ (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (EQ (CMPconst [c] x) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMPshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMPshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (NE (CMP x y) yes no)
+(NE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (NE (CMPconst [c] x) yes no)
+(NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMPshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMPshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (EQ (CMN x y) yes no)
+(EQ (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(EQ (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (EQ (CMNconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (CMNshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (CMNshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (NE (CMN x y) yes no)
+(NE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (NE (CMNconst [c] x) yes no)
+(NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (CMNshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (CMNshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (EQ (TST x y) yes no)
+(EQ (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (EQ (TSTconst [c] x) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TSTshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TSTshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (NE (TST x y) yes no)
+(NE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (NE (TSTconst [c] x) yes no)
+(NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TSTshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TSTshiftRAreg x y z) yes no)
+(EQ (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (EQ (TEQ x y) yes no)
+(EQ (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (EQ (TEQconst [c] x) yes no)
+(EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftLL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRL x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (EQ (TEQshiftRA x y [c]) yes no)
+(EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftLLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRLreg x y z) yes no)
+(EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (EQ (TEQshiftRAreg x y z) yes no)
+(NE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (NE (TEQ x y) yes no)
+(NE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (NE (TEQconst [c] x) yes no)
+(NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftLL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRL x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (NE (TEQshiftRA x y [c]) yes no)
+(NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftLLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRLreg x y z) yes no)
+(NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (NE (TEQshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LTnoov (CMP x y) yes no)
+(LT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMPconst [c] x) yes no)
+(LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMPshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMPshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (LEnoov (CMP x y) yes no)
+(LE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMPconst [c] x) yes no)
+(LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMPshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMPshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LTnoov (CMN x y) yes no)
+(LT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (CMNconst [c] x) yes no)
+(LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (CMNshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (CMNshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (LEnoov (CMN x y) yes no)
+(LE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (CMNconst [c] x) yes no)
+(LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (CMNshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (CMNshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LTnoov (TST x y) yes no)
+(LT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TSTconst [c] x) yes no)
+(LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TSTshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TSTshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (LEnoov (TST x y) yes no)
+(LE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TSTconst [c] x) yes no)
+(LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TSTshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TSTshiftRAreg x y z) yes no)
+(LT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LTnoov (TEQ x y) yes no)
+(LT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LTnoov (TEQconst [c] x) yes no)
+(LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftLL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRL x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LTnoov (TEQshiftRA x y [c]) yes no)
+(LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftLLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRLreg x y z) yes no)
+(LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LTnoov (TEQshiftRAreg x y z) yes no)
+(LE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (LEnoov (TEQ x y) yes no)
+(LE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (LEnoov (TEQconst [c] x) yes no)
+(LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftLL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRL x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (LEnoov (TEQshiftRA x y [c]) yes no)
+(LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftLLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRLreg x y z) yes no)
+(LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (LEnoov (TEQshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GTnoov (CMP x y) yes no)
+(GT (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMPconst [c] x) yes no)
+(GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMPshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMPshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(SUB x y)) yes no) && l.Uses==1 => (GEnoov (CMP x y) yes no)
+(GE (CMPconst [0] l:(MULS x y a)) yes no) && l.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(SUBconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMPconst [c] x) yes no)
+(GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMPshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMPshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GTnoov (CMN x y) yes no)
+(GT (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (CMNconst [c] x) yes no)
+(GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (CMNshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (CMNshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(ADD x y)) yes no) && l.Uses==1 => (GEnoov (CMN x y) yes no)
+(GE (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] l:(ADDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (CMNconst [c] x) yes no)
+(GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (CMNshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (CMNshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(MULA x y a)) yes no) && l.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GTnoov (TST x y) yes no)
+(GT (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TSTconst [c] x) yes no)
+(GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TSTshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TSTshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(AND x y)) yes no) && l.Uses==1 => (GEnoov (TST x y) yes no)
+(GE (CMPconst [0] l:(ANDconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TSTconst [c] x) yes no)
+(GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TSTshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TSTshiftRAreg x y z) yes no)
+(GT (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GTnoov (TEQ x y) yes no)
+(GT (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GTnoov (TEQconst [c] x) yes no)
+(GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftLL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRL x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GTnoov (TEQshiftRA x y [c]) yes no)
+(GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftLLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRLreg x y z) yes no)
+(GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GTnoov (TEQshiftRAreg x y z) yes no)
+(GE (CMPconst [0] l:(XOR x y)) yes no) && l.Uses==1 => (GEnoov (TEQ x y) yes no)
+(GE (CMPconst [0] l:(XORconst [c] x)) yes no) && l.Uses==1 => (GEnoov (TEQconst [c] x) yes no)
+(GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftLL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRL x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no) && l.Uses==1 => (GEnoov (TEQshiftRA x y [c]) yes no)
+(GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftLLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRLreg x y z) yes no)
+(GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no) && l.Uses==1 => (GEnoov (TEQshiftRAreg x y z) yes no)
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64.rules b/src/cmd/compile/internal/ssa/gen/ARM64.rules
new file mode 100644
index 0000000..ad99960
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64.rules
@@ -0,0 +1,2944 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add(32F|64F) ...) => (FADD(S|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub(32F|64F) ...) => (FSUB(S|D) ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul(32|16|8) ...) => (MULW ...)
+(Mul(32F|64F) ...) => (FMUL(S|D) ...)
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (UMULH ...)
+(Hmul32 x y) => (SRAconst (MULL <typ.Int64> x y) [32])
+(Hmul32u x y) => (SRAconst (UMULL <typ.UInt64> x y) [32])
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIV x y)
+(Div64u ...) => (UDIV ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (UDIVW ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Mod64 x y) => (MOD x y)
+(Mod64u ...) => (UMOD ...)
+(Mod32 x y) => (MODW x y)
+(Mod32u ...) => (UMODW ...)
+(Mod16 x y) => (MODW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (MODW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
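+// Because x >= y here, x-y cannot wrap, and (x-y)/2 + y equals floor((x+y)/2), so the
+// average is computed without ever forming the possibly-overflowing sum x+y.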
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg(32F|64F) ...) => (FNEG(S|D) ...)
+(Com(64|32|16|8) ...) => (MVN ...)
+
+// math package intrinsics
+(Abs ...) => (FABSD ...)
+(Sqrt ...) => (FSQRTD ...)
+(Ceil ...) => (FRINTPD ...)
+(Floor ...) => (FRINTMD ...)
+(Round ...) => (FRINTAD ...)
+(RoundToEven ...) => (FRINTND ...)
+(Trunc ...) => (FRINTZD ...)
+(FMA x y z) => (FMADDD z x y)
+
+(Sqrt32 ...) => (FSQRTS ...)
+
+// lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x y) => (RORW x (NEG <y.Type> y))
+(RotateLeft64 x y) => (ROR x (NEG <y.Type> y))
+
+(Ctz(64|32|16|8)NonZero ...) => (Ctz(64|32|32|32) ...)
+
+(Ctz64 <t> x) => (CLZ (RBIT <t> x))
+(Ctz32 <t> x) => (CLZW (RBITW <t> x))
+(Ctz16 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+(Ctz8 <t> x) => (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
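+// For Ctz16/Ctz8, ORing in 0x10000 (resp. 0x100) plants a set bit just above the
+// operand's width, so a zero input yields 16 (resp. 8) instead of the 32 that
+// CLZW of a reversed all-zero word would give.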
+
+(PopCount64 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+(PopCount32 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+(PopCount16 <t> x) => (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
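+// Rough shape of the PopCount lowering: move the value to an FP/SIMD register,
+// count the set bits of each byte with VCNT, sum the byte counts with VUADDLV,
+// then move the result back to a general-purpose register.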
+
+// Load args directly into the register class where they will be used.
+(FMOVDgpfp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+(FMOVDfpgp <t> (Arg [off] {sym})) => @b.Func.Entry (Arg <t> [off] {sym})
+
+// Similarly for stores: if we see a store after an FPR <=> GPR move, redirect the store to use the other register set.
+(MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem) => (FMOVDstore [off] {sym} ptr val mem)
+(FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem) => (MOVDstore [off] {sym} ptr val mem)
+(MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem) => (FMOVSstore [off] {sym} ptr val mem)
+(FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem) => (MOVWstore [off] {sym} ptr val mem)
+
+// float <=> int register moves, with no conversion.
+// These come up when compiling math.{Float64bits, Float64frombits, Float32bits, Float32frombits}.
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _)) => (FMOVDfpgp val)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _)) => (FMOVDgpfp val)
+(MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _)) => (FMOVSfpgp val)
+(FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _)) => (FMOVSgpfp val)
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+(BitLen32 x) => (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+
+(Bswap64 ...) => (REV ...)
+(Bswap32 ...) => (REVW ...)
+
+(BitRev64 ...) => (RBIT ...)
+(BitRev32 ...) => (RBITW ...)
+(BitRev16 x) => (SRLconst [48] (RBIT <typ.UInt64> x))
+(BitRev8 x) => (SRLconst [56] (RBIT <typ.UInt64> x))
+
+// In fact, UMOD will be translated into a UREM instruction, and UREM is in turn translated into
+// UDIV and MSUB instructions. But if there is already an identical UDIV instruction just before or
+// after the UREM (as in quo, rem := z/y, z%y), the second UDIV instruction becomes redundant.
+// The purpose of these rules is to have that extra UDIV removed by the CSE pass.
+(UMOD <typ.UInt64> x y) => (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+(UMODW <typ.UInt32> x y) => (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
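+// After this rewrite, z%y is computed as MSUB(z, y, UDIV(z, y)); CSE can then merge
+// that UDIV with the one computing z/y, leaving a single division for the pair.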
+
+// 64-bit addition with carry.
+(Select0 (Add64carry x y c)) => (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
+(Select1 (Add64carry x y c)) => (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+
+// 64-bit subtraction with borrowing.
+(Select0 (Sub64borrow x y bo)) => (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+(Select1 (Sub64borrow x y bo)) => (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XOR (MOVDconst [1]) x)
+
+// shifts
+// The hardware instructions use only the low 6 bits of the shift amount,
+// so we compare against 64 to ensure Go semantics for large shifts.
+// The rules for rotates with a non-constant shift are based on the rules below;
+// if the rules below change, please also update the rotate rules derived from them.
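+// For example, with a uint64 x, Go requires x << 64 to be 0, but the hardware shift
+// would use 64&63 == 0 as the shift amount and return x unchanged; the CSEL below
+// therefore selects the constant 0 whenever the (zero-extended) shift amount is >= 64.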
+(Lsh64x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh64x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh32x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh16x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Lsh8x32 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh64Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh32Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh16Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+(Rsh8Ux32 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+
+(Rsh64x64 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh64x32 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh64x16 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh64x8 x y) => (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh32x64 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh32x32 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh32x16 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh32x8 x y) => (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh16x64 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh16x32 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh16x16 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+(Rsh8x64 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+(Rsh8x32 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+(Rsh8x16 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+(Rsh8x8 x y) => (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32F|64F) [val]) => (FMOV(S|D)const [float64(val)])
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [63])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (SCVTFWS ...)
+(Cvt32to64F ...) => (SCVTFWD ...)
+(Cvt64to32F ...) => (SCVTFS ...)
+(Cvt64to64F ...) => (SCVTFD ...)
+(Cvt32Uto32F ...) => (UCVTFWS ...)
+(Cvt32Uto64F ...) => (UCVTFWD ...)
+(Cvt64Uto32F ...) => (UCVTFS ...)
+(Cvt64Uto64F ...) => (UCVTFD ...)
+(Cvt32Fto32 ...) => (FCVTZSSW ...)
+(Cvt64Fto32 ...) => (FCVTZSDW ...)
+(Cvt32Fto64 ...) => (FCVTZSS ...)
+(Cvt64Fto64 ...) => (FCVTZSD ...)
+(Cvt32Fto32U ...) => (FCVTZUSW ...)
+(Cvt64Fto32U ...) => (FCVTZUDW ...)
+(Cvt32Fto64U ...) => (FCVTZUS ...)
+(Cvt64Fto64U ...) => (FCVTZUD ...)
+(Cvt32Fto64F ...) => (FCVTSD ...)
+(Cvt64Fto32F ...) => (FCVTDS ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (LoweredRound32F ...)
+(Round64F ...) => (LoweredRound64F ...)
+
+// comparisons
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(EqPtr x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPS x y))
+(Eq64F x y) => (Equal (FCMPD x y))
+
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPS x y))
+(Neq64F x y) => (NotEqual (FCMPD x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+
+// Set condition flags for the floating-point comparisons "x < y"
+// and "x <= y". If either or both operands are NaN, all three of
+// (x < y), (x == y) and (x > y) are false, and the ARM manual says
+// the FCMP instruction sets PSTATE.<N,Z,C,V> to (0, 0, 1, 1) in
+// that case.
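+// In Go terms: math.NaN() < 1, 1 < math.NaN() and math.NaN() <= math.NaN()
+// must all evaluate to false.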
+(Less32F x y) => (LessThanF (FCMPS x y))
+(Less64F x y) => (LessThanF (FCMPD x y))
+
+// For an unsigned integer x, the following rules are useful when combined with a branch:
+// 0 < x => x != 0
+// x <= 0 => x == 0
+// x < 1 => x == 0
+// 1 <= x => x != 0
+(Less(8U|16U|32U|64U) zero:(MOVDconst [0]) x) => (Neq(8|16|32|64) zero x)
+(Leq(8U|16U|32U|64U) x zero:(MOVDconst [0])) => (Eq(8|16|32|64) x zero)
+(Less(8U|16U|32U|64U) x (MOVDconst [1])) => (Eq(8|16|32|64) x (MOVDconst [0]))
+(Leq(8U|16U|32U|64U) (MOVDconst [1]) x) => (Neq(8|16|32|64) (MOVDconst [0]) x)
+
+(Less8U x y) => (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThanU (CMPW x y))
+(Less64U x y) => (LessThanU (CMP x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+
+// Refer to the comments for op Less64F above.
+(Leq32F x y) => (LessEqualF (FCMPS x y))
+(Leq64F x y) => (LessEqualF (FCMPD x y))
+
+(Leq8U x y) => (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqualU (CMPW x y))
+(Leq64U x y) => (LessEqualU (CMP x y))
+
+// Optimize comparison between a floating-point value and 0.0 with "FCMP $(0.0), Fn"
+(FCMPS x (FMOVSconst [0])) => (FCMPS0 x)
+(FCMPS (FMOVSconst [0]) x) => (InvertFlags (FCMPS0 x))
+(FCMPD x (FMOVDconst [0])) => (FCMPD0 x)
+(FCMPD (FMOVDconst [0]) x) => (InvertFlags (FCMPD0 x))
+
+// CSEL needs a flag-generating argument. Synthesize a TSTW if necessary.
+(CondSelect x y boolval) && flagArg(boolval) != nil => (CSEL [boolval.Op] x y flagArg(boolval))
+(CondSelect x y boolval) && flagArg(boolval) == nil => (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [off] ptr)
+
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) => (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [4] ptr mem) => (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [8] ptr mem) => (MOVDstore ptr (MOVDconst [0]) mem)
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [5] ptr mem) =>
+ (MOVBstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [6] ptr mem) =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [7] ptr mem) =>
+ (MOVBstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [9] ptr mem) =>
+ (MOVBstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [10] ptr mem) =>
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [11] ptr mem) =>
+ (MOVBstore [10] ptr (MOVDconst [0])
+ (MOVHstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [12] ptr mem) =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [13] ptr mem) =>
+ (MOVBstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [14] ptr mem) =>
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [15] ptr mem) =>
+ (MOVBstore [14] ptr (MOVDconst [0])
+ (MOVHstore [12] ptr (MOVDconst [0])
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+(Zero [16] ptr mem) =>
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+
+(Zero [32] ptr mem) =>
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+
+(Zero [48] ptr mem) =>
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+
+(Zero [64] ptr mem) =>
+ (STP [48] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [32] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [16] ptr (MOVDconst [0]) (MOVDconst [0])
+ (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+
+// strip off fractional word zeroing
+(Zero [s] ptr mem) && s%16 != 0 && s%16 <= 8 && s > 16 =>
+ (Zero [8]
+ (OffPtr <ptr.Type> ptr [s-8])
+ (Zero [s-s%16] ptr mem))
+(Zero [s] ptr mem) && s%16 != 0 && s%16 > 8 && s > 16 =>
+ (Zero [16]
+ (OffPtr <ptr.Type> ptr [s-16])
+ (Zero [s-s%16] ptr mem))
+
+// medium zeroing uses a duff device
+// 4, 16, and 64 are magic constants, see runtime/mkduff.go
+(Zero [s] ptr mem)
+ && s%16 == 0 && s > 64 && s <= 16*64
+ && !config.noDuffDevice =>
+ (DUFFZERO [4 * (64 - s/16)] ptr mem)
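+// The offset 4*(64 - s/16) enters duffzero past the blocks that are not needed:
+// roughly, the routine is 64 blocks, each a single 4-byte STP clearing 16 bytes,
+// so zeroing s bytes skips the first 64 - s/16 blocks (see runtime/mkduff.go).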
+
+// large zeroing uses a loop
+(Zero [s] ptr mem)
+ && s%16 == 0 && (s > 16*64 || config.noDuffDevice) =>
+ (LoweredZero
+ ptr
+ (ADDconst <ptr.Type> [s-16] ptr)
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHUload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWUload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBUload [6] src mem)
+ (MOVHstore [4] dst (MOVHUload [4] src mem)
+ (MOVWstore dst (MOVWUload src mem) mem)))
+(Move [12] dst src mem) =>
+ (MOVWstore [8] dst (MOVWUload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+
+// strip off fractional word move
+(Move [s] dst src mem) && s%8 != 0 && s > 8 =>
+ (Move [s%8]
+ (OffPtr <dst.Type> dst [s-s%8])
+ (OffPtr <src.Type> src [s-s%8])
+ (Move [s-s%8] dst src mem))
+
+// medium move uses a duff device
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 8
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem)
+ (DUFFCOPY <types.TypeMem> [8*(64-(s-8)/16)] dst src mem))
+(Move [s] dst src mem)
+ && s > 32 && s <= 16*64 && s%16 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [8 * (64 - s/16)] dst src mem)
+// 8 is the number of bytes to encode:
+//
+// LDP.P 16(R16), (R26, R27)
+// STP.P (R26, R27), 16(R17)
+//
+// 64 is the number of these blocks. See runtime/duff_arm64.s:duffcopy
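+// So for the s%16 == 8 case above, the trailing 8 bytes are copied with an explicit
+// MOVDload/MOVDstore pair and duffcopy handles the remaining s-8 bytes, entering
+// 8*(64 - (s-8)/16) bytes in, i.e. past the blocks it does not need.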
+
+// large move uses a loop
+(Move [s] dst src mem)
+ && s > 24 && s%8 == 0 && logLargeCopy(v, s) =>
+ (LoweredMove
+ dst
+ src
+ (ADDconst <src.Type> src [s-8])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThanU (CMP idx len))
+(IsSliceInBounds idx len) => (LessEqualU (CMP idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessThanU cc) yes no) => (ULT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (LessEqualU cc) yes no) => (ULE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterThanU cc) yes no) => (UGT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(If (LessThanF cc) yes no) => (FLT cc yes no)
+(If (LessEqualF cc) yes no) => (FLE cc yes no)
+(If (GreaterThanF cc) yes no) => (FGT cc yes no)
+(If (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (TBNZ [0] cond yes no)
+
+// atomic intrinsics
+// Note: these ops do not accept an offset.
+(AtomicLoad8 ...) => (LDARB ...)
+(AtomicLoad32 ...) => (LDARW ...)
+(AtomicLoad64 ...) => (LDAR ...)
+(AtomicLoadPtr ...) => (LDAR ...)
+
+(AtomicStore8 ...) => (STLRB ...)
+(AtomicStore32 ...) => (STLRW ...)
+(AtomicStore64 ...) => (STLR ...)
+(AtomicStorePtrNoWB ...) => (STLR ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+(AtomicCompareAndSwap(32|64) ...) => (LoweredAtomicCas(32|64) ...)
+
+(AtomicAdd(32|64)Variant ...) => (LoweredAtomicAdd(32|64)Variant ...)
+(AtomicExchange(32|64)Variant ...) => (LoweredAtomicExchange(32|64)Variant ...)
+(AtomicCompareAndSwap(32|64)Variant ...) => (LoweredAtomicCas(32|64)Variant ...)
+
+// Currently the updated value is not used, but we need a register to temporarily hold it.
+(AtomicAnd8 ptr val mem) => (Select1 (LoweredAtomicAnd8 ptr val mem))
+(AtomicAnd32 ptr val mem) => (Select1 (LoweredAtomicAnd32 ptr val mem))
+(AtomicOr8 ptr val mem) => (Select1 (LoweredAtomicOr8 ptr val mem))
+(AtomicOr32 ptr val mem) => (Select1 (LoweredAtomicOr32 ptr val mem))
+
+(AtomicAnd8Variant ptr val mem) => (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+(AtomicAnd32Variant ptr val mem) => (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+(AtomicOr8Variant ptr val mem) => (Select1 (LoweredAtomicOr8Variant ptr val mem))
+(AtomicOr32Variant ptr val mem) => (Select1 (LoweredAtomicOr32Variant ptr val mem))
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// Publication barrier (0xe is ST option)
+(PubBarrier mem) => (DMB [0xe] mem)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NZ (Equal cc) yes no) => (EQ cc yes no)
+(NZ (NotEqual cc) yes no) => (NE cc yes no)
+(NZ (LessThan cc) yes no) => (LT cc yes no)
+(NZ (LessThanU cc) yes no) => (ULT cc yes no)
+(NZ (LessEqual cc) yes no) => (LE cc yes no)
+(NZ (LessEqualU cc) yes no) => (ULE cc yes no)
+(NZ (GreaterThan cc) yes no) => (GT cc yes no)
+(NZ (GreaterThanU cc) yes no) => (UGT cc yes no)
+(NZ (GreaterEqual cc) yes no) => (GE cc yes no)
+(NZ (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(NZ (LessThanF cc) yes no) => (FLT cc yes no)
+(NZ (LessEqualF cc) yes no) => (FLE cc yes no)
+(NZ (GreaterThanF cc) yes no) => (FGT cc yes no)
+(NZ (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(TBNZ [0] (Equal cc) yes no) => (EQ cc yes no)
+(TBNZ [0] (NotEqual cc) yes no) => (NE cc yes no)
+(TBNZ [0] (LessThan cc) yes no) => (LT cc yes no)
+(TBNZ [0] (LessThanU cc) yes no) => (ULT cc yes no)
+(TBNZ [0] (LessEqual cc) yes no) => (LE cc yes no)
+(TBNZ [0] (LessEqualU cc) yes no) => (ULE cc yes no)
+(TBNZ [0] (GreaterThan cc) yes no) => (GT cc yes no)
+(TBNZ [0] (GreaterThanU cc) yes no) => (UGT cc yes no)
+(TBNZ [0] (GreaterEqual cc) yes no) => (GE cc yes no)
+(TBNZ [0] (GreaterEqualU cc) yes no) => (UGE cc yes no)
+(TBNZ [0] (LessThanF cc) yes no) => (FLT cc yes no)
+(TBNZ [0] (LessEqualF cc) yes no) => (FLE cc yes no)
+(TBNZ [0] (GreaterThanF cc) yes no) => (FGT cc yes no)
+(TBNZ [0] (GreaterEqualF cc) yes no) => (FGE cc yes no)
+
+(EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TST x y) yes no)
+(NE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TST x y) yes no)
+(LT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TST x y) yes no)
+(LE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TST x y) yes no)
+(GT (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TST x y) yes no)
+(GE (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TST x y) yes no)
+
+(EQ (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (EQ (TSTW x y) yes no)
+(NE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (NE (TSTW x y) yes no)
+(LT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LT (TSTW x y) yes no)
+(LE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (LE (TSTW x y) yes no)
+(GT (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GT (TSTW x y) yes no)
+(GE (CMPWconst [0] z:(AND x y)) yes no) && z.Uses == 1 => (GE (TSTW x y) yes no)
+
+(EQ (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (EQ (TSTconst [c] y) yes no)
+(NE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (NE (TSTconst [c] y) yes no)
+(LT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LT (TSTconst [c] y) yes no)
+(LE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (LE (TSTconst [c] y) yes no)
+(GT (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GT (TSTconst [c] y) yes no)
+(GE (CMPconst [0] x:(ANDconst [c] y)) yes no) && x.Uses == 1 => (GE (TSTconst [c] y) yes no)
+
+(EQ (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNconst [c] y) yes no)
+(NE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNconst [c] y) yes no)
+(LT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNconst [c] y) yes no)
+(LE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNconst [c] y) yes no)
+(GT (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNconst [c] y) yes no)
+(GE (CMPconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNconst [c] y) yes no)
+
+(EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (EQ (CMNWconst [int32(c)] y) yes no)
+(NE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (NE (CMNWconst [int32(c)] y) yes no)
+(LT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LTnoov (CMNWconst [int32(c)] y) yes no)
+(LE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (LEnoov (CMNWconst [int32(c)] y) yes no)
+(GT (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GTnoov (CMNWconst [int32(c)] y) yes no)
+(GE (CMPWconst [0] x:(ADDconst [c] y)) yes no) && x.Uses == 1 => (GEnoov (CMNWconst [int32(c)] y) yes no)
+
+(EQ (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+(LT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMN x y) yes no)
+(LE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMN x y) yes no)
+(GT (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMN x y) yes no)
+(GE (CMPconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMN x y) yes no)
+
+(EQ (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
+(LT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LTnoov (CMNW x y) yes no)
+(LE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (LEnoov (CMNW x y) yes no)
+(GT (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GTnoov (CMNW x y) yes no)
+(GE (CMPWconst [0] z:(ADD x y)) yes no) && z.Uses == 1 => (GEnoov (CMNW x y) yes no)
+
+// CMP(x,-y) -> CMN(x,y) is only valid for unordered (EQ/NE) comparisons, because y may be -1<<63, whose negation overflows
+(EQ (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMN x y) yes no)
+(NE (CMP x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMN x y) yes no)
+
+// CMPW(x,-y) -> CMNW(x,y) is only valid for unordered (EQ/NE) comparisons, because y may be -1<<31, whose negation overflows
+(EQ (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (EQ (CMNW x y) yes no)
+(NE (CMPW x z:(NEG y)) yes no) && z.Uses == 1 => (NE (CMNW x y) yes no)
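+// For example, with x = 0 and y = -1<<63, NEG y wraps back to -1<<63: CMP and CMN
+// then produce the same result bits (and Z flag), but they can disagree on the
+// overflow flag that the signed conditions consult.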
+
+(EQ (CMPconst [0] x) yes no) => (Z x yes no)
+(NE (CMPconst [0] x) yes no) => (NZ x yes no)
+(EQ (CMPWconst [0] x) yes no) => (ZW x yes no)
+(NE (CMPWconst [0] x) yes no) => (NZW x yes no)
+
+(EQ (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (EQ (CMN a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (NE (CMN a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MADD a x y)) yes no) && z.Uses==1 => (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (EQ (CMP a (MUL <x.Type> x y)) yes no)
+(NE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (NE (CMP a (MUL <x.Type> x y)) yes no)
+(LE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(LT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GE (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+(GT (CMPconst [0] z:(MSUB a x y)) yes no) && z.Uses==1 => (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (NE (CMNW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MADDW a x y)) yes no) && z.Uses==1 => (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+
+(EQ (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+(NE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (NE (CMPW a (MULW <x.Type> x y)) yes no)
+(LE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(LT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GE (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+(GT (CMPWconst [0] z:(MSUBW a x y)) yes no) && z.Uses==1 => (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+
+// Absorb bit-tests into block
+(Z (ANDconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NZ (ANDconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(ZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NZW (ANDconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(EQ (TSTconst [c] x) yes no) && oneBit(c) => (TBZ [int64(ntz64(c))] x yes no)
+(NE (TSTconst [c] x) yes no) && oneBit(c) => (TBNZ [int64(ntz64(c))] x yes no)
+(EQ (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+(NE (TSTWconst [c] x) yes no) && oneBit(int64(uint32(c))) => (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+
+// Test sign-bit for signed comparisons against zero
+(GE (CMPWconst [0] x) yes no) => (TBZ [31] x yes no)
+(GE (CMPconst [0] x) yes no) => (TBZ [63] x yes no)
+(LT (CMPWconst [0] x) yes no) => (TBNZ [31] x yes no)
+(LT (CMPconst [0] x) yes no) => (TBNZ [63] x yes no)
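+// A signed value is >= 0 exactly when its sign bit (bit 63, or bit 31 for 32-bit
+// values) is clear, so these comparisons reduce to a single test-bit-and-branch.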
+
+// fold offset into address
+(ADDconst [off1] (MOVDaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) =>
+ (MOVDaddr [int32(off1)+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+int32(off2)] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed load
+(MOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx ptr idx mem)
+(MOVWUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx ptr idx mem)
+(MOVWload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx ptr idx mem)
+(MOVHUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx ptr idx mem)
+(MOVHload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx ptr idx mem)
+(MOVBUload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBUloadidx ptr idx mem)
+(MOVBload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBloadidx ptr idx mem)
+(FMOVSload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx ptr idx mem)
+(FMOVDload [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx ptr idx mem)
+(MOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVDload [int32(c)] ptr mem)
+(MOVWUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWUload [int32(c)] ptr mem)
+(MOVWloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVWloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVWload [int32(c)] ptr mem)
+(MOVHUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHUload [int32(c)] ptr mem)
+(MOVHloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVHloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVHload [int32(c)] ptr mem)
+(MOVBUloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBUloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBUload [int32(c)] ptr mem)
+(MOVBloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(MOVBloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (MOVBload [int32(c)] ptr mem)
+(FMOVSloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVSloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVSload [int32(c)] ptr mem)
+(FMOVDloadidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+(FMOVDloadidx (MOVDconst [c]) ptr mem) && is32Bit(c) => (FMOVDload [int32(c)] ptr mem)
+
+// shifted register indexed load
+(MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDloadidx8 ptr idx mem)
+(MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWUloadidx4 ptr idx mem)
+(MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWloadidx4 ptr idx mem)
+(MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHUloadidx2 ptr idx mem)
+(MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHloadidx2 ptr idx mem)
+(MOVDloadidx ptr (SLLconst [3] idx) mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx ptr (SLLconst [2] idx) mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx ptr (SLLconst [2] idx) mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx ptr (SLLconst [1] idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (SLLconst [1] idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVHloadidx ptr (ADD idx idx) mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx ptr (ADD idx idx) mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx (SLLconst [3] idx) ptr mem) => (MOVDloadidx8 ptr idx mem)
+(MOVWloadidx (SLLconst [2] idx) ptr mem) => (MOVWloadidx4 ptr idx mem)
+(MOVWUloadidx (SLLconst [2] idx) ptr mem) => (MOVWUloadidx4 ptr idx mem)
+(MOVHloadidx (ADD idx idx) ptr mem) => (MOVHloadidx2 ptr idx mem)
+(MOVHUloadidx (ADD idx idx) ptr mem) => (MOVHUloadidx2 ptr idx mem)
+(MOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDload [int32(c)<<3] ptr mem)
+(MOVWUloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWUload [int32(c)<<2] ptr mem)
+(MOVWloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWload [int32(c)<<2] ptr mem)
+(MOVHUloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHUload [int32(c)<<1] ptr mem)
+(MOVHloadidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHload [int32(c)<<1] ptr mem)
+
+(FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (FMOVDloadidx8 ptr idx mem)
+(FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx ptr (SLLconst [3] idx) mem) => (FMOVDloadidx8 ptr idx mem)
+(FMOVSloadidx ptr (SLLconst [2] idx) mem) => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx (SLLconst [3] idx) ptr mem) => (FMOVDloadidx8 ptr idx mem)
+(FMOVSloadidx (SLLconst [2] idx) ptr mem) => (FMOVSloadidx4 ptr idx mem)
+(FMOVDloadidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (FMOVDload ptr [int32(c)<<3] mem)
+(FMOVSloadidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (FMOVSload ptr [int32(c)<<2] mem)
+
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem) && is32Bit(int64(off1)+off2)
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// register indexed store
+(MOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx ptr idx val mem)
+(MOVWstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx ptr idx val mem)
+(MOVHstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx ptr idx val mem)
+(MOVBstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (MOVBstoreidx ptr idx val mem)
+(FMOVDstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx ptr idx val mem)
+(FMOVSstore [off] {sym} (ADD ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx ptr idx val mem)
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVDstore [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVDstore [int32(c)] idx val mem)
+(MOVWstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVWstore [int32(c)] ptr val mem)
+(MOVWstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVWstore [int32(c)] idx val mem)
+(MOVHstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVHstore [int32(c)] ptr val mem)
+(MOVHstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVHstore [int32(c)] idx val mem)
+(MOVBstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (MOVBstore [int32(c)] ptr val mem)
+(MOVBstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (MOVBstore [int32(c)] idx val mem)
+(FMOVDstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVDstore [int32(c)] ptr val mem)
+(FMOVDstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVDstore [int32(c)] idx val mem)
+(FMOVSstoreidx ptr (MOVDconst [c]) val mem) && is32Bit(c) => (FMOVSstore [int32(c)] ptr val mem)
+(FMOVSstoreidx (MOVDconst [c]) idx val mem) && is32Bit(c) => (FMOVSstore [int32(c)] idx val mem)
+
+// shifted register indexed store
+(MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem) && off == 0 && sym == nil => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx ptr (SLLconst [3] idx) val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx ptr (SLLconst [2] idx) val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx ptr (SLLconst [1] idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx ptr (ADD idx idx) val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx (SLLconst [3] idx) ptr val mem) => (MOVDstoreidx8 ptr idx val mem)
+(MOVWstoreidx (SLLconst [2] idx) ptr val mem) => (MOVWstoreidx4 ptr idx val mem)
+(MOVHstoreidx (SLLconst [1] idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVHstoreidx (ADD idx idx) ptr val mem) => (MOVHstoreidx2 ptr idx val mem)
+(MOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (MOVDstore [int32(c)<<3] ptr val mem)
+(MOVWstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (MOVWstore [int32(c)<<2] ptr val mem)
+(MOVHstoreidx2 ptr (MOVDconst [c]) val mem) && is32Bit(c<<1) => (MOVHstore [int32(c)<<1] ptr val mem)
+
+(FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem) && off == 0 && sym == nil => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem) && off == 0 && sym == nil => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx ptr (SLLconst [3] idx) val mem) => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstoreidx ptr (SLLconst [2] idx) val mem) => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx (SLLconst [3] idx) ptr val mem) => (FMOVDstoreidx8 ptr idx val mem)
+(FMOVSstoreidx (SLLconst [2] idx) ptr val mem) => (FMOVSstoreidx4 ptr idx val mem)
+(FMOVDstoreidx8 ptr (MOVDconst [c]) val mem) && is32Bit(c<<3) => (FMOVDstore [int32(c)<<3] ptr val mem)
+(FMOVSstoreidx4 ptr (MOVDconst [c]) val mem) && is32Bit(c<<2) => (FMOVSstore [int32(c)<<2] ptr val mem)
+
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ && (ptr.Op != OpSB || !config.ctxt.Flag_shared) =>
+ (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem) => (MOVQstorezero [off] {sym} ptr mem)
+
+// register indexed store zero
+(MOVDstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx ptr idx mem)
+(MOVBstorezero [off] {sym} (ADD ptr idx) mem) && off == 0 && sym == nil => (MOVBstorezeroidx ptr idx mem)
+(MOVDstoreidx ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx ptr idx mem)
+(MOVWstoreidx ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx ptr idx mem)
+(MOVHstoreidx ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx ptr idx mem)
+(MOVBstoreidx ptr idx (MOVDconst [0]) mem) => (MOVBstorezeroidx ptr idx mem)
+(MOVDstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVDstorezero [int32(c)] ptr mem)
+(MOVDstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVDstorezero [int32(c)] idx mem)
+(MOVWstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVWstorezero [int32(c)] ptr mem)
+(MOVWstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVWstorezero [int32(c)] idx mem)
+(MOVHstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVHstorezero [int32(c)] ptr mem)
+(MOVHstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVHstorezero [int32(c)] idx mem)
+(MOVBstorezeroidx ptr (MOVDconst [c]) mem) && is32Bit(c) => (MOVBstorezero [int32(c)] ptr mem)
+(MOVBstorezeroidx (MOVDconst [c]) idx mem) && is32Bit(c) => (MOVBstorezero [int32(c)] idx mem)
+
+// shifted register indexed store zero
+(MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem) && off == 0 && sym == nil => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem) && off == 0 && sym == nil => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem) && off == 0 && sym == nil => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx ptr (SLLconst [3] idx) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx ptr (SLLconst [2] idx) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx ptr (SLLconst [1] idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx ptr (ADD idx idx) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx (SLLconst [3] idx) ptr mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstorezeroidx (SLLconst [2] idx) ptr mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstorezeroidx (SLLconst [1] idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVHstorezeroidx (ADD idx idx) ptr mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstoreidx8 ptr idx (MOVDconst [0]) mem) => (MOVDstorezeroidx8 ptr idx mem)
+(MOVWstoreidx4 ptr idx (MOVDconst [0]) mem) => (MOVWstorezeroidx4 ptr idx mem)
+(MOVHstoreidx2 ptr idx (MOVDconst [0]) mem) => (MOVHstorezeroidx2 ptr idx mem)
+(MOVDstorezeroidx8 ptr (MOVDconst [c]) mem) && is32Bit(c<<3) => (MOVDstorezero [int32(c<<3)] ptr mem)
+(MOVWstorezeroidx4 ptr (MOVDconst [c]) mem) && is32Bit(c<<2) => (MOVWstorezero [int32(c<<2)] ptr mem)
+(MOVHstorezeroidx2 ptr (MOVDconst [c]) mem) && is32Bit(c<<1) => (MOVHstorezero [int32(c<<1)] ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+// these seem to interact badly with other rules, resulting in slower code
+//(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBreg x)
+//(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVBUreg x)
+//(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHreg x)
+//(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVHUreg x)
+//(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWreg x)
+//(MOVWUload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> (MOVWUreg x)
+//(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVSload [off] {sym} ptr (FMOVSstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+//(FMOVDload [off] {sym} ptr (FMOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) -> x
+
+(MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+(MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVDconst [0])
+
+(MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+(MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ && (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) => (MOVDconst [0])
+
+(MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+(MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _)) && isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) => (MOVDconst [0])
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+(MOVBreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx _ _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWloadidx4 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUloadidx2 _ _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUloadidx4 _ _ _)) => (MOVDreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstoreidx ptr idx (MOVBreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVBUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVHUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (MOVWUreg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVHUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOVWUreg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOVWUreg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVHUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVHstoreidx2 ptr idx (MOVWUreg x) mem) => (MOVHstoreidx2 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+(MOVWstoreidx4 ptr idx (MOVWUreg x) mem) => (MOVWstoreidx4 ptr idx x mem)
+
+// if a register move has only 1 use, just use the same register without emitting an instruction
+// MOVDnop doesn't emit an instruction; it exists only to preserve the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// TODO: we should be able to get rid of MOVDnop altogether.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
+// fold constants into arithmetic ops
+(ADD x (MOVDconst [c])) => (ADDconst [c] x)
+(SUB x (MOVDconst [c])) => (SUBconst [c] x)
+(AND x (MOVDconst [c])) => (ANDconst [c] x)
+(OR x (MOVDconst [c])) => (ORconst [c] x)
+(XOR x (MOVDconst [c])) => (XORconst [c] x)
+(TST x (MOVDconst [c])) => (TSTconst [c] x)
+(TSTW x (MOVDconst [c])) => (TSTWconst [int32(c)] x)
+(CMN x (MOVDconst [c])) => (CMNconst [c] x)
+(CMNW x (MOVDconst [c])) => (CMNWconst [int32(c)] x)
+(BIC x (MOVDconst [c])) => (ANDconst [^c] x)
+(EON x (MOVDconst [c])) => (XORconst [^c] x)
+(ORN x (MOVDconst [c])) => (ORconst [^c] x)
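+// Illustrative example (assumed, not from upstream): for Go code like
+//     r := x + 100
+// the ADD rule above folds the constant operand into (ADDconst [100] x),
+// which later assembles to a single ADD-immediate instruction.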
+
+(SLL x (MOVDconst [c])) => (SLLconst x [c&63]) // Note: I don't think we ever generate bad constant shifts (i.e. c>=64)
+(SRL x (MOVDconst [c])) => (SRLconst x [c&63])
+(SRA x (MOVDconst [c])) => (SRAconst x [c&63])
+
+(CMP x (MOVDconst [c])) => (CMPconst [c] x)
+(CMP (MOVDconst [c]) x) => (InvertFlags (CMPconst [c] x))
+(CMPW x (MOVDconst [c])) => (CMPWconst [int32(c)] x)
+(CMPW (MOVDconst [c]) x) => (InvertFlags (CMPWconst [int32(c)] x))
+
+(ROR x (MOVDconst [c])) => (RORconst x [c&63])
+(RORW x (MOVDconst [c])) => (RORWconst x [c&31])
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW) y x))
+
+// mul-neg => mneg
+(NEG (MUL x y)) => (MNEG x y)
+(NEG (MULW x y)) => (MNEGW x y)
+(MUL (NEG x) y) => (MNEG x y)
+(MULW (NEG x) y) => (MNEGW x y)
+
+// madd/msub
+(ADD a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+(SUB a l:(MUL x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(ADD a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MSUB a x y)
+(SUB a l:(MNEG x y)) && l.Uses==1 && clobber(l) => (MADD a x y)
+
+(ADD a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
+(SUB a l:(MULW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(ADD a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MSUBW a x y)
+(SUB a l:(MNEGW x y)) && a.Type.Size() != 8 && l.Uses==1 && clobber(l) => (MADDW a x y)
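+// Illustrative example (assumed): for Go code like
+//     r := a + x*y
+// where the product has no other uses, the rules above fuse the multiply
+// and add into a single MADD (and a - x*y into MSUB).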
+
+// optimize ADCSflags, SBCSflags and friends
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c)))) => (ADCSflags x y c)
+(ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0])))) => (ADDSflags x y)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo))))) => (SBCSflags x y bo)
+(SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0])))) => (SUBSflags x y)
+
+// mul by constant
+(MUL x (MOVDconst [-1])) => (NEG x)
+(MUL _ (MOVDconst [0])) => (MOVDconst [0])
+(MUL x (MOVDconst [1])) => x
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MUL x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MUL x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MUL x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MUL x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MUL x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
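+// Illustrative example (assumed): multiplying by a small constant can avoid
+// MUL entirely, e.g. for Go code like
+//     r := x * 5
+// the isPowerOfTwo64(c-1) rule above yields (ADDshiftLL x x [2]),
+// i.e. x + x<<2 computed by a single shifted-register ADD.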
+
+(MULW x (MOVDconst [c])) && int32(c)==-1 => (NEG x)
+(MULW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MULW x (MOVDconst [c])) && int32(c)==1 => x
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c) => (SLLconst [log64(c)] x)
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (ADDshiftLL x x [log64(c-1)])
+(MULW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+(MULW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+(MULW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+(MULW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+(MULW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+
+// mneg by constant
+(MNEG x (MOVDconst [-1])) => x
+(MNEG _ (MOVDconst [0])) => (MOVDconst [0])
+(MNEG x (MOVDconst [1])) => (NEG x)
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEG x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEG x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEG x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEG x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEG x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+
+(MNEGW x (MOVDconst [c])) && int32(c)==-1 => x
+(MNEGW _ (MOVDconst [c])) && int32(c)==0 => (MOVDconst [0])
+(MNEGW x (MOVDconst [c])) && int32(c)==1 => (NEG x)
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c) => (NEG (SLLconst <x.Type> [log64(c)] x))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c) >= 3 => (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MNEGW x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c) >= 7 => (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+(MNEGW x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+(MNEGW x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+(MNEGW x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+(MNEGW x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+
+
+(MADD a x (MOVDconst [-1])) => (SUB a x)
+(MADD a _ (MOVDconst [0])) => a
+(MADD a x (MOVDconst [1])) => (ADD a x)
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADD a (MOVDconst [-1]) x) => (SUB a x)
+(MADD a (MOVDconst [0]) _) => a
+(MADD a (MOVDconst [1]) x) => (ADD a x)
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADD a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADD a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADD a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADD a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADD a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a x (MOVDconst [c])) && int32(c)==-1 => (SUB a x)
+(MADDW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MADDW a x (MOVDconst [c])) && int32(c)==1 => (ADD a x)
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MADDW a (MOVDconst [c]) x) && int32(c)==-1 => (SUB a x)
+(MADDW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MADDW a (MOVDconst [c]) x) && int32(c)==1 => (ADD a x)
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (ADDshiftLL a x [log64(c)])
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MADDW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MADDW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MADDW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MADDW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MADDW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a x (MOVDconst [-1])) => (ADD a x)
+(MSUB a _ (MOVDconst [0])) => a
+(MSUB a x (MOVDconst [1])) => (SUB a x)
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUB a (MOVDconst [-1]) x) => (ADD a x)
+(MSUB a (MOVDconst [0]) _) => a
+(MSUB a (MOVDconst [1]) x) => (SUB a x)
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && c>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUB a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && c>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUB a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUB a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUB a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUB a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a x (MOVDconst [c])) && int32(c)==-1 => (ADD a x)
+(MSUBW a _ (MOVDconst [c])) && int32(c)==0 => a
+(MSUBW a x (MOVDconst [c])) && int32(c)==1 => (SUB a x)
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a x (MOVDconst [c])) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a x (MOVDconst [c])) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a x (MOVDconst [c])) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a x (MOVDconst [c])) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a x (MOVDconst [c])) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+(MSUBW a (MOVDconst [c]) x) && int32(c)==-1 => (ADD a x)
+(MSUBW a (MOVDconst [c]) _) && int32(c)==0 => a
+(MSUBW a (MOVDconst [c]) x) && int32(c)==1 => (SUB a x)
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c) => (SUBshiftLL a x [log64(c)])
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c-1) && int32(c)>=3 => (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+(MSUBW a (MOVDconst [c]) x) && isPowerOfTwo64(c+1) && int32(c)>=7 => (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+(MSUBW a (MOVDconst [c]) x) && c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+(MSUBW a (MOVDconst [c]) x) && c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+(MSUBW a (MOVDconst [c]) x) && c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c) => (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+(MSUBW a (MOVDconst [c]) x) && c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c) => (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+
+// div by constant
+(UDIV x (MOVDconst [1])) => x
+(UDIV x (MOVDconst [c])) && isPowerOfTwo64(c) => (SRLconst [log64(c)] x)
+(UDIVW x (MOVDconst [c])) && uint32(c)==1 => x
+(UDIVW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (SRLconst [log64(c)] x)
+(UMOD _ (MOVDconst [1])) => (MOVDconst [0])
+(UMOD x (MOVDconst [c])) && isPowerOfTwo64(c) => (ANDconst [c-1] x)
+(UMODW _ (MOVDconst [c])) && uint32(c)==1 => (MOVDconst [0])
+(UMODW x (MOVDconst [c])) && isPowerOfTwo64(c) && is32Bit(c) => (ANDconst [c-1] x)
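+// Illustrative example (assumed): for unsigned Go code like
+//     q := x / 8    // x is a uint64
+//     r := x % 8
+// the power-of-two rules above turn the division into a logical shift
+// (SRLconst [3] x) and the remainder into a mask (ANDconst [7] x), so no
+// UDIV instruction is emitted.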
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVDconst [0])
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVDconst [0])
+(BIC x x) => (MOVDconst [0])
+(EON x x) => (MOVDconst [-1])
+(ORN x x) => (MOVDconst [-1])
+(AND x (MVN y)) => (BIC x y)
+(XOR x (MVN y)) => (EON x y)
+(OR x (MVN y)) => (ORN x y)
+(MVN (XOR x y)) => (EON x y)
+(NEG (NEG x)) => x
+
+(CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag) => (CSETM [cc] flag)
+(CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag) => (CSETM [arm64Negate(cc)] flag)
+(CSEL [cc] x (MOVDconst [0]) flag) => (CSEL0 [cc] x flag)
+(CSEL [cc] (MOVDconst [0]) y flag) => (CSEL0 [arm64Negate(cc)] y flag)
+(CSEL [cc] x (ADDconst [1] a) flag) => (CSINC [cc] x a flag)
+(CSEL [cc] (ADDconst [1] a) x flag) => (CSINC [arm64Negate(cc)] x a flag)
+(CSEL [cc] x (MVN a) flag) => (CSINV [cc] x a flag)
+(CSEL [cc] (MVN a) x flag) => (CSINV [arm64Negate(cc)] x a flag)
+(CSEL [cc] x (NEG a) flag) => (CSNEG [cc] x a flag)
+(CSEL [cc] (NEG a) x flag) => (CSNEG [arm64Negate(cc)] x a flag)
+
+(SUB x (SUB y z)) => (SUB (ADD <v.Type> x z) y)
+(SUB (SUB x y) z) => (SUB x (ADD <y.Type> y z))
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (MVN x)
+
+// generic constant folding
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [c+d])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVDconst [d])) => (MOVDconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVDconst [d])) => (MOVDconst [d<<uint64(c)])
+(SRLconst [c] (MOVDconst [d])) => (MOVDconst [int64(uint64(d)>>uint64(c))])
+(SRAconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(MUL (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c*d])
+(MULW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [int64(int32(c)*int32(d))])
+(MNEG (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-c*d])
+(MNEGW (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [-int64(int32(c)*int32(d))])
+(MADD (MOVDconst [c]) x y) => (ADDconst [c] (MUL <x.Type> x y))
+(MADDW (MOVDconst [c]) x y) => (ADDconst [c] (MULW <x.Type> x y))
+(MSUB (MOVDconst [c]) x y) => (ADDconst [c] (MNEG <x.Type> x y))
+(MSUBW (MOVDconst [c]) x y) => (ADDconst [c] (MNEGW <x.Type> x y))
+(MADD a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [c*d] a)
+(MADDW a (MOVDconst [c]) (MOVDconst [d])) => (ADDconst [int64(int32(c)*int32(d))] a)
+(MSUB a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [c*d] a)
+(MSUBW a (MOVDconst [c]) (MOVDconst [d])) => (SUBconst [int64(int32(c)*int32(d))] a)
+(DIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c/d])
+(UDIV (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)/uint64(d))])
+(DIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)/int32(d))])
+(UDIVW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)/uint32(d))])
+(MOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [c%d])
+(UMOD (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint64(c)%uint64(d))])
+(MODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(int32(c)%int32(d))])
+(UMODW (MOVDconst [c]) (MOVDconst [d])) && d != 0 => (MOVDconst [int64(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ANDconst [c] (MOVWUreg x)) => (ANDconst [c&(1<<32-1)] x)
+(ANDconst [c] (MOVHUreg x)) => (ANDconst [c&(1<<16-1)] x)
+(ANDconst [c] (MOVBUreg x)) => (ANDconst [c&(1<<8-1)] x)
+(MOVWUreg (ANDconst [c] x)) => (ANDconst [c&(1<<32-1)] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&(1<<16-1)] x)
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&(1<<8-1)] x)
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(MVN (MOVDconst [c])) => (MOVDconst [^c])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+(MOVDreg (MOVDconst [c])) => (MOVDconst [c])
+
+// constant comparisons
+(CMPconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags64(x,y)])
+(CMPWconst (MOVDconst [x]) [y]) => (FlagConstant [subFlags32(int32(x),y)])
+(TSTconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags64(x&y)])
+(TSTWconst (MOVDconst [x]) [y]) => (FlagConstant [logicFlags32(int32(x)&y)])
+(CMNconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags64(x,y)])
+(CMNWconst (MOVDconst [x]) [y]) => (FlagConstant [addFlags32(int32(x),y)])
+
+// other known comparisons
+(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (MOVWUreg _) [c]) && 0xffffffff < c => (FlagConstant [subFlags64(0,1)])
+(CMPconst (ANDconst _ [m]) [n]) && 0 <= m && m < n => (FlagConstant [subFlags64(0,1)])
+(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n) => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVBUreg _) [c]) && 0xff < c => (FlagConstant [subFlags64(0,1)])
+(CMPWconst (MOVHUreg _) [c]) && 0xffff < c => (FlagConstant [subFlags64(0,1)])
+
+// absorb flag constants into branches
+(EQ (FlagConstant [fc]) yes no) && fc.eq() => (First yes no)
+(EQ (FlagConstant [fc]) yes no) && !fc.eq() => (First no yes)
+
+(NE (FlagConstant [fc]) yes no) && fc.ne() => (First yes no)
+(NE (FlagConstant [fc]) yes no) && !fc.ne() => (First no yes)
+
+(LT (FlagConstant [fc]) yes no) && fc.lt() => (First yes no)
+(LT (FlagConstant [fc]) yes no) && !fc.lt() => (First no yes)
+
+(LE (FlagConstant [fc]) yes no) && fc.le() => (First yes no)
+(LE (FlagConstant [fc]) yes no) && !fc.le() => (First no yes)
+
+(GT (FlagConstant [fc]) yes no) && fc.gt() => (First yes no)
+(GT (FlagConstant [fc]) yes no) && !fc.gt() => (First no yes)
+
+(GE (FlagConstant [fc]) yes no) && fc.ge() => (First yes no)
+(GE (FlagConstant [fc]) yes no) && !fc.ge() => (First no yes)
+
+(ULT (FlagConstant [fc]) yes no) && fc.ult() => (First yes no)
+(ULT (FlagConstant [fc]) yes no) && !fc.ult() => (First no yes)
+
+(ULE (FlagConstant [fc]) yes no) && fc.ule() => (First yes no)
+(ULE (FlagConstant [fc]) yes no) && !fc.ule() => (First no yes)
+
+(UGT (FlagConstant [fc]) yes no) && fc.ugt() => (First yes no)
+(UGT (FlagConstant [fc]) yes no) && !fc.ugt() => (First no yes)
+
+(UGE (FlagConstant [fc]) yes no) && fc.uge() => (First yes no)
+(UGE (FlagConstant [fc]) yes no) && !fc.uge() => (First no yes)
+
+(LTnoov (FlagConstant [fc]) yes no) && fc.ltNoov() => (First yes no)
+(LTnoov (FlagConstant [fc]) yes no) && !fc.ltNoov() => (First no yes)
+
+(LEnoov (FlagConstant [fc]) yes no) && fc.leNoov() => (First yes no)
+(LEnoov (FlagConstant [fc]) yes no) && !fc.leNoov() => (First no yes)
+
+(GTnoov (FlagConstant [fc]) yes no) && fc.gtNoov() => (First yes no)
+(GTnoov (FlagConstant [fc]) yes no) && !fc.gtNoov() => (First no yes)
+
+(GEnoov (FlagConstant [fc]) yes no) && fc.geNoov() => (First yes no)
+(GEnoov (FlagConstant [fc]) yes no) && !fc.geNoov() => (First no yes)
+
+(Z (MOVDconst [0]) yes no) => (First yes no)
+(Z (MOVDconst [c]) yes no) && c != 0 => (First no yes)
+(NZ (MOVDconst [0]) yes no) => (First no yes)
+(NZ (MOVDconst [c]) yes no) && c != 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First yes no)
+(ZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) == 0 => (First no yes)
+(NZW (MOVDconst [c]) yes no) && int32(c) != 0 => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(ULT (InvertFlags cmp) yes no) => (UGT cmp yes no)
+(UGT (InvertFlags cmp) yes no) => (ULT cmp yes no)
+(ULE (InvertFlags cmp) yes no) => (UGE cmp yes no)
+(UGE (InvertFlags cmp) yes no) => (ULE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+(FLT (InvertFlags cmp) yes no) => (FGT cmp yes no)
+(FGT (InvertFlags cmp) yes no) => (FLT cmp yes no)
+(FLE (InvertFlags cmp) yes no) => (FGE cmp yes no)
+(FGE (InvertFlags cmp) yes no) => (FLE cmp yes no)
+(LTnoov (InvertFlags cmp) yes no) => (GTnoov cmp yes no)
+(GEnoov (InvertFlags cmp) yes no) => (LEnoov cmp yes no)
+(LEnoov (InvertFlags cmp) yes no) => (GEnoov cmp yes no)
+(GTnoov (InvertFlags cmp) yes no) => (LTnoov cmp yes no)
+
+// absorb InvertFlags into conditional instructions
+(CSEL [cc] x y (InvertFlags cmp)) => (CSEL [arm64Invert(cc)] x y cmp)
+(CSEL0 [cc] x (InvertFlags cmp)) => (CSEL0 [arm64Invert(cc)] x cmp)
+(CSETM [cc] (InvertFlags cmp)) => (CSETM [arm64Invert(cc)] cmp)
+(CSINC [cc] x y (InvertFlags cmp)) => (CSINC [arm64Invert(cc)] x y cmp)
+(CSINV [cc] x y (InvertFlags cmp)) => (CSINV [arm64Invert(cc)] x y cmp)
+(CSNEG [cc] x y (InvertFlags cmp)) => (CSNEG [arm64Invert(cc)] x y cmp)
+
+// absorb flag constants into boolean values
+(Equal (FlagConstant [fc])) => (MOVDconst [b2i(fc.eq())])
+(NotEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ne())])
+(LessThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.lt())])
+(LessThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ult())])
+(LessEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.le())])
+(LessEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ule())])
+(GreaterThan (FlagConstant [fc])) => (MOVDconst [b2i(fc.gt())])
+(GreaterThanU (FlagConstant [fc])) => (MOVDconst [b2i(fc.ugt())])
+(GreaterEqual (FlagConstant [fc])) => (MOVDconst [b2i(fc.ge())])
+(GreaterEqualU (FlagConstant [fc])) => (MOVDconst [b2i(fc.uge())])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(LessThanU (InvertFlags x)) => (GreaterThanU x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(GreaterThanU (InvertFlags x)) => (LessThanU x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(LessEqualU (InvertFlags x)) => (GreaterEqualU x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+(GreaterEqualU (InvertFlags x)) => (LessEqualU x)
+(LessThanF (InvertFlags x)) => (GreaterThanF x)
+(LessEqualF (InvertFlags x)) => (GreaterEqualF x)
+(GreaterThanF (InvertFlags x)) => (LessThanF x)
+(GreaterEqualF (InvertFlags x)) => (LessEqualF x)
+
+// Boolean-generating instructions (NOTE: NOT all boolean Values) always
+// zero the upper bits of the register; no need to zero-extend
+(MOVBUreg x:((Equal|NotEqual|LessThan|LessThanU|LessThanF|LessEqual|LessEqualU|LessEqualF|GreaterThan|GreaterThanU|GreaterThanF|GreaterEqual|GreaterEqualU|GreaterEqualF) _)) => (MOVDreg x)
+
+// absorb flag constants into conditional instructions
+(CSEL [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => y
+(CSEL0 [cc] x flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSEL0 [cc] _ flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+(CSNEG [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSNEG [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (NEG y)
+(CSINV [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSINV [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (Not y)
+(CSINC [cc] x _ flag) && ccARM64Eval(cc, flag) > 0 => x
+(CSINC [cc] _ y flag) && ccARM64Eval(cc, flag) < 0 => (ADDconst [1] y)
+(CSETM [cc] flag) && ccARM64Eval(cc, flag) > 0 => (MOVDconst [-1])
+(CSETM [cc] flag) && ccARM64Eval(cc, flag) < 0 => (MOVDconst [0])
+
+// absorb flags back into boolean CSEL
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL [boolval.Op] x y flagArg(boolval))
+(CSEL [cc] x y (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64NotEqual && flagArg(boolval) != nil =>
+ (CSEL0 [boolval.Op] x flagArg(boolval))
+(CSEL0 [cc] x (CMPWconst [0] boolval)) && cc == OpARM64Equal && flagArg(boolval) != nil =>
+ (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+
+// absorb shifts into ops
+(NEG x:(SLLconst [c] y)) && clobberIfDead(x) => (NEGshiftLL [c] y)
+(NEG x:(SRLconst [c] y)) && clobberIfDead(x) => (NEGshiftRL [c] y)
+(NEG x:(SRAconst [c] y)) && clobberIfDead(x) => (NEGshiftRA [c] y)
+(MVN x:(SLLconst [c] y)) && clobberIfDead(x) => (MVNshiftLL [c] y)
+(MVN x:(SRLconst [c] y)) && clobberIfDead(x) => (MVNshiftRL [c] y)
+(MVN x:(SRAconst [c] y)) && clobberIfDead(x) => (MVNshiftRA [c] y)
+(MVN x:(RORconst [c] y)) && clobberIfDead(x) => (MVNshiftRO [c] y)
+(ADD x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ADDshiftLL x0 y [c])
+(ADD x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ADDshiftRL x0 y [c])
+(ADD x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ADDshiftRA x0 y [c])
+(SUB x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (SUBshiftLL x0 y [c])
+(SUB x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (SUBshiftRL x0 y [c])
+(SUB x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (SUBshiftRA x0 y [c])
+(AND x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ANDshiftLL x0 y [c])
+(AND x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ANDshiftRL x0 y [c])
+(AND x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ANDshiftRA x0 y [c])
+(AND x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ANDshiftRO x0 y [c])
+(OR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORshiftLL x0 y [c]) // useful for combined load
+(OR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORshiftRL x0 y [c])
+(OR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORshiftRA x0 y [c])
+(OR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORshiftRO x0 y [c])
+(XOR x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (XORshiftLL x0 y [c])
+(XOR x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (XORshiftRL x0 y [c])
+(XOR x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (XORshiftRA x0 y [c])
+(XOR x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (XORshiftRO x0 y [c])
+(BIC x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (BICshiftLL x0 y [c])
+(BIC x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (BICshiftRL x0 y [c])
+(BIC x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (BICshiftRA x0 y [c])
+(BIC x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (BICshiftRO x0 y [c])
+(ORN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (ORNshiftLL x0 y [c])
+(ORN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (ORNshiftRL x0 y [c])
+(ORN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (ORNshiftRA x0 y [c])
+(ORN x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (ORNshiftRO x0 y [c])
+(EON x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (EONshiftLL x0 y [c])
+(EON x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (EONshiftRL x0 y [c])
+(EON x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (EONshiftRA x0 y [c])
+(EON x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (EONshiftRO x0 y [c])
+(CMP x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMPshiftLL x0 y [c])
+(CMP x0:(SLLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftLL x1 y [c]))
+(CMP x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMPshiftRL x0 y [c])
+(CMP x0:(SRLconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRL x1 y [c]))
+(CMP x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMPshiftRA x0 y [c])
+(CMP x0:(SRAconst [c] y) x1) && clobberIfDead(x0) => (InvertFlags (CMPshiftRA x1 y [c]))
+(CMN x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (CMNshiftLL x0 y [c])
+(CMN x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (CMNshiftRL x0 y [c])
+(CMN x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (CMNshiftRA x0 y [c])
+(TST x0 x1:(SLLconst [c] y)) && clobberIfDead(x1) => (TSTshiftLL x0 y [c])
+(TST x0 x1:(SRLconst [c] y)) && clobberIfDead(x1) => (TSTshiftRL x0 y [c])
+(TST x0 x1:(SRAconst [c] y)) && clobberIfDead(x1) => (TSTshiftRA x0 y [c])
+(TST x0 x1:(RORconst [c] y)) && clobberIfDead(x1) => (TSTshiftRO x0 y [c])
+
+// prefer *const ops to *shift ops
+(ADDshiftLL (MOVDconst [c]) x [d]) => (ADDconst [c] (SLLconst <x.Type> x [d]))
+(ADDshiftRL (MOVDconst [c]) x [d]) => (ADDconst [c] (SRLconst <x.Type> x [d]))
+(ADDshiftRA (MOVDconst [c]) x [d]) => (ADDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftLL (MOVDconst [c]) x [d]) => (ANDconst [c] (SLLconst <x.Type> x [d]))
+(ANDshiftRL (MOVDconst [c]) x [d]) => (ANDconst [c] (SRLconst <x.Type> x [d]))
+(ANDshiftRA (MOVDconst [c]) x [d]) => (ANDconst [c] (SRAconst <x.Type> x [d]))
+(ANDshiftRO (MOVDconst [c]) x [d]) => (ANDconst [c] (RORconst <x.Type> x [d]))
+(ORshiftLL (MOVDconst [c]) x [d]) => (ORconst [c] (SLLconst <x.Type> x [d]))
+(ORshiftRL (MOVDconst [c]) x [d]) => (ORconst [c] (SRLconst <x.Type> x [d]))
+(ORshiftRA (MOVDconst [c]) x [d]) => (ORconst [c] (SRAconst <x.Type> x [d]))
+(ORshiftRO (MOVDconst [c]) x [d]) => (ORconst [c] (RORconst <x.Type> x [d]))
+(XORshiftLL (MOVDconst [c]) x [d]) => (XORconst [c] (SLLconst <x.Type> x [d]))
+(XORshiftRL (MOVDconst [c]) x [d]) => (XORconst [c] (SRLconst <x.Type> x [d]))
+(XORshiftRA (MOVDconst [c]) x [d]) => (XORconst [c] (SRAconst <x.Type> x [d]))
+(XORshiftRO (MOVDconst [c]) x [d]) => (XORconst [c] (RORconst <x.Type> x [d]))
+(CMPshiftLL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+(CMPshiftRL (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+(CMPshiftRA (MOVDconst [c]) x [d]) => (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+(CMNshiftLL (MOVDconst [c]) x [d]) => (CMNconst [c] (SLLconst <x.Type> x [d]))
+(CMNshiftRL (MOVDconst [c]) x [d]) => (CMNconst [c] (SRLconst <x.Type> x [d]))
+(CMNshiftRA (MOVDconst [c]) x [d]) => (CMNconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftLL (MOVDconst [c]) x [d]) => (TSTconst [c] (SLLconst <x.Type> x [d]))
+(TSTshiftRL (MOVDconst [c]) x [d]) => (TSTconst [c] (SRLconst <x.Type> x [d]))
+(TSTshiftRA (MOVDconst [c]) x [d]) => (TSTconst [c] (SRAconst <x.Type> x [d]))
+(TSTshiftRO (MOVDconst [c]) x [d]) => (TSTconst [c] (RORconst <x.Type> x [d]))
+
+// constant folding in *shift ops
+(MVNshiftLL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)<<uint64(d))])
+(MVNshiftRL (MOVDconst [c]) [d]) => (MOVDconst [^int64(uint64(c)>>uint64(d))])
+(MVNshiftRA (MOVDconst [c]) [d]) => (MOVDconst [^(c>>uint64(d))])
+(MVNshiftRO (MOVDconst [c]) [d]) => (MOVDconst [^rotateRight64(c, d)])
+(NEGshiftLL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)<<uint64(d))])
+(NEGshiftRL (MOVDconst [c]) [d]) => (MOVDconst [-int64(uint64(c)>>uint64(d))])
+(NEGshiftRA (MOVDconst [c]) [d]) => (MOVDconst [-(c>>uint64(d))])
+(ADDshiftLL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)<<uint64(d))])
+(ADDshiftRL x (MOVDconst [c]) [d]) => (ADDconst x [int64(uint64(c)>>uint64(d))])
+(ADDshiftRA x (MOVDconst [c]) [d]) => (ADDconst x [c>>uint64(d)])
+(SUBshiftLL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)<<uint64(d))])
+(SUBshiftRL x (MOVDconst [c]) [d]) => (SUBconst x [int64(uint64(c)>>uint64(d))])
+(SUBshiftRA x (MOVDconst [c]) [d]) => (SUBconst x [c>>uint64(d)])
+(ANDshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)<<uint64(d))])
+(ANDshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [int64(uint64(c)>>uint64(d))])
+(ANDshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [c>>uint64(d)])
+(ANDshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [rotateRight64(c, d)])
+(ORshiftLL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)<<uint64(d))])
+(ORshiftRL x (MOVDconst [c]) [d]) => (ORconst x [int64(uint64(c)>>uint64(d))])
+(ORshiftRA x (MOVDconst [c]) [d]) => (ORconst x [c>>uint64(d)])
+(ORshiftRO x (MOVDconst [c]) [d]) => (ORconst x [rotateRight64(c, d)])
+(XORshiftLL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)<<uint64(d))])
+(XORshiftRL x (MOVDconst [c]) [d]) => (XORconst x [int64(uint64(c)>>uint64(d))])
+(XORshiftRA x (MOVDconst [c]) [d]) => (XORconst x [c>>uint64(d)])
+(XORshiftRO x (MOVDconst [c]) [d]) => (XORconst x [rotateRight64(c, d)])
+(BICshiftLL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)<<uint64(d))])
+(BICshiftRL x (MOVDconst [c]) [d]) => (ANDconst x [^int64(uint64(c)>>uint64(d))])
+(BICshiftRA x (MOVDconst [c]) [d]) => (ANDconst x [^(c>>uint64(d))])
+(BICshiftRO x (MOVDconst [c]) [d]) => (ANDconst x [^rotateRight64(c, d)])
+(ORNshiftLL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)<<uint64(d))])
+(ORNshiftRL x (MOVDconst [c]) [d]) => (ORconst x [^int64(uint64(c)>>uint64(d))])
+(ORNshiftRA x (MOVDconst [c]) [d]) => (ORconst x [^(c>>uint64(d))])
+(ORNshiftRO x (MOVDconst [c]) [d]) => (ORconst x [^rotateRight64(c, d)])
+(EONshiftLL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)<<uint64(d))])
+(EONshiftRL x (MOVDconst [c]) [d]) => (XORconst x [^int64(uint64(c)>>uint64(d))])
+(EONshiftRA x (MOVDconst [c]) [d]) => (XORconst x [^(c>>uint64(d))])
+(EONshiftRO x (MOVDconst [c]) [d]) => (XORconst x [^rotateRight64(c, d)])
+(CMPshiftLL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)<<uint64(d))])
+(CMPshiftRL x (MOVDconst [c]) [d]) => (CMPconst x [int64(uint64(c)>>uint64(d))])
+(CMPshiftRA x (MOVDconst [c]) [d]) => (CMPconst x [c>>uint64(d)])
+(CMNshiftLL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)<<uint64(d))])
+(CMNshiftRL x (MOVDconst [c]) [d]) => (CMNconst x [int64(uint64(c)>>uint64(d))])
+(CMNshiftRA x (MOVDconst [c]) [d]) => (CMNconst x [c>>uint64(d)])
+(TSTshiftLL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)<<uint64(d))])
+(TSTshiftRL x (MOVDconst [c]) [d]) => (TSTconst x [int64(uint64(c)>>uint64(d))])
+(TSTshiftRA x (MOVDconst [c]) [d]) => (TSTconst x [c>>uint64(d)])
+(TSTshiftRO x (MOVDconst [c]) [d]) => (TSTconst x [rotateRight64(c, d)])
+
+// simplification with *shift ops
+(SUBshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(SUBshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(ANDshiftLL y:(SLLconst x [c]) x [c]) => y
+(ANDshiftRL y:(SRLconst x [c]) x [c]) => y
+(ANDshiftRA y:(SRAconst x [c]) x [c]) => y
+(ANDshiftRO y:(RORconst x [c]) x [c]) => y
+(ORshiftLL y:(SLLconst x [c]) x [c]) => y
+(ORshiftRL y:(SRLconst x [c]) x [c]) => y
+(ORshiftRA y:(SRAconst x [c]) x [c]) => y
+(ORshiftRO y:(RORconst x [c]) x [c]) => y
+(XORshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(XORshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [0])
+(BICshiftRO (RORconst x [c]) x [c]) => (MOVDconst [0])
+(EONshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(EONshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftLL (SLLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRL (SRLconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRA (SRAconst x [c]) x [c]) => (MOVDconst [-1])
+(ORNshiftRO (RORconst x [c]) x [c]) => (MOVDconst [-1])
+
+// Generate rotates with const shift
+(ADDshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+( ORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(XORshiftLL [c] (SRLconst x [64-c]) x) => (RORconst [64-c] x)
+(ADDshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+( ORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+(XORshiftRL [c] (SLLconst x [64-c]) x) => (RORconst [ c] x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (RORWconst [32-c] x)
+(ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+(XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x)) && c < 32 && t.Size() == 4 => (RORWconst [c] x)
+
+(RORconst [c] (RORconst [d] x)) => (RORconst [(c+d)&63] x)
+(RORWconst [c] (RORWconst [d] x)) => (RORWconst [(c+d)&31] x)
+
+// Generate rotates with non-const shift.
+// These rules match the Go source code like
+// y &= 63
+// x << y | x >> (64-y)
+// "|" can also be "^" or "+".
+// Because arm64 does not have a ROL instruction, ROL(x, y) is replaced by ROR(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt64> x (ANDconst <t> [63] y))
+ (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))))) && cc == OpARM64LessThanU
+ => (ROR x y)
+
+// These rules match the Go source code like
+// y &= 31
+// x << y | x >> (32-y)
+// "|" can also be "^" or "+".
+// Because arm64 does not have a ROLW instruction, ROLW(x, y) is replaced by RORW(x, -y).
+((ADD|OR|XOR) (SLL x (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x (NEG <t> y))
+((ADD|OR|XOR) (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y))
+ (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))
+ (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))))) && cc == OpARM64LessThanU
+ => (RORW x y)
+
+// rev16w | rev16
+// ((x>>8) | (x<<8)) => (REV16W x), where x has type uint16; "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x) => (REV16W x)
+
+// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ => (REV16W x)
+
+// ((x & 0xff00ff00ff00ff00)>>8) | ((x & 0x00ff00ff00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ => (REV16 x)
+
+// ((x & 0xff00ff00)>>8) | ((x & 0x00ff00ff)<<8), "|" can also be "^" or "+".
+((ADDshiftLL|ORshiftLL|XORshiftLL) [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ && (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ => (REV16 (ANDconst <x.Type> [0xffffffff] x))
+
+// Extract from reg pair
+(ADDshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+( ORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+(XORshiftLL [c] (SRLconst x [64-c]) x2) => (EXTRconst [64-c] x2 x)
+
+(ADDshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+( ORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+(XORshiftLL <t> [c] (UBFX [bfc] x) x2) && c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ => (EXTRWconst [32-c] x2 x)
+
+// Rewrite special pairs of shifts to AND.
+// On ARM64 the bitmask can fit into an instruction.
+(SRLconst [c] (SLLconst [c] x)) && 0 < c && c < 64 => (ANDconst [1<<uint(64-c)-1] x) // mask out high bits
+(SLLconst [c] (SRLconst [c] x)) && 0 < c && c < 64 => (ANDconst [^(1<<uint(c)-1)] x) // mask out low bits
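+// Illustrative example (assumed): for Go code like
+//     r := (x << 56) >> 56    // x is a uint64
+// the first rule above produces (ANDconst [0xff] x); the bitmask encodes
+// directly into the AND, which is cheaper than the two shifts.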
+
+// Special case: setting a bit to 1. An example is math.Copysign(c, -1).
+(ORconst [c1] (ANDconst [c2] x)) && c2|c1 == ^0 => (ORconst [c1] x)
+
+// If the shift amount is larger than the datasize(32, 16, 8), we can optimize to constant 0.
+(MOVWUreg (SLLconst [lc] x)) && lc >= 32 => (MOVDconst [0])
+(MOVHUreg (SLLconst [lc] x)) && lc >= 16 => (MOVDconst [0])
+(MOVBUreg (SLLconst [lc] x)) && lc >= 8 => (MOVDconst [0])
+
+// After zero extension, the upper (64-datasize(32|16|8)) bits are zero, so we can optimize to constant 0.
+(SRLconst [rc] (MOVWUreg x)) && rc >= 32 => (MOVDconst [0])
+(SRLconst [rc] (MOVHUreg x)) && rc >= 16 => (MOVDconst [0])
+(SRLconst [rc] (MOVBUreg x)) && rc >= 8 => (MOVDconst [0])
+
+// bitfield ops
+
+// sbfiz
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc > rc => (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// int64(x << lc)
+(MOVWreg (SLLconst [lc] x)) && lc < 32 => (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHreg (SLLconst [lc] x)) && lc < 16 => (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBreg (SLLconst [lc] x)) && lc < 8 => (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+// int64(x) << lc
+(SLLconst [lc] (MOVWreg x)) => (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHreg x)) => (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBreg x)) => (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
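+// Illustrative example (assumed): for Go code like
+//     r := int64(v) << 8    // v is an int32
+// the "int64(x) << lc" rule above emits a single SBFIZ (signed bitfield
+// insert in zero) instead of a separate sign extension and shift.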
+
+// sbfx
+// (x << lc) >> rc
+(SRAconst [rc] (SLLconst [lc] x)) && lc <= rc => (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// int64(x) >> rc
+(SRAconst [rc] (MOVWreg x)) && rc < 32 => (SBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRAconst [rc] (MOVHreg x)) && rc < 16 => (SBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRAconst [rc] (MOVBreg x)) && rc < 8 => (SBFX [armBFAuxInt(rc, 8-rc)] x)
+// merge sbfx and sign-extension into sbfx
+(MOVWreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (SBFX [bfc] x)
+(MOVHreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (SBFX [bfc] x)
+(MOVBreg (SBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (SBFX [bfc] x)
+
+// sbfiz/sbfx combinations: merge shifts into bitfield ops
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRAconst [sc] (SBFIZ [bfc] x)) && sc >= bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+
+// ubfiz
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc > rc => (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+// uint64(x) << lc
+(SLLconst [lc] (MOVWUreg x)) => (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+(SLLconst [lc] (MOVHUreg x)) => (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+(SLLconst [lc] (MOVBUreg x)) => (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+// uint64(x << lc)
+(MOVWUreg (SLLconst [lc] x)) && lc < 32 => (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+(MOVHUreg (SLLconst [lc] x)) && lc < 16 => (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+(MOVBUreg (SLLconst [lc] x)) && lc < 8 => (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
+
+// merge ANDconst into ubfiz
+// (x & ac) << sc
+(SLLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+// (x << sc) & ac
+(ANDconst [ac] (SLLconst [sc] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+
+// ubfx
+// (x << lc) >> rc
+(SRLconst [rc] (SLLconst [lc] x)) && lc < rc => (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+// uint64(x) >> rc
+(SRLconst [rc] (MOVWUreg x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32-rc)] x)
+(SRLconst [rc] (MOVHUreg x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16-rc)] x)
+(SRLconst [rc] (MOVBUreg x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8-rc)] x)
+// uint64(x >> rc)
+(MOVWUreg (SRLconst [rc] x)) && rc < 32 => (UBFX [armBFAuxInt(rc, 32)] x)
+(MOVHUreg (SRLconst [rc] x)) && rc < 16 => (UBFX [armBFAuxInt(rc, 16)] x)
+(MOVBUreg (SRLconst [rc] x)) && rc < 8 => (UBFX [armBFAuxInt(rc, 8)] x)
+// merge ANDconst into ubfx
+// (x >> sc) & ac
+(ANDconst [ac] (SRLconst [sc] x)) && isARM64BFMask(sc, ac, 0)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+// (x & ac) >> sc
+(SRLconst [sc] (ANDconst [ac] x)) && isARM64BFMask(sc, ac, sc)
+ => (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+
+// merge ubfx and zero-extension into ubfx
+(MOVWUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 32 => (UBFX [bfc] x)
+(MOVHUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 16 => (UBFX [bfc] x)
+(MOVBUreg (UBFX [bfc] x)) && bfc.getARM64BFwidth() <= 8 => (UBFX [bfc] x)
+
+// ubfiz/ubfx combinations: merge shifts into bitfield ops
+(SRLconst [sc] (UBFX [bfc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+(UBFX [bfc] (SRLconst [sc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(SLLconst [sc] (UBFIZ [bfc] x)) && sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+(UBFIZ [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) >> c2) >> c3
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(SRLconst [sc] (UBFIZ [bfc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+// ((x << c1) << c2) >> c3
+(UBFX [bfc] (SLLconst [sc] x)) && sc == bfc.getARM64BFlsb()
+ => (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc < bfc.getARM64BFlsb()
+ => (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+(UBFX [bfc] (SLLconst [sc] x)) && sc > bfc.getARM64BFlsb()
+ && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ => (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
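+// Similarly (a sketch, not part of the original rules): UBFX [armBFAuxInt(lsb, width)] x
+// extracts `width` bits starting at bit `lsb`, zero-extended, and UBFIZ places the low `width`
+// bits of x at bit `lsb` with zeros elsewhere. E.g. (SRLconst [16] (SLLconst [8] x)) has
+// lc=8 < rc=16, so the ubfx rule above yields UBFX [armBFAuxInt(8, 48)]: bits 8..55 of x,
+// zero-extended.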
+
+// bfi
+(OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ && ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ => (BFI [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ && lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ => (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+// bfxil
+(OR (UBFX [bfc] x) (ANDconst [ac] y)) && ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ => (BFXIL [bfc] y x)
+(ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y)) && sc == bfc.getARM64BFwidth()
+ => (BFXIL [bfc] y x)
+(ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x)) && lc < rc && ac == ^((1<<uint(64-rc)-1))
+ => (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
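+// For reference (a sketch, not part of the original rules): BFI [armBFAuxInt(lsb, width)] y x
+// copies the low `width` bits of x into y at bit position `lsb`, leaving y's other bits
+// unchanged; BFXIL [armBFAuxInt(lsb, width)] y x copies `width` bits of x starting at bit `lsb`
+// into the low bits of y. In the first bfi rule above, the ANDconst mask ac clears exactly the
+// destination bit range in y, so the OR is a pure bitfield insert.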
+
+// do combined loads
+// little endian loads
+// b[0] | b[1]<<8 => load 16-bit
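+// A hypothetical Go-level source pattern (an assumption, not taken from this file) that
+// produces this shape is
+//   u := uint16(b[0]) | uint16(b[1])<<8
+// which, since b[0] and b[1] are adjacent in memory, can be replaced by one unaligned 16-bit load.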
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ x0:(MOVHUloadidx2 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4]<<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUload [i0] {s} p mem)
+ y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx4 ptr0 idx0 mem)
+ y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [6] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ x0:(MOVWUloadidx ptr idx mem)
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+
+// b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 32-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [3] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [2] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+
+// b[7]<<56 | b[6]<<48 | b[5]<<40 | b[4]<<32 | b[3]<<24 | b[2]<<16 | b[1]<<8 | b[0] => load 64-bit
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [7] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [6] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [5] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [4] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [3] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [2] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
+
+// big endian loads
+// b[1] | b[0]<<8 => load 16-bit, reverse
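+// A hypothetical Go-level source pattern (an assumption, not taken from this file) is
+//   u := uint16(b[1]) | uint16(b[0])<<8
+// i.e. a big-endian decode; it becomes one 16-bit load followed by a byte reverse (REV16W).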
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [8]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, y0, y1)
+ => @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit, reverse
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [i2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUload [2] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [24] o0:(ORshiftLL [16]
+ y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1
+ && o0.Uses == 1
+ && mergePoint(b,x0,x1,x2) != nil
+ && clobber(x0, x1, x2, y0, y1, y2, o0)
+ => @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit, reverse
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [i4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUload [4] {s} p mem))
+ y1:(MOVDnop x1:(MOVBUload [3] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32]
+ y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4) != nil
+ && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3) != nil
+ && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ => @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit, reverse
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)))
+ y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)))
+ y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ y2:(MOVDnop x2:(MOVBUload [2] {s} p mem)))
+ y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ y4:(MOVDnop x4:(MOVBUload [4] {s} p mem)))
+ y5:(MOVDnop x5:(MOVBUload [5] {s} p mem)))
+ y6:(MOVDnop x6:(MOVBUload [6] {s} p mem)))
+ y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)))
+ && s == nil
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+(OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56]
+ y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)))
+ y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem)))
+ y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem)))
+ y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem)))
+ y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem)))
+ y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1
+ && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1
+ && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1
+ && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil
+ && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ => @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+
+// Combine zero stores into larger (unaligned) stores.
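+// A hypothetical example (an assumption, not from this file): two adjacent byte stores of zero,
+// e.g. b[0], b[1] = 0, 0, can be merged into a single 16-bit zero store, and so on up to the
+// 16-byte MOVQstorezero.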
+(MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),1)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstorezeroidx ptr1 idx1 mem)
+(MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstorezeroidx ptr idx mem)
+(MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),2)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 idx1 mem)
+(MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstorezeroidx ptr idx mem)
+(MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+(MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),4)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 idx1 mem)
+(MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstorezeroidx ptr idx mem)
+(MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+(MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
+ && x.Uses == 1
+ && areAdjacentOffsets(int64(i),int64(j),8)
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+(MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
+(MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVQstorezero [0] {s} p0 mem)
+
+// Combine stores into larger (unaligned) stores.
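+// A hypothetical example (an assumption, not from this file): writing a 16-bit value as
+//   b[0] = byte(v); b[1] = byte(v >> 8)
+// stores the low byte and then the next byte; the pair can be replaced by one MOVHstore.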
+(MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb()
+ && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb()
+ && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr0 w0 mem)
+(MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr1 idx1 w0 mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+(MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVWstore [i-2] {s} ptr0 w0 mem)
+(MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVWstoreidx ptr1 idx1 w0 mem)
+(MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w mem)
+(MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstoreidx ptr idx w mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+(MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && isSamePtr(ptr0, ptr1)
+ && clobber(x)
+ => (MOVDstore [i-4] {s} ptr0 w0 mem)
+(MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVDstoreidx ptr1 idx1 w0 mem)
+(MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1)
+ && clobber(x)
+ => (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w)
+ x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w)
+ x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w)
+ x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w)
+ x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+(MOVBstore [7] {s} p w
+ x0:(MOVBstore [6] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [5] {s} p (SRLconst [16] w)
+ x2:(MOVBstore [4] {s} p (SRLconst [24] w)
+ x3:(MOVBstore [3] {s} p (SRLconst [32] w)
+ x4:(MOVBstore [2] {s} p (SRLconst [40] w)
+ x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w)
+ x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && x4.Uses == 1
+ && x5.Uses == 1
+ && x6.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [3] idx) w
+ x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+(MOVBstoreidx ptr idx w
+ x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w)
+ x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w)
+ x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w))
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w))
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w
+ x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w)
+ x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w)
+ x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && clobber(x0, x1, x2)
+ => (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+(MOVBstore [3] {s} p w
+ x0:(MOVBstore [2] {s} p (SRLconst [8] w)
+ x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w)
+ x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && isSamePtr(p1, p)
+ && clobber(x0, x1, x2)
+ => (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+(MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstoreidx ptr idx w mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+(MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+(MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
+ && x.Uses == 1
+ && s == nil
+ && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1))
+ && clobber(x)
+ => (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+
+// FP simplification
+(FNEGS (FMULS x y)) => (FNMULS x y)
+(FNEGD (FMULD x y)) => (FNMULD x y)
+(FMULS (FNEGS x) y) => (FNMULS x y)
+(FMULD (FNEGD x) y) => (FNMULD x y)
+(FNEGS (FNMULS x y)) => (FMULS x y)
+(FNEGD (FNMULD x y)) => (FMULD x y)
+(FNMULS (FNEGS x) y) => (FMULS x y)
+(FNMULD (FNEGD x) y) => (FMULD x y)
+(FADDS a (FMULS x y)) => (FMADDS a x y)
+(FADDD a (FMULD x y)) => (FMADDD a x y)
+(FSUBS a (FMULS x y)) => (FMSUBS a x y)
+(FSUBD a (FMULD x y)) => (FMSUBD a x y)
+(FSUBS (FMULS x y) a) => (FNMSUBS a x y)
+(FSUBD (FMULD x y) a) => (FNMSUBD a x y)
+(FADDS a (FNMULS x y)) => (FMSUBS a x y)
+(FADDD a (FNMULD x y)) => (FMSUBD a x y)
+(FSUBS a (FNMULS x y)) => (FMADDS a x y)
+(FSUBD a (FNMULD x y)) => (FMADDD a x y)
+(FSUBS (FNMULS x y) a) => (FNMADDS a x y)
+(FSUBD (FNMULD x y) a) => (FNMADDD a x y)
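+// A hedged illustration (not from this file): the Go spec allows fusing floating-point
+// multiply-adds, so an expression like
+//   r := a + x*y   // float64
+// may be lowered through FMULD+FADDD and then rewritten here to a single FMADDD.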
+
+(MOVBUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read8(sym, int64(off)))])
+(MOVHUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWUload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVDload [off] {sym} (SB) _) && symIsRO(sym) => (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+
+// Prefetch instructions (aux selects the option: 0 - PLDL1KEEP; 1 - PLDL1STRM)
+(PrefetchCache addr mem) => (PRFM [0] addr mem)
+(PrefetchCacheStreamed addr mem) => (PRFM [1] addr mem)
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
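+// A hypothetical trigger (an assumption, not from this file): a copy such as
+//   copy(dst, src)
+// may lower to a runtime.memmove call; when the size argument is a constant and
+// isInlinableMemmove reports the copy can be done inline, the call is replaced by an SSA Move.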
+
+((REV|REVW) ((REV|REVW) p)) => p
diff --git a/src/cmd/compile/internal/ssa/gen/ARM64Ops.go b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
new file mode 100644
index 0000000..2d03c44
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARM64Ops.go
@@ -0,0 +1,792 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands to multiple instructions and uses the
+//   tmp register (R27).
+
+// Suffixes encode the bit width of various instructions.
+// D (double word) = 64 bit
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand-coding regmasks.
+var regNamesARM64 = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18", // platform register, not used
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ // R27 = REGTMP not used in regalloc
+ "g", // aka R28
+ "R29", // frame pointer, not used
+ "R30", // aka REGLINK
+ "SP", // aka R31
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp0flags1 = regInfo{inputs: []regMask{0}, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11flags = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21nog = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp, 0}, outputs: []regMask{gp, 0}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gpg, gpg, gpg}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ fpstore2 = regInfo{inputs: []regMask{gpspsbg, gpg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ prefreg = regInfo{inputs: []regMask{gpspsbg}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "ADCS", commutative: true}, // arg0+arg1+carry, set flags.
+ {name: "ADCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "ADC"}, // ZR+ZR+carry
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "ADDSconstflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "ADDS", aux: "Int64"}, // arg0+auxint, set flags.
+ {name: "ADDSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "ADDS", commutative: true}, // arg0+arg1, set flags.
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int64"}, // arg0 - auxInt
+ {name: "SBCSflags", argLength: 3, reg: gp2flags1flags, typ: "(UInt64,Flags)", asm: "SBCS"}, // arg0-(arg1+borrowing), set flags.
+ {name: "SUBSflags", argLength: 2, reg: gp21flags, typ: "(UInt64,Flags)", asm: "SUBS"}, // arg0 - arg1, set flags.
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true}, // arg0 * arg1, 32-bit
+ {name: "MNEG", argLength: 2, reg: gp21, asm: "MNEG", commutative: true}, // -arg0 * arg1
+ {name: "MNEGW", argLength: 2, reg: gp21, asm: "MNEGW", commutative: true}, // -arg0 * arg1, 32-bit
+ {name: "MULH", argLength: 2, reg: gp21, asm: "SMULH", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "UMULH", argLength: 2, reg: gp21, asm: "UMULH", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULL", argLength: 2, reg: gp21, asm: "SMULL", commutative: true}, // arg0 * arg1, signed, 32-bit mult results in 64-bit
+ {name: "UMULL", argLength: 2, reg: gp21, asm: "UMULL", commutative: true}, // arg0 * arg1, unsigned, 32-bit mult results in 64-bit
+ {name: "DIV", argLength: 2, reg: gp21, asm: "SDIV"}, // arg0 / arg1, signed
+	{name: "UDIV", argLength: 2, reg: gp21, asm: "UDIV"},                     // arg0 / arg1, unsigned
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "SDIVW"}, // arg0 / arg1, signed, 32 bit
+	{name: "UDIVW", argLength: 2, reg: gp21, asm: "UDIVW"},                   // arg0 / arg1, unsigned, 32 bit
+ {name: "MOD", argLength: 2, reg: gp21, asm: "REM"}, // arg0 % arg1, signed
+ {name: "UMOD", argLength: 2, reg: gp21, asm: "UREM"}, // arg0 % arg1, unsigned
+ {name: "MODW", argLength: 2, reg: gp21, asm: "REMW"}, // arg0 % arg1, signed, 32 bit
+ {name: "UMODW", argLength: 2, reg: gp21, asm: "UREMW"}, // arg0 % arg1, unsigned, 32 bit
+
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0 + arg1
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0 - arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0 * arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true}, // arg0 * arg1
+ {name: "FNMULS", argLength: 2, reg: fp21, asm: "FNMULS", commutative: true}, // -(arg0 * arg1)
+ {name: "FNMULD", argLength: 2, reg: fp21, asm: "FNMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0 / arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int64"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "EON", argLength: 2, reg: gp21, asm: "EON"}, // arg0 ^ ^arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0 | ^arg1
+
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGSflags", argLength: 1, reg: gp11flags, typ: "(UInt64,Flags)", asm: "NEGS"}, // -arg0, set flags.
+ {name: "NGCzerocarry", argLength: 1, reg: gp0flags1, typ: "UInt64", asm: "NGC"}, // -1 if borrowing, 0 otherwise.
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD"}, // abs(arg0), float64
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS"}, // -arg0, float32
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD"}, // -arg0, float64
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD"}, // sqrt(arg0), float64
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // byte reverse, 64-bit
+ {name: "REVW", argLength: 1, reg: gp11, asm: "REVW"}, // byte reverse, 32-bit
+ {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // byte reverse in each 16-bit halfword, 64-bit
+ {name: "REV16W", argLength: 1, reg: gp11, asm: "REV16W"}, // byte reverse in each 16-bit halfword, 32-bit
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // bit reverse, 64-bit
+ {name: "RBITW", argLength: 1, reg: gp11, asm: "RBITW"}, // bit reverse, 32-bit
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero, 64-bit
+ {name: "CLZW", argLength: 1, reg: gp11, asm: "CLZW"}, // count leading zero, 32-bit
+ {name: "VCNT", argLength: 1, reg: fp11, asm: "VCNT"}, // count set bits for each 8-bit unit and store the result in each 8-bit unit
+ {name: "VUADDLV", argLength: 1, reg: fp11, asm: "VUADDLV"}, // unsigned sum of eight bytes in a 64-bit value, zero extended to 64-bit.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // 3-operand, the addend comes first
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // +arg0 + (arg1 * arg2)
+ {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD"}, // +arg0 + (arg1 * arg2)
+ {name: "FNMADDS", argLength: 3, reg: fp31, asm: "FNMADDS"}, // -arg0 - (arg1 * arg2)
+ {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD"}, // -arg0 - (arg1 * arg2)
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // +arg0 - (arg1 * arg2)
+ {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD"}, // +arg0 - (arg1 * arg2)
+ {name: "FNMSUBS", argLength: 3, reg: fp31, asm: "FNMSUBS"}, // -arg0 + (arg1 * arg2)
+ {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD"}, // -arg0 + (arg1 * arg2)
+ {name: "MADD", argLength: 3, reg: gp31, asm: "MADD"}, // +arg0 + (arg1 * arg2)
+ {name: "MADDW", argLength: 3, reg: gp31, asm: "MADDW"}, // +arg0 + (arg1 * arg2), 32-bit
+ {name: "MSUB", argLength: 3, reg: gp31, asm: "MSUB"}, // +arg0 - (arg1 * arg2)
+ {name: "MSUBW", argLength: 3, reg: gp31, asm: "MSUBW"}, // +arg0 - (arg1 * arg2), 32-bit
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "LSL"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "LSL", aux: "Int64"}, // arg0 << auxInt, auxInt should be in the range 0 to 63.
+ {name: "SRL", argLength: 2, reg: gp21, asm: "LSR"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "LSR", aux: "Int64"}, // arg0 >> auxInt, unsigned, auxInt should be in the range 0 to 63.
+ {name: "SRA", argLength: 2, reg: gp21, asm: "ASR"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "ASR", aux: "Int64"}, // arg0 >> auxInt, signed, auxInt should be in the range 0 to 63.
+ {name: "ROR", argLength: 2, reg: gp21, asm: "ROR"}, // arg0 right rotate by (arg1 mod 64) bits
+ {name: "RORW", argLength: 2, reg: gp21, asm: "RORW"}, // arg0 right rotate by (arg1 mod 32) bits
+ {name: "RORconst", argLength: 1, reg: gp11, asm: "ROR", aux: "Int64"}, // arg0 right rotate by auxInt bits, auxInt should be in the range 0 to 63.
+ {name: "RORWconst", argLength: 1, reg: gp11, asm: "RORW", aux: "Int64"}, // uint32(arg0) right rotate by auxInt bits, auxInt should be in the range 0 to 31.
+ {name: "EXTRconst", argLength: 2, reg: gp21, asm: "EXTR", aux: "Int64"}, // extract 64 bits from arg0:arg1 starting at lsb auxInt, auxInt should be in the range 0 to 63.
+ {name: "EXTRWconst", argLength: 2, reg: gp21, asm: "EXTRW", aux: "Int64"}, // extract 32 bits from arg0[31:0]:arg1[31:0] starting at lsb auxInt and zero top 32 bits, auxInt should be in the range 0 to 31.
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1, 32 bit
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt, 32 bit
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true}, // arg0 compare to -arg1, provided arg1 is not 1<<63
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "CMNW", argLength: 2, reg: gp2flags, asm: "CMNW", typ: "Flags", commutative: true}, // arg0 compare to -arg1, 32 bit, provided arg1 is not 1<<31
+ {name: "CMNWconst", argLength: 1, reg: gp1flags, asm: "CMNW", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt, 32 bit
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int64", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TSTW", argLength: 2, reg: gp2flags, asm: "TSTW", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0, 32 bit
+ {name: "TSTWconst", argLength: 1, reg: gp1flags, asm: "TSTW", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0, 32 bit
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "FCMPD", argLength: 2, reg: fp2flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+ {name: "FCMPS0", argLength: 1, reg: fp1flags, asm: "FCMPS", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "FCMPD0", argLength: 1, reg: fp1flags, asm: "FCMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // shifted ops
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "MVNshiftRO", argLength: 1, reg: gp11, asm: "MVN", aux: "Int64"}, // ^(arg0 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "NEGshiftLL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "NEGshiftRL", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "NEGshiftRA", argLength: 1, reg: gp11, asm: "NEG", aux: "Int64"}, // -(arg0>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int64"}, // arg0 + arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int64"}, // arg0 - arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ANDshiftRO", argLength: 2, reg: gp21, asm: "AND", aux: "Int64"}, // arg0 & (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORshiftRO", argLength: 2, reg: gp21, asm: "ORR", aux: "Int64"}, // arg0 | arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "XORshiftRO", argLength: 2, reg: gp21, asm: "EOR", aux: "Int64"}, // arg0 ^ arg1 ROR auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "BICshiftRO", argLength: 2, reg: gp21, asm: "BIC", aux: "Int64"}, // arg0 &^ (arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftLL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "EONshiftRL", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftRA", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "EONshiftRO", argLength: 2, reg: gp21, asm: "EON", aux: "Int64"}, // arg0 ^ ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftLL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1<<auxInt), auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRL", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRA", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1>>auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "ORNshiftRO", argLength: 2, reg: gp21, asm: "ORN", aux: "Int64"}, // arg0 | ^(arg1 ROR auxInt), signed shift, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1<<auxInt, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int64", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1<<auxInt) compare to 0, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int64", typ: "Flags"}, // (arg0 + arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1<<auxInt) compare to 0, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, unsigned shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1>>auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+ {name: "TSTshiftRO", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int64", typ: "Flags"}, // (arg0 & arg1 ROR auxInt) compare to 0, signed shift, auxInt should be in the range 0 to 63.
+
+ // bitfield ops
+ // for all bitfield ops lsb is auxInt>>8, width is auxInt&0xff
+ // insert low width bits of arg1 into the result starting at bit lsb, copy other bits from arg0
+ {name: "BFI", argLength: 2, reg: gp21nog, asm: "BFI", aux: "ARM64BitField", resultInArg0: true},
+ // extract width bits of arg1 starting at bit lsb and insert at low end of result, copy other bits from arg0
+ {name: "BFXIL", argLength: 2, reg: gp21nog, asm: "BFXIL", aux: "ARM64BitField", resultInArg0: true},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left of the inserted bit field are set to the high/sign bit of the inserted bit field, bits to the right are zeroed
+ {name: "SBFIZ", argLength: 1, reg: gp11, asm: "SBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are set to the high/sign bit of the extracted bitfield
+ {name: "SBFX", argLength: 1, reg: gp11, asm: "SBFX", aux: "ARM64BitField"},
+ // insert low width bits of arg0 into the result starting at bit lsb, bits to the left and right of the inserted bit field are zeroed
+ {name: "UBFIZ", argLength: 1, reg: gp11, asm: "UBFIZ", aux: "ARM64BitField"},
+ // extract width bits of arg0 starting at bit lsb and insert at low end of result, remaining high bits are zeroed
+ {name: "UBFX", argLength: 1, reg: gp11, asm: "UBFX", aux: "ARM64BitField"},
+
+ // moves
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "UInt64", rematerializeable: true}, // 64 bits from auxint
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVS", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVD", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVSload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVS", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "FMOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "FMOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed load
+ {name: "MOVDloadidx", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit dword from arg0 + arg1, arg2 = mem.
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load 8-bit word from arg0 + arg1, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load 8-bit word from arg0 + arg1, zero-extended to 64-bit, arg2=mem.
+ {name: "FMOVSloadidx", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1, arg2=mem.
+ {name: "FMOVDloadidx", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1, arg2=mem.
+
+ // shifted register indexed load
+ {name: "MOVHloadidx2", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load 16-bit half-word from arg0 + arg1*2, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVHUloadidx2", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load 16-bit half-word from arg0 + arg1*2, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVWloadidx4", argLength: 3, reg: gp2load, asm: "MOVW", typ: "Int32"}, // load 32-bit word from arg0 + arg1*4, sign-extended to 64-bit, arg2=mem.
+ {name: "MOVWUloadidx4", argLength: 3, reg: gp2load, asm: "MOVWU", typ: "UInt32"}, // load 32-bit word from arg0 + arg1*4, zero-extended to 64-bit, arg2=mem.
+ {name: "MOVDloadidx8", argLength: 3, reg: gp2load, asm: "MOVD", typ: "UInt64"}, // load 64-bit double-word from arg0 + arg1*8, arg2 = mem.
+ {name: "FMOVSloadidx4", argLength: 3, reg: fp2load, asm: "FMOVS", typ: "Float32"}, // load 32-bit float from arg0 + arg1*4, arg2 = mem.
+ {name: "FMOVDloadidx8", argLength: 3, reg: fp2load, asm: "FMOVD", typ: "Float64"}, // load 64-bit float from arg0 + arg1*8, arg2 = mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "STP", argLength: 4, reg: gpstore2, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of arg1 and arg2 to arg0 + auxInt + aux. arg3=mem.
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVS", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "FMOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ // register indexed store
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstore2, asm: "MOVB", typ: "Mem"}, // store 1 byte of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1, arg3 = mem.
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1, arg3=mem.
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1, arg3=mem.
+
+ // shifted register indexed store
+ {name: "MOVHstoreidx2", argLength: 4, reg: gpstore2, asm: "MOVH", typ: "Mem"}, // store 2 bytes of arg2 to arg0 + arg1*2, arg3 = mem.
+ {name: "MOVWstoreidx4", argLength: 4, reg: gpstore2, asm: "MOVW", typ: "Mem"}, // store 4 bytes of arg2 to arg0 + arg1*4, arg3 = mem.
+ {name: "MOVDstoreidx8", argLength: 4, reg: gpstore2, asm: "MOVD", typ: "Mem"}, // store 8 bytes of arg2 to arg0 + arg1*8, arg3 = mem.
+ {name: "FMOVSstoreidx4", argLength: 4, reg: fpstore2, asm: "FMOVS", typ: "Mem"}, // store 32-bit float of arg2 to arg0 + arg1*4, arg3=mem.
+ {name: "FMOVDstoreidx8", argLength: 4, reg: fpstore2, asm: "FMOVD", typ: "Mem"}, // store 64-bit float of arg2 to arg0 + arg1*8, arg3=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVQstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "STP", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 16 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // register indexed store zero
+ {name: "MOVBstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVB", typ: "Mem"}, // store 1 byte of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVHstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVWstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1, arg2 = mem.
+ {name: "MOVDstorezeroidx", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1, arg2 = mem.
+
+ // shifted register indexed store zero
+ {name: "MOVHstorezeroidx2", argLength: 3, reg: gpstore, asm: "MOVH", typ: "Mem"}, // store 2 bytes of zero to arg0 + arg1*2, arg2 = mem.
+ {name: "MOVWstorezeroidx4", argLength: 3, reg: gpstore, asm: "MOVW", typ: "Mem"}, // store 4 bytes of zero to arg0 + arg1*4, arg2 = mem.
+ {name: "MOVDstorezeroidx8", argLength: 3, reg: gpstore, asm: "MOVD", typ: "Mem"}, // store 8 bytes of zero to arg0 + arg1*8, arg2 = mem.
+
+ {name: "FMOVDgpfp", argLength: 1, reg: gpfp, asm: "FMOVD"}, // move int64 to float64 (no conversion)
+ {name: "FMOVDfpgp", argLength: 1, reg: fpgp, asm: "FMOVD"}, // move float64 to int64 (no conversion)
+ {name: "FMOVSgpfp", argLength: 1, reg: gpfp, asm: "FMOVS"}, // move 32bits from int to float reg (no conversion)
+ {name: "FMOVSfpgp", argLength: 1, reg: fpgp, asm: "FMOVS"}, // move 32bits from float to int reg, zero extend (no conversion)
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOVD"}, // move from arg0
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "SCVTFWS", argLength: 1, reg: gpfp, asm: "SCVTFWS"}, // int32 -> float32
+ {name: "SCVTFWD", argLength: 1, reg: gpfp, asm: "SCVTFWD"}, // int32 -> float64
+ {name: "UCVTFWS", argLength: 1, reg: gpfp, asm: "UCVTFWS"}, // uint32 -> float32
+ {name: "UCVTFWD", argLength: 1, reg: gpfp, asm: "UCVTFWD"}, // uint32 -> float64
+ {name: "SCVTFS", argLength: 1, reg: gpfp, asm: "SCVTFS"}, // int64 -> float32
+ {name: "SCVTFD", argLength: 1, reg: gpfp, asm: "SCVTFD"}, // int64 -> float64
+ {name: "UCVTFS", argLength: 1, reg: gpfp, asm: "UCVTFS"}, // uint64 -> float32
+ {name: "UCVTFD", argLength: 1, reg: gpfp, asm: "UCVTFD"}, // uint64 -> float64
+ {name: "FCVTZSSW", argLength: 1, reg: fpgp, asm: "FCVTZSSW"}, // float32 -> int32
+ {name: "FCVTZSDW", argLength: 1, reg: fpgp, asm: "FCVTZSDW"}, // float64 -> int32
+ {name: "FCVTZUSW", argLength: 1, reg: fpgp, asm: "FCVTZUSW"}, // float32 -> uint32
+ {name: "FCVTZUDW", argLength: 1, reg: fpgp, asm: "FCVTZUDW"}, // float64 -> uint32
+ {name: "FCVTZSS", argLength: 1, reg: fpgp, asm: "FCVTZSS"}, // float32 -> int64
+ {name: "FCVTZSD", argLength: 1, reg: fpgp, asm: "FCVTZSD"}, // float64 -> int64
+ {name: "FCVTZUS", argLength: 1, reg: fpgp, asm: "FCVTZUS"}, // float32 -> uint64
+ {name: "FCVTZUD", argLength: 1, reg: fpgp, asm: "FCVTZUD"}, // float64 -> uint64
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD"}, // float32 -> float64
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS"}, // float64 -> float32
+
+ // floating-point round to integral
+ {name: "FRINTAD", argLength: 1, reg: fp11, asm: "FRINTAD"},
+ {name: "FRINTMD", argLength: 1, reg: fp11, asm: "FRINTMD"},
+ {name: "FRINTND", argLength: 1, reg: fp11, asm: "FRINTND"},
+ {name: "FRINTPD", argLength: 1, reg: fp11, asm: "FRINTPD"},
+ {name: "FRINTZD", argLength: 1, reg: fp11, asm: "FRINTZD"},
+
+ // conditional instructions; auxint is
+ // one of the arm64 comparison pseudo-ops (LessThan, LessThanU, etc.)
+ {name: "CSEL", argLength: 3, reg: gp2flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : arg1
+ {name: "CSEL0", argLength: 2, reg: gp1flags1, asm: "CSEL", aux: "CCop"}, // auxint(flags) ? arg0 : 0
+ {name: "CSINC", argLength: 3, reg: gp2flags1, asm: "CSINC", aux: "CCop"}, // auxint(flags) ? arg0 : arg1 + 1
+ {name: "CSINV", argLength: 3, reg: gp2flags1, asm: "CSINV", aux: "CCop"}, // auxint(flags) ? arg0 : ^arg1
+ {name: "CSNEG", argLength: 3, reg: gp2flags1, asm: "CSNEG", aux: "CCop"}, // auxint(flags) ? arg0 : -arg1
+ {name: "CSETM", argLength: 1, reg: readflags, asm: "CSETM", aux: "CCop"}, // auxint(flags) ? -1 : 0
+
+ // function calls
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). last arg=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{gpsp, buildReg("R26"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, last arg=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, last arg=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x<=y false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags}, // bool, true flags encode signed x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags}, // bool, true flags encode signed x>=y false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<y false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x<=y false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>y false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true flags encode unsigned x>=y false otherwise.
+ {name: "LessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y false otherwise.
+ {name: "LessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y false otherwise.
+ {name: "GreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y false otherwise.
+ {name: "GreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y false otherwise.
+ {name: "NotLessThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>=y || x is unordered with y, false otherwise.
+ {name: "NotLessEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x>y || x is unordered with y, false otherwise.
+ {name: "NotGreaterThanF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<=y || x is unordered with y, false otherwise.
+ {name: "NotGreaterEqualF", argLength: 1, reg: readflags}, // bool, true flags encode floating-point x<y || x is unordered with y, false otherwise.
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R20 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R30"),
+ },
+ faultOnNilArg0: true,
+ unsafePoint: true, // FP maintenance around DUFFZERO can be clobbered by interrupts
+ },
+
+ // large zeroing
+ // arg0 = address of memory to zero (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg1 = address of the last 16-byte unit to zero
+ // arg2 = mem
+ // returns mem
+ // STP.P (ZR,ZR), 16(R16)
+ // CMP Rarg1, R16
+ // BLE -2(PC)
+ // Note: the-end-of-the-memory may not be a valid pointer. It's a problem if it is spilled.
+ // the-end-of-the-memory - 16 is within the area to zero, ok to spill.
+ {
+ name: "LoweredZero",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R16"), gp},
+ clobbers: buildReg("R16"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R21, changed as side effect)
+ // arg1 = address of src memory (in R20, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ // R20, R21 changed as side effect
+ // R16 and R17 may be clobbered by linker trampoline.
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R21"), buildReg("R20")},
+ clobbers: buildReg("R16 R17 R20 R21 R26 R30"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true, // FP maintenance around DUFFCOPY can be clobbered by interrupts
+ },
+
+ // large move
+ // arg0 = address of dst memory (in R17 aka arm64.REGRT2, changed as side effect)
+ // arg1 = address of src memory (in R16 aka arm64.REGRT1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVD.P 8(R16), Rtmp
+ // MOVD.P Rtmp, 8(R17)
+ // CMP Rarg2, R16
+ // BLE -3(PC)
+ // Note: the-end-of-src may not be a valid pointer. It's a problem if it is spilled.
+ // the-end-of-src - 8 is within the area to copy, ok to spill.
+ {
+ name: "LoweredMove",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R17"), buildReg("R16"), gp},
+ clobbers: buildReg("R16 R17"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R26 (arm64.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R26")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // atomic loads.
+ // load from arg0. arg1=mem. auxint must be zero.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LDAR", argLength: 2, reg: gpload, asm: "LDAR", faultOnNilArg0: true},
+ {name: "LDARB", argLength: 2, reg: gpload, asm: "LDARB", faultOnNilArg0: true},
+ {name: "LDARW", argLength: 2, reg: gpload, asm: "LDARW", faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory. auxint must be zero.
+ {name: "STLRB", argLength: 3, reg: gpstore, asm: "STLRB", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLR", argLength: 3, reg: gpstore, asm: "STLR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "STLRW", argLength: 3, reg: gpstore, asm: "STLRW", faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // STLXR Rarg1, (Rarg0), Rtmp
+ // CBNZ Rtmp, -2(PC)
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic exchange variant.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>. auxint must be zero.
+ // SWPALD Rarg1, (Rarg0), Rout
+ {name: "LoweredAtomicExchange64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add variant.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDADDAL (Rarg0), Rarg1, Rout
+ // ADD Rarg1, Rout
+ {name: "LoweredAtomicAdd64Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // LDAXR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STLXR Rarg2, (Rarg0), Rtmp
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap variant.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV Rarg1, Rtmp
+ // CASAL Rtmp, (Rarg0), Rarg2
+ // CMP Rarg1, Rtmp
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas32Variant", argLength: 4, reg: gpcas, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // LDAXR (Rarg0), Rout
+ // AND/OR Rarg1, Rout
+ // STLXR Rout, (Rarg0), Rtmp
+ // CBNZ Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "AND", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpxchg, resultNotInArgs: true, asm: "ORR", typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or variant.
+ // *arg0 &= (|=) arg1. arg2=mem. returns <new content of *arg0, memory>. auxint must be zero.
+ // AND:
+ // MVN Rarg1, Rtemp
+ // LDANDALB Rtemp, (Rarg0), Rout
+ // AND Rarg1, Rout
+ // OR:
+ // LDORALB Rarg1, (Rarg0), Rout
+ // ORR Rarg1, Rout
+ {name: "LoweredAtomicAnd8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAnd32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr8Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt8,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32Variant", argLength: 3, reg: gpxchg, resultNotInArgs: true, typ: "(UInt32,Mem)", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R30 (LR) because it's a call.
+ // R16 and R17 may be clobbered by linker trampoline.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R16 R17 R30")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Prefetch instruction
+ // Prefetch the address in arg0 with option aux. arg0=addr, arg1=memory, aux=option.
+ {name: "PRFM", argLength: 2, aux: "Int64", reg: prefreg, asm: "PRFM", hasSideEffects: true},
+
+ // Publication barrier
+ {name: "DMB", argLength: 1, aux: "Int64", asm: "DMB", hasSideEffects: true}, // Do data barrier. arg0=memory, aux=option.
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "Z", controls: 1}, // Control == 0 (take a register instead of flags)
+ {name: "NZ", controls: 1}, // Control != 0
+ {name: "ZW", controls: 1}, // Control == 0, 32-bit
+ {name: "NZW", controls: 1}, // Control != 0, 32-bit
+ {name: "TBZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) == 0
+ {name: "TBNZ", controls: 1, aux: "Int64"}, // Control & (1 << AuxInt) != 0
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+ }
+
+ archs = append(archs, arch{
+ name: "ARM64",
+ pkg: "cmd/internal/obj/arm64",
+ genfile: "../../arm64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM64,
+ ParamIntRegNames: "R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15",
+ ParamFloatRegNames: "F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R30"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/ARMOps.go b/src/cmd/compile/internal/ssa/gen/ARMOps.go
new file mode 100644
index 0000000..3803f27
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/ARMOps.go
@@ -0,0 +1,603 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R11).
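+// For example, an ADDconst whose auxInt is 0x12345678 cannot be encoded as a single ARM
+// immediate, so the assembler first materializes the constant in R11 and then performs the add.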
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+var regNamesARM = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "g", // aka R10
+ "R11", // tmp
+ "R12",
+ "SP", // aka R13
+ "R14", // link
+ "R15", // pc
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15", // tmp
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesARM) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesARM {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r0 = buildReg("R0")
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11carry = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp, 0}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp1flags = regInfo{inputs: []regMask{gpg}}
+ gp1flags1 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp21carry = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, 0}}
+ gp2flags = regInfo{inputs: []regMask{gpg, gpg}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp, gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp31carry = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp, 0}}
+ gp3flags = regInfo{inputs: []regMask{gp, gp, gp}}
+ gp3flags1 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gp2load = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gp2store = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}, clobbers: buildReg("F15")} // int-float conversion uses F15 as tmp
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}, clobbers: buildReg("F15")}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32"}, // arg0 - auxInt
+ {name: "RSB", argLength: 2, reg: gp21, asm: "RSB"}, // arg1 - arg0
+ {name: "RSBconst", argLength: 1, reg: gp11, asm: "RSB", aux: "Int32"}, // auxInt - arg0
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "HMUL", argLength: 2, reg: gp21, asm: "MULL", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "HMULU", argLength: 2, reg: gp21, asm: "MULLU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+
+ // udiv runtime call for soft division
+ // output0 = arg0/arg1, output1 = arg0%arg1
+ // see ../../../../../runtime/vlop_arm.s
+ {
+ name: "CALLudiv",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ outputs: []regMask{buildReg("R0"), buildReg("R1")},
+ clobbers: buildReg("R2 R3 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ clobberFlags: true,
+ typ: "(UInt32,UInt32)",
+ call: false, // TODO(mdempsky): Should this be true?
+ },
+
+ {name: "ADDS", argLength: 2, reg: gp21carry, asm: "ADD", commutative: true}, // arg0 + arg1, set carry flag
+ {name: "ADDSconst", argLength: 1, reg: gp11carry, asm: "ADD", aux: "Int32"}, // arg0 + auxInt, set carry flag
+ {name: "ADC", argLength: 3, reg: gp2flags1, asm: "ADC", commutative: true}, // arg0 + arg1 + carry, arg2=flags
+ {name: "ADCconst", argLength: 2, reg: gp1flags1, asm: "ADC", aux: "Int32"}, // arg0 + auxInt + carry, arg1=flags
+ {name: "SUBS", argLength: 2, reg: gp21carry, asm: "SUB"}, // arg0 - arg1, set carry flag
+ {name: "SUBSconst", argLength: 1, reg: gp11carry, asm: "SUB", aux: "Int32"}, // arg0 - auxInt, set carry flag
+ {name: "RSBSconst", argLength: 1, reg: gp11carry, asm: "RSB", aux: "Int32"}, // auxInt - arg0, set carry flag
+ {name: "SBC", argLength: 3, reg: gp2flags1, asm: "SBC"}, // arg0 - arg1 - carry, arg2=flags
+ {name: "SBCconst", argLength: 2, reg: gp1flags1, asm: "SBC", aux: "Int32"}, // arg0 - auxInt - carry, arg1=flags
+ {name: "RSCconst", argLength: 2, reg: gp1flags1, asm: "RSC", aux: "Int32"}, // auxInt - arg0 - carry, arg1=flags
+
+ {name: "MULLU", argLength: 2, reg: gp22, asm: "MULLU", commutative: true}, // arg0 * arg1, high 32 bits in out0, low 32 bits in out1
+ {name: "MULA", argLength: 3, reg: gp31, asm: "MULA"}, // arg0 * arg1 + arg2
+ {name: "MULS", argLength: 3, reg: gp31, asm: "MULS"}, // arg2 - arg0 * arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "NMULF", argLength: 2, reg: fp21, asm: "NMULF", commutative: true}, // -(arg0 * arg1)
+ {name: "NMULD", argLength: 2, reg: fp21, asm: "NMULD", commutative: true}, // -(arg0 * arg1)
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "MULAF", argLength: 3, reg: fp31, asm: "MULAF", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULAD", argLength: 3, reg: fp31, asm: "MULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+ {name: "MULSF", argLength: 3, reg: fp31, asm: "MULSF", resultInArg0: true}, // arg0 - (arg1 * arg2)
+ {name: "MULSD", argLength: 3, reg: fp31, asm: "MULSD", resultInArg0: true}, // arg0 - (arg1 * arg2)
+
+ // FMULAD only exists on platforms with the VFPv4 instruction set.
+ // Any use must be preceded by a successful check of runtime.arm_support_vfpv4.
+ {name: "FMULAD", argLength: 3, reg: fp31, asm: "FMULAD", resultInArg0: true}, // arg0 + (arg1 * arg2)
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "ORR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "ORR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "EOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "EOR", aux: "Int32"}, // arg0 ^ auxInt
+ {name: "BIC", argLength: 2, reg: gp21, asm: "BIC"}, // arg0 &^ arg1
+ {name: "BICconst", argLength: 1, reg: gp11, asm: "BIC", aux: "Int32"}, // arg0 &^ auxInt
+
+ // bit extraction, AuxInt = Width<<8 | LSB
+ {name: "BFX", argLength: 1, reg: gp11, asm: "BFX", aux: "Int32"}, // extract W bits from bit L in arg0, then signed extend
+ {name: "BFXU", argLength: 1, reg: gp11, asm: "BFXU", aux: "Int32"}, // extract W bits from bit L in arg0, then unsigned extend
+
+ // unary ops
+ {name: "MVN", argLength: 1, reg: gp11, asm: "MVN"}, // ^arg0
+
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+ {name: "ABSD", argLength: 1, reg: fp11, asm: "ABSD"}, // abs(arg0), float64
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"}, // count leading zero
+ {name: "REV", argLength: 1, reg: gp11, asm: "REV"}, // reverse byte order
+ {name: "REV16", argLength: 1, reg: gp11, asm: "REV16"}, // reverse byte order in 16-bit halfwords
+ {name: "RBIT", argLength: 1, reg: gp11, asm: "RBIT"}, // reverse bit order
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 256
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, 0 <= auxInt < 32
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 256
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, unsigned, 0 <= auxInt < 32
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 256
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, 0 <= auxInt < 32
+ {name: "SRR", argLength: 2, reg: gp21}, // arg0 right rotate by arg1 bits
+ {name: "SRRconst", argLength: 1, reg: gp11, aux: "Int32"}, // arg0 right rotate by auxInt bits, 0 <= auxInt < 32
+
+ // auxInt for all of these satisfies 0 <= auxInt < 32
+ {name: "ADDshiftLL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt
+ {name: "ADDshiftRL", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift
+ {name: "ADDshiftRA", argLength: 2, reg: gp21, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift
+ {name: "SUBshiftLL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt
+ {name: "SUBshiftRL", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift
+ {name: "SUBshiftRA", argLength: 2, reg: gp21, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift
+ {name: "RSBshiftLL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0
+ {name: "RSBshiftRL", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift
+ {name: "RSBshiftRA", argLength: 2, reg: gp21, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift
+ {name: "ANDshiftLL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1<<auxInt)
+ {name: "ANDshiftRL", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), unsigned shift
+ {name: "ANDshiftRA", argLength: 2, reg: gp21, asm: "AND", aux: "Int32"}, // arg0 & (arg1>>auxInt), signed shift
+ {name: "ORshiftLL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1<<auxInt
+ {name: "ORshiftRL", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, unsigned shift
+ {name: "ORshiftRA", argLength: 2, reg: gp21, asm: "ORR", aux: "Int32"}, // arg0 | arg1>>auxInt, signed shift
+ {name: "XORshiftLL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1<<auxInt
+ {name: "XORshiftRL", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, unsigned shift
+ {name: "XORshiftRA", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ arg1>>auxInt, signed shift
+ {name: "XORshiftRR", argLength: 2, reg: gp21, asm: "EOR", aux: "Int32"}, // arg0 ^ (arg1 right rotate by auxInt)
+ {name: "BICshiftLL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1<<auxInt)
+ {name: "BICshiftRL", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), unsigned shift
+ {name: "BICshiftRA", argLength: 2, reg: gp21, asm: "BIC", aux: "Int32"}, // arg0 &^ (arg1>>auxInt), signed shift
+ {name: "MVNshiftLL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0<<auxInt)
+ {name: "MVNshiftRL", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), unsigned shift
+ {name: "MVNshiftRA", argLength: 1, reg: gp11, asm: "MVN", aux: "Int32"}, // ^(arg0>>auxInt), signed shift
+
+ {name: "ADCshiftLL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1<<auxInt + carry, arg2=flags
+ {name: "ADCshiftRL", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, unsigned shift, arg2=flags
+ {name: "ADCshiftRA", argLength: 3, reg: gp2flags1, asm: "ADC", aux: "Int32"}, // arg0 + arg1>>auxInt + carry, signed shift, arg2=flags
+ {name: "SBCshiftLL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1<<auxInt - carry, arg2=flags
+ {name: "SBCshiftRL", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, unsigned shift, arg2=flags
+ {name: "SBCshiftRA", argLength: 3, reg: gp2flags1, asm: "SBC", aux: "Int32"}, // arg0 - arg1>>auxInt - carry, signed shift, arg2=flags
+ {name: "RSCshiftLL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1<<auxInt - arg0 - carry, arg2=flags
+ {name: "RSCshiftRL", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, unsigned shift, arg2=flags
+ {name: "RSCshiftRA", argLength: 3, reg: gp2flags1, asm: "RSC", aux: "Int32"}, // arg1>>auxInt - arg0 - carry, signed shift, arg2=flags
+
+ {name: "ADDSshiftLL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1<<auxInt, set carry flag
+ {name: "ADDSshiftRL", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, unsigned shift, set carry flag
+ {name: "ADDSshiftRA", argLength: 2, reg: gp21carry, asm: "ADD", aux: "Int32"}, // arg0 + arg1>>auxInt, signed shift, set carry flag
+ {name: "SUBSshiftLL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1<<auxInt, set carry flag
+ {name: "SUBSshiftRL", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, unsigned shift, set carry flag
+ {name: "SUBSshiftRA", argLength: 2, reg: gp21carry, asm: "SUB", aux: "Int32"}, // arg0 - arg1>>auxInt, signed shift, set carry flag
+ {name: "RSBSshiftLL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1<<auxInt - arg0, set carry flag
+ {name: "RSBSshiftRL", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRA", argLength: 2, reg: gp21carry, asm: "RSB", aux: "Int32"}, // arg1>>auxInt - arg0, signed shift, set carry flag
+
+ {name: "ADDshiftLLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1<<arg2
+ {name: "ADDshiftRLreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift
+ {name: "ADDshiftRAreg", argLength: 3, reg: gp31, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift
+ {name: "SUBshiftLLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1<<arg2
+ {name: "SUBshiftRLreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift
+ {name: "SUBshiftRAreg", argLength: 3, reg: gp31, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift
+ {name: "RSBshiftLLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1<<arg2 - arg0
+ {name: "RSBshiftRLreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift
+ {name: "RSBshiftRAreg", argLength: 3, reg: gp31, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift
+ {name: "ANDshiftLLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1<<arg2)
+ {name: "ANDshiftRLreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), unsigned shift
+ {name: "ANDshiftRAreg", argLength: 3, reg: gp31, asm: "AND"}, // arg0 & (arg1>>arg2), signed shift
+ {name: "ORshiftLLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1<<arg2
+ {name: "ORshiftRLreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, unsigned shift
+ {name: "ORshiftRAreg", argLength: 3, reg: gp31, asm: "ORR"}, // arg0 | arg1>>arg2, signed shift
+ {name: "XORshiftLLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1<<arg2
+ {name: "XORshiftRLreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, unsigned shift
+ {name: "XORshiftRAreg", argLength: 3, reg: gp31, asm: "EOR"}, // arg0 ^ arg1>>arg2, signed shift
+ {name: "BICshiftLLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1<<arg2)
+ {name: "BICshiftRLreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), unsigned shift
+ {name: "BICshiftRAreg", argLength: 3, reg: gp31, asm: "BIC"}, // arg0 &^ (arg1>>arg2), signed shift
+ {name: "MVNshiftLLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0<<arg1)
+ {name: "MVNshiftRLreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), unsigned shift
+ {name: "MVNshiftRAreg", argLength: 2, reg: gp21, asm: "MVN"}, // ^(arg0>>arg1), signed shift
+
+ {name: "ADCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1<<arg2 + carry, arg3=flags
+ {name: "ADCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, unsigned shift, arg3=flags
+ {name: "ADCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "ADC"}, // arg0 + arg1>>arg2 + carry, signed shift, arg3=flags
+ {name: "SBCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1<<arg2 - carry, arg3=flags
+ {name: "SBCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, unsigned shift, arg3=flags
+ {name: "SBCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "SBC"}, // arg0 - arg1>>arg2 - carry, signed shift, arg3=flags
+ {name: "RSCshiftLLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1<<arg2 - arg0 - carry, arg3=flags
+ {name: "RSCshiftRLreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, unsigned shift, arg3=flags
+ {name: "RSCshiftRAreg", argLength: 4, reg: gp3flags1, asm: "RSC"}, // arg1>>arg2 - arg0 - carry, signed shift, arg3=flags
+
+ {name: "ADDSshiftLLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1<<arg2, set carry flag
+ {name: "ADDSshiftRLreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, unsigned shift, set carry flag
+ {name: "ADDSshiftRAreg", argLength: 3, reg: gp31carry, asm: "ADD"}, // arg0 + arg1>>arg2, signed shift, set carry flag
+ {name: "SUBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1<<arg2, set carry flag
+ {name: "SUBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, unsigned shift, set carry flag
+ {name: "SUBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "SUB"}, // arg0 - arg1>>arg2, signed shift, set carry flag
+ {name: "RSBSshiftLLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1<<arg2 - arg0, set carry flag
+ {name: "RSBSshiftRLreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, unsigned shift, set carry flag
+ {name: "RSBSshiftRAreg", argLength: 3, reg: gp31carry, asm: "RSB"}, // arg1>>arg2 - arg0, signed shift, set carry flag
+
+ // comparisons
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to auxInt
+ {name: "CMN", argLength: 2, reg: gp2flags, asm: "CMN", typ: "Flags", commutative: true},   // arg0 compare to -arg1, provided arg1 is not 1<<31
+ {name: "CMNconst", argLength: 1, reg: gp1flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -auxInt
+ {name: "TST", argLength: 2, reg: gp2flags, asm: "TST", typ: "Flags", commutative: true}, // arg0 & arg1 compare to 0
+ {name: "TSTconst", argLength: 1, reg: gp1flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & auxInt compare to 0
+ {name: "TEQ", argLength: 2, reg: gp2flags, asm: "TEQ", typ: "Flags", commutative: true}, // arg0 ^ arg1 compare to 0
+ {name: "TEQconst", argLength: 1, reg: gp1flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ auxInt compare to 0
+ {name: "CMPF", argLength: 2, reg: fp2flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to arg1, float32
+ {name: "CMPD", argLength: 2, reg: fp2flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to arg1, float64
+
+ {name: "CMPshiftLL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1<<auxInt
+ {name: "CMPshiftRL", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, unsigned shift
+ {name: "CMPshiftRA", argLength: 2, reg: gp2flags, asm: "CMP", aux: "Int32", typ: "Flags"}, // arg0 compare to arg1>>auxInt, signed shift
+ {name: "CMNshiftLL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1<<auxInt)
+ {name: "CMNshiftRL", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), unsigned shift
+ {name: "CMNshiftRA", argLength: 2, reg: gp2flags, asm: "CMN", aux: "Int32", typ: "Flags"}, // arg0 compare to -(arg1>>auxInt), signed shift
+ {name: "TSTshiftLL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1<<auxInt) compare to 0
+ {name: "TSTshiftRL", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TSTshiftRA", argLength: 2, reg: gp2flags, asm: "TST", aux: "Int32", typ: "Flags"}, // arg0 & (arg1>>auxInt) compare to 0, signed shift
+ {name: "TEQshiftLL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1<<auxInt) compare to 0
+ {name: "TEQshiftRL", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, unsigned shift
+ {name: "TEQshiftRA", argLength: 2, reg: gp2flags, asm: "TEQ", aux: "Int32", typ: "Flags"}, // arg0 ^ (arg1>>auxInt) compare to 0, signed shift
+
+ {name: "CMPshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1<<arg2
+ {name: "CMPshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, unsigned shift
+ {name: "CMPshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1>>arg2, signed shift
+ {name: "CMNshiftLLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1<<arg2) compare to 0
+ {name: "CMNshiftRLreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, unsigned shift
+ {name: "CMNshiftRAreg", argLength: 3, reg: gp3flags, asm: "CMN", typ: "Flags"}, // arg0 + (arg1>>arg2) compare to 0, signed shift
+ {name: "TSTshiftLLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1<<arg2) compare to 0
+ {name: "TSTshiftRLreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TSTshiftRAreg", argLength: 3, reg: gp3flags, asm: "TST", typ: "Flags"}, // arg0 & (arg1>>arg2) compare to 0, signed shift
+ {name: "TEQshiftLLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1<<arg2) compare to 0
+ {name: "TEQshiftRLreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, unsigned shift
+ {name: "TEQshiftRAreg", argLength: 3, reg: gp3flags, asm: "TEQ", typ: "Flags"}, // arg0 ^ (arg1>>arg2) compare to 0, signed shift
+
+ {name: "CMPF0", argLength: 1, reg: fp1flags, asm: "CMPF", typ: "Flags"}, // arg0 compare to 0, float32
+ {name: "CMPD0", argLength: 1, reg: fp1flags, asm: "CMPD", typ: "Flags"}, // arg0 compare to 0, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // 32 low bits of auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVWloadidx", argLength: 3, reg: gp2load, asm: "MOVW", typ: "UInt32"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVWloadshiftLL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1<<auxInt. arg2=mem
+ {name: "MOVWloadshiftRL", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, unsigned shift. arg2=mem
+ {name: "MOVWloadshiftRA", argLength: 3, reg: gp2load, asm: "MOVW", aux: "Int32", typ: "UInt32"}, // load from arg0 + arg1>>auxInt, signed shift. arg2=mem
+ {name: "MOVBUloadidx", argLength: 3, reg: gp2load, asm: "MOVBU", typ: "UInt8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVBloadidx", argLength: 3, reg: gp2load, asm: "MOVB", typ: "Int8"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHUloadidx", argLength: 3, reg: gp2load, asm: "MOVHU", typ: "UInt16"}, // load from arg0 + arg1. arg2=mem
+ {name: "MOVHloadidx", argLength: 3, reg: gp2load, asm: "MOVH", typ: "Int16"}, // load from arg0 + arg1. arg2=mem
+
+ {name: "MOVWstoreidx", argLength: 4, reg: gp2store, asm: "MOVW", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVWstoreshiftLL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1<<auxInt. arg3=mem
+ {name: "MOVWstoreshiftRL", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, unsigned shift. arg3=mem
+ {name: "MOVWstoreshiftRA", argLength: 4, reg: gp2store, asm: "MOVW", aux: "Int32", typ: "Mem"}, // store arg2 to arg0 + arg1>>auxInt, signed shift. arg3=mem
+ {name: "MOVBstoreidx", argLength: 4, reg: gp2store, asm: "MOVB", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gp2store, asm: "MOVH", typ: "Mem"}, // store arg2 to arg0 + arg1. arg3=mem
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVBS"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, zero-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVHS"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, zero-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVWUF", argLength: 1, reg: gpfp, asm: "MOVWF"}, // uint32 -> float32, set U bit in the instruction
+ {name: "MOVWUD", argLength: 1, reg: gpfp, asm: "MOVWD"}, // uint32 -> float64, set U bit in the instruction
+ {name: "MOVFW", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> int32
+ {name: "MOVDW", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> int32
+ {name: "MOVFWU", argLength: 1, reg: fpgp, asm: "MOVFW"}, // float32 -> uint32, set U bit in the instruction
+ {name: "MOVDWU", argLength: 1, reg: fpgp, asm: "MOVDW"}, // float64 -> uint32, set U bit in the instruction
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // conditional instructions, for lowering shifts
+ {name: "CMOVWHSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates HS, arg1=flags
+ {name: "CMOVWLSconst", argLength: 2, reg: gp1flags1, asm: "MOVW", aux: "Int32", resultInArg0: true}, // replace arg0 w/ const if flags indicates LS, arg1=flags
+ {name: "SRAcond", argLength: 3, reg: gp2flags1, asm: "SRA"}, // arg0 >> 31 if flags indicates HS, arg0 >> arg1 otherwise, signed shift, arg2=flags
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R7"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "Equal", argLength: 1, reg: readflags},         // bool, true if flags encode x==y, false otherwise.
+ {name: "NotEqual", argLength: 1, reg: readflags},      // bool, true if flags encode x!=y, false otherwise.
+ {name: "LessThan", argLength: 1, reg: readflags},      // bool, true if flags encode signed x<y, false otherwise.
+ {name: "LessEqual", argLength: 1, reg: readflags},     // bool, true if flags encode signed x<=y, false otherwise.
+ {name: "GreaterThan", argLength: 1, reg: readflags},   // bool, true if flags encode signed x>y, false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: readflags},  // bool, true if flags encode signed x>=y, false otherwise.
+ {name: "LessThanU", argLength: 1, reg: readflags},     // bool, true if flags encode unsigned x<y, false otherwise.
+ {name: "LessEqualU", argLength: 1, reg: readflags},    // bool, true if flags encode unsigned x<=y, false otherwise.
+ {name: "GreaterThanU", argLength: 1, reg: readflags},  // bool, true if flags encode unsigned x>y, false otherwise.
+ {name: "GreaterEqualU", argLength: 1, reg: readflags}, // bool, true if flags encode unsigned x>=y, false otherwise.
+
+ // duffzero (must be 4-byte aligned)
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = value to store (always zero)
+ // arg2 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R0")},
+ clobbers: buildReg("R1 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy (must be 4-byte aligned)
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R0 R1 R2 R12 R14"), // R14 is LR, R12 is linker trampoline scratch register
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = value to store (always zero)
+ // arg3 = mem
+ // returns mem
+ // MOVW.P Rarg2, 4(R1)
+ // CMP R1, Rarg1
+ // BLE -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp, gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // returns mem
+ // MOVW.P 4(R1), Rtmp
+ // MOVW.P Rtmp, 4(R2)
+ // CMP R1, Rarg2
+ // BLE -3(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R7 (arm.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R7")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
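+ // For example (illustrative only), a full slice expression a[i:j:k] checks
+ // k against cap(a) (A), then j against k (B), then i against j (C), so each
+ // value can stay in its default register between checks.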
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r0, r1}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+
+ // Constant flag value.
+ // Note: there's an "unordered" outcome for floating-point
+ // comparisons, but we don't use such a beast yet.
+ // This op is for temporary use by rewrite rules. It
+ // cannot appear in the generated assembly.
+ {name: "FlagConstant", aux: "FlagConstant"},
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call, and R12 which is linker trampoline scratch register.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R12 R14")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "ULT", controls: 1},
+ {name: "ULE", controls: 1},
+ {name: "UGT", controls: 1},
+ {name: "UGE", controls: 1},
+ {name: "LTnoov", controls: 1}, // 'LT' but without honoring overflow
+ {name: "LEnoov", controls: 1}, // 'LE' but without honoring overflow
+ {name: "GTnoov", controls: 1}, // 'GT' but without honoring overflow
+ {name: "GEnoov", controls: 1}, // 'GE' but without honoring overflow
+ }
+
+ archs = append(archs, arch{
+ name: "ARM",
+ pkg: "cmd/internal/obj/arm",
+ genfile: "../../arm/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesARM,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS.rules b/src/cmd/compile/internal/ssa/gen/MIPS.rules
new file mode 100644
index 0000000..639dda4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS.rules
@@ -0,0 +1,703 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|32|16|8) ...) => (ADD ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Select0 (Add32carry <t> x y)) => (ADD <t.FieldType(0)> x y)
+(Select1 (Add32carry <t> x y)) => (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+(Add32withcarry <t> x y c) => (ADD c (ADD <t> x y))
+
+(Sub(Ptr|32|16|8) ...) => (SUB ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Select0 (Sub32carry <t> x y)) => (SUB <t.FieldType(0)> x y)
+(Select1 (Sub32carry <t> x y)) => (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+(Sub32withcarry <t> x y c) => (SUB (SUB <t> x y) c)
+
+(Mul(32|16|8) ...) => (MUL ...)
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+
+(Hmul(32|32u) x y) => (Select0 (MUL(T|TU) x y))
+(Mul32uhilo ...) => (MULTU ...)
+
+(Div32 x y) => (Select1 (DIV x y))
+(Div32u x y) => (Select1 (DIVU x y))
+(Div16 x y) => (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Div16u x y) => (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Div8 x y) => (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Div8u x y) => (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod32 x y) => (Select0 (DIV x y))
+(Mod32u x y) => (Select0 (DIVU x y))
+(Mod16 x y) => (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+(Mod16u x y) => (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Mod8 x y) => (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+(Mod8u x y) => (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+
+// (x + y) / 2 with x>=y becomes (x - y) / 2 + y
+(Avg32u <t> x y) => (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
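// Worked example: x=10, y=4: (10+4)/2 = 7 and (10-4)/2 + 4 = 3 + 4 = 7; with
// x>=y the rewritten form also avoids overflowing the intermediate 32-bit sum.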
+
+(And(32|16|8) ...) => (AND ...)
+(Or(32|16|8) ...) => (OR ...)
+(Xor(32|16|8) ...) => (XOR ...)
+
+// constant shifts
+// generic opt rewrites all constant shifts to shift by Const64
+(Lsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SLLconst x [int32(c)])
+(Rsh32x64 x (Const64 [c])) && uint32(c) < 32 => (SRAconst x [int32(c)])
+(Rsh32Ux64 x (Const64 [c])) && uint32(c) < 32 => (SRLconst x [int32(c)])
+(Lsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SLLconst x [int32(c)])
+(Rsh16x64 x (Const64 [c])) && uint32(c) < 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Rsh16Ux64 x (Const64 [c])) && uint32(c) < 16 => (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+(Lsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SLLconst x [int32(c)])
+(Rsh8x64 x (Const64 [c])) && uint32(c) < 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+(Rsh8Ux64 x (Const64 [c])) && uint32(c) < 8 => (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+
+// large constant shifts
+(Lsh32x64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Rsh32Ux64 _ (Const64 [c])) && uint32(c) >= 32 => (MOVWconst [0])
+(Lsh16x64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Rsh16Ux64 _ (Const64 [c])) && uint32(c) >= 16 => (MOVWconst [0])
+(Lsh8x64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+(Rsh8Ux64 _ (Const64 [c])) && uint32(c) >= 8 => (MOVWconst [0])
+
+// large constant signed right shift, we leave the sign bit
+(Rsh32x64 x (Const64 [c])) && uint32(c) >= 32 => (SRAconst x [31])
+(Rsh16x64 x (Const64 [c])) && uint32(c) >= 16 => (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+(Rsh8x64 x (Const64 [c])) && uint32(c) >= 8 => (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+
+// shifts
+// hardware instruction uses only the low 5 bits of the shift
+// we compare to 32 to ensure Go semantics for large shifts
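+// For example, a Go shift x << 40 must evaluate to 0, but the SLL instruction
+// would shift by 40&31 = 8; the CMOVZ below therefore selects 0 whenever the
+// shift amount is >= 32.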
+(Lsh32x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh32x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh32x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh16x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh16x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh16x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Lsh8x32 <t> x y) => (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Lsh8x16 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Lsh8x8 <t> x y) => (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32Ux32 <t> x y) => (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh32Ux16 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh32Ux8 <t> x y) => (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh16Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh16Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh16Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh8Ux32 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+(Rsh8Ux16 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+(Rsh8Ux8 <t> x y) => (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+
+(Rsh32x32 x y) => (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh32x16 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh32x8 x y) => (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh16x32 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh16x16 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh16x8 x y) => (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+(Rsh8x32 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+(Rsh8x16 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+(Rsh8x8 x y) => (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+
+// rotates
+(RotateLeft8 <t> x (MOVWconst [c])) => (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+(RotateLeft16 <t> x (MOVWconst [c])) => (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+(RotateLeft32 <t> x (MOVWconst [c])) => (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+(RotateLeft64 <t> x (MOVWconst [c])) => (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+
+// unary ops
+(Neg(32|16|8) ...) => (NEG ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(32|16|8) x) => (NORconst [0] x)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+
+// TODO: optimize this case?
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// count trailing zeros
+// ctz(x) = 32 - CLZ((x & -x) - 1)
+(Ctz32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
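+// Worked example: x = 8 (0b1000): x & -x = 8, 8 - 1 = 7 (0b111), CLZ(7) = 29,
+// and 32 - 29 = 3 = Ctz32(8).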
+
+// bit length
+(BitLen32 <t> x) => (SUB (MOVWconst [32]) (CLZ <t> x))
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XORconst [1] (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(32|16|8) [val]) => (MOVWconst [int32(val)])
+(Const(32|64)F ...) => (MOV(F|D)const ...)
+(ConstNil) => (MOVWconst [0])
+(ConstBool [t]) => (MOVWconst [b2i32(t)])
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+
+(Signmask x) => (SRAconst x [31])
+(Zeromask x) => (NEG (SGTU x (MOVWconst [0])))
+(Slicemask <t> x) => (SRAconst (NEG <t> x) [31])
+
+// float-int conversion
+(Cvt32to(32|64)F ...) => (MOVW(F|D) ...)
+(Cvt(32|64)Fto32 ...) => (TRUNC(F|D)W ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (SGTUconst [1] (XOR x y))
+(EqPtr x y) => (SGTUconst [1] (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+(Neq32 x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVWconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to32 y) (SignExt8to32 x))
+(Less16 x y) => (SGT (SignExt16to32 y) (SignExt16to32 x))
+(Less32 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+(Less16U x y) => (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+(Less32U x y) => (SGTU y x)
+
+(Leq8 x y) => (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (XORconst [1] (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (XORconst [1] (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) => (MOVWaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDconst [int32(off)] ptr)
+
+(Addr {sym} base) => (MOVWaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVWaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) || isPtr(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVWconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVWconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVWconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVWconst [0])
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVWconst [0])
+ (MOVBstore [1] ptr (MOVWconst [0])
+ (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVWconst [0])
+ (MOVHstore [2] ptr (MOVWconst [0])
+ (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] ptr (MOVWconst [0])
+ (MOVWstore [8] ptr (MOVWconst [0])
+ (MOVWstore [4] ptr (MOVWconst [0])
+ (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 16 || t.Alignment()%4 != 0) =>
+ (LoweredZero [int32(t.Alignment())]
+ ptr
+ (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBUload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHUload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHUload [2] src mem)
+ (MOVHstore dst (MOVHUload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBUload [3] src mem)
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem))))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBUload [2] src mem)
+ (MOVBstore [1] dst (MOVBUload [1] src mem)
+ (MOVBstore dst (MOVBUload src mem) mem)))
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [12] dst (MOVWload [12] src mem)
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))))
+
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) =>
+ (LoweredMove [int32(t.Alignment())]
+ dst
+ src
+ (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32) ...) => (LoweredAtomicLoad(8|32) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad32 ...)
+
+(AtomicStore(8|32) ...) => (LoweredAtomicStore(8|32) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore32 ...)
+
+(AtomicExchange32 ...) => (LoweredAtomicExchange ...)
+(AtomicAdd32 ...) => (LoweredAtomicAdd ...)
+
+(AtomicCompareAndSwap32 ...) => (LoweredAtomicCas ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << ((ptr & 3) * 8))
+(AtomicOr8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << ((ptr & 3) * 8)) | ^(uint32(0xFF) << ((ptr & 3) * 8)))
+(AtomicAnd8 ptr val mem) && !config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr)))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3] ptr))))) mem)
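+// In the rule above (little-endian, illustrative only): for the byte at ptr with
+// ptr&3 == 1 the shift is 8, so the second argument is (uint32(val)<<8) | ^uint32(0xFF<<8),
+// which ANDs the other three bytes of the word with all-ones and leaves them unchanged.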
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr(ptr&^3,uint32(val) << (((ptr^3) & 3) * 8))
+(AtomicOr8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))) mem)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd(ptr&^3,(uint32(val) << (((ptr^3) & 3) * 8)) | ^(uint32(0xFF) << (((ptr^3) & 3) * 8)))
+(AtomicAnd8 ptr val mem) && config.BigEndian =>
+ (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr)
+ (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr))))
+ (NORconst [0] <typ.UInt32> (SLL <typ.UInt32>
+ (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3]
+ (ANDconst <typ.UInt32> [3]
+ (XORconst <typ.UInt32> [3] ptr)))))) mem)
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd ...)
+(AtomicOr32 ...) => (LoweredAtomicOr ...)
+
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVWconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XORconst [1] (SGTU idx len))
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 0 => (LoweredPanicExtendA [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 1 => (LoweredPanicExtendB [kind] hi lo y mem)
+(PanicExtend [kind] hi lo y mem) && boundsABI(kind) == 2 => (LoweredPanicExtendC [kind] hi lo y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTzero _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUzero _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTzero _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUzero _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTUzero x) yes no) => (NE x yes no)
+(EQ (SGTUzero x) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGTzero x) yes no) => (GTZ x yes no)
+(EQ (SGTzero x) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDconst [off1] (MOVWaddr [off2] {sym} ptr)) => (MOVWaddr [off1+off2] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBUload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHUload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFload [off1+off2] {sym} ptr mem)
+(MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDload [off1+off2] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVFstore [off1+off2] {sym} ptr val mem)
+(MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVDstore [off1+off2] {sym} ptr val mem)
+
+(MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVBstorezero [off1+off2] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVHstorezero [off1+off2] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem) && (is16Bit(int64(off1+off2)) || x.Uses == 1) => (MOVWstorezero [off1+off2] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// replace load from same location as preceding store with zero/sign extension (or copy in case of full width)
+(MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBreg x)
+(MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVBUreg x)
+(MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHreg x)
+(MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => (MOVHUreg x)
+(MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+(MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _)) && sym == sym2 && off == off2 && isSamePtr(ptr, ptr2) => x
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVWconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVWreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVWreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVWreg x)
+
+// sign extended loads
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+
+// fold extensions and ANDs together
+(MOVBUreg (ANDconst [c] x)) => (ANDconst [c&0xff] x)
+(MOVHUreg (ANDconst [c] x)) => (ANDconst [c&0xffff] x)
+(MOVBreg (ANDconst [c] x)) && c & 0x80 == 0 => (ANDconst [c&0x7f] x)
+(MOVHreg (ANDconst [c] x)) && c & 0x8000 == 0 => (ANDconst [c&0x7fff] x)
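+// e.g. (MOVBUreg (ANDconst [0x1ff] x)) rewrites to (ANDconst [0xff] x): the zero
+// extension keeps only the low 8 bits, so it folds into the mask.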
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVWnop doesn't emit an instruction; it exists only to ensure the type.
+(MOVWreg x) && x.Uses == 1 => (MOVWnop x)
+
+// TODO: we should be able to get rid of MOVWnop altogether.
+// But for now, this is enough to get rid of lots of them.
+(MOVWnop (MOVWconst [c])) => (MOVWconst [c])
+
+// fold constant into arithmetic ops
+(ADD x (MOVWconst [c])) => (ADDconst [c] x)
+(SUB x (MOVWconst [c])) => (SUBconst [c] x)
+(AND x (MOVWconst [c])) => (ANDconst [c] x)
+(OR x (MOVWconst [c])) => (ORconst [c] x)
+(XOR x (MOVWconst [c])) => (XORconst [c] x)
+(NOR x (MOVWconst [c])) => (NORconst [c] x)
+
+(SLL x (MOVWconst [c])) => (SLLconst x [c&31])
+(SRL x (MOVWconst [c])) => (SRLconst x [c&31])
+(SRA x (MOVWconst [c])) => (SRAconst x [c&31])
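+// e.g. (SLL x (MOVWconst [35])) becomes (SLLconst x [3]), since 35&31 == 3 and the
+// 32-bit shift instructions use only the low 5 bits of the shift amount.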
+
+(SGT (MOVWconst [c]) x) => (SGTconst [c] x)
+(SGTU (MOVWconst [c]) x) => (SGTUconst [c] x)
+(SGT x (MOVWconst [0])) => (SGTzero x)
+(SGTU x (MOVWconst [0])) => (SGTUzero x)
+
+// mul with constant
+(Select1 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select0 (MULTU (MOVWconst [0]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [1]) x )) => x
+(Select0 (MULTU (MOVWconst [1]) _ )) => (MOVWconst [0])
+(Select1 (MULTU (MOVWconst [-1]) x )) => (NEG <x.Type> x)
+(Select0 (MULTU (MOVWconst [-1]) x )) => (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+(Select1 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+(Select0 (MULTU (MOVWconst [c]) x )) && isPowerOfTwo64(int64(uint32(c))) => (SRLconst [int32(32-log2uint32(int64(c)))] x)
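+// e.g. c=8 (a power of two): the low word (Select1) becomes x<<3 and the high word
+// (Select0) becomes x>>(32-3), i.e. x>>29.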
+
+(MUL (MOVWconst [0]) _ ) => (MOVWconst [0])
+(MUL (MOVWconst [1]) x ) => x
+(MUL (MOVWconst [-1]) x ) => (NEG x)
+(MUL (MOVWconst [c]) x ) && isPowerOfTwo64(int64(uint32(c))) => (SLLconst [int32(log2uint32(int64(c)))] x)
+
+// generic simplifications
+(ADD x (NEG y)) => (SUB x y)
+(SUB x x) => (MOVWconst [0])
+(SUB (MOVWconst [0]) x) => (NEG x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVWconst [0])
+
+// miscellaneous patterns generated by dec64
+(AND (SGTUconst [1] x) (SGTUconst [1] y)) => (SGTUconst [1] (OR <x.Type> x y))
+(OR (SGTUzero x) (SGTUzero y)) => (SGTUzero (OR <x.Type> x y))
+
+// remove redundant *const ops
+(ADDconst [0] x) => x
+(SUBconst [0] x) => x
+(ANDconst [0] _) => (MOVWconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVWconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDconst [c] (MOVWconst [d])) => (MOVWconst [int32(c+d)])
+(ADDconst [c] (ADDconst [d] x)) => (ADDconst [c+d] x)
+(ADDconst [c] (SUBconst [d] x)) => (ADDconst [c-d] x)
+(SUBconst [c] (MOVWconst [d])) => (MOVWconst [d-c])
+(SUBconst [c] (SUBconst [d] x)) => (ADDconst [-c-d] x)
+(SUBconst [c] (ADDconst [d] x)) => (ADDconst [-c+d] x)
+(SLLconst [c] (MOVWconst [d])) => (MOVWconst [d<<uint32(c)])
+(SRLconst [c] (MOVWconst [d])) => (MOVWconst [int32(uint32(d)>>uint32(c))])
+(SRAconst [c] (MOVWconst [d])) => (MOVWconst [d>>uint32(c)])
+(MUL (MOVWconst [c]) (MOVWconst [d])) => (MOVWconst [c*d])
+(Select1 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32(uint32(c)*uint32(d))])
+(Select0 (MULTU (MOVWconst [c]) (MOVWconst [d]))) => (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+(Select1 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c/d])
+(Select1 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)/uint32(d))])
+(Select0 (DIV (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [c%d])
+(Select0 (DIVU (MOVWconst [c]) (MOVWconst [d]))) && d != 0 => (MOVWconst [int32(uint32(c)%uint32(d))])
+(ANDconst [c] (MOVWconst [d])) => (MOVWconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVWconst [d])) => (MOVWconst [c|d])
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (MOVWconst [d])) => (MOVWconst [c^d])
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(NORconst [c] (MOVWconst [d])) => (MOVWconst [^(c|d)])
+(NEG (MOVWconst [c])) => (MOVWconst [-c])
+(MOVBreg (MOVWconst [c])) => (MOVWconst [int32(int8(c))])
+(MOVBUreg (MOVWconst [c])) => (MOVWconst [int32(uint8(c))])
+(MOVHreg (MOVWconst [c])) => (MOVWconst [int32(int16(c))])
+(MOVHUreg (MOVWconst [c])) => (MOVWconst [int32(uint16(c))])
+(MOVWreg (MOVWconst [c])) => (MOVWconst [c])
+
+// constant comparisons
+(SGTconst [c] (MOVWconst [d])) && c > d => (MOVWconst [1])
+(SGTconst [c] (MOVWconst [d])) && c <= d => (MOVWconst [0])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) > uint32(d) => (MOVWconst [1])
+(SGTUconst [c] (MOVWconst [d])) && uint32(c) <= uint32(d) => (MOVWconst [0])
+(SGTzero (MOVWconst [d])) && d > 0 => (MOVWconst [1])
+(SGTzero (MOVWconst [d])) && d <= 0 => (MOVWconst [0])
+(SGTUzero (MOVWconst [d])) && d != 0 => (MOVWconst [1])
+(SGTUzero (MOVWconst [d])) && d == 0 => (MOVWconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVWconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVWconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVWconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVWconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVWconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVWconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVWconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint32(m) < uint32(c) => (MOVWconst [1])
+(SGTconst [c] (SRLconst _ [d])) && 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+(SGTUconst [c] (SRLconst _ [d])) && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c) => (MOVWconst [1])
+
+// absorb constants into branches
+(EQ (MOVWconst [0]) yes no) => (First yes no)
+(EQ (MOVWconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVWconst [0]) yes no) => (First no yes)
+(NE (MOVWconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVWconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVWconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVWconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVWconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVWconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVWconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVWconst [c]) yes no) && c < 0 => (First no yes)
+
+// conditional move
+(CMOVZ _ f (MOVWconst [0])) => f
+(CMOVZ a _ (MOVWconst [c])) && c!=0 => a
+(CMOVZzero _ (MOVWconst [0])) => (MOVWconst [0])
+(CMOVZzero a (MOVWconst [c])) && c!=0 => a
+(CMOVZ a (MOVWconst [0]) c) => (CMOVZzero a c)
+
+// atomic
+(LoweredAtomicStore32 ptr (MOVWconst [0]) mem) => (LoweredAtomicStorezero ptr mem)
+(LoweredAtomicAdd ptr (MOVWconst [c]) mem) && is16Bit(int64(c)) => (LoweredAtomicAddconst [c] ptr mem)
+
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64.rules b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
new file mode 100644
index 0000000..0d6d30f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64.rules
@@ -0,0 +1,691 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+(Add(Ptr|64|32|16|8) ...) => (ADDV ...)
+(Add(32|64)F ...) => (ADD(F|D) ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUBV ...)
+(Sub(32|64)F ...) => (SUB(F|D) ...)
+
+(Mul(64|32|16|8) x y) => (Select1 (MULVU x y))
+(Mul(32|64)F ...) => (MUL(F|D) ...)
+(Mul64uhilo ...) => (MULVU ...)
+(Select0 (Mul64uover x y)) => (Select1 <typ.UInt64> (MULVU x y))
+(Select1 (Mul64uover x y)) => (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+
+(Hmul64 x y) => (Select0 (MULV x y))
+(Hmul64u x y) => (Select0 (MULVU x y))
+(Hmul32 x y) => (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+(Hmul32u x y) => (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+
+(Div64 x y) => (Select1 (DIVV x y))
+(Div64u x y) => (Select1 (DIVVU x y))
+(Div32 x y) => (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Div32u x y) => (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Div16 x y) => (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Div16u x y) => (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Div8 x y) => (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Div8u x y) => (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Div(32|64)F ...) => (DIV(F|D) ...)
+
+(Mod64 x y) => (Select0 (DIVV x y))
+(Mod64u x y) => (Select0 (DIVVU x y))
+(Mod32 x y) => (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+(Mod32u x y) => (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Mod16 x y) => (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+(Mod16u x y) => (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Mod8 x y) => (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+(Mod8u x y) => (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
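+// e.g. x=10, y=4: (10-4)>>1 + 4 == 7 == (10+4)/2; subtracting first avoids the
+// overflow that x+y could produce.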
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+// shifts
+// hardware instruction uses only the low 6 bits of the shift
+// we compare to 64 to ensure Go semantics for large shifts
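+// e.g. in Lsh64x64, when y >= 64 the SGTU(64, y) term is 0, NEGV turns it into an
+// all-zeros mask, and the AND forces the result to 0, as Go requires.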
+(Lsh64x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh64x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh64x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh64x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh32x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh32x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh32x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh32x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh16x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh16x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh16x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh16x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Lsh8x64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+(Lsh8x32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+(Lsh8x16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+(Lsh8x8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+
+(Rsh64Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+(Rsh64Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+(Rsh64Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+(Rsh64Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+
+(Rsh32Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+(Rsh32Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Rsh32Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+(Rsh32Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+
+(Rsh16Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+(Rsh16Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+(Rsh16Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Rsh16Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+
+(Rsh8Ux64 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+(Rsh8Ux32 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+(Rsh8Ux16 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+(Rsh8Ux8 <t> x y) => (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+
+(Rsh64x64 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh64x32 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh64x16 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh64x8 <t> x y) => (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh32x64 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh32x32 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh32x16 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh32x8 <t> x y) => (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh16x64 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh16x32 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh16x16 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh16x8 <t> x y) => (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+(Rsh8x64 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+(Rsh8x32 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+(Rsh8x16 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+(Rsh8x8 <t> x y) => (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+
+// rotates
+(RotateLeft8 <t> x (MOVVconst [c])) => (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+(RotateLeft16 <t> x (MOVVconst [c])) => (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+(RotateLeft32 <t> x (MOVVconst [c])) => (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+(RotateLeft64 <t> x (MOVVconst [c])) => (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
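+// e.g. RotateLeft8 by 3 becomes (x<<3) | (x>>5), since -3&7 == 5.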
+
+// unary ops
+(Neg(64|32|16|8) ...) => (NEGV ...)
+(Neg(32|64)F ...) => (NEG(F|D) ...)
+
+(Com(64|32|16|8) x) => (NOR (MOVVconst [0]) x)
+
+(Sqrt ...) => (SQRTD ...)
+(Sqrt32 ...) => (SQRTF ...)
+
+// boolean ops -- booleans are represented with 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not x) => (XORconst [1] x)
+
+// constants
+(Const(64|32|16|8) [val]) => (MOVVconst [int64(val)])
+(Const(32|64)F [val]) => (MOV(F|D)const [float64(val)])
+(ConstNil) => (MOVVconst [0])
+(ConstBool [t]) => (MOVVconst [int64(b2i(t))])
+
+(Slicemask <t> x) => (SRAVconst (NEGV <t> x) [63])
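+// Slicemask: for x > 0, NEGV x is negative, so the arithmetic shift by 63 yields all
+// ones (-1); for x == 0 it yields 0.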
+
+// truncations
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Zero-/Sign-extensions
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+// float <=> int conversion
+(Cvt32to32F ...) => (MOVWF ...)
+(Cvt32to64F ...) => (MOVWD ...)
+(Cvt64to32F ...) => (MOVVF ...)
+(Cvt64to64F ...) => (MOVVD ...)
+(Cvt32Fto32 ...) => (TRUNCFW ...)
+(Cvt64Fto32 ...) => (TRUNCDW ...)
+(Cvt32Fto64 ...) => (TRUNCFV ...)
+(Cvt64Fto64 ...) => (TRUNCDV ...)
+(Cvt32Fto64F ...) => (MOVFD ...)
+(Cvt64Fto32F ...) => (MOVDF ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (Copy ...)
+
+// comparisons
+(Eq8 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq16 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq32 x y) => (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq64 x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(EqPtr x y) => (SGTU (MOVVconst [1]) (XOR x y))
+(Eq(32|64)F x y) => (FPFlagTrue (CMPEQ(F|D) x y))
+
+(Neq8 x y) => (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+(Neq16 x y) => (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+(Neq32 x y) => (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+(Neq64 x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(NeqPtr x y) => (SGTU (XOR x y) (MOVVconst [0]))
+(Neq(32|64)F x y) => (FPFlagFalse (CMPEQ(F|D) x y))
+
+(Less8 x y) => (SGT (SignExt8to64 y) (SignExt8to64 x))
+(Less16 x y) => (SGT (SignExt16to64 y) (SignExt16to64 x))
+(Less32 x y) => (SGT (SignExt32to64 y) (SignExt32to64 x))
+(Less64 x y) => (SGT y x)
+(Less(32|64)F x y) => (FPFlagTrue (CMPGT(F|D) y x)) // reverse operands to work around NaN
+
+(Less8U x y) => (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+(Less16U x y) => (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+(Less32U x y) => (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+(Less64U x y) => (SGTU y x)
+
+(Leq8 x y) => (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+(Leq16 x y) => (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+(Leq32 x y) => (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+(Leq64 x y) => (XOR (MOVVconst [1]) (SGT x y))
+(Leq(32|64)F x y) => (FPFlagTrue (CMPGE(F|D) y x)) // reverse operands to work around NaN
+
+(Leq8U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Leq16U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Leq32U x y) => (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Leq64U x y) => (XOR (MOVVconst [1]) (SGTU x y))
+
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADDVconst [off] ptr)
+
+(Addr {sym} base) => (MOVVaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVVaddr {sym} base)
+
+// loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVVload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (MOVFload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (MOVDload ptr mem)
+
+// stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVVstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (MOVFstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+
+// zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVVconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVVconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVVconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVVconst [0])
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore ptr (MOVVconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVVconst [0])
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVVconst [0])
+ (MOVBstore [1] ptr (MOVVconst [0])
+ (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVVconst [0])
+ (MOVHstore [2] ptr (MOVVconst [0])
+ (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVVconst [0])
+ (MOVWstore [4] ptr (MOVVconst [0])
+ (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] ptr (MOVVconst [0])
+ (MOVVstore [8] ptr (MOVVconst [0])
+ (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+
+// medium zeroing uses a duff device
+// 8 and 128 are magic constants; see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s > 24 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
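+// e.g. s=64: the DUFFZERO entry offset is 8*(128-64/8) = 960, so only the last
+// 64/8 = 8 of the 128 zeroing blocks are executed.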
+
+// large or unaligned zeroing uses a loop
+(Zero [s] {t} ptr mem)
+ && (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0 =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore dst (MOVVload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVVstore [16] dst (MOVVload [16] src mem)
+ (MOVVstore [8] dst (MOVVload [8] src mem)
+ (MOVVstore dst (MOVVload src mem) mem)))
+
+// medium move uses a duff device
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+// 16 and 128 are magic constants. 16 is the number of bytes to encode:
+// MOVV (R1), R23
+// ADDV $8, R1
+// MOVV R23, (R2)
+// ADDV $8, R2
+// and 128 is the number of such blocks. See runtime/duff_mips64.s:duffcopy.
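+// e.g. s=64: the DUFFCOPY entry offset is 16*(128-64/8) = 1920, so only the last
+// 8 copy blocks (64 bytes) run.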
+
+// large or unaligned move uses a loop
+(Move [s] {t} dst src mem)
+ && s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0 =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)])
+ mem)
+
+// calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64) ...) => (LoweredAtomicLoad(8|32|64) ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore(8|32|64) ...) => (LoweredAtomicStore(8|32|64) ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+// checks
+(NilCheck ...) => (LoweredNilCheck ...)
+(IsNonNil ptr) => (SGTU ptr (MOVVconst [0]))
+(IsInBounds idx len) => (SGTU len idx)
+(IsSliceInBounds idx len) => (XOR (MOVVconst [1]) (SGTU idx len))
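+// IsSliceInBounds computes 1 ^ (idx > len), i.e. idx <= len as an unsigned comparison.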
+
+// pseudo-ops
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+(If cond yes no) => (NE cond yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+
+// Absorb boolean tests into block
+(NE (FPFlagTrue cmp) yes no) => (FPT cmp yes no)
+(NE (FPFlagFalse cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagTrue cmp) yes no) => (FPF cmp yes no)
+(EQ (FPFlagFalse cmp) yes no) => (FPT cmp yes no)
+(NE (XORconst [1] cmp:(SGT _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTU _ _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTconst _)) yes no) => (EQ cmp yes no)
+(NE (XORconst [1] cmp:(SGTUconst _)) yes no) => (EQ cmp yes no)
+(EQ (XORconst [1] cmp:(SGT _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTU _ _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTconst _)) yes no) => (NE cmp yes no)
+(EQ (XORconst [1] cmp:(SGTUconst _)) yes no) => (NE cmp yes no)
+(NE (SGTUconst [1] x) yes no) => (EQ x yes no)
+(EQ (SGTUconst [1] x) yes no) => (NE x yes no)
+(NE (SGTU x (MOVVconst [0])) yes no) => (NE x yes no)
+(EQ (SGTU x (MOVVconst [0])) yes no) => (EQ x yes no)
+(NE (SGTconst [0] x) yes no) => (LTZ x yes no)
+(EQ (SGTconst [0] x) yes no) => (GEZ x yes no)
+(NE (SGT x (MOVVconst [0])) yes no) => (GTZ x yes no)
+(EQ (SGT x (MOVVconst [0])) yes no) => (LEZ x yes no)
+
+// fold offset into address
+(ADDVconst [off1] (MOVVaddr [off2] {sym} ptr)) && is32Bit(off1+int64(off2)) => (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+
+// fold address into load/store
+(MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBload [off1+int32(off2)] {sym} ptr mem)
+(MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} ptr mem)
+(MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} ptr mem)
+(MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+(MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVload [off1+int32(off2)] {sym} ptr mem)
+(MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVFload [off1+int32(off2)] {sym} ptr mem)
+(MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem) && is32Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+(MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+
+(MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+(MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+
+// store zero
+(MOVBstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVVstore [off] {sym} ptr (MOVVconst [0]) mem) => (MOVVstorezero [off] {sym} ptr mem)
+
+// don't extend after proper load
+(MOVBreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVVreg x)
+
+// fold double extensions
+(MOVBreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVVreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVVreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVVreg x)
+
+// don't extend before store
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVVnop doesn't emit an instruction; it exists only to ensure the type.
+(MOVVreg x) && x.Uses == 1 => (MOVVnop x)
+
+// TODO: we should be able to get rid of MOVVnop altogether.
+// But for now, this is enough to get rid of lots of them.
+(MOVVnop (MOVVconst [c])) => (MOVVconst [c])
+
+// fold constant into arithmetic ops
+(ADDV x (MOVVconst [c])) && is32Bit(c) => (ADDVconst [c] x)
+(SUBV x (MOVVconst [c])) && is32Bit(c) => (SUBVconst [c] x)
+(AND x (MOVVconst [c])) && is32Bit(c) => (ANDconst [c] x)
+(OR x (MOVVconst [c])) && is32Bit(c) => (ORconst [c] x)
+(XOR x (MOVVconst [c])) && is32Bit(c) => (XORconst [c] x)
+(NOR x (MOVVconst [c])) && is32Bit(c) => (NORconst [c] x)
+
+(SLLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRLV _ (MOVVconst [c])) && uint64(c)>=64 => (MOVVconst [0])
+(SRAV x (MOVVconst [c])) && uint64(c)>=64 => (SRAVconst x [63])
+(SLLV x (MOVVconst [c])) => (SLLVconst x [c])
+(SRLV x (MOVVconst [c])) => (SRLVconst x [c])
+(SRAV x (MOVVconst [c])) => (SRAVconst x [c])
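+// e.g. an arithmetic right shift by a constant >= 64 is clamped to 63, which fills the
+// result with copies of the sign bit, matching Go's semantics for over-wide shifts.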
+
+(SGT (MOVVconst [c]) x) && is32Bit(c) => (SGTconst [c] x)
+(SGTU (MOVVconst [c]) x) && is32Bit(c) => (SGTUconst [c] x)
+
+// mul by constant
+(Select1 (MULVU x (MOVVconst [-1]))) => (NEGV x)
+(Select1 (MULVU _ (MOVVconst [0]))) => (MOVVconst [0])
+(Select1 (MULVU x (MOVVconst [1]))) => x
+(Select1 (MULVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SLLVconst [log64(c)] x)
+
+// div by constant
+(Select1 (DIVVU x (MOVVconst [1]))) => x
+(Select1 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (SRLVconst [log64(c)] x)
+(Select0 (DIVVU _ (MOVVconst [1]))) => (MOVVconst [0]) // mod
+(Select0 (DIVVU x (MOVVconst [c]))) && isPowerOfTwo64(c) => (ANDconst [c-1] x) // mod
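+// e.g. unsigned division by 8 becomes x>>3 and the matching remainder becomes x&7.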
+
+// generic simplifications
+(ADDV x (NEGV y)) => (SUBV x y)
+(SUBV x x) => (MOVVconst [0])
+(SUBV (MOVVconst [0]) x) => (NEGV x)
+(AND x x) => x
+(OR x x) => x
+(XOR x x) => (MOVVconst [0])
+
+// remove redundant *const ops
+(ADDVconst [0] x) => x
+(SUBVconst [0] x) => x
+(ANDconst [0] _) => (MOVVconst [0])
+(ANDconst [-1] x) => x
+(ORconst [0] x) => x
+(ORconst [-1] _) => (MOVVconst [-1])
+(XORconst [0] x) => x
+(XORconst [-1] x) => (NORconst [0] x)
+
+// generic constant folding
+(ADDVconst [c] (MOVVconst [d])) => (MOVVconst [c+d])
+(ADDVconst [c] (ADDVconst [d] x)) && is32Bit(c+d) => (ADDVconst [c+d] x)
+(ADDVconst [c] (SUBVconst [d] x)) && is32Bit(c-d) => (ADDVconst [c-d] x)
+(SUBVconst [c] (MOVVconst [d])) => (MOVVconst [d-c])
+(SUBVconst [c] (SUBVconst [d] x)) && is32Bit(-c-d) => (ADDVconst [-c-d] x)
+(SUBVconst [c] (ADDVconst [d] x)) && is32Bit(-c+d) => (ADDVconst [-c+d] x)
+(SLLVconst [c] (MOVVconst [d])) => (MOVVconst [d<<uint64(c)])
+(SRLVconst [c] (MOVVconst [d])) => (MOVVconst [int64(uint64(d)>>uint64(c))])
+(SRAVconst [c] (MOVVconst [d])) => (MOVVconst [d>>uint64(c)])
+(Select1 (MULVU (MOVVconst [c]) (MOVVconst [d]))) => (MOVVconst [c*d])
+(Select1 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c/d])
+(Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)/uint64(d))])
+(Select0 (DIVV (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [c%d]) // mod
+(Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d]))) && d != 0 => (MOVVconst [int64(uint64(c)%uint64(d))]) // mod
+(ANDconst [c] (MOVVconst [d])) => (MOVVconst [c&d])
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (MOVVconst [d])) => (MOVVconst [c|d])
+(ORconst [c] (ORconst [d] x)) && is32Bit(c|d) => (ORconst [c|d] x)
+(XORconst [c] (MOVVconst [d])) => (MOVVconst [c^d])
+(XORconst [c] (XORconst [d] x)) && is32Bit(c^d) => (XORconst [c^d] x)
+(NORconst [c] (MOVVconst [d])) => (MOVVconst [^(c|d)])
+(NEGV (MOVVconst [c])) => (MOVVconst [-c])
+(MOVBreg (MOVVconst [c])) => (MOVVconst [int64(int8(c))])
+(MOVBUreg (MOVVconst [c])) => (MOVVconst [int64(uint8(c))])
+(MOVHreg (MOVVconst [c])) => (MOVVconst [int64(int16(c))])
+(MOVHUreg (MOVVconst [c])) => (MOVVconst [int64(uint16(c))])
+(MOVWreg (MOVVconst [c])) => (MOVVconst [int64(int32(c))])
+(MOVWUreg (MOVVconst [c])) => (MOVVconst [int64(uint32(c))])
+(MOVVreg (MOVVconst [c])) => (MOVVconst [c])
+(LoweredAtomicStore(32|64) ptr (MOVVconst [0]) mem) => (LoweredAtomicStorezero(32|64) ptr mem)
+(LoweredAtomicAdd32 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+(LoweredAtomicAdd64 ptr (MOVVconst [c]) mem) && is32Bit(c) => (LoweredAtomicAddconst64 [c] ptr mem)
+
+// constant comparisons
+(SGTconst [c] (MOVVconst [d])) && c>d => (MOVVconst [1])
+(SGTconst [c] (MOVVconst [d])) && c<=d => (MOVVconst [0])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)>uint64(d) => (MOVVconst [1])
+(SGTUconst [c] (MOVVconst [d])) && uint64(c)<=uint64(d) => (MOVVconst [0])
+
+// other known comparisons
+(SGTconst [c] (MOVBreg _)) && 0x7f < c => (MOVVconst [1])
+(SGTconst [c] (MOVBreg _)) && c <= -0x80 => (MOVVconst [0])
+(SGTconst [c] (MOVBUreg _)) && 0xff < c => (MOVVconst [1])
+(SGTconst [c] (MOVBUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVBUreg _)) && 0xff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && 0x7fff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHreg _)) && c <= -0x8000 => (MOVVconst [0])
+(SGTconst [c] (MOVHUreg _)) && 0xffff < c => (MOVVconst [1])
+(SGTconst [c] (MOVHUreg _)) && c < 0 => (MOVVconst [0])
+(SGTUconst [c] (MOVHUreg _)) && 0xffff < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (MOVWUreg _)) && c < 0 => (MOVVconst [0])
+(SGTconst [c] (ANDconst [m] _)) && 0 <= m && m < c => (MOVVconst [1])
+(SGTUconst [c] (ANDconst [m] _)) && uint64(m) < uint64(c) => (MOVVconst [1])
+(SGTconst [c] (SRLVconst _ [d])) && 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+(SGTUconst [c] (SRLVconst _ [d])) && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c) => (MOVVconst [1])
+
+// absorb constants into branches
+(EQ (MOVVconst [0]) yes no) => (First yes no)
+(EQ (MOVVconst [c]) yes no) && c != 0 => (First no yes)
+(NE (MOVVconst [0]) yes no) => (First no yes)
+(NE (MOVVconst [c]) yes no) && c != 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c < 0 => (First yes no)
+(LTZ (MOVVconst [c]) yes no) && c >= 0 => (First no yes)
+(LEZ (MOVVconst [c]) yes no) && c <= 0 => (First yes no)
+(LEZ (MOVVconst [c]) yes no) && c > 0 => (First no yes)
+(GTZ (MOVVconst [c]) yes no) && c > 0 => (First yes no)
+(GTZ (MOVVconst [c]) yes no) && c <= 0 => (First no yes)
+(GEZ (MOVVconst [c]) yes no) && c >= 0 => (First yes no)
+(GEZ (MOVVconst [c]) yes no) && c < 0 => (First no yes)
+
+// fold readonly sym load
+(MOVBload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read8(sym, int64(off)))])
+(MOVHload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVWload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+(MOVVload [off] {sym} (SB) _) && symIsRO(sym) => (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
diff --git a/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
new file mode 100644
index 0000000..7b18c42
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPS64Ops.go
@@ -0,0 +1,485 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+//   In this case the assembler expands it to multiple instructions and uses the
+//   tmp register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// V (vlong) = 64 bit
+// WU (word) = 32 bit unsigned
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand-coding regmasks.
+var regNamesMIPS64 = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ // R23 = REGTMP not used in regalloc
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ // R28 = REGSB not used in regalloc
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // aka REGLINK
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
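+	// For example, buildReg("R1 R2") yields 1<<1 | 1<<2 == 0x6, since bit i of the
+	// mask corresponds to regNamesMIPS64[i] and R0 occupies bit 0.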
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ //fp1flags = regInfo{inputs: []regMask{fp}}
+ //fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ //gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
+ ops := []opData{
+ // binary ops
+ {name: "ADDV", argLength: 2, reg: gp21, asm: "ADDVU", commutative: true}, // arg0 + arg1
+ {name: "ADDVconst", argLength: 1, reg: gp11sp, asm: "ADDVU", aux: "Int64"}, // arg0 + auxInt. auxInt is 32-bit, also in other *const ops.
+ {name: "SUBV", argLength: 2, reg: gp21, asm: "SUBVU"}, // arg0 - arg1
+ {name: "SUBVconst", argLength: 1, reg: gp11, asm: "SUBVU", aux: "Int64"}, // arg0 - auxInt
+ {name: "MULV", argLength: 2, reg: gp2hilo, asm: "MULV", commutative: true, typ: "(Int64,Int64)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULVU", argLength: 2, reg: gp2hilo, asm: "MULVU", commutative: true, typ: "(UInt64,UInt64)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIVV", argLength: 2, reg: gp2hilo, asm: "DIVV", typ: "(Int64,Int64)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+		{name: "DIVVU", argLength: 2, reg: gp2hilo, asm: "DIVVU", typ: "(UInt64,UInt64)"}, // arg0 / arg1, unsigned, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt64"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", typ: "UInt64"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int64"}, // ^(arg0 | auxInt)
+
+ {name: "NEGV", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+
+ // shifts
+ {name: "SLLV", argLength: 2, reg: gp21, asm: "SLLV"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLLVconst", argLength: 1, reg: gp11, asm: "SLLV", aux: "Int64"}, // arg0 << auxInt
+ {name: "SRLV", argLength: 2, reg: gp21, asm: "SRLV"}, // arg0 >> arg1, unsigned, shift amount is mod 64
+ {name: "SRLVconst", argLength: 1, reg: gp11, asm: "SRLV", aux: "Int64"}, // arg0 >> auxInt, unsigned
+ {name: "SRAV", argLength: 2, reg: gp21, asm: "SRAV"}, // arg0 >> arg1, signed, shift amount is mod 64
+ {name: "SRAVconst", argLength: 1, reg: gp11, asm: "SRAV", aux: "Int64"}, // arg0 >> auxInt, signed
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int64", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVVconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVV", typ: "UInt64", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVVaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVV", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVWU", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVV", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVVstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVVstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of zero to arg0 + auxInt + aux. ar12=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, unsign-extended from word
+ {name: "MOVVreg", argLength: 1, reg: gp11, asm: "MOVV"}, // move from arg0
+
+ {name: "MOVVnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "MOVVF", argLength: 1, reg: fp11, asm: "MOVVF"}, // int64 -> float32
+ {name: "MOVVD", argLength: 1, reg: fp11, asm: "MOVVD"}, // int64 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "TRUNCFV", argLength: 1, reg: fp11, asm: "TRUNCFV"}, // float32 -> int64
+ {name: "TRUNCDV", argLength: 1, reg: fp11, asm: "TRUNCDV"}, // float64 -> int64
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // returns mem
+ // R1 aka mips.REGRT1 changed as side effect
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ clobbers: buildReg("R1 R31"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1")},
+ clobbers: buildReg("R1 R2 R31"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV R0, 8(R1)
+ // ADDV $8, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBV $8, R1
+ // MOVV 8(R1), Rtmp
+ // MOVV Rtmp, (R2)
+ // ADDV $8, R1
+ // ADDV $8, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // atomic stores.
+ // store arg1 to arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ // store zero to arg0. arg1=mem. returns memory.
+ {name: "LoweredAtomicStorezero32", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero64", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVV Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDV Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDV Rarg1, Rout
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ // *arg0 += auxint. arg1=mem. returns <new content of *arg0, memory>. auxint is 32-bit.
+ {name: "LoweredAtomicAddconst32", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst64", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int64", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVV $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVV Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call,
+ // and also clobbers R23 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ gpg) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS64",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS64,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/MIPSOps.go b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
new file mode 100644
index 0000000..523847b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/MIPSOps.go
@@ -0,0 +1,442 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - Unused portions of AuxInt are filled by sign-extending the used portion.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R23).
+
+// Suffixes encode the bit width of various instructions.
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// HU = 16 bit unsigned
+// B (byte) = 8 bit
+// BU = 8 bit unsigned
+// F (float) = 32 bit float
+// D (double) = 64 bit float
+
+// Note: registers not used in regalloc are not included in this list,
+// so that regmask stays within int64.
+// Be careful when hand coding regmasks.
+var regNamesMIPS = []string{
+ "R0", // constant 0
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ // R23 = REGTMP, not allocatable
+ "R24",
+ "R25",
+ // R26 reserved by kernel
+ // R27 reserved by kernel
+ "R28",
+ "SP", // aka R29
+ "g", // aka R30
+ "R31", // REGLINK
+
+ // odd FP registers contain high parts of 64-bit FP values
+ "F0",
+ "F2",
+ "F4",
+ "F6",
+ "F8",
+ "F10",
+ "F12",
+ "F14",
+ "F16",
+ "F18",
+ "F20",
+ "F22",
+ "F24",
+ "F26",
+ "F28",
+ "F30",
+
+ "HI", // high bits of multiplication
+ "LO", // low bits of multiplication
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesMIPS) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesMIPS {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
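+ // For example, buildReg("R1 R2") sets bits 1 and 2 of the mask, since R1 and
+ // R2 sit at indices 1 and 2 of regNamesMIPS above.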
+
+ // Common individual register masks
+ var (
+ gp = buildReg("R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31")
+ gpg = gp | buildReg("g")
+ gpsp = gp | buildReg("SP")
+ gpspg = gpg | buildReg("SP")
+ gpspsbg = gpspg | buildReg("SB")
+ fp = buildReg("F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30")
+ lo = buildReg("LO")
+ hi = buildReg("HI")
+ callerSave = gp | fp | lo | hi | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ )
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpg}, outputs: []regMask{gp}}
+ gp11sp = regInfo{inputs: []regMask{gpspg}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gp, gp, gp}, outputs: []regMask{gp}}
+ gp2hilo = regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{hi, lo}}
+ gpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsbg, gpg}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbg, gpg}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gpspsbg, gpg, gpg}, outputs: []regMask{gp}}
+ gpstore0 = regInfo{inputs: []regMask{gpspsbg}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gpspsbg, fp}}
+ readflags = regInfo{inputs: nil, outputs: []regMask{gp}}
+ )
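+ // The regInfo names loosely encode their shape: e.g. gp21 takes two GP inputs
+ // and produces one GP output, gp2hilo writes its results to HI/LO, and so on.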
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADDU", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADDU", aux: "Int32"}, // arg0 + auxInt
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUBU"}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUBU", aux: "Int32"}, // arg0 - auxInt
+ {name: "MUL", argLength: 2, reg: regInfo{inputs: []regMask{gpg, gpg}, outputs: []regMask{gp}, clobbers: hi | lo}, asm: "MUL", commutative: true}, // arg0 * arg1
+ {name: "MULT", argLength: 2, reg: gp2hilo, asm: "MUL", commutative: true, typ: "(Int32,Int32)"}, // arg0 * arg1, signed, results hi,lo
+ {name: "MULTU", argLength: 2, reg: gp2hilo, asm: "MULU", commutative: true, typ: "(UInt32,UInt32)"}, // arg0 * arg1, unsigned, results hi,lo
+ {name: "DIV", argLength: 2, reg: gp2hilo, asm: "DIV", typ: "(Int32,Int32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+ {name: "DIVU", argLength: 2, reg: gp2hilo, asm: "DIVU", typ: "(UInt32,UInt32)"}, // arg0 / arg1, signed, results hi=arg0%arg1,lo=arg0/arg1
+
+ {name: "ADDF", argLength: 2, reg: fp21, asm: "ADDF", commutative: true}, // arg0 + arg1
+ {name: "ADDD", argLength: 2, reg: fp21, asm: "ADDD", commutative: true}, // arg0 + arg1
+ {name: "SUBF", argLength: 2, reg: fp21, asm: "SUBF"}, // arg0 - arg1
+ {name: "SUBD", argLength: 2, reg: fp21, asm: "SUBD"}, // arg0 - arg1
+ {name: "MULF", argLength: 2, reg: fp21, asm: "MULF", commutative: true}, // arg0 * arg1
+ {name: "MULD", argLength: 2, reg: fp21, asm: "MULD", commutative: true}, // arg0 * arg1
+ {name: "DIVF", argLength: 2, reg: fp21, asm: "DIVF"}, // arg0 / arg1
+ {name: "DIVD", argLength: 2, reg: fp21, asm: "DIVD"}, // arg0 / arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int32"}, // arg0 & auxInt
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int32"}, // arg0 | auxInt
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, typ: "UInt32"}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int32", typ: "UInt32"}, // arg0 ^ auxInt
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0 | arg1)
+ {name: "NORconst", argLength: 1, reg: gp11, asm: "NOR", aux: "Int32"}, // ^(arg0 | auxInt)
+
+ {name: "NEG", argLength: 1, reg: gp11}, // -arg0
+ {name: "NEGF", argLength: 1, reg: fp11, asm: "NEGF"}, // -arg0, float32
+ {name: "NEGD", argLength: 1, reg: fp11, asm: "NEGD"}, // -arg0, float64
+ {name: "SQRTD", argLength: 1, reg: fp11, asm: "SQRTD"}, // sqrt(arg0), float64
+ {name: "SQRTF", argLength: 1, reg: fp11, asm: "SQRTF"}, // sqrt(arg0), float32
+
+ // shifts
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << arg1, shift amount is mod 32
+ {name: "SLLconst", argLength: 1, reg: gp11, asm: "SLL", aux: "Int32"}, // arg0 << auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> arg1, unsigned, shift amount is mod 32
+ {name: "SRLconst", argLength: 1, reg: gp11, asm: "SRL", aux: "Int32"}, // arg0 >> auxInt, shift amount must be 0 through 31 inclusive
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> arg1, signed, shift amount is mod 32
+ {name: "SRAconst", argLength: 1, reg: gp11, asm: "SRA", aux: "Int32"}, // arg0 >> auxInt, signed, shift amount must be 0 through 31 inclusive
+
+ {name: "CLZ", argLength: 1, reg: gp11, asm: "CLZ"},
+
+ // comparisons
+ {name: "SGT", argLength: 2, reg: gp21, asm: "SGT", typ: "Bool"}, // 1 if arg0 > arg1 (signed), 0 otherwise
+ {name: "SGTconst", argLength: 1, reg: gp11, asm: "SGT", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (signed), 0 otherwise
+ {name: "SGTzero", argLength: 1, reg: gp11, asm: "SGT", typ: "Bool"}, // 1 if arg0 > 0 (signed), 0 otherwise
+ {name: "SGTU", argLength: 2, reg: gp21, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > arg1 (unsigned), 0 otherwise
+ {name: "SGTUconst", argLength: 1, reg: gp11, asm: "SGTU", aux: "Int32", typ: "Bool"}, // 1 if auxInt > arg0 (unsigned), 0 otherwise
+ {name: "SGTUzero", argLength: 1, reg: gp11, asm: "SGTU", typ: "Bool"}, // 1 if arg0 > 0 (unsigned), 0 otherwise
+
+ {name: "CMPEQF", argLength: 2, reg: fp2flags, asm: "CMPEQF", typ: "Flags"}, // flags=true if arg0 = arg1, float32
+ {name: "CMPEQD", argLength: 2, reg: fp2flags, asm: "CMPEQD", typ: "Flags"}, // flags=true if arg0 = arg1, float64
+ {name: "CMPGEF", argLength: 2, reg: fp2flags, asm: "CMPGEF", typ: "Flags"}, // flags=true if arg0 >= arg1, float32
+ {name: "CMPGED", argLength: 2, reg: fp2flags, asm: "CMPGED", typ: "Flags"}, // flags=true if arg0 >= arg1, float64
+ {name: "CMPGTF", argLength: 2, reg: fp2flags, asm: "CMPGTF", typ: "Flags"}, // flags=true if arg0 > arg1, float32
+ {name: "CMPGTD", argLength: 2, reg: fp2flags, asm: "CMPGTD", typ: "Flags"}, // flags=true if arg0 > arg1, float64
+
+ // moves
+ {name: "MOVWconst", argLength: 0, reg: gp01, aux: "Int32", asm: "MOVW", typ: "UInt32", rematerializeable: true}, // auxint
+ {name: "MOVFconst", argLength: 0, reg: fp01, aux: "Float32", asm: "MOVF", typ: "Float32", rematerializeable: true}, // auxint as 64-bit float, convert to 32-bit float
+ {name: "MOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "MOVD", typ: "Float64", rematerializeable: true}, // auxint as 64-bit float
+
+ {name: "MOVWaddr", argLength: 1, reg: regInfo{inputs: []regMask{buildReg("SP") | buildReg("SB")}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVW", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB
+
+ {name: "MOVBload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVB", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVBUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVBU", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVH", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHUload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVHU", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWload", argLength: 2, reg: gpload, aux: "SymOff", asm: "MOVW", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVFload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVF", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVDload", argLength: 2, reg: fpload, aux: "SymOff", asm: "MOVD", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load from arg0 + auxInt + aux. arg1=mem.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVHstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVWstore", argLength: 3, reg: gpstore, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVFstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVF", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+ {name: "MOVDstore", argLength: 3, reg: fpstore, aux: "SymOff", asm: "MOVD", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of arg1 to arg0 + auxInt + aux. arg2=mem.
+
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 1 byte of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes of zero to arg0 + auxInt + aux. arg1=mem.
+
+ // conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, unsign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, unsign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0
+
+ {name: "MOVWnop", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // conditional move on zero (returns arg1 if arg2 is 0, otherwise arg0)
+ // order of parameters is reversed so we can use resultInArg0 (OpCMOVZ result arg1 arg2 -> CMOVZ arg2reg, arg1reg, resultReg)
+ {name: "CMOVZ", argLength: 3, reg: gp31, asm: "CMOVZ", resultInArg0: true},
+ {name: "CMOVZzero", argLength: 2, reg: regInfo{inputs: []regMask{gp, gpg}, outputs: []regMask{gp}}, asm: "CMOVZ", resultInArg0: true},
+
+ {name: "MOVWF", argLength: 1, reg: fp11, asm: "MOVWF"}, // int32 -> float32
+ {name: "MOVWD", argLength: 1, reg: fp11, asm: "MOVWD"}, // int32 -> float64
+ {name: "TRUNCFW", argLength: 1, reg: fp11, asm: "TRUNCFW"}, // float32 -> int32
+ {name: "TRUNCDW", argLength: 1, reg: fp11, asm: "TRUNCDW"}, // float64 -> int32
+ {name: "MOVFD", argLength: 1, reg: fp11, asm: "MOVFD"}, // float32 -> float64
+ {name: "MOVDF", argLength: 1, reg: fp11, asm: "MOVDF"}, // float64 -> float32
+
+ // function calls
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{gpsp, buildReg("R22"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // atomic ops
+
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ // SYNC
+ // MOV(B|W) (Rarg0), Rout
+ // SYNC
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // store arg1 to arg0. arg2=mem. returns memory.
+ // SYNC
+ // MOV(B|W) Rarg1, (Rarg0)
+ // SYNC
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStorezero", argLength: 2, reg: gpstore0, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange.
+ // store arg1 to arg0. arg2=mem. returns <old content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // MOVW Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicExchange", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ // SYNC
+ // LL (Rarg0), Rout
+ // ADDU Rarg1, Rout, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ // ADDU Rarg1, Rout
+ {name: "LoweredAtomicAdd", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAddconst", argLength: 2, reg: regInfo{inputs: []regMask{gpspsbg}, outputs: []regMask{gp}}, aux: "Int32", resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // MOVW $0, Rout
+ // LL (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 4(PC)
+ // MOVW Rarg2, Rout
+ // SC Rout, (Rarg0)
+ // BEQ Rout, -4(PC)
+ // SYNC
+ {name: "LoweredAtomicCas", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // atomic and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory.
+ // SYNC
+ // LL (Rarg0), Rtmp
+ // AND Rarg1, Rtmp
+ // SC Rtmp, (Rarg0)
+ // BEQ Rtmp, -3(PC)
+ // SYNC
+ {name: "LoweredAtomicAnd", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicOr", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R1, changed as side effect)
+ // arg1 = address of the last element to zero
+ // arg2 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW R0, 4(R1)
+ // ADDU $4, R1
+ // BNE Rarg1, R1, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int32",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gp},
+ clobbers: buildReg("R1"),
+ },
+ faultOnNilArg0: true,
+ },
+
+ // large or unaligned move
+ // arg0 = address of dst memory (in R2, changed as side effect)
+ // arg1 = address of src memory (in R1, changed as side effect)
+ // arg2 = address of the last element of src
+ // arg3 = mem
+ // auxint = alignment
+ // returns mem
+ // SUBU $4, R1
+ // MOVW 4(R1), Rtmp
+ // MOVW Rtmp, (R2)
+ // ADDU $4, R1
+ // ADDU $4, R2
+ // BNE Rarg2, R1, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int32",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R2"), buildReg("R1"), gp},
+ clobbers: buildReg("R1 R2"),
+ },
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // pseudo-ops
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gpg}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem.
+
+ {name: "FPFlagTrue", argLength: 1, reg: readflags}, // bool, true if FP flag is true
+ {name: "FPFlagFalse", argLength: 1, reg: readflags}, // bool, true if FP flag is false
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R22 (mips.REGCTXT, the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R22")}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g and g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R31 (LR) because it's a call,
+ // and also clobbers R23 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ gpg) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ // Extend ops are the same as Bounds ops except the indexes are 64-bit.
+ {name: "LoweredPanicExtendA", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r3, r4}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendB", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r2, r3}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ {name: "LoweredPanicExtendC", argLength: 4, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r1, r2}}, typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory. AuxInt contains report code (see PanicExtend in genericOps.go).
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LTZ", controls: 1}, // < 0
+ {name: "LEZ", controls: 1}, // <= 0
+ {name: "GTZ", controls: 1}, // > 0
+ {name: "GEZ", controls: 1}, // >= 0
+ {name: "FPT", controls: 1}, // FP flag is true
+ {name: "FPF", controls: 1}, // FP flag is false
+ }
+
+ archs = append(archs, arch{
+ name: "MIPS",
+ pkg: "cmd/internal/obj/mips",
+ genfile: "../../mips/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesMIPS,
+ gpregmask: gp,
+ fpregmask: fp,
+ specialregmask: hi | lo,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R31"]),
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64.rules b/src/cmd/compile/internal/ssa/gen/PPC64.rules
new file mode 100644
index 0000000..f83ed78
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64.rules
@@ -0,0 +1,1485 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(Ptr|64|32|16|8) ...) => (ADD ...)
+(Add64F ...) => (FADD ...)
+(Add32F ...) => (FADDS ...)
+
+(Sub(Ptr|64|32|16|8) ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUB ...)
+
+// Combine 64 bit integer multiply and adds
+(ADD l:(MULLD x y) z) && buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l) => (MADDLD x y z)
+
+(Mod16 x y) => (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+(Mod64 x y) && buildcfg.GOPPC64 >= 9 => (MODSD x y)
+(Mod64 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVD x y)))
+(Mod64u x y) && buildcfg.GOPPC64 >= 9 => (MODUD x y)
+(Mod64u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLD y (DIVDU x y)))
+(Mod32 x y) && buildcfg.GOPPC64 >= 9 => (MODSW x y)
+(Mod32 x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVW x y)))
+(Mod32u x y) && buildcfg.GOPPC64 >= 9 => (MODUW x y)
+(Mod32u x y) && buildcfg.GOPPC64 <= 8 => (SUB x (MULLW y (DIVWU x y)))
+
+// (x + y) / 2 with x>=y => (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
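+// (with x >= y, the (x-y)/2 + y form cannot overflow the 64-bit intermediate, unlike x + y)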
+
+(Add64carry ...) => (LoweredAdd64Carry ...)
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+
+(Div64 [false] x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+(Div32 [false] x y) => (DIVW x y)
+(Div32u ...) => (DIVWU ...)
+(Div16 [false] x y) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul(64|64u|32|32u) ...) => (MULH(D|DU|W|WU) ...)
+
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+// Lowering float <=> int
+(Cvt32to32F x) => (FCFIDS (MTVSRD (SignExt32to64 x)))
+(Cvt32to64F x) => (FCFID (MTVSRD (SignExt32to64 x)))
+(Cvt64to32F x) => (FCFIDS (MTVSRD x))
+(Cvt64to64F x) => (FCFID (MTVSRD x))
+
+(Cvt32Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt32Fto64 x) => (MFVSRD (FCTIDZ x))
+(Cvt64Fto32 x) => (MFVSRD (FCTIWZ x))
+(Cvt64Fto64 x) => (MFVSRD (FCTIDZ x))
+
+(Cvt32Fto64F ...) => (Copy ...) // Note v will have the wrong type for patterns dependent on Float32/Float64
+(Cvt64Fto32F ...) => (FRSP ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+(Sqrt ...) => (FSQRT ...)
+(Sqrt32 ...) => (FSQRTS ...)
+(Floor ...) => (FFLOOR ...)
+(Ceil ...) => (FCEIL ...)
+(Trunc ...) => (FTRUNC ...)
+(Round ...) => (FROUND ...)
+(Copysign x y) => (FCPSGN y x)
+(Abs ...) => (FABS ...)
+(FMA ...) => (FMADD ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Trunc(16|32|64)to8 <t> x) && isSigned(t) => (MOVBreg x)
+(Trunc(16|32|64)to8 x) => (MOVBZreg x)
+(Trunc(32|64)to16 <t> x) && isSigned(t) => (MOVHreg x)
+(Trunc(32|64)to16 x) => (MOVHZreg x)
+(Trunc64to32 <t> x) && isSigned(t) => (MOVWreg x)
+(Trunc64to32 x) => (MOVWZreg x)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+// Constant folding
+(FABS (FMOVDconst [x])) => (FMOVDconst [math.Abs(x)])
+(FSQRT (FMOVDconst [x])) && x >= 0 => (FMOVDconst [math.Sqrt(x)])
+(FFLOOR (FMOVDconst [x])) => (FMOVDconst [math.Floor(x)])
+(FCEIL (FMOVDconst [x])) => (FMOVDconst [math.Ceil(x)])
+(FTRUNC (FMOVDconst [x])) => (FMOVDconst [math.Trunc(x)])
+
+// Rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 x (MOVDconst [c])) => (ROTLWconst [c&31] x)
+(RotateLeft64 x (MOVDconst [c])) => (ROTLconst [c&63] x)
+
+// Rotate generation with const shift
+(ADD (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+( OR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+(XOR (SLDconst x [c]) (SRDconst x [d])) && d == 64-c => (ROTLconst [c] x)
+
+(ADD (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+( OR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
+(XOR (SLWconst x [c]) (SRWconst x [d])) && d == 32-c => (ROTLWconst [c] x)
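+// (These fire because a left shift by c and a right shift by 32-c (or 64-c) cover
+// disjoint bits of x, so OR, ADD and XOR of the two halves all equal the rotate.)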
+
+// Rotate generation with non-const shift
+// these match patterns from math/bits/RotateLeft[32|64], but there could be others
+(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+(XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))) => (ROTL x y)
+
+
+(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+(XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))) => (ROTLW x y)
+
+
+// Lowering rotates
+(RotateLeft32 x y) => (ROTLW x y)
+(RotateLeft64 x y) => (ROTL x y)
+
+// Constant rotate generation
+(ROTLW x (MOVDconst [c])) => (ROTLWconst x [c&31])
+(ROTL x (MOVDconst [c])) => (ROTLconst x [c&63])
+
+// Combine rotate and mask operations
+(ANDconst [m] (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(AND (MOVDconst [m]) (ROTLWconst [r] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+(ANDconst [m] (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+(AND (MOVDconst [m]) (ROTLW x r)) && isPPC64WordRotateMask(m) => (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+
+// Note, any rotated word bitmask is still a valid word bitmask.
+(ROTLWconst [r] (AND (MOVDconst [m]) x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+(ROTLWconst [r] (ANDconst [m] x)) && isPPC64WordRotateMask(m) => (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+
+(ANDconst [m] (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(ANDconst [m] (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64RShiftMask(m,s,32) == 0 => (MOVDconst [0])
+(AND (MOVDconst [m]) (SRWconst x [s])) && mergePPC64AndSrwi(m,s) != 0 => (RLWINM [mergePPC64AndSrwi(m,s)] x)
+
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (ANDconst [m] x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64RShiftMask(m>>uint(s),s,32) == 0 => (MOVDconst [0])
+(SRWconst (AND (MOVDconst [m]) x) [s]) && mergePPC64AndSrwi(m>>uint(s),s) != 0 => (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+
+// Merge shift right + shift left and clear left (e.g. for a table lookup)
+(CLRLSLDI [c] (SRWconst [s] x)) && mergePPC64ClrlsldiSrw(int64(c),s) != 0 => (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+(SLDconst [l] (SRWconst [r] x)) && mergePPC64SldiSrw(l,r) != 0 => (RLWINM [mergePPC64SldiSrw(l,r)] x)
+// The following reduction shows up frequently too, e.g. b[(x>>14)&0xFF]
+(CLRLSLDI [c] i:(RLWINM [s] x)) && mergePPC64ClrlsldiRlwinm(c,s) != 0 => (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+
+// large constant shifts
+(Lsh64x64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Rsh64Ux64 _ (MOVDconst [c])) && uint64(c) >= 64 => (MOVDconst [0])
+(Lsh32x64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Rsh32Ux64 _ (MOVDconst [c])) && uint64(c) >= 32 => (MOVDconst [0])
+(Lsh16x64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Rsh16Ux64 _ (MOVDconst [c])) && uint64(c) >= 16 => (MOVDconst [0])
+(Lsh8x64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
+(Rsh8Ux64 _ (MOVDconst [c])) && uint64(c) >= 8 => (MOVDconst [0])
+
+// large constant signed right shifts: only the sign bit remains
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) >= 64 => (SRADconst x [63])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) >= 32 => (SRAWconst x [63])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) >= 16 => (SRAWconst (SignExt16to32 x) [63])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) >= 8 => (SRAWconst (SignExt8to32 x) [63])
+
+// constant shifts
+(Lsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SLDconst x [c])
+(Rsh64x64 x (MOVDconst [c])) && uint64(c) < 64 => (SRADconst x [c])
+(Rsh64Ux64 x (MOVDconst [c])) && uint64(c) < 64 => (SRDconst x [c])
+(Lsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SLWconst x [c])
+(Rsh32x64 x (MOVDconst [c])) && uint64(c) < 32 => (SRAWconst x [c])
+(Rsh32Ux64 x (MOVDconst [c])) && uint64(c) < 32 => (SRWconst x [c])
+(Lsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SLWconst x [c])
+(Rsh16x64 x (MOVDconst [c])) && uint64(c) < 16 => (SRAWconst (SignExt16to32 x) [c])
+(Rsh16Ux64 x (MOVDconst [c])) && uint64(c) < 16 => (SRWconst (ZeroExt16to32 x) [c])
+(Lsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SLWconst x [c])
+(Rsh8x64 x (MOVDconst [c])) && uint64(c) < 8 => (SRAWconst (SignExt8to32 x) [c])
+(Rsh8Ux64 x (MOVDconst [c])) && uint64(c) < 8 => (SRWconst (ZeroExt8to32 x) [c])
+
+(Lsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SLDconst x [c&63])
+(Rsh64x32 x (MOVDconst [c])) && uint32(c) < 64 => (SRADconst x [c&63])
+(Rsh64Ux32 x (MOVDconst [c])) && uint32(c) < 64 => (SRDconst x [c&63])
+(Lsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SLWconst x [c&31])
+(Rsh32x32 x (MOVDconst [c])) && uint32(c) < 32 => (SRAWconst x [c&31])
+(Rsh32Ux32 x (MOVDconst [c])) && uint32(c) < 32 => (SRWconst x [c&31])
+(Lsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SLWconst x [c&31])
+(Rsh16x32 x (MOVDconst [c])) && uint32(c) < 16 => (SRAWconst (SignExt16to32 x) [c&15])
+(Rsh16Ux32 x (MOVDconst [c])) && uint32(c) < 16 => (SRWconst (ZeroExt16to32 x) [c&15])
+(Lsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SLWconst x [c&7])
+(Rsh8x32 x (MOVDconst [c])) && uint32(c) < 8 => (SRAWconst (SignExt8to32 x) [c&7])
+(Rsh8Ux32 x (MOVDconst [c])) && uint32(c) < 8 => (SRWconst (ZeroExt8to32 x) [c&7])
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
+
+// non-constant rotates
+// These are subexpressions found in statements that can become rotates
+// In these cases the shift count is known to be < 64, so the more complicated expressions
+// with Mask & Carry are not needed.
+(Lsh64x64 x (AND y (MOVDconst [63]))) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Lsh64x64 x (ANDconst <typ.Int64> [63] y)) => (SLD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (AND y (MOVDconst [63]))) => (SRD x (ANDconst <typ.Int64> [63] y))
+(Rsh64Ux64 x (ANDconst <typ.UInt> [63] y)) => (SRD x (ANDconst <typ.UInt> [63] y))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (AND y (MOVDconst [63]))) => (SRAD x (ANDconst <typ.Int64> [63] y))
+(Rsh64x64 x (ANDconst <typ.UInt> [63] y)) => (SRAD x (ANDconst <typ.UInt> [63] y))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+(Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63])))) => (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+
+(Lsh64x64 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64x64 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux64 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
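+// (ISEL [0] selects y when y <u 64 and -1 otherwise; PPC64 shifts look at a 7-bit
+// count, so counts of 64 and up give 0 (or all sign bits for SRAD), which matches
+// Go's over-shift semantics.)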
+
+(Lsh32x64 x (AND y (MOVDconst [31]))) => (SLW x (ANDconst <typ.Int32> [31] y))
+(Lsh32x64 x (ANDconst <typ.Int32> [31] y)) => (SLW x (ANDconst <typ.Int32> [31] y))
+
+(Rsh32Ux64 x (AND y (MOVDconst [31]))) => (SRW x (ANDconst <typ.Int32> [31] y))
+(Rsh32Ux64 x (ANDconst <typ.UInt> [31] y)) => (SRW x (ANDconst <typ.UInt> [31] y))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x (AND y (MOVDconst [31]))) => (SRAW x (ANDconst <typ.Int32> [31] y))
+(Rsh32x64 x (ANDconst <typ.UInt> [31] y)) => (SRAW x (ANDconst <typ.UInt> [31] y))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+(Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31])))) => (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+
+(Rsh32x64 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux64 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x64 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux64 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x64 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux64 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x64 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+(Rsh64x32 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh64Ux32 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Lsh64x32 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+(Rsh32x32 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Rsh32Ux32 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+(Lsh32x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+
+(Rsh16x32 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Rsh16Ux32 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+(Lsh16x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+
+(Rsh8x32 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Rsh8Ux32 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+(Lsh8x32 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+
+
+(Rsh64x16 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Rsh64Ux16 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+(Lsh64x16 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+
+(Rsh32x16 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Rsh32Ux16 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+(Lsh32x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+
+(Rsh16x16 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Rsh16Ux16 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+(Lsh16x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+
+(Rsh8x16 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Rsh8Ux16 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+(Lsh8x16 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+
+
+(Rsh64x8 x y) => (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Rsh64Ux8 x y) => (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+(Lsh64x8 x y) => (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+
+(Rsh32x8 x y) => (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Rsh32Ux8 x y) => (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+(Lsh32x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+
+(Rsh16x8 x y) => (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Rsh16Ux8 x y) => (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+(Lsh16x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+
+(Rsh8x8 x y) => (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Rsh8Ux8 x y) => (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+(Lsh8x8 x y) => (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+
+// Cleaning up shift ops
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c]))) && c >= d => (ANDconst [d] y)
+(ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y))) && c >= d => (ANDconst [d] y)
+(ORN x (MOVDconst [-1])) => x
+
+(S(RAD|RD|LD) x (MOVDconst [c])) => (S(RAD|RD|LD)const [c&63 | (c>>6&1*63)] x)
+(S(RAW|RW|LW) x (MOVDconst [c])) => (S(RAW|RW|LW)const [c&31 | (c>>5&1*31)] x)
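+// The AuxInt expression clamps the constant shift amount into [0,63] (or [0,31]
+// for the word forms): c&63 keeps the low six bits, and when bit 6 of c is set
+// the |63 term forces the amount to 63. For example, c=70 gives 70&63 = 6 and
+// ((70>>6)&1)*63 = 63, so the encoded amount is 6|63 = 63.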
+
+(Addr {sym} base) => (MOVDaddr {sym} [0] base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+(OffPtr [off] ptr) => (ADD (MOVDconst <typ.Int64> [off]) ptr)
+
+// TODO: optimize these cases?
+(Ctz32NonZero ...) => (Ctz32 ...)
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) && buildcfg.GOPPC64<=8 => (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+(Ctz64 x) => (CNTTZD x)
+(Ctz32 x) && buildcfg.GOPPC64<=8 => (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+(Ctz32 x) => (CNTTZW (MOVWZreg x))
+(Ctz16 x) => (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+(Ctz8 x) => (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
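+// For GOPPC64<=8 the count-trailing-zeros idiom is POPCNT of (x-1) &^ x, which
+// isolates exactly the trailing zero bits of x (and is all ones when x is 0,
+// so Ctz64 of 0 yields 64).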
+
+(BitLen64 x) => (SUBFCconst [64] (CNTLZD <typ.Int> x))
+(BitLen32 x) => (SUBFCconst [32] (CNTLZW <typ.Int> x))
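+// SUBFCconst [c] x computes c-x, so BitLen is the operand width minus the
+// count of leading zeros.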
+
+(PopCount64 ...) => (POPCNTD ...)
+(PopCount32 x) => (POPCNTW (MOVWZreg x))
+(PopCount16 x) => (POPCNTW (MOVHZreg x))
+(PopCount8 x) => (POPCNTB (MOVBZreg x))
+
+(And(64|32|16|8) ...) => (AND ...)
+(Or(64|32|16|8) ...) => (OR ...)
+(Xor(64|32|16|8) ...) => (XOR ...)
+
+(Neg(64|32|16|8) ...) => (NEG ...)
+(Neg64F ...) => (FNEG ...)
+(Neg32F ...) => (FNEG ...)
+
+(Com(64|32|16|8) x) => (NOR x x)
+
+// Lowering boolean ops
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(Not x) => (XORconst [1] x)
+
+// Use ANDN for AND x NOT y
+(AND x (NOR y y)) => (ANDN x y)
+
+// Lowering comparisons
+(EqB x y) => (ANDconst [1] (EQV x y))
+// Choosing sign extension when both operands are signed sets up for sign/zero-extension elision later
+(Eq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Eq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Eq8 x y) => (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Eq16 x y) => (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Eq32 x y) => (Equal (CMPW x y))
+(Eq64 x y) => (Equal (CMP x y))
+(Eq32F x y) => (Equal (FCMPU x y))
+(Eq64F x y) => (Equal (FCMPU x y))
+(EqPtr x y) => (Equal (CMP x y))
+
+(NeqB ...) => (XOR ...)
+// As with Eq8 and Eq16, prefer sign extension, which is likely to enable later elision.
+(Neq8 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Neq16 x y) && isSigned(x.Type) && isSigned(y.Type) => (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Neq8 x y) => (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Neq16 x y) => (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Neq32 x y) => (NotEqual (CMPW x y))
+(Neq64 x y) => (NotEqual (CMP x y))
+(Neq32F x y) => (NotEqual (FCMPU x y))
+(Neq64F x y) => (NotEqual (FCMPU x y))
+(NeqPtr x y) => (NotEqual (CMP x y))
+
+(Less8 x y) => (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Less16 x y) => (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Less32 x y) => (LessThan (CMPW x y))
+(Less64 x y) => (LessThan (CMP x y))
+(Less32F x y) => (FLessThan (FCMPU x y))
+(Less64F x y) => (FLessThan (FCMPU x y))
+
+(Less8U x y) => (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Less16U x y) => (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Less32U x y) => (LessThan (CMPWU x y))
+(Less64U x y) => (LessThan (CMPU x y))
+
+(Leq8 x y) => (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+(Leq16 x y) => (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+(Leq32 x y) => (LessEqual (CMPW x y))
+(Leq64 x y) => (LessEqual (CMP x y))
+(Leq32F x y) => (FLessEqual (FCMPU x y))
+(Leq64F x y) => (FLessEqual (FCMPU x y))
+
+(Leq8U x y) => (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+(Leq16U x y) => (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+(Leq32U x y) => (LessEqual (CMPWU x y))
+(Leq64U x y) => (LessEqual (CMPU x y))
+
+// Absorb pseudo-ops into blocks.
+(If (Equal cc) yes no) => (EQ cc yes no)
+(If (NotEqual cc) yes no) => (NE cc yes no)
+(If (LessThan cc) yes no) => (LT cc yes no)
+(If (LessEqual cc) yes no) => (LE cc yes no)
+(If (GreaterThan cc) yes no) => (GT cc yes no)
+(If (GreaterEqual cc) yes no) => (GE cc yes no)
+(If (FLessThan cc) yes no) => (FLT cc yes no)
+(If (FLessEqual cc) yes no) => (FLE cc yes no)
+(If (FGreaterThan cc) yes no) => (FGT cc yes no)
+(If (FGreaterEqual cc) yes no) => (FGE cc yes no)
+
+(If cond yes no) => (NE (CMPWconst [0] (ANDconst <typ.UInt32> [1] cond)) yes no)
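+// For a general boolean condition, isolate its low bit with ANDconst [1] and
+// branch on the NE result of comparing that bit against zero.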
+
+// Absorb boolean tests into block
+(NE (CMPWconst [0] (ANDconst [1] (Equal cc))) yes no) => (EQ cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (NotEqual cc))) yes no) => (NE cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (LessThan cc))) yes no) => (LT cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (LessEqual cc))) yes no) => (LE cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (GreaterThan cc))) yes no) => (GT cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (GreaterEqual cc))) yes no) => (GE cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (FLessThan cc))) yes no) => (FLT cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (FLessEqual cc))) yes no) => (FLE cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (FGreaterThan cc))) yes no) => (FGT cc yes no)
+(NE (CMPWconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no) => (FGE cc yes no)
+
+// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
+(EQ (CMPconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
+(EQ (CMPWconst [0] (ANDconst [c] x)) yes no) => (EQ (ANDCCconst [c] x) yes no)
+(NE (CMPWconst [0] (ANDconst [c] x)) yes no) => (NE (ANDCCconst [c] x) yes no)
+
+// absorb flag constants into branches
+(EQ (FlagEQ) yes no) => (First yes no)
+(EQ (FlagLT) yes no) => (First no yes)
+(EQ (FlagGT) yes no) => (First no yes)
+
+(NE (FlagEQ) yes no) => (First no yes)
+(NE (FlagLT) yes no) => (First yes no)
+(NE (FlagGT) yes no) => (First yes no)
+
+(LT (FlagEQ) yes no) => (First no yes)
+(LT (FlagLT) yes no) => (First yes no)
+(LT (FlagGT) yes no) => (First no yes)
+
+(LE (FlagEQ) yes no) => (First yes no)
+(LE (FlagLT) yes no) => (First yes no)
+(LE (FlagGT) yes no) => (First no yes)
+
+(GT (FlagEQ) yes no) => (First no yes)
+(GT (FlagLT) yes no) => (First no yes)
+(GT (FlagGT) yes no) => (First yes no)
+
+(GE (FlagEQ) yes no) => (First yes no)
+(GE (FlagLT) yes no) => (First no yes)
+(GE (FlagGT) yes no) => (First yes no)
+
+// absorb InvertFlags into branches
+(LT (InvertFlags cmp) yes no) => (GT cmp yes no)
+(GT (InvertFlags cmp) yes no) => (LT cmp yes no)
+(LE (InvertFlags cmp) yes no) => (GE cmp yes no)
+(GE (InvertFlags cmp) yes no) => (LE cmp yes no)
+(EQ (InvertFlags cmp) yes no) => (EQ cmp yes no)
+(NE (InvertFlags cmp) yes no) => (NE cmp yes no)
+
+// constant comparisons
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+
+(CMPconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<y => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>y => (FlagGT)
+
+(CMPWUconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMPUconst (MOVDconst [x]) [y]) && x==y => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+// other known comparisons
+//(CMPconst (MOVBUreg _) [c]) && 0xff < c => (FlagLT)
+//(CMPconst (MOVHUreg _) [c]) && 0xffff < c => (FlagLT)
+//(CMPconst (ANDconst _ [m]) [n]) && 0 <= int32(m) && int32(m) < int32(n) => (FlagLT)
+//(CMPconst (SRLconst _ [c]) [n]) && 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n) => (FlagLT)
+
+// absorb flag constants into boolean values
+(Equal (FlagEQ)) => (MOVDconst [1])
+(Equal (FlagLT)) => (MOVDconst [0])
+(Equal (FlagGT)) => (MOVDconst [0])
+
+(NotEqual (FlagEQ)) => (MOVDconst [0])
+(NotEqual (FlagLT)) => (MOVDconst [1])
+(NotEqual (FlagGT)) => (MOVDconst [1])
+
+(LessThan (FlagEQ)) => (MOVDconst [0])
+(LessThan (FlagLT)) => (MOVDconst [1])
+(LessThan (FlagGT)) => (MOVDconst [0])
+
+(LessEqual (FlagEQ)) => (MOVDconst [1])
+(LessEqual (FlagLT)) => (MOVDconst [1])
+(LessEqual (FlagGT)) => (MOVDconst [0])
+
+(GreaterThan (FlagEQ)) => (MOVDconst [0])
+(GreaterThan (FlagLT)) => (MOVDconst [0])
+(GreaterThan (FlagGT)) => (MOVDconst [1])
+
+(GreaterEqual (FlagEQ)) => (MOVDconst [1])
+(GreaterEqual (FlagLT)) => (MOVDconst [0])
+(GreaterEqual (FlagGT)) => (MOVDconst [1])
+
+// absorb InvertFlags into boolean values
+(Equal (InvertFlags x)) => (Equal x)
+(NotEqual (InvertFlags x)) => (NotEqual x)
+(LessThan (InvertFlags x)) => (GreaterThan x)
+(GreaterThan (InvertFlags x)) => (LessThan x)
+(LessEqual (InvertFlags x)) => (GreaterEqual x)
+(GreaterEqual (InvertFlags x)) => (LessEqual x)
+
+// Elide compares of bit tests // TODO need to make both CC and result of ANDCC available.
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPWconst [0] (ANDconst [c] x)) yes no) => ((EQ|NE|LT|LE|GT|GE) (ANDCCconst [c] x) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(AND x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ANDCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(OR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (ORCC x y) yes no)
+((EQ|NE|LT|LE|GT|GE) (CMPconst [0] z:(XOR x y)) yes no) && z.Uses == 1 => ((EQ|NE|LT|LE|GT|GE) (XORCC x y) yes no)
+
+// Lower CondSelect only after the boolean argument has been lowered (it should always lower); this helps ensure the folding below happens reliably.
+(CondSelect x y bool) && flagArg(bool) == nil => (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+// Fold any CR -> GPR -> CR transfers when applying the above rule.
+(ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp)))) => (ISEL [c] x y cmp)
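+// The inner ISELB has already materialized a 0/1 value from cmp, so re-testing
+// that bit with ANDCCconst and selecting again is redundant; select directly on
+// cmp using the ISELB's condition code [c].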
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBreg (MOVBZload ptr mem)) // PPC has no signed-byte load.
+(Load <t> ptr mem) && is8BitInt(t) && !isSigned(t) => (MOVBZload ptr mem)
+
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is32BitFloat(val.Type) => (FMOVDstore ptr val mem) // glitch from (Cvt32Fto64F x) => x -- type is wrong
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitInt(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Using Zero instead of LoweredZero allows the
+// target address to be folded where possible.
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstorezero destptr mem)
+(Zero [2] destptr mem) =>
+ (MOVHstorezero destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstorezero [2] destptr
+ (MOVHstorezero destptr mem))
+(Zero [4] destptr mem) =>
+ (MOVWstorezero destptr mem)
+(Zero [5] destptr mem) =>
+ (MOVBstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVBstorezero [6] destptr
+ (MOVHstorezero [4] destptr
+ (MOVWstorezero destptr mem)))
+
+(Zero [8] {t} destptr mem) => (MOVDstorezero destptr mem)
+(Zero [12] {t} destptr mem) =>
+ (MOVWstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [16] {t} destptr mem) =>
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))
+(Zero [24] {t} destptr mem) =>
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem)))
+(Zero [32] {t} destptr mem) =>
+ (MOVDstorezero [24] destptr
+ (MOVDstorezero [16] destptr
+ (MOVDstorezero [8] destptr
+ (MOVDstorezero [0] destptr mem))))
+
+// Handle cases not handled above
+// Lowered Short cases do not generate loops, and as a result don't clobber
+// the address registers or flags.
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 && s < 64 => (LoweredZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 <= 8 => (LoweredZero [s] ptr mem)
+(Zero [s] ptr mem) && s < 128 && buildcfg.GOPPC64 >= 9 => (LoweredQuadZeroShort [s] ptr mem)
+(Zero [s] ptr mem) && buildcfg.GOPPC64 >= 9 => (LoweredQuadZero [s] ptr mem)
+
+// moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) =>
+ (MOVWstore dst (MOVWZload src mem) mem)
+// MOVD for load and store must have offsets that are multiple of 4
+(Move [8] {t} dst src mem) =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// Large move uses a loop. Since the address is computed and the
+// offset is zero, any alignment can be used.
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s) =>
+ (LoweredMove [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9 =>
+ (LoweredQuadMoveShort [s] dst src mem)
+(Move [s] dst src mem) && s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s) =>
+ (LoweredQuadMove [s] dst src mem)
+
+// Calls
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(IsNonNil ptr) => (NotEqual (CMPconst [0] ptr))
+(IsInBounds idx len) => (LessThan (CMPU idx len))
+(IsSliceInBounds idx len) => (LessEqual (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Optimizations
+// Note that PPC "logical" immediates come in 0:15 and 16:31 unsigned immediate forms,
+// so ORconst, XORconst easily expand into a pair.
+
+// Include very-large constants in the const-const case.
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(ORN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|^d])
+(ANDN (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&^d])
+(NOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [^(c|d)])
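+// In the Go expressions above, c|^d is c OR NOT d (matching ORN), c&^d is Go's
+// AND NOT operator (matching ANDN), and ^(c|d) is NOR.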
+
+// Discover consts
+(AND x (MOVDconst [c])) && isU16Bit(c) => (ANDconst [c] x)
+(XOR x (MOVDconst [c])) && isU32Bit(c) => (XORconst [c] x)
+(OR x (MOVDconst [c])) && isU32Bit(c) => (ORconst [c] x)
+
+// Simplify consts
+(ANDconst [c] (ANDconst [d] x)) => (ANDconst [c&d] x)
+(ORconst [c] (ORconst [d] x)) => (ORconst [c|d] x)
+(XORconst [c] (XORconst [d] x)) => (XORconst [c^d] x)
+(ANDconst [-1] x) => x
+(ANDconst [0] _) => (MOVDconst [0])
+(XORconst [0] x) => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORconst [0] x) => x
+
+// zero-extend of small and => small and
+(MOVBZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFF => y
+(MOVHZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y
+(MOVWZreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFFFFFF => y
+(MOVWZreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0xFFFFFFFF => y
+
+// sign extend of small-positive and => small-positive-and
+(MOVBreg y:(ANDconst [c] _)) && uint64(c) <= 0x7F => y
+(MOVHreg y:(ANDconst [c] _)) && uint64(c) <= 0x7FFF => y
+(MOVWreg y:(ANDconst [c] _)) && uint64(c) <= 0xFFFF => y // 0xFFFF is the largest immediate constant; regarded as 32-bit it is > 0
+(MOVWreg y:(AND (MOVDconst [c]) _)) && uint64(c) <= 0x7FFFFFFF => y
+
+// small and of zero-extend => either zero-extend or small and
+(ANDconst [c] y:(MOVBZreg _)) && c&0xFF == 0xFF => y
+(ANDconst [0xFF] y:(MOVBreg _)) => y
+(ANDconst [c] y:(MOVHZreg _)) && c&0xFFFF == 0xFFFF => y
+(ANDconst [0xFFFF] y:(MOVHreg _)) => y
+
+(AND (MOVDconst [c]) y:(MOVWZreg _)) && c&0xFFFFFFFF == 0xFFFFFFFF => y
+(AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x)) => (MOVWZreg x)
+// normal case
+(ANDconst [c] (MOV(B|BZ)reg x)) => (ANDconst [c&0xFF] x)
+(ANDconst [c] (MOV(H|HZ)reg x)) => (ANDconst [c&0xFFFF] x)
+(ANDconst [c] (MOV(W|WZ)reg x)) => (ANDconst [c&0xFFFFFFFF] x)
+
+// Eliminate unnecessary sign/zero extend following right shift
+(MOV(B|H|W)Zreg (SRWconst [c] (MOVBZreg x))) => (SRWconst [c] (MOVBZreg x))
+(MOV(H|W)Zreg (SRWconst [c] (MOVHZreg x))) => (SRWconst [c] (MOVHZreg x))
+(MOVWZreg (SRWconst [c] (MOVWZreg x))) => (SRWconst [c] (MOVWZreg x))
+(MOV(B|H|W)reg (SRAWconst [c] (MOVBreg x))) => (SRAWconst [c] (MOVBreg x))
+(MOV(H|W)reg (SRAWconst [c] (MOVHreg x))) => (SRAWconst [c] (MOVHreg x))
+(MOVWreg (SRAWconst [c] (MOVWreg x))) => (SRAWconst [c] (MOVWreg x))
+
+(MOVWZreg (SRWconst [c] x)) && sizeof(x.Type) <= 32 => (SRWconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && sizeof(x.Type) <= 16 => (SRWconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && sizeof(x.Type) == 8 => (SRWconst [c] x)
+(MOVWreg (SRAWconst [c] x)) && sizeof(x.Type) <= 32 => (SRAWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && sizeof(x.Type) <= 16 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && sizeof(x.Type) == 8 => (SRAWconst [c] x)
+
+// initial right shift will handle sign/zero extend
+(MOVBZreg (SRDconst [c] x)) && c>=56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c>56 => (SRDconst [c] x)
+(MOVBreg (SRDconst [c] x)) && c==56 => (SRADconst [c] x)
+(MOVBreg (SRADconst [c] x)) && c>=56 => (SRADconst [c] x)
+(MOVBZreg (SRWconst [c] x)) && c>=24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c>24 => (SRWconst [c] x)
+(MOVBreg (SRWconst [c] x)) && c==24 => (SRAWconst [c] x)
+(MOVBreg (SRAWconst [c] x)) && c>=24 => (SRAWconst [c] x)
+
+(MOVHZreg (SRDconst [c] x)) && c>=48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c>48 => (SRDconst [c] x)
+(MOVHreg (SRDconst [c] x)) && c==48 => (SRADconst [c] x)
+(MOVHreg (SRADconst [c] x)) && c>=48 => (SRADconst [c] x)
+(MOVHZreg (SRWconst [c] x)) && c>=16 => (SRWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c>16 => (SRWconst [c] x)
+(MOVHreg (SRAWconst [c] x)) && c>=16 => (SRAWconst [c] x)
+(MOVHreg (SRWconst [c] x)) && c==16 => (SRAWconst [c] x)
+
+(MOVWZreg (SRDconst [c] x)) && c>=32 => (SRDconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c>32 => (SRDconst [c] x)
+(MOVWreg (SRADconst [c] x)) && c>=32 => (SRADconst [c] x)
+(MOVWreg (SRDconst [c] x)) && c==32 => (SRADconst [c] x)
+
+// Various redundant zero/sign extension combinations.
+(MOVBZreg y:(MOVBZreg _)) => y // repeat
+(MOVBreg y:(MOVBreg _)) => y // repeat
+(MOVBreg (MOVBZreg x)) => (MOVBreg x)
+(MOVBZreg (MOVBreg x)) => (MOVBZreg x)
+
+// H - there are more combinations than these
+
+(MOVHZreg y:(MOVHZreg _)) => y // repeat
+(MOVHZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVHZreg y:(MOVHBRload _ _)) => y
+
+(MOVHreg y:(MOVHreg _)) => y // repeat
+(MOVHreg y:(MOVBreg _)) => y // wide of narrow
+
+(MOVHreg y:(MOVHZreg x)) => (MOVHreg x)
+(MOVHZreg y:(MOVHreg x)) => (MOVHZreg x)
+
+// W - there are more combinations than these
+
+(MOVWZreg y:(MOVWZreg _)) => y // repeat
+(MOVWZreg y:(MOVHZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVBZreg _)) => y // wide of narrow
+(MOVWZreg y:(MOVHBRload _ _)) => y
+(MOVWZreg y:(MOVWBRload _ _)) => y
+
+(MOVWreg y:(MOVWreg _)) => y // repeat
+(MOVWreg y:(MOVHreg _)) => y // wide of narrow
+(MOVWreg y:(MOVBreg _)) => y // wide of narrow
+
+(MOVWreg y:(MOVWZreg x)) => (MOVWreg x)
+(MOVWZreg y:(MOVWreg x)) => (MOVWZreg x)
+
+// Truncate then logical then truncate: omit first, lesser or equal truncate
+(MOVWZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVWZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVHZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVHZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVWZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVHZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+(MOVBZreg ((OR|XOR|AND) <t> x (MOVBZreg y))) => (MOVBZreg ((OR|XOR|AND) <t> x y))
+
+(MOV(B|H|W)Zreg z:(ANDconst [c] (MOVBZload ptr x))) => z
+(MOVBZreg z:(AND y (MOVBZload ptr x))) => z
+(MOV(H|W)Zreg z:(ANDconst [c] (MOVHZload ptr x))) => z
+(MOVHZreg z:(AND y (MOVHZload ptr x))) => z
+(MOVWZreg z:(ANDconst [c] (MOVWZload ptr x))) => z
+(MOVWZreg z:(AND y (MOVWZload ptr x))) => z
+
+// Arithmetic constant ops
+
+(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [c] x)
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(c+d) => (ADDconst [c+d] x)
+(ADDconst [0] x) => x
+(SUB x (MOVDconst [c])) && is32Bit(-c) => (ADDconst [-c] x)
+
+(ADDconst [c] (MOVDaddr [d] {sym} x)) && is32Bit(c+int64(d)) => (MOVDaddr [int32(c+int64(d))] {sym} x)
+(ADDconst [c] x:(SP)) && is32Bit(c) => (MOVDaddr [int32(c)] x) // so it is rematerializeable
+
+(MULL(W|D) x (MOVDconst [c])) && is16Bit(c) => (MULL(W|D)const [int32(c)] x)
+
+// Subtract from (with carry, but ignored) constant.
+// Note, these clobber the carry bit.
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (SUBFCconst [c] x)
+(SUBFCconst [c] (NEG x)) => (ADDconst [c] x)
+(SUBFCconst [c] (SUBFCconst [d] x)) && is32Bit(c-d) => (ADDconst [c-d] x)
+(SUBFCconst [0] x) => (NEG x)
+(ADDconst [c] (SUBFCconst [d] x)) && is32Bit(c+d) => (SUBFCconst [c+d] x)
+(NEG (ADDconst [c] x)) && is32Bit(-c) => (SUBFCconst [-c] x)
+(NEG (SUBFCconst [c] x)) && is32Bit(-c) => (ADDconst [-c] x)
+(NEG (SUB x y)) => (SUB y x)
+(NEG (NEG x)) => x
+
+// Use register moves instead of stores and loads to move int<=>float values
+// Common with math Float64bits, Float64frombits
+(MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _)) => (MFVSRD x)
+(FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _)) => (MTVSRD x)
+
+(FMOVDstore [off] {sym} ptr (MTVSRD x) mem) => (MOVDstore [off] {sym} ptr x mem)
+(MOVDstore [off] {sym} ptr (MFVSRD x) mem) => (FMOVDstore [off] {sym} ptr x mem)
+
+(MTVSRD (MOVDconst [c])) && !math.IsNaN(math.Float64frombits(uint64(c))) => (FMOVDconst [math.Float64frombits(uint64(c))])
+(MFVSRD (FMOVDconst [c])) => (MOVDconst [int64(math.Float64bits(c))])
+
+(MTVSRD x:(MOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (FMOVDload [off] {sym} ptr mem)
+(MFVSRD x:(FMOVDload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVDload [off] {sym} ptr mem)
+
+// Fold offsets for stores.
+(MOVDstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVDstore [off1+int32(off2)] {sym} x val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVWstore [off1+int32(off2)] {sym} x val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVHstore [off1+int32(off2)] {sym} x val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] x) val mem) && is16Bit(int64(off1)+off2) => (MOVBstore [off1+int32(off2)] {sym} x val mem)
+
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is16Bit(int64(off1)+off2) => (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+
+// Fold address into load/store.
+// The assembler needs several instructions and a temp register to access
+// a global, and it reloads the temp register each time. So don't fold the
+// address of a global unless there is only one use.
+(MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+(FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+
+(MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2)
+ && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+// Fold offsets for loads.
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is16Bit(int64(off1)+off2) => (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+
+(MOVDload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVDload [off1+int32(off2)] {sym} x mem)
+(MOVWload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWload [off1+int32(off2)] {sym} x mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVWZload [off1+int32(off2)] {sym} x mem)
+(MOVHload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHload [off1+int32(off2)] {sym} x mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVHZload [off1+int32(off2)] {sym} x mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) => (MOVBZload [off1+int32(off2)] {sym} x mem)
+
+// Determine load + addressing that can be done as a register indexed load
+(MOV(D|W|WZ|H|HZ|BZ)load [0] {sym} p:(ADD ptr idx) mem) && sym == nil && p.Uses == 1 => (MOV(D|W|WZ|H|HZ|BZ)loadidx ptr idx mem)
+
+// Determine if there is benefit to using a non-indexed load, since that saves the load
+// of the index register. With MOVDload and MOVWload, there is no benefit if the offset
+// value is not a multiple of 4, since that results in an extra instruction in the base
+// register address computation.
+(MOV(D|W)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx ptr (MOVDconst [c]) mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+(MOV(D|W)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) && c%4 == 0 => (MOV(D|W)load [int32(c)] ptr mem)
+(MOV(WZ|H|HZ|BZ)loadidx (MOVDconst [c]) ptr mem) && is16Bit(c) => (MOV(WZ|H|HZ|BZ)load [int32(c)] ptr mem)
+
+// Store of zero => storezero
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+
+// Fold offsets for storezero
+(MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+(MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+(MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+(MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem) && is16Bit(int64(off1)+off2) =>
+ (MOVBstorezero [off1+int32(off2)] {sym} x mem)
+
+// Stores with addressing that can be done as indexed stores
+(MOV(D|W|H|B)store [0] {sym} p:(ADD ptr idx) val mem) && sym == nil && p.Uses == 1 => (MOV(D|W|H|B)storeidx ptr idx val mem)
+
+// Stores with constant index values can be done without indexed instructions
+// No need to lower the idx cases if c%4 is not 0
+(MOVDstoreidx ptr (MOVDconst [c]) val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx ptr (MOVDconst [c]) val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+(MOVDstoreidx (MOVDconst [c]) ptr val mem) && is16Bit(c) && c%4 == 0 => (MOVDstore [int32(c)] ptr val mem)
+(MOV(W|H|B)storeidx (MOVDconst [c]) ptr val mem) && is16Bit(c) => (MOV(W|H|B)store [int32(c)] ptr val mem)
+
+// Fold symbols into storezero
+(MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+(MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem) && canMergeSym(sym1,sym2)
+ && (x.Op != OpSB || p.Uses == 1) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+
+// atomic intrinsics
+(AtomicLoad(8|32|64|Ptr) ptr mem) => (LoweredAtomicLoad(8|32|64|Ptr) [1] ptr mem)
+(AtomicLoadAcq(32|64) ptr mem) => (LoweredAtomicLoad(32|64) [0] ptr mem)
+
+(AtomicStore(8|32|64) ptr val mem) => (LoweredAtomicStore(8|32|64) [1] ptr val mem)
+(AtomicStoreRel(32|64) ptr val mem) => (LoweredAtomicStore(32|64) [0] ptr val mem)
+//(AtomicStorePtrNoWB ptr val mem) => (STLR ptr val mem)
+
+(AtomicExchange(32|64) ...) => (LoweredAtomicExchange(32|64) ...)
+
+(AtomicAdd(32|64) ...) => (LoweredAtomicAdd(32|64) ...)
+
+(AtomicCompareAndSwap(32|64) ptr old new_ mem) => (LoweredAtomicCas(32|64) [1] ptr old new_ mem)
+(AtomicCompareAndSwapRel32 ptr old new_ mem) => (LoweredAtomicCas32 [0] ptr old new_ mem)
+
+(AtomicAnd8 ...) => (LoweredAtomicAnd8 ...)
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+(AtomicOr8 ...) => (LoweredAtomicOr8 ...)
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+
+// Note that MOV??reg returns a 64-bit int, but x is not necessarily that wide.
+// This may interact with other patterns in the future. (Compare with arm64)
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) => x
+(MOV(B|H|W)Zreg x:(MOVBZloadidx _ _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) => x
+(MOV(H|W)Zreg x:(MOVHZloadidx _ _ _)) => x
+(MOV(H|W)reg x:(MOVHload _ _)) => x
+(MOV(H|W)reg x:(MOVHloadidx _ _ _)) => x
+(MOVWZreg x:(MOVWZload _ _)) => x
+(MOVWZreg x:(MOVWZloadidx _ _ _)) => x
+(MOVWreg x:(MOVWload _ _)) => x
+(MOVWreg x:(MOVWloadidx _ _ _)) => x
+(MOVBZreg x:(Select0 (LoweredAtomicLoad8 _ _))) => x
+(MOVWZreg x:(Select0 (LoweredAtomicLoad32 _ _))) => x
+
+// don't extend if argument is already extended
+(MOVBreg x:(Arg <t>)) && is8BitInt(t) && isSigned(t) => x
+(MOVBZreg x:(Arg <t>)) && is8BitInt(t) && !isSigned(t) => x
+(MOVHreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && isSigned(t) => x
+(MOVHZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t)) && !isSigned(t) => x
+(MOVWreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t) => x
+(MOVWZreg x:(Arg <t>)) && (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t) => x
+
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Implement clrsldi and clrslwi extended mnemonics as described in
+// ISA 3.0 section C.8. AuxInt field contains values needed for
+// the instructions, packed together since there is only one available.
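+// For example, (SLDconst [3] z:(MOVBZreg x)) becomes a single CLRLSLDI that
+// clears the upper 56 bits of x and then shifts left by 3, replacing the
+// separate zero-extend and shift when z has no other uses.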
+(SLDconst [c] z:(MOVBZreg x)) && c < 8 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+(SLDconst [c] z:(MOVHZreg x)) && c < 16 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+(SLDconst [c] z:(MOVWZreg x)) && c < 32 && z.Uses == 1 => (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+
+(SLDconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLDconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d)) => (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+(SLWconst [c] z:(MOVBZreg x)) && z.Uses == 1 && c < 8 => (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+(SLWconst [c] z:(MOVHZreg x)) && z.Uses == 1 && c < 16 => (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+(SLWconst [c] z:(ANDconst [d] x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+(SLWconst [c] z:(AND (MOVDconst [d]) x)) && z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d)) => (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+// special case for power9
+(SL(W|D)const [c] z:(MOVWreg x)) && c < 32 && buildcfg.GOPPC64 >= 9 => (EXTSWSLconst [c] x)
+
+// Lose widening ops fed to stores
+(MOVBstore [off] {sym} ptr (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstore [off] {sym} ptr (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (MOV(B|BZ|H|HZ|W|WZ)reg x) mem) => (MOVBstoreidx ptr idx x mem)
+(MOVHstoreidx ptr idx (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHstoreidx ptr idx x mem)
+(MOVWstoreidx ptr idx (MOV(W|WZ)reg x) mem) => (MOVWstoreidx ptr idx x mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(H|HZ)reg x) [c]) mem) && c <= 8 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVBstoreidx ptr idx (SRWconst (MOV(W|WZ)reg x) [c]) mem) && c <= 24 => (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+(MOVHBRstore {sym} ptr (MOV(H|HZ|W|WZ)reg x) mem) => (MOVHBRstore {sym} ptr x mem)
+(MOVWBRstore {sym} ptr (MOV(W|WZ)reg x) mem) => (MOVWBRstore {sym} ptr x mem)
+
+// Lose W-widening ops fed to compare-W
+(CMPW x (MOVWreg y)) => (CMPW x y)
+(CMPW (MOVWreg x) y) => (CMPW x y)
+(CMPWU x (MOVWZreg y)) => (CMPWU x y)
+(CMPWU (MOVWZreg x) y) => (CMPWU x y)
+
+(CMP x (MOVDconst [c])) && is16Bit(c) => (CMPconst x [c])
+(CMP (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPconst y [c]))
+(CMPW x (MOVDconst [c])) && is16Bit(c) => (CMPWconst x [int32(c)])
+(CMPW (MOVDconst [c]) y) && is16Bit(c) => (InvertFlags (CMPWconst y [int32(c)]))
+
+(CMPU x (MOVDconst [c])) && isU16Bit(c) => (CMPUconst x [c])
+(CMPU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPUconst y [c]))
+(CMPWU x (MOVDconst [c])) && isU16Bit(c) => (CMPWUconst x [int32(c)])
+(CMPWU (MOVDconst [c]) y) && isU16Bit(c) => (InvertFlags (CMPWUconst y [int32(c)]))
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
+// ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
+// ISELB special case where arg0, arg1 values are 0, 1
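+// For example, (LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp) produces 1
+// when cmp indicates less-than and 0 otherwise, as the flag-folding rules
+// below make explicit.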
+
+(Equal cmp) => (ISELB [2] (MOVDconst [1]) cmp)
+(NotEqual cmp) => (ISELB [6] (MOVDconst [1]) cmp)
+(LessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessThan cmp) => (ISELB [0] (MOVDconst [1]) cmp)
+(FLessEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
+(GreaterEqual cmp) => (ISELB [4] (MOVDconst [1]) cmp)
+(GreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterThan cmp) => (ISELB [1] (MOVDconst [1]) cmp)
+(FGreaterEqual cmp) => (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
+(LessEqual cmp) => (ISELB [5] (MOVDconst [1]) cmp)
+
+(ISELB [0] _ (FlagLT)) => (MOVDconst [1])
+(ISELB [0] _ (Flag(GT|EQ))) => (MOVDconst [0])
+(ISELB [1] _ (FlagGT)) => (MOVDconst [1])
+(ISELB [1] _ (Flag(LT|EQ))) => (MOVDconst [0])
+(ISELB [2] _ (FlagEQ)) => (MOVDconst [1])
+(ISELB [2] _ (Flag(LT|GT))) => (MOVDconst [0])
+(ISELB [4] _ (FlagLT)) => (MOVDconst [0])
+(ISELB [4] _ (Flag(GT|EQ))) => (MOVDconst [1])
+(ISELB [5] _ (FlagGT)) => (MOVDconst [0])
+(ISELB [5] _ (Flag(LT|EQ))) => (MOVDconst [1])
+(ISELB [6] _ (FlagEQ)) => (MOVDconst [0])
+(ISELB [6] _ (Flag(LT|GT))) => (MOVDconst [1])
+
+(ISEL [2] x _ (FlagEQ)) => x
+(ISEL [2] _ y (Flag(LT|GT))) => y
+
+(ISEL [6] _ y (FlagEQ)) => y
+(ISEL [6] x _ (Flag(LT|GT))) => x
+
+(ISEL [0] _ y (Flag(EQ|GT))) => y
+(ISEL [0] x _ (FlagLT)) => x
+
+(ISEL [5] _ x (Flag(EQ|LT))) => x
+(ISEL [5] y _ (FlagGT)) => y
+
+(ISEL [1] _ y (Flag(EQ|LT))) => y
+(ISEL [1] x _ (FlagGT)) => x
+
+(ISEL [4] x _ (Flag(EQ|GT))) => x
+(ISEL [4] _ y (FlagLT)) => y
+
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 0 => (ISELB [n+1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 1 => (ISELB [n-1] (MOVDconst [1]) bool)
+(ISELB [n] (MOVDconst [1]) (InvertFlags bool)) && n%4 == 2 => (ISELB [n] (MOVDconst [1]) bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 0 => (ISEL [n+1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 1 => (ISEL [n-1] x y bool)
+(ISEL [n] x y (InvertFlags bool)) && n%4 == 2 => (ISEL [n] x y bool)
+(XORconst [1] (ISELB [6] (MOVDconst [1]) cmp)) => (ISELB [2] (MOVDconst [1]) cmp)
+(XORconst [1] (ISELB [5] (MOVDconst [1]) cmp)) => (ISELB [1] (MOVDconst [1]) cmp)
+(XORconst [1] (ISELB [4] (MOVDconst [1]) cmp)) => (ISELB [0] (MOVDconst [1]) cmp)
+
+// A particular pattern seen in cgo code:
+(AND (MOVDconst [c]) x:(MOVBZload _ _)) => (ANDconst [c&0xFF] x)
+
+// floating point negative abs
+(FNEG (FABS x)) => (FNABS x)
+(FNEG (FNABS x)) => (FABS x)
+
+// floating-point fused multiply-add/sub
+(FADD (FMUL x y) z) => (FMADD x y z)
+(FSUB (FMUL x y) z) => (FMSUB x y z)
+(FADDS (FMULS x y) z) => (FMADDS x y z)
+(FSUBS (FMULS x y) z) => (FMSUBS x y z)
+
+
+// The following statements are found in encoding/binary functions UintXX (load) and PutUintXX (store)
+// and convert the statements in these functions from multiple single byte loads or stores to
+// the single largest possible load or store.
+// Some are marked big or little endian based on the order in which the bytes are loaded or stored,
+// not on the ordering of the machine. These are intended for little endian machines.
+// To implement for big endian machines, most rules would have to be duplicated but the
+// resulting rule would be reversed, i.e., MOVHZload on little endian would be MOVHBRload on big endian
+// and vice versa.
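+// For example, Go code of the form
+//   uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24
+// is recognized incrementally by the subrules below and ultimately collapsed
+// into a single little-endian MOVWZload.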
+// b[0] | b[1]<<8 => load 16-bit Little endian
+(OR <t> x0:(MOVBZload [i0] {s} p mem)
+ o1:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [8]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && x0.Uses ==1 && x1.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, o1)
+ => @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+
+// b[0]<<8 | b[1] => load 16-bit Big endian on Little endian arch.
+// Use byte-reverse indexed load for 2 bytes.
+(OR <t> x0:(MOVBZload [i1] {s} p mem)
+ o1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [8]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && x0.Uses ==1 && x1.Uses == 1
+ && o1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, o1)
+ => @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<(n+8) | b[1]<<n => load 16-bit Big endian (where n%8 == 0)
+// Use byte-reverse indexed load for 2 bytes,
+// then shift left to the correct position. Used to match subrules
+// from longer rules.
+(OR <t> s0:(SL(W|D)const x0:(MOVBZload [i1] {s} p mem) [n1])
+ s1:(SL(W|D)const x1:(MOVBZload [i0] {s} p mem) [n2]))
+ && !config.BigEndian
+ && i1 == i0+1
+ && n1%8 == 0
+ && n2 == n1+8
+ && x0.Uses == 1 && x1.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1) != nil
+ && clobber(x0, x1, s0, s1)
+ => @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 => load 32-bit Little endian
+// Use byte-reverse indexed load for 4 bytes.
+(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i3] {s} p mem) [24])
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [16])
+ x0:(MOVHZload [i0] {s} p mem)))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses ==1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+
+// b[0]<<24 | b[1]<<16 | b[2]<<8 | b[3] => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Could be used to match subrules of a longer rule.
+(OR <t> s1:(SL(W|D)const x2:(MOVBZload [i0] {s} p mem) [24])
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i1] {s} p mem) [16])
+ x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[3] | b[2]<<8 | b[1]<<16 | b[0]<<24 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Could be used to match subrules of a longer rule.
+(OR <t> x0:(MOVBZload [i3] {s} p mem)
+ o0:(OR <t> s0:(SL(W|D)const x1:(MOVBZload [i2] {s} p mem) [8])
+ s1:(SL(W|D)const x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, o0)
+ => @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with computed address.
+// Used to match longer rules.
+(OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40])
+ s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
+ && !config.BigEndian
+ && i2 == i0+2
+ && i3 == i0+3
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, s2, o0)
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+
+// b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 32-bit Big endian order on Little endian arch
+// Use byte-reverse indexed load for 4 bytes with constant address.
+// Used to match longer rules.
+(OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
+ s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && o0.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1
+ && mergePoint(b, x0, x1, x2) != nil
+ && clobber(x0, x1, x2, s0, s1, s2, o0)
+ => @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+
+// b[0] | b[1]<<8 | b[2]<<16 | b[3]<<24 | b[4] <<32 | b[5]<<40 | b[6]<<48 | b[7]<<56 => load 64-bit Little endian
+// Rules with commutative ops and many operands will result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+// Offset must be multiple of 4 for MOVD
+(OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56])
+ o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48])
+ o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40])
+ o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32])
+ x0:(MOVWZload {s} [i0] p mem)))))
+ && !config.BigEndian
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses ==1 && x7.Uses == 1
+ && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
+ && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
+ && mergePoint(b, x0, x4, x5, x6, x7) != nil
+ && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
+ => @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
+
+// b[7] | b[6]<<8 | b[5]<<16 | b[4]<<24 | b[3]<<32 | b[2]<<40 | b[1]<<48 | b[0]<<56 => load 64-bit Big endian ordered bytes on Little endian arch
+// Use byte-reverse indexed load of 8 bytes.
+// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+(OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56])
+ o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48])
+ o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40])
+ o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32])
+ x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
+ && !config.BigEndian
+ && i1 == i0+1
+ && i2 == i0+2
+ && i3 == i0+3
+ && i4 == i0+4
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1
+ && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1
+ && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && mergePoint(b, x0, x1, x2, x3, x4) != nil
+ && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
+ => @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// b[0]<<56 | b[1]<<48 | b[2]<<40 | b[3]<<32 | b[4]<<24 | b[5]<<16 | b[6]<<8 | b[7] => load 64-bit Big endian ordered bytes on Little endian arch
+// Use byte-reverse indexed load of 8 bytes.
+// Rules with commutative ops and many operands can result in extremely large functions in rewritePPC64,
+// so matching shorter previously defined subrules is important.
+(OR <t> x7:(MOVBZload [i7] {s} p mem)
+ o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8])
+ o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16])
+ o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24])
+ s0:(SL(W|D)const x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ && !config.BigEndian
+ && i4 == i0+4
+ && i5 == i0+5
+ && i6 == i0+6
+ && i7 == i0+7
+ && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1
+ && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1
+ && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1
+ && mergePoint(b, x3, x4, x5, x6, x7) != nil
+ && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ => @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+
+// 2 byte store Little endian as in:
+// b[0] = byte(v >> 16)
+// b[1] = byte(v >> 24)
+// Added for use in matching longer rules.
+(MOVBstore [i1] {s} p (SR(W|D)const w [24])
+ x0:(MOVBstore [i0] {s} p (SR(W|D)const w [16]) mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+
+// 2 byte store Little endian as in:
+// b[0] = byte(v)
+// b[1] = byte(v >> 8)
+(MOVBstore [i1] {s} p (SR(W|D)const w [8])
+ x0:(MOVBstore [i0] {s} p w mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHstore [i0] {s} p w mem)
+
+// 4 byte store Little endian as in:
+// b[0:1] = uint16(v)
+// b[2:3] = uint16(v >> 16)
+(MOVHstore [i1] {s} p (SR(W|D)const w [16])
+ x0:(MOVHstore [i0] {s} p w mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+2
+ && clobber(x0)
+ => (MOVWstore [i0] {s} p w mem)
+
+// 4 byte store Big endian as in:
+// b[0] = byte(v >> 24)
+// b[1] = byte(v >> 16)
+// b[2] = byte(v >> 8)
+// b[3] = byte(v)
+// Use byte-reverse indexed 4 byte store.
+(MOVBstore [i3] {s} p w
+ x0:(MOVBstore [i2] {s} p (SRWconst w [8])
+ x1:(MOVBstore [i1] {s} p (SRWconst w [16])
+ x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
+ && !config.BigEndian
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1
+ && i1 == i0+1 && i2 == i0+2 && i3 == i0+3
+ && clobber(x0, x1, x2)
+ => (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+
+// The 2 byte store appears after the 4 byte store so that the
+// match for the 2 byte store is not done first.
+// If the 4 byte store is based on the 2 byte store then there are
+// variations on the MOVDaddr subrule that would require additional
+// rules to be written.
+
+// 2 byte store Big endian as in:
+// b[0] = byte(v >> 8)
+// b[1] = byte(v)
+(MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
+ && !config.BigEndian
+ && x0.Uses == 1
+ && i1 == i0+1
+ && clobber(x0)
+ => (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+
+// 8 byte store Little endian as in:
+// b[0] = byte(v)
+// b[1] = byte(v >> 8)
+// b[2] = byte(v >> 16)
+// b[3] = byte(v >> 24)
+// b[4] = byte(v >> 32)
+// b[5] = byte(v >> 40)
+// b[6] = byte(v >> 48)
+// b[7] = byte(v >> 56)
+// Built on previously defined rules
+// Offset must be multiple of 4 for MOVDstore
+(MOVBstore [i7] {s} p (SRDconst w [56])
+ x0:(MOVBstore [i6] {s} p (SRDconst w [48])
+ x1:(MOVBstore [i5] {s} p (SRDconst w [40])
+ x2:(MOVBstore [i4] {s} p (SRDconst w [32])
+ x3:(MOVWstore [i0] {s} p w mem)))))
+ && !config.BigEndian
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1
+ && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
+ && clobber(x0, x1, x2, x3)
+ => (MOVDstore [i0] {s} p w mem)
+
+// 8 byte store Big endian as in:
+// b[0] = byte(v >> 56)
+// b[1] = byte(v >> 48)
+// b[2] = byte(v >> 40)
+// b[3] = byte(v >> 32)
+// b[4] = byte(v >> 24)
+// b[5] = byte(v >> 16)
+// b[6] = byte(v >> 8)
+// b[7] = byte(v)
+// Use byte-reverse indexed 8 byte store.
+(MOVBstore [i7] {s} p w
+ x0:(MOVBstore [i6] {s} p (SRDconst w [8])
+ x1:(MOVBstore [i5] {s} p (SRDconst w [16])
+ x2:(MOVBstore [i4] {s} p (SRDconst w [24])
+ x3:(MOVBstore [i3] {s} p (SRDconst w [32])
+ x4:(MOVBstore [i2] {s} p (SRDconst w [40])
+ x5:(MOVBstore [i1] {s} p (SRDconst w [48])
+ x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
+ && !config.BigEndian
+ && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1
+ && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7
+ && clobber(x0, x1, x2, x3, x4, x5, x6)
+ => (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+
+// Arch-specific inlining for small or disjoint runtime.memmove
+(SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(s1, s2, s3, call)
+ => (Move [sz] dst src mem)
+
+// Match post-lowering calls, register version.
+(SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && call.Uses == 1
+ && isInlinableMemmove(dst, src, sz, config)
+ && clobber(call)
+ => (Move [sz] dst src mem)
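+
+// A Go-level sketch (hedged; smallCopy is a hypothetical example, not part
+// of this file) of source that can produce the runtime.memmove call matched
+// above; with a small constant size it may be rewritten into a plain Move:
+//
+//	func smallCopy(dst, src []byte) {
+//		copy(dst[:16], src[:16])
+//	}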
+
+// Prefetch instructions (aux is option: 0 - DCBT ; 8 - DCBT stream)
+(PrefetchCache ptr mem) => (DCBT ptr mem [0])
+(PrefetchCacheStreamed ptr mem) => (DCBT ptr mem [8])
+
diff --git a/src/cmd/compile/internal/ssa/gen/PPC64Ops.go b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
new file mode 100644
index 0000000..d18cbcc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/PPC64Ops.go
@@ -0,0 +1,726 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Less-than-64-bit integer types live in the low portion of registers.
+// The upper portion is junk.
+// - Boolean types are zero or 1; stored in a byte, with upper bytes of the register containing junk.
+// - *const instructions may use a constant larger than the instruction can encode.
+// In this case the assembler expands to multiple instructions and uses tmp
+// register (R31).
+
+var regNamesPPC64 = []string{
+ "R0", // REGZERO, not used, but simplifies counting in regalloc
+ "SP", // REGSP
+ "SB", // REGSB
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11", // REGCTXT for closures
+ "R12",
+ "R13", // REGTLS
+ "R14",
+ "R15",
+ "R16",
+ "R17",
+ "R18",
+ "R19",
+ "R20",
+ "R21",
+ "R22",
+ "R23",
+ "R24",
+ "R25",
+ "R26",
+ "R27",
+ "R28",
+ "R29",
+ "g", // REGG. Using name "g" and setting Config.hasGReg makes it "just happen".
+ "R31", // REGTMP
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+ // "CR0",
+ // "CR1",
+ // "CR2",
+ // "CR3",
+ // "CR4",
+ // "CR5",
+ // "CR6",
+ // "CR7",
+
+ // "CR",
+ // "XER",
+ // "LR",
+ // "CTR",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesPPC64) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesPPC64 {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29")
+ fp = buildReg("F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26")
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ gr = buildReg("g")
+ // cr = buildReg("CR")
+ // ctr = buildReg("CTR")
+ // lr = buildReg("LR")
+ tmp = buildReg("R31")
+ ctxt = buildReg("R11")
+ callptr = buildReg("R12")
+ // tls = buildReg("R13")
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp21a0 = regInfo{inputs: []regMask{gp, gp | sp | sb}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp}}
+ gp22 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
+ gp32 = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}, outputs: []regMask{gp, gp}}
+ gp1cr = regInfo{inputs: []regMask{gp | sp | sb}}
+ gp2cr = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ crgp = regInfo{inputs: nil, outputs: []regMask{gp}}
+ crgp11 = regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}
+ crgp21 = regInfo{inputs: []regMask{gp, gp}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}
+ gploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ prefreg = regInfo{inputs: []regMask{gp | sp | sb}}
+ gpstore = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}}
+ gpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, gp | sp | sb}}
+ gpstorezero = regInfo{inputs: []regMask{gp | sp | sb}} // ppc64.REGZERO is reserved zero value
+ gpxchg = regInfo{inputs: []regMask{gp | sp | sb, gp}, outputs: []regMask{gp}}
+ gpcas = regInfo{inputs: []regMask{gp | sp | sb, gp, gp}, outputs: []regMask{gp}}
+ fp01 = regInfo{inputs: nil, outputs: []regMask{fp}}
+ fp11 = regInfo{inputs: []regMask{fp}, outputs: []regMask{fp}}
+ fpgp = regInfo{inputs: []regMask{fp}, outputs: []regMask{gp}}
+ gpfp = regInfo{inputs: []regMask{gp}, outputs: []regMask{fp}}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: []regMask{fp}}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: []regMask{fp}}
+ fp2cr = regInfo{inputs: []regMask{fp, fp}}
+ fpload = regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{fp}}
+ fploadidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb}, outputs: []regMask{fp}}
+ fpstore = regInfo{inputs: []regMask{gp | sp | sb, fp}}
+ fpstoreidx = regInfo{inputs: []regMask{gp | sp | sb, gp | sp | sb, fp}}
+ callerSave = regMask(gp | fp | gr)
+ r3 = buildReg("R3")
+ r4 = buildReg("R4")
+ r5 = buildReg("R5")
+ r6 = buildReg("R6")
+ )
+ ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11, asm: "ADD", aux: "Int64"}, // arg0 + auxInt
+ {name: "FADD", argLength: 2, reg: fp21, asm: "FADD", commutative: true}, // arg0+arg1
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true}, // arg0+arg1
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0-arg1
+ {name: "SUBFCconst", argLength: 1, reg: gp11, asm: "SUBC", aux: "Int64"}, // auxInt - arg0 (with carry)
+ {name: "FSUB", argLength: 2, reg: fp21, asm: "FSUB"}, // arg0-arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS"}, // arg0-arg1
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true}, // arg0*arg1 (signed 64-bit)
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true}, // arg0*arg1 (signed 32-bit)
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int64"}, // arg0*auxInt (signed 64-bit)
+ {name: "MADDLD", argLength: 3, reg: gp31, asm: "MADDLD", typ: "Int64"}, // (arg0*arg1)+arg2 (signed 64-bit)
+
+ {name: "MULHD", argLength: 2, reg: gp21, asm: "MULHD", commutative: true}, // (arg0 * arg1) >> 64, signed
+ {name: "MULHW", argLength: 2, reg: gp21, asm: "MULHW", commutative: true}, // (arg0 * arg1) >> 32, signed
+ {name: "MULHDU", argLength: 2, reg: gp21, asm: "MULHDU", commutative: true}, // (arg0 * arg1) >> 64, unsigned
+ {name: "MULHWU", argLength: 2, reg: gp21, asm: "MULHWU", commutative: true}, // (arg0 * arg1) >> 32, unsigned
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, returns (hi, lo)
+
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true}, // arg0*arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true}, // arg0*arg1
+
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD"}, // arg0*arg1 + arg2
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS"}, // arg0*arg1 + arg2
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB"}, // arg0*arg1 - arg2
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS"}, // arg0*arg1 - arg2
+
+ {name: "SRAD", argLength: 2, reg: gp21, asm: "SRAD"}, // signed arg0 >> (arg1&127), 64 bit width (note: 127, not 63!)
+ {name: "SRAW", argLength: 2, reg: gp21, asm: "SRAW"}, // signed arg0 >> (arg1&63), 32 bit width
+ {name: "SRD", argLength: 2, reg: gp21, asm: "SRD"}, // unsigned arg0 >> (arg1&127), 64 bit width
+ {name: "SRW", argLength: 2, reg: gp21, asm: "SRW"}, // unsigned arg0 >> (arg1&63), 32 bit width
+ {name: "SLD", argLength: 2, reg: gp21, asm: "SLD"}, // arg0 << (arg1&127), 64 bit width
+ {name: "SLW", argLength: 2, reg: gp21, asm: "SLW"}, // arg0 << (arg1&63), 32 bit width
+
+ {name: "ROTL", argLength: 2, reg: gp21, asm: "ROTL"}, // arg0 rotate left by arg1 mod 64
+ {name: "ROTLW", argLength: 2, reg: gp21, asm: "ROTLW"}, // uint32(arg0) rotate left by arg1 mod 32
+ // The following are ops to implement the extended mnemonics for shifts as described in section C.8 of the ISA.
+ // The constant shift values are packed into the aux int32.
+ {name: "RLDICL", argLength: 1, reg: gp11, asm: "RLDICL", aux: "Int32"}, // arg0 extract bits identified by shift params"
+ {name: "CLRLSLWI", argLength: 1, reg: gp11, asm: "CLRLSLWI", aux: "Int32"}, //
+ {name: "CLRLSLDI", argLength: 1, reg: gp11, asm: "CLRLSLDI", aux: "Int32"}, //
+
+ {name: "LoweredAdd64Carry", argLength: 3, reg: gp32, resultNotInArgs: true}, // arg0 + arg1 + carry, returns (sum, carry)
+
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "Int64"}, // signed arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "Int64"}, // unsigned arg0 >> auxInt, 0 <= auxInt < 32, 32 bit width
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 64, 64 bit width
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "Int64"}, // arg0 << auxInt, 0 <= auxInt < 32, 32 bit width
+
+ {name: "ROTLconst", argLength: 1, reg: gp11, asm: "ROTL", aux: "Int64"}, // arg0 rotate left by auxInt bits
+ {name: "ROTLWconst", argLength: 1, reg: gp11, asm: "ROTLW", aux: "Int64"}, // uint32(arg0) rotate left by auxInt bits
+ {name: "EXTSWSLconst", argLength: 1, reg: gp11, asm: "EXTSWSLI", aux: "Int64"},
+
+ {name: "RLWINM", argLength: 1, reg: gp11, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by immediate "rlwinm". encodePPC64RotateMask describes aux
+ {name: "RLWNM", argLength: 2, reg: gp21, asm: "RLWNM", aux: "Int64"}, // Rotate and mask by "rlwnm". encodePPC64RotateMask describes aux
+ {name: "RLWMI", argLength: 2, reg: gp21a0, asm: "RLWMI", aux: "Int64", resultInArg0: true}, // "rlwimi" similar aux encoding as above
+
+ {name: "CNTLZD", argLength: 1, reg: gp11, asm: "CNTLZD", clobberFlags: true}, // count leading zeros
+ {name: "CNTLZW", argLength: 1, reg: gp11, asm: "CNTLZW", clobberFlags: true}, // count leading zeros (32 bit)
+
+ {name: "CNTTZD", argLength: 1, reg: gp11, asm: "CNTTZD"}, // count trailing zeros
+ {name: "CNTTZW", argLength: 1, reg: gp11, asm: "CNTTZW"}, // count trailing zeros (32 bit)
+
+ {name: "POPCNTD", argLength: 1, reg: gp11, asm: "POPCNTD"}, // number of set bits in arg0
+ {name: "POPCNTW", argLength: 1, reg: gp11, asm: "POPCNTW"}, // number of set bits in each word of arg0 placed in corresponding word
+ {name: "POPCNTB", argLength: 1, reg: gp11, asm: "POPCNTB"}, // number of set bits in each byte of arg0 placed in corresponding byte
+
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV"}, // arg0/arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS"}, // arg0/arg1
+
+ {name: "DIVD", argLength: 2, reg: gp21, asm: "DIVD", typ: "Int64"}, // arg0/arg1 (signed 64-bit)
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"}, // arg0/arg1 (signed 32-bit)
+ {name: "DIVDU", argLength: 2, reg: gp21, asm: "DIVDU", typ: "Int64"}, // arg0/arg1 (unsigned 64-bit)
+ {name: "DIVWU", argLength: 2, reg: gp21, asm: "DIVWU", typ: "Int32"}, // arg0/arg1 (unsigned 32-bit)
+
+ {name: "MODUD", argLength: 2, reg: gp21, asm: "MODUD", typ: "UInt64"}, // arg0 % arg1 (unsigned 64-bit)
+ {name: "MODSD", argLength: 2, reg: gp21, asm: "MODSD", typ: "Int64"}, // arg0 % arg1 (signed 64-bit)
+ {name: "MODUW", argLength: 2, reg: gp21, asm: "MODUW", typ: "UInt32"}, // arg0 % arg1 (unsigned 32-bit)
+ {name: "MODSW", argLength: 2, reg: gp21, asm: "MODSW", typ: "Int32"}, // arg0 % arg1 (signed 32-bit)
+ // MOD is implemented as rem := arg0 - (arg0/arg1) * arg1
+
+ // Conversions are all float-to-float register operations. "Integer" refers to encoding in the FP register.
+ {name: "FCTIDZ", argLength: 1, reg: fp11, asm: "FCTIDZ", typ: "Float64"}, // convert float to 64-bit int round towards zero
+ {name: "FCTIWZ", argLength: 1, reg: fp11, asm: "FCTIWZ", typ: "Float64"}, // convert float to 32-bit int round towards zero
+ {name: "FCFID", argLength: 1, reg: fp11, asm: "FCFID", typ: "Float64"}, // convert 64-bit integer to float
+ {name: "FCFIDS", argLength: 1, reg: fp11, asm: "FCFIDS", typ: "Float32"}, // convert 32-bit integer to float
+ {name: "FRSP", argLength: 1, reg: fp11, asm: "FRSP", typ: "Float64"}, // round float to 32-bit value
+
+ // Movement between float and integer registers with no change in bits; accomplished with stores+loads on PPC.
+ // Because the 32-bit load-literal-bits instructions have impoverished addressability, always widen the
+// data and use FMOVDload and FMOVDstore instead (this will also dodge endianness issues).
+ // There are optimizations that should apply -- (Xi2f64 (MOVWload (not-ADD-ptr+offset) ) ) could use
+ // the word-load instructions. (Xi2f64 (MOVDload ptr )) can be (FMOVDload ptr)
+
+ {name: "MFVSRD", argLength: 1, reg: fpgp, asm: "MFVSRD", typ: "Int64"}, // move 64 bits of F register into G register
+ {name: "MTVSRD", argLength: 1, reg: gpfp, asm: "MTVSRD", typ: "Float64"}, // move 64 bits of G register into F register
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0&arg1
+ {name: "ANDN", argLength: 2, reg: gp21, asm: "ANDN"}, // arg0&^arg1
+ {name: "ANDCC", argLength: 2, reg: gp2cr, asm: "ANDCC", commutative: true, typ: "Flags"}, // arg0&arg1 sets CC
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0|arg1
+ {name: "ORN", argLength: 2, reg: gp21, asm: "ORN"}, // arg0|^arg1
+ {name: "ORCC", argLength: 2, reg: gp2cr, asm: "ORCC", commutative: true, typ: "Flags"}, // arg0|arg1 sets CC
+ {name: "NOR", argLength: 2, reg: gp21, asm: "NOR", commutative: true}, // ^(arg0|arg1)
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", typ: "Int64", commutative: true}, // arg0^arg1
+ {name: "XORCC", argLength: 2, reg: gp2cr, asm: "XORCC", commutative: true, typ: "Flags"}, // arg0^arg1 sets CC
+ {name: "EQV", argLength: 2, reg: gp21, asm: "EQV", typ: "Int64", commutative: true}, // arg0^^arg1
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0 (integer)
+ {name: "FNEG", argLength: 1, reg: fp11, asm: "FNEG"}, // -arg0 (floating point)
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0) (floating point)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0) (floating point, single precision)
+ {name: "FFLOOR", argLength: 1, reg: fp11, asm: "FRIM"}, // floor(arg0), float64
+ {name: "FCEIL", argLength: 1, reg: fp11, asm: "FRIP"}, // ceil(arg0), float64
+ {name: "FTRUNC", argLength: 1, reg: fp11, asm: "FRIZ"}, // trunc(arg0), float64
+ {name: "FROUND", argLength: 1, reg: fp11, asm: "FRIN"}, // round(arg0), float64
+ {name: "FABS", argLength: 1, reg: fp11, asm: "FABS"}, // abs(arg0), float64
+ {name: "FNABS", argLength: 1, reg: fp11, asm: "FNABS"}, // -abs(arg0), float64
+ {name: "FCPSGN", argLength: 2, reg: fp21, asm: "FCPSGN"}, // copysign arg0 -> arg1, float64
+
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64"}, // arg0|aux
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64"}, // arg0^aux
+ {name: "ANDconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}, outputs: []regMask{gp}}, asm: "ANDCC", aux: "Int64", clobberFlags: true}, // arg0&aux // and-immediate sets CC on PPC, always.
+ {name: "ANDCCconst", argLength: 1, reg: regInfo{inputs: []regMask{gp | sp | sb}}, asm: "ANDCC", aux: "Int64", typ: "Flags"}, // arg0&aux == 0 // and-immediate sets CC on PPC, always.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB", typ: "Int64"}, // sign extend int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11, asm: "MOVBZ", typ: "Int64"}, // zero extend uint8 to uint64
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH", typ: "Int64"}, // sign extend int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11, asm: "MOVHZ", typ: "Int64"}, // zero extend uint16 to uint64
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW", typ: "Int64"}, // sign extend int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11, asm: "MOVWZ", typ: "Int64"}, // zero extend uint32 to uint64
+
+ // Load bytes in the endian order of the arch from arg0+aux+auxint into a 64 bit register.
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte zero extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes sign extend
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes sign extend
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes
+
+ // Load bytes in reverse endian order of the arch from arg0 into a 64 bit register, all zero extend.
+ // The generated instructions are indexed loads with no offset field in the instruction so the aux fields are not used.
+ // In these cases the index register field is set to 0 and the full address is in the base register.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes reverse order
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes zero extend reverse order
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes zero extend reverse order
+
+ // In these cases an index register is used in addition to a base register
+ // Loads from memory location arg[0] + arg[1].
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, asm: "MOVBZ", typ: "UInt8"}, // zero extend uint8 to uint64
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, asm: "MOVH", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, asm: "MOVHZ", typ: "UInt16"}, // zero extend uint16 to uint64
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, asm: "MOVW", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, asm: "MOVWZ", typ: "UInt32"}, // zero extend uint32 to uint64
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, asm: "MOVD", typ: "Int64"},
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVHBR", typ: "Int16"}, // sign extend int16 to int64
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVWBR", typ: "Int32"}, // sign extend int32 to int64
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, asm: "MOVDBR", typ: "Int64"},
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", typ: "Float64"},
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", typ: "Float32"},
+
+ // Prefetch instruction
+ // Do prefetch of address generated with arg0 and arg1 with option aux. arg0=addr,arg1=memory, aux=option.
+ {name: "DCBT", argLength: 2, aux: "Int64", reg: prefreg, asm: "DCBT", hasSideEffects: true},
+
+ // Store bytes in the reverse endian order of the arch into arg0.
+ // These are indexed stores with no offset field in the instruction so the auxint fields are not used.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstore, asm: "MOVDBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes reverse order
+ {name: "MOVWBRstore", argLength: 3, reg: gpstore, asm: "MOVWBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes reverse order
+ {name: "MOVHBRstore", argLength: 3, reg: gpstore, asm: "MOVHBR", aux: "Sym", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes reverse order
+
+ // Floating point loads from arg0+aux+auxint
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load double float
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load single float
+
+ // Store bytes in the endian order of the arch into arg0+aux+auxint
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes
+
+ // Store floating point value into arg0+aux+auxint
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store double flot
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store single float
+
+ // Stores using index and base registers
+ // Stores to arg[0] + arg[1]
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVB", typ: "Mem"}, // store bye
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVH", typ: "Mem"}, // store half word
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVW", typ: "Mem"}, // store word
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVD", typ: "Mem"}, // store double word
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", typ: "Mem"}, // store double float
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", typ: "Mem"}, // store single float
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVHBR", typ: "Mem"}, // store half word reversed byte using index reg
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVWBR", typ: "Mem"}, // store word reversed byte using index reg
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, asm: "MOVDBR", typ: "Mem"}, // store double word reversed byte using index reg
+
+ // The following ops store 0 into arg0+aux+auxint arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstorezero, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 1 byte
+ {name: "MOVHstorezero", argLength: 2, reg: gpstorezero, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 2 bytes
+ {name: "MOVWstorezero", argLength: 2, reg: gpstorezero, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 4 bytes
+ {name: "MOVDstorezero", argLength: 2, reg: gpstorezero, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store zero 8 bytes
+
+ {name: "MOVDaddr", argLength: 1, reg: regInfo{inputs: []regMask{sp | sb | gp}, outputs: []regMask{gp}}, aux: "SymOff", asm: "MOVD", rematerializeable: true, symEffect: "Addr"}, // arg0 + auxInt + aux.(*gc.Sym), arg0=SP/SB/GP
+
+ {name: "MOVDconst", argLength: 0, reg: gp01, aux: "Int64", asm: "MOVD", typ: "Int64", rematerializeable: true}, //
+ {name: "FMOVDconst", argLength: 0, reg: fp01, aux: "Float64", asm: "FMOVD", rematerializeable: true}, //
+ {name: "FMOVSconst", argLength: 0, reg: fp01, aux: "Float32", asm: "FMOVS", rematerializeable: true}, //
+ {name: "FCMPU", argLength: 2, reg: fp2cr, asm: "FCMPU", typ: "Flags"},
+
+ {name: "CMP", argLength: 2, reg: gp2cr, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPU", argLength: 2, reg: gp2cr, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2cr, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2cr, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPconst", argLength: 1, reg: gp1cr, asm: "CMP", aux: "Int64", typ: "Flags"},
+ {name: "CMPUconst", argLength: 1, reg: gp1cr, asm: "CMPU", aux: "Int64", typ: "Flags"},
+ {name: "CMPWconst", argLength: 1, reg: gp1cr, asm: "CMPW", aux: "Int32", typ: "Flags"},
+ {name: "CMPWUconst", argLength: 1, reg: gp1cr, asm: "CMPWU", aux: "Int32", typ: "Flags"},
+
+ // ISEL auxInt values 0=LT 1=GT 2=EQ arg2 ? arg0 : arg1
+ // ISEL auxInt values 4=GE 5=LE 6=NE !arg2 ? arg1 : arg0
+ // ISELB special case where arg0, arg1 values are 0, 1 for boolean result
+ {name: "ISEL", argLength: 3, reg: crgp21, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
+ {name: "ISELB", argLength: 2, reg: crgp11, asm: "ISEL", aux: "Int32", typ: "Int32"}, // see above
+
+ // pseudo-ops
+ {name: "Equal", argLength: 1, reg: crgp}, // bool, true flags encode x==y false otherwise.
+ {name: "NotEqual", argLength: 1, reg: crgp}, // bool, true flags encode x!=y false otherwise.
+ {name: "LessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "FLessThan", argLength: 1, reg: crgp}, // bool, true flags encode x<y false otherwise.
+ {name: "LessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise.
+ {name: "FLessEqual", argLength: 1, reg: crgp}, // bool, true flags encode x<=y false otherwise; PPC <= === !> which is wrong for NaN
+ {name: "GreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "FGreaterThan", argLength: 1, reg: crgp}, // bool, true flags encode x>y false otherwise.
+ {name: "GreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.
+ {name: "FGreaterEqual", argLength: 1, reg: crgp}, // bool, true flags encode x>=y false otherwise.; PPC >= === !< which is wrong for NaN
+
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of the closure pointer.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{ctxt}}, zeroWidth: true},
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+ // I.e., if f calls g "calls" getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ //arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp | sp | sb}, clobbers: tmp}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ {name: "CALLstatic", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: -1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: -1, reg: regInfo{inputs: []regMask{callptr, ctxt, 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: -1, reg: regInfo{inputs: []regMask{callptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // large or unaligned zeroing
+ // arg0 = address of memory to zero (in R3, changed as side effect)
+ // returns mem
+ //
+ // a loop is generated when there is more than one iteration
+ // needed to clear 4 doublewords
+ //
+ // XXLXOR VS32,VS32,VS32
+ // MOVD $len/32,R31
+ // MOVD R31,CTR
+ // MOVD $16,R31
+ // loop:
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS32,(R31)(R3)
+ // ADD R3,32
+ // BC loop
+
+ // remaining doubleword clears generated as needed
+ // MOVD R0,(R3)
+ // MOVD R0,8(R3)
+ // MOVD R0,16(R3)
+ // MOVD R0,24(R3)
+
+ // one or more of these to clear remainder < 8 bytes
+ // MOVW R0,n1(R3)
+ // MOVH R0,n2(R3)
+ // MOVB R0,n3(R3)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp}},
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredQuadZero",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20")},
+ clobbers: buildReg("R20"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ },
+
+ // R31 is temp register
+ // Loop code:
+ // MOVD len/32,R31 set up loop ctr
+ // MOVD R31,CTR
+ // MOVD $16,R31 index register
+ // loop:
+ // LXVD2X (R0)(R4),VS32
+ // LXVD2X (R31)(R4),VS33
+ // ADD R4,$32 increment src
+ // STXVD2X VS32,(R0)(R3)
+ // STXVD2X VS33,(R31)(R3)
+ // ADD R3,$32 increment dst
+ // BC 16,0,loop branch ctr
+ // For this purpose, VS32 and VS33 are treated as
+ // scratch registers. Since regalloc does not
+// track vector registers, even if they could be marked
+ // as clobbered it would have no effect.
+ // TODO: If vector registers are managed by regalloc
+ // mark these as clobbered.
+ //
+ // Bytes not moved by this loop are moved
+ // with a combination of the following instructions,
+ // starting with the largest sizes and generating as
+ // many as needed, using the appropriate offset value.
+ // MOVD n(R4),R14
+ // MOVD R14,n(R3)
+ // MOVW n1(R4),R14
+ // MOVW R14,n1(R3)
+ // MOVH n2(R4),R14
+ // MOVH R14,n2(R3)
+ // MOVB n3(R4),R14
+ // MOVB R14,n3(R3)
+
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+ {
+ name: "LoweredMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ // The following is similar to the LoweredMove, but uses
+ // LXV instead of LXVD2X, which does not require an index
+// register and will do 4 in a loop instead of only 2.
+ {
+ name: "LoweredQuadMove",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R20"), buildReg("R21")},
+ clobbers: buildReg("R20 R21"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {
+ name: "LoweredQuadMoveShort",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{gp, gp},
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ },
+
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, typ: "Mem", aux: "Int64", faultOnNilArg0: true, hasSideEffects: true},
+
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, typ: "UInt8", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, typ: "UInt32", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoadPtr", argLength: 2, reg: gpload, typ: "Int64", aux: "Int64", clobberFlags: true, faultOnNilArg0: true},
+
+ // atomic add32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // ADD Rarg1, Rout
+ // STDCCC Rout, (Rarg0)
+ // BNE -3(PC)
+ // return new sum
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic exchange32, 64
+ // LWSYNC
+ // LDAR (Rarg0), Rout
+ // STDCCC Rarg1, (Rarg0)
+ // BNE -2(PC)
+ // ISYNC
+ // return old val
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory. auxint must be zero.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // SYNC
+ // LDAR (Rarg0), Rtmp
+ // CMP Rarg1, Rtmp
+ // BNE 3(PC)
+ // STDCCC Rarg2, (Rarg0)
+ // BNE -4(PC)
+ // CBNZ Rtmp, -4(PC)
+ // CSET EQ, Rout
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, aux: "Int64", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // atomic 8/32 and/or.
+ // *arg0 &= (|=) arg1. arg2=mem. returns memory. auxint must be zero.
+ // LBAR/LWAT (Rarg0), Rtmp
+ // AND/OR Rarg1, Rtmp
+ // STBCCC/STWCCC Rtmp, (Rarg0), Rtmp
+ // BNE Rtmp, -3(PC)
+ {name: "LoweredAtomicAnd8", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpstore, asm: "AND", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr8", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpstore, asm: "OR", faultOnNilArg0: true, hasSideEffects: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It preserves R0 through R17 (except special registers R1, R2, R11, R12, R13), g, and its arguments R20 and R21,
+ // but may clobber anything else, including R31 (REGTMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R20"), buildReg("R21")}, clobbers: (callerSave &^ buildReg("R0 R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17 R20 R21 g")) | buildReg("R31")}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r5, r6}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r4, r5}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r3, r4}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // So if we want (LessThan (CMP a b)) but we can't do that because a is a constant,
+ // then we do (LessThan (InvertFlags (CMP b a))) instead.
+ // Rewrites will convert this to (GreaterThan (CMP b a)).
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Constant flag values. For any comparison, there are 3 possible
+ // outcomes: either the three from the signed total order (<,==,>)
+ // or the three from the unsigned total order, depending on which
+ // comparison operation was used (CMP or CMPU -- PPC is different from
+ // the other architectures, which have a single comparison producing
+ // both signed and unsigned comparison results.)
+
+ // These ops are for temporary use by rewrite rules. They
+ // cannot appear in the generated assembly.
+ {name: "FlagEQ"}, // equal
+ {name: "FlagLT"}, // signed < or unsigned <
+ {name: "FlagGT"}, // signed > or unsigned >
+ }
+
+ blocks := []blockData{
+ {name: "EQ", controls: 1},
+ {name: "NE", controls: 1},
+ {name: "LT", controls: 1},
+ {name: "LE", controls: 1},
+ {name: "GT", controls: 1},
+ {name: "GE", controls: 1},
+ {name: "FLT", controls: 1},
+ {name: "FLE", controls: 1},
+ {name: "FGT", controls: 1},
+ {name: "FGE", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "PPC64",
+ pkg: "cmd/internal/obj/ppc64",
+ genfile: "../../ppc64/ssa.go",
+ ops: ops,
+ blocks: blocks,
+ regnames: regNamesPPC64,
+ ParamIntRegNames: "R3 R4 R5 R6 R7 R8 R9 R10 R14 R15 R16 R17",
+ ParamFloatRegNames: "F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12",
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1,
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/README b/src/cmd/compile/internal/ssa/gen/README
new file mode 100644
index 0000000..6d2c6bb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/README
@@ -0,0 +1,7 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+This package generates opcode tables, rewrite rules, etc. for the ssa compiler.
+Run it with Go 1.13 or above:
+ go run *.go
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64.rules b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
new file mode 100644
index 0000000..acef3df
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64.rules
@@ -0,0 +1,757 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Optimizations TODO:
+// * Use SLTI and SLTIU for comparisons to constants, instead of SLT/SLTU with constants in registers
+// * Use the zero register instead of moving 0 into a register.
+// * Add rules to avoid generating a temp bool value for (If (SLT[U] ...) ...).
+// * Arrange for non-trivial Zero and Move lowerings to use aligned loads and stores.
+// * Avoid using Neq32 for writeBarrier.enabled checks.
+
+// Lowering arithmetic
+(Add64 ...) => (ADD ...)
+(AddPtr ...) => (ADD ...)
+(Add32 ...) => (ADD ...)
+(Add16 ...) => (ADD ...)
+(Add8 ...) => (ADD ...)
+(Add32F ...) => (FADDS ...)
+(Add64F ...) => (FADDD ...)
+
+(Sub64 ...) => (SUB ...)
+(SubPtr ...) => (SUB ...)
+(Sub32 ...) => (SUB ...)
+(Sub16 ...) => (SUB ...)
+(Sub8 ...) => (SUB ...)
+(Sub32F ...) => (FSUBS ...)
+(Sub64F ...) => (FSUBD ...)
+
+(Mul64 ...) => (MUL ...)
+(Mul64uhilo ...) => (LoweredMuluhilo ...)
+(Mul64uover ...) => (LoweredMuluover ...)
+(Mul32 ...) => (MULW ...)
+(Mul16 x y) => (MULW (SignExt16to32 x) (SignExt16to32 y))
+(Mul8 x y) => (MULW (SignExt8to32 x) (SignExt8to32 y))
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMULD ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIVD ...)
+
+(Div64 x y [false]) => (DIV x y)
+(Div64u ...) => (DIVU ...)
+(Div32 x y [false]) => (DIVW x y)
+(Div32u ...) => (DIVUW ...)
+(Div16 x y [false]) => (DIVW (SignExt16to32 x) (SignExt16to32 y))
+(Div16u x y) => (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Div8 x y) => (DIVW (SignExt8to32 x) (SignExt8to32 y))
+(Div8u x y) => (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(Hmul64 ...) => (MULH ...)
+(Hmul64u ...) => (MULHU ...)
+(Hmul32 x y) => (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+(Hmul32u x y) => (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+
+// (x + y) / 2 => (x / 2) + (y / 2) + (x & y & 1)
+(Avg64u <t> x y) => (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+
+(Mod64 x y [false]) => (REM x y)
+(Mod64u ...) => (REMU ...)
+(Mod32 x y [false]) => (REMW x y)
+(Mod32u ...) => (REMUW ...)
+(Mod16 x y [false]) => (REMW (SignExt16to32 x) (SignExt16to32 y))
+(Mod16u x y) => (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+(Mod8 x y) => (REMW (SignExt8to32 x) (SignExt8to32 y))
+(Mod8u x y) => (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+
+(And64 ...) => (AND ...)
+(And32 ...) => (AND ...)
+(And16 ...) => (AND ...)
+(And8 ...) => (AND ...)
+
+(Or64 ...) => (OR ...)
+(Or32 ...) => (OR ...)
+(Or16 ...) => (OR ...)
+(Or8 ...) => (OR ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor32 ...) => (XOR ...)
+(Xor16 ...) => (XOR ...)
+(Xor8 ...) => (XOR ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg32 ...) => (NEG ...)
+(Neg16 ...) => (NEG ...)
+(Neg8 ...) => (NEG ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEGD ...)
+
+(Com64 ...) => (NOT ...)
+(Com32 ...) => (NOT ...)
+(Com16 ...) => (NOT ...)
+(Com8 ...) => (NOT ...)
+
+(Sqrt ...) => (FSQRTD ...)
+(Sqrt32 ...) => (FSQRTS ...)
+
+(Copysign ...) => (FSGNJD ...)
+
+(Abs ...) => (FABSD ...)
+
+(FMA ...) => (FMADDD ...)
+
+// Sign and zero extension.
+
+(SignExt8to16 ...) => (MOVBreg ...)
+(SignExt8to32 ...) => (MOVBreg ...)
+(SignExt8to64 ...) => (MOVBreg ...)
+(SignExt16to32 ...) => (MOVHreg ...)
+(SignExt16to64 ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to16 ...) => (MOVBUreg ...)
+(ZeroExt8to32 ...) => (MOVBUreg ...)
+(ZeroExt8to64 ...) => (MOVBUreg ...)
+(ZeroExt16to32 ...) => (MOVHUreg ...)
+(ZeroExt16to64 ...) => (MOVHUreg ...)
+(ZeroExt32to64 ...) => (MOVWUreg ...)
+
+(Cvt32to32F ...) => (FCVTSW ...)
+(Cvt32to64F ...) => (FCVTDW ...)
+(Cvt64to32F ...) => (FCVTSL ...)
+(Cvt64to64F ...) => (FCVTDL ...)
+
+(Cvt32Fto32 ...) => (FCVTWS ...)
+(Cvt32Fto64 ...) => (FCVTLS ...)
+(Cvt64Fto32 ...) => (FCVTWD ...)
+(Cvt64Fto64 ...) => (FCVTLD ...)
+
+(Cvt32Fto64F ...) => (FCVTDS ...)
+(Cvt64Fto32F ...) => (FCVTSD ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+// From genericOps.go:
+// "0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0"
+//
+// Like other arches, we compute ~((x-1) >> 63), with arithmetic right shift.
+// For positive x, bit 63 of x-1 is always 0, so the result is -1.
+// For zero x, bit 63 of x-1 is 1, so the result is 0.
+//
+(Slicemask <t> x) => (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
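+
+// A Go-level sketch (hedged) of the lowering above; Go's >> on int64 is an
+// arithmetic shift, matching SRAI:
+//
+//	func slicemask(x int64) int64 {
+//		return ^((x - 1) >> 63) // -1 for x > 0, 0 for x == 0 (x < 0 is undef per the contract above)
+//	}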
+
+// Truncations
+// We ignore the unused high parts of registers, so truncates are just copies.
+(Trunc16to8 ...) => (Copy ...)
+(Trunc32to8 ...) => (Copy ...)
+(Trunc32to16 ...) => (Copy ...)
+(Trunc64to8 ...) => (Copy ...)
+(Trunc64to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Shifts
+
+// SLL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0.
+//
+// Breaking down the operation:
+//
+// (SLL x y) generates x << (y & 63).
+//
+// If y < 64, this is the value we want. Otherwise, we want zero.
+//
+// So, we AND with -1 * uint64(y < 64), which is 0xfffff... if y < 64 and 0 otherwise.
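+//
+// A Go-level sketch (hedged; lsh64 is a hypothetical helper mirroring the
+// rule shape, not generated code):
+//
+//	func lsh64(x, y uint64) uint64 {
+//		var keep uint64
+//		if y < 64 { // SLTIU [64] y
+//			keep = ^uint64(0) // negating the 1 gives all ones
+//		}
+//		return (x << (y & 63)) & keep // SLL, then AND with the mask
+//	}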
+(Lsh8x8 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh8x16 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh8x32 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh8x64 <t> x y) => (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Lsh16x8 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh16x16 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh16x32 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh16x64 <t> x y) => (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Lsh32x8 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh32x16 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh32x32 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh32x64 <t> x y) => (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Lsh64x8 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Lsh64x16 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Lsh64x32 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Lsh64x64 <t> x y) => (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRL only considers the bottom 6 bits of y. If y >= 64, the result should
+// always be 0. See Lsh above for a detailed description.
+(Rsh8Ux8 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh8Ux16 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh8Ux32 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh8Ux64 <t> x y) => (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+(Rsh16Ux8 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh16Ux16 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh16Ux32 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh16Ux64 <t> x y) => (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+(Rsh32Ux8 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh32Ux16 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh32Ux32 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh32Ux64 <t> x y) => (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+(Rsh64Ux8 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+(Rsh64Ux16 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+(Rsh64Ux32 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+(Rsh64Ux64 <t> x y) => (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+
+// SRA only considers the bottom 6 bits of y. If y >= 64, the result should
+// be either 0 or -1 based on the sign bit.
+//
+// We implement this by performing the max shift (-1) if y >= 64.
+//
+// We OR (uint64(y < 64) - 1) into y before passing it to SRA. This leaves
+// us with -1 (0xffff...) if y >= 64.
+//
+// We don't need to sign-extend the OR result, as it will be at minimum 8 bits,
+// more than the 6 bits SRA cares about.
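+//
+// A Go-level sketch (hedged; rsh64 is a hypothetical helper mirroring the
+// rule shape, not generated code):
+//
+//	func rsh64(x int64, y uint64) int64 {
+//		var sat uint64 // 0 when y < 64, all ones when y >= 64
+//		if y >= 64 {
+//			sat = ^uint64(0) // SLTIU gives 0, then ADDI -1 gives all ones
+//		}
+//		y = (y | sat) & 63 // OR forces the maximum shift (63) when y >= 64
+//		return x >> y      // arithmetic shift keeps the sign, as SRA does
+//	}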
+(Rsh8x8 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh8x16 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh8x32 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh8x64 <t> x y) => (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh16x8 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh16x16 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh16x32 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh16x64 <t> x y) => (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh32x8 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh32x16 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh32x32 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh32x64 <t> x y) => (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+(Rsh64x8 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+(Rsh64x16 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+(Rsh64x32 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+(Rsh64x64 <t> x y) => (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+
+// Rotates.
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 <t> x (MOVDconst [c])) => (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
+(RotateLeft64 <t> x (MOVDconst [c])) => (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
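+
+// A Go-level sketch (hedged; rotl32 is a hypothetical helper, not generated
+// code) of the constant-rotate lowering above, shown for the 32-bit case:
+//
+//	func rotl32(x uint32, c uint32) uint32 {
+//		return x<<(c&31) | x>>(-c&31) // Or of the left and right shifts
+//	}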
+
+(Less64 ...) => (SLT ...)
+(Less32 x y) => (SLT (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (SLT (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (SLT (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (SLTU ...)
+(Less32U x y) => (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less64F ...) => (FLTD ...)
+(Less32F ...) => (FLTS ...)
+
+// Convert x <= y to !(y > x).
+(Leq64 x y) => (Not (Less64 y x))
+(Leq32 x y) => (Not (Less32 y x))
+(Leq16 x y) => (Not (Less16 y x))
+(Leq8 x y) => (Not (Less8 y x))
+(Leq64U x y) => (Not (Less64U y x))
+(Leq32U x y) => (Not (Less32U y x))
+(Leq16U x y) => (Not (Less16U y x))
+(Leq8U x y) => (Not (Less8U y x))
+(Leq64F ...) => (FLED ...)
+(Leq32F ...) => (FLES ...)
+
+(EqPtr x y) => (SEQZ (SUB <typ.Uintptr> x y))
+(Eq64 x y) => (SEQZ (SUB <x.Type> x y))
+(Eq32 x y) => (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Eq16 x y) => (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Eq8 x y) => (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Eq64F ...) => (FEQD ...)
+(Eq32F ...) => (FEQS ...)
+
+(NeqPtr x y) => (SNEZ (SUB <typ.Uintptr> x y))
+(Neq64 x y) => (SNEZ (SUB <x.Type> x y))
+(Neq32 x y) => (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+(Neq16 x y) => (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+(Neq8 x y) => (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+(Neq64F ...) => (FNED ...)
+(Neq32F ...) => (FNES ...)
+
+// Loads
+(Load <t> ptr mem) && t.IsBoolean() => (MOVBUload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && isSigned(t)) => (MOVBload ptr mem)
+(Load <t> ptr mem) && ( is8BitInt(t) && !isSigned(t)) => (MOVBUload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && isSigned(t)) => (MOVHload ptr mem)
+(Load <t> ptr mem) && (is16BitInt(t) && !isSigned(t)) => (MOVHUload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && isSigned(t)) => (MOVWload ptr mem)
+(Load <t> ptr mem) && (is32BitInt(t) && !isSigned(t)) => (MOVWUload ptr mem)
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVWload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Stores
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && !is32BitFloat(val.Type) => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && !is64BitFloat(val.Type) => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+
+// We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
+// knows what variables are being read/written by the ops.
+(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+ (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+
+(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBUload [off1+int32(off2)] {sym} base mem)
+(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBload [off1+int32(off2)] {sym} base mem)
+(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHUload [off1+int32(off2)] {sym} base mem)
+(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHload [off1+int32(off2)] {sym} base mem)
+(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWUload [off1+int32(off2)] {sym} base mem)
+(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWload [off1+int32(off2)] {sym} base mem)
+(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDload [off1+int32(off2)] {sym} base mem)
+
+(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVBstore [off1+int32(off2)] {sym} base val mem)
+(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVHstore [off1+int32(off2)] {sym} base val mem)
+(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVWstore [off1+int32(off2)] {sym} base val mem)
+(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+ (MOVDstore [off1+int32(off2)] {sym} base val mem)
+(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+
+// Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
+// with OffPtr -> ADDI.
+(ADDI [c] (MOVaddr [d] {s} x)) && is32Bit(c+int64(d)) => (MOVaddr [int32(c)+d] {s} x)
+
+// Small zeroing
+(Zero [0] _ mem) => mem
+(Zero [1] ptr mem) => (MOVBstore ptr (MOVDconst [0]) mem)
+(Zero [2] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore ptr (MOVDconst [0]) mem)
+(Zero [2] ptr mem) =>
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))
+(Zero [4] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore ptr (MOVDconst [0]) mem)
+(Zero [4] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))
+(Zero [4] ptr mem) =>
+ (MOVBstore [3] ptr (MOVDconst [0])
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem))))
+(Zero [8] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore ptr (MOVDconst [0]) mem)
+(Zero [8] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem))
+(Zero [8] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] ptr (MOVDconst [0])
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem))))
+
+(Zero [3] ptr mem) =>
+ (MOVBstore [2] ptr (MOVDconst [0])
+ (MOVBstore [1] ptr (MOVDconst [0])
+ (MOVBstore ptr (MOVDconst [0]) mem)))
+(Zero [6] {t} ptr mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] ptr (MOVDconst [0])
+ (MOVHstore [2] ptr (MOVDconst [0])
+ (MOVHstore ptr (MOVDconst [0]) mem)))
+(Zero [12] {t} ptr mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] ptr (MOVDconst [0])
+ (MOVWstore [4] ptr (MOVDconst [0])
+ (MOVWstore ptr (MOVDconst [0]) mem)))
+(Zero [16] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))
+(Zero [24] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem)))
+(Zero [32] {t} ptr mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] ptr (MOVDconst [0])
+ (MOVDstore [16] ptr (MOVDconst [0])
+ (MOVDstore [8] ptr (MOVDconst [0])
+ (MOVDstore ptr (MOVDconst [0]) mem))))
+
+// Medium 8-aligned zeroing uses a Duff's device
+// 8 and 128 are magic constants, see runtime/mkduff.go
+(Zero [s] {t} ptr mem)
+ && s%8 == 0 && s <= 8*128
+ && t.Alignment()%8 == 0 && !config.noDuffDevice =>
+ (DUFFZERO [8 * (128 - s/8)] ptr mem)
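+// Worked example (illustrative): an 8-aligned Zero of s = 64 bytes gets
+// AuxInt = 8 * (128 - 64/8) = 960. Assuming each zeroing step of the Duff's
+// device occupies 8 bytes of code, as the magic constant suggests, execution
+// starts 960 bytes in and only the final 8 double-word stores run; the maximum
+// s = 8*128 = 1024 gives AuxInt = 0, i.e. the full sequence.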
+
+// Generic zeroing uses a loop
+(Zero [s] {t} ptr mem) =>
+ (LoweredZero [t.Alignment()]
+ ptr
+ (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)]))
+ mem)
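+// Illustrative arithmetic for the loop above: the third operand is the address
+// of the last element to zero. Assuming moveSize returns the store width the
+// alignment allows (e.g. 4 for a 4-byte-aligned type), zeroing s = 40 bytes of
+// such a type yields an end pointer of ptr + 40 - 4 = ptr + 36, the address of
+// the last 4-byte word.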
+
+(Convert ...) => (MOVconvert ...)
+
+// Checks
+(IsNonNil ...) => (SNEZ ...)
+(IsInBounds ...) => (Less64U ...)
+(IsSliceInBounds ...) => (Leq64U ...)
+
+// Trivial lowering
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// Small moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBload src mem) mem)
+(Move [2] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore dst (MOVHload src mem) mem)
+(Move [2] dst src mem) =>
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))
+(Move [4] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore dst (MOVWload src mem) mem)
+(Move [4] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))
+(Move [4] dst src mem) =>
+ (MOVBstore [3] dst (MOVBload [3] src mem)
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem))))
+(Move [8] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore dst (MOVDload src mem) mem)
+(Move [8] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem))
+(Move [8] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [6] dst (MOVHload [6] src mem)
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem))))
+
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBload [2] src mem)
+ (MOVBstore [1] dst (MOVBload [1] src mem)
+ (MOVBstore dst (MOVBload src mem) mem)))
+(Move [6] {t} dst src mem) && t.Alignment()%2 == 0 =>
+ (MOVHstore [4] dst (MOVHload [4] src mem)
+ (MOVHstore [2] dst (MOVHload [2] src mem)
+ (MOVHstore dst (MOVHload src mem) mem)))
+(Move [12] {t} dst src mem) && t.Alignment()%4 == 0 =>
+ (MOVWstore [8] dst (MOVWload [8] src mem)
+ (MOVWstore [4] dst (MOVWload [4] src mem)
+ (MOVWstore dst (MOVWload src mem) mem)))
+(Move [16] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [32] {t} dst src mem) && t.Alignment()%8 == 0 =>
+ (MOVDstore [24] dst (MOVDload [24] src mem)
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))))
+
+// Medium 8-aligned move uses a Duff's device
+// 16 and 128 are magic constants, see runtime/mkduff.go
+(Move [s] {t} dst src mem)
+ && s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0
+ && !config.noDuffDevice && logLargeCopy(v, s) =>
+ (DUFFCOPY [16 * (128 - s/8)] dst src mem)
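+// Worked example (illustrative): moving s = 64 bytes of 8-aligned data gets
+// AuxInt = 16 * (128 - 64/8) = 1920; assuming each copy step of the Duff's
+// device occupies 16 bytes of code, as the magic constant suggests, only the
+// final 8 load/store pairs are executed.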
+
+// Generic move uses a loop
+(Move [s] {t} dst src mem) && (s <= 16 || logLargeCopy(v, s)) =>
+ (LoweredMove [t.Alignment()]
+ dst
+ src
+ (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src)
+ mem)
+
+// Boolean ops; 0=false, 1=true
+(AndB ...) => (AND ...)
+(OrB ...) => (OR ...)
+(EqB x y) => (SEQZ (XOR <typ.Bool> x y))
+(NeqB ...) => (XOR ...)
+(Not ...) => (SEQZ ...)
+
+// Lowering pointer arithmetic
+// TODO: Special handling for SP offsets, like ARM
+(OffPtr [off] ptr:(SP)) && is32Bit(off) => (MOVaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDI [off] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+(Const8 [val]) => (MOVDconst [int64(val)])
+(Const16 [val]) => (MOVDconst [int64(val)])
+(Const32 [val]) => (MOVDconst [int64(val)])
+(Const64 [val]) => (MOVDconst [int64(val)])
+(Const32F [val]) => (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
+(Const64F [val]) => (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+(ConstNil) => (MOVDconst [0])
+(ConstBool [val]) => (MOVDconst [int64(b2i(val))])
+
+(Addr {sym} base) => (MOVaddr {sym} [0] base)
+(LocalAddr {sym} base _) => (MOVaddr {sym} base)
+
+// Calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Atomic Intrinsics
+(AtomicLoad8 ...) => (LoweredAtomicLoad8 ...)
+(AtomicLoad32 ...) => (LoweredAtomicLoad32 ...)
+(AtomicLoad64 ...) => (LoweredAtomicLoad64 ...)
+(AtomicLoadPtr ...) => (LoweredAtomicLoad64 ...)
+
+(AtomicStore8 ...) => (LoweredAtomicStore8 ...)
+(AtomicStore32 ...) => (LoweredAtomicStore32 ...)
+(AtomicStore64 ...) => (LoweredAtomicStore64 ...)
+(AtomicStorePtrNoWB ...) => (LoweredAtomicStore64 ...)
+
+(AtomicAdd32 ...) => (LoweredAtomicAdd32 ...)
+(AtomicAdd64 ...) => (LoweredAtomicAdd64 ...)
+
+// AtomicAnd8(ptr,val) => LoweredAtomicAnd32(ptr&^3, ^((uint8(val) ^ 0xff) << ((ptr & 3) * 8)))
+(AtomicAnd8 ptr val mem) =>
+ (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr)
+ (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val))
+ (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
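+// Worked example (illustrative): for a byte at ptr&3 == 1 with val == 0xF0 the
+// shift amount is 1*8 = 8 and the mask is ^((0xF0^0xff) << 8) = 0xFFFFF0FF, so
+// the 32-bit AMOANDW ANDs the addressed byte with 0xF0 and leaves the other
+// three bytes of the aligned word unchanged.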
+
+(AtomicAnd32 ...) => (LoweredAtomicAnd32 ...)
+
+(AtomicCompareAndSwap32 ptr old new mem) => (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+(AtomicCompareAndSwap64 ...) => (LoweredAtomicCas64 ...)
+
+(AtomicExchange32 ...) => (LoweredAtomicExchange32 ...)
+(AtomicExchange64 ...) => (LoweredAtomicExchange64 ...)
+
+// AtomicOr8(ptr,val) => LoweredAtomicOr32(ptr&^3, uint32(val)<<((ptr&3)*8))
+(AtomicOr8 ptr val mem) =>
+ (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr)
+ (SLL <typ.UInt32> (ZeroExt8to32 val)
+ (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
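+// Worked example (illustrative): for a byte at ptr&3 == 3 with val == 0x01 the
+// shift amount is 3*8 = 24, so the OR operand is 0x01000000 and only the byte
+// addressed by ptr is modified within the aligned word.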
+
+(AtomicOr32 ...) => (LoweredAtomicOr32 ...)
+
+// Conditional branches
+(If cond yes no) => (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
+
+// Optimizations
+
+// Absorb SEQZ/SNEZ into branch.
+(BEQZ (SEQZ x) yes no) => (BNEZ x yes no)
+(BEQZ (SNEZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SEQZ x) yes no) => (BEQZ x yes no)
+(BNEZ (SNEZ x) yes no) => (BNEZ x yes no)
+
+// Absorb NEG into branch when possible.
+(BEQZ x:(NEG y) yes no) && x.Uses == 1 => (BEQZ y yes no)
+(BNEZ x:(NEG y) yes no) && x.Uses == 1 => (BNEZ y yes no)
+
+// Convert BEQZ/BNEZ into more optimal branch conditions.
+(BEQZ (SUB x y) yes no) => (BEQ x y yes no)
+(BNEZ (SUB x y) yes no) => (BNE x y yes no)
+(BEQZ (SLT x y) yes no) => (BGE x y yes no)
+(BNEZ (SLT x y) yes no) => (BLT x y yes no)
+(BEQZ (SLTU x y) yes no) => (BGEU x y yes no)
+(BNEZ (SLTU x y) yes no) => (BLTU x y yes no)
+
+// Convert branch with zero to more optimal branch zero.
+(BEQ (MOVDconst [0]) cond yes no) => (BEQZ cond yes no)
+(BEQ cond (MOVDconst [0]) yes no) => (BEQZ cond yes no)
+(BNE (MOVDconst [0]) cond yes no) => (BNEZ cond yes no)
+(BNE cond (MOVDconst [0]) yes no) => (BNEZ cond yes no)
+(BLT (MOVDconst [0]) cond yes no) => (BGTZ cond yes no)
+(BLT cond (MOVDconst [0]) yes no) => (BLTZ cond yes no)
+(BGE (MOVDconst [0]) cond yes no) => (BLEZ cond yes no)
+(BGE cond (MOVDconst [0]) yes no) => (BGEZ cond yes no)
+
+// Store zero
+(MOVBstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVBstorezero [off] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVHstorezero [off] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVWstorezero [off] {sym} ptr mem)
+(MOVDstore [off] {sym} ptr (MOVDconst [0]) mem) => (MOVDstorezero [off] {sym} ptr mem)
+
+// Boolean ops are already extended.
+(MOVBUreg x:((SEQZ|SNEZ) _)) => x
+(MOVBUreg x:((SLT|SLTU) _ _)) => x
+
+// Avoid sign/zero extension for consts.
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64(int8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+(MOVBUreg (MOVDconst [c])) => (MOVDconst [int64(uint8(c))])
+(MOVHUreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWUreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Avoid sign/zero extension after properly typed load.
+(MOVBreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWreg x:(MOVWload _ _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUload _ _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUload _ _)) => (MOVDreg x)
+
+// Fold double extensions.
+(MOVBreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVHreg _)) => (MOVDreg x)
+(MOVWreg x:(MOVWreg _)) => (MOVDreg x)
+(MOVBUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVHUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVBUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVHUreg _)) => (MOVDreg x)
+(MOVWUreg x:(MOVWUreg _)) => (MOVDreg x)
+
+// Do not extend before store.
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWUreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+
+// Replace extend after load with alternate load where possible.
+(MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBload <t> [off] {sym} ptr mem)
+(MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHload <t> [off] {sym} ptr mem)
+(MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWload <t> [off] {sym} ptr mem)
+(MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+(MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+(MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+
+// If a register move has only 1 use, just use the same register without emitting an instruction.
+// MOVDnop does not emit an instruction; it exists only to carry the type.
+(MOVDreg x) && x.Uses == 1 => (MOVDnop x)
+
+// TODO: we should be able to get rid of MOVDnop altogether.
+// But for now, this is enough to get rid of lots of them.
+(MOVDnop (MOVDconst [c])) => (MOVDconst [c])
+
+// Fold constant into immediate instructions where possible.
+(ADD (MOVDconst [val]) x) && is32Bit(val) => (ADDI [val] x)
+(AND (MOVDconst [val]) x) && is32Bit(val) => (ANDI [val] x)
+(OR (MOVDconst [val]) x) && is32Bit(val) => (ORI [val] x)
+(XOR (MOVDconst [val]) x) && is32Bit(val) => (XORI [val] x)
+(SLL x (MOVDconst [val])) => (SLLI [int64(val&63)] x)
+(SRL x (MOVDconst [val])) => (SRLI [int64(val&63)] x)
+(SRA x (MOVDconst [val])) => (SRAI [int64(val&63)] x)
+
+// Convert subtraction of a const into ADDI with negative immediate, where possible.
+(SUB x (MOVDconst [val])) && is32Bit(-val) => (ADDI [-val] x)
+
+// Subtraction of zero.
+(SUB x (MOVDconst [0])) => x
+(SUBW x (MOVDconst [0])) => (ADDIW [0] x)
+
+// Subtraction from zero.
+(SUB (MOVDconst [0]) x) => (NEG x)
+(SUBW (MOVDconst [0]) x) => (NEGW x)
+
+// Addition of zero or two constants.
+(ADDI [0] x) => x
+(ADDI [x] (MOVDconst [y])) && is32Bit(x + y) => (MOVDconst [x + y])
+
+// ANDI with all zeros, all ones or two constants.
+(ANDI [0] x) => (MOVDconst [0])
+(ANDI [-1] x) => x
+(ANDI [x] (MOVDconst [y])) => (MOVDconst [x & y])
+
+// ORI with all zeroes, all ones or two constants.
+(ORI [0] x) => x
+(ORI [-1] x) => (MOVDconst [-1])
+(ORI [x] (MOVDconst [y])) => (MOVDconst [x | y])
+
+// Negation of a constant.
+(NEG (MOVDconst [x])) => (MOVDconst [-x])
+(NEGW (MOVDconst [x])) => (MOVDconst [int64(int32(-x))])
+
+// Shift of a constant.
+(SLLI [x] (MOVDconst [y])) && is32Bit(y << x) => (MOVDconst [y << x])
+(SRLI [x] (MOVDconst [y])) => (MOVDconst [int64(uint64(y) >> x)])
+(SRAI [x] (MOVDconst [y])) => (MOVDconst [int64(y) >> x])
+
+// SLTI/SLTIU with constants.
+(SLTI [x] (MOVDconst [y])) => (MOVDconst [b2i(int64(y) < int64(x))])
+(SLTIU [x] (MOVDconst [y])) => (MOVDconst [b2i(uint64(y) < uint64(x))])
+
+// Merge negation into fused multiply-add and multiply-subtract.
+//
+// Key:
+//
+//   [+ -](x * y) [+ -] z.
+//    _ N          A S
+//                 D U
+//                 D B
+//
+// Note: multiplication commutativity handled by rule generator.
+(F(MADD|NMADD|MSUB|NMSUB)D neg:(FNEGD x) y z) && neg.Uses == 1 => (F(NMADD|MADD|NMSUB|MSUB)D x y z)
+(F(MADD|NMADD|MSUB|NMSUB)D x y neg:(FNEGD z)) && neg.Uses == 1 => (F(MSUB|NMSUB|MADD|NMADD)D x y z)
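+// For instance (illustrative), (FMADDD (FNEGD x) y z) computes ((-x)*y) + z,
+// which equals -(x*y) + z, so it is rewritten to (FNMADDD x y z).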
diff --git a/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
new file mode 100644
index 0000000..0769197
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/RISCV64Ops.go
@@ -0,0 +1,481 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import (
+ "fmt"
+)
+
+// Notes:
+// - Boolean types occupy the entire register. 0=false, 1=true.
+
+// Suffixes encode the bit width of various instructions:
+//
+// D (double word) = 64 bit int
+// W (word) = 32 bit int
+// H (half word) = 16 bit int
+// B (byte) = 8 bit int
+// S (single) = 32 bit float
+// D (double) = 64 bit float
+// L = 64 bit int, used when the opcode starts with F
+
+const (
+ riscv64REG_G = 27
+ riscv64REG_CTXT = 20
+ riscv64REG_LR = 1
+ riscv64REG_SP = 2
+ riscv64REG_GP = 3
+ riscv64REG_TP = 4
+ riscv64REG_TMP = 31
+ riscv64REG_ZERO = 0
+)
+
+func riscv64RegName(r int) string {
+ switch {
+ case r == riscv64REG_G:
+ return "g"
+ case r == riscv64REG_SP:
+ return "SP"
+ case 0 <= r && r <= 31:
+ return fmt.Sprintf("X%d", r)
+ case 32 <= r && r <= 63:
+ return fmt.Sprintf("F%d", r-32)
+ default:
+ panic(fmt.Sprintf("unknown register %d", r))
+ }
+}
+
+func init() {
+ var regNamesRISCV64 []string
+ var gpMask, fpMask, gpgMask, gpspMask, gpspsbMask, gpspsbgMask regMask
+ regNamed := make(map[string]regMask)
+
+ // Build the list of register names, creating an appropriately indexed
+ // regMask for the gp and fp registers as we go.
+ //
+ // If name is specified, use it rather than the riscv reg number.
+ addreg := func(r int, name string) regMask {
+ mask := regMask(1) << uint(len(regNamesRISCV64))
+ if name == "" {
+ name = riscv64RegName(r)
+ }
+ regNamesRISCV64 = append(regNamesRISCV64, name)
+ regNamed[name] = mask
+ return mask
+ }
+
+ // General purpose registers.
+ for r := 0; r <= 31; r++ {
+ if r == riscv64REG_LR {
+ // LR is not used by regalloc, so we skip it to leave
+ // room for pseudo-register SB.
+ continue
+ }
+
+ mask := addreg(r, "")
+
+ // Add general purpose registers to gpMask.
+ switch r {
+ // ZERO, GP, TP and TMP are not in any gp mask.
+ case riscv64REG_ZERO, riscv64REG_GP, riscv64REG_TP, riscv64REG_TMP:
+ case riscv64REG_G:
+ gpgMask |= mask
+ gpspsbgMask |= mask
+ case riscv64REG_SP:
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ default:
+ gpMask |= mask
+ gpgMask |= mask
+ gpspMask |= mask
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+ }
+ }
+
+	// Floating point registers.
+ for r := 32; r <= 63; r++ {
+ mask := addreg(r, "")
+ fpMask |= mask
+ }
+
+ // Pseudo-register: SB
+ mask := addreg(-1, "SB")
+ gpspsbMask |= mask
+ gpspsbgMask |= mask
+
+ if len(regNamesRISCV64) > 64 {
+ // regMask is only 64 bits.
+ panic("Too many RISCV64 registers")
+ }
+
+ regCtxt := regNamed["X20"]
+ callerSave := gpMask | fpMask | regNamed["g"]
+
+ var (
+ gpstore = regInfo{inputs: []regMask{gpspsbMask, gpspMask, 0}} // SB in first input so we can load from a global, but not in second to avoid using SB as a temporary register
+ gpstore0 = regInfo{inputs: []regMask{gpspsbMask}}
+ gp01 = regInfo{outputs: []regMask{gpMask}}
+ gp11 = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}
+ gp21 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask}}
+ gp22 = regInfo{inputs: []regMask{gpMask, gpMask}, outputs: []regMask{gpMask, gpMask}}
+ gpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{gpMask}}
+ gp11sb = regInfo{inputs: []regMask{gpspsbMask}, outputs: []regMask{gpMask}}
+ gpxchg = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}, outputs: []regMask{gpMask}}
+ gpcas = regInfo{inputs: []regMask{gpspsbgMask, gpgMask, gpgMask}, outputs: []regMask{gpMask}}
+ gpatomic = regInfo{inputs: []regMask{gpspsbgMask, gpgMask}}
+
+ fp11 = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{fpMask}}
+ fp21 = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{fpMask}}
+ fp31 = regInfo{inputs: []regMask{fpMask, fpMask, fpMask}, outputs: []regMask{fpMask}}
+ gpfp = regInfo{inputs: []regMask{gpMask}, outputs: []regMask{fpMask}}
+ fpgp = regInfo{inputs: []regMask{fpMask}, outputs: []regMask{gpMask}}
+ fpstore = regInfo{inputs: []regMask{gpspsbMask, fpMask, 0}}
+ fpload = regInfo{inputs: []regMask{gpspsbMask, 0}, outputs: []regMask{fpMask}}
+ fp2gp = regInfo{inputs: []regMask{fpMask, fpMask}, outputs: []regMask{gpMask}}
+
+ call = regInfo{clobbers: callerSave}
+ callClosure = regInfo{inputs: []regMask{gpspMask, regCtxt, 0}, clobbers: callerSave}
+ callInter = regInfo{inputs: []regMask{gpMask}, clobbers: callerSave}
+ )
+
+ RISCV64ops := []opData{
+ {name: "ADD", argLength: 2, reg: gp21, asm: "ADD", commutative: true}, // arg0 + arg1
+ {name: "ADDI", argLength: 1, reg: gp11sb, asm: "ADDI", aux: "Int64"}, // arg0 + auxint
+ {name: "ADDIW", argLength: 1, reg: gp11, asm: "ADDIW", aux: "Int64"}, // 32 low bits of arg0 + auxint, sign extended to 64 bits
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG"}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW"}, // -arg0 of 32 bits, sign extended to 64 bits
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB"}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW"}, // 32 low bits of arg 0 - 32 low bits of arg 1, sign extended to 64 bits
+
+ // M extension. H means high (i.e., it returns the top bits of
+ // the result). U means unsigned. W means word (i.e., 32-bit).
+ {name: "MUL", argLength: 2, reg: gp21, asm: "MUL", commutative: true, typ: "Int64"}, // arg0 * arg1
+ {name: "MULW", argLength: 2, reg: gp21, asm: "MULW", commutative: true, typ: "Int32"},
+ {name: "MULH", argLength: 2, reg: gp21, asm: "MULH", commutative: true, typ: "Int64"},
+ {name: "MULHU", argLength: 2, reg: gp21, asm: "MULHU", commutative: true, typ: "UInt64"},
+ {name: "LoweredMuluhilo", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (hi, lo)
+ {name: "LoweredMuluover", argLength: 2, reg: gp22, resultNotInArgs: true}, // arg0 * arg1, return (64 bits of arg0*arg1, overflow)
+
+ {name: "DIV", argLength: 2, reg: gp21, asm: "DIV", typ: "Int64"}, // arg0 / arg1
+ {name: "DIVU", argLength: 2, reg: gp21, asm: "DIVU", typ: "UInt64"},
+ {name: "DIVW", argLength: 2, reg: gp21, asm: "DIVW", typ: "Int32"},
+ {name: "DIVUW", argLength: 2, reg: gp21, asm: "DIVUW", typ: "UInt32"},
+ {name: "REM", argLength: 2, reg: gp21, asm: "REM", typ: "Int64"}, // arg0 % arg1
+ {name: "REMU", argLength: 2, reg: gp21, asm: "REMU", typ: "UInt64"},
+ {name: "REMW", argLength: 2, reg: gp21, asm: "REMW", typ: "Int32"},
+ {name: "REMUW", argLength: 2, reg: gp21, asm: "REMUW", typ: "UInt32"},
+
+ {name: "MOVaddr", argLength: 1, reg: gp11sb, asm: "MOV", aux: "SymOff", rematerializeable: true, symEffect: "RdWr"}, // arg0 + auxint + offset encoded in aux
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+
+ {name: "MOVDconst", reg: gp01, asm: "MOV", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ // Loads: load <size> bits from arg0+auxint+aux and extend to 64 bits; arg1=mem
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", typ: "Int8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, sign extend
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", typ: "Int16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, sign extend
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", typ: "Int32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, sign extend
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOV", aux: "SymOff", typ: "Int64", faultOnNilArg0: true, symEffect: "Read"}, // 64 bits
+ {name: "MOVBUload", argLength: 2, reg: gpload, asm: "MOVBU", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // 8 bits, zero extend
+ {name: "MOVHUload", argLength: 2, reg: gpload, asm: "MOVHU", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // 16 bits, zero extend
+ {name: "MOVWUload", argLength: 2, reg: gpload, asm: "MOVWU", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // 32 bits, zero extend
+
+ // Stores: store <size> lowest bits in arg1 to arg0+auxint+aux; arg2=mem
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOV", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Stores: store <size> of zero in arg0+auxint+aux; arg1=mem
+ {name: "MOVBstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVB", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 8 bits
+ {name: "MOVHstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVH", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 16 bits
+ {name: "MOVWstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOVW", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 32 bits
+ {name: "MOVDstorezero", argLength: 2, reg: gpstore0, aux: "SymOff", asm: "MOV", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // 64 bits
+
+ // Conversions
+ {name: "MOVBreg", argLength: 1, reg: gp11, asm: "MOVB"}, // move from arg0, sign-extended from byte
+ {name: "MOVHreg", argLength: 1, reg: gp11, asm: "MOVH"}, // move from arg0, sign-extended from half
+ {name: "MOVWreg", argLength: 1, reg: gp11, asm: "MOVW"}, // move from arg0, sign-extended from word
+ {name: "MOVDreg", argLength: 1, reg: gp11, asm: "MOV"}, // move from arg0
+		{name: "MOVBUreg", argLength: 1, reg: gp11, asm: "MOVBU"}, // move from arg0, zero-extended from byte
+		{name: "MOVHUreg", argLength: 1, reg: gp11, asm: "MOVHU"}, // move from arg0, zero-extended from half
+		{name: "MOVWUreg", argLength: 1, reg: gp11, asm: "MOVWU"}, // move from arg0, zero-extended from word
+
+ {name: "MOVDnop", argLength: 1, reg: regInfo{inputs: []regMask{gpMask}, outputs: []regMask{gpMask}}, resultInArg0: true}, // nop, return arg0 in same register
+
+ // Shift ops
+ {name: "SLL", argLength: 2, reg: gp21, asm: "SLL"}, // arg0 << (aux1 & 63)
+ {name: "SRA", argLength: 2, reg: gp21, asm: "SRA"}, // arg0 >> (aux1 & 63), signed
+ {name: "SRL", argLength: 2, reg: gp21, asm: "SRL"}, // arg0 >> (aux1 & 63), unsigned
+ {name: "SLLI", argLength: 1, reg: gp11, asm: "SLLI", aux: "Int64"}, // arg0 << auxint, shift amount 0-63
+ {name: "SRAI", argLength: 1, reg: gp11, asm: "SRAI", aux: "Int64"}, // arg0 >> auxint, signed, shift amount 0-63
+ {name: "SRLI", argLength: 1, reg: gp11, asm: "SRLI", aux: "Int64"}, // arg0 >> auxint, unsigned, shift amount 0-63
+
+ // Bitwise ops
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true}, // arg0 ^ arg1
+ {name: "XORI", argLength: 1, reg: gp11, asm: "XORI", aux: "Int64"}, // arg0 ^ auxint
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true}, // arg0 | arg1
+ {name: "ORI", argLength: 1, reg: gp11, asm: "ORI", aux: "Int64"}, // arg0 | auxint
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true}, // arg0 & arg1
+ {name: "ANDI", argLength: 1, reg: gp11, asm: "ANDI", aux: "Int64"}, // arg0 & auxint
+ {name: "NOT", argLength: 1, reg: gp11, asm: "NOT"}, // ^arg0
+
+ // Generate boolean values
+ {name: "SEQZ", argLength: 1, reg: gp11, asm: "SEQZ"}, // arg0 == 0, result is 0 or 1
+ {name: "SNEZ", argLength: 1, reg: gp11, asm: "SNEZ"}, // arg0 != 0, result is 0 or 1
+ {name: "SLT", argLength: 2, reg: gp21, asm: "SLT"}, // arg0 < arg1, result is 0 or 1
+ {name: "SLTI", argLength: 1, reg: gp11, asm: "SLTI", aux: "Int64"}, // arg0 < auxint, result is 0 or 1
+ {name: "SLTU", argLength: 2, reg: gp21, asm: "SLTU"}, // arg0 < arg1, unsigned, result is 0 or 1
+ {name: "SLTIU", argLength: 1, reg: gp11, asm: "SLTIU", aux: "Int64"}, // arg0 < auxint, unsigned, result is 0 or 1
+
+ // MOVconvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ {name: "MOVconvert", argLength: 2, reg: gp11, asm: "MOV"}, // arg0, but converted to int/ptr as appropriate; arg1=mem
+
+ // Calls
+ {name: "CALLstatic", argLength: 1, reg: call, aux: "CallOff", call: true}, // call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: call, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*gc.Sym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: callClosure, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: callInter, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // duffzero
+ // arg0 = address of memory to zero (in X10, changed as side effect)
+ // arg1 = mem
+ // auxint = offset into duffzero code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFZERO",
+ aux: "Int64",
+ argLength: 2,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // duffcopy
+ // arg0 = address of dst memory (in X11, changed as side effect)
+ // arg1 = address of src memory (in X10, changed as side effect)
+ // arg2 = mem
+ // auxint = offset into duffcopy code to start executing
+ // X1 (link register) changed because of function call
+ // returns mem
+ {
+ name: "DUFFCOPY",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X11"], regNamed["X10"]},
+ clobbers: regNamed["X1"] | regNamed["X10"] | regNamed["X11"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Generic moves and zeros
+
+ // general unaligned zeroing
+ // arg0 = address of memory to zero (in X5, changed as side effect)
+ // arg1 = address of the last element to zero (inclusive)
+ // arg2 = mem
+ // auxint = element size
+ // returns mem
+ // mov ZERO, (X5)
+ // ADD $sz, X5
+ // BGEU Rarg1, X5, -2(PC)
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], gpMask},
+ clobbers: regNamed["X5"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+
+ // general unaligned move
+ // arg0 = address of dst memory (in X5, changed as side effect)
+ // arg1 = address of src memory (in X6, changed as side effect)
+ // arg2 = address of the last element of src (can't be X7 as we clobber it before using arg2)
+ // arg3 = mem
+ // auxint = alignment
+ // clobbers X7 as a tmp register.
+ // returns mem
+ // mov (X6), X7
+ // mov X7, (X5)
+ // ADD $sz, X5
+ // ADD $sz, X6
+ // BGEU Rarg2, X5, -4(PC)
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{regNamed["X5"], regNamed["X6"], gpMask &^ regNamed["X7"]},
+ clobbers: regNamed["X5"] | regNamed["X6"] | regNamed["X7"],
+ },
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // Atomic loads.
+ // load from arg0. arg1=mem.
+ // returns <value,memory> so they can be properly ordered with other loads.
+ {name: "LoweredAtomicLoad8", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad32", argLength: 2, reg: gpload, faultOnNilArg0: true},
+ {name: "LoweredAtomicLoad64", argLength: 2, reg: gpload, faultOnNilArg0: true},
+
+ // Atomic stores.
+ // store arg1 to *arg0. arg2=mem. returns memory.
+ {name: "LoweredAtomicStore8", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore32", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicStore64", argLength: 3, reg: gpstore, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic exchange.
+ // store arg1 to *arg0. arg2=mem. returns <old content of *arg0, memory>.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true},
+
+ // Atomic add.
+ // *arg0 += arg1. arg2=mem. returns <new content of *arg0, memory>.
+ {name: "LoweredAtomicAdd32", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicAdd64", argLength: 3, reg: gpxchg, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Atomic compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *arg0 == arg1 {
+ // *arg0 = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // MOV $0, Rout
+ // LR (Rarg0), Rtmp
+ // BNE Rtmp, Rarg1, 3(PC)
+ // SC Rarg2, (Rarg0), Rtmp
+ // BNE Rtmp, ZERO, -3(PC)
+ // MOV $1, Rout
+ {name: "LoweredAtomicCas32", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: gpcas, resultNotInArgs: true, faultOnNilArg0: true, hasSideEffects: true, unsafePoint: true},
+
+ // Atomic 32 bit AND/OR.
+ // *arg0 &= (|=) arg1. arg2=mem. returns nil.
+ {name: "LoweredAtomicAnd32", argLength: 3, reg: gpatomic, asm: "AMOANDW", faultOnNilArg0: true, hasSideEffects: true},
+ {name: "LoweredAtomicOr32", argLength: 3, reg: gpatomic, asm: "AMOORW", faultOnNilArg0: true, hasSideEffects: true},
+
+ // Lowering pass-throughs
+ {name: "LoweredNilCheck", argLength: 2, faultOnNilArg0: true, nilCheck: true, reg: regInfo{inputs: []regMask{gpspMask}}}, // arg0=ptr,arg1=mem, returns void. Faults if ptr is nil.
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{regCtxt}}}, // scheduler ensures only at beginning of entry block
+
+ // LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+		// I.e., if f calls g, and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers RA (LR) because it's a call
+ // and T6 (REG_TMP).
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}, clobbers: (callerSave &^ (gpMask | regNamed["g"])) | regNamed["X1"]}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X7"], regNamed["X28"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X6"], regNamed["X7"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{regNamed["X5"], regNamed["X6"]}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in genericOps.go).
+
+ // F extension.
+ {name: "FADDS", argLength: 2, reg: fp21, asm: "FADDS", commutative: true, typ: "Float32"}, // arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21, asm: "FSUBS", commutative: false, typ: "Float32"}, // arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, typ: "Float32"}, // arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", commutative: false, typ: "Float32"}, // arg0 / arg1
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS", typ: "Float32"}, // sqrt(arg0)
+ {name: "FNEGS", argLength: 1, reg: fp11, asm: "FNEGS", typ: "Float32"}, // -arg0
+ {name: "FMVSX", argLength: 1, reg: gpfp, asm: "FMVSX", typ: "Float32"}, // reinterpret arg0 as float
+ {name: "FCVTSW", argLength: 1, reg: gpfp, asm: "FCVTSW", typ: "Float32"}, // float32(low 32 bits of arg0)
+ {name: "FCVTSL", argLength: 1, reg: gpfp, asm: "FCVTSL", typ: "Float32"}, // float32(arg0)
+ {name: "FCVTWS", argLength: 1, reg: fpgp, asm: "FCVTWS", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLS", argLength: 1, reg: fpgp, asm: "FCVTLS", typ: "Int64"}, // int64(arg0)
+ {name: "FMOVWload", argLength: 2, reg: fpload, asm: "MOVF", aux: "SymOff", typ: "Float32", faultOnNilArg0: true, symEffect: "Read"}, // load float32 from arg0+auxint+aux
+ {name: "FMOVWstore", argLength: 3, reg: fpstore, asm: "MOVF", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float32 to arg0+auxint+aux
+ {name: "FEQS", argLength: 2, reg: fp2gp, asm: "FEQS", commutative: true}, // arg0 == arg1
+ {name: "FNES", argLength: 2, reg: fp2gp, asm: "FNES", commutative: true}, // arg0 != arg1
+ {name: "FLTS", argLength: 2, reg: fp2gp, asm: "FLTS"}, // arg0 < arg1
+ {name: "FLES", argLength: 2, reg: fp2gp, asm: "FLES"}, // arg0 <= arg1
+
+ // D extension.
+ {name: "FADDD", argLength: 2, reg: fp21, asm: "FADDD", commutative: true, typ: "Float64"}, // arg0 + arg1
+ {name: "FSUBD", argLength: 2, reg: fp21, asm: "FSUBD", commutative: false, typ: "Float64"}, // arg0 - arg1
+ {name: "FMULD", argLength: 2, reg: fp21, asm: "FMULD", commutative: true, typ: "Float64"}, // arg0 * arg1
+ {name: "FDIVD", argLength: 2, reg: fp21, asm: "FDIVD", commutative: false, typ: "Float64"}, // arg0 / arg1
+ {name: "FMADDD", argLength: 3, reg: fp31, asm: "FMADDD", commutative: true, typ: "Float64"}, // (arg0 * arg1) + arg2
+ {name: "FMSUBD", argLength: 3, reg: fp31, asm: "FMSUBD", commutative: true, typ: "Float64"}, // (arg0 * arg1) - arg2
+ {name: "FNMADDD", argLength: 3, reg: fp31, asm: "FNMADDD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) + arg2
+ {name: "FNMSUBD", argLength: 3, reg: fp31, asm: "FNMSUBD", commutative: true, typ: "Float64"}, // -(arg0 * arg1) - arg2
+ {name: "FSQRTD", argLength: 1, reg: fp11, asm: "FSQRTD", typ: "Float64"}, // sqrt(arg0)
+ {name: "FNEGD", argLength: 1, reg: fp11, asm: "FNEGD", typ: "Float64"}, // -arg0
+ {name: "FABSD", argLength: 1, reg: fp11, asm: "FABSD", typ: "Float64"}, // abs(arg0)
+ {name: "FSGNJD", argLength: 2, reg: fp21, asm: "FSGNJD", typ: "Float64"}, // copy sign of arg1 to arg0
+ {name: "FMVDX", argLength: 1, reg: gpfp, asm: "FMVDX", typ: "Float64"}, // reinterpret arg0 as float
+ {name: "FCVTDW", argLength: 1, reg: gpfp, asm: "FCVTDW", typ: "Float64"}, // float64(low 32 bits of arg0)
+ {name: "FCVTDL", argLength: 1, reg: gpfp, asm: "FCVTDL", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTWD", argLength: 1, reg: fpgp, asm: "FCVTWD", typ: "Int32"}, // int32(arg0)
+ {name: "FCVTLD", argLength: 1, reg: fpgp, asm: "FCVTLD", typ: "Int64"}, // int64(arg0)
+ {name: "FCVTDS", argLength: 1, reg: fp11, asm: "FCVTDS", typ: "Float64"}, // float64(arg0)
+ {name: "FCVTSD", argLength: 1, reg: fp11, asm: "FCVTSD", typ: "Float32"}, // float32(arg0)
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "MOVD", aux: "SymOff", typ: "Float64", faultOnNilArg0: true, symEffect: "Read"}, // load float64 from arg0+auxint+aux
+		{name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store float64 to arg0+auxint+aux
+ {name: "FEQD", argLength: 2, reg: fp2gp, asm: "FEQD", commutative: true}, // arg0 == arg1
+ {name: "FNED", argLength: 2, reg: fp2gp, asm: "FNED", commutative: true}, // arg0 != arg1
+ {name: "FLTD", argLength: 2, reg: fp2gp, asm: "FLTD"}, // arg0 < arg1
+ {name: "FLED", argLength: 2, reg: fp2gp, asm: "FLED"}, // arg0 <= arg1
+ }
+
+ RISCV64blocks := []blockData{
+ {name: "BEQ", controls: 2},
+ {name: "BNE", controls: 2},
+ {name: "BLT", controls: 2},
+ {name: "BGE", controls: 2},
+ {name: "BLTU", controls: 2},
+ {name: "BGEU", controls: 2},
+
+ {name: "BEQZ", controls: 1},
+ {name: "BNEZ", controls: 1},
+ {name: "BLEZ", controls: 1},
+ {name: "BGEZ", controls: 1},
+ {name: "BLTZ", controls: 1},
+ {name: "BGTZ", controls: 1},
+ }
+
+ archs = append(archs, arch{
+ name: "RISCV64",
+ pkg: "cmd/internal/obj/riscv",
+ genfile: "../../riscv64/ssa.go",
+ ops: RISCV64ops,
+ blocks: RISCV64blocks,
+ regnames: regNamesRISCV64,
+ gpregmask: gpMask,
+ fpregmask: fpMask,
+ framepointerreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/S390X.rules b/src/cmd/compile/internal/ssa/gen/S390X.rules
new file mode 100644
index 0000000..b3928c6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/S390X.rules
@@ -0,0 +1,1708 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|Ptr) ...) => (ADD ...)
+(Add(32|16|8) ...) => (ADDW ...)
+(Add32F x y) => (Select0 (FADDS x y))
+(Add64F x y) => (Select0 (FADD x y))
+
+(Sub(64|Ptr) ...) => (SUB ...)
+(Sub(32|16|8) ...) => (SUBW ...)
+(Sub32F x y) => (Select0 (FSUBS x y))
+(Sub64F x y) => (Select0 (FSUB x y))
+
+(Mul64 ...) => (MULLD ...)
+(Mul(32|16|8) ...) => (MULLW ...)
+(Mul32F ...) => (FMULS ...)
+(Mul64F ...) => (FMUL ...)
+(Mul64uhilo ...) => (MLGR ...)
+
+(Div32F ...) => (FDIVS ...)
+(Div64F ...) => (FDIV ...)
+
+(Div64 x y) => (DIVD x y)
+(Div64u ...) => (DIVDU ...)
+// DIVW/DIVWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Div32 x y) => (DIVW (MOVWreg x) y)
+(Div32u x y) => (DIVWU (MOVWZreg x) y)
+(Div16 x y) => (DIVW (MOVHreg x) (MOVHreg y))
+(Div16u x y) => (DIVWU (MOVHZreg x) (MOVHZreg y))
+(Div8 x y) => (DIVW (MOVBreg x) (MOVBreg y))
+(Div8u x y) => (DIVWU (MOVBZreg x) (MOVBZreg y))
+
+(Hmul(64|64u) ...) => (MULH(D|DU) ...)
+(Hmul32 x y) => (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+(Hmul32u x y) => (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+
+(Mod64 x y) => (MODD x y)
+(Mod64u ...) => (MODDU ...)
+// MODW/MODWU has a 64-bit dividend and a 32-bit divisor,
+// so a sign/zero extension of the dividend is required.
+(Mod32 x y) => (MODW (MOVWreg x) y)
+(Mod32u x y) => (MODWU (MOVWZreg x) y)
+(Mod16 x y) => (MODW (MOVHreg x) (MOVHreg y))
+(Mod16u x y) => (MODWU (MOVHZreg x) (MOVHZreg y))
+(Mod8 x y) => (MODW (MOVBreg x) (MOVBreg y))
+(Mod8u x y) => (MODWU (MOVBZreg x) (MOVBZreg y))
+
+// (x + y) / 2 with x>=y -> (x - y) / 2 + y
+(Avg64u <t> x y) => (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
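+// Illustrative check: with x = 2^64-1 and y = 1 the naive (x+y)/2 would wrap,
+// while (x-y)/2 + y = (2^64-2)/2 + 1 = 2^63, the correct average.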
+
+(And64 ...) => (AND ...)
+(And(32|16|8) ...) => (ANDW ...)
+
+(Or64 ...) => (OR ...)
+(Or(32|16|8) ...) => (ORW ...)
+
+(Xor64 ...) => (XOR ...)
+(Xor(32|16|8) ...) => (XORW ...)
+
+(Neg64 ...) => (NEG ...)
+(Neg(32|16|8) ...) => (NEGW ...)
+(Neg32F ...) => (FNEGS ...)
+(Neg64F ...) => (FNEG ...)
+
+(Com64 ...) => (NOT ...)
+(Com(32|16|8) ...) => (NOTW ...)
+(NOT x) => (XOR (MOVDconst [-1]) x)
+(NOTW x) => (XORWconst [-1] x)
+
+// Lowering boolean ops
+(AndB ...) => (ANDW ...)
+(OrB ...) => (ORW ...)
+(Not x) => (XORWconst [1] x)
+
+// Lowering pointer arithmetic
+(OffPtr [off] ptr:(SP)) => (MOVDaddr [int32(off)] ptr)
+(OffPtr [off] ptr) && is32Bit(off) => (ADDconst [int32(off)] ptr)
+(OffPtr [off] ptr) => (ADD (MOVDconst [off]) ptr)
+
+// TODO: optimize these cases?
+(Ctz64NonZero ...) => (Ctz64 ...)
+(Ctz32NonZero ...) => (Ctz32 ...)
+
+// Ctz(x) = 64 - findLeftmostOne((x-1)&^x)
+(Ctz64 <t> x) => (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+(Ctz32 <t> x) => (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
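+// Worked example (illustrative), with FLOGR numbering bits from 0 at the most
+// significant end: for x = 8, (x-1)&^x = 7, whose leftmost one bit is bit 61,
+// so the result is 64 - 61 = 3 = Ctz64(8); for x = 0 the operand is all ones,
+// FLOGR yields 0 and the result is 64, as required.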
+
+(BitLen64 x) => (SUB (MOVDconst [64]) (FLOGR x))
+
+// POPCNT treats the input register as a vector of 8 bytes, producing
+// a population count for each individual byte. For inputs larger than
+// a single byte we therefore need to sum the individual bytes produced
+// by the POPCNT instruction. For example, the following instruction
+// sequence could be used to calculate the population count of a 4-byte
+// value:
+//
+// MOVD $0x12345678, R1 // R1=0x12345678 <-- input
+// POPCNT R1, R2 // R2=0x02030404
+// SRW $16, R2, R3 // R3=0x00000203
+// ADDW R2, R3, R4 // R4=0x02030607
+// SRW $8, R4, R5 // R5=0x00020306
+// ADDW R4, R5, R6 // R6=0x0205090d
+// MOVBZ R6, R7 // R7=0x0000000d <-- result is 13
+//
+(PopCount8 x) => (POPCNT (MOVBZreg x))
+(PopCount16 x) => (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+(PopCount32 x) => (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+(PopCount64 x) => (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+
+// SumBytes{2,4,8} pseudo operations sum the values of the rightmost
+// 2, 4 or 8 bytes respectively. The result is a single byte however
+// other bytes might contain junk so a zero extension is required if
+// the desired output type is larger than 1 byte.
+(SumBytes2 x) => (ADDW (SRWconst <typ.UInt8> x [8]) x)
+(SumBytes4 x) => (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+(SumBytes8 x) => (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
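+// Worked example (illustrative): SumBytes2 of 0x0203 computes
+// (0x0203>>8) + 0x0203 = 0x0205, whose low byte 0x05 is 2+3; SumBytes4 of
+// 0x01020304 first folds the halves to 0x01030406, then applies SumBytes2,
+// leaving 0x0A = 1+2+3+4 in the low byte.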
+
+(Bswap64 ...) => (MOVDBR ...)
+(Bswap32 ...) => (MOVWBR ...)
+
+// add with carry
+(Select0 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+(Select1 (Add64carry x y c))
+ => (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
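+// Note on the lowering above (informal): ADDCconst c [-1] adds all ones to the
+// 0/1 carry input c, producing a carry-out exactly when c != 0; that flag then
+// feeds ADDE so x + y + c is computed in a single carry chain, and the second
+// rule recovers the final carry-out as a 0/1 value by adding two zeros with the
+// chained carry.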
+
+// subtract with borrow
+(Select0 (Sub64borrow x y c))
+ => (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+(Select1 (Sub64borrow x y c))
+ => (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
+
+// math package intrinsics
+(Sqrt ...) => (FSQRT ...)
+(Floor x) => (FIDBR [7] x)
+(Ceil x) => (FIDBR [6] x)
+(Trunc x) => (FIDBR [5] x)
+(RoundToEven x) => (FIDBR [4] x)
+(Round x) => (FIDBR [1] x)
+(FMA x y z) => (FMADD z x y)
+
+(Sqrt32 ...) => (FSQRTS ...)
+
+// Atomic loads and stores.
+// The SYNC instruction (fast-BCR-serialization) prevents store-load
+// reordering. Other sequences of memory operations (load-load,
+// store-store and load-store) are already guaranteed not to be reordered.
+(AtomicLoad(8|32|Acq32|64|Ptr) ptr mem) => (MOV(BZ|WZ|WZ|D|D)atomicload ptr mem)
+(AtomicStore(8|32|64|PtrNoWB) ptr val mem) => (SYNC (MOV(B|W|D|D)atomicstore ptr val mem))
+
+// Store-release doesn't require store-load ordering.
+(AtomicStoreRel32 ptr val mem) => (MOVWatomicstore ptr val mem)
+
+// Atomic adds.
+(AtomicAdd32 ptr val mem) => (AddTupleFirst32 val (LAA ptr val mem))
+(AtomicAdd64 ptr val mem) => (AddTupleFirst64 val (LAAG ptr val mem))
+(Select0 <t> (AddTupleFirst32 val tuple)) => (ADDW val (Select0 <t> tuple))
+(Select1 (AddTupleFirst32 _ tuple)) => (Select1 tuple)
+(Select0 <t> (AddTupleFirst64 val tuple)) => (ADD val (Select0 <t> tuple))
+(Select1 (AddTupleFirst64 _ tuple)) => (Select1 tuple)
+
+// Atomic exchanges.
+(AtomicExchange32 ptr val mem) => (LoweredAtomicExchange32 ptr val mem)
+(AtomicExchange64 ptr val mem) => (LoweredAtomicExchange64 ptr val mem)
+
+// Atomic compare and swap.
+(AtomicCompareAndSwap32 ptr old new_ mem) => (LoweredAtomicCas32 ptr old new_ mem)
+(AtomicCompareAndSwap64 ptr old new_ mem) => (LoweredAtomicCas64 ptr old new_ mem)
+
+// Atomic and: *(*uint8)(ptr) &= val
+//
+// Round pointer down to nearest word boundary and pad value with ones before
+// applying atomic AND operation to target word.
+//
+// *(*uint32)(ptr &^ 3) &= rotateleft(uint32(val) | 0xffffff00, ((3 << 3) ^ ((ptr & 3) << 3)))
+//
+(AtomicAnd8 ptr val mem)
+ => (LANfloor
+ ptr
+ (RLL <typ.UInt32>
+ (ORWconst <typ.UInt32> val [-1<<8])
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
+
+// Atomic or: *(*uint8)(ptr) |= val
+//
+// Round pointer down to nearest word boundary and pad value with zeros before
+// applying atomic OR operation to target word.
+//
+// *(*uint32)(ptr &^ 3) |= uint32(val) << ((3 << 3) ^ ((ptr & 3) << 3))
+//
+(AtomicOr8 ptr val mem)
+ => (LAOfloor
+ ptr
+ (SLW <typ.UInt32>
+ (MOVBZreg <typ.UInt32> val)
+ (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr))
+ mem)
+
+(AtomicAnd32 ...) => (LAN ...)
+(AtomicOr32 ...) => (LAO ...)
+
+// Lowering extension
+// Note: we always extend to 64 bits even though some ops don't need that many result bits.
+(SignExt8to(16|32|64) ...) => (MOVBreg ...)
+(SignExt16to(32|64) ...) => (MOVHreg ...)
+(SignExt32to64 ...) => (MOVWreg ...)
+
+(ZeroExt8to(16|32|64) ...) => (MOVBZreg ...)
+(ZeroExt16to(32|64) ...) => (MOVHZreg ...)
+(ZeroExt32to64 ...) => (MOVWZreg ...)
+
+(Slicemask <t> x) => (SRADconst (NEG <t> x) [63])
+
+// Lowering truncation
+// Because we ignore high parts of registers, truncates are just copies.
+(Trunc(16|32|64)to8 ...) => (Copy ...)
+(Trunc(32|64)to16 ...) => (Copy ...)
+(Trunc64to32 ...) => (Copy ...)
+
+// Lowering float <-> int
+(Cvt32to32F ...) => (CEFBRA ...)
+(Cvt32to64F ...) => (CDFBRA ...)
+(Cvt64to32F ...) => (CEGBRA ...)
+(Cvt64to64F ...) => (CDGBRA ...)
+
+(Cvt32Fto32 ...) => (CFEBRA ...)
+(Cvt32Fto64 ...) => (CGEBRA ...)
+(Cvt64Fto32 ...) => (CFDBRA ...)
+(Cvt64Fto64 ...) => (CGDBRA ...)
+
+// Lowering float <-> uint
+(Cvt32Uto32F ...) => (CELFBR ...)
+(Cvt32Uto64F ...) => (CDLFBR ...)
+(Cvt64Uto32F ...) => (CELGBR ...)
+(Cvt64Uto64F ...) => (CDLGBR ...)
+
+(Cvt32Fto32U ...) => (CLFEBR ...)
+(Cvt32Fto64U ...) => (CLGEBR ...)
+(Cvt64Fto32U ...) => (CLFDBR ...)
+(Cvt64Fto64U ...) => (CLGDBR ...)
+
+// Lowering float32 <-> float64
+(Cvt32Fto64F ...) => (LDEBR ...)
+(Cvt64Fto32F ...) => (LEDBR ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round(32|64)F ...) => (LoweredRound(32|64)F ...)
+
+// Lowering shifts
+
+// Lower bounded shifts first. No need to check shift value.
+(Lsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SLD x y)
+(Lsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Lsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SLW x y)
+(Rsh64Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRD x y)
+(Rsh32Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW x y)
+(Rsh16Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVHZreg x) y)
+(Rsh8Ux(64|32|16|8) x y) && shiftIsBounded(v) => (SRW (MOVBZreg x) y)
+(Rsh64x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAD x y)
+(Rsh32x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW x y)
+(Rsh16x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVHreg x) y)
+(Rsh8x(64|32|16|8) x y) && shiftIsBounded(v) => (SRAW (MOVBreg x) y)
+
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+// result = shift >= 64 ? 0 : arg << shift
+(Lsh(64|32|16|8)x64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Lsh(64|32|16|8)x32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Lsh(64|32|16|8)x16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Lsh(64|32|16|8)x8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SL(D|W|W|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(64|32)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(64|32)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(64|32)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(64|32)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SR(D|W) <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+(Rsh(16|8)Ux64 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+(Rsh(16|8)Ux32 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+(Rsh(16|8)Ux16 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+(Rsh(16|8)Ux8 <t> x y) => (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOV(H|B)Zreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to 63 (all ones) if the shift value is more than 63.
+// result = arg >> (shift >= 64 ? 63 : shift)
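+//
+// A minimal Go sketch of the clamping behaviour (rsh64 is an illustrative
+// name, not a helper used by the compiler):
+//
+//	func rsh64(x int64, s uint64) int64 {
+//		if s >= 64 {
+//			s = 63 // every result bit becomes a copy of the sign bit
+//		}
+//		return x >> s
+//	}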
+(Rsh(64|32)x64 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(64|32)x32 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(64|32)x16 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(64|32)x8 x y) => (SRA(D|W) x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+
+(Rsh(16|8)x64 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+(Rsh(16|8)x32 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+(Rsh(16|8)x16 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+(Rsh(16|8)x8 x y) => (SRAW (MOV(H|B)reg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+
+// Lowering rotates
+(RotateLeft8 <t> x (MOVDconst [c])) => (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+(RotateLeft16 <t> x (MOVDconst [c])) => (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+(RotateLeft32 ...) => (RLL ...)
+(RotateLeft64 ...) => (RLLG ...)
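+
+// The 8- and 16-bit rotates above have no dedicated instruction, so they are
+// decomposed into a pair of shifts. A minimal Go sketch of the same
+// decomposition (rotl8 is an illustrative name, not a compiler helper):
+//
+//	func rotl8(x uint8, c uint) uint8 {
+//		return x<<(c&7) | x>>(-c&7)
+//	}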
+
+// Lowering comparisons
+(Less64 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Less32 x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Less(16|8) x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Less64U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Less32U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Less(16|8)U x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Less64F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Less32F x y) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Leq64 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Leq32 x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Leq(16|8) x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B)reg x) (MOV(H|B)reg y)))
+(Leq64U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+(Leq32U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+(Leq(16|8)U x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOV(H|B)Zreg x) (MOV(H|B)Zreg y)))
+(Leq64F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Leq32F x y) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Eq(64|Ptr) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Eq32 x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Eq(16|8|B) x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Eq64F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Eq32F x y) => (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+(Neq(64|Ptr) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+(Neq32 x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+(Neq(16|8|B) x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOV(H|B|B)reg x) (MOV(H|B|B)reg y)))
+(Neq64F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+(Neq32F x y) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+
+// Lowering loads
+(Load <t> ptr mem) && (is64BitInt(t) || isPtr(t)) => (MOVDload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && isSigned(t) => (MOVWload ptr mem)
+(Load <t> ptr mem) && is32BitInt(t) && !isSigned(t) => (MOVWZload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && isSigned(t) => (MOVHload ptr mem)
+(Load <t> ptr mem) && is16BitInt(t) && !isSigned(t) => (MOVHZload ptr mem)
+(Load <t> ptr mem) && is8BitInt(t) && isSigned(t) => (MOVBload ptr mem)
+(Load <t> ptr mem) && (t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) => (MOVBZload ptr mem)
+(Load <t> ptr mem) && is32BitFloat(t) => (FMOVSload ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (FMOVDload ptr mem)
+
+// Lowering stores
+// These more specific FP versions of the Store pattern should come first.
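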
+(Store {t} ptr val mem) && t.Size() == 8 && is64BitFloat(val.Type) => (FMOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 && is32BitFloat(val.Type) => (FMOVSstore ptr val mem)
+
+(Store {t} ptr val mem) && t.Size() == 8 => (MOVDstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (MOVWstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (MOVHstore ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (MOVBstore ptr val mem)
+
+// Lowering moves
+
+// Load and store for small copies.
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (MOVBstore dst (MOVBZload src mem) mem)
+(Move [2] dst src mem) => (MOVHstore dst (MOVHZload src mem) mem)
+(Move [4] dst src mem) => (MOVWstore dst (MOVWZload src mem) mem)
+(Move [8] dst src mem) => (MOVDstore dst (MOVDload src mem) mem)
+(Move [16] dst src mem) =>
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem))
+(Move [24] dst src mem) =>
+ (MOVDstore [16] dst (MOVDload [16] src mem)
+ (MOVDstore [8] dst (MOVDload [8] src mem)
+ (MOVDstore dst (MOVDload src mem) mem)))
+(Move [3] dst src mem) =>
+ (MOVBstore [2] dst (MOVBZload [2] src mem)
+ (MOVHstore dst (MOVHZload src mem) mem))
+(Move [5] dst src mem) =>
+ (MOVBstore [4] dst (MOVBZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [6] dst src mem) =>
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem))
+(Move [7] dst src mem) =>
+ (MOVBstore [6] dst (MOVBZload [6] src mem)
+ (MOVHstore [4] dst (MOVHZload [4] src mem)
+ (MOVWstore dst (MOVWZload src mem) mem)))
+
+// MVC for other moves. Use up to 4 instructions (sizes up to 1024 bytes).
+(Move [s] dst src mem) && s > 0 && s <= 256 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s), 0)] dst src mem)
+(Move [s] dst src mem) && s > 256 && s <= 512 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+(Move [s] dst src mem) && s > 512 && s <= 768 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+(Move [s] dst src mem) && s > 768 && s <= 1024 && logLargeCopy(v, s) =>
+ (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
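+
+// A minimal Go sketch of how the rules above split a copy of s bytes
+// (0 < s <= 1024) into MVC-sized chunks; chunks is an illustrative helper,
+// not part of the compiler:
+//
+//	func chunks(s int64) (out [][2]int64) { // (offset, length) pairs
+//		for off := int64(0); off < s; off += 256 {
+//			n := s - off
+//			if n > 256 {
+//				n = 256
+//			}
+//			out = append(out, [2]int64{off, n})
+//		}
+//		return out
+//	}
+//
+// For example, a 700-byte Move is emitted as three MVCs copying 256, 256 and
+// 188 bytes at offsets 0, 256 and 512 respectively.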
+
+// Move more than 1024 bytes using a loop.
+(Move [s] dst src mem) && s > 1024 && logLargeCopy(v, s) =>
+ (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (MOVBstoreconst [0] destptr mem)
+(Zero [2] destptr mem) => (MOVHstoreconst [0] destptr mem)
+(Zero [4] destptr mem) => (MOVWstoreconst [0] destptr mem)
+(Zero [8] destptr mem) => (MOVDstoreconst [0] destptr mem)
+(Zero [3] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,2)] destptr
+ (MOVHstoreconst [0] destptr mem))
+(Zero [5] destptr mem) =>
+ (MOVBstoreconst [makeValAndOff(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [6] destptr mem) =>
+ (MOVHstoreconst [makeValAndOff(0,4)] destptr
+ (MOVWstoreconst [0] destptr mem))
+(Zero [7] destptr mem) =>
+ (MOVWstoreconst [makeValAndOff(0,3)] destptr
+ (MOVWstoreconst [0] destptr mem))
+
+(Zero [s] destptr mem) && s > 0 && s <= 1024 =>
+ (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
+
+// Zero more than 1024 bytes using a loop.
+(Zero [s] destptr mem) && s > 1024 =>
+ (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+
+// Lowering constants
+(Const(64|32|16|8) [val]) => (MOVDconst [int64(val)])
+(Const(32|64)F ...) => (FMOV(S|D)const ...)
+(ConstNil) => (MOVDconst [0])
+(ConstBool [t]) => (MOVDconst [b2i(t)])
+
+// Lowering calls
+(StaticCall ...) => (CALLstatic ...)
+(ClosureCall ...) => (CALLclosure ...)
+(InterCall ...) => (CALLinter ...)
+(TailCall ...) => (CALLtail ...)
+
+// Miscellaneous
+(IsNonNil p) => (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+(IsInBounds idx len) => (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(IsSliceInBounds idx len) => (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetG ...) => (LoweredGetG ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(Addr {sym} base) => (MOVDaddr {sym} base)
+(LocalAddr {sym} base _) => (MOVDaddr {sym} base)
+(ITab (Load ptr mem)) => (MOVDload ptr mem)
+
+// block rewrites
+(If cond yes no) => (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 0 => (LoweredPanicBoundsA [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 1 => (LoweredPanicBoundsB [kind] x y mem)
+(PanicBounds [kind] x y mem) && boundsABI(kind) == 2 => (LoweredPanicBoundsC [kind] x y mem)
+
+// ***************************
+// Above: lowering rules
+// Below: optimizations
+// ***************************
+// TODO: Should the optimizations be a separate pass?
+
+// Note on removing unnecessary sign/zero extensions:
+//
+// After a value is spilled it is restored using a sign- or zero-extension
+// to register-width as appropriate for its type. For example, a uint8 will
+// be restored using a MOVBZ (llgc) instruction which will zero extend the
+// 8-bit value to 64-bits.
+//
+// This is a hazard when folding sign- and zero-extensions since we need to
+// ensure not only that the value in the argument register is correctly
+// extended but also that it will still be correctly extended if it is
+// spilled and restored.
+//
+// In general this means we need type checks when the RHS of a rule is an
+// OpCopy (i.e. "(... x:(...) ...) -> x").
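+//
+// For example, it would be wrong to rewrite (MOVBZreg x) => x when x has a
+// signed 1-byte type: if x were spilled, it would be restored with a
+// sign-extending load, so the upper 56 bits could become ones rather than
+// the zeros MOVBZreg guarantees. Roughly, in Go terms:
+//
+//	var x int8 = -1      // restored from a spill by sign extension
+//	_ = uint64(uint8(x)) // what MOVBZreg produces: 0x00000000000000ff
+//	_ = uint64(x)        // what a sign-extended restore produces: 0xffffffffffffffff
+//
+// This is why the OpCopy-style rules below also check x.Type.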
+
+// Merge double extensions.
+(MOV(H|HZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(B|BZ)reg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(W|WZ)reg e:(MOV(H|HZ)reg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+
+// Bypass redundant sign extensions.
+(MOV(B|BZ)reg e:(MOVBreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Bypass redundant zero extensions.
+(MOV(B|BZ)reg e:(MOVBZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(B|BZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(B|BZ)reg x)
+(MOV(H|HZ)reg e:(MOVHZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(H|HZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(H|HZ)reg x)
+(MOV(W|WZ)reg e:(MOVWZreg x)) && clobberIfDead(e) => (MOV(W|WZ)reg x)
+
+// Remove zero extensions after zero extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)Zreg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOV(H|W)Zreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+(MOVWZreg x:(MOVWZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 4) => x
+
+// Remove sign extensions after sign extending load.
+// Note: take care that if x is spilled it is restored correctly.
+(MOV(B|H|W)reg x:(MOVBload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOV(H|W)reg x:(MOVHload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+(MOVWreg x:(MOVWload _ _)) && (x.Type.IsSigned() || x.Type.Size() == 8) => x
+
+// Remove sign extensions after zero extending load.
+// These type checks are probably unnecessary but do them anyway just in case.
+(MOV(H|W)reg x:(MOVBZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 1) => x
+(MOVWreg x:(MOVHZload _ _)) && (!x.Type.IsSigned() || x.Type.Size() > 2) => x
+
+// Fold sign and zero extensions into loads.
+//
+// Note: The combined instruction must end up in the same block
+// as the original load. If not, we end up making a value with
+// memory type live in two different blocks, which can lead to
+// multiple memory values alive simultaneously.
+//
+// Make sure we don't combine these ops if the load has another use.
+// This prevents a single load from being split into multiple loads
+// which then might return different values. See test/atomicload.go.
+(MOV(B|H|W)Zreg <t> x:(MOV(B|H|W)load [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)Zload <t> [o] {s} p mem)
+(MOV(B|H|W)reg <t> x:(MOV(B|H|W)Zload [o] {s} p mem))
+ && x.Uses == 1
+ && clobber(x)
+ => @x.Block (MOV(B|H|W)load <t> [o] {s} p mem)
+
+// Remove zero extensions after argument load.
+(MOVBZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() == 1 => x
+(MOVHZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 2 => x
+(MOVWZreg x:(Arg <t>)) && !t.IsSigned() && t.Size() <= 4 => x
+
+// Remove sign extensions after argument load.
+(MOVBreg x:(Arg <t>)) && t.IsSigned() && t.Size() == 1 => x
+(MOVHreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 2 => x
+(MOVWreg x:(Arg <t>)) && t.IsSigned() && t.Size() <= 4 => x
+
+// Fold zero extensions into constants.
+(MOVBZreg (MOVDconst [c])) => (MOVDconst [int64( uint8(c))])
+(MOVHZreg (MOVDconst [c])) => (MOVDconst [int64(uint16(c))])
+(MOVWZreg (MOVDconst [c])) => (MOVDconst [int64(uint32(c))])
+
+// Fold sign extensions into constants.
+(MOVBreg (MOVDconst [c])) => (MOVDconst [int64( int8(c))])
+(MOVHreg (MOVDconst [c])) => (MOVDconst [int64(int16(c))])
+(MOVWreg (MOVDconst [c])) => (MOVDconst [int64(int32(c))])
+
+// Remove zero extension of conditional move.
+// Note: only for MOVBZreg for now since it is added as part of 'if' statement lowering.
+(MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ && int64(uint8(c)) == c
+ && int64(uint8(d)) == d
+ && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ => x
+
+// Fold boolean tests into blocks.
+// Note: this must match If statement lowering.
+(CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ && int32(x) != 0
+ => (BRC {d} cmp yes no)
+
+// Canonicalize BRC condition code mask by removing impossible conditions.
+// Integer comparisons cannot generate the unordered condition.
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU) _ _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+(BRC {c} x:((CMP|CMPW|CMPU|CMPWU)const _) yes no) && c&s390x.Unordered != 0 => (BRC {c&^s390x.Unordered} x yes no)
+
+// Compare-and-branch.
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMP x y) yes no) => (CGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPW x y) yes no) => (CRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPU x y) yes no) => (CLGRJ {c&^s390x.Unordered} x y yes no)
+(BRC {c} (CMPWU x y) yes no) => (CLRJ {c&^s390x.Unordered} x y yes no)
+
+// Compare-and-branch (immediate).
+// Note: bit 3 (unordered) must not be set so we mask out s390x.Unordered.
+(BRC {c} (CMPconst x [y]) yes no) && y == int32( int8(y)) => (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32( int8(y)) => (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32(uint8(y)) => (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32(uint8(y)) => (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+
+// Absorb immediate into compare-and-branch.
+(C(R|GR)J {c} x (MOVDconst [y]) yes no) && is8Bit(y) => (C(I|GI)J {c} x [ int8(y)] yes no)
+(CL(R|GR)J {c} x (MOVDconst [y]) yes no) && isU8Bit(y) => (CL(I|GI)J {c} x [uint8(y)] yes no)
+(C(R|GR)J {c} (MOVDconst [x]) y yes no) && is8Bit(x) => (C(I|GI)J {c.ReverseComparison()} y [ int8(x)] yes no)
+(CL(R|GR)J {c} (MOVDconst [x]) y yes no) && isU8Bit(x) => (CL(I|GI)J {c.ReverseComparison()} y [uint8(x)] yes no)
+
+// Prefer comparison with immediate to compare-and-branch.
+(CGRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPconst x [int32(y)]) yes no)
+(CRJ {c} x (MOVDconst [y]) yes no) && !is8Bit(y) && is32Bit(y) => (BRC {c} (CMPWconst x [int32(y)]) yes no)
+(CLGRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPUconst x [int32(y)]) yes no)
+(CLRJ {c} x (MOVDconst [y]) yes no) && !isU8Bit(y) && isU32Bit(y) => (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+(CGRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+(CRJ {c} (MOVDconst [x]) y yes no) && !is8Bit(x) && is32Bit(x) => (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+(CLGRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+(CLRJ {c} (MOVDconst [x]) y yes no) && !isU8Bit(x) && isU32Bit(x) => (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+
+// Absorb sign/zero extensions into 32-bit compare-and-branch.
+(CIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CIJ {c} x [y] yes no)
+(CLIJ {c} (MOV(W|WZ)reg x) [y] yes no) => (CLIJ {c} x [y] yes no)
+
+// Bring out-of-range signed immediates into range by varying branch condition.
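+// For example, signed "x < 128" cannot be encoded directly because the
+// compare-and-branch immediate field is a signed 8-bit value (-128..127),
+// but it is equivalent to "x <= 127", which fits:
+//
+//	(x < 128) == (x <= 127) // for any integer x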
+(BRC {s390x.Less} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.Less} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+(BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no) => (CGIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no) => (CIJ {s390x.Less} x [-128] yes no)
+(BRC {s390x.Greater} (CMPconst x [-129]) yes no) => (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.Greater} (CMPWconst x [-129]) yes no) => (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no) => (CGIJ {s390x.Greater} x [ 127] yes no)
+(BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no) => (CIJ {s390x.Greater} x [ 127] yes no)
+
+// Bring out-of-range unsigned immediates into range by varying branch condition.
+(BRC {s390x.Less} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.LessOrEqual} x [255] yes no)
+(BRC {s390x.GreaterOrEqual} (CMP(WU|U)const x [256]) yes no) => (C(L|LG)IJ {s390x.Greater} x [255] yes no)
+
+// Bring out-of-range immediates into range by switching signedness (only == and !=).
+(BRC {c} (CMPconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLGIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPWconst x [y]) yes no) && y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CLIJ {c} x [uint8(y)] yes no)
+(BRC {c} (CMPUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CGIJ {c} x [ int8(y)] yes no)
+(BRC {c} (CMPWUconst x [y]) yes no) && y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater) => (CIJ {c} x [ int8(y)] yes no)
+
+// Fold constants into instructions.
+(ADD x (MOVDconst [c])) && is32Bit(c) => (ADDconst [int32(c)] x)
+(ADDW x (MOVDconst [c])) => (ADDWconst [int32(c)] x)
+
+(SUB x (MOVDconst [c])) && is32Bit(c) => (SUBconst x [int32(c)])
+(SUB (MOVDconst [c]) x) && is32Bit(c) => (NEG (SUBconst <v.Type> x [int32(c)]))
+(SUBW x (MOVDconst [c])) => (SUBWconst x [int32(c)])
+(SUBW (MOVDconst [c]) x) => (NEGW (SUBWconst <v.Type> x [int32(c)]))
+
+(MULLD x (MOVDconst [c])) && is32Bit(c) => (MULLDconst [int32(c)] x)
+(MULLW x (MOVDconst [c])) => (MULLWconst [int32(c)] x)
+
+// NILF instructions leave the high 32 bits unchanged which, for AND, is
+// equivalent to the leftmost 32 bits of the mask being set.
+// TODO(mundaym): modify the assembler to accept 64-bit values
+// and use isU32Bit(^c).
+(AND x (MOVDconst [c]))
+ && s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ => (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c < 0
+ => (ANDconst [c] x)
+(AND x (MOVDconst [c]))
+ && is32Bit(c)
+ && c >= 0
+ => (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
+
+(ANDW x (MOVDconst [c])) => (ANDWconst [int32(c)] x)
+
+((AND|ANDW)const [c] ((AND|ANDW)const [d] x)) => ((AND|ANDW)const [c&d] x)
+
+((OR|XOR) x (MOVDconst [c])) && isU32Bit(c) => ((OR|XOR)const [c] x)
+((OR|XOR)W x (MOVDconst [c])) => ((OR|XOR)Wconst [int32(c)] x)
+
+// Constant shifts.
+(S(LD|RD|RAD) x (MOVDconst [c])) => (S(LD|RD|RAD)const x [uint8(c&63)])
+(S(LW|RW|RAW) x (MOVDconst [c])) && c&32 == 0 => (S(LW|RW|RAW)const x [uint8(c&31)])
+(S(LW|RW) _ (MOVDconst [c])) && c&32 != 0 => (MOVDconst [0])
+(SRAW x (MOVDconst [c])) && c&32 != 0 => (SRAWconst x [31])
+
+// Shifts only use the rightmost 6 bits of the shift value.
+(S(LD|RD|RAD|LW|RW|RAW) x (RISBGZ y {r}))
+ && r.Amount == 0
+ && r.OutMask()&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(S(LD|RD|RAD|LW|RW|RAW) x (AND (MOVDconst [c]) y))
+ => (S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+(S(LD|RD|RAD|LW|RW|RAW) x (ANDWconst [c] y)) && c&63 == 63
+ => (S(LD|RD|RAD|LW|RW|RAW) x y)
+(SLD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLD x y)
+(SRD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRD x y)
+(SRAD x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAD x y)
+(SLW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SLW x y)
+(SRW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRW x y)
+(SRAW x (MOV(W|H|B|WZ|HZ|BZ)reg y)) => (SRAW x y)
+
+// Match rotate by constant.
+(RLLG x (MOVDconst [c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+(RLL x (MOVDconst [c])) => (RLLconst x [uint8(c&31)])
+
+// Match rotate by constant pattern.
+((ADD|OR|XOR) (SLDconst x [c]) (SRDconst x [64-c])) => (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+((ADD|OR|XOR)W (SLWconst x [c]) (SRWconst x [32-c])) => (RLLconst x [c])
+
+// Signed 64-bit comparison with immediate.
+(CMP x (MOVDconst [c])) && is32Bit(c) => (CMPconst x [int32(c)])
+(CMP (MOVDconst [c]) x) && is32Bit(c) => (InvertFlags (CMPconst x [int32(c)]))
+
+// Unsigned 64-bit comparison with immediate.
+(CMPU x (MOVDconst [c])) && isU32Bit(c) => (CMPUconst x [int32(c)])
+(CMPU (MOVDconst [c]) x) && isU32Bit(c) => (InvertFlags (CMPUconst x [int32(c)]))
+
+// Signed and unsigned 32-bit comparison with immediate.
+(CMP(W|WU) x (MOVDconst [c])) => (CMP(W|WU)const x [int32(c)])
+(CMP(W|WU) (MOVDconst [c]) x) => (InvertFlags (CMP(W|WU)const x [int32(c)]))
+
+// Match (x >> c) << d to 'rotate then insert selected bits [into zero]'.
+(SLDconst (SRDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+
+// Match (x << c) >> d to 'rotate then insert selected bits [into zero]'.
+(SRDconst (SLDconst x [c]) [d]) => (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+
+// Absorb input zero extension into 'rotate then insert selected bits [into zero]'.
+(RISBGZ (MOVWZreg x) {r}) && r.InMerge(0xffffffff) != nil => (RISBGZ x {*r.InMerge(0xffffffff)})
+(RISBGZ (MOVHZreg x) {r}) && r.InMerge(0x0000ffff) != nil => (RISBGZ x {*r.InMerge(0x0000ffff)})
+(RISBGZ (MOVBZreg x) {r}) && r.InMerge(0x000000ff) != nil => (RISBGZ x {*r.InMerge(0x000000ff)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into zero extension.
+(MOVWZreg (RISBGZ x {r})) && r.OutMerge(0xffffffff) != nil => (RISBGZ x {*r.OutMerge(0xffffffff)})
+(MOVHZreg (RISBGZ x {r})) && r.OutMerge(0x0000ffff) != nil => (RISBGZ x {*r.OutMerge(0x0000ffff)})
+(MOVBZreg (RISBGZ x {r})) && r.OutMerge(0x000000ff) != nil => (RISBGZ x {*r.OutMerge(0x000000ff)})
+
+// Absorb shift into 'rotate then insert selected bits [into zero]'.
+//
+// Any unsigned shift can be represented as a rotate and mask operation:
+//
+// x << c => RotateLeft64(x, c) & (^uint64(0) << c)
+// x >> c => RotateLeft64(x, -c) & (^uint64(0) >> c)
+//
+// Therefore when a shift is used as the input to a rotate then insert
+// selected bits instruction we can merge the two together. We just have
+// to be careful that the resultant mask is representable (non-zero and
+// contiguous). For example, assuming that x is variable and c, y and m
+// are constants, a shift followed by a rotate then insert selected bits
+// could be represented as:
+//
+// RotateLeft64(RotateLeft64(x, c) & (^uint64(0) << c), y) & m
+//
+// We can split the rotation by y into two, one rotate for x and one for
+// the mask:
+//
+// RotateLeft64(RotateLeft64(x, c), y) & (RotateLeft64(^uint64(0) << c, y)) & m
+//
+// The rotations of x by c followed by y can then be combined:
+//
+// RotateLeft64(x, c+y) & (RotateLeft64(^uint64(0) << c, y)) & m
+// ^^^^^^^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+// rotate mask
+//
+// To perform this optimization we therefore just need to check that it
+// is valid to merge the shift mask (^(uint64(0)<<c)) into the selected
+// bits mask (i.e. that the resultant mask is non-zero and contiguous).
+//
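+// A minimal Go check of the shift-as-rotate-and-mask identities quoted above
+// (bits.RotateLeft64 is math/bits.RotateLeft64; c must be in [0, 63];
+// shl/shr are illustrative names, not compiler helpers):
+//
+//	func shl(x uint64, c uint) uint64 { return bits.RotateLeft64(x, int(c)) & (^uint64(0) << c) }
+//	func shr(x uint64, c uint) uint64 { return bits.RotateLeft64(x, -int(c)) & (^uint64(0) >> c) }
+//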
+(RISBGZ (SLDconst x [c]) {r}) && r.InMerge(^uint64(0)<<c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+(RISBGZ (SRDconst x [c]) {r}) && r.InMerge(^uint64(0)>>c) != nil => (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into left shift.
+(SLDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Absorb 'rotate then insert selected bits [into zero]' into right shift.
+(SRDconst (RISBGZ x {r}) [c])
+ && s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ => (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+
+// Merge 'rotate then insert selected bits [into zero]' instructions together.
+(RISBGZ (RISBGZ x {y}) {z})
+ && z.InMerge(y.OutMask()) != nil
+ => (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+
+// Convert RISBGZ into 64-bit shift (helps CSE).
+(RISBGZ x {r}) && r.End == 63 && r.Start == -r.Amount&63 => (SRDconst x [-r.Amount&63])
+(RISBGZ x {r}) && r.Start == 0 && r.End == 63-r.Amount => (SLDconst x [r.Amount])
+
+// Optimize single bit isolation when it is known to be equivalent to
+// the most significant bit due to mask produced by arithmetic shift.
+// Simply isolate the most significant bit itself and place it in the
+// correct position.
+//
+// Example: (int64(x) >> 63) & 0x8 -> RISBGZ $60, $60, $4, Rsrc, Rdst
+(RISBGZ (SRADconst x [c]) {r})
+ && r.Start == r.End // single bit selected
+ && (r.Start+r.Amount)&63 <= c // equivalent to most significant bit of x
+ => (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
+
+// Canonicalize the order of arguments to comparisons - helps with CSE.
+((CMP|CMPW|CMPU|CMPWU) x y) && canonLessThan(x,y) => (InvertFlags ((CMP|CMPW|CMPU|CMPWU) y x))
+
+// Use sign/zero extend instead of RISBGZ.
+(RISBGZ x {r}) && r == s390x.NewRotateParams(56, 63, 0) => (MOVBZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(48, 63, 0) => (MOVHZreg x)
+(RISBGZ x {r}) && r == s390x.NewRotateParams(32, 63, 0) => (MOVWZreg x)
+
+// Use sign/zero extend instead of ANDW.
+(ANDWconst [0x00ff] x) => (MOVBZreg x)
+(ANDWconst [0xffff] x) => (MOVHZreg x)
+
+// Strength reduce multiplication to the sum (or difference) of two powers of two.
+//
+// Examples:
+// 5x -> 4x + 1x
+// 10x -> 8x + 2x
+// 120x -> 128x - 8x
+// -120x -> 8x - 128x
+//
+// Isolating the rightmost bit of a value yields either a power of 2 (because
+// it is a single bit) or 0 (when the original value is 0).
+// In all of these rules we use a rightmost bit calculation to determine one operand
+// for the addition or subtraction. We then just need to calculate if the other
+// operand is a valid power of 2 before we can match the rule.
+//
+// Notes:
+// - the generic rules have already matched single powers of two so we ignore them here
+// - isPowerOfTwo32 asserts that its argument is greater than 0
+// - c&(c-1) = clear rightmost bit
+// - c&^(c-1) = isolate rightmost bit
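+//
+// Worked example (illustrative Go, not part of the rules): for c = 10,
+// c&(c-1) = 8 and c&^(c-1) = 2, both powers of two, so 10*x is rewritten
+// using the first rule below as:
+//
+//	func mul10(x int64) int64 { return (x << 3) + (x << 1) }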
+
+// c = 2ˣ + 2ʸ => c - 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c&(c-1))
+ => ((ADD|ADDW) (SL(D|W)const <t> x [uint8(log32(c&(c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ʸ - 2ˣ => c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(c+(c&^(c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(c+(c&^(c-1))))])
+ (SL(D|W)const <t> x [uint8(log32(c&^(c-1)))]))
+
+// c = 2ˣ - 2ʸ => -c + 2ˣ = 2ʸ
+(MULL(D|W)const <t> x [c]) && isPowerOfTwo32(-c+(-c&^(-c-1)))
+ => ((SUB|SUBW) (SL(D|W)const <t> x [uint8(log32(-c&^(-c-1)))])
+ (SL(D|W)const <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+
+// Fold ADD into MOVDaddr. Odd offsets from SB shouldn't be folded (LARL can't handle them).
+(ADDconst [c] (MOVDaddr [d] {s} x:(SB))) && ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADDconst [c] (MOVDaddr [d] {s} x)) && x.Op != OpSB && is20Bit(int64(c)+int64(d)) => (MOVDaddr [c+d] {s} x)
+(ADD idx (MOVDaddr [c] {s} ptr)) && ptr.Op != OpSB => (MOVDaddridx [c] {s} ptr idx)
+
+// Fold ADDconst into MOVDaddridx.
+(ADDconst [c] (MOVDaddridx [d] {s} x y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} (ADDconst [d] x) y) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+(MOVDaddridx [c] {s} x (ADDconst [d] y)) && is20Bit(int64(c)+int64(d)) => (MOVDaddridx [c+d] {s} x y)
+
+// reverse ordering of compare instruction
+(LOCGR {c} x y (InvertFlags cmp)) => (LOCGR {c.ReverseComparison()} x y cmp)
+
+// replace load from same location as preceding store with copy
+(MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWreg x)
+(MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHreg x)
+(MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBreg x)
+(MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVWZreg x)
+(MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVHZreg x)
+(MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (MOVBZreg x)
+(MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LGDR x)
+(FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => (LDGR x)
+(FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+(FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _)) && isSamePtr(ptr1, ptr2) => x
+
+// prefer FPR <-> GPR moves over combined load ops
+(MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (MULLD x (LGDR <t> y))
+(ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (ADD x (LGDR <t> y))
+(SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (SUB x (LGDR <t> y))
+(ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (OR x (LGDR <t> y))
+(ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (AND x (LGDR <t> y))
+(XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _)) && isSamePtr(ptr1, ptr2) => (XOR x (LGDR <t> y))
+
+// detect attempts to set/clear the sign bit
+// may need to be reworked when NIHH/OIHH are added
+(RISBGZ (LGDR <t> x) {r}) && r == s390x.NewRotateParams(1, 63, 0) => (LGDR <t> (LPDFR <x.Type> x))
+(LDGR <t> (RISBGZ x {r})) && r == s390x.NewRotateParams(1, 63, 0) => (LPDFR (LDGR <t> x))
+(OR (MOVDconst [-1<<63]) (LGDR <t> x)) => (LGDR <t> (LNDFR <x.Type> x))
+(LDGR <t> (OR (MOVDconst [-1<<63]) x)) => (LNDFR (LDGR <t> x))
+
+// detect attempts to set the sign bit with load
+(LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem)) && x.Uses == 1 && clobber(x) => @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+
+// detect copysign
+(OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <t> y x))
+(OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ && c >= 0
+ && r == s390x.NewRotateParams(0, 0, 0)
+ => (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+(CPSDR y (FMOVDconst [c])) && !math.Signbit(c) => (LPDFR y)
+(CPSDR y (FMOVDconst [c])) && math.Signbit(c) => (LNDFR y)
+
+// absorb negations into set/clear sign bit
+(FNEG (LPDFR x)) => (LNDFR x)
+(FNEG (LNDFR x)) => (LPDFR x)
+(FNEGS (LPDFR x)) => (LNDFR x)
+(FNEGS (LNDFR x)) => (LPDFR x)
+
+// no need to convert float32 to float64 to set/clear sign bit
+(LEDBR (LPDFR (LDEBR x))) => (LPDFR x)
+(LEDBR (LNDFR (LDEBR x))) => (LNDFR x)
+
+// remove unnecessary FPR <-> GPR moves
+(LDGR (LGDR x)) => x
+(LGDR (LDGR x)) => x
+
+// Don't extend before storing
+(MOVWstore [off] {sym} ptr (MOVWreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+(MOVWstore [off] {sym} ptr (MOVWZreg x) mem) => (MOVWstore [off] {sym} ptr x mem)
+(MOVHstore [off] {sym} ptr (MOVHZreg x) mem) => (MOVHstore [off] {sym} ptr x mem)
+(MOVBstore [off] {sym} ptr (MOVBZreg x) mem) => (MOVBstore [off] {sym} ptr x mem)
+
+// Fold constants into memory operations.
+// Note that this is not always a good idea because if not all the uses of
+// the ADDconst get eliminated, we still have to compute the ADDconst and we now
+// have potentially two live values (ptr and (ADDconst [off] ptr)) instead of one.
+// Nevertheless, let's do it!
+(MOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDload [off1+off2] {sym} ptr mem)
+(MOVWload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWload [off1+off2] {sym} ptr mem)
+(MOVHload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHload [off1+off2] {sym} ptr mem)
+(MOVBload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBload [off1+off2] {sym} ptr mem)
+(MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWZload [off1+off2] {sym} ptr mem)
+(MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHZload [off1+off2] {sym} ptr mem)
+(MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBZload [off1+off2] {sym} ptr mem)
+(FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSload [off1+off2] {sym} ptr mem)
+(FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDload [off1+off2] {sym} ptr mem)
+
+(MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVDstore [off1+off2] {sym} ptr val mem)
+(MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVWstore [off1+off2] {sym} ptr val mem)
+(MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVHstore [off1+off2] {sym} ptr val mem)
+(MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (MOVBstore [off1+off2] {sym} ptr val mem)
+(FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVSstore [off1+off2] {sym} ptr val mem)
+(FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem) && is20Bit(int64(off1)+int64(off2)) => (FMOVDstore [off1+off2] {sym} ptr val mem)
+
+(ADDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDload [off1+off2] {sym} x ptr mem)
+(ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ADDWload [off1+off2] {sym} x ptr mem)
+(MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLDload [off1+off2] {sym} x ptr mem)
+(MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (MULLWload [off1+off2] {sym} x ptr mem)
+(SUBload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBload [off1+off2] {sym} x ptr mem)
+(SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (SUBWload [off1+off2] {sym} x ptr mem)
+
+(ANDload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDload [off1+off2] {sym} x ptr mem)
+(ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ANDWload [off1+off2] {sym} x ptr mem)
+(ORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORload [off1+off2] {sym} x ptr mem)
+(ORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (ORWload [off1+off2] {sym} x ptr mem)
+(XORload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORload [off1+off2] {sym} x ptr mem)
+(XORWload [off1] {sym} x (ADDconst [off2] ptr) mem) && ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2)) => (XORWload [off1+off2] {sym} x ptr mem)
+
+// Fold constants into stores.
+(MOVDstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVWstore [off] {sym} ptr (MOVDconst [c]) mem) && is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+(MOVHstore [off] {sym} ptr (MOVDconst [c]) mem) && isU12Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+(MOVBstore [off] {sym} ptr (MOVDconst [c]) mem) && is20Bit(int64(off)) && ptr.Op != OpSB =>
+ (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+
+// Fold address offsets into constant stores.
+(MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem) && isU12Bit(sc.Off64()+int64(off)) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+(MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem) && is20Bit(sc.Off64()+int64(off)) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+
+// Merge address calculations into loads and stores.
+// Offsets from SB must not be merged into unaligned memory accesses because
+// loads/stores using PC-relative addressing directly must be aligned to the
+// size of the target.
+(MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+(MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+
+(MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0)) =>
+ (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0)) =>
+ (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0)) =>
+ (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+(FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+ (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+
+(ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+(ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+(XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem) && ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2) => (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+
+// Cannot store constant to SB directly (no 'move relative long immediate' instructions).
+(MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+(MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem) && ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off) =>
+ (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+
+// MOVDaddr into MOVDaddridx
+(MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+(MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y)) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB =>
+ (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+
+// Absorb InvertFlags into branches.
+(BRC {c} (InvertFlags cmp) yes no) => (BRC {c.ReverseComparison()} cmp yes no)
+
+// Constant comparisons.
+(CMPconst (MOVDconst [x]) [y]) && x==int64(y) => (FlagEQ)
+(CMPconst (MOVDconst [x]) [y]) && x<int64(y) => (FlagLT)
+(CMPconst (MOVDconst [x]) [y]) && x>int64(y) => (FlagGT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)==uint64(y) => (FlagEQ)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)<uint64(y) => (FlagLT)
+(CMPUconst (MOVDconst [x]) [y]) && uint64(x)>uint64(y) => (FlagGT)
+
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)==int32(y) => (FlagEQ)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)<int32(y) => (FlagLT)
+(CMPWconst (MOVDconst [x]) [y]) && int32(x)>int32(y) => (FlagGT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)==uint32(y) => (FlagEQ)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)<uint32(y) => (FlagLT)
+(CMPWUconst (MOVDconst [x]) [y]) && uint32(x)>uint32(y) => (FlagGT)
+
+(CMP(W|WU)const (MOVBZreg _) [c]) && 0xff < c => (FlagLT)
+(CMP(W|WU)const (MOVHZreg _) [c]) && 0xffff < c => (FlagLT)
+
+(CMPconst (SRDconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+(CMPWconst (SRWconst _ [c]) [n]) && c > 0 && n < 0 => (FlagGT)
+
+(CMPUconst (SRDconst _ [c]) [n]) && c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n) => (FlagLT)
+(CMPWUconst (SRWconst _ [c]) [n]) && c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n) => (FlagLT)
+
+(CMPWconst (ANDWconst _ [m]) [n]) && int32(m) >= 0 && int32(m) < int32(n) => (FlagLT)
+(CMPWUconst (ANDWconst _ [m]) [n]) && uint32(m) < uint32(n) => (FlagLT)
+
+(CMPconst (RISBGZ x {r}) [c]) && c > 0 && r.OutMask() < uint64(c) => (FlagLT)
+(CMPUconst (RISBGZ x {r}) [c]) && r.OutMask() < uint64(uint32(c)) => (FlagLT)
+
+// Constant compare-and-branch with immediate.
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int64(x) == int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int64(x) < int64(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int64(x) > int64(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && int32(x) == int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && int32(x) < int32(y) => (First yes no)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && int32(x) > int32(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint64(x) == uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint64(x) < uint64(y) => (First yes no)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint64(x) > uint64(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal != 0 && uint32(x) == uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less != 0 && uint32(x) < uint32(y) => (First yes no)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater != 0 && uint32(x) > uint32(y) => (First yes no)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int64(x) == int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int64(x) < int64(y) => (First no yes)
+(CGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int64(x) > int64(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && int32(x) == int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && int32(x) < int32(y) => (First no yes)
+(CIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && int32(x) > int32(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint64(x) == uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint64(x) < uint64(y) => (First no yes)
+(CLGIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint64(x) > uint64(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Equal == 0 && uint32(x) == uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Less == 0 && uint32(x) < uint32(y) => (First no yes)
+(CLIJ {c} (MOVDconst [x]) [y] yes no) && c&s390x.Greater == 0 && uint32(x) > uint32(y) => (First no yes)
+
+// Constant compare-and-branch with immediate when unsigned comparison with zero.
+(C(L|LG)IJ {s390x.GreaterOrEqual} _ [0] yes no) => (First yes no)
+(C(L|LG)IJ {s390x.Less} _ [0] yes no) => (First no yes)
+
+// Constant compare-and-branch when operands match.
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal != 0 => (First yes no)
+(C(GR|R|LGR|LR)J {c} x y yes no) && x == y && c&s390x.Equal == 0 => (First no yes)
+
+// Convert 64-bit comparisons to 32-bit comparisons and signed comparisons
+// to unsigned comparisons.
+// Helps simplify constant comparison detection.
+(CM(P|PU)const (MOV(W|WZ)reg x) [c]) => (CMP(W|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(H|HZ|H|HZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CM(P|P|PU|PU)const x:(MOV(B|BZ|B|BZ)reg _) [c]) => (CMP(W|W|WU|WU)const x [c])
+(CMPconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 && c >= 0 => (CMPWUconst x [c])
+(CMPUconst (MOV(WZ|W)reg x:(ANDWconst [m] _)) [c]) && int32(m) >= 0 => (CMPWUconst x [c])
+(CMPconst x:(SRDconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPUconst x [n])
+(CMPWconst x:(SRWconst _ [c]) [n]) && c > 0 && n >= 0 => (CMPWUconst x [n])
+
+// Absorb sign and zero extensions into 32-bit comparisons.
+(CMP(W|W|WU|WU) x (MOV(W|WZ|W|WZ)reg y)) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU) (MOV(W|WZ|W|WZ)reg x) y) => (CMP(W|W|WU|WU) x y)
+(CMP(W|W|WU|WU)const (MOV(W|WZ|W|WZ)reg x) [c]) => (CMP(W|W|WU|WU)const x [c])
+
+// Absorb flag constants into branches.
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal != 0 => (First yes no)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less != 0 => (First yes no)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater != 0 => (First yes no)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered != 0 => (First yes no)
+
+(BRC {c} (FlagEQ) yes no) && c&s390x.Equal == 0 => (First no yes)
+(BRC {c} (FlagLT) yes no) && c&s390x.Less == 0 => (First no yes)
+(BRC {c} (FlagGT) yes no) && c&s390x.Greater == 0 => (First no yes)
+(BRC {c} (FlagOV) yes no) && c&s390x.Unordered == 0 => (First no yes)
+
+// Absorb flag constants into SETxx ops.
+(LOCGR {c} _ x (FlagEQ)) && c&s390x.Equal != 0 => x
+(LOCGR {c} _ x (FlagLT)) && c&s390x.Less != 0 => x
+(LOCGR {c} _ x (FlagGT)) && c&s390x.Greater != 0 => x
+(LOCGR {c} _ x (FlagOV)) && c&s390x.Unordered != 0 => x
+
+(LOCGR {c} x _ (FlagEQ)) && c&s390x.Equal == 0 => x
+(LOCGR {c} x _ (FlagLT)) && c&s390x.Less == 0 => x
+(LOCGR {c} x _ (FlagGT)) && c&s390x.Greater == 0 => x
+(LOCGR {c} x _ (FlagOV)) && c&s390x.Unordered == 0 => x
+
+// Remove redundant *const ops
+(ADDconst [0] x) => x
+(ADDWconst [c] x) && int32(c)==0 => x
+(SUBconst [0] x) => x
+(SUBWconst [c] x) && int32(c) == 0 => x
+(ANDconst [0] _) => (MOVDconst [0])
+(ANDWconst [c] _) && int32(c)==0 => (MOVDconst [0])
+(ANDconst [-1] x) => x
+(ANDWconst [c] x) && int32(c)==-1 => x
+(ORconst [0] x) => x
+(ORWconst [c] x) && int32(c)==0 => x
+(ORconst [-1] _) => (MOVDconst [-1])
+(ORWconst [c] _) && int32(c)==-1 => (MOVDconst [-1])
+(XORconst [0] x) => x
+(XORWconst [c] x) && int32(c)==0 => x
+
+// Shifts by zero (may be inserted during multiplication strength reduction).
+((SLD|SLW|SRD|SRW|SRAD|SRAW)const x [0]) => x
+
+// Convert constant subtracts to constant adds.
+(SUBconst [c] x) && c != -(1<<31) => (ADDconst [-c] x)
+(SUBWconst [c] x) => (ADDWconst [-int32(c)] x)
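+// (The 64-bit rule above is guarded because int32 cannot represent -(-1<<31):
+// the negation would wrap back to -1<<31 and give the wrong result. The
+// 32-bit rule needs no guard since ADDWconst arithmetic wraps modulo 2^32.)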
+
+// generic constant folding
+// TODO: more of this
+(ADDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)+d])
+(ADDconst [c] (ADDconst [d] x)) && is32Bit(int64(c)+int64(d)) => (ADDconst [c+d] x)
+(ADDWconst [c] (ADDWconst [d] x)) => (ADDWconst [int32(c+d)] x)
+(SUBconst (MOVDconst [d]) [c]) => (MOVDconst [d-int64(c)])
+(SUBconst (SUBconst x [d]) [c]) && is32Bit(-int64(c)-int64(d)) => (ADDconst [-c-d] x)
+(SRADconst [c] (MOVDconst [d])) => (MOVDconst [d>>uint64(c)])
+(SRAWconst [c] (MOVDconst [d])) => (MOVDconst [int64(int32(d))>>uint64(c)])
+(NEG (MOVDconst [c])) => (MOVDconst [-c])
+(NEGW (MOVDconst [c])) => (MOVDconst [int64(int32(-c))])
+(MULLDconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)*d])
+(MULLWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c*int32(d))])
+(AND (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDconst [c] (MOVDconst [d])) => (MOVDconst [c&d])
+(ANDWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)&d])
+(OR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c|d])
+(ORconst [c] (MOVDconst [d])) => (MOVDconst [c|d])
+(ORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)|d])
+(XOR (MOVDconst [c]) (MOVDconst [d])) => (MOVDconst [c^d])
+(XORconst [c] (MOVDconst [d])) => (MOVDconst [c^d])
+(XORWconst [c] (MOVDconst [d])) => (MOVDconst [int64(c)^d])
+(LoweredRound32F x:(FMOVSconst)) => x
+(LoweredRound64F x:(FMOVDconst)) => x
+
+// generic simplifications
+// TODO: more of this
+(ADD x (NEG y)) => (SUB x y)
+(ADDW x (NEGW y)) => (SUBW x y)
+(SUB x x) => (MOVDconst [0])
+(SUBW x x) => (MOVDconst [0])
+(AND x x) => x
+(ANDW x x) => x
+(OR x x) => x
+(ORW x x) => x
+(XOR x x) => (MOVDconst [0])
+(XORW x x) => (MOVDconst [0])
+(NEG (ADDconst [c] (NEG x))) && c != -(1<<31) => (ADDconst [-c] x)
+(MOVBZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHZreg (ANDWconst [m] x)) => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+(MOVBreg (ANDWconst [m] x)) && int8(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+(MOVHreg (ANDWconst [m] x)) && int16(m) >= 0 => (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+
+// carry flag generation
+// (only constant fold carry of zero)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ => (FlagEQ)
+(Select1 (ADDCconst (MOVDconst [c]) [d]))
+ && uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ => (FlagLT)
+
+// borrow flag generation
+// (only constant fold borrow of zero)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d == 0
+ => (FlagGT)
+(Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ && uint64(d) <= uint64(c) && c-d != 0
+ => (FlagOV)
+
+// add with carry
+(ADDE x y (FlagEQ)) => (ADDC x y)
+(ADDE x y (FlagLT)) => (ADDC x y)
+(ADDC x (MOVDconst [c])) && is16Bit(c) => (ADDCconst x [int16(c)])
+(Select0 (ADDCconst (MOVDconst [c]) [d])) => (MOVDconst [c+int64(d)])
+
+// subtract with borrow
+(SUBE x y (FlagGT)) => (SUBC x y)
+(SUBE x y (FlagOV)) => (SUBC x y)
+(Select0 (SUBC (MOVDconst [c]) (MOVDconst [d]))) => (MOVDconst [c-d])
+
+// collapse carry chain
+(ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ => (ADDE x y c)
+
+// collapse borrow chain
+(SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ => (SUBE x y c)
+
+// branch on carry
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+(C(G|LG)IJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1]) => (BRC {s390x.NoCarry} carry)
+(C(G|LG)IJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0]) => (BRC {s390x.Carry} carry)
+
+// branch on borrow
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
+(C(G|LG)IJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1]) => (BRC {s390x.NoBorrow} borrow)
+(C(G|LG)IJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0]) => (BRC {s390x.Borrow} borrow)
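+
+// As a rough illustration (not a rewrite rule), a multi-word addition written
+// with math/bits is the kind of code the carry rules above target; the
+// hypothetical helper below is only a sketch:
+//
+//	func add128(xlo, xhi, ylo, yhi uint64) (lo, hi uint64) {
+//		var c uint64
+//		lo, c = bits.Add64(xlo, ylo, 0) // becomes ADDC
+//		hi, _ = bits.Add64(xhi, yhi, c) // carry consumed directly by ADDE
+//		return
+//	}
+//
+// Comparing the resulting carry against 0 or 1 then folds into a BRC on the
+// carry flag via the rules above.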
+
+// fused multiply-add
+(Select0 (F(ADD|SUB) (FMUL y z) x)) => (FM(ADD|SUB) x y z)
+(Select0 (F(ADDS|SUBS) (FMULS y z) x)) => (FM(ADDS|SUBS) x y z)
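+
+// For example (a sketch; fusion depends on the shape of the expression), a Go
+// function such as
+//
+//	func muladd(a, x, y float64) float64 { return a*x + y }
+//
+// can be lowered to a single FMADD by the rules above. math.FMA expresses the
+// same fused operation explicitly.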
+
+// Convert floating point comparisons against zero into 'load and test' instructions.
+(F(CMP|CMPS) x (FMOV(D|S)const [0.0])) => (LT(D|E)BR x)
+(F(CMP|CMPS) (FMOV(D|S)const [0.0]) x) => (InvertFlags (LT(D|E)BR <v.Type> x))
+
+// FSUB, FSUBS, FADD, FADDS now produce a condition code representing the
+// comparison of the result with 0.0. If a compare with zero instruction
+// (e.g. LTDBR) is following one of those instructions, we can use the
+// generated flag and remove the comparison instruction.
+// Note: when inserting Select1 ops we need to ensure they are in the
+// same block as their argument. We could also use @x.Block for this
+// but moving the flag generating value to a different block seems to
+// increase the likelihood that the flags value will have to be regenerated
+// by flagalloc which is not what we want.
+(LTDBR (Select0 x:(F(ADD|SUB) _ _))) && b == x.Block => (Select1 x)
+(LTEBR (Select0 x:(F(ADDS|SUBS) _ _))) && b == x.Block => (Select1 x)
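+
+// For instance (sketch), in
+//
+//	func negative(a, b float64) bool { return a-b < 0 }
+//
+// the FSUB already sets the condition code, so the explicit 'load and test'
+// of the result against zero can be dropped by the rules above.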
+
+// Fold memory operations into operations.
+// Exclude global data (SB) because these instructions cannot handle relative addresses.
+// TODO(mundaym): indexed versions of these?
+((ADD|SUB|MULLD|AND|OR|XOR) <t> x g:(MOVDload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULLD|AND|OR|XOR)load <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
+((ADD|SUB|MULL|AND|OR|XOR)W <t> x g:(MOVWZload [off] {sym} ptr mem))
+ && ptr.Op != OpSB
+ && is20Bit(int64(off))
+ && canMergeLoadClobber(v, g, x)
+ && clobber(g)
+ => ((ADD|SUB|MULL|AND|OR|XOR)Wload <t> [off] {sym} x ptr mem)
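+
+// For example (a sketch), in
+//
+//	func addFromMem(x int64, p *int64) int64 { return x + *p }
+//
+// the load can be folded into the addition, giving a single ADDload
+// (assembled as AG with a memory operand) instead of a separate MOVDload.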
+
+// Combine constant stores into larger (unaligned) stores.
+// Avoid SB because constant stores to relative offsets are
+// emulated by the assembler and also can't handle unaligned offsets.
+(MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 1 == c.Off()
+ && clobber(x)
+ => (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem)
+(MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 2 == c.Off()
+ && clobber(x)
+ => (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem)
+(MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && a.Off() + 4 == c.Off()
+ && clobber(x)
+ => (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem)
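+
+// For example (sketch), adjacent constant stores such as
+//
+//	b[0], b[1] = 1, 2
+//
+// can be merged by the rules above into one wider store of the combined
+// constant, provided the earlier store has no other uses.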
+
+// Combine stores into larger (unaligned) stores.
+// It doesn't work on global data (based on SB) because stores with relative addressing
+// require that the memory operand be aligned.
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHstore [i-1] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w0 mem)
+(MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w mem)
+(MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWstore [i-2] {s} p w0 mem)
+(MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstore [i-4] {s} p w mem)
+(MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDstore [i-4] {s} p w0 mem)
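+
+// For example (a sketch of the code these rules target), a manual big-endian
+// encode such as
+//
+//	b[0] = byte(v >> 24)
+//	b[1] = byte(v >> 16)
+//	b[2] = byte(v >> 8)
+//	b[3] = byte(v)
+//
+// can collapse into a single MOVWstore, since s390x stores are big-endian.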
+
+// Combine stores into larger (unaligned) stores with the bytes reversed (little endian).
+// Store-with-bytes-reversed instructions do not support relative memory addresses,
+// so these stores can't operate on global data (SB).
+(MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w0 mem)
+(MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w mem)
+(MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVHBRstore [i-1] {s} p w0 mem)
+(MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w mem)
+(MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w0 mem)
+(MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w mem)
+(MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVWBRstore [i-2] {s} p w0 mem)
+(MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDBRstore [i-4] {s} p w mem)
+(MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
+ && x.Uses == 1
+ && clobber(x)
+ => (MOVDBRstore [i-4] {s} p w0 mem)
+
+(MOVBstore [7] {s} p1 (SRDconst w)
+ x1:(MOVHBRstore [5] {s} p1 (SRDconst w)
+ x2:(MOVWBRstore [1] {s} p1 (SRDconst w)
+ x3:(MOVBstore [0] {s} p1 w mem))))
+ && x1.Uses == 1
+ && x2.Uses == 1
+ && x3.Uses == 1
+ && clobber(x1, x2, x3)
+ => (MOVDBRstore {s} p1 w mem)
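+
+// Similarly (sketch), the little-endian byte order
+//
+//	b[0] = byte(v)
+//	b[1] = byte(v >> 8)
+//	b[2] = byte(v >> 16)
+//	b[3] = byte(v >> 24)
+//
+// can collapse into a single MOVWBRstore.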
+
+// Combining byte loads into larger (unaligned) loads.
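+//
+// For example (a sketch of the code these rules target), a manual decode like
+//
+//	v := uint32(b[3]) | uint32(b[2])<<8 | uint32(b[1])<<16 | uint32(b[0])<<24
+//
+// can be combined into a single MOVWZload (or, with the opposite byte order,
+// a byte-reversed MOVWBRload), provided the byte loads have no other uses.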
+
+// Big-endian loads
+
+(ORW x1:(MOVBZload [i1] {s} p mem)
+ sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ && i1 == i0+1
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+
+(OR x1:(MOVBZload [i1] {s} p mem)
+ sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ && i1 == i0+1
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+
+(ORW x1:(MOVHZload [i1] {s} p mem)
+ sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ && i1 == i0+2
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+
+(OR x1:(MOVHZload [i1] {s} p mem)
+ sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ && i1 == i0+2
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+
+(OR x1:(MOVWZload [i1] {s} p mem)
+ sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
+ && i1 == i0+4
+ && p.Op != OpSB
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
+
+(ORW
+ s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ or:(ORW
+ s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+
+(OR
+ s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ or:(OR
+ s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ y))
+ && i1 == i0+1
+ && j1 == j0-8
+ && j1 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+
+(OR
+ s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem))
+ or:(OR
+ s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem))
+ y))
+ && i1 == i0+2
+ && j1 == j0-16
+ && j1 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)
+
+// Little-endian loads
+
+(ORW x0:(MOVBZload [i0] {s} p mem)
+ sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+
+(OR x0:(MOVBZload [i0] {s} p mem)
+ sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, sh)
+ => @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+
+(ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
+ sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)
+
+(OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))
+ sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ && i1 == i0+2
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))
+
+(OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem))
+ sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
+ && i1 == i0+4
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && sh.Uses == 1
+ && mergePoint(b,x0,x1) != nil
+ && clobber(x0, x1, r0, r1, sh)
+ => @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
+
+(ORW
+ s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ or:(ORW
+ s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ y))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+
+(OR
+ s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem))
+ or:(OR
+ s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem))
+ y))
+ && p.Op != OpSB
+ && i1 == i0+1
+ && j1 == j0+8
+ && j0 % 16 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+
+(OR
+ s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem)))
+ or:(OR
+ s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)))
+ y))
+ && i1 == i0+2
+ && j1 == j0+16
+ && j0 % 32 == 0
+ && x0.Uses == 1
+ && x1.Uses == 1
+ && r0.Uses == 1
+ && r1.Uses == 1
+ && s0.Uses == 1
+ && s1.Uses == 1
+ && or.Uses == 1
+ && mergePoint(b,x0,x1,y) != nil
+ && clobber(x0, x1, r0, r1, s0, s1, or)
+ => @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)
+
+// Combine stores into store multiples.
+// 32-bit
+(MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-4)
+ && clobber(x)
+ => (STM2 [i-4] {s} p w0 w1 mem)
+(MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STM3 [i-8] {s} p w0 w1 w2 mem)
+(MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-12)
+ && clobber(x)
+ => (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+(STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+// 64-bit
+(MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ && p.Op != OpSB
+ && x.Uses == 1
+ && is20Bit(int64(i)-8)
+ && clobber(x)
+ => (STMG2 [i-8] {s} p w0 w1 mem)
+(MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && clobber(x)
+ => (STMG3 [i-16] {s} p w0 w1 w2 mem)
+(MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-24)
+ && clobber(x)
+ => (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+(STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ && x.Uses == 1
+ && is20Bit(int64(i)-16)
+ && clobber(x)
+ => (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
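+
+// For example (sketch), four MOVDstore ops to adjacent offsets, as produced
+// when storing consecutive 8-byte fields or array elements, can be chained by
+// the rules above into a single STMG4 store-multiple instead of four stores.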
+
+// Convert 32-bit store multiples into 64-bit stores.
+(STM2 [i] {s} p (SRDconst [32] x) x mem) => (MOVDstore [i] {s} p x mem)
diff --git a/src/cmd/compile/internal/ssa/gen/S390XOps.go b/src/cmd/compile/internal/ssa/gen/S390XOps.go
new file mode 100644
index 0000000..eef8a25
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/S390XOps.go
@@ -0,0 +1,820 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+// Notes:
+// - Integer types live in the low portion of registers. Upper portions are junk.
+// - Boolean types use the low-order byte of a register. 0=false, 1=true.
+// Upper bytes are junk.
+// - When doing sub-register operations, we try to write the whole
+// destination register to avoid a partial-register write.
+// - Unused portions of AuxInt (or the Val portion of ValAndOff) are
+// filled by sign-extending the used portion. Users of AuxInt which interpret
+// AuxInt as unsigned (e.g. shifts) must be careful.
+// - The SB 'register' is implemented using instruction-relative addressing. This
+// places some limitations on when and how memory operands that are addressed
+// relative to SB can be used:
+//
+// 1. Pseudo-instructions do not always map to a single machine instruction when
+// using the SB 'register' to address data. This is because many machine
+//    instructions do not have relative long (RL suffix) equivalents; for
+//    example, ADDload is assembled as AG, which has no relative long form.
+//
+// 2. Loads and stores using relative addressing require the data be aligned
+// according to its size (8-bytes for double words, 4-bytes for words
+// and so on).
+//
+// We can always work around these by inserting LARL instructions (load address
+// relative long) in the assembler, but typically this results in worse code
+// generation because the address can't be re-used. Inserting instructions in the
+// assembler also means clobbering the temp register and it is a long-term goal
+// to prevent the compiler doing this so that it can be allocated as a normal
+// register.
+//
+// For more information about the z/Architecture, the instruction set and the
+// addressing modes it supports, take a look at the z/Architecture Principles of
+// Operation: http://publibfp.boulder.ibm.com/epubs/pdf/dz9zr010.pdf
+//
+// Suffixes encode the bit width of pseudo-instructions.
+// D (double word) = 64 bit (frequently omitted)
+// W (word) = 32 bit
+// H (half word) = 16 bit
+// B (byte) = 8 bit
+// S (single prec.) = 32 bit (double precision is omitted)
+
+// copied from ../../s390x/reg.go
+var regNamesS390X = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "g", // R13
+ "R14",
+ "SP", // R15
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ // If you add registers, update asyncPreempt in runtime.
+
+	// pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesS390X) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesS390X {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ // Common individual register masks
+ var (
+ sp = buildReg("SP")
+ sb = buildReg("SB")
+ r0 = buildReg("R0")
+ tmp = buildReg("R11") // R11 is used as a temporary in a small number of instructions.
+
+ // R10 is reserved by the assembler.
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14")
+ gpg = gp | buildReg("g")
+ gpsp = gp | sp
+
+ // R0 is considered to contain the value 0 in address calculations.
+ ptr = gp &^ r0
+ ptrsp = ptr | sp
+ ptrspsb = ptrsp | sb
+
+ fp = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ callerSave = gp | fp | buildReg("g") // runtime.setg (and anything calling it) may clobber g
+ r1 = buildReg("R1")
+ r2 = buildReg("R2")
+ r3 = buildReg("R3")
+ )
+ // Common slices of register masks
+ var (
+ gponly = []regMask{gp}
+ fponly = []regMask{fp}
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: []regMask{}, outputs: gponly}
+ gp11 = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp11sp = regInfo{inputs: []regMask{gpsp}, outputs: gponly}
+ gp21 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp21sp = regInfo{inputs: []regMask{gpsp, gp}, outputs: gponly}
+ gp21tmp = regInfo{inputs: []regMask{gp &^ tmp, gp &^ tmp}, outputs: []regMask{gp &^ tmp}, clobbers: tmp}
+
+ // R0 evaluates to 0 when used as the number of bits to shift
+ // so we need to exclude it from that operand.
+ sh21 = regInfo{inputs: []regMask{gp, ptr}, outputs: gponly}
+
+ addr = regInfo{inputs: []regMask{sp | sb}, outputs: gponly}
+ addridx = regInfo{inputs: []regMask{sp | sb, ptrsp}, outputs: gponly}
+
+ gp2flags = regInfo{inputs: []regMask{gpsp, gpsp}}
+ gp1flags = regInfo{inputs: []regMask{gpsp}}
+ gp2flags1 = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp11flags = regInfo{inputs: []regMask{gp}, outputs: gponly}
+ gp21flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+ gp2flags1flags = regInfo{inputs: []regMask{gp, gp}, outputs: gponly}
+
+ gpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: gponly}
+ gploadidx = regInfo{inputs: []regMask{ptrspsb, ptrsp, 0}, outputs: gponly}
+ gpopload = regInfo{inputs: []regMask{gp, ptrsp, 0}, outputs: gponly}
+ gpstore = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}}
+ gpstoreconst = regInfo{inputs: []regMask{ptrspsb, 0}}
+ gpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, gpsp, 0}}
+ gpstorebr = regInfo{inputs: []regMask{ptrsp, gpsp, 0}}
+ gpstorelaa = regInfo{inputs: []regMask{ptrspsb, gpsp, 0}, outputs: gponly}
+ gpstorelab = regInfo{inputs: []regMask{r1, gpsp, 0}, clobbers: r1}
+
+ gpmvc = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}}
+
+ fp01 = regInfo{inputs: []regMask{}, outputs: fponly}
+ fp21 = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fp31 = regInfo{inputs: []regMask{fp, fp, fp}, outputs: fponly}
+ fp21clobber = regInfo{inputs: []regMask{fp, fp}, outputs: fponly}
+ fpgp = regInfo{inputs: fponly, outputs: gponly}
+ gpfp = regInfo{inputs: gponly, outputs: fponly}
+ fp11 = regInfo{inputs: fponly, outputs: fponly}
+ fp1flags = regInfo{inputs: []regMask{fp}}
+ fp11clobber = regInfo{inputs: fponly, outputs: fponly}
+ fp2flags = regInfo{inputs: []regMask{fp, fp}}
+
+ fpload = regInfo{inputs: []regMask{ptrspsb, 0}, outputs: fponly}
+ fploadidx = regInfo{inputs: []regMask{ptrsp, ptrsp, 0}, outputs: fponly}
+
+ fpstore = regInfo{inputs: []regMask{ptrspsb, fp, 0}}
+ fpstoreidx = regInfo{inputs: []regMask{ptrsp, ptrsp, fp, 0}}
+
+ sync = regInfo{inputs: []regMask{0}}
+
+ // LoweredAtomicCas may overwrite arg1, so force it to R0 for now.
+ cas = regInfo{inputs: []regMask{ptrsp, r0, gpsp, 0}, outputs: []regMask{gp, 0}, clobbers: r0}
+
+ // LoweredAtomicExchange overwrites the output before executing
+ // CS{,G}, so the output register must not be the same as the
+ // input register. For now we just force the output register to
+ // R0.
+ exchange = regInfo{inputs: []regMask{ptrsp, gpsp &^ r0, 0}, outputs: []regMask{r0, 0}}
+ )
+
+ var S390Xops = []opData{
+ // fp ops
+ {name: "FADDS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FADDS", commutative: true, resultInArg0: true}, // fp32 arg0 + arg1
+ {name: "FADD", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FADD", commutative: true, resultInArg0: true}, // fp64 arg0 + arg1
+ {name: "FSUBS", argLength: 2, reg: fp21clobber, typ: "(Float32,Flags)", asm: "FSUBS", resultInArg0: true}, // fp32 arg0 - arg1
+ {name: "FSUB", argLength: 2, reg: fp21clobber, typ: "(Float64,Flags)", asm: "FSUB", resultInArg0: true}, // fp64 arg0 - arg1
+ {name: "FMULS", argLength: 2, reg: fp21, asm: "FMULS", commutative: true, resultInArg0: true}, // fp32 arg0 * arg1
+ {name: "FMUL", argLength: 2, reg: fp21, asm: "FMUL", commutative: true, resultInArg0: true}, // fp64 arg0 * arg1
+ {name: "FDIVS", argLength: 2, reg: fp21, asm: "FDIVS", resultInArg0: true}, // fp32 arg0 / arg1
+ {name: "FDIV", argLength: 2, reg: fp21, asm: "FDIV", resultInArg0: true}, // fp64 arg0 / arg1
+ {name: "FNEGS", argLength: 1, reg: fp11clobber, asm: "FNEGS", clobberFlags: true}, // fp32 -arg0
+ {name: "FNEG", argLength: 1, reg: fp11clobber, asm: "FNEG", clobberFlags: true}, // fp64 -arg0
+ {name: "FMADDS", argLength: 3, reg: fp31, asm: "FMADDS", resultInArg0: true}, // fp32 arg1 * arg2 + arg0
+ {name: "FMADD", argLength: 3, reg: fp31, asm: "FMADD", resultInArg0: true}, // fp64 arg1 * arg2 + arg0
+ {name: "FMSUBS", argLength: 3, reg: fp31, asm: "FMSUBS", resultInArg0: true}, // fp32 arg1 * arg2 - arg0
+ {name: "FMSUB", argLength: 3, reg: fp31, asm: "FMSUB", resultInArg0: true}, // fp64 arg1 * arg2 - arg0
+ {name: "LPDFR", argLength: 1, reg: fp11, asm: "LPDFR"}, // fp64/fp32 set sign bit
+ {name: "LNDFR", argLength: 1, reg: fp11, asm: "LNDFR"}, // fp64/fp32 clear sign bit
+ {name: "CPSDR", argLength: 2, reg: fp21, asm: "CPSDR"}, // fp64/fp32 copy arg1 sign bit to arg0
+
+ // Round to integer, float64 only.
+ //
+ // aux | rounding mode
+ // ----+-----------------------------------
+ // 1 | round to nearest, ties away from 0
+ // 4 | round to nearest, ties to even
+ // 5 | round toward 0
+ // 6 | round toward +∞
+ // 7 | round toward -∞
+ {name: "FIDBR", argLength: 1, reg: fp11, asm: "FIDBR", aux: "Int8"},
+
+ {name: "FMOVSload", argLength: 2, reg: fpload, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp32 load
+ {name: "FMOVDload", argLength: 2, reg: fpload, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // fp64 load
+ {name: "FMOVSconst", reg: fp01, asm: "FMOVS", aux: "Float32", rematerializeable: true}, // fp32 constant
+ {name: "FMOVDconst", reg: fp01, asm: "FMOVD", aux: "Float64", rematerializeable: true}, // fp64 constant
+ {name: "FMOVSloadidx", argLength: 3, reg: fploadidx, asm: "FMOVS", aux: "SymOff", symEffect: "Read"}, // fp32 load indexed by i
+ {name: "FMOVDloadidx", argLength: 3, reg: fploadidx, asm: "FMOVD", aux: "SymOff", symEffect: "Read"}, // fp64 load indexed by i
+
+ {name: "FMOVSstore", argLength: 3, reg: fpstore, asm: "FMOVS", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp32 store
+ {name: "FMOVDstore", argLength: 3, reg: fpstore, asm: "FMOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Write"}, // fp64 store
+ {name: "FMOVSstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVS", aux: "SymOff", symEffect: "Write"}, // fp32 indexed by i store
+ {name: "FMOVDstoreidx", argLength: 4, reg: fpstoreidx, asm: "FMOVD", aux: "SymOff", symEffect: "Write"}, // fp64 indexed by i store
+
+ // binary ops
+ {name: "ADD", argLength: 2, reg: gp21sp, asm: "ADD", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDW", argLength: 2, reg: gp21sp, asm: "ADDW", commutative: true, clobberFlags: true}, // arg0 + arg1
+ {name: "ADDconst", argLength: 1, reg: gp11sp, asm: "ADD", aux: "Int32", typ: "UInt64", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDWconst", argLength: 1, reg: gp11sp, asm: "ADDW", aux: "Int32", clobberFlags: true}, // arg0 + auxint
+ {name: "ADDload", argLength: 3, reg: gpopload, asm: "ADD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+ {name: "ADDWload", argLength: 3, reg: gpopload, asm: "ADDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 + *arg1. arg2=mem
+
+ {name: "SUB", argLength: 2, reg: gp21, asm: "SUB", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBW", argLength: 2, reg: gp21, asm: "SUBW", clobberFlags: true}, // arg0 - arg1
+ {name: "SUBconst", argLength: 1, reg: gp11, asm: "SUB", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBWconst", argLength: 1, reg: gp11, asm: "SUBW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 - auxint
+ {name: "SUBload", argLength: 3, reg: gpopload, asm: "SUB", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+ {name: "SUBWload", argLength: 3, reg: gpopload, asm: "SUBW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 - *arg1. arg2=mem
+
+ {name: "MULLD", argLength: 2, reg: gp21, asm: "MULLD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLW", argLength: 2, reg: gp21, asm: "MULLW", typ: "Int32", commutative: true, resultInArg0: true, clobberFlags: true}, // arg0 * arg1
+ {name: "MULLDconst", argLength: 1, reg: gp11, asm: "MULLD", aux: "Int32", typ: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLWconst", argLength: 1, reg: gp11, asm: "MULLW", aux: "Int32", typ: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 * auxint
+ {name: "MULLDload", argLength: 3, reg: gpopload, asm: "MULLD", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+ {name: "MULLWload", argLength: 3, reg: gpopload, asm: "MULLW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 * *arg1. arg2=mem
+
+ {name: "MULHD", argLength: 2, reg: gp21tmp, asm: "MULHD", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+ {name: "MULHDU", argLength: 2, reg: gp21tmp, asm: "MULHDU", typ: "Int64", commutative: true, resultInArg0: true, clobberFlags: true}, // (arg0 * arg1) >> width
+
+ {name: "DIVD", argLength: 2, reg: gp21tmp, asm: "DIVD", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVW", argLength: 2, reg: gp21tmp, asm: "DIVW", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVDU", argLength: 2, reg: gp21tmp, asm: "DIVDU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+ {name: "DIVWU", argLength: 2, reg: gp21tmp, asm: "DIVWU", resultInArg0: true, clobberFlags: true}, // arg0 / arg1
+
+ {name: "MODD", argLength: 2, reg: gp21tmp, asm: "MODD", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODW", argLength: 2, reg: gp21tmp, asm: "MODW", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "MODDU", argLength: 2, reg: gp21tmp, asm: "MODDU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+ {name: "MODWU", argLength: 2, reg: gp21tmp, asm: "MODWU", resultInArg0: true, clobberFlags: true}, // arg0 % arg1
+
+ {name: "AND", argLength: 2, reg: gp21, asm: "AND", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDW", argLength: 2, reg: gp21, asm: "ANDW", commutative: true, clobberFlags: true}, // arg0 & arg1
+ {name: "ANDconst", argLength: 1, reg: gp11, asm: "AND", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDWconst", argLength: 1, reg: gp11, asm: "ANDW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 & auxint
+ {name: "ANDload", argLength: 3, reg: gpopload, asm: "AND", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+ {name: "ANDWload", argLength: 3, reg: gpopload, asm: "ANDW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 & *arg1. arg2=mem
+
+ {name: "OR", argLength: 2, reg: gp21, asm: "OR", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORW", argLength: 2, reg: gp21, asm: "ORW", commutative: true, clobberFlags: true}, // arg0 | arg1
+ {name: "ORconst", argLength: 1, reg: gp11, asm: "OR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORWconst", argLength: 1, reg: gp11, asm: "ORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 | auxint
+ {name: "ORload", argLength: 3, reg: gpopload, asm: "OR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+ {name: "ORWload", argLength: 3, reg: gpopload, asm: "ORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 | *arg1. arg2=mem
+
+ {name: "XOR", argLength: 2, reg: gp21, asm: "XOR", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORW", argLength: 2, reg: gp21, asm: "XORW", commutative: true, clobberFlags: true}, // arg0 ^ arg1
+ {name: "XORconst", argLength: 1, reg: gp11, asm: "XOR", aux: "Int64", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORWconst", argLength: 1, reg: gp11, asm: "XORW", aux: "Int32", resultInArg0: true, clobberFlags: true}, // arg0 ^ auxint
+ {name: "XORload", argLength: 3, reg: gpopload, asm: "XOR", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+ {name: "XORWload", argLength: 3, reg: gpopload, asm: "XORW", aux: "SymOff", resultInArg0: true, clobberFlags: true, faultOnNilArg1: true, symEffect: "Read"}, // arg0 ^ *arg1. arg2=mem
+
+ // Arithmetic ops with carry/borrow chain.
+ //
+ // A carry is represented by a condition code of 2 or 3 (GT or OV).
+ // A borrow is represented by a condition code of 0 or 1 (EQ or LT).
+ {name: "ADDC", argLength: 2, reg: gp21flags, asm: "ADDC", typ: "(UInt64,Flags)", commutative: true}, // (arg0 + arg1, carry out)
+ {name: "ADDCconst", argLength: 1, reg: gp11flags, asm: "ADDC", typ: "(UInt64,Flags)", aux: "Int16"}, // (arg0 + auxint, carry out)
+ {name: "ADDE", argLength: 3, reg: gp2flags1flags, asm: "ADDE", typ: "(UInt64,Flags)", commutative: true, resultInArg0: true}, // (arg0 + arg1 + arg2 (carry in), carry out)
+ {name: "SUBC", argLength: 2, reg: gp21flags, asm: "SUBC", typ: "(UInt64,Flags)"}, // (arg0 - arg1, borrow out)
+ {name: "SUBE", argLength: 3, reg: gp2flags1flags, asm: "SUBE", typ: "(UInt64,Flags)", resultInArg0: true}, // (arg0 - arg1 - arg2 (borrow in), borrow out)
+
+ // Comparisons.
+ {name: "CMP", argLength: 2, reg: gp2flags, asm: "CMP", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPW", argLength: 2, reg: gp2flags, asm: "CMPW", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPU", argLength: 2, reg: gp2flags, asm: "CMPU", typ: "Flags"}, // arg0 compare to arg1
+ {name: "CMPWU", argLength: 2, reg: gp2flags, asm: "CMPWU", typ: "Flags"}, // arg0 compare to arg1
+
+ {name: "CMPconst", argLength: 1, reg: gp1flags, asm: "CMP", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWconst", argLength: 1, reg: gp1flags, asm: "CMPW", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPUconst", argLength: 1, reg: gp1flags, asm: "CMPU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+ {name: "CMPWUconst", argLength: 1, reg: gp1flags, asm: "CMPWU", typ: "Flags", aux: "Int32"}, // arg0 compare to auxint
+
+ {name: "FCMPS", argLength: 2, reg: fp2flags, asm: "CEBR", typ: "Flags"}, // arg0 compare to arg1, f32
+ {name: "FCMP", argLength: 2, reg: fp2flags, asm: "FCMPU", typ: "Flags"}, // arg0 compare to arg1, f64
+ {name: "LTDBR", argLength: 1, reg: fp1flags, asm: "LTDBR", typ: "Flags"}, // arg0 compare to 0, f64
+ {name: "LTEBR", argLength: 1, reg: fp1flags, asm: "LTEBR", typ: "Flags"}, // arg0 compare to 0, f32
+
+ {name: "SLD", argLength: 2, reg: sh21, asm: "SLD"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLW", argLength: 2, reg: sh21, asm: "SLW"}, // arg0 << arg1, shift amount is mod 64
+ {name: "SLDconst", argLength: 1, reg: gp11, asm: "SLD", aux: "UInt8"}, // arg0 << auxint, shift amount 0-63
+ {name: "SLWconst", argLength: 1, reg: gp11, asm: "SLW", aux: "UInt8"}, // arg0 << auxint, shift amount 0-31
+
+ {name: "SRD", argLength: 2, reg: sh21, asm: "SRD"}, // unsigned arg0 >> arg1, shift amount is mod 64
+ {name: "SRW", argLength: 2, reg: sh21, asm: "SRW"}, // unsigned uint32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRDconst", argLength: 1, reg: gp11, asm: "SRD", aux: "UInt8"}, // unsigned arg0 >> auxint, shift amount 0-63
+ {name: "SRWconst", argLength: 1, reg: gp11, asm: "SRW", aux: "UInt8"}, // unsigned uint32(arg0) >> auxint, shift amount 0-31
+
+ // Arithmetic shifts clobber flags.
+ {name: "SRAD", argLength: 2, reg: sh21, asm: "SRAD", clobberFlags: true}, // signed arg0 >> arg1, shift amount is mod 64
+ {name: "SRAW", argLength: 2, reg: sh21, asm: "SRAW", clobberFlags: true}, // signed int32(arg0) >> arg1, shift amount is mod 64
+ {name: "SRADconst", argLength: 1, reg: gp11, asm: "SRAD", aux: "UInt8", clobberFlags: true}, // signed arg0 >> auxint, shift amount 0-63
+ {name: "SRAWconst", argLength: 1, reg: gp11, asm: "SRAW", aux: "UInt8", clobberFlags: true}, // signed int32(arg0) >> auxint, shift amount 0-31
+
+ // Rotate instructions.
+ // Note: no RLLGconst - use RISBGZ instead.
+ {name: "RLLG", argLength: 2, reg: sh21, asm: "RLLG"}, // arg0 rotate left arg1, rotate amount 0-63
+ {name: "RLL", argLength: 2, reg: sh21, asm: "RLL"}, // arg0 rotate left arg1, rotate amount 0-31
+ {name: "RLLconst", argLength: 1, reg: gp11, asm: "RLL", aux: "UInt8"}, // arg0 rotate left auxint, rotate amount 0-31
+
+ // Rotate then (and|or|xor|insert) selected bits instructions.
+ //
+ // Aux is an s390x.RotateParams struct containing Start, End and rotation
+ // Amount fields.
+ //
+ // arg1 is rotated left by the rotation amount then the bits from the start
+ // bit to the end bit (inclusive) are combined with arg0 using the logical
+ // operation specified. Bit indices are specified from left to right - the
+ // MSB is 0 and the LSB is 63.
+ //
+ // Examples:
+ // | aux |
+ // | instruction | start | end | amount | arg0 | arg1 | result |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ // | RXSBG (XOR) | 0 | 1 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0x3fff_ffff_ffff_ffff |
+ // | RXSBG (XOR) | 62 | 63 | 0 | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_ffff | 0xffff_ffff_ffff_fffc |
+ // | RXSBG (XOR) | 0 | 47 | 16 | 0xffff_ffff_ffff_ffff | 0x0000_0000_0000_ffff | 0xffff_ffff_0000_ffff |
+ // +-------------+-------+-----+--------+-----------------------+-----------------------+-----------------------+
+ //
+ {name: "RXSBG", argLength: 2, reg: gp21, asm: "RXSBG", resultInArg0: true, aux: "S390XRotateParams", clobberFlags: true}, // rotate then xor selected bits
+ {name: "RISBGZ", argLength: 1, reg: gp11, asm: "RISBGZ", aux: "S390XRotateParams", clobberFlags: true}, // rotate then insert selected bits [into zero]
+
+ // unary ops
+ {name: "NEG", argLength: 1, reg: gp11, asm: "NEG", clobberFlags: true}, // -arg0
+ {name: "NEGW", argLength: 1, reg: gp11, asm: "NEGW", clobberFlags: true}, // -arg0
+
+ {name: "NOT", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+ {name: "NOTW", argLength: 1, reg: gp11, resultInArg0: true, clobberFlags: true}, // ^arg0
+
+ {name: "FSQRT", argLength: 1, reg: fp11, asm: "FSQRT"}, // sqrt(arg0)
+ {name: "FSQRTS", argLength: 1, reg: fp11, asm: "FSQRTS"}, // sqrt(arg0), float32
+
+ // Conditional register-register moves.
+ // The aux for these values is an s390x.CCMask value representing the condition code mask.
+ {name: "LOCGR", argLength: 3, reg: gp2flags1, resultInArg0: true, asm: "LOCGR", aux: "S390XCCMask"}, // load arg1 into arg0 if the condition code in arg2 matches a masked bit in aux.
+
+ {name: "MOVBreg", argLength: 1, reg: gp11sp, asm: "MOVB", typ: "Int64"}, // sign extend arg0 from int8 to int64
+ {name: "MOVBZreg", argLength: 1, reg: gp11sp, asm: "MOVBZ", typ: "UInt64"}, // zero extend arg0 from int8 to int64
+ {name: "MOVHreg", argLength: 1, reg: gp11sp, asm: "MOVH", typ: "Int64"}, // sign extend arg0 from int16 to int64
+ {name: "MOVHZreg", argLength: 1, reg: gp11sp, asm: "MOVHZ", typ: "UInt64"}, // zero extend arg0 from int16 to int64
+ {name: "MOVWreg", argLength: 1, reg: gp11sp, asm: "MOVW", typ: "Int64"}, // sign extend arg0 from int32 to int64
+ {name: "MOVWZreg", argLength: 1, reg: gp11sp, asm: "MOVWZ", typ: "UInt64"}, // zero extend arg0 from int32 to int64
+
+ {name: "MOVDconst", reg: gp01, asm: "MOVD", typ: "UInt64", aux: "Int64", rematerializeable: true}, // auxint
+
+ {name: "LDGR", argLength: 1, reg: gpfp, asm: "LDGR"}, // move int64 to float64 (no conversion)
+ {name: "LGDR", argLength: 1, reg: fpgp, asm: "LGDR"}, // move float64 to int64 (no conversion)
+
+ {name: "CFDBRA", argLength: 1, reg: fpgp, asm: "CFDBRA", clobberFlags: true}, // convert float64 to int32
+ {name: "CGDBRA", argLength: 1, reg: fpgp, asm: "CGDBRA", clobberFlags: true}, // convert float64 to int64
+ {name: "CFEBRA", argLength: 1, reg: fpgp, asm: "CFEBRA", clobberFlags: true}, // convert float32 to int32
+ {name: "CGEBRA", argLength: 1, reg: fpgp, asm: "CGEBRA", clobberFlags: true}, // convert float32 to int64
+ {name: "CEFBRA", argLength: 1, reg: gpfp, asm: "CEFBRA", clobberFlags: true}, // convert int32 to float32
+ {name: "CDFBRA", argLength: 1, reg: gpfp, asm: "CDFBRA", clobberFlags: true}, // convert int32 to float64
+ {name: "CEGBRA", argLength: 1, reg: gpfp, asm: "CEGBRA", clobberFlags: true}, // convert int64 to float32
+ {name: "CDGBRA", argLength: 1, reg: gpfp, asm: "CDGBRA", clobberFlags: true}, // convert int64 to float64
+ {name: "CLFEBR", argLength: 1, reg: fpgp, asm: "CLFEBR", clobberFlags: true}, // convert float32 to uint32
+ {name: "CLFDBR", argLength: 1, reg: fpgp, asm: "CLFDBR", clobberFlags: true}, // convert float64 to uint32
+ {name: "CLGEBR", argLength: 1, reg: fpgp, asm: "CLGEBR", clobberFlags: true}, // convert float32 to uint64
+ {name: "CLGDBR", argLength: 1, reg: fpgp, asm: "CLGDBR", clobberFlags: true}, // convert float64 to uint64
+ {name: "CELFBR", argLength: 1, reg: gpfp, asm: "CELFBR", clobberFlags: true}, // convert uint32 to float32
+ {name: "CDLFBR", argLength: 1, reg: gpfp, asm: "CDLFBR", clobberFlags: true}, // convert uint32 to float64
+ {name: "CELGBR", argLength: 1, reg: gpfp, asm: "CELGBR", clobberFlags: true}, // convert uint64 to float32
+ {name: "CDLGBR", argLength: 1, reg: gpfp, asm: "CDLGBR", clobberFlags: true}, // convert uint64 to float64
+
+ {name: "LEDBR", argLength: 1, reg: fp11, asm: "LEDBR"}, // convert float64 to float32
+ {name: "LDEBR", argLength: 1, reg: fp11, asm: "LDEBR"}, // convert float32 to float64
+
+ {name: "MOVDaddr", argLength: 1, reg: addr, aux: "SymOff", rematerializeable: true, symEffect: "Read"}, // arg0 + auxint + offset encoded in aux
+ {name: "MOVDaddridx", argLength: 2, reg: addridx, aux: "SymOff", symEffect: "Read"}, // arg0 + arg1 + auxint + aux
+
+ // auxint+aux == add auxint and the offset of the symbol in aux (if any) to the effective address
+ {name: "MOVBZload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", faultOnNilArg0: true, symEffect: "Read"}, // load byte from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVBload", argLength: 2, reg: gpload, asm: "MOVB", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVHZload", argLength: 2, reg: gpload, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVHload", argLength: 2, reg: gpload, asm: "MOVH", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVWZload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Zero extend.
+ {name: "MOVWload", argLength: 2, reg: gpload, asm: "MOVW", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"}, // ditto, sign extend to int64
+ {name: "MOVDload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem
+
+ {name: "MOVWBR", argLength: 1, reg: gp11, asm: "MOVWBR"}, // arg0 swap bytes
+ {name: "MOVDBR", argLength: 1, reg: gp11, asm: "MOVDBR"}, // arg0 swap bytes
+
+ {name: "MOVHBRload", argLength: 2, reg: gpload, asm: "MOVHBR", aux: "SymOff", typ: "UInt16", faultOnNilArg0: true, symEffect: "Read"}, // load 2 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVWBRload", argLength: 2, reg: gpload, asm: "MOVWBR", aux: "SymOff", typ: "UInt32", faultOnNilArg0: true, symEffect: "Read"}, // load 4 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+ {name: "MOVDBRload", argLength: 2, reg: gpload, asm: "MOVDBR", aux: "SymOff", typ: "UInt64", faultOnNilArg0: true, symEffect: "Read"}, // load 8 bytes from arg0+auxint+aux. arg1=mem. Reverse bytes.
+
+ {name: "MOVBstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store byte in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHstore", argLength: 3, reg: gpstore, asm: "MOVH", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVWstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVDstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem
+ {name: "MOVHBRstore", argLength: 3, reg: gpstorebr, asm: "MOVHBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 2 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRstore", argLength: 3, reg: gpstorebr, asm: "MOVWBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 4 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRstore", argLength: 3, reg: gpstorebr, asm: "MOVDBR", aux: "SymOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes in arg1 to arg0+auxint+aux. arg2=mem. Reverse bytes.
+
+ {name: "MVC", argLength: 3, reg: gpmvc, asm: "MVC", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, faultOnNilArg1: true, symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size,off
+
+ // indexed loads/stores
+ {name: "MOVBZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVBZ", aux: "SymOff", typ: "UInt8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVBloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVB", aux: "SymOff", typ: "Int8", symEffect: "Read"}, // load a byte from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVHZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHZ", aux: "SymOff", typ: "UInt16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVHloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVH", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVWZloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWZ", aux: "SymOff", typ: "UInt32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Zero extend.
+ {name: "MOVWloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVW", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Sign extend.
+ {name: "MOVDloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVD", aux: "SymOff", typ: "UInt64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem
+ {name: "MOVHBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVHBR", aux: "SymOff", typ: "Int16", symEffect: "Read"}, // load 2 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVWBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVWBR", aux: "SymOff", typ: "Int32", symEffect: "Read"}, // load 4 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVDBRloadidx", argLength: 3, reg: gploadidx, commutative: true, asm: "MOVDBR", aux: "SymOff", typ: "Int64", symEffect: "Read"}, // load 8 bytes from arg0+arg1+auxint+aux. arg2=mem. Reverse bytes.
+ {name: "MOVBstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVB", aux: "SymOff", symEffect: "Write"}, // store byte in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVH", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVWstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVW", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVDstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVD", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem
+ {name: "MOVHBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVHBR", aux: "SymOff", symEffect: "Write"}, // store 2 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVWBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVWBR", aux: "SymOff", symEffect: "Write"}, // store 4 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+ {name: "MOVDBRstoreidx", argLength: 4, reg: gpstoreidx, commutative: true, asm: "MOVDBR", aux: "SymOff", symEffect: "Write"}, // store 8 bytes in arg2 to arg0+arg1+auxint+aux. arg3=mem. Reverse bytes.
+
+ // For storeconst ops, the AuxInt field encodes both
+ // the value to store and an address offset of the store.
+ // Cast AuxInt to a ValAndOff to extract Val and Off fields.
+ {name: "MOVBstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVB", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low byte of ValAndOff(AuxInt).Val() to arg0+ValAndOff(AuxInt).Off()+aux. arg1=mem
+ {name: "MOVHstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVH", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 2 bytes of ...
+ {name: "MOVWstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVW", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store low 4 bytes of ...
+ {name: "MOVDstoreconst", argLength: 2, reg: gpstoreconst, asm: "MOVD", aux: "SymValAndOff", typ: "Mem", faultOnNilArg0: true, symEffect: "Write"}, // store 8 bytes of ...
+
+ {name: "CLEAR", argLength: 2, reg: regInfo{inputs: []regMask{ptr, 0}}, asm: "CLEAR", aux: "SymValAndOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, symEffect: "Write"},
+
+ {name: "CALLstatic", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLtail", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "CALLclosure", argLength: 3, reg: regInfo{inputs: []regMask{ptrsp, buildReg("R12"), 0}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "CALLinter", argLength: 2, reg: regInfo{inputs: []regMask{ptr}, clobbers: callerSave}, aux: "CallOff", clobberFlags: true, call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ // (InvertFlags (CMP a b)) == (CMP b a)
+ // InvertFlags is a pseudo-op which can't appear in assembly output.
+ {name: "InvertFlags", argLength: 1}, // reverse direction of arg0
+
+ // Pseudo-ops
+ {name: "LoweredGetG", argLength: 1, reg: gp01}, // arg0=mem
+ // Scheduler ensures LoweredGetClosurePtr occurs only in entry block,
+ // and sorts it to the very beginning of the block to prevent other
+ // use of R12 (the closure pointer)
+ {name: "LoweredGetClosurePtr", reg: regInfo{outputs: []regMask{buildReg("R12")}}, zeroWidth: true},
+		// LoweredGetCallerSP returns the SP of the caller of the current function.
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true},
+ // LoweredGetCallerPC evaluates to the PC to which its "caller" will return.
+		// I.e., if f calls g and g calls getcallerpc,
+ // the result should be the PC within f that g will return to.
+ // See runtime/stubs.go for a more detailed discussion.
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true},
+		// arg0=ptr, arg1=mem, returns void. Faults if ptr is nil.
+		{name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{ptrsp}}, clobberFlags: true, nilCheck: true, faultOnNilArg0: true},
+ // Round ops to block fused-multiply-add extraction.
+ {name: "LoweredRound32F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+ {name: "LoweredRound64F", argLength: 1, reg: fp11, resultInArg0: true, zeroWidth: true},
+
+ // LoweredWB invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+ // It saves all GP registers if necessary,
+ // but clobbers R14 (LR) because it's a call,
+ // and also clobbers R1 as the PLT stub does.
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{buildReg("R2"), buildReg("R3")}, clobbers: (callerSave &^ gpg) | buildReg("R14") | r1}, clobberFlags: true, aux: "Sym", symEffect: "None"},
+
+ // There are three of these functions so that they can have three different register inputs.
+ // When we check 0 <= c <= cap (A), then 0 <= b <= c (B), then 0 <= a <= b (C), we want the
+ // default registers to match so we don't need to copy registers around unnecessarily.
+ {name: "LoweredPanicBoundsA", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r2, r3}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsB", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r1, r2}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+ {name: "LoweredPanicBoundsC", argLength: 3, aux: "Int64", reg: regInfo{inputs: []regMask{r0, r1}}, typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory. AuxInt contains report code (see PanicBounds in generic.go).
+
+ // Constant condition code values. The condition code can be 0, 1, 2 or 3.
+ {name: "FlagEQ"}, // CC=0 (equal)
+ {name: "FlagLT"}, // CC=1 (less than)
+ {name: "FlagGT"}, // CC=2 (greater than)
+ {name: "FlagOV"}, // CC=3 (overflow)
+
+ // Fast-BCR-serialization to ensure store-load ordering.
+ {name: "SYNC", argLength: 1, reg: sync, asm: "SYNC", typ: "Mem"},
+
+ // Atomic loads. These are just normal loads but return <value,memory> tuples
+ // so they can be properly ordered with other loads.
+ // load from arg0+auxint+aux. arg1=mem.
+ {name: "MOVBZatomicload", argLength: 2, reg: gpload, asm: "MOVBZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVWZatomicload", argLength: 2, reg: gpload, asm: "MOVWZ", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+ {name: "MOVDatomicload", argLength: 2, reg: gpload, asm: "MOVD", aux: "SymOff", faultOnNilArg0: true, symEffect: "Read"},
+
+ // Atomic stores. These are just normal stores.
+ // store arg1 to arg0+auxint+aux. arg2=mem.
+ {name: "MOVBatomicstore", argLength: 3, reg: gpstore, asm: "MOVB", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVWatomicstore", argLength: 3, reg: gpstore, asm: "MOVW", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+ {name: "MOVDatomicstore", argLength: 3, reg: gpstore, asm: "MOVD", aux: "SymOff", typ: "Mem", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "Write"},
+
+ // Atomic adds.
+ // *(arg0+auxint+aux) += arg1. arg2=mem.
+ // Returns a tuple of <old contents of *(arg0+auxint+aux), memory>.
+ {name: "LAA", argLength: 3, reg: gpstorelaa, asm: "LAA", typ: "(UInt32,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LAAG", argLength: 3, reg: gpstorelaa, asm: "LAAG", typ: "(UInt64,Mem)", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "AddTupleFirst32", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+ {name: "AddTupleFirst64", argLength: 2}, // arg1=tuple <x,y>. Returns <x+arg0,y>.
+
+ // Atomic bitwise operations.
+ // Note: 'floor' operations round the pointer down to the nearest word boundary
+ // which reflects how they are used in the runtime.
+ {name: "LAN", argLength: 3, reg: gpstore, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 &= arg1. arg2 = mem.
+ {name: "LANfloor", argLength: 3, reg: gpstorelab, asm: "LAN", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) &= arg1. arg2 = mem.
+ {name: "LAO", argLength: 3, reg: gpstore, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *arg0 |= arg1. arg2 = mem.
+ {name: "LAOfloor", argLength: 3, reg: gpstorelab, asm: "LAO", typ: "Mem", clobberFlags: true, hasSideEffects: true}, // *(floor(arg0, 4)) |= arg1. arg2 = mem.
+
+ // Compare and swap.
+ // arg0 = pointer, arg1 = old value, arg2 = new value, arg3 = memory.
+ // if *(arg0+auxint+aux) == arg1 {
+ // *(arg0+auxint+aux) = arg2
+ // return (true, memory)
+ // } else {
+ // return (false, memory)
+ // }
+ // Note that these instructions also return the old value in arg1, but we ignore it.
+ // TODO: have these return flags instead of bool. The current system generates:
+ // CS ...
+ // MOVD $0, ret
+ // BNE 2(PC)
+ // MOVD $1, ret
+ // CMPW ret, $0
+ // BNE ...
+ // instead of just
+ // CS ...
+ // BEQ ...
+ // but we can't do that because memory-using ops can't generate flags yet
+ // (flagalloc wants to move flag-generating instructions around).
+ {name: "LoweredAtomicCas32", argLength: 4, reg: cas, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicCas64", argLength: 4, reg: cas, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+
+ // Lowered atomic swaps, emulated using compare-and-swap.
+ // store arg1 to arg0+auxint+aux, arg2=mem.
+ {name: "LoweredAtomicExchange32", argLength: 3, reg: exchange, asm: "CS", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
+ {name: "LoweredAtomicExchange64", argLength: 3, reg: exchange, asm: "CSG", aux: "SymOff", clobberFlags: true, faultOnNilArg0: true, hasSideEffects: true, symEffect: "RdWr"},
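+	// As an illustration only (not the generated code), the CS/CSG-based
+	// emulation follows the usual compare-and-swap retry loop; in Go terms,
+	// assuming "sync/atomic":
+	//
+	//	func exchange32(p *uint32, new uint32) (old uint32) {
+	//		for {
+	//			old = atomic.LoadUint32(p)
+	//			if atomic.CompareAndSwapUint32(p, old, new) {
+	//				return old
+	//			}
+	//		}
+	//	}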
+
+ // find leftmost one
+ {
+ name: "FLOGR",
+ argLength: 1,
+ reg: regInfo{inputs: gponly, outputs: []regMask{buildReg("R0")}, clobbers: buildReg("R1")},
+ asm: "FLOGR",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // population count
+ //
+ // Counts the number of ones in each byte of arg0
+ // and places the result into the corresponding byte
+ // of the result.
+ {
+ name: "POPCNT",
+ argLength: 1,
+ reg: gp11,
+ asm: "POPCNT",
+ typ: "UInt64",
+ clobberFlags: true,
+ },
+
+ // unsigned multiplication (64x64 → 128)
+ //
+ // Multiply the two 64-bit input operands together and place the 128-bit result into
+ // an even-odd register pair. The second register in the target pair also contains
+ // one of the input operands. Since we don't currently have a way to specify an
+ // even-odd register pair we hardcode this register pair as R2:R3.
+ {
+ name: "MLGR",
+ argLength: 2,
+ reg: regInfo{inputs: []regMask{gp, r3}, outputs: []regMask{r2, r3}},
+ asm: "MLGR",
+ },
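+	// For illustration only: the value MLGR computes corresponds to Go's
+	// math/bits.Mul64, with the high half of the product placed in R2 and
+	// the low half in R3:
+	//
+	//	hi, lo := bits.Mul64(a, b) // full 128-bit product of two uint64 operands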
+
+ // pseudo operations to sum the output of the POPCNT instruction
+ {name: "SumBytes2", argLength: 1, typ: "UInt8"}, // sum the rightmost 2 bytes in arg0 ignoring overflow
+ {name: "SumBytes4", argLength: 1, typ: "UInt8"}, // sum the rightmost 4 bytes in arg0 ignoring overflow
+ {name: "SumBytes8", argLength: 1, typ: "UInt8"}, // sum all the bytes in arg0 ignoring overflow
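+	// A rough sketch (assuming "math/bits"; illustrative only, not the
+	// generated code) of how the byte-wise POPCNT result and SumBytes8
+	// combine into a full 64-bit population count:
+	//
+	//	func popcount64(x uint64) uint64 {
+	//		var perByte uint64 // POPCNT: a ones count in each byte of x
+	//		for i := 0; i < 64; i += 8 {
+	//			perByte |= uint64(bits.OnesCount8(uint8(x>>i))) << i
+	//		}
+	//		var sum uint64 // SumBytes8: add all the bytes, ignoring overflow
+	//		for i := 0; i < 64; i += 8 {
+	//			sum += (perByte >> i) & 0xff
+	//		}
+	//		return sum
+	//	}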
+
+ // store multiple
+ {
+ name: "STMG2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STMG4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMG",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM2",
+ argLength: 4,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM3",
+ argLength: 5,
+ reg: regInfo{inputs: []regMask{ptrsp, buildReg("R1"), buildReg("R2"), buildReg("R3"), 0}},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+ {
+ name: "STM4",
+ argLength: 6,
+ reg: regInfo{inputs: []regMask{
+ ptrsp,
+ buildReg("R1"),
+ buildReg("R2"),
+ buildReg("R3"),
+ buildReg("R4"),
+ 0,
+ }},
+ aux: "SymOff",
+ typ: "Mem",
+ asm: "STMY",
+ faultOnNilArg0: true,
+ symEffect: "Write",
+ clobberFlags: true, // TODO(mundaym): currently uses AGFI to handle large offsets
+ },
+
+ // large move
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = address of src memory (in R2, changed as a side effect)
+ // arg2 = pointer to last address to move in loop + 256
+ // arg3 = mem
+ // returns mem
+ //
+ // mvc: MVC $256, 0(R2), 0(R1)
+ // MOVD $256(R1), R1
+ // MOVD $256(R2), R2
+ // CMP R2, Rarg2
+ // BNE mvc
+ // MVC $rem, 0(R2), 0(R1) // if rem > 0
+ {
+ name: "LoweredMove",
+ aux: "Int64",
+ argLength: 4,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), buildReg("R2"), gpsp},
+ clobbers: buildReg("R1 R2"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ },
+
+ // large clear
+ // auxint = remaining bytes after loop (rem)
+ // arg0 = address of dst memory (in R1, changed as a side effect)
+ // arg1 = pointer to last address to zero in loop + 256
+ // arg2 = mem
+ // returns mem
+ //
+ // clear: CLEAR $256, 0(R1)
+ // MOVD $256(R1), R1
+ // CMP R1, Rarg2
+ // BNE clear
+ // CLEAR $rem, 0(R1) // if rem > 0
+ {
+ name: "LoweredZero",
+ aux: "Int64",
+ argLength: 3,
+ reg: regInfo{
+ inputs: []regMask{buildReg("R1"), gpsp},
+ clobbers: buildReg("R1"),
+ },
+ clobberFlags: true,
+ typ: "Mem",
+ faultOnNilArg0: true,
+ },
+ }
+
+ // All blocks on s390x have their condition code mask (s390x.CCMask) as the Aux value.
+ // The condition code mask is a 4-bit mask where each bit corresponds to a condition
+ // code value. If the value of the condition code matches a bit set in the condition
+ // code mask then the first successor is executed. Otherwise the second successor is
+ // executed.
+ //
+ // | condition code value | mask bit |
+ // +----------------------+------------+
+ // | 0 (equal) | 0b1000 (8) |
+ // | 1 (less than) | 0b0100 (4) |
+ // | 2 (greater than) | 0b0010 (2) |
+ // | 3 (unordered) | 0b0001 (1) |
+ //
+	// Note that compare-and-branch instructions must not have bit 3 (0b0001) set.
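+	//
+	// For example (illustrative Go, not generated code), a successor is
+	// chosen from the condition code and the mask like this:
+	//
+	//	func firstSuccessorTaken(cc, mask uint8) bool {
+	//		return mask&(0b1000>>cc) != 0 // cc is 0..3, per the table above
+	//	}
+	//
+	// A BRC with mask 0b1010 therefore takes the first successor when the
+	// condition code is 0 (equal) or 2 (greater than).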
+ var S390Xblocks = []blockData{
+ // branch on condition
+ {name: "BRC", controls: 1, aux: "S390XCCMask"}, // condition code value (flags) is Controls[0]
+
+ // compare-and-branch (register-register)
+ // - integrates comparison of Controls[0] with Controls[1]
+ // - both control values must be in general purpose registers
+ {name: "CRJ", controls: 2, aux: "S390XCCMask"}, // signed 32-bit integer comparison
+ {name: "CGRJ", controls: 2, aux: "S390XCCMask"}, // signed 64-bit integer comparison
+ {name: "CLRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 32-bit integer comparison
+ {name: "CLGRJ", controls: 2, aux: "S390XCCMask"}, // unsigned 64-bit integer comparison
+
+ // compare-and-branch (register-immediate)
+ // - integrates comparison of Controls[0] with AuxInt
+ // - control value must be in a general purpose register
+ // - the AuxInt value is sign-extended for signed comparisons
+ // and zero-extended for unsigned comparisons
+ {name: "CIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 32-bit integer comparison
+ {name: "CGIJ", controls: 1, aux: "S390XCCMaskInt8"}, // signed 64-bit integer comparison
+ {name: "CLIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 32-bit integer comparison
+ {name: "CLGIJ", controls: 1, aux: "S390XCCMaskUint8"}, // unsigned 64-bit integer comparison
+ }
+
+ archs = append(archs, arch{
+ name: "S390X",
+ pkg: "cmd/internal/obj/s390x",
+ genfile: "../../s390x/ssa.go",
+ ops: S390Xops,
+ blocks: S390Xblocks,
+ regnames: regNamesS390X,
+ gpregmask: gp,
+ fpregmask: fp,
+ framepointerreg: -1, // not used
+ linkreg: int8(num["R14"]),
+ imports: []string{
+ "cmd/internal/obj/s390x",
+ },
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/Wasm.rules b/src/cmd/compile/internal/ssa/gen/Wasm.rules
new file mode 100644
index 0000000..9e683b1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/Wasm.rules
@@ -0,0 +1,411 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Lowering arithmetic
+(Add(64|32|16|8|Ptr) ...) => (I64Add ...)
+(Add(64|32)F ...) => (F(64|32)Add ...)
+
+(Sub(64|32|16|8|Ptr) ...) => (I64Sub ...)
+(Sub(64|32)F ...) => (F(64|32)Sub ...)
+
+(Mul(64|32|16|8) ...) => (I64Mul ...)
+(Mul(64|32)F ...) => (F(64|32)Mul ...)
+
+(Div64 [false] x y) => (I64DivS x y)
+(Div32 [false] x y) => (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+(Div16 [false] x y) => (I64DivS (SignExt16to64 x) (SignExt16to64 y))
+(Div8 x y) => (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+(Div64u ...) => (I64DivU ...)
+(Div32u x y) => (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Div16u x y) => (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Div8u x y) => (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Div(64|32)F ...) => (F(64|32)Div ...)
+
+(Mod64 [false] x y) => (I64RemS x y)
+(Mod32 [false] x y) => (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+(Mod16 [false] x y) => (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+(Mod8 x y) => (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+(Mod64u ...) => (I64RemU ...)
+(Mod32u x y) => (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Mod16u x y) => (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Mod8u x y) => (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+
+(And(64|32|16|8|B) ...) => (I64And ...)
+
+(Or(64|32|16|8|B) ...) => (I64Or ...)
+
+(Xor(64|32|16|8) ...) => (I64Xor ...)
+
+(Neg(64|32|16|8) x) => (I64Sub (I64Const [0]) x)
+(Neg(64|32)F ...) => (F(64|32)Neg ...)
+
+(Com(64|32|16|8) x) => (I64Xor x (I64Const [-1]))
+
+(Not ...) => (I64Eqz ...)
+
+// Lowering pointer arithmetic
+(OffPtr ...) => (I64AddConst ...)
+
+// Lowering extension
+// It is unnecessary to extend loads
+(SignExt32to64 x:(I64Load32S _ _)) => x
+(SignExt16to(64|32) x:(I64Load16S _ _)) => x
+(SignExt8to(64|32|16) x:(I64Load8S _ _)) => x
+(ZeroExt32to64 x:(I64Load32U _ _)) => x
+(ZeroExt16to(64|32) x:(I64Load16U _ _)) => x
+(ZeroExt8to(64|32|16) x:(I64Load8U _ _)) => x
+(SignExt32to64 x) && buildcfg.GOWASM.SignExt => (I64Extend32S x)
+(SignExt8to(64|32|16) x) && buildcfg.GOWASM.SignExt => (I64Extend8S x)
+(SignExt16to(64|32) x) && buildcfg.GOWASM.SignExt => (I64Extend16S x)
+(SignExt32to64 x) => (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+(SignExt16to(64|32) x) => (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+(SignExt8to(64|32|16) x) => (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+(ZeroExt32to64 x) => (I64And x (I64Const [0xffffffff]))
+(ZeroExt16to(64|32) x) => (I64And x (I64Const [0xffff]))
+(ZeroExt8to(64|32|16) x) => (I64And x (I64Const [0xff]))
+
+(Slicemask x) => (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+
+// Lowering truncation
+// Because we ignore the high parts, truncates are just copies.
+(Trunc64to(32|16|8) ...) => (Copy ...)
+(Trunc32to(16|8) ...) => (Copy ...)
+(Trunc16to8 ...) => (Copy ...)
+
+// Lowering float <=> int
+(Cvt32to(64|32)F x) => (F(64|32)ConvertI64S (SignExt32to64 x))
+(Cvt64to(64|32)F ...) => (F(64|32)ConvertI64S ...)
+(Cvt32Uto(64|32)F x) => (F(64|32)ConvertI64U (ZeroExt32to64 x))
+(Cvt64Uto(64|32)F ...) => (F(64|32)ConvertI64U ...)
+
+(Cvt32Fto32 ...) => (I64TruncSatF32S ...)
+(Cvt32Fto64 ...) => (I64TruncSatF32S ...)
+(Cvt64Fto32 ...) => (I64TruncSatF64S ...)
+(Cvt64Fto64 ...) => (I64TruncSatF64S ...)
+(Cvt32Fto32U ...) => (I64TruncSatF32U ...)
+(Cvt32Fto64U ...) => (I64TruncSatF32U ...)
+(Cvt64Fto32U ...) => (I64TruncSatF64U ...)
+(Cvt64Fto64U ...) => (I64TruncSatF64U ...)
+
+(Cvt32Fto64F ...) => (F64PromoteF32 ...)
+(Cvt64Fto32F ...) => (F32DemoteF64 ...)
+
+(CvtBoolToUint8 ...) => (Copy ...)
+
+(Round32F ...) => (Copy ...)
+(Round64F ...) => (Copy ...)
+
+// Lowering shifts
+// Unsigned shifts need to return 0 if shift amount is >= width of shifted value.
+
+(Lsh64x64 x y) && shiftIsBounded(v) => (I64Shl x y)
+(Lsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64Shl x (I64Const [c]))
+(Lsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Lsh64x64 x y) => (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Lsh64x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
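+// WebAssembly's shift instructions take the count modulo 64, so the bound has
+// to be explicit. The Select-based lowering above is, in Go terms
+// (illustrative only):
+//
+//	if y < 64 {
+//		r = x << y
+//	} else {
+//		r = 0
+//	}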
+
+(Lsh32x64 ...) => (Lsh64x64 ...)
+(Lsh32x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh16x64 ...) => (Lsh64x64 ...)
+(Lsh16x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Lsh8x64 ...) => (Lsh64x64 ...)
+(Lsh8x(32|16|8) [c] x y) => (Lsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh64Ux64 x y) && shiftIsBounded(v) => (I64ShrU x y)
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrU x (I64Const [c]))
+(Rsh64Ux64 x (I64Const [c])) && uint64(c) >= 64 => (I64Const [0])
+(Rsh64Ux64 x y) => (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+(Rsh64Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] x (ZeroExt(32|16|8)to64 y))
+
+(Rsh32Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+(Rsh32Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+(Rsh16Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8Ux64 [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+(Rsh8Ux(32|16|8) [c] x y) => (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Signed right shift needs to return 0/-1 if shift amount is >= width of shifted value.
+// We implement this by setting the shift value to (width - 1) if the shift value is >= width.
+
+(Rsh64x64 x y) && shiftIsBounded(v) => (I64ShrS x y)
+(Rsh64x64 x (I64Const [c])) && uint64(c) < 64 => (I64ShrS x (I64Const [c]))
+(Rsh64x64 x (I64Const [c])) && uint64(c) >= 64 => (I64ShrS x (I64Const [63]))
+(Rsh64x64 x y) => (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
+(Rsh64x(32|16|8) [c] x y) => (Rsh64x64 [c] x (ZeroExt(32|16|8)to64 y))
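+// In Go terms the clamping above is (illustrative only):
+//
+//	if y >= 64 {
+//		y = 63 // shifting right by 63 leaves only the sign: 0 or -1
+//	}
+//	r = x >> y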
+
+(Rsh32x64 [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) y)
+(Rsh32x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh16x64 [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) y)
+(Rsh16x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt(32|16|8)to64 y))
+
+(Rsh8x64 [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) y)
+(Rsh8x(32|16|8) [c] x y) => (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt(32|16|8)to64 y))
+
+// Lowering rotates
+(RotateLeft8 <t> x (I64Const [c])) => (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+(RotateLeft16 <t> x (I64Const [c])) => (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
+(RotateLeft32 ...) => (I32Rotl ...)
+(RotateLeft64 ...) => (I64Rotl ...)
+
+// Lowering comparisons
+(Less64 ...) => (I64LtS ...)
+(Less32 x y) => (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+(Less16 x y) => (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+(Less8 x y) => (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+(Less64U ...) => (I64LtU ...)
+(Less32U x y) => (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Less16U x y) => (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Less8U x y) => (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Less(64|32)F ...) => (F(64|32)Lt ...)
+
+(Leq64 ...) => (I64LeS ...)
+(Leq32 x y) => (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+(Leq16 x y) => (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+(Leq8 x y) => (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+(Leq64U ...) => (I64LeU ...)
+(Leq32U x y) => (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Leq16U x y) => (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Leq8U x y) => (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+(Leq(64|32)F ...) => (F(64|32)Le ...)
+
+(Eq64 ...) => (I64Eq ...)
+(Eq32 x y) => (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Eq16 x y) => (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Eq8 x y) => (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+(EqB ...) => (I64Eq ...)
+(EqPtr ...) => (I64Eq ...)
+(Eq(64|32)F ...) => (F(64|32)Eq ...)
+
+(Neq64 ...) => (I64Ne ...)
+(Neq32 x y) => (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+(Neq16 x y) => (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+(Neq8 x y) => (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+(NeqB ...) => (I64Ne ...)
+(NeqPtr ...) => (I64Ne ...)
+(Neq(64|32)F ...) => (F(64|32)Ne ...)
+
+// Lowering loads
+(Load <t> ptr mem) && is32BitFloat(t) => (F32Load ptr mem)
+(Load <t> ptr mem) && is64BitFloat(t) => (F64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 8 => (I64Load ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && !t.IsSigned() => (I64Load32U ptr mem)
+(Load <t> ptr mem) && t.Size() == 4 && t.IsSigned() => (I64Load32S ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && !t.IsSigned() => (I64Load16U ptr mem)
+(Load <t> ptr mem) && t.Size() == 2 && t.IsSigned() => (I64Load16S ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && !t.IsSigned() => (I64Load8U ptr mem)
+(Load <t> ptr mem) && t.Size() == 1 && t.IsSigned() => (I64Load8S ptr mem)
+
+// Lowering stores
+(Store {t} ptr val mem) && is64BitFloat(t) => (F64Store ptr val mem)
+(Store {t} ptr val mem) && is32BitFloat(t) => (F32Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 8 => (I64Store ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 4 => (I64Store32 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 2 => (I64Store16 ptr val mem)
+(Store {t} ptr val mem) && t.Size() == 1 => (I64Store8 ptr val mem)
+
+// Lowering moves
+(Move [0] _ _ mem) => mem
+(Move [1] dst src mem) => (I64Store8 dst (I64Load8U src mem) mem)
+(Move [2] dst src mem) => (I64Store16 dst (I64Load16U src mem) mem)
+(Move [4] dst src mem) => (I64Store32 dst (I64Load32U src mem) mem)
+(Move [8] dst src mem) => (I64Store dst (I64Load src mem) mem)
+(Move [16] dst src mem) =>
+ (I64Store [8] dst (I64Load [8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+(Move [3] dst src mem) =>
+ (I64Store8 [2] dst (I64Load8U [2] src mem)
+ (I64Store16 dst (I64Load16U src mem) mem))
+(Move [5] dst src mem) =>
+ (I64Store8 [4] dst (I64Load8U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [6] dst src mem) =>
+ (I64Store16 [4] dst (I64Load16U [4] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [7] dst src mem) =>
+ (I64Store32 [3] dst (I64Load32U [3] src mem)
+ (I64Store32 dst (I64Load32U src mem) mem))
+(Move [s] dst src mem) && s > 8 && s < 16 =>
+ (I64Store [s-8] dst (I64Load [s-8] src mem)
+ (I64Store dst (I64Load src mem) mem))
+
+// Adjust moves to be a multiple of 16 bytes.
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 <= 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (I64Store dst (I64Load src mem) mem))
+(Move [s] dst src mem)
+ && s > 16 && s%16 != 0 && s%16 > 8 =>
+ (Move [s-s%16]
+ (OffPtr <dst.Type> dst [s%16])
+ (OffPtr <src.Type> src [s%16])
+ (I64Store [8] dst (I64Load [8] src mem)
+ (I64Store dst (I64Load src mem) mem)))
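+// For example, Move [21] becomes an 8-byte copy of bytes [0,8) wrapped by a
+// Move [16] at offset 5 (bytes [5,21)); the overlap is harmless and leaves the
+// remaining move length a multiple of 16.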
+
+// Large copying uses helper.
+(Move [s] dst src mem) && s%8 == 0 && logLargeCopy(v, s) =>
+ (LoweredMove [s/8] dst src mem)
+
+// Lowering Zero instructions
+(Zero [0] _ mem) => mem
+(Zero [1] destptr mem) => (I64Store8 destptr (I64Const [0]) mem)
+(Zero [2] destptr mem) => (I64Store16 destptr (I64Const [0]) mem)
+(Zero [4] destptr mem) => (I64Store32 destptr (I64Const [0]) mem)
+(Zero [8] destptr mem) => (I64Store destptr (I64Const [0]) mem)
+
+(Zero [3] destptr mem) =>
+ (I64Store8 [2] destptr (I64Const [0])
+ (I64Store16 destptr (I64Const [0]) mem))
+(Zero [5] destptr mem) =>
+ (I64Store8 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [6] destptr mem) =>
+ (I64Store16 [4] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+(Zero [7] destptr mem) =>
+ (I64Store32 [3] destptr (I64Const [0])
+ (I64Store32 destptr (I64Const [0]) mem))
+
+// Strip off any fractional word zeroing.
+(Zero [s] destptr mem) && s%8 != 0 && s > 8 =>
+ (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8])
+ (I64Store destptr (I64Const [0]) mem))
+
+// Zero small numbers of words directly.
+(Zero [16] destptr mem) =>
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))
+(Zero [24] destptr mem) =>
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem)))
+(Zero [32] destptr mem) =>
+ (I64Store [24] destptr (I64Const [0])
+ (I64Store [16] destptr (I64Const [0])
+ (I64Store [8] destptr (I64Const [0])
+ (I64Store destptr (I64Const [0]) mem))))
+
+// Large zeroing uses helper.
+(Zero [s] destptr mem) && s%8 == 0 && s > 32 =>
+ (LoweredZero [s/8] destptr mem)
+
+// Lowering constants
+(Const64 ...) => (I64Const ...)
+(Const(32|16|8) [c]) => (I64Const [int64(c)])
+(Const(64|32)F ...) => (F(64|32)Const ...)
+(ConstNil) => (I64Const [0])
+(ConstBool [c]) => (I64Const [b2i(c)])
+
+// Lowering calls
+(StaticCall ...) => (LoweredStaticCall ...)
+(ClosureCall ...) => (LoweredClosureCall ...)
+(InterCall ...) => (LoweredInterCall ...)
+(TailCall ...) => (LoweredTailCall ...)
+
+// Miscellaneous
+(Convert ...) => (LoweredConvert ...)
+(IsNonNil p) => (I64Eqz (I64Eqz p))
+(IsInBounds ...) => (I64LtU ...)
+(IsSliceInBounds ...) => (I64LeU ...)
+(NilCheck ...) => (LoweredNilCheck ...)
+(GetClosurePtr ...) => (LoweredGetClosurePtr ...)
+(GetCallerPC ...) => (LoweredGetCallerPC ...)
+(GetCallerSP ...) => (LoweredGetCallerSP ...)
+(Addr {sym} base) => (LoweredAddr {sym} [0] base)
+(LocalAddr {sym} base _) => (LoweredAddr {sym} base)
+
+// Write barrier.
+(WB ...) => (LoweredWB ...)
+
+// --- Intrinsics ---
+(Sqrt ...) => (F64Sqrt ...)
+(Trunc ...) => (F64Trunc ...)
+(Ceil ...) => (F64Ceil ...)
+(Floor ...) => (F64Floor ...)
+(RoundToEven ...) => (F64Nearest ...)
+(Abs ...) => (F64Abs ...)
+(Copysign ...) => (F64Copysign ...)
+
+(Sqrt32 ...) => (F32Sqrt ...)
+
+(Ctz64 ...) => (I64Ctz ...)
+(Ctz32 x) => (I64Ctz (I64Or x (I64Const [0x100000000])))
+(Ctz16 x) => (I64Ctz (I64Or x (I64Const [0x10000])))
+(Ctz8 x) => (I64Ctz (I64Or x (I64Const [0x100])))
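+// ORing in a bit just above the operand width makes I64Ctz return the operand
+// width for a zero input; e.g. for Ctz8, in Go terms (illustrative only,
+// assuming "math/bits"):
+//
+//	r := bits.TrailingZeros64(uint64(x) | 0x100) // 8 when x == 0, otherwise the same as for x alone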
+
+(Ctz(64|32|16|8)NonZero ...) => (I64Ctz ...)
+
+(BitLen64 x) => (I64Sub (I64Const [64]) (I64Clz x))
+
+(PopCount64 ...) => (I64Popcnt ...)
+(PopCount32 x) => (I64Popcnt (ZeroExt32to64 x))
+(PopCount16 x) => (I64Popcnt (ZeroExt16to64 x))
+(PopCount8 x) => (I64Popcnt (ZeroExt8to64 x))
+
+(CondSelect ...) => (Select ...)
+
+// --- Optimizations ---
+(I64Add (I64Const [x]) (I64Const [y])) => (I64Const [x + y])
+(I64Mul (I64Const [x]) (I64Const [y])) => (I64Const [x * y])
+(I64And (I64Const [x]) (I64Const [y])) => (I64Const [x & y])
+(I64Or (I64Const [x]) (I64Const [y])) => (I64Const [x | y])
+(I64Xor (I64Const [x]) (I64Const [y])) => (I64Const [x ^ y])
+(F64Add (F64Const [x]) (F64Const [y])) => (F64Const [x + y])
+(F64Mul (F64Const [x]) (F64Const [y])) && !math.IsNaN(x * y) => (F64Const [x * y])
+(I64Eq (I64Const [x]) (I64Const [y])) && x == y => (I64Const [1])
+(I64Eq (I64Const [x]) (I64Const [y])) && x != y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x == y => (I64Const [0])
+(I64Ne (I64Const [x]) (I64Const [y])) && x != y => (I64Const [1])
+
+(I64Shl (I64Const [x]) (I64Const [y])) => (I64Const [x << uint64(y)])
+(I64ShrU (I64Const [x]) (I64Const [y])) => (I64Const [int64(uint64(x) >> uint64(y))])
+(I64ShrS (I64Const [x]) (I64Const [y])) => (I64Const [x >> uint64(y)])
+
+// TODO: declare these operations as commutative and get rid of these rules?
+(I64Add (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Add y (I64Const [x]))
+(I64Mul (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Mul y (I64Const [x]))
+(I64And (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64And y (I64Const [x]))
+(I64Or (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Or y (I64Const [x]))
+(I64Xor (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Xor y (I64Const [x]))
+(F64Add (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Add y (F64Const [x]))
+(F64Mul (F64Const [x]) y) && y.Op != OpWasmF64Const => (F64Mul y (F64Const [x]))
+(I64Eq (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Eq y (I64Const [x]))
+(I64Ne (I64Const [x]) y) && y.Op != OpWasmI64Const => (I64Ne y (I64Const [x]))
+
+(I64Eq x (I64Const [0])) => (I64Eqz x)
+(I64LtU (I64Const [0]) x) => (I64Eqz (I64Eqz x))
+(I64LeU x (I64Const [0])) => (I64Eqz x)
+(I64LtU x (I64Const [1])) => (I64Eqz x)
+(I64LeU (I64Const [1]) x) => (I64Eqz (I64Eqz x))
+(I64Ne x (I64Const [0])) => (I64Eqz (I64Eqz x))
+
+(I64Add x (I64Const [y])) => (I64AddConst [y] x)
+(I64AddConst [0] x) => x
+(I64Eqz (I64Eqz (I64Eqz x))) => (I64Eqz x)
+
+// folding offset into load/store
+((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off] (I64AddConst [off2] ptr) mem)
+ && isU32Bit(off+off2) =>
+ ((I64Load|I64Load32U|I64Load32S|I64Load16U|I64Load16S|I64Load8U|I64Load8S) [off+off2] ptr mem)
+
+((I64Store|I64Store32|I64Store16|I64Store8) [off] (I64AddConst [off2] ptr) val mem)
+ && isU32Bit(off+off2) =>
+ ((I64Store|I64Store32|I64Store16|I64Store8) [off+off2] ptr val mem)
+
+// folding offset into address
+(I64AddConst [off] (LoweredAddr {sym} [off2] base)) && isU32Bit(off+int64(off2)) =>
+ (LoweredAddr {sym} [int32(off)+off2] base)
+(I64AddConst [off] x:(SP)) && isU32Bit(off) => (LoweredAddr [int32(off)] x) // so it is rematerializeable
+
+// transforming readonly globals into constants
+(I64Load [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+(I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _) && symIsRO(sym) && isU32Bit(off+int64(off2)) => (I64Const [int64(read8(sym, off+int64(off2)))])
diff --git a/src/cmd/compile/internal/ssa/gen/WasmOps.go b/src/cmd/compile/internal/ssa/gen/WasmOps.go
new file mode 100644
index 0000000..edfba4e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/WasmOps.go
@@ -0,0 +1,280 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+import "strings"
+
+var regNamesWasm = []string{
+ "R0",
+ "R1",
+ "R2",
+ "R3",
+ "R4",
+ "R5",
+ "R6",
+ "R7",
+ "R8",
+ "R9",
+ "R10",
+ "R11",
+ "R12",
+ "R13",
+ "R14",
+ "R15",
+
+ "F0",
+ "F1",
+ "F2",
+ "F3",
+ "F4",
+ "F5",
+ "F6",
+ "F7",
+ "F8",
+ "F9",
+ "F10",
+ "F11",
+ "F12",
+ "F13",
+ "F14",
+ "F15",
+
+ "F16",
+ "F17",
+ "F18",
+ "F19",
+ "F20",
+ "F21",
+ "F22",
+ "F23",
+ "F24",
+ "F25",
+ "F26",
+ "F27",
+ "F28",
+ "F29",
+ "F30",
+ "F31",
+
+ "SP",
+ "g",
+
+ // pseudo-registers
+ "SB",
+}
+
+func init() {
+ // Make map from reg names to reg integers.
+ if len(regNamesWasm) > 64 {
+ panic("too many registers")
+ }
+ num := map[string]int{}
+ for i, name := range regNamesWasm {
+ num[name] = i
+ }
+ buildReg := func(s string) regMask {
+ m := regMask(0)
+ for _, r := range strings.Split(s, " ") {
+ if n, ok := num[r]; ok {
+ m |= regMask(1) << uint(n)
+ continue
+ }
+ panic("register " + r + " not found")
+ }
+ return m
+ }
+
+ var (
+ gp = buildReg("R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15")
+ fp32 = buildReg("F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15")
+ fp64 = buildReg("F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31")
+ gpsp = gp | buildReg("SP")
+ gpspsb = gpsp | buildReg("SB")
+ // The "registers", which are actually local variables, can get clobbered
+ // if we're switching goroutines, because it unwinds the WebAssembly stack.
+ callerSave = gp | fp32 | fp64 | buildReg("g")
+ )
+
+ // Common regInfo
+ var (
+ gp01 = regInfo{inputs: nil, outputs: []regMask{gp}}
+ gp11 = regInfo{inputs: []regMask{gpsp}, outputs: []regMask{gp}}
+ gp21 = regInfo{inputs: []regMask{gpsp, gpsp}, outputs: []regMask{gp}}
+ gp31 = regInfo{inputs: []regMask{gpsp, gpsp, gpsp}, outputs: []regMask{gp}}
+ fp32_01 = regInfo{inputs: nil, outputs: []regMask{fp32}}
+ fp32_11 = regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp32}}
+ fp32_21 = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{fp32}}
+ fp32_21gp = regInfo{inputs: []regMask{fp32, fp32}, outputs: []regMask{gp}}
+ fp64_01 = regInfo{inputs: nil, outputs: []regMask{fp64}}
+ fp64_11 = regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp64}}
+ fp64_21 = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{fp64}}
+ fp64_21gp = regInfo{inputs: []regMask{fp64, fp64}, outputs: []regMask{gp}}
+ gpload = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{gp}}
+ gpstore = regInfo{inputs: []regMask{gpspsb, gpsp, 0}}
+ fp32load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp32}}
+ fp32store = regInfo{inputs: []regMask{gpspsb, fp32, 0}}
+ fp64load = regInfo{inputs: []regMask{gpspsb, 0}, outputs: []regMask{fp64}}
+ fp64store = regInfo{inputs: []regMask{gpspsb, fp64, 0}}
+ )
+
+ var WasmOps = []opData{
+ {name: "LoweredStaticCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true}, // call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredTailCall", argLength: 1, reg: regInfo{clobbers: callerSave}, aux: "CallOff", call: true, tailCall: true}, // tail call static function aux.(*obj.LSym). arg0=mem, auxint=argsize, returns mem
+ {name: "LoweredClosureCall", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp, 0}, clobbers: callerSave}, aux: "CallOff", call: true}, // call function via closure. arg0=codeptr, arg1=closure, arg2=mem, auxint=argsize, returns mem
+ {name: "LoweredInterCall", argLength: 2, reg: regInfo{inputs: []regMask{gp}, clobbers: callerSave}, aux: "CallOff", call: true}, // call fn by pointer. arg0=codeptr, arg1=mem, auxint=argsize, returns mem
+
+ {name: "LoweredAddr", argLength: 1, reg: gp11, aux: "SymOff", rematerializeable: true, symEffect: "Addr"}, // returns base+aux+auxint, arg0=base
+ {name: "LoweredMove", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Int64"}, // large move. arg0=dst, arg1=src, arg2=mem, auxint=len/8, returns mem
+ {name: "LoweredZero", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, aux: "Int64"}, // large zeroing. arg0=start, arg1=mem, auxint=len/8, returns mem
+
+ {name: "LoweredGetClosurePtr", reg: gp01}, // returns wasm.REG_CTXT, the closure pointer
+ {name: "LoweredGetCallerPC", reg: gp01, rematerializeable: true}, // returns the PC of the caller of the current function
+ {name: "LoweredGetCallerSP", reg: gp01, rematerializeable: true}, // returns the SP of the caller of the current function
+ {name: "LoweredNilCheck", argLength: 2, reg: regInfo{inputs: []regMask{gp}}, nilCheck: true, faultOnNilArg0: true}, // panic if arg0 is nil. arg1=mem
+ {name: "LoweredWB", argLength: 3, reg: regInfo{inputs: []regMask{gp, gp}}, aux: "Sym", symEffect: "None"}, // invokes runtime.gcWriteBarrier. arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+
+ // LoweredConvert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GCCallOff
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ //
+ // TODO(neelance): LoweredConvert should not be necessary any more, since OpConvert does not need to be lowered any more (CL 108496).
+ {name: "LoweredConvert", argLength: 2, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{gp}}},
+
+ // The following are native WebAssembly instructions, see https://webassembly.github.io/spec/core/syntax/instructions.html
+
+ {name: "Select", asm: "Select", argLength: 3, reg: gp31}, // returns arg0 if arg2 != 0, otherwise returns arg1
+
+ {name: "I64Load8U", asm: "I64Load8U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt8"}, // read unsigned 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load8S", asm: "I64Load8S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int8"}, // read signed 8-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16U", asm: "I64Load16U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt16"}, // read unsigned 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load16S", asm: "I64Load16S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int16"}, // read signed 16-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32U", asm: "I64Load32U", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt32"}, // read unsigned 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load32S", asm: "I64Load32S", argLength: 2, reg: gpload, aux: "Int64", typ: "Int32"}, // read signed 32-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Load", asm: "I64Load", argLength: 2, reg: gpload, aux: "Int64", typ: "UInt64"}, // read 64-bit integer from address arg0+aux, arg1=mem
+ {name: "I64Store8", asm: "I64Store8", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 8-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store16", asm: "I64Store16", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 16-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store32", asm: "I64Store32", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 32-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "I64Store", asm: "I64Store", argLength: 3, reg: gpstore, aux: "Int64", typ: "Mem"}, // store 64-bit integer arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "F32Load", asm: "F32Load", argLength: 2, reg: fp32load, aux: "Int64", typ: "Float32"}, // read 32-bit float from address arg0+aux, arg1=mem
+ {name: "F64Load", asm: "F64Load", argLength: 2, reg: fp64load, aux: "Int64", typ: "Float64"}, // read 64-bit float from address arg0+aux, arg1=mem
+ {name: "F32Store", asm: "F32Store", argLength: 3, reg: fp32store, aux: "Int64", typ: "Mem"}, // store 32-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+ {name: "F64Store", asm: "F64Store", argLength: 3, reg: fp64store, aux: "Int64", typ: "Mem"}, // store 64-bit float arg1 at address arg0+aux, arg2=mem, returns mem
+
+ {name: "I64Const", reg: gp01, aux: "Int64", rematerializeable: true, typ: "Int64"}, // returns the constant integer aux
+ {name: "F32Const", reg: fp32_01, aux: "Float32", rematerializeable: true, typ: "Float32"}, // returns the constant float aux
+ {name: "F64Const", reg: fp64_01, aux: "Float64", rematerializeable: true, typ: "Float64"}, // returns the constant float aux
+
+ {name: "I64Eqz", asm: "I64Eqz", argLength: 1, reg: gp11, typ: "Bool"}, // arg0 == 0
+ {name: "I64Eq", asm: "I64Eq", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 == arg1
+ {name: "I64Ne", asm: "I64Ne", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 != arg1
+ {name: "I64LtS", asm: "I64LtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (signed)
+ {name: "I64LtU", asm: "I64LtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 < arg1 (unsigned)
+ {name: "I64GtS", asm: "I64GtS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (signed)
+ {name: "I64GtU", asm: "I64GtU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 > arg1 (unsigned)
+ {name: "I64LeS", asm: "I64LeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (signed)
+ {name: "I64LeU", asm: "I64LeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 <= arg1 (unsigned)
+ {name: "I64GeS", asm: "I64GeS", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (signed)
+ {name: "I64GeU", asm: "I64GeU", argLength: 2, reg: gp21, typ: "Bool"}, // arg0 >= arg1 (unsigned)
+
+ {name: "F32Eq", asm: "F32Eq", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F32Ne", asm: "F32Ne", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F32Lt", asm: "F32Lt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F32Gt", asm: "F32Gt", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F32Le", asm: "F32Le", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F32Ge", asm: "F32Ge", argLength: 2, reg: fp32_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "F64Eq", asm: "F64Eq", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 == arg1
+ {name: "F64Ne", asm: "F64Ne", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 != arg1
+ {name: "F64Lt", asm: "F64Lt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 < arg1
+ {name: "F64Gt", asm: "F64Gt", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 > arg1
+ {name: "F64Le", asm: "F64Le", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 <= arg1
+ {name: "F64Ge", asm: "F64Ge", argLength: 2, reg: fp64_21gp, typ: "Bool"}, // arg0 >= arg1
+
+ {name: "I64Add", asm: "I64Add", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 + arg1
+ {name: "I64AddConst", asm: "I64Add", argLength: 1, reg: gp11, aux: "Int64", typ: "Int64"}, // arg0 + aux
+ {name: "I64Sub", asm: "I64Sub", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 - arg1
+ {name: "I64Mul", asm: "I64Mul", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 * arg1
+ {name: "I64DivS", asm: "I64DivS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (signed)
+ {name: "I64DivU", asm: "I64DivU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 / arg1 (unsigned)
+ {name: "I64RemS", asm: "I64RemS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (signed)
+ {name: "I64RemU", asm: "I64RemU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 % arg1 (unsigned)
+ {name: "I64And", asm: "I64And", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 & arg1
+ {name: "I64Or", asm: "I64Or", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 | arg1
+ {name: "I64Xor", asm: "I64Xor", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 ^ arg1
+ {name: "I64Shl", asm: "I64Shl", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 << (arg1 % 64)
+ {name: "I64ShrS", asm: "I64ShrS", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (signed)
+ {name: "I64ShrU", asm: "I64ShrU", argLength: 2, reg: gp21, typ: "Int64"}, // arg0 >> (arg1 % 64) (unsigned)
+
+ {name: "F32Neg", asm: "F32Neg", argLength: 1, reg: fp32_11, typ: "Float32"}, // -arg0
+ {name: "F32Add", asm: "F32Add", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 + arg1
+ {name: "F32Sub", asm: "F32Sub", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 - arg1
+ {name: "F32Mul", asm: "F32Mul", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 * arg1
+ {name: "F32Div", asm: "F32Div", argLength: 2, reg: fp32_21, typ: "Float32"}, // arg0 / arg1
+
+ {name: "F64Neg", asm: "F64Neg", argLength: 1, reg: fp64_11, typ: "Float64"}, // -arg0
+ {name: "F64Add", asm: "F64Add", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 + arg1
+ {name: "F64Sub", asm: "F64Sub", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 - arg1
+ {name: "F64Mul", asm: "F64Mul", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 * arg1
+ {name: "F64Div", asm: "F64Div", argLength: 2, reg: fp64_21, typ: "Float64"}, // arg0 / arg1
+
+ {name: "I64TruncSatF64S", asm: "I64TruncSatF64S", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF64U", asm: "I64TruncSatF64U", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "I64TruncSatF32S", asm: "I64TruncSatF32S", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to a signed integer (saturating)
+ {name: "I64TruncSatF32U", asm: "I64TruncSatF32U", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{gp}}, typ: "Int64"}, // truncates the float arg0 to an unsigned integer (saturating)
+ {name: "F32ConvertI64S", asm: "F32ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the signed integer arg0 to a float
+ {name: "F32ConvertI64U", asm: "F32ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp32}}, typ: "Float32"}, // converts the unsigned integer arg0 to a float
+ {name: "F64ConvertI64S", asm: "F64ConvertI64S", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the signed integer arg0 to a float
+ {name: "F64ConvertI64U", asm: "F64ConvertI64U", argLength: 1, reg: regInfo{inputs: []regMask{gp}, outputs: []regMask{fp64}}, typ: "Float64"}, // converts the unsigned integer arg0 to a float
+ {name: "F32DemoteF64", asm: "F32DemoteF64", argLength: 1, reg: regInfo{inputs: []regMask{fp64}, outputs: []regMask{fp32}}, typ: "Float32"},
+ {name: "F64PromoteF32", asm: "F64PromoteF32", argLength: 1, reg: regInfo{inputs: []regMask{fp32}, outputs: []regMask{fp64}}, typ: "Float64"},
+
+ {name: "I64Extend8S", asm: "I64Extend8S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 8 to 64 bit
+ {name: "I64Extend16S", asm: "I64Extend16S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 16 to 64 bit
+ {name: "I64Extend32S", asm: "I64Extend32S", argLength: 1, reg: gp11, typ: "Int64"}, // sign-extend arg0 from 32 to 64 bit
+
+ {name: "F32Sqrt", asm: "F32Sqrt", argLength: 1, reg: fp32_11, typ: "Float32"}, // sqrt(arg0)
+ {name: "F32Trunc", asm: "F32Trunc", argLength: 1, reg: fp32_11, typ: "Float32"}, // trunc(arg0)
+ {name: "F32Ceil", asm: "F32Ceil", argLength: 1, reg: fp32_11, typ: "Float32"}, // ceil(arg0)
+ {name: "F32Floor", asm: "F32Floor", argLength: 1, reg: fp32_11, typ: "Float32"}, // floor(arg0)
+ {name: "F32Nearest", asm: "F32Nearest", argLength: 1, reg: fp32_11, typ: "Float32"}, // round(arg0)
+ {name: "F32Abs", asm: "F32Abs", argLength: 1, reg: fp32_11, typ: "Float32"}, // abs(arg0)
+ {name: "F32Copysign", asm: "F32Copysign", argLength: 2, reg: fp32_21, typ: "Float32"}, // copysign(arg0, arg1)
+
+ {name: "F64Sqrt", asm: "F64Sqrt", argLength: 1, reg: fp64_11, typ: "Float64"}, // sqrt(arg0)
+ {name: "F64Trunc", asm: "F64Trunc", argLength: 1, reg: fp64_11, typ: "Float64"}, // trunc(arg0)
+ {name: "F64Ceil", asm: "F64Ceil", argLength: 1, reg: fp64_11, typ: "Float64"}, // ceil(arg0)
+ {name: "F64Floor", asm: "F64Floor", argLength: 1, reg: fp64_11, typ: "Float64"}, // floor(arg0)
+ {name: "F64Nearest", asm: "F64Nearest", argLength: 1, reg: fp64_11, typ: "Float64"}, // round(arg0)
+ {name: "F64Abs", asm: "F64Abs", argLength: 1, reg: fp64_11, typ: "Float64"}, // abs(arg0)
+ {name: "F64Copysign", asm: "F64Copysign", argLength: 2, reg: fp64_21, typ: "Float64"}, // copysign(arg0, arg1)
+
+ {name: "I64Ctz", asm: "I64Ctz", argLength: 1, reg: gp11, typ: "Int64"}, // ctz(arg0)
+ {name: "I64Clz", asm: "I64Clz", argLength: 1, reg: gp11, typ: "Int64"}, // clz(arg0)
+ {name: "I32Rotl", asm: "I32Rotl", argLength: 2, reg: gp21, typ: "Int32"}, // rotl(arg0, arg1)
+ {name: "I64Rotl", asm: "I64Rotl", argLength: 2, reg: gp21, typ: "Int64"}, // rotl(arg0, arg1)
+ {name: "I64Popcnt", asm: "I64Popcnt", argLength: 1, reg: gp11, typ: "Int64"}, // popcnt(arg0)
+ }
+
+ archs = append(archs, arch{
+ name: "Wasm",
+ pkg: "cmd/internal/obj/wasm",
+ genfile: "../../wasm/ssa.go",
+ ops: WasmOps,
+ blocks: nil,
+ regnames: regNamesWasm,
+ gpregmask: gp,
+ fpregmask: fp32 | fp64,
+ fp32regmask: fp32,
+ fp64regmask: fp64,
+ framepointerreg: -1, // not used
+ linkreg: -1, // not used
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/cover.bash b/src/cmd/compile/internal/ssa/gen/cover.bash
new file mode 100755
index 0000000..6c860fc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/cover.bash
@@ -0,0 +1,26 @@
+#!/usr/bin/env bash
+# Copyright 2020 The Go Authors. All rights reserved.
+# Use of this source code is governed by a BSD-style
+# license that can be found in the LICENSE file.
+
+# A quick and dirty way to obtain code coverage from rulegen's main func. For
+# example:
+#
+# ./cover.bash && go tool cover -html=cover.out
+#
+# This script is needed to set up a temporary test file, so that we don't break
+# regular 'go run *.go' usage to run the generator.
+
+cat >main_test.go <<-EOF
+ // +build ignore
+
+ package main
+
+ import "testing"
+
+ func TestCoverage(t *testing.T) { main() }
+EOF
+
+go test -run='^TestCoverage$' -coverprofile=cover.out "$@" *.go
+
+rm -f main_test.go
diff --git a/src/cmd/compile/internal/ssa/gen/dec.rules b/src/cmd/compile/internal/ssa/gen/dec.rules
new file mode 100644
index 0000000..b194898
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec.rules
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose builtin compound types
+// (complex,string,slice,interface) into their constituent
+// types. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+// complex ops
+(ComplexReal (ComplexMake real _ )) => real
+(ComplexImag (ComplexMake _ imag )) => imag
+
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 8 =>
+ (ComplexMake
+ (Load <typ.Float32> ptr mem)
+ (Load <typ.Float32>
+ (OffPtr <typ.Float32Ptr> [4] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 8 =>
+ (Store {typ.Float32}
+ (OffPtr <typ.Float32Ptr> [4] dst)
+ imag
+ (Store {typ.Float32} dst real mem))
+(Load <t> ptr mem) && t.IsComplex() && t.Size() == 16 =>
+ (ComplexMake
+ (Load <typ.Float64> ptr mem)
+ (Load <typ.Float64>
+ (OffPtr <typ.Float64Ptr> [8] ptr)
+ mem)
+ )
+(Store {t} dst (ComplexMake real imag) mem) && t.Size() == 16 =>
+ (Store {typ.Float64}
+ (OffPtr <typ.Float64Ptr> [8] dst)
+ imag
+ (Store {typ.Float64} dst real mem))
+
+// string ops
+(StringPtr (StringMake ptr _)) => ptr
+(StringLen (StringMake _ len)) => len
+
+(Load <t> ptr mem) && t.IsString() =>
+ (StringMake
+ (Load <typ.BytePtr> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (StringMake ptr len) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {typ.BytePtr} dst ptr mem))
+
+// slice ops
+(SlicePtr (SliceMake ptr _ _ )) => ptr
+(SliceLen (SliceMake _ len _)) => len
+(SliceCap (SliceMake _ _ cap)) => cap
+(SlicePtrUnchecked (SliceMake ptr _ _ )) => ptr
+
+(Load <t> ptr mem) && t.IsSlice() =>
+ (SliceMake
+ (Load <t.Elem().PtrTo()> ptr mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [config.PtrSize] ptr)
+ mem)
+ (Load <typ.Int>
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr)
+ mem))
+(Store {t} dst (SliceMake ptr len cap) mem) =>
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [2*config.PtrSize] dst)
+ cap
+ (Store {typ.Int}
+ (OffPtr <typ.IntPtr> [config.PtrSize] dst)
+ len
+ (Store {t.Elem().PtrTo()} dst ptr mem)))
+
+// interface ops
+(ITab (IMake itab _)) => itab
+(IData (IMake _ data)) => data
+
+(Load <t> ptr mem) && t.IsInterface() =>
+ (IMake
+ (Load <typ.Uintptr> ptr mem)
+ (Load <typ.BytePtr>
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr)
+ mem))
+(Store dst (IMake itab data) mem) =>
+ (Store {typ.BytePtr}
+ (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst)
+ data
+ (Store {typ.Uintptr} dst itab mem))
diff --git a/src/cmd/compile/internal/ssa/gen/dec64.rules b/src/cmd/compile/internal/ssa/gen/dec64.rules
new file mode 100644
index 0000000..b0f10d0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64.rules
@@ -0,0 +1,396 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains rules to decompose [u]int64 types on 32-bit
+// architectures. These rules work together with the decomposeBuiltIn
+// pass which handles phis of these types.
+
+(Int64Hi (Int64Make hi _)) => hi
+(Int64Lo (Int64Make _ lo)) => lo
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && !config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem)
+ (Load <typ.UInt32> ptr mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && t.IsSigned() =>
+ (Int64Make
+ (Load <typ.Int32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Load <t> ptr mem) && is64BitInt(t) && config.BigEndian && !t.IsSigned() =>
+ (Int64Make
+ (Load <typ.UInt32> ptr mem)
+ (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && !config.BigEndian =>
+ (Store {hi.Type}
+ (OffPtr <hi.Type.PtrTo()> [4] dst)
+ hi
+ (Store {lo.Type} dst lo mem))
+
+(Store {t} dst (Int64Make hi lo) mem) && t.Size() == 8 && config.BigEndian =>
+ (Store {lo.Type}
+ (OffPtr <lo.Type.PtrTo()> [4] dst)
+ lo
+ (Store {hi.Type} dst hi mem))
+
+// These are not enabled during decomposeBuiltin when late call expansion is used, but they are always enabled for softFloat
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+(Arg {n} [off]) && is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off+4])
+ (Arg <typ.UInt32> {n} [off]))
+
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.Int32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+(Arg {n} [off]) && is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin") =>
+ (Int64Make
+ (Arg <typ.UInt32> {n} [off])
+ (Arg <typ.UInt32> {n} [off+4]))
+
+(Add64 x y) =>
+ (Int64Make
+ (Add32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
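+// In Go terms (illustrative sketch, assuming "math/bits"):
+//
+//	lo, carry := bits.Add32(xlo, ylo, 0) // Add32carry
+//	hi, _ := bits.Add32(xhi, yhi, carry) // Add32withcarry
+//	// result is hi:lo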
+
+(Sub64 x y) =>
+ (Int64Make
+ (Sub32withcarry <typ.Int32>
+ (Int64Hi x)
+ (Int64Hi y)
+ (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+
+(Mul64 x y) =>
+ (Int64Make
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y))
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y))
+ (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y)))))
+ (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
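+// In Go terms (illustrative sketch, assuming "math/bits"):
+//
+//	prodHi, lo := bits.Mul32(xlo, ylo) // Mul32uhilo
+//	hi := xlo*yhi + xhi*ylo + prodHi   // cross terms, truncated to 32 bits like Mul32
+//	// result is hi:lo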
+
+(And64 x y) =>
+ (Int64Make
+ (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Or64 x y) =>
+ (Int64Make
+ (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Xor64 x y) =>
+ (Int64Make
+ (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y))
+ (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+
+(Neg64 <t> x) => (Sub64 (Const64 <t> [0]) x)
+
+(Com64 x) =>
+ (Int64Make
+ (Com32 <typ.UInt32> (Int64Hi x))
+ (Com32 <typ.UInt32> (Int64Lo x)))
+
+// Sadly, just because we know that x is non-zero,
+// we don't know whether either component is,
+// so just treat Ctz64NonZero the same as Ctz64.
+(Ctz64NonZero ...) => (Ctz64 ...)
+
+(Ctz64 x) =>
+ (Add32 <typ.UInt32>
+ (Ctz32 <typ.UInt32> (Int64Lo x))
+ (And32 <typ.UInt32>
+ (Com32 <typ.UInt32> (Zeromask (Int64Lo x)))
+ (Ctz32 <typ.UInt32> (Int64Hi x))))
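+// In Go terms (illustrative sketch, assuming "math/bits", with Ctz32 of a zero
+// input defined as 32):
+//
+//	r := bits.TrailingZeros32(lo) // 32 when lo == 0
+//	if lo == 0 {
+//		r += bits.TrailingZeros32(hi)
+//	}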
+
+(BitLen64 x) =>
+ (Add32 <typ.Int>
+ (BitLen32 <typ.Int> (Int64Hi x))
+ (BitLen32 <typ.Int>
+ (Or32 <typ.UInt32>
+ (Int64Lo x)
+ (Zeromask (Int64Hi x)))))
+
+(Bswap64 x) =>
+ (Int64Make
+ (Bswap32 <typ.UInt32> (Int64Lo x))
+ (Bswap32 <typ.UInt32> (Int64Hi x)))
+
+(SignExt32to64 x) => (Int64Make (Signmask x) x)
+(SignExt16to64 x) => (SignExt32to64 (SignExt16to32 x))
+(SignExt8to64 x) => (SignExt32to64 (SignExt8to32 x))
+
+(ZeroExt32to64 x) => (Int64Make (Const32 <typ.UInt32> [0]) x)
+(ZeroExt16to64 x) => (ZeroExt32to64 (ZeroExt16to32 x))
+(ZeroExt8to64 x) => (ZeroExt32to64 (ZeroExt8to32 x))
+
+(Trunc64to32 (Int64Make _ lo)) => lo
+(Trunc64to16 (Int64Make _ lo)) => (Trunc32to16 lo)
+(Trunc64to8 (Int64Make _ lo)) => (Trunc32to8 lo)
+// Most general
+(Trunc64to32 x) => (Int64Lo x)
+(Trunc64to16 x) => (Trunc32to16 (Int64Lo x))
+(Trunc64to8 x) => (Trunc32to8 (Int64Lo x))
+
+(Lsh32x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh32x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask x)
+(Rsh32Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh16x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh16x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt16to32 x))
+(Rsh16Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Lsh8x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+(Rsh8x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Signmask (SignExt8to32 x))
+(Rsh8Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const32 [0])
+
+(Lsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh32x32 [c] x lo)
+(Rsh32x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32x32 [c] x lo)
+(Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh32Ux32 [c] x lo)
+(Lsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh16x32 [c] x lo)
+(Rsh16x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16x32 [c] x lo)
+(Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh16Ux32 [c] x lo)
+(Lsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh8x32 [c] x lo)
+(Rsh8x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8x32 [c] x lo)
+(Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh8Ux32 [c] x lo)
+
+(Lsh64x64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+(Rsh64x64 x (Int64Make (Const32 [c]) _)) && c != 0 => (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+(Rsh64Ux64 _ (Int64Make (Const32 [c]) _)) && c != 0 => (Const64 [0])
+
+(Lsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Lsh64x32 [c] x lo)
+(Rsh64x64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64x32 [c] x lo)
+(Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo)) => (Rsh64Ux32 [c] x lo)
+
+// Turn non-constant x64 shifts (64-bit shift amount) into x32 shifts.
+// If the high 32 bits of the shift amount are nonzero, force a huge shift.
+(Lsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh64Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh32Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh16Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Lsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8x64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+(Rsh8Ux64 x (Int64Make hi lo)) && hi.Op != OpConst32 =>
+ (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+
+// Most general
+(Lsh64x64 x y) => (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64x64 x y) => (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh64Ux64 x y) => (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh32x64 x y) => (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32x64 x y) => (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh32Ux64 x y) => (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh16x64 x y) => (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16x64 x y) => (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh16Ux64 x y) => (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Lsh8x64 x y) => (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8x64 x y) => (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+(Rsh8Ux64 x y) => (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+
+// Clean up constants a little
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c == 0 => y
+(Or32 <typ.UInt32> (Zeromask (Const32 [c])) y) && c != 0 => (Const32 <typ.UInt32> [-1])
+
+// 64x left shift
+// result.hi = hi<<s | lo>>(32-s) | lo<<(s-32) // >> is unsigned; shift counts of 32 or more yield 0
+// result.lo = lo<<s
+(Lsh64x32 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Lsh32x32 <typ.UInt32>
+ (Int64Lo x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))))
+ (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x16 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Lsh32x16 <typ.UInt32>
+ (Int64Lo x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))))
+ (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+(Lsh64x8 x s) =>
+ (Int64Make
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Lsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Lsh32x8 <typ.UInt32>
+ (Int64Lo x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))))
+ (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
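The three-term formula in the comment above maps directly onto Go's own shift semantics, where shifting a uint32 by 32 or more yields 0, so the lo>>(32-s) and lo<<(s-32) terms silently drop out whenever they do not apply. A small sketch under that assumption (the helper name lsh64 is illustrative):

package main

import "fmt"

// lsh64 implements result.hi = hi<<s | lo>>(32-s) | lo<<(s-32), result.lo = lo<<s,
// relying on Go's rule that an unsigned shift by 32 or more produces 0.
func lsh64(hi, lo, s uint32) (uint32, uint32) {
	rhi := hi<<s | lo>>(32-s) | lo<<(s-32)
	rlo := lo << s
	return rhi, rlo
}

func main() {
	x := uint64(0x0123456789abcdef)
	for s := uint32(0); s < 64; s++ {
		hi, lo := lsh64(uint32(x>>32), uint32(x), s)
		got := uint64(hi)<<32 | uint64(lo)
		fmt.Println(s, got == x<<s) // every line should print true
	}
}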
+
+// 64x unsigned right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | hi>>(s-32) // >> is unsigned; shift counts of 32 or more yield 0
+(Rsh64Ux32 x s) =>
+ (Int64Make
+ (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (Rsh32Ux32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+(Rsh64Ux16 x s) =>
+ (Int64Make
+ (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (Rsh32Ux16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+(Rsh64Ux8 x s) =>
+ (Int64Make
+ (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (Rsh32Ux8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+
+// 64x signed right shift
+// result.hi = hi>>s
+// result.lo = lo>>s | hi<<(32-s) | (hi>>(s-32))&zeromask(s>>5) // hi>>(s-32) is signed; shift counts of 32 or more yield 0 or -1
+(Rsh64x32 x s) =>
+ (Int64Make
+ (Rsh32x32 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x32 <typ.UInt32>
+ (Int64Hi x)
+ (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))
+ (Zeromask
+ (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+(Rsh64x16 x s) =>
+ (Int64Make
+ (Rsh32x16 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x16 <typ.UInt32>
+ (Int64Hi x)
+ (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))
+ (Zeromask
+ (ZeroExt16to32
+ (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+(Rsh64x8 x s) =>
+ (Int64Make
+ (Rsh32x8 <typ.UInt32> (Int64Hi x) s)
+ (Or32 <typ.UInt32>
+ (Or32 <typ.UInt32>
+ (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s)
+ (Lsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s)))
+ (And32 <typ.UInt32>
+ (Rsh32x8 <typ.UInt32>
+ (Int64Hi x)
+ (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))
+ (Zeromask
+ (ZeroExt8to32
+ (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
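The signed variant needs one extra ingredient: for s < 32 the term hi>>(s-32) sign-fills to 0 or -1 instead of vanishing, so it is masked with zeromask(s>>5), which is all ones exactly when s >= 32 (for s < 64). A hedged Go sketch of the Rsh64x32 case, with illustrative helper names:

package main

import "fmt"

// zeromask32: 0 if x == 0, all ones otherwise (the generic Zeromask op).
func zeromask32(x uint32) uint32 {
	if x != 0 {
		return 0xFFFFFFFF
	}
	return 0
}

// rsh64 follows the Rsh64x32 rule; hi is the signed high half. The final term
// is masked with zeromask(s>>5) because for s < 32 the signed shift hi>>(s-32)
// would sign-fill and corrupt the low word; the mask keeps it only when s >= 32.
func rsh64(hi int32, lo, s uint32) (int32, uint32) {
	rhi := hi >> s
	rlo := lo>>s |
		uint32(hi)<<(32-s) |
		uint32(hi>>(s-32))&zeromask32(s>>5)
	return rhi, rlo
}

func main() {
	x := int64(-0x0123456789abcdef)
	for s := uint32(0); s < 64; s++ {
		hi, lo := rsh64(int32(x>>32), uint32(x), s)
		got := int64(hi)<<32 | int64(lo)&0xFFFFFFFF
		fmt.Println(s, got == x>>s) // every line should print true
	}
}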
+
+(Const64 <t> [c]) && t.IsSigned() =>
+ (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+(Const64 <t> [c]) && !t.IsSigned() =>
+ (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
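Both Const64 rules split a 64-bit constant into a high and a low 32-bit word; only the type given to the high word depends on signedness. The same split, written out as a trivial Go sketch:

package main

import "fmt"

// split64 produces the hi/lo halves an Int64Make would be built from.
func split64(c int64) (hi int32, lo uint32) {
	return int32(c >> 32), uint32(c)
}

func main() {
	hi, lo := split64(-0x123456789)
	fmt.Printf("%#x %#x\n", hi, lo)
	// reassemble to check the round trip
	fmt.Println(int64(hi)<<32|int64(lo)&0xFFFFFFFF == -0x123456789) // true
}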
+
+(Eq64 x y) =>
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Eq32 (Int64Lo x) (Int64Lo y)))
+
+(Neq64 x y) =>
+ (OrB
+ (Neq32 (Int64Hi x) (Int64Hi y))
+ (Neq32 (Int64Lo x) (Int64Lo y)))
+
+(Less64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64U x y) =>
+ (OrB
+ (Less32U (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
+
+(Less64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Less32U (Int64Lo x) (Int64Lo y))))
+
+(Leq64 x y) =>
+ (OrB
+ (Less32 (Int64Hi x) (Int64Hi y))
+ (AndB
+ (Eq32 (Int64Hi x) (Int64Hi y))
+ (Leq32U (Int64Lo x) (Int64Lo y))))
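The comparison rules above all follow one lexicographic pattern: compare the high halves first, and fall back to an unsigned comparison of the low halves only when the high halves are equal; signedness matters only for the high word. A small Go sketch, with the hi/lo split done by hand:

package main

import "fmt"

// less64 mirrors the Less64 rule: a signed compare of the high words, and an
// unsigned compare of the low words only when the high words are equal.
func less64(xhi int32, xlo uint32, yhi int32, ylo uint32) bool {
	return xhi < yhi || (xhi == yhi && xlo < ylo)
}

// less64U mirrors Less64U: both comparisons are unsigned.
func less64U(xhi, xlo, yhi, ylo uint32) bool {
	return xhi < yhi || (xhi == yhi && xlo < ylo)
}

func main() {
	signed := []int64{-1 << 40, -1, 0, 1, 1 << 40}
	for _, a := range signed {
		for _, b := range signed {
			got := less64(int32(a>>32), uint32(a), int32(b>>32), uint32(b))
			fmt.Println(a, b, got == (a < b))
		}
	}
	unsigned := []uint64{0, 1, 1 << 40, ^uint64(0)}
	for _, a := range unsigned {
		for _, b := range unsigned {
			got := less64U(uint32(a>>32), uint32(a), uint32(b>>32), uint32(b))
			fmt.Println(a, b, got == (a < b))
		}
	}
}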
diff --git a/src/cmd/compile/internal/ssa/gen/dec64Ops.go b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
new file mode 100644
index 0000000..78fcea8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/dec64Ops.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+var dec64Ops = []opData{}
+
+var dec64Blocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec64",
+ ops: dec64Ops,
+ blocks: dec64Blocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/decOps.go b/src/cmd/compile/internal/ssa/gen/decOps.go
new file mode 100644
index 0000000..d5cd793
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/decOps.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+var decOps = []opData{}
+
+var decBlocks = []blockData{}
+
+func init() {
+ archs = append(archs, arch{
+ name: "dec",
+ ops: decOps,
+ blocks: decBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/generic.rules b/src/cmd/compile/internal/ssa/gen/generic.rules
new file mode 100644
index 0000000..b78d2aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/generic.rules
@@ -0,0 +1,2542 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Simplifications that apply to all backend architectures. As an example, this
+// Go source code
+//
+// y := 0 * x
+//
+// can be translated into y := 0 without losing any information, which saves a
+// pointless multiplication instruction. Other .rules files in this directory
+// (for example AMD64.rules) contain rules specific to the architecture in the
+// filename. The rules here apply to every architecture.
+//
+// The code for parsing this file lives in rulegen.go; this file generates
+// ssa/rewritegeneric.go.
+
+// values are specified using the following format:
+// (op <type> [auxint] {aux} arg0 arg1 ...)
+// the type, aux, and auxint fields are optional
+// on the matching side
+// - the type, aux, and auxint fields must match if they are specified.
+// - the first occurrence of a variable defines that variable. Subsequent
+// uses must match (be == to) the first use.
+// - v is defined to be the value matched.
+// - an additional conditional can be provided after the match pattern with "&&".
+// on the generated side
+// - the type of the top-level expression is the same as the one on the left-hand side.
+// - the type of any subexpressions must be specified explicitly (or
+// be specified in the op's type field).
+// - auxint will be 0 if not specified.
+// - aux will be nil if not specified.
+
+// blocks are specified using the following format:
+// (kind controlvalue succ0 succ1 ...)
+// controlvalue must be "nil" or a value expression
+// succ* fields must be variables
+// For now, the generated successors must be a permutation of the matched successors.
+
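To make the match/result split concrete, here is a toy, self-contained rewriter over a tiny expression tree, applying the equivalent of (Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d]). The value struct and its field names are illustrative stand-ins, not the compiler's *ssa.Value API.

package main

import "fmt"

// A toy value tree standing in for an SSA value: an op name, an integer
// auxint, and arguments.
type value struct {
	op     string
	auxInt int64
	args   []*value
}

// rewriteAdd64 applies the toy equivalent of the rule
//   (Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
// and reports whether the value was rewritten in place.
func rewriteAdd64(v *value) bool {
	if v.op != "Add64" || len(v.args) != 2 {
		return false
	}
	x, y := v.args[0], v.args[1]
	if x.op != "Const64" || y.op != "Const64" {
		return false
	}
	c, d := x.auxInt, y.auxInt
	*v = value{op: "Const64", auxInt: c + d} // the result side of the rule
	return true
}

func main() {
	v := &value{op: "Add64", args: []*value{
		{op: "Const64", auxInt: 2},
		{op: "Const64", auxInt: 40},
	}}
	fmt.Println(rewriteAdd64(v), v.op, v.auxInt) // true Const64 42
}
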
+// constant folding
+(Trunc16to8 (Const16 [c])) => (Const8 [int8(c)])
+(Trunc32to8 (Const32 [c])) => (Const8 [int8(c)])
+(Trunc32to16 (Const32 [c])) => (Const16 [int16(c)])
+(Trunc64to8 (Const64 [c])) => (Const8 [int8(c)])
+(Trunc64to16 (Const64 [c])) => (Const16 [int16(c)])
+(Trunc64to32 (Const64 [c])) => (Const32 [int32(c)])
+(Cvt64Fto32F (Const64F [c])) => (Const32F [float32(c)])
+(Cvt32Fto64F (Const32F [c])) => (Const64F [float64(c)])
+(Cvt32to32F (Const32 [c])) => (Const32F [float32(c)])
+(Cvt32to64F (Const32 [c])) => (Const64F [float64(c)])
+(Cvt64to32F (Const64 [c])) => (Const32F [float32(c)])
+(Cvt64to64F (Const64 [c])) => (Const64F [float64(c)])
+(Cvt32Fto32 (Const32F [c])) => (Const32 [int32(c)])
+(Cvt32Fto64 (Const32F [c])) => (Const64 [int64(c)])
+(Cvt64Fto32 (Const64F [c])) => (Const32 [int32(c)])
+(Cvt64Fto64 (Const64F [c])) => (Const64 [int64(c)])
+(Round32F x:(Const32F)) => x
+(Round64F x:(Const64F)) => x
+(CvtBoolToUint8 (ConstBool [false])) => (Const8 [0])
+(CvtBoolToUint8 (ConstBool [true])) => (Const8 [1])
+
+(Trunc16to8 (ZeroExt8to16 x)) => x
+(Trunc32to8 (ZeroExt8to32 x)) => x
+(Trunc32to16 (ZeroExt8to32 x)) => (ZeroExt8to16 x)
+(Trunc32to16 (ZeroExt16to32 x)) => x
+(Trunc64to8 (ZeroExt8to64 x)) => x
+(Trunc64to16 (ZeroExt8to64 x)) => (ZeroExt8to16 x)
+(Trunc64to16 (ZeroExt16to64 x)) => x
+(Trunc64to32 (ZeroExt8to64 x)) => (ZeroExt8to32 x)
+(Trunc64to32 (ZeroExt16to64 x)) => (ZeroExt16to32 x)
+(Trunc64to32 (ZeroExt32to64 x)) => x
+(Trunc16to8 (SignExt8to16 x)) => x
+(Trunc32to8 (SignExt8to32 x)) => x
+(Trunc32to16 (SignExt8to32 x)) => (SignExt8to16 x)
+(Trunc32to16 (SignExt16to32 x)) => x
+(Trunc64to8 (SignExt8to64 x)) => x
+(Trunc64to16 (SignExt8to64 x)) => (SignExt8to16 x)
+(Trunc64to16 (SignExt16to64 x)) => x
+(Trunc64to32 (SignExt8to64 x)) => (SignExt8to32 x)
+(Trunc64to32 (SignExt16to64 x)) => (SignExt16to32 x)
+(Trunc64to32 (SignExt32to64 x)) => x
+
+(ZeroExt8to16 (Const8 [c])) => (Const16 [int16( uint8(c))])
+(ZeroExt8to32 (Const8 [c])) => (Const32 [int32( uint8(c))])
+(ZeroExt8to64 (Const8 [c])) => (Const64 [int64( uint8(c))])
+(ZeroExt16to32 (Const16 [c])) => (Const32 [int32(uint16(c))])
+(ZeroExt16to64 (Const16 [c])) => (Const64 [int64(uint16(c))])
+(ZeroExt32to64 (Const32 [c])) => (Const64 [int64(uint32(c))])
+(SignExt8to16 (Const8 [c])) => (Const16 [int16(c)])
+(SignExt8to32 (Const8 [c])) => (Const32 [int32(c)])
+(SignExt8to64 (Const8 [c])) => (Const64 [int64(c)])
+(SignExt16to32 (Const16 [c])) => (Const32 [int32(c)])
+(SignExt16to64 (Const16 [c])) => (Const64 [int64(c)])
+(SignExt32to64 (Const32 [c])) => (Const64 [int64(c)])
+
+(Neg8 (Const8 [c])) => (Const8 [-c])
+(Neg16 (Const16 [c])) => (Const16 [-c])
+(Neg32 (Const32 [c])) => (Const32 [-c])
+(Neg64 (Const64 [c])) => (Const64 [-c])
+(Neg32F (Const32F [c])) && c != 0 => (Const32F [-c])
+(Neg64F (Const64F [c])) && c != 0 => (Const64F [-c])
+
+(Add8 (Const8 [c]) (Const8 [d])) => (Const8 [c+d])
+(Add16 (Const16 [c]) (Const16 [d])) => (Const16 [c+d])
+(Add32 (Const32 [c]) (Const32 [d])) => (Const32 [c+d])
+(Add64 (Const64 [c]) (Const64 [d])) => (Const64 [c+d])
+(Add32F (Const32F [c]) (Const32F [d])) && c+d == c+d => (Const32F [c+d])
+(Add64F (Const64F [c]) (Const64F [d])) && c+d == c+d => (Const64F [c+d])
+(AddPtr <t> x (Const64 [c])) => (OffPtr <t> x [c])
+(AddPtr <t> x (Const32 [c])) => (OffPtr <t> x [int64(c)])
+
+(Sub8 (Const8 [c]) (Const8 [d])) => (Const8 [c-d])
+(Sub16 (Const16 [c]) (Const16 [d])) => (Const16 [c-d])
+(Sub32 (Const32 [c]) (Const32 [d])) => (Const32 [c-d])
+(Sub64 (Const64 [c]) (Const64 [d])) => (Const64 [c-d])
+(Sub32F (Const32F [c]) (Const32F [d])) && c-d == c-d => (Const32F [c-d])
+(Sub64F (Const64F [c]) (Const64F [d])) && c-d == c-d => (Const64F [c-d])
+
+(Mul8 (Const8 [c]) (Const8 [d])) => (Const8 [c*d])
+(Mul16 (Const16 [c]) (Const16 [d])) => (Const16 [c*d])
+(Mul32 (Const32 [c]) (Const32 [d])) => (Const32 [c*d])
+(Mul64 (Const64 [c]) (Const64 [d])) => (Const64 [c*d])
+(Mul32F (Const32F [c]) (Const32F [d])) && c*d == c*d => (Const32F [c*d])
+(Mul64F (Const64F [c]) (Const64F [d])) && c*d == c*d => (Const64F [c*d])
+
+(And8 (Const8 [c]) (Const8 [d])) => (Const8 [c&d])
+(And16 (Const16 [c]) (Const16 [d])) => (Const16 [c&d])
+(And32 (Const32 [c]) (Const32 [d])) => (Const32 [c&d])
+(And64 (Const64 [c]) (Const64 [d])) => (Const64 [c&d])
+
+(Or8 (Const8 [c]) (Const8 [d])) => (Const8 [c|d])
+(Or16 (Const16 [c]) (Const16 [d])) => (Const16 [c|d])
+(Or32 (Const32 [c]) (Const32 [d])) => (Const32 [c|d])
+(Or64 (Const64 [c]) (Const64 [d])) => (Const64 [c|d])
+
+(Xor8 (Const8 [c]) (Const8 [d])) => (Const8 [c^d])
+(Xor16 (Const16 [c]) (Const16 [d])) => (Const16 [c^d])
+(Xor32 (Const32 [c]) (Const32 [d])) => (Const32 [c^d])
+(Xor64 (Const64 [c]) (Const64 [d])) => (Const64 [c^d])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 4 => (Const32 [int32(ntz8(c))])
+
+(Ctz64 (Const64 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz64(c))])
+(Ctz32 (Const32 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz32(c))])
+(Ctz16 (Const16 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz16(c))])
+(Ctz8 (Const8 [c])) && config.PtrSize == 8 => (Const64 [int64(ntz8(c))])
+
+(Div8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c/d])
+(Div16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c/d])
+(Div32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c/d])
+(Div64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c/d])
+(Div8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c)/uint8(d))])
+(Div16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c)/uint16(d))])
+(Div32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c)/uint32(d))])
+(Div64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c)/uint64(d))])
+(Div32F (Const32F [c]) (Const32F [d])) && c/d == c/d => (Const32F [c/d])
+(Div64F (Const64F [c]) (Const64F [d])) && c/d == c/d => (Const64F [c/d])
+(Select0 (Div128u (Const64 [0]) lo y)) => (Div64u lo y)
+(Select1 (Div128u (Const64 [0]) lo y)) => (Mod64u lo y)
+
+(Not (ConstBool [c])) => (ConstBool [!c])
+
+// Convert x * 1 to x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [1]) x) => x
+
+// Convert x * -1 to -x.
+(Mul(8|16|32|64) (Const(8|16|32|64) [-1]) x) => (Neg(8|16|32|64) x)
+
+// Convert multiplication by a power of two to a shift.
+(Mul8 <t> n (Const8 [c])) && isPowerOfTwo8(c) => (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+(Mul16 <t> n (Const16 [c])) && isPowerOfTwo16(c) => (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
+(Mul32 <t> n (Const32 [c])) && isPowerOfTwo32(c) => (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+(Mul64 <t> n (Const64 [c])) && isPowerOfTwo64(c) => (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+(Mul8 <t> n (Const8 [c])) && t.IsSigned() && isPowerOfTwo8(-c) => (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+(Mul16 <t> n (Const16 [c])) && t.IsSigned() && isPowerOfTwo16(-c) => (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
+(Mul32 <t> n (Const32 [c])) && t.IsSigned() && isPowerOfTwo32(-c) => (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+(Mul64 <t> n (Const64 [c])) && t.IsSigned() && isPowerOfTwo64(-c) => (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
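The isPowerOfTwo and log helpers referenced in the conditions above boil down to standard bit tricks. A hedged sketch of the same check and the resulting strength reduction, using math/bits; the helper names here are illustrative, not the compiler's:

package main

import (
	"fmt"
	"math/bits"
)

// isPowerOfTwo64 reports whether c is a positive power of two, the condition
// the Mul64 rules test before turning a multiply into a shift.
func isPowerOfTwo64(c int64) bool {
	return c > 0 && c&(c-1) == 0
}

// log64 is the shift amount for a power-of-two c.
func log64(c int64) uint {
	return uint(bits.TrailingZeros64(uint64(c)))
}

func main() {
	n := int64(37)
	for _, c := range []int64{1, 2, 8, 1024} {
		shifted := n << log64(c) // what (Lsh64x64 n (Const64 [log64(c)])) computes
		fmt.Println(c, isPowerOfTwo64(c), shifted == n*c) // true true
	}
}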
+
+(Mod8 (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [c % d])
+(Mod16 (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [c % d])
+(Mod32 (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [c % d])
+(Mod64 (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [c % d])
+
+(Mod8u (Const8 [c]) (Const8 [d])) && d != 0 => (Const8 [int8(uint8(c) % uint8(d))])
+(Mod16u (Const16 [c]) (Const16 [d])) && d != 0 => (Const16 [int16(uint16(c) % uint16(d))])
+(Mod32u (Const32 [c]) (Const32 [d])) && d != 0 => (Const32 [int32(uint32(c) % uint32(d))])
+(Mod64u (Const64 [c]) (Const64 [d])) && d != 0 => (Const64 [int64(uint64(c) % uint64(d))])
+
+(Lsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c << uint64(d)])
+(Rsh64x64 (Const64 [c]) (Const64 [d])) => (Const64 [c >> uint64(d)])
+(Rsh64Ux64 (Const64 [c]) (Const64 [d])) => (Const64 [int64(uint64(c) >> uint64(d))])
+(Lsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c << uint64(d)])
+(Rsh32x64 (Const32 [c]) (Const64 [d])) => (Const32 [c >> uint64(d)])
+(Rsh32Ux64 (Const32 [c]) (Const64 [d])) => (Const32 [int32(uint32(c) >> uint64(d))])
+(Lsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c << uint64(d)])
+(Rsh16x64 (Const16 [c]) (Const64 [d])) => (Const16 [c >> uint64(d)])
+(Rsh16Ux64 (Const16 [c]) (Const64 [d])) => (Const16 [int16(uint16(c) >> uint64(d))])
+(Lsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c << uint64(d)])
+(Rsh8x64 (Const8 [c]) (Const64 [d])) => (Const8 [c >> uint64(d)])
+(Rsh8Ux64 (Const8 [c]) (Const64 [d])) => (Const8 [int8(uint8(c) >> uint64(d))])
+
+// Fold IsInBounds when the range of the index cannot exceed the limit.
+(IsInBounds (ZeroExt8to32 _) (Const32 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 _) (Const64 [c])) && (1 << 8) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 _) (Const32 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 _) (Const64 [c])) && (1 << 16) <= c => (ConstBool [true])
+(IsInBounds x x) => (ConstBool [false])
+(IsInBounds (And8 (Const8 [c]) _) (Const8 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d])) && 0 <= c && int16(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And16 (Const16 [c]) _) (Const16 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d])) && 0 <= c && int32(c) < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d])) && 0 <= c && int64(c) < d => (ConstBool [true])
+(IsInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c < d => (ConstBool [true])
+(IsInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c < d])
+(IsInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c < d])
+// (Mod64u x y) is always between 0 (inclusive) and y (exclusive).
+(IsInBounds (Mod32u _ y) y) => (ConstBool [true])
+(IsInBounds (Mod64u _ y) y) => (ConstBool [true])
+// Right shifting an unsigned number limits its value.
+(IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 8 && 1<<uint( 8-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 16 && 1<<uint(16-c)-1 < d => (ConstBool [true])
+(IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 32 && 1<<uint(32-c)-1 < d => (ConstBool [true])
+(IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d])) && 0 < c && c < 64 && 1<<uint(64-c)-1 < d => (ConstBool [true])
+
+(IsSliceInBounds x x) => (ConstBool [true])
+(IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d])) && 0 <= c && c <= d => (ConstBool [true])
+(IsSliceInBounds (Const32 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const64 [0]) _) => (ConstBool [true])
+(IsSliceInBounds (Const32 [c]) (Const32 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (Const64 [c]) (Const64 [d])) => (ConstBool [0 <= c && c <= d])
+(IsSliceInBounds (SliceLen x) (SliceCap x)) => (ConstBool [true])
+
+(Eq(64|32|16|8) x x) => (ConstBool [true])
+(EqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c == d])
+(EqB (ConstBool [false]) x) => (Not x)
+(EqB (ConstBool [true]) x) => x
+
+(Neq(64|32|16|8) x x) => (ConstBool [false])
+(NeqB (ConstBool [c]) (ConstBool [d])) => (ConstBool [c != d])
+(NeqB (ConstBool [false]) x) => x
+(NeqB (ConstBool [true]) x) => (Not x)
+(NeqB (Not x) (Not y)) => (NeqB x y)
+
+(Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Eq64 (Const64 <t> [c-d]) x)
+(Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Eq32 (Const32 <t> [c-d]) x)
+(Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Eq16 (Const16 <t> [c-d]) x)
+(Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Eq8 (Const8 <t> [c-d]) x)
+
+(Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Neq64 (Const64 <t> [c-d]) x)
+(Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Neq32 (Const32 <t> [c-d]) x)
+(Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Neq16 (Const16 <t> [c-d]) x)
+(Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Neq8 (Const8 <t> [c-d]) x)
+
+// signed integer range: ( c <= x && x (<|<=) d ) -> ( unsigned(x-c) (<|<=) unsigned(d-c) )
+(AndB (Leq64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+
+// signed integer range: ( c < x && x (<|<=) d ) -> ( unsigned(x-(c+1)) (<|<=) unsigned(d-(c+1)) )
+(AndB (Less64 (Const64 [c]) x) ((Less|Leq)64 x (Const64 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32 (Const32 [c]) x) ((Less|Leq)32 x (Const32 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16 (Const16 [c]) x) ((Less|Leq)16 x (Const16 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8 (Const8 [c]) x) ((Less|Leq)8 x (Const8 [d]))) && d >= c+1 && c+1 > c => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// unsigned integer range: ( c <= x && x (<|<=) d ) -> ( x-c (<|<=) d-c )
+(AndB (Leq64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+(AndB (Leq32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+(AndB (Leq16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+(AndB (Leq8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+
+// unsigned integer range: ( c < x && x (<|<=) d ) -> ( x-(c+1) (<|<=) d-(c+1) )
+(AndB (Less64U (Const64 [c]) x) ((Less|Leq)64U x (Const64 [d]))) && uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c) => ((Less|Leq)64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+(AndB (Less32U (Const32 [c]) x) ((Less|Leq)32U x (Const32 [d]))) && uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c) => ((Less|Leq)32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+(AndB (Less16U (Const16 [c]) x) ((Less|Leq)16U x (Const16 [d]))) && uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c) => ((Less|Leq)16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+(AndB (Less8U (Const8 [c]) x) ((Less|Leq)8U x (Const8 [d]))) && uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c) => ((Less|Leq)8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+
+// signed integer range: ( c (<|<=) x || x < d ) -> ( unsigned(c-d) (<|<=) unsigned(x-d) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Less64 x (Const64 [d]))) && c >= d => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Less32 x (Const32 [d]))) && c >= d => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Less16 x (Const16 [d]))) && c >= d => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Less8 x (Const8 [d]))) && c >= d => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// signed integer range: ( c (<|<=) x || x <= d ) -> ( unsigned(c-(d+1)) (<|<=) unsigned(x-(d+1)) )
+(OrB ((Less|Leq)64 (Const64 [c]) x) (Leq64 x (Const64 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32 (Const32 [c]) x) (Leq32 x (Const32 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16 (Const16 [c]) x) (Leq16 x (Const16 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8 (Const8 [c]) x) (Leq8 x (Const8 [d]))) && c >= d+1 && d+1 > d => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+
+// unsigned integer range: ( c (<|<=) x || x < d ) -> ( c-d (<|<=) x-d )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Less64U x (Const64 [d]))) && uint64(c) >= uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Less32U x (Const32 [d]))) && uint32(c) >= uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Less16U x (Const16 [d]))) && uint16(c) >= uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Less8U x (Const8 [d]))) && uint8(c) >= uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+
+// unsigned integer range: ( c (<|<=) x || x <= d ) -> ( c-(d+1) (<|<=) x-(d+1) )
+(OrB ((Less|Leq)64U (Const64 [c]) x) (Leq64U x (Const64 [d]))) && uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d) => ((Less|Leq)64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+(OrB ((Less|Leq)32U (Const32 [c]) x) (Leq32U x (Const32 [d]))) && uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d) => ((Less|Leq)32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+(OrB ((Less|Leq)16U (Const16 [c]) x) (Leq16U x (Const16 [d]))) && uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d) => ((Less|Leq)16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+(OrB ((Less|Leq)8U (Const8 [c]) x) (Leq8U x (Const8 [d]))) && uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d) => ((Less|Leq)8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
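All of the range rules above exploit the same identity: when d >= c, the pair c <= x && x < d collapses to the single unsigned comparison uint(x-c) < uint(d-c), because subtracting c maps the interval onto [0, d-c) and pushes everything outside it past d-c in unsigned order. A small Go check of the signed variant:

package main

import "fmt"

// inRange is the fused form: c <= x && x < d becomes uint64(x-c) < uint64(d-c),
// valid whenever d >= c (the condition the rules check).
func inRange(x, c, d int64) bool {
	return uint64(x-c) < uint64(d-c)
}

func main() {
	c, d := int64(-3), int64(10)
	ok := true
	for x := int64(-20); x <= 20; x++ {
		if inRange(x, c, d) != (c <= x && x < d) {
			ok = false
		}
	}
	fmt.Println(ok) // true
}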
+
+// Canonicalize x-const to x+(-const)
+(Sub64 x (Const64 <t> [c])) && x.Op != OpConst64 => (Add64 (Const64 <t> [-c]) x)
+(Sub32 x (Const32 <t> [c])) && x.Op != OpConst32 => (Add32 (Const32 <t> [-c]) x)
+(Sub16 x (Const16 <t> [c])) && x.Op != OpConst16 => (Add16 (Const16 <t> [-c]) x)
+(Sub8 x (Const8 <t> [c])) && x.Op != OpConst8 => (Add8 (Const8 <t> [-c]) x)
+
+// fold negation into comparison operators
+(Not (Eq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Neq(64|32|16|8|B|Ptr|64F|32F) x y)
+(Not (Neq(64|32|16|8|B|Ptr|64F|32F) x y)) => (Eq(64|32|16|8|B|Ptr|64F|32F) x y)
+
+(Not (Less(64|32|16|8) x y)) => (Leq(64|32|16|8) y x)
+(Not (Less(64|32|16|8)U x y)) => (Leq(64|32|16|8)U y x)
+(Not (Leq(64|32|16|8) x y)) => (Less(64|32|16|8) y x)
+(Not (Leq(64|32|16|8)U x y)) => (Less(64|32|16|8)U y x)
+
+// Distribute multiplication c * (d+x) -> c*d + c*x. Useful for:
+// a[i].b = ...; a[i+1].b = ...
+(Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x)) =>
+ (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+(Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x)) =>
+ (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+
+// Rewrite x*y ± x*z to x*(y±z)
+(Add(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Add(64|32|16|8) <t> y z))
+(Sub(64|32|16|8) <t> (Mul(64|32|16|8) x y) (Mul(64|32|16|8) x z))
+ => (Mul(64|32|16|8) x (Sub(64|32|16|8) <t> y z))
+
+// Rewrite shifts whose count is an 8/16/32-bit constant into shifts by a 64-bit constant,
+// to reduce the number of other rewrite rules needed for constant shifts.
+(Lsh64x32 <t> x (Const32 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh64x16 <t> x (Const16 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh64x8 <t> x (Const8 [c])) => (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64x32 <t> x (Const32 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64x16 <t> x (Const16 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64x8 <t> x (Const8 [c])) => (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh64Ux32 <t> x (Const32 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh64Ux16 <t> x (Const16 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh64Ux8 <t> x (Const8 [c])) => (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh32x32 <t> x (Const32 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh32x16 <t> x (Const16 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh32x8 <t> x (Const8 [c])) => (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32x32 <t> x (Const32 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32x16 <t> x (Const16 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32x8 <t> x (Const8 [c])) => (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh32Ux32 <t> x (Const32 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh32Ux16 <t> x (Const16 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh32Ux8 <t> x (Const8 [c])) => (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh16x32 <t> x (Const32 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh16x16 <t> x (Const16 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh16x8 <t> x (Const8 [c])) => (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16x32 <t> x (Const32 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16x16 <t> x (Const16 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16x8 <t> x (Const8 [c])) => (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh16Ux32 <t> x (Const32 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh16Ux16 <t> x (Const16 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh16Ux8 <t> x (Const8 [c])) => (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+(Lsh8x32 <t> x (Const32 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Lsh8x16 <t> x (Const16 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Lsh8x8 <t> x (Const8 [c])) => (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8x32 <t> x (Const32 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8x16 <t> x (Const16 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8x8 <t> x (Const8 [c])) => (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+(Rsh8Ux32 <t> x (Const32 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+(Rsh8Ux16 <t> x (Const16 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+(Rsh8Ux8 <t> x (Const8 [c])) => (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+
+// shifts by zero
+(Lsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)x64 x (Const64 [0])) => x
+(Rsh(64|32|16|8)Ux64 x (Const64 [0])) => x
+
+// rotates by multiples of register width
+(RotateLeft64 x (Const64 [c])) && c%64 == 0 => x
+(RotateLeft32 x (Const32 [c])) && c%32 == 0 => x
+(RotateLeft16 x (Const16 [c])) && c%16 == 0 => x
+(RotateLeft8 x (Const8 [c])) && c%8 == 0 => x
+
+// zero shifted
+(Lsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64x(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Rsh64Ux(64|32|16|8) (Const64 [0]) _) => (Const64 [0])
+(Lsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32x(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Rsh32Ux(64|32|16|8) (Const32 [0]) _) => (Const32 [0])
+(Lsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16x(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Rsh16Ux(64|32|16|8) (Const16 [0]) _) => (Const16 [0])
+(Lsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8x(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+(Rsh8Ux(64|32|16|8) (Const8 [0]) _) => (Const8 [0])
+
+// large left shifts of all values, and right shifts of unsigned values
+((Lsh64|Rsh64U)x64 _ (Const64 [c])) && uint64(c) >= 64 => (Const64 [0])
+((Lsh32|Rsh32U)x64 _ (Const64 [c])) && uint64(c) >= 32 => (Const32 [0])
+((Lsh16|Rsh16U)x64 _ (Const64 [c])) && uint64(c) >= 16 => (Const16 [0])
+((Lsh8|Rsh8U)x64 _ (Const64 [c])) && uint64(c) >= 8 => (Const8 [0])
+
+// combine const shifts
+(Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh64x64 x (Const64 <t> [c+d]))
+(Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh32x64 x (Const64 <t> [c+d]))
+(Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh16x64 x (Const64 <t> [c+d]))
+(Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Lsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64x64 x (Const64 <t> [c+d]))
+(Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32x64 x (Const64 <t> [c+d]))
+(Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16x64 x (Const64 <t> [c+d]))
+(Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8x64 x (Const64 <t> [c+d]))
+
+(Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh64Ux64 x (Const64 <t> [c+d]))
+(Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh32Ux64 x (Const64 <t> [c+d]))
+(Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh16Ux64 x (Const64 <t> [c+d]))
+(Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d])) && !uaddOvf(c,d) => (Rsh8Ux64 x (Const64 <t> [c+d]))
+
+// Remove signed right shift before an unsigned right shift that extracts the sign bit.
+(Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7] )) => (Rsh8Ux64 x (Const64 <t> [7] ))
+(Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15])) => (Rsh16Ux64 x (Const64 <t> [15]))
+(Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31])) => (Rsh32Ux64 x (Const64 <t> [31]))
+(Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63])) => (Rsh64Ux64 x (Const64 <t> [63]))
+
+// ((x >> c1) << c2) >> c3
+(Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Rsh(64|32|16|8)Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// ((x << c1) >> c2) << c3
+(Lsh(64|32|16|8)x64 (Rsh(64|32|16|8)Ux64 (Lsh(64|32|16|8)x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ && uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ => (Lsh(64|32|16|8)x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+
+// (x >> c) & uppermask = 0
+(And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c]))) && c >= int64(64-ntz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c]))) && c >= int64(32-ntz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c]))) && c >= int64(16-ntz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c]))) && c >= int64(8-ntz8(m)) => (Const8 [0])
+
+// (x << c) & lowermask = 0
+(And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c]))) && c >= int64(64-nlz64(m)) => (Const64 [0])
+(And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c]))) && c >= int64(32-nlz32(m)) => (Const32 [0])
+(And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c]))) && c >= int64(16-nlz16(m)) => (Const16 [0])
+(And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c]))) && c >= int64(8-nlz8(m)) => (Const8 [0])
+
+// replace shifts with zero extensions
+(Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+(Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+(Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+
+// replace shifts with sign extensions
+(Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8])) => (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24])) => (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56])) => (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+(Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16])) => (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48])) => (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+(Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32])) => (SignExt32to64 (Trunc64to32 <typ.Int32> x))
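The shift pairs replaced here are the classic extract-and-extend idioms: an unsigned left/right pair isolates and zero-extends a narrow field, a signed pair sign-extends it. A quick Go check of the 32-bit byte case, with the conversions written out:

package main

import "fmt"

func main() {
	ok := true
	for i := 0; i < 1<<16; i++ {
		x := uint32(i) * 2654435761 // spread some bits around
		// (Rsh32Ux64 (Lsh32x64 x 24) 24) is the zero-extended low byte
		if (x<<24)>>24 != uint32(uint8(x)) {
			ok = false
		}
		// (Rsh32x64 (Lsh32x64 x 24) 24) is the sign-extended low byte
		s := int32(x)
		if (s<<24)>>24 != int32(int8(x)) {
			ok = false
		}
	}
	fmt.Println(ok) // true
}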
+
+// constant comparisons
+(Eq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c == d])
+(Neq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c != d])
+(Less(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c < d])
+(Leq(64|32|16|8) (Const(64|32|16|8) [c]) (Const(64|32|16|8) [d])) => (ConstBool [c <= d])
+
+(Less64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) < uint64(d)])
+(Less32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) < uint32(d)])
+(Less16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) < uint16(d)])
+(Less8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) < uint8(d)])
+
+(Leq64U (Const64 [c]) (Const64 [d])) => (ConstBool [uint64(c) <= uint64(d)])
+(Leq32U (Const32 [c]) (Const32 [d])) => (ConstBool [uint32(c) <= uint32(d)])
+(Leq16U (Const16 [c]) (Const16 [d])) => (ConstBool [uint16(c) <= uint16(d)])
+(Leq8U (Const8 [c]) (Const8 [d])) => (ConstBool [ uint8(c) <= uint8(d)])
+
+(Leq8 (Const8 [0]) (And8 _ (Const8 [c]))) && c >= 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (And16 _ (Const16 [c]))) && c >= 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (And32 _ (Const32 [c]))) && c >= 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (And64 _ (Const64 [c]))) && c >= 0 => (ConstBool [true])
+
+(Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+(Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c]))) && c > 0 => (ConstBool [true])
+
+(Less(64|32|16|8) (Const(64|32|16|8) <t> [0]) x) && isNonNegative(x) => (Neq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+(Less(64|32|16|8) x (Const(64|32|16|8) <t> [1])) && isNonNegative(x) => (Eq(64|32|16|8) (Const(64|32|16|8) <t> [0]) x)
+
+// constant floating point comparisons
+(Eq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c == d])
+(Eq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c == d])
+(Neq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c != d])
+(Neq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c != d])
+(Less32F (Const32F [c]) (Const32F [d])) => (ConstBool [c < d])
+(Less64F (Const64F [c]) (Const64F [d])) => (ConstBool [c < d])
+(Leq32F (Const32F [c]) (Const32F [d])) => (ConstBool [c <= d])
+(Leq64F (Const64F [c]) (Const64F [d])) => (ConstBool [c <= d])
+
+// simplifications
+(Or(64|32|16|8) x x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Or(64|32|16|8) (Const(64|32|16|8) [-1]) _) => (Const(64|32|16|8) [-1])
+
+(And(64|32|16|8) x x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [-1]) x) => x
+(And(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+
+(Xor(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Xor(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+
+(Add(64|32|16|8) (Const(64|32|16|8) [0]) x) => x
+(Sub(64|32|16|8) x x) => (Const(64|32|16|8) [0])
+(Mul(64|32|16|8) (Const(64|32|16|8) [0]) _) => (Const(64|32|16|8) [0])
+
+(Com(64|32|16|8) (Com(64|32|16|8) x)) => x
+(Com(64|32|16|8) (Const(64|32|16|8) [c])) => (Const(64|32|16|8) [^c])
+
+(Neg(64|32|16|8) (Sub(64|32|16|8) x y)) => (Sub(64|32|16|8) y x)
+
+// ^(x-1) == ^x+1 == -x
+(Add(64|32|16|8) (Const(64|32|16|8) [1]) (Com(64|32|16|8) x)) => (Neg(64|32|16|8) x)
+(Com(64|32|16|8) (Add(64|32|16|8) (Const(64|32|16|8) [-1]) x)) => (Neg(64|32|16|8) x)
+
+// -(-x) == x
+(Neg(64|32|16|8) (Neg(64|32|16|8) x)) => x
+
+// -^x == x+1
+(Neg(64|32|16|8) <t> (Com(64|32|16|8) x)) => (Add(64|32|16|8) (Const(64|32|16|8) <t> [1]) x)
+
+(And(64|32|16|8) x (And(64|32|16|8) x y)) => (And(64|32|16|8) x y)
+(Or(64|32|16|8) x (Or(64|32|16|8) x y)) => (Or(64|32|16|8) x y)
+(Xor(64|32|16|8) x (Xor(64|32|16|8) x y)) => y
+
+// Unsigned comparisons to zero.
+(Less(64U|32U|16U|8U) _ (Const(64|32|16|8) [0])) => (ConstBool [false])
+(Leq(64U|32U|16U|8U) (Const(64|32|16|8) [0]) _) => (ConstBool [true])
+
+// Ands clear bits. Ors set bits.
+// If a subsequent Or will set all the bits
+// that an And cleared, we can skip the And.
+// This happens in bitmasking code like:
+// x &^= 3 << shift // clear two old bits
+// x |= v << shift // set two new bits
+// when shift is a small constant and v ends up a constant 3.
+(Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1])) && ^(c1 | c2) == 0 => (Or8 (Const8 <t> [c1]) x)
+(Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1])) && ^(c1 | c2) == 0 => (Or16 (Const16 <t> [c1]) x)
+(Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1])) && ^(c1 | c2) == 0 => (Or32 (Const32 <t> [c1]) x)
+(Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1])) && ^(c1 | c2) == 0 => (Or64 (Const64 <t> [c1]) x)
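The condition ^(c1 | c2) == 0 says that the Or's constant c1 together with the And's mask c2 covers every bit, so any bit the And could clear is immediately set again and the And can be dropped. A small sketch of the bitmasking pattern from the comment, with shift and v fixed to illustrative constants:

package main

import "fmt"

func main() {
	const shift = 4
	const v = uint8(3)
	ok := true
	for i := 0; i < 256; i++ {
		x := uint8(i)
		withAnd := (x &^ (3 << shift)) | v<<shift // clear two bits, then set them
		withoutAnd := x | v<<shift                // the And is redundant: ^(c1|c2) == 0
		if withAnd != withoutAnd {
			ok = false
		}
	}
	fmt.Println(ok) // true
}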
+
+(Trunc64to8 (And64 (Const64 [y]) x)) && y&0xFF == 0xFF => (Trunc64to8 x)
+(Trunc64to16 (And64 (Const64 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc64to16 x)
+(Trunc64to32 (And64 (Const64 [y]) x)) && y&0xFFFFFFFF == 0xFFFFFFFF => (Trunc64to32 x)
+(Trunc32to8 (And32 (Const32 [y]) x)) && y&0xFF == 0xFF => (Trunc32to8 x)
+(Trunc32to16 (And32 (Const32 [y]) x)) && y&0xFFFF == 0xFFFF => (Trunc32to16 x)
+(Trunc16to8 (And16 (Const16 [y]) x)) && y&0xFF == 0xFF => (Trunc16to8 x)
+
+(ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 56 => x
+(ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 48 => x
+(ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s])))) && s >= 32 => x
+(ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 24 => x
+(ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s])))) && s >= 16 => x
+(ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s])))) && s >= 8 => x
+
+(SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s])))) && s >= 56 => x
+(SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s])))) && s >= 48 => x
+(SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s])))) && s >= 32 => x
+(SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s])))) && s >= 24 => x
+(SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s])))) && s >= 16 => x
+(SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s])))) && s >= 8 => x
+
+(Slicemask (Const32 [x])) && x > 0 => (Const32 [-1])
+(Slicemask (Const32 [0])) => (Const32 [0])
+(Slicemask (Const64 [x])) && x > 0 => (Const64 [-1])
+(Slicemask (Const64 [0])) => (Const64 [0])
+
+// simplifications often used for lengths. e.g. len(s[i:i+5])==5
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) x) => y
+(Sub(64|32|16|8) (Add(64|32|16|8) x y) y) => x
+(Sub(64|32|16|8) (Sub(64|32|16|8) x y) x) => (Neg(64|32|16|8) y)
+(Sub(64|32|16|8) x (Add(64|32|16|8) x y)) => (Neg(64|32|16|8) y)
+(Add(64|32|16|8) x (Sub(64|32|16|8) y x)) => y
+(Add(64|32|16|8) x (Add(64|32|16|8) y (Sub(64|32|16|8) z x))) => (Add(64|32|16|8) y z)
+
+// basic phi simplifications
+(Phi (Const8 [c]) (Const8 [c])) => (Const8 [c])
+(Phi (Const16 [c]) (Const16 [c])) => (Const16 [c])
+(Phi (Const32 [c]) (Const32 [c])) => (Const32 [c])
+(Phi (Const64 [c]) (Const64 [c])) => (Const64 [c])
+
+// slice and interface comparisons
+// The frontend ensures that we can only compare against nil,
+// so we need only compare the first word (interface type or slice ptr).
+(EqInter x y) => (EqPtr (ITab x) (ITab y))
+(NeqInter x y) => (NeqPtr (ITab x) (ITab y))
+(EqSlice x y) => (EqPtr (SlicePtr x) (SlicePtr y))
+(NeqSlice x y) => (NeqPtr (SlicePtr x) (SlicePtr y))
+
+// Load of store of same address, with compatibly typed value and same size
+(Load <t1> p1 (Store {t2} p2 x _))
+ && isSamePtr(p1, p2)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ && isSamePtr(p1, p3)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p3, t3.Size(), p2, t2.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ && isSamePtr(p1, p4)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p4, t4.Size(), p2, t2.Size())
+ && disjoint(p4, t4.Size(), p3, t3.Size())
+ => x
+(Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ && isSamePtr(p1, p5)
+ && t1.Compare(x.Type) == types.CMPeq
+ && t1.Size() == t2.Size()
+ && disjoint(p5, t5.Size(), p2, t2.Size())
+ && disjoint(p5, t5.Size(), p3, t3.Size())
+ && disjoint(p5, t5.Size(), p4, t4.Size())
+ => x
+
+// Pass constants through math.Float{32,64}bits and math.Float{32,64}frombits
+(Load <t1> p1 (Store {t2} p2 (Const64 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x))) => (Const64F [math.Float64frombits(uint64(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32 [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x)))) => (Const32F [math.Float32frombits(uint32(x))])
+(Load <t1> p1 (Store {t2} p2 (Const64F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1) => (Const64 [int64(math.Float64bits(x))])
+(Load <t1> p1 (Store {t2} p2 (Const32F [x]) _)) && isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1) => (Const32 [int32(math.Float32bits(x))])
+
+// Float Loads up the memory chain, past intervening disjoint Stores, to a Zero so they can be constant folded.
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ mem:(Zero [n] p3 _)))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ mem:(Zero [n] p4 _))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ mem:(Zero [n] p5 _)))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+(Load <t1> op:(OffPtr [o1] p1)
+ (Store {t2} p2 _
+ (Store {t3} p3 _
+ (Store {t4} p4 _
+ (Store {t5} p5 _
+ mem:(Zero [n] p6 _))))))
+ && o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6)
+ && fe.CanSSA(t1)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ && disjoint(op, t1.Size(), p5, t5.Size())
+ => @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+
+// Zero to Load forwarding.
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && t1.IsBoolean()
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (ConstBool [false])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is8BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 1
+ => (Const8 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is16BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 2
+ => (Const16 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitInt(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64 [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is32BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 4
+ => (Const32F [0])
+(Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ && is64BitFloat(t1)
+ && isSamePtr(p1, p2)
+ && n >= o + 8
+ => (Const64F [0])
+
+// Eliminate stores of values that have just been loaded from the same location.
+// We also handle the common case where there are some intermediate stores.
+(Store {t1} p1 (Load <t2> p2 mem) mem)
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ => mem
+(Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ && isSamePtr(p1, p2)
+ && t2.Size() == t1.Size()
+ && disjoint(p1, t1.Size(), p3, t3.Size())
+ && disjoint(p1, t1.Size(), p4, t4.Size())
+ && disjoint(p1, t1.Size(), p5, t5.Size())
+ => mem
+
+// Don't Store zeros to cleared variables.
+(Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ && isConstZero(x)
+ && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ => mem
+(Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ && isConstZero(x)
+ && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5)
+ && disjoint(op, t1.Size(), p2, t2.Size())
+ && disjoint(op, t1.Size(), p3, t3.Size())
+ && disjoint(op, t1.Size(), p4, t4.Size())
+ => mem
+
+// Collapse OffPtr
+(OffPtr (OffPtr p [y]) [x]) => (OffPtr p [x+y])
+(OffPtr p [0]) && v.Type.Compare(p.Type) == types.CMPeq => p
+
+// indexing operations
+// Note: bounds check has already been done
+(PtrIndex <t> ptr idx) && config.PtrSize == 4 && is32Bit(t.Elem().Size()) => (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+(PtrIndex <t> ptr idx) && config.PtrSize == 8 => (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+
+// struct operations
+(StructSelect (StructMake1 x)) => x
+(StructSelect [0] (StructMake2 x _)) => x
+(StructSelect [1] (StructMake2 _ x)) => x
+(StructSelect [0] (StructMake3 x _ _)) => x
+(StructSelect [1] (StructMake3 _ x _)) => x
+(StructSelect [2] (StructMake3 _ _ x)) => x
+(StructSelect [0] (StructMake4 x _ _ _)) => x
+(StructSelect [1] (StructMake4 _ x _ _)) => x
+(StructSelect [2] (StructMake4 _ _ x _)) => x
+(StructSelect [3] (StructMake4 _ _ _ x)) => x
+
+(Load <t> _ _) && t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t) =>
+ (StructMake0)
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t) =>
+ (StructMake1
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t) =>
+ (StructMake2
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t) =>
+ (StructMake3
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+(Load <t> ptr mem) && t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t) =>
+ (StructMake4
+ (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem)
+ (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem)
+ (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem)
+ (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+
+(StructSelect [i] x:(Load <t> ptr mem)) && !fe.CanSSA(t) =>
+ @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+
+(Store _ (StructMake0) mem) => mem
+(Store dst (StructMake1 <t> f0) mem) =>
+ (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+(Store dst (StructMake2 <t> f0 f1) mem) =>
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))
+(Store dst (StructMake3 <t> f0 f1 f2) mem) =>
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem)))
+(Store dst (StructMake4 <t> f0 f1 f2 f3) mem) =>
+ (Store {t.FieldType(3)}
+ (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst)
+ f3
+ (Store {t.FieldType(2)}
+ (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst)
+ f2
+ (Store {t.FieldType(1)}
+ (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst)
+ f1
+ (Store {t.FieldType(0)}
+ (OffPtr <t.FieldType(0).PtrTo()> [0] dst)
+ f0 mem))))
+
+// Putting struct{*byte} and similar into direct interfaces.
+(IMake _typ (StructMake1 val)) => (IMake _typ val)
+(StructSelect [0] (IData x)) => (IData x)
+
+// un-SSAable values use mem->mem copies
+(Store {t} dst (Load src mem) mem) && !fe.CanSSA(t) =>
+ (Move {t} [t.Size()] dst src mem)
+(Store {t} dst (Load src mem) (VarDef {x} mem)) && !fe.CanSSA(t) =>
+ (Move {t} [t.Size()] dst src (VarDef {x} mem))
+
+// array ops
+(ArraySelect (ArrayMake1 x)) => x
+
+(Load <t> _ _) && t.IsArray() && t.NumElem() == 0 =>
+ (ArrayMake0)
+
+(Load <t> ptr mem) && t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t) =>
+ (ArrayMake1 (Load <t.Elem()> ptr mem))
+
+(Store _ (ArrayMake0) mem) => mem
+(Store dst (ArrayMake1 e) mem) => (Store {e.Type} dst e mem)
+
+// Putting [1]*byte and similar into direct interfaces.
+(IMake _typ (ArrayMake1 val)) => (IMake _typ val)
+(ArraySelect [0] (IData x)) => (IData x)
+
+// string ops
+// Decomposing StringMake and lowering of StringPtr and StringLen
+// happen in a later pass, dec, so that these operations are available
+// to other passes for optimization.
+(StringPtr (StringMake (Addr <t> {s} base) _)) => (Addr <t> {s} base)
+(StringLen (StringMake _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(ConstString {str}) && config.PtrSize == 4 && str == "" =>
+ (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 8 && str == "" =>
+ (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+(ConstString {str}) && config.PtrSize == 4 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const32 <typ.Int> [int32(len(str))]))
+(ConstString {str}) && config.PtrSize == 8 && str != "" =>
+ (StringMake
+ (Addr <typ.BytePtr> {fe.StringData(str)}
+ (SB))
+ (Const64 <typ.Int> [int64(len(str))]))
+
+// slice ops
+// Only a few slice rules are provided here. See dec.rules for
+// a more comprehensive set.
+(SliceLen (SliceMake _ (Const64 <t> [c]) _)) => (Const64 <t> [c])
+(SliceCap (SliceMake _ _ (Const64 <t> [c]))) => (Const64 <t> [c])
+(SliceLen (SliceMake _ (Const32 <t> [c]) _)) => (Const32 <t> [c])
+(SliceCap (SliceMake _ _ (Const32 <t> [c]))) => (Const32 <t> [c])
+(SlicePtr (SliceMake (SlicePtr x) _ _)) => (SlicePtr x)
+(SliceLen (SliceMake _ (SliceLen x) _)) => (SliceLen x)
+(SliceCap (SliceMake _ _ (SliceCap x))) => (SliceCap x)
+(SliceCap (SliceMake _ _ (SliceLen x))) => (SliceLen x)
+(ConstSlice) && config.PtrSize == 4 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const32 <typ.Int> [0])
+ (Const32 <typ.Int> [0]))
+(ConstSlice) && config.PtrSize == 8 =>
+ (SliceMake
+ (ConstNil <v.Type.Elem().PtrTo()>)
+ (Const64 <typ.Int> [0])
+ (Const64 <typ.Int> [0]))
+
+// interface ops
+(ConstInterface) =>
+ (IMake
+ (ConstNil <typ.Uintptr>)
+ (ConstNil <typ.BytePtr>))
+
+(NilCheck (GetG mem) mem) => mem
+
+(If (Not cond) yes no) => (If cond no yes)
+(If (ConstBool [c]) yes no) && c => (First yes no)
+(If (ConstBool [c]) yes no) && !c => (First no yes)
+
+// Get rid of Convert ops for pointer arithmetic on unsafe.Pointer.
+(Convert (Add(64|32) (Convert ptr mem) off) mem) => (AddPtr ptr off)
+(Convert (Convert ptr mem) mem) => ptr
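These two rules undo the uintptr round-trip that Go source performs for unsafe pointer arithmetic. A minimal sketch of the source-level pattern they target (illustrative values; the conversion sequence is what produces the Convert/Add/Convert shape):

    package main

    import (
        "fmt"
        "unsafe"
    )

    func main() {
        a := [4]int32{10, 20, 30, 40}
        p := unsafe.Pointer(&a[0])
        // unsafe.Pointer -> uintptr -> add an offset -> unsafe.Pointer; in SSA this
        // is Convert(Add(Convert ptr mem) off) mem, which the rules fold to AddPtr.
        q := unsafe.Pointer(uintptr(p) + 2*unsafe.Sizeof(a[0]))
        fmt.Println(*(*int32)(q)) // 30
    }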
+
+// strength reduction of divide by a constant.
+// See ../magic.go for a detailed description of these algorithms.
+
+// Unsigned divide by power of 2. Strength reduce to a shift.
+(Div8u n (Const8 [c])) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16u n (Const16 [c])) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32u n (Const32 [c])) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64u n (Const64 [c])) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64u n (Const64 [-1<<63])) => (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
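A trivial sanity check of the shift strength reduction (sample value only):

    package main

    import "fmt"

    func main() {
        x := uint32(1234)
        fmt.Println(x/8 == x>>3) // true: an unsigned divide by 2^3 is a logical right shift by 3
    }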
+
+// Signed non-negative divide by power of 2.
+(Div8 n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+(Div16 n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+(Div32 n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+(Div64 n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+(Div64 n (Const64 [-1<<63])) && isNonNegative(n) => (Const64 [0])
+
+// Unsigned divide, not a power of 2. Strength reduce to a multiply.
+// For 8-bit divides, we just do a direct 9-bit by 8-bit multiply.
+(Div8u x (Const8 [c])) && umagicOK8(c) =>
+ (Trunc32to8
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)])
+ (ZeroExt8to32 x))
+ (Const64 <typ.UInt64> [8+umagic8(c).s])))
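A sketch of the magic-multiply idea behind the rule above, using hand-derived sample constants rather than the compiler's umagic8 output: dividing a uint8 by 3 with a 9-bit multiplier (342 = 1<<8 + 86) and a shift of 10 (= 8 + 2), verified exhaustively:

    package main

    import "fmt"

    func main() {
        const m, s = 342, 10 // sample magic pair for c = 3; magic.go derives the real values
        ok := true
        for x := uint32(0); x < 256; x++ {
            if x*m>>s != x/3 {
                ok = false
            }
        }
        fmt.Println(ok) // true: (x*342)>>10 == x/3 for every uint8 x
    }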
+
+// For 16-bit divides on 64-bit machines, we do a direct 17-bit by 16-bit multiply.
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 8 =>
+ (Trunc64to16
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)])
+ (ZeroExt16to64 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s])))
+
+// For 16-bit divides on 32-bit machines
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)])
+ (ZeroExt16to32 x))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && c&1 == 0 =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+(Div16u x (Const16 [c])) && umagicOK16(c) && config.RegSize == 4 && config.useAvg =>
+ (Trunc32to16
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16]))
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic16(c).m)])
+ (ZeroExt16to32 x)))
+ (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+
+// For 32-bit divides on 32-bit machines
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic32(c).s-2]))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul =>
+ (Rsh32Ux64 <typ.UInt32>
+ (Avg32u
+ x
+ (Hmul32u <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(umagic32(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic32(c).s-1]))
+
+// For 32-bit divides on 64-bit machines
+// We'll use a regular (non-hi) multiply for this case.
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)])
+ (ZeroExt32to64 x))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && c&1 == 0 =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+(Div32u x (Const32 [c])) && umagicOK32(c) && config.RegSize == 8 && config.useAvg =>
+ (Trunc64to32
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32]))
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt32> [int64(umagic32(c).m)])
+ (ZeroExt32to64 x)))
+ (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+
+// For unsigned 64-bit divides on 32-bit machines,
+// if the constant fits in 16 bits (so that the last term
+// fits in 32 bits), convert to three 32-bit divides by a constant.
+//
+// If 1<<32 = Q * c + R
+// and x = hi << 32 + lo
+//
+// Then x = (hi/c*c + hi%c) << 32 + lo
+// = hi/c*c<<32 + hi%c<<32 + lo
+// = hi/c*c<<32 + (hi%c)*(Q*c+R) + lo/c*c + lo%c
+// = hi/c*c<<32 + (hi%c)*Q*c + lo/c*c + (hi%c*R+lo%c)
+// and x / c = (hi/c)<<32 + (hi%c)*Q + lo/c + (hi%c*R+lo%c)/c
+(Div64u x (Const64 [c])) && c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul =>
+ (Add64
+ (Add64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Lsh64x64 <typ.UInt64>
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [32]))
+ (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))))
+ (Mul64 <typ.UInt64>
+ (ZeroExt32to64 <typ.UInt64>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)])))
+ (Const64 <typ.UInt64> [int64((1<<32)/c)])))
+ (ZeroExt32to64
+ (Div32u <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)]))
+ (Mul32 <typ.UInt32>
+ (Mod32u <typ.UInt32>
+ (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32])))
+ (Const32 <typ.UInt32> [int32(c)]))
+ (Const32 <typ.UInt32> [int32((1<<32)%c)])))
+ (Const32 <typ.UInt32> [int32(c)]))))
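A self-contained check of the identity derived in the comment above, for one sample divisor and dividend (illustrative only; any c that fits in 16 bits works):

    package main

    import "fmt"

    func main() {
        const c = 10007 // fits in 16 bits
        x := uint64(0x123456789abcdef0)
        hi, lo := x>>32, x&0xffffffff
        Q, R := uint64(1<<32)/c, uint64(1<<32)%c
        got := (hi/c)<<32 + (hi%c)*Q + lo/c + ((hi%c)*R+lo%c)/c
        fmt.Println(got == x/c) // true
    }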
+
+// For 64-bit divides on 64-bit machines
+// (64-bit divides on 32-bit machines are lowered to a runtime call by the walk pass.)
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)])
+ (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1])))
+ (Const64 <typ.UInt64> [umagic64(c).s-2]))
+(Div64u x (Const64 [c])) && umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul =>
+ (Rsh64Ux64 <typ.UInt64>
+ (Avg64u
+ x
+ (Hmul64u <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(umagic64(c).m)])
+ x))
+ (Const64 <typ.UInt64> [umagic64(c).s-1]))
+
+// Signed divide by a negative constant. Rewrite to divide by a positive constant.
+(Div8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+(Div16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+(Div32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+(Div64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+
+// Dividing by the most-negative number. Result is always 0 except
+// if the input is also the most-negative number.
+// We can detect that using the sign bit of x & -x.
+(Div8 <t> x (Const8 [-1<<7 ])) => (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+(Div16 <t> x (Const16 [-1<<15])) => (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+(Div32 <t> x (Const32 [-1<<31])) => (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+(Div64 <t> x (Const64 [-1<<63])) => (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
+
+// Signed divide by power of 2.
+// n / c = n >> log(c) if n >= 0
+// = (n+c-1) >> log(c) if n < 0
+// We conditionally add c-1 by adding n>>63>>(64-log(c)) (first shift signed, second shift unsigned).
+(Div8 <t> n (Const8 [c])) && isPowerOfTwo8(c) =>
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))])))
+ (Const64 <typ.UInt64> [int64(log8(c))]))
+(Div16 <t> n (Const16 [c])) && isPowerOfTwo16(c) =>
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))])))
+ (Const64 <typ.UInt64> [int64(log16(c))]))
+(Div32 <t> n (Const32 [c])) && isPowerOfTwo32(c) =>
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))])))
+ (Const64 <typ.UInt64> [int64(log32(c))]))
+(Div64 <t> n (Const64 [c])) && isPowerOfTwo64(c) =>
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))])))
+ (Const64 <typ.UInt64> [int64(log64(c))]))
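A sketch of the bias trick described above, specialized to 64-bit n and c = 4 (log(c) = 2, so the bias n>>63>>62 is 3 exactly when n is negative); div4 is an illustrative name:

    package main

    import "fmt"

    func div4(n int64) int64 {
        bias := int64(uint64(n>>63) >> 62) // 3 if n < 0, else 0
        return (n + bias) >> 2             // arithmetic shift after the conditional add
    }

    func main() {
        for _, n := range []int64{-9, -8, -1, 0, 1, 7, 8} {
            fmt.Println(n, div4(n) == n/4) // all true: matches Go's truncated division
        }
    }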
+
+// Signed divide, not a power of 2. Strength reduce to a multiply.
+(Div8 <t> x (Const8 [c])) && smagicOK8(c) =>
+ (Sub8 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic8(c).m)])
+ (SignExt8to32 x))
+ (Const64 <typ.UInt64> [8+smagic8(c).s]))
+ (Rsh32x64 <t>
+ (SignExt8to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div16 <t> x (Const16 [c])) && smagicOK16(c) =>
+ (Sub16 <t>
+ (Rsh32x64 <t>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(smagic16(c).m)])
+ (SignExt16to32 x))
+ (Const64 <typ.UInt64> [16+smagic16(c).s]))
+ (Rsh32x64 <t>
+ (SignExt16to32 x)
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 8 =>
+ (Sub32 <t>
+ (Rsh64x64 <t>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(smagic32(c).m)])
+ (SignExt32to64 x))
+ (Const64 <typ.UInt64> [32+smagic32(c).s]))
+ (Rsh64x64 <t>
+ (SignExt32to64 x)
+ (Const64 <typ.UInt64> [63])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s-1]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div32 <t> x (Const32 [c])) && smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul =>
+ (Sub32 <t>
+ (Rsh32x64 <t>
+ (Add32 <t>
+ (Hmul32 <t>
+ (Const32 <typ.UInt32> [int32(smagic32(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic32(c).s]))
+ (Rsh32x64 <t>
+ x
+ (Const64 <typ.UInt64> [31])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m/2)])
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s-1]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+(Div64 <t> x (Const64 [c])) && smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul =>
+ (Sub64 <t>
+ (Rsh64x64 <t>
+ (Add64 <t>
+ (Hmul64 <t>
+ (Const64 <typ.UInt64> [int64(smagic64(c).m)])
+ x)
+ x)
+ (Const64 <typ.UInt64> [smagic64(c).s]))
+ (Rsh64x64 <t>
+ x
+ (Const64 <typ.UInt64> [63])))
+
+// Unsigned mod by power of 2 constant.
+(Mod8u <t> n (Const8 [c])) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16u <t> n (Const16 [c])) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32u <t> n (Const32 [c])) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64u <t> n (Const64 [c])) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64u <t> n (Const64 [-1<<63])) => (And64 n (Const64 <t> [1<<63-1]))
+
+// Signed non-negative mod by power of 2 constant.
+(Mod8 <t> n (Const8 [c])) && isNonNegative(n) && isPowerOfTwo8(c) => (And8 n (Const8 <t> [c-1]))
+(Mod16 <t> n (Const16 [c])) && isNonNegative(n) && isPowerOfTwo16(c) => (And16 n (Const16 <t> [c-1]))
+(Mod32 <t> n (Const32 [c])) && isNonNegative(n) && isPowerOfTwo32(c) => (And32 n (Const32 <t> [c-1]))
+(Mod64 <t> n (Const64 [c])) && isNonNegative(n) && isPowerOfTwo64(c) => (And64 n (Const64 <t> [c-1]))
+(Mod64 n (Const64 [-1<<63])) && isNonNegative(n) => n
+
+// Signed mod by negative constant.
+(Mod8 <t> n (Const8 [c])) && c < 0 && c != -1<<7 => (Mod8 <t> n (Const8 <t> [-c]))
+(Mod16 <t> n (Const16 [c])) && c < 0 && c != -1<<15 => (Mod16 <t> n (Const16 <t> [-c]))
+(Mod32 <t> n (Const32 [c])) && c < 0 && c != -1<<31 => (Mod32 <t> n (Const32 <t> [-c]))
+(Mod64 <t> n (Const64 [c])) && c < 0 && c != -1<<63 => (Mod64 <t> n (Const64 <t> [-c]))
+
+// For all other mods by constants, compute A%B as A-(A/B*B).
+// This implements % with two * and a bunch of ancillary ops.
+// One of the * is free if the user's code also computes A/B.
+(Mod8 <t> x (Const8 [c])) && x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ => (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16 <t> x (Const16 [c])) && x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ => (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32 <t> x (Const32 [c])) && x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ => (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64 <t> x (Const64 [c])) && x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ => (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+(Mod8u <t> x (Const8 [c])) && x.Op != OpConst8 && c > 0 && umagicOK8( c)
+ => (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+(Mod16u <t> x (Const16 [c])) && x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ => (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+(Mod32u <t> x (Const32 [c])) && x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ => (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+(Mod64u <t> x (Const64 [c])) && x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ => (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
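A one-line illustration of the identity these rules rely on; when the quotient is computed anyway, the remainder costs only the extra multiply and subtract:

    package main

    import "fmt"

    func main() {
        a, b := int32(-17), int32(5)
        q := a / b
        fmt.Println(a%b == a-q*b) // true under Go's truncated division and remainder
    }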
+
+// For architectures without rotates narrower than 32 bits, promote these checks to 32-bit.
+(Eq8 (Mod8u x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+(Eq16 (Mod16u x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+(Eq8 (Mod8 x (Const8 [c])) (Const8 [0])) && x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+(Eq16 (Mod16 x (Const16 [c])) (Const16 [0])) && x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config) =>
+ (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+
+// Divisibility checks x%c == 0 convert to multiply and rotate.
+// Note: x%c == 0 is rewritten as x == c*(x/c) during the opt pass,
+// where (x/c) is performed using multiplication with magic constants.
+// Rewriting x%c == 0 therefore requires pattern matching the rewritten expression
+// and checking that the division by the same constant wasn't already calculated.
+// This check is made by counting uses of the magic constant multiplication.
+// Note that if there were an intermediate opt pass, this rule could be applied
+// directly on the Div op and magic division rewrites could be delayed to late opt.
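The target form is the classic rotate-based divisibility test: for unsigned x and c = d<<k with d odd, x%c == 0 exactly when rotating x*inverse(d) right by k yields a value at most (2^32-1)/c. A hedged sketch of that idea (the compiler takes m, k, and max from the udivisible* helpers; the inverse computation below is a hand-rolled stand-in):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // inv32 returns the multiplicative inverse of an odd d modulo 2^32
    // (Newton iteration; illustrative helper, not the compiler's code).
    func inv32(d uint32) uint32 {
        x := d // correct to at least 3 bits for odd d
        for i := 0; i < 5; i++ {
            x *= 2 - d*x
        }
        return x
    }

    func main() {
        const c = 12 // c = 3 << 2, so d = 3, k = 2
        m, k, max := inv32(3), 2, uint32(0xFFFFFFFF)/c
        for _, x := range []uint32{0, 6, 12, 24, 25, 1000, 1008} {
            trick := bits.RotateLeft32(x*m, -k) <= max
            fmt.Println(x, (x%c == 0) == trick) // all true
        }
    }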
+
+// Unsigned divisibility checks convert to multiply and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Trunc32to8
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt8to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s
+ && x.Op != OpConst8 && udivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(udivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(udivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc64to16
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt16to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ mul:(Mul32
+ (Const32 [m])
+ (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Trunc32to16
+ (Rsh32Ux64
+ (Avg32u
+ (Lsh32x64 (ZeroExt16to32 x) (Const64 [16]))
+ mul:(Mul32
+ (Const32 [m])
+ (ZeroExt16to32 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1
+ && x.Op != OpConst16 && udivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(udivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(udivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ mul:(Hmul32u
+ (Const32 <typ.UInt32> [m])
+ (Rsh32Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Rsh32Ux64
+ (Avg32u
+ x
+ mul:(Hmul32u
+ (Const32 [m])
+ x))
+ (Const64 [s]))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(umagic32(c).m) && s == umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ mul:(Mul64
+ (Const64 [m])
+ (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1])))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Trunc64to32
+ (Rsh64Ux64
+ (Avg64u
+ (Lsh64x64 (ZeroExt32to64 x) (Const64 [32]))
+ mul:(Mul64
+ (Const64 [m])
+ (ZeroExt32to64 x)))
+ (Const64 [s])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1
+ && x.Op != OpConst32 && udivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(udivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(udivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ mul:(Hmul64u
+ (Const64 [m])
+ (Rsh64Ux64 x (Const64 [1])))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+(Eq64 x (Mul64 (Const64 [c])
+ (Rsh64Ux64
+ (Avg64u
+ x
+ mul:(Hmul64u
+ (Const64 [m])
+ x))
+ (Const64 [s]))
+ )
+) && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(umagic64(c).m) && s == umagic64(c).s-1
+ && x.Op != OpConst64 && udivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(udivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [64-udivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(udivisible64(c).max)])
+ )
+
+// Signed divisibility checks convert to multiply, add and rotate.
+(Eq8 x (Mul8 (Const8 [c])
+ (Sub8
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt8to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt8to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic8(c).m) && s == 8+smagic8(c).s
+ && x.Op != OpConst8 && sdivisibleOK8(c)
+ => (Leq8U
+ (RotateLeft8 <typ.UInt8>
+ (Add8 <typ.UInt8>
+ (Mul8 <typ.UInt8>
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).m)])
+ x)
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).a)])
+ )
+ (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)])
+ )
+ (Const8 <typ.UInt8> [int8(sdivisible8(c).max)])
+ )
+
+(Eq16 x (Mul16 (Const16 [c])
+ (Sub16
+ (Rsh32x64
+ mul:(Mul32
+ (Const32 [m])
+ (SignExt16to32 x))
+ (Const64 [s]))
+ (Rsh32x64
+ (SignExt16to32 x)
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic16(c).m) && s == 16+smagic16(c).s
+ && x.Op != OpConst16 && sdivisibleOK16(c)
+ => (Leq16U
+ (RotateLeft16 <typ.UInt16>
+ (Add16 <typ.UInt16>
+ (Mul16 <typ.UInt16>
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).m)])
+ x)
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).a)])
+ )
+ (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)])
+ )
+ (Const16 <typ.UInt16> [int16(sdivisible16(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh64x64
+ mul:(Mul64
+ (Const64 [m])
+ (SignExt32to64 x))
+ (Const64 [s]))
+ (Rsh64x64
+ (SignExt32to64 x)
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic32(c).m) && s == 32+smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq32 x (Mul32 (Const32 [c])
+ (Sub32
+ (Rsh32x64
+ (Add32
+ mul:(Hmul32
+ (Const32 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh32x64
+ x
+ (Const64 [31])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int32(smagic32(c).m) && s == smagic32(c).s
+ && x.Op != OpConst32 && sdivisibleOK32(c)
+ => (Leq32U
+ (RotateLeft32 <typ.UInt32>
+ (Add32 <typ.UInt32>
+ (Mul32 <typ.UInt32>
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).m)])
+ x)
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).a)])
+ )
+ (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)])
+ )
+ (Const32 <typ.UInt32> [int32(sdivisible32(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+(Eq64 x (Mul64 (Const64 [c])
+ (Sub64
+ (Rsh64x64
+ (Add64
+ mul:(Hmul64
+ (Const64 [m])
+ x)
+ x)
+ (Const64 [s]))
+ (Rsh64x64
+ x
+ (Const64 [63])))
+ )
+)
+ && v.Block.Func.pass.name != "opt" && mul.Uses == 1
+ && m == int64(smagic64(c).m) && s == smagic64(c).s
+ && x.Op != OpConst64 && sdivisibleOK64(c)
+ => (Leq64U
+ (RotateLeft64 <typ.UInt64>
+ (Add64 <typ.UInt64>
+ (Mul64 <typ.UInt64>
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).m)])
+ x)
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).a)])
+ )
+ (Const64 <typ.UInt64> [64-sdivisible64(c).k])
+ )
+ (Const64 <typ.UInt64> [int64(sdivisible64(c).max)])
+ )
+
+// A divisibility check of a signed integer by a power-of-two constant is a simple mask.
+// However, we must match against the rewritten n%c == 0 -> n - c*(n/c) == 0 -> n == c*(n/c),
+// where n/c contains fixup code to handle signed n.
+((Eq8|Neq8) n (Lsh8x64
+ (Rsh8x64
+ (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 7 && kbar == 8 - k
+ => ((Eq8|Neq8) (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+
+((Eq16|Neq16) n (Lsh16x64
+ (Rsh16x64
+ (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 15 && kbar == 16 - k
+ => ((Eq16|Neq16) (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+
+((Eq32|Neq32) n (Lsh32x64
+ (Rsh32x64
+ (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 31 && kbar == 32 - k
+ => ((Eq32|Neq32) (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+
+((Eq64|Neq64) n (Lsh64x64
+ (Rsh64x64
+ (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar])))
+ (Const64 <typ.UInt64> [k]))
+ (Const64 <typ.UInt64> [k]))
+) && k > 0 && k < 63 && kbar == 64 - k
+ => ((Eq64|Neq64) (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
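For example, with k = 2 these rules reduce a signed n%4 == 0 test to a mask of the two low bits, which holds for negative n as well under Go's truncated remainder:

    package main

    import "fmt"

    func main() {
        for _, n := range []int32{-8, -6, -1, 0, 3, 4, 7, 12} {
            fmt.Println(n, (n%4 == 0) == (n&3 == 0)) // all true
        }
    }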
+
+(Eq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Eq(8|16|32|64) x y)
+(Neq(8|16|32|64) s:(Sub(8|16|32|64) x y) (Const(8|16|32|64) [0])) && s.Uses == 1 => (Neq(8|16|32|64) x y)
+
+// Optimize bitsets
+(Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+(Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y])) && oneBit8(y)
+ => (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+(Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y])) && oneBit16(y)
+ => (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+(Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y])) && oneBit32(y)
+ => (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+(Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y])) && oneBit64(y)
+ => (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+
+// Reassociate expressions involving constants so that the constants come first,
+// exposing obvious constant-folding opportunities:
+// rewrite (op (op y C) x) to (op C (op x y)) or similar, where C is constant.
+// This pushes constants to the outside of the expression, where any
+// constant-folding opportunities become obvious.
+// Note: don't include AddPtr here! In order to maintain the
+// invariant that pointers must stay within the pointed-to object,
+// we can't pull part of a pointer computation above the AddPtr.
+// See issue 37881.
+// Note: we don't need to handle any (x-C) cases because we already rewrite
+// (x-C) to (x+(-C)).
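A concrete illustration (hypothetical source, not from the compiler's tests): after the reassociation rules below run and the constant-folding rules further down combine the literals, the two constants in this function collapse into a single 30.

    package main

    import "fmt"

    func f(x, y int64) int64 {
        return (x + 10) + (y + 20) // reassociates toward 10 + (20 + (x + y)), then folds to 30 + (x + y)
    }

    func main() {
        fmt.Println(f(1, 2)) // 33
    }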
+
+// x + (C + z) -> C + (x + z)
+(Add64 (Add64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Add64 <t> z x))
+(Add32 (Add32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Add32 <t> z x))
+(Add16 (Add16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Add16 <t> z x))
+(Add8 (Add8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Add8 <t> z x))
+
+// x + (C - z) -> C + (x - z)
+(Add64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> x z))
+(Add32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> x z))
+(Add16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> x z))
+(Add8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> x z))
+
+// x - (C - z) -> x + (z - C) -> (x + z) - C
+(Sub64 x (Sub64 i:(Const64 <t>) z)) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Add64 <t> x z) i)
+(Sub32 x (Sub32 i:(Const32 <t>) z)) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Add32 <t> x z) i)
+(Sub16 x (Sub16 i:(Const16 <t>) z)) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Add16 <t> x z) i)
+(Sub8 x (Sub8 i:(Const8 <t>) z)) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Add8 <t> x z) i)
+
+// x - (z + C) -> x + (-z - C) -> (x - z) - C
+(Sub64 x (Add64 z i:(Const64 <t>))) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 (Sub64 <t> x z) i)
+(Sub32 x (Add32 z i:(Const32 <t>))) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 (Sub32 <t> x z) i)
+(Sub16 x (Add16 z i:(Const16 <t>))) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 (Sub16 <t> x z) i)
+(Sub8 x (Add8 z i:(Const8 <t>))) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 (Sub8 <t> x z) i)
+
+// (C - z) - x -> C - (z + x)
+(Sub64 (Sub64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Sub64 i (Add64 <t> z x))
+(Sub32 (Sub32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Sub32 i (Add32 <t> z x))
+(Sub16 (Sub16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Sub16 i (Add16 <t> z x))
+(Sub8 (Sub8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Sub8 i (Add8 <t> z x))
+
+// (z + C) -x -> C + (z - x)
+(Sub64 (Add64 z i:(Const64 <t>)) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Add64 i (Sub64 <t> z x))
+(Sub32 (Add32 z i:(Const32 <t>)) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Add32 i (Sub32 <t> z x))
+(Sub16 (Add16 z i:(Const16 <t>)) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Add16 i (Sub16 <t> z x))
+(Sub8 (Add8 z i:(Const8 <t>)) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Add8 i (Sub8 <t> z x))
+
+// x & (C & z) -> C & (x & z)
+(And64 (And64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (And64 i (And64 <t> z x))
+(And32 (And32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (And32 i (And32 <t> z x))
+(And16 (And16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (And16 i (And16 <t> z x))
+(And8 (And8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (And8 i (And8 <t> z x))
+
+// x | (C | z) -> C | (x | z)
+(Or64 (Or64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Or64 i (Or64 <t> z x))
+(Or32 (Or32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Or32 i (Or32 <t> z x))
+(Or16 (Or16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Or16 i (Or16 <t> z x))
+(Or8 (Or8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Or8 i (Or8 <t> z x))
+
+// x ^ (C ^ z) -> C ^ (x ^ z)
+(Xor64 (Xor64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Xor64 i (Xor64 <t> z x))
+(Xor32 (Xor32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Xor32 i (Xor32 <t> z x))
+(Xor16 (Xor16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Xor16 i (Xor16 <t> z x))
+(Xor8 (Xor8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Xor8 i (Xor8 <t> z x))
+
+// x * (C * z) -> C * (x * z)
+(Mul64 (Mul64 i:(Const64 <t>) z) x) && (z.Op != OpConst64 && x.Op != OpConst64) => (Mul64 i (Mul64 <t> x z))
+(Mul32 (Mul32 i:(Const32 <t>) z) x) && (z.Op != OpConst32 && x.Op != OpConst32) => (Mul32 i (Mul32 <t> x z))
+(Mul16 (Mul16 i:(Const16 <t>) z) x) && (z.Op != OpConst16 && x.Op != OpConst16) => (Mul16 i (Mul16 <t> x z))
+(Mul8 (Mul8 i:(Const8 <t>) z) x) && (z.Op != OpConst8 && x.Op != OpConst8) => (Mul8 i (Mul8 <t> x z))
+
+// C + (D + x) -> (C + D) + x
+(Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c+d]) x)
+
+// C + (D - x) -> (C + D) - x
+(Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c+d]) x)
+(Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c+d]) x)
+(Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c+d]) x)
+(Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c+d]) x)
+
+// C - (D - x) -> (C - D) + x
+(Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x)) => (Add64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x)) => (Add32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x)) => (Add16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x)) => (Add8 (Const8 <t> [c-d]) x)
+
+// C - (D + x) -> (C - D) - x
+(Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x)) => (Sub64 (Const64 <t> [c-d]) x)
+(Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x)) => (Sub32 (Const32 <t> [c-d]) x)
+(Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x)) => (Sub16 (Const16 <t> [c-d]) x)
+(Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x)) => (Sub8 (Const8 <t> [c-d]) x)
+
+// C & (D & x) -> (C & D) & x
+(And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x)) => (And64 (Const64 <t> [c&d]) x)
+(And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x)) => (And32 (Const32 <t> [c&d]) x)
+(And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x)) => (And16 (Const16 <t> [c&d]) x)
+(And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x)) => (And8 (Const8 <t> [c&d]) x)
+
+// C | (D | x) -> (C | D) | x
+(Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x)) => (Or64 (Const64 <t> [c|d]) x)
+(Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x)) => (Or32 (Const32 <t> [c|d]) x)
+(Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x)) => (Or16 (Const16 <t> [c|d]) x)
+(Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x)) => (Or8 (Const8 <t> [c|d]) x)
+
+// C ^ (D ^ x) -> (C ^ D) ^ x
+(Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x)) => (Xor64 (Const64 <t> [c^d]) x)
+(Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x)) => (Xor32 (Const32 <t> [c^d]) x)
+(Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x)) => (Xor16 (Const16 <t> [c^d]) x)
+(Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x)) => (Xor8 (Const8 <t> [c^d]) x)
+
+// C * (D * x) -> (C * D) * x
+(Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x)) => (Mul64 (Const64 <t> [c*d]) x)
+(Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x)) => (Mul32 (Const32 <t> [c*d]) x)
+(Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x)) => (Mul16 (Const16 <t> [c*d]) x)
+(Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x)) => (Mul8 (Const8 <t> [c*d]) x)
+
+// floating point optimizations
+(Mul(32|64)F x (Const(32|64)F [1])) => x
+(Mul32F x (Const32F [-1])) => (Neg32F x)
+(Mul64F x (Const64F [-1])) => (Neg64F x)
+(Mul32F x (Const32F [2])) => (Add32F x x)
+(Mul64F x (Const64F [2])) => (Add64F x x)
+
+(Div32F x (Const32F <t> [c])) && reciprocalExact32(c) => (Mul32F x (Const32F <t> [1/c]))
+(Div64F x (Const64F <t> [c])) && reciprocalExact64(c) => (Mul64F x (Const64F <t> [1/c]))
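The reciprocalExact guard is what keeps this rewrite exact: 0.5 is representable, so x/2 and x*0.5 always agree, while 1/3 rounds, so that division is left alone. A small illustration:

    package main

    import "fmt"

    func main() {
        x := 7.3
        fmt.Println(x/2 == x*0.5)     // true: multiplying by an exact reciprocal cannot change the result
        fmt.Println(x/3, x*(1.0/3.0)) // may differ in the last bit, so x/3 is not rewritten
    }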
+
+// rewrite single-precision sqrt expression "float32(math.Sqrt(float64(x)))"
+(Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x))) && sqrt0.Uses==1 => (Sqrt32 x)
+
+(Sqrt (Const64F [c])) && !math.IsNaN(math.Sqrt(c)) => (Const64F [math.Sqrt(c)])
+
+// for rewriting results of some late-expanded rewrites (below)
+(SelectN [0] (MakeResult x ___)) => x
+(SelectN [1] (MakeResult x y ___)) => y
+(SelectN [2] (MakeResult x y z ___)) => z
+
+// for late-expanded calls, recognize newobject and remove zeroing and nilchecks
+(Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ && isConstZero(x)
+ && isSameCall(call.Aux, "runtime.newobject")
+ => mem
+
+(NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
+
+(NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+ && isSameCall(call.Aux, "runtime.newobject")
+ && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ => (Invalid)
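These rules rely on runtime.newobject returning zeroed, non-nil memory, so zero stores into and nil checks of a fresh allocation are redundant. An illustrative source pattern (hypothetical):

    package main

    import "fmt"

    type point struct{ x, y int }

    func main() {
        p := new(point) // newobject: already zeroed and known non-nil
        p.x = 0         // a store of zero into the fresh object; removable by the rules above
        fmt.Println(*p) // {0 0}
    }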
+
+// for late-expanded calls, recognize memequal applied to a small read-only constant.
+// Support is limited to 1, 2, 4, and 8 byte sizes.
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ => (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config)
+ => (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+
+(StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem)
+ && isSameCall(callAux, "runtime.memequal")
+ && symIsRO(scon)
+ && canLoadUnaligned(config) && config.PtrSize == 8
+ => (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
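An illustrative source pattern (hypothetical) that can produce such a call: comparing a string against a short constant goes through runtime.memequal with a read-only operand, which these rules turn into a single small load and compare.

    package main

    import "fmt"

    func isAB(s string) bool {
        return s == "ab" // length check plus memequal(ptr, "ab", 2); the memequal folds to a 2-byte load
    }

    func main() {
        fmt.Println(isAB("ab"), isAB("xy")) // true false
    }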
+
+// Evaluate constant address comparisons.
+(EqPtr x x) => (ConstBool [true])
+(NeqPtr x x) => (ConstBool [false])
+(EqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x == y])
+(EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x == y && o == 0])
+(EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x == y && o1 == o2])
+(NeqPtr (Addr {x} _) (Addr {y} _)) => (ConstBool [x != y])
+(NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _))) => (ConstBool [x != y || o != 0])
+(NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _))) => (ConstBool [x != y || o1 != o2])
+(EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x == y])
+(EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x == y && o == 0])
+(EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x == y && o1 == o2])
+(NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _)) => (ConstBool [x != y])
+(NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _))) => (ConstBool [x != y || o != 0])
+(NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _))) => (ConstBool [x != y || o1 != o2])
+(EqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 == 0])
+(NeqPtr (OffPtr [o1] p1) p2) && isSamePtr(p1, p2) => (ConstBool [o1 != 0])
+(EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 == o2])
+(NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2)) && isSamePtr(p1, p2) => (ConstBool [o1 != o2])
+(EqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c == d])
+(NeqPtr (Const(32|64) [c]) (Const(32|64) [d])) => (ConstBool [c != d])
+
+(EqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [false])
+(EqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [false])
+(EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [false])
+(NeqPtr (LocalAddr _ _) (Addr _)) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (Addr _)) => (ConstBool [true])
+(NeqPtr (LocalAddr _ _) (OffPtr (Addr _))) => (ConstBool [true])
+(NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _))) => (ConstBool [true])
+
+// Simplify address comparisons.
+(EqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (Not (IsNonNil o1))
+(NeqPtr (AddPtr p1 o1) p2) && isSamePtr(p1, p2) => (IsNonNil o1)
+(EqPtr (Const(32|64) [0]) p) => (Not (IsNonNil p))
+(NeqPtr (Const(32|64) [0]) p) => (IsNonNil p)
+(EqPtr (ConstNil) p) => (Not (IsNonNil p))
+(NeqPtr (ConstNil) p) => (IsNonNil p)
+
+// Evaluate constant user nil checks.
+(IsNonNil (ConstNil)) => (ConstBool [false])
+(IsNonNil (Const(32|64) [c])) => (ConstBool [c != 0])
+(IsNonNil (Addr _)) => (ConstBool [true])
+(IsNonNil (LocalAddr _ _)) => (ConstBool [true])
+
+// Inline small or disjoint runtime.memmove calls with constant length.
+// See the comment in op Move in genericOps.go for discussion of the type.
+//
+// Note that we've lost any knowledge of the type and alignment requirements
+// of the source and destination. We only know the size, and that the type
+// contains no pointers.
+// The type of the move is not necessarily v.Args[0].Type().Elem()!
+// See issue 55122 for details.
+//
+// Because expand calls runs after prove, constants useful to this pattern may not appear.
+// Both versions need to exist: the memory variant and the register variant.
+//
+// Match post-expansion calls, memory version.
+(SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const(64|32) [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ && sz >= 0
+ && isSameCall(sym, "runtime.memmove")
+ && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(s1, s2, s3, call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+
+// Match post-expansion calls, register version.
+(SelectN [0] call:(StaticCall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+
+// Match pre-expansion calls.
+(SelectN [0] call:(StaticLECall {sym} dst src (Const(64|32) [sz]) mem))
+ && sz >= 0
+ && call.Uses == 1 // this will exclude all calls with results
+ && isSameCall(sym, "runtime.memmove")
+ && isInlinableMemmove(dst, src, int64(sz), config)
+ && clobber(call)
+ => (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
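An illustrative source pattern (hypothetical): a copy with a known small length can lower to a runtime.memmove call, which these rules replace with an SSA Move when it is inlinable.

    package main

    import "fmt"

    func main() {
        src := [8]byte{1, 2, 3, 4, 5, 6, 7, 8}
        var dst [8]byte
        copy(dst[:], src[:]) // constant length 8; the memmove can become a Move
        fmt.Println(dst)
    }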
+
+// De-virtualize late-expanded interface calls into late-expanded static calls.
+// Note that (ITab (IMake)) doesn't get rewritten until after the first opt pass,
+// so this rule should trigger reliably.
+// devirtLECall removes the first argument, adds the devirtualized symbol to the AuxCall, and changes the opcode.
+(InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
+ && devirtLESym(v, auxCall, itab, off) != nil
+ => devirtLECall(v, devirtLESym(v, auxCall, itab, off))
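An illustrative source pattern (hypothetical): when an interface value is built from a concrete type in the same function, the itab is a known symbol and the interface call can be rewritten into a static call to the concrete method.

    package main

    import "fmt"

    type adder struct{ n int }

    func (a adder) add(x int) int { return x + a.n }

    type summer interface{ add(int) int }

    func main() {
        var s summer = adder{n: 2} // IMake with a known itab
        fmt.Println(s.add(40))     // 42; candidate for devirtualization to adder.add
    }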
+
+// Move and Zero optimizations.
+// Move source and destination may overlap.
+
+// Convert Moves into Zeros when the source is known to be zeros.
+(Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _)) && isSamePtr(src, dst2)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _))) && isSamePtr(src, dst0)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst (Addr {sym} (SB)) mem) && symIsROZero(sym) => (Zero {t} [n] dst mem)
+
+// Don't Store to variables that are about to be overwritten by Move/Zero.
+(Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ && isSamePtr(p1, p2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && clobber(store)
+ => (Zero {t1} [n] p1 mem)
+(Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ && isSamePtr(dst1, dst2) && store.Uses == 1
+ && n >= o2 + t2.Size()
+ && disjoint(src1, n, op, t2.Size())
+ && clobber(store)
+ => (Move {t1} [n] dst1 src1 mem)
+
+// Don't Move to variables that are immediately completely overwritten.
+(Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move)
+ => (Zero {t} [n] dst1 mem)
+(Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ && move.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move)
+ => (Move {t} [n] dst1 src1 mem)
+(Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2)
+ && clobber(move, vardef)
+ => (Zero {t} [n] dst1 (VarDef {x} mem))
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ && move.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(move, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Move [n] p3 _ mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Move [n] p4 _ mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Move [n] p5 _ mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Zero variables that are immediately completely overwritten
+// before being accessed.
+(Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ && zero.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero)
+ => (Move {t} [n] dst1 src1 mem)
+(Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ && zero.Uses == 1 && vardef.Uses == 1
+ && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n)
+ && clobber(zero, vardef)
+ => (Move {t} [n] dst1 src1 (VarDef {x} mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [0] p2) d2
+ m3:(Zero [n] p3 mem)))
+ && m2.Uses == 1 && m3.Uses == 1
+ && o1 == t2.Size()
+ && n == t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && clobber(m2, m3)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [0] p3) d3
+ m4:(Zero [n] p4 mem))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1
+ && o2 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && clobber(m2, m3, m4)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+(Store {t1} op1:(OffPtr [o1] p1) d1
+ m2:(Store {t2} op2:(OffPtr [o2] p2) d2
+ m3:(Store {t3} op3:(OffPtr [o3] p3) d3
+ m4:(Store {t4} op4:(OffPtr [0] p4) d4
+ m5:(Zero [n] p5 mem)))))
+ && m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && o1-o2 == t2.Size()
+ && n == t4.Size() + t3.Size() + t2.Size() + t1.Size()
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && clobber(m2, m3, m4, m5)
+ => (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+
+// Don't Move from memory if the values are likely to already be
+// in registers.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Same thing but with VarDef in the middle.
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && o2 == t3.Size()
+ && n == t2.Size() + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && o3 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2
+ (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3
+ (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && o4 == t5.Size()
+ && o3-o4 == t4.Size()
+ && o2-o3 == t3.Size()
+ && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+
+// Prefer to Zero and Store rather than to Move.
+(Move {t1} [n] dst p1
+ mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _)))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1
+ (Zero {t3} [n] p3 _))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && n >= o2 + t2.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Zero {t1} [n] dst mem))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Zero {t4} [n] p4 _)))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Zero {t1} [n] dst mem)))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Zero {t5} [n] p5 _))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Zero {t1} [n] dst mem))))
+(Move {t1} [n] dst p1
+ mem:(VarDef
+ (Store {t2} (OffPtr <tt2> [o2] p2) d1
+ (Store {t3} (OffPtr <tt3> [o3] p3) d2
+ (Store {t4} (OffPtr <tt4> [o4] p4) d3
+ (Store {t5} (OffPtr <tt5> [o5] p5) d4
+ (Zero {t6} [n] p6 _)))))))
+ && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6)
+ && t2.Alignment() <= t1.Alignment()
+ && t3.Alignment() <= t1.Alignment()
+ && t4.Alignment() <= t1.Alignment()
+ && t5.Alignment() <= t1.Alignment()
+ && t6.Alignment() <= t1.Alignment()
+ && registerizable(b, t2)
+ && registerizable(b, t3)
+ && registerizable(b, t4)
+ && registerizable(b, t5)
+ && n >= o2 + t2.Size()
+ && n >= o3 + t3.Size()
+ && n >= o4 + t4.Size()
+ && n >= o5 + t5.Size()
+ => (Store {t2} (OffPtr <tt2> [o2] dst) d1
+ (Store {t3} (OffPtr <tt3> [o3] dst) d2
+ (Store {t4} (OffPtr <tt4> [o4] dst) d3
+ (Store {t5} (OffPtr <tt5> [o5] dst) d4
+ (Zero {t1} [n] dst mem)))))
+
+(SelectN [0] call:(StaticLECall {sym} a x)) && needRaceCleanup(sym, call) && clobber(call) => x
+(SelectN [0] call:(StaticLECall {sym} x)) && needRaceCleanup(sym, call) && clobber(call) => x
+
+// Collapse moving A -> B -> C into just A -> C.
+// Later passes (deadstore, elim unread auto) will remove the A -> B move, if possible.
+// This happens most commonly when B is an autotmp inserted earlier
+// during compilation to ensure correctness.
+// Take care that overlapping moves are preserved.
+// Restrict this optimization to the stack, to avoid duplicating loads from the heap;
+// see CL 145208 for discussion.
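+// For example, when a large value is first copied into a stack autotmp and
+// then copied again to its final destination, the second Move can read from
+// the original stack source directly.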
+(Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Same, but for large types that require VarDefs.
+(Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ && t1.Compare(t2) == types.CMPeq
+ && isSamePtr(tmp1, tmp2)
+ && isStackPtr(src) && !isVolatile(src)
+ && disjoint(src, s, tmp2, s)
+ && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ => (Move {t1} [s] dst src midmem)
+
+// Don't zero the same bits twice.
+(Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _)) && isSamePtr(dst1, dst2) => zero
+(Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _))) && isSamePtr(dst1, dst2) => vardef
+
+// Elide self-moves. This only happens rarely (e.g. test/fixedbugs/bug277.go).
+// However, this rule is needed to prevent the previous rule from looping forever in such cases.
+(Move dst src mem) && isSamePtr(dst, src) => mem
diff --git a/src/cmd/compile/internal/ssa/gen/genericOps.go b/src/cmd/compile/internal/ssa/gen/genericOps.go
new file mode 100644
index 0000000..69cd828
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/genericOps.go
@@ -0,0 +1,662 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+package main
+
+// Generic opcodes typically specify a width. The inputs and outputs
+// of that op are the given number of bits wide. There is no notion of
+// "sign", so Add32 can be used both for signed and unsigned 32-bit
+// addition.
+
+// Signed/unsigned is explicit with the extension ops
+// (SignExt*/ZeroExt*) and implicit as the arg to some opcodes
+// (e.g. the second argument to shifts is unsigned). If not mentioned,
+// all args take signed inputs, or don't care whether their inputs
+// are signed or unsigned.
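+//
+// For example, Add32 serves both int32 and uint32 addition, while signedness
+// appears only where it matters: Rsh32x8 is a signed right shift, Rsh32Ux8 an
+// unsigned one, and SignExt8to32 vs ZeroExt8to32 choose the kind of extension.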
+
+var genericOps = []opData{
+ // 2-input arithmetic
+	// Types must be consistent with Go typing. Add, for example, must take two values
+	// of the same type and produce a value of that same type.
+ {name: "Add8", argLength: 2, commutative: true}, // arg0 + arg1
+ {name: "Add16", argLength: 2, commutative: true},
+ {name: "Add32", argLength: 2, commutative: true},
+ {name: "Add64", argLength: 2, commutative: true},
+ {name: "AddPtr", argLength: 2}, // For address calculations. arg0 is a pointer and arg1 is an int.
+ {name: "Add32F", argLength: 2, commutative: true},
+ {name: "Add64F", argLength: 2, commutative: true},
+
+ {name: "Sub8", argLength: 2}, // arg0 - arg1
+ {name: "Sub16", argLength: 2},
+ {name: "Sub32", argLength: 2},
+ {name: "Sub64", argLength: 2},
+ {name: "SubPtr", argLength: 2},
+ {name: "Sub32F", argLength: 2},
+ {name: "Sub64F", argLength: 2},
+
+ {name: "Mul8", argLength: 2, commutative: true}, // arg0 * arg1
+ {name: "Mul16", argLength: 2, commutative: true},
+ {name: "Mul32", argLength: 2, commutative: true},
+ {name: "Mul64", argLength: 2, commutative: true},
+ {name: "Mul32F", argLength: 2, commutative: true},
+ {name: "Mul64F", argLength: 2, commutative: true},
+
+ {name: "Div32F", argLength: 2}, // arg0 / arg1
+ {name: "Div64F", argLength: 2},
+
+ {name: "Hmul32", argLength: 2, commutative: true},
+ {name: "Hmul32u", argLength: 2, commutative: true},
+ {name: "Hmul64", argLength: 2, commutative: true},
+ {name: "Hmul64u", argLength: 2, commutative: true},
+
+ {name: "Mul32uhilo", argLength: 2, typ: "(UInt32,UInt32)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+ {name: "Mul64uhilo", argLength: 2, typ: "(UInt64,UInt64)", commutative: true}, // arg0 * arg1, returns (hi, lo)
+
+	{name: "Mul32uover", argLength: 2, typ: "(UInt32,Bool)", commutative: true}, // Let x = arg0*arg1 (full 32x32->64 unsigned multiply), returns (uint32(x), (uint32(x) != x))
+ {name: "Mul64uover", argLength: 2, typ: "(UInt64,Bool)", commutative: true}, // Let x = arg0*arg1 (full 64x64->128 unsigned multiply), returns (uint64(x), (uint64(x) != x))
+
+ // Weird special instructions for use in the strength reduction of divides.
+ // These ops compute unsigned (arg0 + arg1) / 2, correct to all
+ // 32/64 bits, even when the intermediate result of the add has 33/65 bits.
+ // These ops can assume arg0 >= arg1.
+ // Note: these ops aren't commutative!
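+	// One way to see why no wider intermediate is needed: with arg0 >= arg1,
+	// (arg0 + arg1) / 2 == arg1 + (arg0 - arg1)/2, and the right-hand side
+	// cannot overflow. (Illustrative identity only; each architecture lowers
+	// these ops in its own way.)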
+ {name: "Avg32u", argLength: 2, typ: "UInt32"}, // 32-bit platforms only
+ {name: "Avg64u", argLength: 2, typ: "UInt64"}, // 64-bit platforms only
+
+ // For Div16, Div32 and Div64, AuxInt non-zero means that the divisor has been proved to be not -1
+ // or that the dividend is not the most negative value.
+ {name: "Div8", argLength: 2}, // arg0 / arg1, signed
+ {name: "Div8u", argLength: 2}, // arg0 / arg1, unsigned
+ {name: "Div16", argLength: 2, aux: "Bool"},
+ {name: "Div16u", argLength: 2},
+ {name: "Div32", argLength: 2, aux: "Bool"},
+ {name: "Div32u", argLength: 2},
+ {name: "Div64", argLength: 2, aux: "Bool"},
+ {name: "Div64u", argLength: 2},
+ {name: "Div128u", argLength: 3}, // arg0:arg1 / arg2 (128-bit divided by 64-bit), returns (q, r)
+
+ // For Mod16, Mod32 and Mod64, AuxInt non-zero means that the divisor has been proved to be not -1.
+ {name: "Mod8", argLength: 2}, // arg0 % arg1, signed
+ {name: "Mod8u", argLength: 2}, // arg0 % arg1, unsigned
+ {name: "Mod16", argLength: 2, aux: "Bool"},
+ {name: "Mod16u", argLength: 2},
+ {name: "Mod32", argLength: 2, aux: "Bool"},
+ {name: "Mod32u", argLength: 2},
+ {name: "Mod64", argLength: 2, aux: "Bool"},
+ {name: "Mod64u", argLength: 2},
+
+ {name: "And8", argLength: 2, commutative: true}, // arg0 & arg1
+ {name: "And16", argLength: 2, commutative: true},
+ {name: "And32", argLength: 2, commutative: true},
+ {name: "And64", argLength: 2, commutative: true},
+
+ {name: "Or8", argLength: 2, commutative: true}, // arg0 | arg1
+ {name: "Or16", argLength: 2, commutative: true},
+ {name: "Or32", argLength: 2, commutative: true},
+ {name: "Or64", argLength: 2, commutative: true},
+
+ {name: "Xor8", argLength: 2, commutative: true}, // arg0 ^ arg1
+ {name: "Xor16", argLength: 2, commutative: true},
+ {name: "Xor32", argLength: 2, commutative: true},
+ {name: "Xor64", argLength: 2, commutative: true},
+
+ // For shifts, AxB means the shifted value has A bits and the shift amount has B bits.
+ // Shift amounts are considered unsigned.
+ // If arg1 is known to be nonnegative and less than the number of bits in arg0,
+ // then auxInt may be set to 1.
+ // This enables better code generation on some platforms.
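+	// For example, Lsh32x8 shifts a 32-bit value left by an 8-bit (unsigned)
+	// shift amount, and Rsh32Ux8 is the corresponding unsigned right shift.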
+ {name: "Lsh8x8", argLength: 2, aux: "Bool"}, // arg0 << arg1
+ {name: "Lsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Lsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8x8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, signed
+ {name: "Rsh8x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32x64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64x64", argLength: 2, aux: "Bool"},
+
+ {name: "Rsh8Ux8", argLength: 2, aux: "Bool"}, // arg0 >> arg1, unsigned
+ {name: "Rsh8Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh8Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh16Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh32Ux64", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux8", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux16", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux32", argLength: 2, aux: "Bool"},
+ {name: "Rsh64Ux64", argLength: 2, aux: "Bool"},
+
+ // 2-input comparisons
+ {name: "Eq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "Eq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "EqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "EqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Eq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Eq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Neq8", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Neq16", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq32", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqPtr", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "NeqInter", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "NeqSlice", argLength: 2, typ: "Bool"}, // arg0 or arg1 is nil; other cases handled by frontend
+ {name: "Neq32F", argLength: 2, commutative: true, typ: "Bool"},
+ {name: "Neq64F", argLength: 2, commutative: true, typ: "Bool"},
+
+ {name: "Less8", argLength: 2, typ: "Bool"}, // arg0 < arg1, signed
+ {name: "Less8U", argLength: 2, typ: "Bool"}, // arg0 < arg1, unsigned
+ {name: "Less16", argLength: 2, typ: "Bool"},
+ {name: "Less16U", argLength: 2, typ: "Bool"},
+ {name: "Less32", argLength: 2, typ: "Bool"},
+ {name: "Less32U", argLength: 2, typ: "Bool"},
+ {name: "Less64", argLength: 2, typ: "Bool"},
+ {name: "Less64U", argLength: 2, typ: "Bool"},
+ {name: "Less32F", argLength: 2, typ: "Bool"},
+ {name: "Less64F", argLength: 2, typ: "Bool"},
+
+ {name: "Leq8", argLength: 2, typ: "Bool"}, // arg0 <= arg1, signed
+ {name: "Leq8U", argLength: 2, typ: "Bool"}, // arg0 <= arg1, unsigned
+ {name: "Leq16", argLength: 2, typ: "Bool"},
+ {name: "Leq16U", argLength: 2, typ: "Bool"},
+ {name: "Leq32", argLength: 2, typ: "Bool"},
+ {name: "Leq32U", argLength: 2, typ: "Bool"},
+ {name: "Leq64", argLength: 2, typ: "Bool"},
+ {name: "Leq64U", argLength: 2, typ: "Bool"},
+ {name: "Leq32F", argLength: 2, typ: "Bool"},
+ {name: "Leq64F", argLength: 2, typ: "Bool"},
+
+ // the type of a CondSelect is the same as the type of its first
+ // two arguments, which should be register-width scalars; the third
+ // argument should be a boolean
+ {name: "CondSelect", argLength: 3}, // arg2 ? arg0 : arg1
+
+ // boolean ops
+ {name: "AndB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 && arg1 (not shortcircuited)
+ {name: "OrB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 || arg1 (not shortcircuited)
+ {name: "EqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 == arg1
+ {name: "NeqB", argLength: 2, commutative: true, typ: "Bool"}, // arg0 != arg1
+ {name: "Not", argLength: 1, typ: "Bool"}, // !arg0, boolean
+
+ // 1-input ops
+ {name: "Neg8", argLength: 1}, // -arg0
+ {name: "Neg16", argLength: 1},
+ {name: "Neg32", argLength: 1},
+ {name: "Neg64", argLength: 1},
+ {name: "Neg32F", argLength: 1},
+ {name: "Neg64F", argLength: 1},
+
+ {name: "Com8", argLength: 1}, // ^arg0
+ {name: "Com16", argLength: 1},
+ {name: "Com32", argLength: 1},
+ {name: "Com64", argLength: 1},
+
+ {name: "Ctz8", argLength: 1}, // Count trailing (low order) zeroes (returns 0-8)
+ {name: "Ctz16", argLength: 1}, // Count trailing (low order) zeroes (returns 0-16)
+ {name: "Ctz32", argLength: 1}, // Count trailing (low order) zeroes (returns 0-32)
+ {name: "Ctz64", argLength: 1}, // Count trailing (low order) zeroes (returns 0-64)
+ {name: "Ctz8NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-7
+ {name: "Ctz16NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-15
+ {name: "Ctz32NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-31
+ {name: "Ctz64NonZero", argLength: 1}, // same as above, but arg[0] known to be non-zero, returns 0-63
+ {name: "BitLen8", argLength: 1}, // Number of bits in arg[0] (returns 0-8)
+ {name: "BitLen16", argLength: 1}, // Number of bits in arg[0] (returns 0-16)
+ {name: "BitLen32", argLength: 1}, // Number of bits in arg[0] (returns 0-32)
+ {name: "BitLen64", argLength: 1}, // Number of bits in arg[0] (returns 0-64)
+
+ {name: "Bswap32", argLength: 1}, // Swap bytes
+ {name: "Bswap64", argLength: 1}, // Swap bytes
+
+ {name: "BitRev8", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev16", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev32", argLength: 1}, // Reverse the bits in arg[0]
+ {name: "BitRev64", argLength: 1}, // Reverse the bits in arg[0]
+
+ {name: "PopCount8", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount16", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount32", argLength: 1}, // Count bits in arg[0]
+ {name: "PopCount64", argLength: 1}, // Count bits in arg[0]
+ {name: "RotateLeft8", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft16", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft32", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+ {name: "RotateLeft64", argLength: 2}, // Rotate bits in arg[0] left by arg[1]
+
+ // Square root.
+ // Special cases:
+ // +∞ → +∞
+ // ±0 → ±0 (sign preserved)
+ // x<0 → NaN
+ // NaN → NaN
+ {name: "Sqrt", argLength: 1}, // √arg0 (floating point, double precision)
+ {name: "Sqrt32", argLength: 1}, // √arg0 (floating point, single precision)
+
+ // Round to integer, float64 only.
+ // Special cases:
+ // ±∞ → ±∞ (sign preserved)
+ // ±0 → ±0 (sign preserved)
+ // NaN → NaN
+ {name: "Floor", argLength: 1}, // round arg0 toward -∞
+ {name: "Ceil", argLength: 1}, // round arg0 toward +∞
+ {name: "Trunc", argLength: 1}, // round arg0 toward 0
+ {name: "Round", argLength: 1}, // round arg0 to nearest, ties away from 0
+ {name: "RoundToEven", argLength: 1}, // round arg0 to nearest, ties to even
+
+ // Modify the sign bit
+ {name: "Abs", argLength: 1}, // absolute value arg0
+ {name: "Copysign", argLength: 2}, // copy sign from arg0 to arg1
+
+ // 3-input opcode.
+ // Fused-multiply-add, float64 only.
+ // When a*b+c is exactly zero (before rounding), then the result is +0 or -0.
+ // The 0's sign is determined according to the standard rules for the
+ // addition (-0 if both a*b and c are -0, +0 otherwise).
+ //
+ // Otherwise, when a*b+c rounds to zero, then the resulting 0's sign is
+ // determined by the sign of the exact result a*b+c.
+ // See section 6.3 in ieee754.
+ //
+ // When the multiply is an infinity times a zero, the result is NaN.
+ // See section 7.2 in ieee754.
+ {name: "FMA", argLength: 3}, // compute (a*b)+c without intermediate rounding
+
+ // Data movement. Max argument length for Phi is indefinite.
+ {name: "Phi", argLength: -1, zeroWidth: true}, // select an argument based on which predecessor block we came from
+ {name: "Copy", argLength: 1}, // output = arg0
+ // Convert converts between pointers and integers.
+ // We have a special op for this so as to not confuse GC
+ // (particularly stack maps). It takes a memory arg so it
+ // gets correctly ordered with respect to GC safepoints.
+	// It gets compiled to nothing, so its result must be in the same
+ // register as its argument. regalloc knows it can use any
+ // allocatable integer register for OpConvert.
+ // arg0=ptr/int arg1=mem, output=int/ptr
+ {name: "Convert", argLength: 2, zeroWidth: true, resultInArg0: true},
+
+ // constants. Constant values are stored in the aux or
+ // auxint fields.
+ {name: "ConstBool", aux: "Bool"}, // auxint is 0 for false and 1 for true
+ {name: "ConstString", aux: "String"}, // value is aux.(string)
+ {name: "ConstNil", typ: "BytePtr"}, // nil pointer
+ {name: "Const8", aux: "Int8"}, // auxint is sign-extended 8 bits
+ {name: "Const16", aux: "Int16"}, // auxint is sign-extended 16 bits
+ {name: "Const32", aux: "Int32"}, // auxint is sign-extended 32 bits
+ // Note: ConstX are sign-extended even when the type of the value is unsigned.
+ // For instance, uint8(0xaa) is stored as auxint=0xffffffffffffffaa.
+ {name: "Const64", aux: "Int64"}, // value is auxint
+ // Note: for both Const32F and Const64F, we disallow encoding NaNs.
+ // Signaling NaNs are tricky because if you do anything with them, they become quiet.
+ // Particularly, converting a 32 bit sNaN to 64 bit and back converts it to a qNaN.
+ // See issue 36399 and 36400.
+ // Encodings of +inf, -inf, and -0 are fine.
+ {name: "Const32F", aux: "Float32"}, // value is math.Float64frombits(uint64(auxint)) and is exactly representable as float 32
+ {name: "Const64F", aux: "Float64"}, // value is math.Float64frombits(uint64(auxint))
+ {name: "ConstInterface"}, // nil interface
+ {name: "ConstSlice"}, // nil slice
+
+ // Constant-like things
+ {name: "InitMem", zeroWidth: true}, // memory input to the function.
+ {name: "Arg", aux: "SymOff", symEffect: "Read", zeroWidth: true}, // argument to the function. aux=GCNode of arg, off = offset in that arg.
+
+ // Like Arg, these are generic ops that survive lowering. AuxInt is a register index, and the actual output register for each index is defined by the architecture.
+ // AuxInt = integer argument index (not a register number). ABI-specified spill loc obtained from function
+ {name: "ArgIntReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in an int reg.
+ {name: "ArgFloatReg", aux: "NameOffsetInt8", zeroWidth: true}, // argument to the function in a float reg.
+
+ // The address of a variable. arg0 is the base pointer.
+ // If the variable is a global, the base pointer will be SB and
+ // the Aux field will be a *obj.LSym.
+ // If the variable is a local, the base pointer will be SP and
+ // the Aux field will be a *gc.Node.
+ {name: "Addr", argLength: 1, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SB. Aux identifies the variable.
+ {name: "LocalAddr", argLength: 2, aux: "Sym", symEffect: "Addr"}, // Address of a variable. Arg0=SP. Arg1=mem. Aux identifies the variable.
+
+ {name: "SP", zeroWidth: true}, // stack pointer
+ {name: "SB", typ: "Uintptr", zeroWidth: true}, // static base pointer (a.k.a. globals pointer)
+ {name: "Invalid"}, // unused value
+
+ // Memory operations
+ {name: "Load", argLength: 2}, // Load from arg0. arg1=memory
+ {name: "Dereference", argLength: 2}, // Load from arg0. arg1=memory. Helper op for arg/result passing, result is an otherwise not-SSA-able "value".
+ {name: "Store", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ // Normally we require that the source and destination of Move do not overlap.
+ // There is an exception when we know all the loads will happen before all
+ // the stores. In that case, overlap is ok. See
+	// memmove inlining in generic.rules. When isInlinableMemmove (in ../rewrite.go)
+ // returns true, we must do all loads before all stores, when lowering Move.
+ // The type of Move is used for the write barrier pass to insert write barriers
+ // and for alignment on some architectures.
+ // For pointerless types, it is possible for the type to be inaccurate.
+ // For type alignment and pointer information, use the type in Aux;
+ // for type size, use the size in AuxInt.
+ // The "inline runtime.memmove" rewrite rule generates Moves with inaccurate types,
+ // such as type byte instead of the more accurate type [8]byte.
+ {name: "Move", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "Zero", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+
+ // Memory operations with write barriers.
+ // Expand to runtime calls. Write barrier will be removed if write on stack.
+ {name: "StoreWB", argLength: 3, typ: "Mem", aux: "Typ"}, // Store arg1 to arg0. arg2=memory, aux=type. Returns memory.
+ {name: "MoveWB", argLength: 3, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=srcptr, arg2=mem, auxint=size, aux=type. Returns memory.
+ {name: "ZeroWB", argLength: 2, typ: "Mem", aux: "TypSize"}, // arg0=destptr, arg1=mem, auxint=size, aux=type. Returns memory.
+
+ // WB invokes runtime.gcWriteBarrier. This is not a normal
+ // call: it takes arguments in registers, doesn't clobber
+ // general-purpose registers (the exact clobber set is
+ // arch-dependent), and is not a safe-point.
+ {name: "WB", argLength: 3, typ: "Mem", aux: "Sym", symEffect: "None"}, // arg0=destptr, arg1=srcptr, arg2=mem, aux=runtime.gcWriteBarrier
+
+ {name: "HasCPUFeature", argLength: 0, typ: "bool", aux: "Sym", symEffect: "None"}, // aux=place that this feature flag can be loaded from
+
+ // PanicBounds and PanicExtend generate a runtime panic.
+ // Their arguments provide index values to use in panic messages.
+ // Both PanicBounds and PanicExtend have an AuxInt value from the BoundsKind type (in ../op.go).
+ // PanicBounds' index is int sized.
+ // PanicExtend's index is int64 sized. (PanicExtend is only used on 32-bit archs.)
+ {name: "PanicBounds", argLength: 3, aux: "Int64", typ: "Mem", call: true}, // arg0=idx, arg1=len, arg2=mem, returns memory.
+ {name: "PanicExtend", argLength: 4, aux: "Int64", typ: "Mem", call: true}, // arg0=idxHi, arg1=idxLo, arg2=len, arg3=mem, returns memory.
+
+ // Function calls. Arguments to the call have already been written to the stack.
+ // Return values appear on the stack. The method receiver, if any, is treated
+ // as a phantom first argument.
+ // TODO(josharian): ClosureCall and InterCall should have Int32 aux
+ // to match StaticCall's 32 bit arg size limit.
+ // TODO(drchase,josharian): could the arg size limit be bundled into the rules for CallOff?
+
+ // Before lowering, LECalls receive their fixed inputs (first), memory (last),
+ // and a variable number of input values in the middle.
+ // They produce a variable number of result values.
+ // These values are not necessarily "SSA-able"; they can be too large,
+ // but in that case inputs are loaded immediately before with OpDereference,
+ // and outputs are stored immediately with OpStore.
+ //
+ // After call expansion, Calls have the same fixed-middle-memory arrangement of inputs,
+ // with the difference that the "middle" is only the register-resident inputs,
+ // and the non-register inputs are instead stored at ABI-defined offsets from SP
+ // (and the stores thread through the memory that is ultimately an input to the call).
+ // Outputs follow a similar pattern; register-resident outputs are the leading elements
+ // of a Result-typed output, with memory last, and any memory-resident outputs have been
+ // stored to ABI-defined locations. Each non-memory input or output fits in a register.
+ //
+ // Subsequent architecture-specific lowering only changes the opcode.
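+	//
+	// For example, before lowering, a call to a func(a, b int64) int64 becomes
+	// (StaticLECall {fn} a b mem); SelectN [0] of it is the int64 result and
+	// the returned memory is the last SelectN index (here [1]). This is an
+	// illustrative sketch; the exact indices depend on the signature.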
+
+ {name: "ClosureCall", argLength: -1, aux: "CallOff", call: true}, // arg0=code pointer, arg1=context ptr, arg2..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+ {name: "StaticCall", argLength: -1, aux: "CallOff", call: true}, // call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+ {name: "InterCall", argLength: -1, aux: "CallOff", call: true}, // interface call. arg0=code pointer, arg1..argN-1 are register inputs, argN=memory, auxint=arg size. Returns Result of register results, plus memory.
+ {name: "TailCall", argLength: -1, aux: "CallOff", call: true}, // tail call function aux.(*obj.LSym), arg0..argN-1 are register inputs, argN=memory. auxint=arg size. Returns Result of register results, plus memory.
+
+ {name: "ClosureLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded closure call. arg0=code pointer, arg1=context ptr, arg2..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "StaticLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "InterLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded interface call. arg0=code pointer, arg1..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+ {name: "TailLECall", argLength: -1, aux: "CallOff", call: true}, // late-expanded static tail call function aux.(*ssa.AuxCall.Fn). arg0..argN-1 are inputs, argN is mem. auxint = arg size. Result is tuple of result(s), plus mem.
+
+ // Conversions: signed extensions, zero (unsigned) extensions, truncations
+ {name: "SignExt8to16", argLength: 1, typ: "Int16"},
+ {name: "SignExt8to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt8to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt16to32", argLength: 1, typ: "Int32"},
+ {name: "SignExt16to64", argLength: 1, typ: "Int64"},
+ {name: "SignExt32to64", argLength: 1, typ: "Int64"},
+ {name: "ZeroExt8to16", argLength: 1, typ: "UInt16"},
+ {name: "ZeroExt8to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt8to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt16to32", argLength: 1, typ: "UInt32"},
+ {name: "ZeroExt16to64", argLength: 1, typ: "UInt64"},
+ {name: "ZeroExt32to64", argLength: 1, typ: "UInt64"},
+ {name: "Trunc16to8", argLength: 1},
+ {name: "Trunc32to8", argLength: 1},
+ {name: "Trunc32to16", argLength: 1},
+ {name: "Trunc64to8", argLength: 1},
+ {name: "Trunc64to16", argLength: 1},
+ {name: "Trunc64to32", argLength: 1},
+
+ {name: "Cvt32to32F", argLength: 1},
+ {name: "Cvt32to64F", argLength: 1},
+ {name: "Cvt64to32F", argLength: 1},
+ {name: "Cvt64to64F", argLength: 1},
+ {name: "Cvt32Fto32", argLength: 1},
+ {name: "Cvt32Fto64", argLength: 1},
+ {name: "Cvt64Fto32", argLength: 1},
+ {name: "Cvt64Fto64", argLength: 1},
+ {name: "Cvt32Fto64F", argLength: 1},
+ {name: "Cvt64Fto32F", argLength: 1},
+ {name: "CvtBoolToUint8", argLength: 1},
+
+ // Force rounding to precision of type.
+ {name: "Round32F", argLength: 1},
+ {name: "Round64F", argLength: 1},
+
+ // Automatically inserted safety checks
+ {name: "IsNonNil", argLength: 1, typ: "Bool"}, // arg0 != nil
+ {name: "IsInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 < arg1. arg1 is guaranteed >= 0.
+ {name: "IsSliceInBounds", argLength: 2, typ: "Bool"}, // 0 <= arg0 <= arg1. arg1 is guaranteed >= 0.
+ {name: "NilCheck", argLength: 2, typ: "Void"}, // arg0=ptr, arg1=mem. Panics if arg0 is nil. Returns void.
+
+ // Pseudo-ops
+ {name: "GetG", argLength: 1, zeroWidth: true}, // runtime.getg() (read g pointer). arg0=mem
+ {name: "GetClosurePtr"}, // get closure pointer from dedicated register
+ {name: "GetCallerPC"}, // for getcallerpc intrinsic
+ {name: "GetCallerSP"}, // for getcallersp intrinsic
+
+ // Indexing operations
+ {name: "PtrIndex", argLength: 2}, // arg0=ptr, arg1=index. Computes ptr+sizeof(*v.type)*index, where index is extended to ptrwidth type
+ {name: "OffPtr", argLength: 1, aux: "Int64"}, // arg0 + auxint (arg0 and result are pointers)
+
+ // Slices
+ {name: "SliceMake", argLength: 3}, // arg0=ptr, arg1=len, arg2=cap
+ {name: "SlicePtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "SliceLen", argLength: 1}, // len(arg0)
+ {name: "SliceCap", argLength: 1}, // cap(arg0)
+ // SlicePtrUnchecked, like SlicePtr, extracts the pointer from a slice.
+ // SlicePtr values are assumed non-nil, because they are guarded by bounds checks.
+ // SlicePtrUnchecked values can be nil.
+ {name: "SlicePtrUnchecked", argLength: 1},
+
+ // Complex (part/whole)
+ {name: "ComplexMake", argLength: 2}, // arg0=real, arg1=imag
+ {name: "ComplexReal", argLength: 1}, // real(arg0)
+ {name: "ComplexImag", argLength: 1}, // imag(arg0)
+
+ // Strings
+ {name: "StringMake", argLength: 2}, // arg0=ptr, arg1=len
+ {name: "StringPtr", argLength: 1, typ: "BytePtr"}, // ptr(arg0)
+ {name: "StringLen", argLength: 1, typ: "Int"}, // len(arg0)
+
+ // Interfaces
+ {name: "IMake", argLength: 2}, // arg0=itab, arg1=data
+ {name: "ITab", argLength: 1, typ: "Uintptr"}, // arg0=interface, returns itable field
+ {name: "IData", argLength: 1}, // arg0=interface, returns data field
+
+ // Structs
+ {name: "StructMake0"}, // Returns struct with 0 fields.
+ {name: "StructMake1", argLength: 1}, // arg0=field0. Returns struct.
+ {name: "StructMake2", argLength: 2}, // arg0,arg1=field0,field1. Returns struct.
+ {name: "StructMake3", argLength: 3}, // arg0..2=field0..2. Returns struct.
+ {name: "StructMake4", argLength: 4}, // arg0..3=field0..3. Returns struct.
+ {name: "StructSelect", argLength: 1, aux: "Int64"}, // arg0=struct, auxint=field index. Returns the auxint'th field.
+
+ // Arrays
+ {name: "ArrayMake0"}, // Returns array with 0 elements
+ {name: "ArrayMake1", argLength: 1}, // Returns array with 1 element
+ {name: "ArraySelect", argLength: 1, aux: "Int64"}, // arg0=array, auxint=index. Returns a[i].
+
+ // Spill&restore ops for the register allocator. These are
+ // semantically identical to OpCopy; they do not take/return
+ // stores like regular memory ops do. We can get away without memory
+ // args because we know there is no aliasing of spill slots on the stack.
+ {name: "StoreReg", argLength: 1},
+ {name: "LoadReg", argLength: 1},
+
+ // Used during ssa construction. Like Copy, but the arg has not been specified yet.
+ {name: "FwdRef", aux: "Sym", symEffect: "None"},
+
+ // Unknown value. Used for Values whose values don't matter because they are dead code.
+ {name: "Unknown"},
+
+ {name: "VarDef", argLength: 1, aux: "Sym", typ: "Mem", symEffect: "None", zeroWidth: true}, // aux is a *gc.Node of a variable that is about to be initialized. arg0=mem, returns mem
+ {name: "VarKill", argLength: 1, aux: "Sym", symEffect: "None"}, // aux is a *gc.Node of a variable that is known to be dead. arg0=mem, returns mem
+ // TODO: what's the difference between VarLive and KeepAlive?
+ {name: "VarLive", argLength: 1, aux: "Sym", symEffect: "Read", zeroWidth: true}, // aux is a *gc.Node of a variable that must be kept live. arg0=mem, returns mem
+ {name: "KeepAlive", argLength: 2, typ: "Mem", zeroWidth: true}, // arg[0] is a value that must be kept alive until this mark. arg[1]=mem, returns mem
+
+ // InlMark marks the start of an inlined function body. Its AuxInt field
+ // distinguishes which entry in the local inline tree it is marking.
+ {name: "InlMark", argLength: 1, aux: "Int32", typ: "Void"}, // arg[0]=mem, returns void.
+
+ // Ops for breaking 64-bit operations on 32-bit architectures
+ {name: "Int64Make", argLength: 2, typ: "UInt64"}, // arg0=hi, arg1=lo
+ {name: "Int64Hi", argLength: 1, typ: "UInt32"}, // high 32-bit of arg0
+ {name: "Int64Lo", argLength: 1, typ: "UInt32"}, // low 32-bit of arg0
+
+ {name: "Add32carry", argLength: 2, commutative: true, typ: "(UInt32,Flags)"}, // arg0 + arg1, returns (value, carry)
+ {name: "Add32withcarry", argLength: 3, commutative: true}, // arg0 + arg1 + arg2, arg2=carry (0 or 1)
+
+ {name: "Sub32carry", argLength: 2, typ: "(UInt32,Flags)"}, // arg0 - arg1, returns (value, carry)
+ {name: "Sub32withcarry", argLength: 3}, // arg0 - arg1 - arg2, arg2=carry (0 or 1)
+
+ {name: "Add64carry", argLength: 3, commutative: true, typ: "(UInt64,UInt64)"}, // arg0 + arg1 + arg2, arg2 must be 0 or 1. returns (value, value>>64)
+ {name: "Sub64borrow", argLength: 3, typ: "(UInt64,UInt64)"}, // arg0 - (arg1 + arg2), arg2 must be 0 or 1. returns (value, value>>64&1)
+
+ {name: "Signmask", argLength: 1, typ: "Int32"}, // 0 if arg0 >= 0, -1 if arg0 < 0
+ {name: "Zeromask", argLength: 1, typ: "UInt32"}, // 0 if arg0 == 0, 0xffffffff if arg0 != 0
+ {name: "Slicemask", argLength: 1}, // 0 if arg0 == 0, -1 if arg0 > 0, undef if arg0<0. Type is native int size.
+
+ {name: "SpectreIndex", argLength: 2}, // arg0 if 0 <= arg0 < arg1, 0 otherwise. Type is native int size.
+ {name: "SpectreSliceIndex", argLength: 2}, // arg0 if 0 <= arg0 <= arg1, 0 otherwise. Type is native int size.
+
+ {name: "Cvt32Uto32F", argLength: 1}, // uint32 -> float32, only used on 32-bit arch
+ {name: "Cvt32Uto64F", argLength: 1}, // uint32 -> float64, only used on 32-bit arch
+ {name: "Cvt32Fto32U", argLength: 1}, // float32 -> uint32, only used on 32-bit arch
+ {name: "Cvt64Fto32U", argLength: 1}, // float64 -> uint32, only used on 32-bit arch
+	{name: "Cvt64Uto32F", argLength: 1}, // uint64 -> float32, only used on archs that have the instruction
+	{name: "Cvt64Uto64F", argLength: 1}, // uint64 -> float64, only used on archs that have the instruction
+	{name: "Cvt32Fto64U", argLength: 1}, // float32 -> uint64, only used on archs that have the instruction
+	{name: "Cvt64Fto64U", argLength: 1}, // float64 -> uint64, only used on archs that have the instruction
+
+ // pseudo-ops for breaking Tuple
+ {name: "Select0", argLength: 1, zeroWidth: true}, // the first component of a tuple
+ {name: "Select1", argLength: 1, zeroWidth: true}, // the second component of a tuple
+ {name: "SelectN", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the auxint'th member.
+ {name: "SelectNAddr", argLength: 1, aux: "Int64"}, // arg0=result, auxint=field index. Returns the address of auxint'th member. Used for un-SSA-able result types.
+ {name: "MakeResult", argLength: -1}, // arg0 .. are components of a "Result" (like the result from a Call). The last arg should be memory (like the result from a call).
+
+ // Atomic operations used for semantically inlining sync/atomic and
+ // runtime/internal/atomic. Atomic loads return a new memory so that
+ // the loads are properly ordered with respect to other loads and
+ // stores.
+ {name: "AtomicLoad8", argLength: 2, typ: "(UInt8,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoad64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadPtr", argLength: 2, typ: "(BytePtr,Mem)"}, // Load from arg0. arg1=memory. Returns loaded value and new memory.
+ {name: "AtomicLoadAcq32", argLength: 2, typ: "(UInt32,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicLoadAcq64", argLength: 2, typ: "(UInt64,Mem)"}, // Load from arg0. arg1=memory. Lock acquisition, returns loaded value and new memory.
+ {name: "AtomicStore8", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStore64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStorePtrNoWB", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns memory.
+ {name: "AtomicStoreRel32", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicStoreRel64", argLength: 3, typ: "Mem", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Lock release, returns memory.
+ {name: "AtomicExchange32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicAdd32", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicCompareAndSwap32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwapRel32", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Lock release, reports whether store happens and new memory.
+ {name: "AtomicAnd8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Atomic operation variants
+ // These variants have the same semantics as above atomic operations.
+ // But they are used for generating more efficient code on certain modern machines, with run-time CPU feature detection.
+ // Currently, they are used on ARM64 only.
+ {name: "AtomicAdd32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicAdd64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Do *arg0 += arg1. arg2=memory. Returns sum and new memory.
+ {name: "AtomicExchange32Variant", argLength: 3, typ: "(UInt32,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicExchange64Variant", argLength: 3, typ: "(UInt64,Mem)", hasSideEffects: true}, // Store arg1 to *arg0. arg2=memory. Returns old contents of *arg0 and new memory.
+ {name: "AtomicCompareAndSwap32Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicCompareAndSwap64Variant", argLength: 4, typ: "(Bool,Mem)", hasSideEffects: true}, // if *arg0==arg1, then set *arg0=arg2. Returns true if store happens and new memory.
+ {name: "AtomicAnd8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicAnd32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 &= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr8Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+ {name: "AtomicOr32Variant", argLength: 3, typ: "Mem", hasSideEffects: true}, // *arg0 |= arg1. arg2=memory. Returns memory.
+
+ // Publication barrier
+ {name: "PubBarrier", argLength: 1, hasSideEffects: true}, // Do data barrier. arg0=memory.
+
+ // Clobber experiment op
+ {name: "Clobber", argLength: 0, typ: "Void", aux: "SymOff", symEffect: "None"}, // write an invalid pointer value to the given pointer slot of a stack variable
+ {name: "ClobberReg", argLength: 0, typ: "Void"}, // clobber a register
+
+ // Prefetch instruction
+ {name: "PrefetchCache", argLength: 2, hasSideEffects: true}, // Do prefetch arg0 to cache. arg0=addr, arg1=memory.
+ {name: "PrefetchCacheStreamed", argLength: 2, hasSideEffects: true}, // Do non-temporal or streamed prefetch arg0 to cache. arg0=addr, arg1=memory.
+}
+
+//     kind           controls          successors    implicit exit
+//   ----------------------------------------------------------------
+//     Exit       [return mem]                  []              yes
+//     Ret        [return mem]                  []              yes
+//     RetJmp     [return mem]                  []              yes
+//     Plain                []              [next]
+//     If      [boolean Value]        [then, else]
+//     First                []     [always, never]
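+//
+// For example, an If block has one boolean control value and two successors:
+// Succs[0] is taken when the control is true, Succs[1] when it is false.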
+
+var genericBlocks = []blockData{
+ {name: "Plain"}, // a single successor
+ {name: "If", controls: 1}, // if Controls[0] goto Succs[0] else goto Succs[1]
+ {name: "Defer", controls: 1}, // Succs[0]=defer queued, Succs[1]=defer recovered. Controls[0] is call op (of memory type)
+ {name: "Ret", controls: 1}, // no successors, Controls[0] value is memory result
+ {name: "RetJmp", controls: 1}, // no successors, Controls[0] value is a tail call
+ {name: "Exit", controls: 1}, // no successors, Controls[0] value generates a panic
+
+ // transient block state used for dead code removal
+ {name: "First"}, // 2 successors, always takes the first one (second is dead)
+}
+
+func init() {
+ archs = append(archs, arch{
+ name: "generic",
+ ops: genericOps,
+ blocks: genericBlocks,
+ generic: true,
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/gen/main.go b/src/cmd/compile/internal/ssa/gen/main.go
new file mode 100644
index 0000000..2cf0a91
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/main.go
@@ -0,0 +1,573 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// The gen command generates Go code (in the parent directory) for all
+// the architecture-specific opcodes, blocks, and rewrites.
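+// These sources carry a //go:build ignore constraint, so they are not compiled
+// into the compiler itself; they are typically run by hand (for example with
+// "go run *.go" in this directory) to regenerate the checked-in files.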
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "runtime"
+ "runtime/pprof"
+ "runtime/trace"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// TODO: capitalize these types, so that we can more easily tell variable names
+// apart from type names, and avoid awkward func parameters like "arch arch".
+
+type arch struct {
+ name string
+ pkg string // obj package to import for this arch.
+ genfile string // source file containing opcode code generation.
+ ops []opData
+ blocks []blockData
+ regnames []string
+ ParamIntRegNames string
+ ParamFloatRegNames string
+ gpregmask regMask
+ fpregmask regMask
+ fp32regmask regMask
+ fp64regmask regMask
+ specialregmask regMask
+ framepointerreg int8
+ linkreg int8
+ generic bool
+ imports []string
+}
+
+type opData struct {
+ name string
+ reg regInfo
+ asm string
+ typ string // default result type
+ aux string
+ rematerializeable bool
+ argLength int32 // number of arguments, if -1, then this operation has a variable number of arguments
+ commutative bool // this operation is commutative on its first 2 arguments (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ call bool // is a function call
+ tailCall bool // is a tail call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect string // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type blockData struct {
+ name string // the suffix for this block ("EQ", "LT", etc.)
+ controls int // the number of control values this type of block requires
+ aux string // the type of the Aux/AuxInt value, if any
+}
+
+type regInfo struct {
+ // inputs[i] encodes the set of registers allowed for the i'th input.
+ // Inputs that don't use registers (flags, memory, etc.) should be 0.
+ inputs []regMask
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs[i] encodes the set of registers allowed for the i'th output.
+ outputs []regMask
+}
+
+type regMask uint64
+
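+// regMaskComment returns a Go line comment listing the register names whose
+// bits are set in r, in ascending bit order; for instance, if regnames[0] is
+// "AX" and regnames[3] is "BX", a mask with bits 0 and 3 set yields " // AX BX".
+// An empty mask yields the empty string.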
+func (a arch) regMaskComment(r regMask) string {
+ var buf bytes.Buffer
+ for i := uint64(0); r != 0; i++ {
+ if r&1 != 0 {
+ if buf.Len() == 0 {
+ buf.WriteString(" //")
+ }
+ buf.WriteString(" ")
+ buf.WriteString(a.regnames[i])
+ }
+ r >>= 1
+ }
+ return buf.String()
+}
+
+var archs []arch
+
+var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`")
+var memprofile = flag.String("memprofile", "", "write memory profile to `file`")
+var tracefile = flag.String("trace", "", "write trace to `file`")
+
+func main() {
+ flag.Parse()
+ if *cpuprofile != "" {
+ f, err := os.Create(*cpuprofile)
+ if err != nil {
+ log.Fatal("could not create CPU profile: ", err)
+ }
+ defer f.Close()
+ if err := pprof.StartCPUProfile(f); err != nil {
+ log.Fatal("could not start CPU profile: ", err)
+ }
+ defer pprof.StopCPUProfile()
+ }
+ if *tracefile != "" {
+ f, err := os.Create(*tracefile)
+ if err != nil {
+ log.Fatalf("failed to create trace output file: %v", err)
+ }
+ defer func() {
+ if err := f.Close(); err != nil {
+ log.Fatalf("failed to close trace file: %v", err)
+ }
+ }()
+
+ if err := trace.Start(f); err != nil {
+ log.Fatalf("failed to start trace: %v", err)
+ }
+ defer trace.Stop()
+ }
+
+ sort.Sort(ArchsByName(archs))
+
+	// The generate tasks are run concurrently, since they are CPU-intensive
+	// and can easily make use of many cores on a machine.
+ //
+ // Note that there is no limit on the concurrency at the moment. On a
+ // four-core laptop at the time of writing, peak RSS usually reaches
+ // ~200MiB, which seems doable by practically any machine nowadays. If
+ // that stops being the case, we can cap this func to a fixed number of
+ // architectures being generated at once.
+
+ tasks := []func(){
+ genOp,
+ }
+ for _, a := range archs {
+		a := a // the funcs are run concurrently at a later time
+ tasks = append(tasks, func() {
+ genRules(a)
+ genSplitLoadRules(a)
+ })
+ }
+ var wg sync.WaitGroup
+ for _, task := range tasks {
+ task := task
+ wg.Add(1)
+ go func() {
+ task()
+ wg.Done()
+ }()
+ }
+ wg.Wait()
+
+ if *memprofile != "" {
+ f, err := os.Create(*memprofile)
+ if err != nil {
+ log.Fatal("could not create memory profile: ", err)
+ }
+ defer f.Close()
+ runtime.GC() // get up-to-date statistics
+ if err := pprof.WriteHeapProfile(f); err != nil {
+ log.Fatal("could not write memory profile: ", err)
+ }
+ }
+}
+
+func genOp() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated from gen/*Ops.go; DO NOT EDIT.\n")
+ fmt.Fprintln(w)
+ fmt.Fprintln(w, "package ssa")
+
+ fmt.Fprintln(w, "import (")
+ fmt.Fprintln(w, "\"cmd/internal/obj\"")
+ for _, a := range archs {
+ if a.pkg != "" {
+ fmt.Fprintf(w, "%q\n", a.pkg)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate Block* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "BlockInvalid BlockKind = iota")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, d := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s\n", a.Name(), d.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate block kind string method
+ fmt.Fprintln(w, "var blockString = [...]string{")
+ fmt.Fprintln(w, "BlockInvalid:\"BlockInvalid\",")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, b := range a.blocks {
+ fmt.Fprintf(w, "Block%s%s:\"%s\",\n", a.Name(), b.name, b.name)
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "func (k BlockKind) String() string {return blockString[k]}")
+
+ // generate block kind auxint method
+ fmt.Fprintln(w, "func (k BlockKind) AuxIntType() string {")
+ fmt.Fprintln(w, "switch k {")
+ for _, a := range archs {
+ for _, b := range a.blocks {
+ if b.auxIntType() == "invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "case Block%s%s: return \"%s\"\n", a.Name(), b.name, b.auxIntType())
+ }
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintln(w, "return \"\"")
+ fmt.Fprintln(w, "}")
+
+ // generate Op* declarations
+ fmt.Fprintln(w, "const (")
+ fmt.Fprintln(w, "OpInvalid Op = iota") // make sure OpInvalid is 0.
+ for _, a := range archs {
+ fmt.Fprintln(w)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintf(w, "Op%s%s\n", a.Name(), v.name)
+ }
+ }
+ fmt.Fprintln(w, ")")
+
+ // generate OpInfo table
+ fmt.Fprintln(w, "var opcodeTable = [...]opInfo{")
+ fmt.Fprintln(w, " { name: \"OpInvalid\" },")
+ for _, a := range archs {
+ fmt.Fprintln(w)
+
+ pkg := path.Base(a.pkg)
+ for _, v := range a.ops {
+ if v.name == "Invalid" {
+ continue
+ }
+ fmt.Fprintln(w, "{")
+ fmt.Fprintf(w, "name:\"%s\",\n", v.name)
+
+ // flags
+ if v.aux != "" {
+ fmt.Fprintf(w, "auxType: aux%s,\n", v.aux)
+ }
+ fmt.Fprintf(w, "argLen: %d,\n", v.argLength)
+
+ if v.rematerializeable {
+ if v.reg.clobbers != 0 {
+ log.Fatalf("%s is rematerializeable and clobbers registers", v.name)
+ }
+ if v.clobberFlags {
+ log.Fatalf("%s is rematerializeable and clobbers flags", v.name)
+ }
+ fmt.Fprintln(w, "rematerializeable: true,")
+ }
+ if v.commutative {
+ fmt.Fprintln(w, "commutative: true,")
+ }
+ if v.resultInArg0 {
+ fmt.Fprintln(w, "resultInArg0: true,")
+ // OpConvert's register mask is selected dynamically,
+ // so don't try to check it in the static table.
+ if v.name != "Convert" && v.reg.inputs[0] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[0] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ if v.name != "Convert" && v.commutative && v.reg.inputs[1] != v.reg.outputs[0] {
+ log.Fatalf("%s: input[1] and output[0] must use the same registers for %s", a.name, v.name)
+ }
+ }
+ if v.resultNotInArgs {
+ fmt.Fprintln(w, "resultNotInArgs: true,")
+ }
+ if v.clobberFlags {
+ fmt.Fprintln(w, "clobberFlags: true,")
+ }
+ if v.call {
+ fmt.Fprintln(w, "call: true,")
+ }
+ if v.tailCall {
+ fmt.Fprintln(w, "tailCall: true,")
+ }
+ if v.nilCheck {
+ fmt.Fprintln(w, "nilCheck: true,")
+ }
+ if v.faultOnNilArg0 {
+ fmt.Fprintln(w, "faultOnNilArg0: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg0 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.faultOnNilArg1 {
+ fmt.Fprintln(w, "faultOnNilArg1: true,")
+ if v.aux != "Sym" && v.aux != "SymOff" && v.aux != "SymValAndOff" && v.aux != "Int64" && v.aux != "Int32" && v.aux != "" {
+ log.Fatalf("faultOnNilArg1 with aux %s not allowed", v.aux)
+ }
+ }
+ if v.hasSideEffects {
+ fmt.Fprintln(w, "hasSideEffects: true,")
+ }
+ if v.zeroWidth {
+ fmt.Fprintln(w, "zeroWidth: true,")
+ }
+ if v.unsafePoint {
+ fmt.Fprintln(w, "unsafePoint: true,")
+ }
+ needEffect := strings.HasPrefix(v.aux, "Sym")
+ if v.symEffect != "" {
+ if !needEffect {
+ log.Fatalf("symEffect with aux %s not allowed", v.aux)
+ }
+ fmt.Fprintf(w, "symEffect: Sym%s,\n", strings.Replace(v.symEffect, ",", "|Sym", -1))
+ } else if needEffect {
+ log.Fatalf("symEffect needed for aux %s", v.aux)
+ }
+ if a.name == "generic" {
+ fmt.Fprintln(w, "generic:true,")
+ fmt.Fprintln(w, "},") // close op
+ // generic ops have no reg info or asm
+ continue
+ }
+ if v.asm != "" {
+ fmt.Fprintf(w, "asm: %s.A%s,\n", pkg, v.asm)
+ }
+ if v.scale != 0 {
+ fmt.Fprintf(w, "scale: %d,\n", v.scale)
+ }
+ fmt.Fprintln(w, "reg:regInfo{")
+
+ // Compute input allocation order. We allocate from the
+ // most to the least constrained input. This order guarantees
+ // that we will always be able to find a register.
+ var s []intPair
+ for i, r := range v.reg.inputs {
+ if r != 0 {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "inputs: []inputInfo{")
+ for _, p := range s {
+ r := v.reg.inputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+
+ if v.reg.clobbers > 0 {
+ fmt.Fprintf(w, "clobbers: %d,%s\n", v.reg.clobbers, a.regMaskComment(v.reg.clobbers))
+ }
+
+ // reg outputs
+ s = s[:0]
+ for i, r := range v.reg.outputs {
+ s = append(s, intPair{countRegs(r), i})
+ }
+ if len(s) > 0 {
+ sort.Sort(byKey(s))
+ fmt.Fprintln(w, "outputs: []outputInfo{")
+ for _, p := range s {
+ r := v.reg.outputs[p.val]
+ fmt.Fprintf(w, "{%d,%d},%s\n", p.val, r, a.regMaskComment(r))
+ }
+ fmt.Fprintln(w, "},")
+ }
+ fmt.Fprintln(w, "},") // close reg info
+ fmt.Fprintln(w, "},") // close op
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w, "func (o Op) Asm() obj.As {return opcodeTable[o].asm}")
+ fmt.Fprintln(w, "func (o Op) Scale() int16 {return int16(opcodeTable[o].scale)}")
+
+ // generate op string method
+ fmt.Fprintln(w, "func (o Op) String() string {return opcodeTable[o].name }")
+
+ fmt.Fprintln(w, "func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }")
+ fmt.Fprintln(w, "func (o Op) IsCall() bool { return opcodeTable[o].call }")
+ fmt.Fprintln(w, "func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }")
+ fmt.Fprintln(w, "func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }")
+ fmt.Fprintln(w, "func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }")
+ fmt.Fprintln(w, "func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }")
+
+ // generate registers
+ for _, a := range archs {
+ if a.generic {
+ continue
+ }
+ fmt.Fprintf(w, "var registers%s = [...]Register {\n", a.name)
+ var gcRegN int
+ num := map[string]int8{}
+ for i, r := range a.regnames {
+ num[r] = int8(i)
+ pkg := a.pkg[len("cmd/internal/obj/"):]
+ var objname string // name in cmd/internal/obj/$ARCH
+ switch r {
+ case "SB":
+ // SB isn't a real register. cmd/internal/obj expects 0 in this case.
+ objname = "0"
+ case "SP":
+ objname = pkg + ".REGSP"
+ case "g":
+ objname = pkg + ".REGG"
+ default:
+ objname = pkg + ".REG_" + r
+ }
+ // Assign a GC register map index to registers
+ // that may contain pointers.
+ gcRegIdx := -1
+ if a.gpregmask&(1<<uint(i)) != 0 {
+ gcRegIdx = gcRegN
+ gcRegN++
+ }
+ fmt.Fprintf(w, " {%d, %s, %d, \"%s\"},\n", i, objname, gcRegIdx, r)
+ }
+ parameterRegisterList := func(paramNamesString string) []int8 {
+ paramNamesString = strings.TrimSpace(paramNamesString)
+ if paramNamesString == "" {
+ return nil
+ }
+ paramNames := strings.Split(paramNamesString, " ")
+ var paramRegs []int8
+ for _, regName := range paramNames {
+ if regName == "" {
+ // forgive extra spaces
+ continue
+ }
+ if regNum, ok := num[regName]; ok {
+ paramRegs = append(paramRegs, regNum)
+ delete(num, regName)
+ } else {
+ log.Fatalf("parameter register %s for architecture %s not a register name (or repeated in parameter list)", regName, a.name)
+ }
+ }
+ return paramRegs
+ }
+
+ paramIntRegs := parameterRegisterList(a.ParamIntRegNames)
+ paramFloatRegs := parameterRegisterList(a.ParamFloatRegNames)
+
+ if gcRegN > 32 {
+ // Won't fit in a uint32 mask.
+ log.Fatalf("too many GC registers (%d > 32) on %s", gcRegN, a.name)
+ }
+ fmt.Fprintln(w, "}")
+ fmt.Fprintf(w, "var paramIntReg%s = %#v\n", a.name, paramIntRegs)
+ fmt.Fprintf(w, "var paramFloatReg%s = %#v\n", a.name, paramFloatRegs)
+ fmt.Fprintf(w, "var gpRegMask%s = regMask(%d)\n", a.name, a.gpregmask)
+ fmt.Fprintf(w, "var fpRegMask%s = regMask(%d)\n", a.name, a.fpregmask)
+ if a.fp32regmask != 0 {
+ fmt.Fprintf(w, "var fp32RegMask%s = regMask(%d)\n", a.name, a.fp32regmask)
+ }
+ if a.fp64regmask != 0 {
+ fmt.Fprintf(w, "var fp64RegMask%s = regMask(%d)\n", a.name, a.fp64regmask)
+ }
+ fmt.Fprintf(w, "var specialRegMask%s = regMask(%d)\n", a.name, a.specialregmask)
+ fmt.Fprintf(w, "var framepointerReg%s = int8(%d)\n", a.name, a.framepointerreg)
+ fmt.Fprintf(w, "var linkReg%s = int8(%d)\n", a.name, a.linkreg)
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ var err error
+ b, err = format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", w.Bytes())
+ panic(err)
+ }
+
+ if err := ioutil.WriteFile("../opGen.go", b, 0666); err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+
+ // Check that the arch genfile handles all the arch-specific opcodes.
+ // This is very much a hack, but it is better than nothing.
+ //
+ // Do a single regexp pass to record all ops being handled in a map, and
+ // then compare that with the ops list. This is much faster than one
+ // regexp pass per opcode.
+ for _, a := range archs {
+ if a.genfile == "" {
+ continue
+ }
+
+ pattern := fmt.Sprintf(`\Wssa\.Op%s([a-zA-Z0-9_]+)\W`, a.name)
+ rxOp, err := regexp.Compile(pattern)
+ if err != nil {
+ log.Fatalf("bad opcode regexp %s: %v", pattern, err)
+ }
+
+ src, err := ioutil.ReadFile(a.genfile)
+ if err != nil {
+ log.Fatalf("can't read %s: %v", a.genfile, err)
+ }
+ seen := make(map[string]bool, len(a.ops))
+ for _, m := range rxOp.FindAllSubmatch(src, -1) {
+ seen[string(m[1])] = true
+ }
+ for _, op := range a.ops {
+ if !seen[op.name] {
+ log.Fatalf("Op%s%s has no code generation in %s", a.name, op.name, a.genfile)
+ }
+ }
+ }
+}
+
+// Name returns the name of the architecture for use in Op* and Block* enumerations.
+func (a arch) Name() string {
+ s := a.name
+ if s == "generic" {
+ s = ""
+ }
+ return s
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ n := 0
+ for r != 0 {
+ n += int(r & 1)
+ r >>= 1
+ }
+ return n
+}
+
+// for sorting a pair of integers by key
+type intPair struct {
+ key, val int
+}
+type byKey []intPair
+
+func (a byKey) Len() int { return len(a) }
+func (a byKey) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+func (a byKey) Less(i, j int) bool { return a[i].key < a[j].key }
+
+type ArchsByName []arch
+
+func (x ArchsByName) Len() int { return len(x) }
+func (x ArchsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ArchsByName) Less(i, j int) bool { return x[i].name < x[j].name }
diff --git a/src/cmd/compile/internal/ssa/gen/rulegen.go b/src/cmd/compile/internal/ssa/gen/rulegen.go
new file mode 100644
index 0000000..fe8db4e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/gen/rulegen.go
@@ -0,0 +1,1886 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build gen
+// +build gen
+
+// This program generates Go code that applies rewrite rules to a Value.
+// The generated code implements a function of type func (v *Value) bool
+// which reports whether it did something.
+// Ideas stolen from Swift: http://www.hpl.hp.com/techreports/Compaq-DEC/WRL-2000-2.html
+
+package main
+
+import (
+ "bufio"
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/printer"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path"
+ "regexp"
+ "sort"
+ "strconv"
+ "strings"
+
+ "golang.org/x/tools/go/ast/astutil"
+)
+
+// rule syntax:
+// sexpr [&& extra conditions] => [@block] sexpr
+//
+// sexpr are s-expressions (lisp-like parenthesized groupings)
+// sexpr ::= [variable:](opcode sexpr*)
+// | variable
+// | <type>
+// | [auxint]
+// | {aux}
+//
+// aux ::= variable | {code}
+// type ::= variable | {code}
+// variable ::= some token
+// opcode ::= one of the opcodes from the *Ops.go files
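+//
+// For illustration only (not necessarily a rule present in the rules files),
+// a rule of the form
+//	(Add64 (Const64 [0]) x) => x
+// matches an Add64 value whose first argument is a Const64 with auxint 0,
+// binds the other argument to x, and rewrites the value to x.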
+
+// special rules: trailing ellipsis "..." (in the outermost sexpr?) must match on both sides of a rule.
+// a trailing "___" (three underscores) in the outermost match sexpr indicates the
+// presence of extra ignored args that need not appear in the replacement
+
+// extra conditions is just a chunk of Go that evaluates to a boolean. It may use
+// variables declared in the matching sexpr. The variable "v" is predefined to be
+// the value matched by the entire rule.
+
+// If multiple rules match, the first one in file order is selected.
+
+var (
+ genLog = flag.Bool("log", false, "generate code that logs; for debugging only")
+ addLine = flag.Bool("line", false, "add line number comment to generated rules; for debugging only")
+)
+
+type Rule struct {
+ Rule string
+ Loc string // file name & line number
+}
+
+func (r Rule) String() string {
+ return fmt.Sprintf("rule %q at %s", r.Rule, r.Loc)
+}
+
+func normalizeSpaces(s string) string {
+ return strings.Join(strings.Fields(strings.TrimSpace(s)), " ")
+}
+
+// parse returns the matching part of the rule, additional conditions, and the result.
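+// For example, an illustrative rule such as
+//	(Add64 x y) && x == y => (Lsh64x64 x (Const64 <typ.UInt64> [1]))
+// splits into match "(Add64 x y)", cond "x == y", and result
+// "(Lsh64x64 x (Const64 <typ.UInt64> [1]))".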
+func (r Rule) parse() (match, cond, result string) {
+ s := strings.Split(r.Rule, "=>")
+ match = normalizeSpaces(s[0])
+ result = normalizeSpaces(s[1])
+ cond = ""
+ if i := strings.Index(match, "&&"); i >= 0 {
+ cond = normalizeSpaces(match[i+2:])
+ match = normalizeSpaces(match[:i])
+ }
+ return match, cond, result
+}
+
+func genRules(arch arch) { genRulesSuffix(arch, "") }
+func genSplitLoadRules(arch arch) { genRulesSuffix(arch, "splitload") }
+
+func genRulesSuffix(arch arch, suff string) {
+ // Open input file.
+ text, err := os.Open(arch.name + suff + ".rules")
+ if err != nil {
+ if suff == "" {
+ // All architectures must have a plain rules file.
+ log.Fatalf("can't read rule file: %v", err)
+ }
+ // Some architectures have bonus rules files that others don't share. That's fine.
+ return
+ }
+
+ // oprules contains a list of rules for each block and opcode
+ blockrules := map[string][]Rule{}
+ oprules := map[string][]Rule{}
+
+ // read rule file
+ scanner := bufio.NewScanner(text)
+ rule := ""
+ var lineno int
+ var ruleLineno int // line number of "=>"
+ for scanner.Scan() {
+ lineno++
+ line := scanner.Text()
+ if i := strings.Index(line, "//"); i >= 0 {
+ // Remove comments. Note that this isn't string safe, so
+ // it will truncate lines with // inside strings. Oh well.
+ line = line[:i]
+ }
+ rule += " " + line
+ rule = strings.TrimSpace(rule)
+ if rule == "" {
+ continue
+ }
+ if !strings.Contains(rule, "=>") {
+ continue
+ }
+ if ruleLineno == 0 {
+ ruleLineno = lineno
+ }
+ if strings.HasSuffix(rule, "=>") {
+ continue // continue on the next line
+ }
+ if n := balance(rule); n > 0 {
+ continue // open parentheses remain, continue on the next line
+ } else if n < 0 {
+ break // continuing the line can't help, and it will only make errors worse
+ }
+
+ loc := fmt.Sprintf("%s%s.rules:%d", arch.name, suff, ruleLineno)
+ for _, rule2 := range expandOr(rule) {
+ r := Rule{Rule: rule2, Loc: loc}
+ if rawop := strings.Split(rule2, " ")[0][1:]; isBlock(rawop, arch) {
+ blockrules[rawop] = append(blockrules[rawop], r)
+ continue
+ }
+ // Do fancier value op matching.
+ match, _, _ := r.parse()
+ op, oparch, _, _, _, _ := parseValue(match, arch, loc)
+ opname := fmt.Sprintf("Op%s%s", oparch, op.name)
+ oprules[opname] = append(oprules[opname], r)
+ }
+ rule = ""
+ ruleLineno = 0
+ }
+ if err := scanner.Err(); err != nil {
+ log.Fatalf("scanner failed: %v\n", err)
+ }
+ if balance(rule) != 0 {
+ log.Fatalf("%s.rules:%d: unbalanced rule: %v\n", arch.name, lineno, rule)
+ }
+
+ // Order all the ops.
+ var ops []string
+ for op := range oprules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+
+ genFile := &File{Arch: arch, Suffix: suff}
+ // Main rewrite routine is a switch on v.Op.
+ fn := &Func{Kind: "Value", ArgLen: -1}
+
+ sw := &Switch{Expr: exprf("v.Op")}
+ for _, op := range ops {
+ eop, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ if strings.Contains(oprules[op][0].Rule, "=>") && opByName(arch, op).aux != opByName(arch, eop).aux {
+ panic(fmt.Sprintf("can't use ... for ops that have different aux types: %s and %s", op, eop))
+ }
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("v.Op = %s", eop))
+ swc.add(stmtf("return true"))
+ sw.add(swc)
+ continue
+ }
+
+ swc := &Case{Expr: exprf("%s", op)}
+ swc.add(stmtf("return rewriteValue%s%s_%s(v)", arch.name, suff, op))
+ sw.add(swc)
+ }
+ if len(sw.List) > 0 { // skip if empty
+ fn.add(sw)
+ }
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Generate a routine per op. Note that we don't make one giant routine
+ // because it is too big for some compilers.
+ for _, op := range ops {
+ rules := oprules[op]
+ _, ok := parseEllipsisRules(oprules[op], arch)
+ if ok {
+ continue
+ }
+
+ // rr is kept between iterations, so that each rule can check
+ // that the previous rule wasn't unconditional.
+ var rr *RuleRewrite
+ fn := &Func{
+ Kind: "Value",
+ Suffix: fmt.Sprintf("_%s", op),
+ ArgLen: opByName(arch, op).argLength,
+ }
+ fn.add(declReserved("b", "v.Block"))
+ fn.add(declReserved("config", "b.Func.Config"))
+ fn.add(declReserved("fe", "b.Func.fe"))
+ fn.add(declReserved("typ", "&b.Func.Config.Types"))
+ for _, rule := range rules {
+ if rr != nil && !rr.CanFail {
+ log.Fatalf("unconditional rule %s is followed by other rules", rr.Match)
+ }
+ rr = &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ pos, _ := genMatch(rr, arch, rr.Match, fn.ArgLen >= 0)
+ if pos == "" {
+ pos = "v.Pos"
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+ genResult(rr, arch, rr.Result, pos)
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ fn.add(rr)
+ }
+ if rr.CanFail {
+ fn.add(stmtf("return false"))
+ }
+ genFile.add(fn)
+ }
+
+ // Generate block rewrite function. There are only a few block types
+ // so we can make this one function with a switch.
+ fn = &Func{Kind: "Block"}
+ fn.add(declReserved("config", "b.Func.Config"))
+ fn.add(declReserved("typ", "&b.Func.Config.Types"))
+
+ sw = &Switch{Expr: exprf("b.Kind")}
+ ops = ops[:0]
+ for op := range blockrules {
+ ops = append(ops, op)
+ }
+ sort.Strings(ops)
+ for _, op := range ops {
+ name, data := getBlockInfo(op, arch)
+ swc := &Case{Expr: exprf("%s", name)}
+ for _, rule := range blockrules[op] {
+ swc.add(genBlockRewrite(rule, arch, data))
+ }
+ sw.add(swc)
+ }
+ if len(sw.List) > 0 { // skip if empty
+ fn.add(sw)
+ }
+ fn.add(stmtf("return false"))
+ genFile.add(fn)
+
+ // Remove unused imports and variables.
+ buf := new(bytes.Buffer)
+ fprint(buf, genFile)
+ fset := token.NewFileSet()
+ file, err := parser.ParseFile(fset, "", buf, parser.ParseComments)
+ if err != nil {
+ filename := fmt.Sprintf("%s_broken.go", arch.name)
+ if err := ioutil.WriteFile(filename, buf.Bytes(), 0644); err != nil {
+ log.Printf("failed to dump broken code to %s: %v", filename, err)
+ } else {
+ log.Printf("dumped broken code to %s", filename)
+ }
+ log.Fatalf("failed to parse generated code for arch %s: %v", arch.name, err)
+ }
+ tfile := fset.File(file.Pos())
+
+ // First, use unusedInspector to find the unused declarations by their
+ // start position.
+ u := unusedInspector{unused: make(map[token.Pos]bool)}
+ u.node(file)
+
+ // Then, delete said nodes via astutil.Apply.
+ pre := func(c *astutil.Cursor) bool {
+ node := c.Node()
+ if node == nil {
+ return true
+ }
+ if u.unused[node.Pos()] {
+ c.Delete()
+ // Unused imports and declarations use exactly
+ // one line. Prevent leaving an empty line.
+ tfile.MergeLine(tfile.Position(node.Pos()).Line)
+ return false
+ }
+ return true
+ }
+ post := func(c *astutil.Cursor) bool {
+ switch node := c.Node().(type) {
+ case *ast.GenDecl:
+ if len(node.Specs) == 0 {
+ // Don't leave a broken or empty GenDecl behind,
+ // such as "import ()".
+ c.Delete()
+ }
+ }
+ return true
+ }
+ file = astutil.Apply(file, pre, post).(*ast.File)
+
+ // Write the well-formatted source to file
+ f, err := os.Create("../rewrite" + arch.name + suff + ".go")
+ if err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ defer f.Close()
+ // gofmt result; use a buffered writer, as otherwise go/format spends
+ // far too much time in syscalls.
+ bw := bufio.NewWriter(f)
+ if err := format.Node(bw, fset, file); err != nil {
+ log.Fatalf("can't format output: %v", err)
+ }
+ if err := bw.Flush(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+ if err := f.Close(); err != nil {
+ log.Fatalf("can't write output: %v", err)
+ }
+}
+
+// unusedInspector can be used to detect unused variables and imports in an
+// ast.Node via its node method. The result is available in the "unused" map.
+//
+// note that unusedInspector is lazy and best-effort; it only supports the node
+// types and patterns used by the rulegen program.
+type unusedInspector struct {
+ // scope is the current scope, which can never be nil when a declaration
+ // is encountered. That is, the unusedInspector.node entrypoint should
+ // generally be an entire file or block.
+ scope *scope
+
+ // unused is the resulting set of unused declared names, indexed by the
+ // starting position of the node that declared the name.
+ unused map[token.Pos]bool
+
+ // defining is the object currently being defined; this is useful so
+ // that if "foo := bar" is unused and removed, we can then detect if
+ // "bar" becomes unused as well.
+ defining *object
+}
+
+// scoped opens a new scope when called, and returns a function which closes
+// that same scope. When a scope is closed, unused variables are recorded.
+func (u *unusedInspector) scoped() func() {
+ outer := u.scope
+ u.scope = &scope{outer: outer, objects: map[string]*object{}}
+ return func() {
+ for anyUnused := true; anyUnused; {
+ anyUnused = false
+ for _, obj := range u.scope.objects {
+ if obj.numUses > 0 {
+ continue
+ }
+ u.unused[obj.pos] = true
+ for _, used := range obj.used {
+ if used.numUses--; used.numUses == 0 {
+ anyUnused = true
+ }
+ }
+ // We've decremented numUses for each of the
+ // objects in used. Zero this slice too, to keep
+ // everything consistent.
+ obj.used = nil
+ }
+ }
+ u.scope = outer
+ }
+}
+
+func (u *unusedInspector) exprs(list []ast.Expr) {
+ for _, x := range list {
+ u.node(x)
+ }
+}
+
+func (u *unusedInspector) node(node ast.Node) {
+ switch node := node.(type) {
+ case *ast.File:
+ defer u.scoped()()
+ for _, decl := range node.Decls {
+ u.node(decl)
+ }
+ case *ast.GenDecl:
+ for _, spec := range node.Specs {
+ u.node(spec)
+ }
+ case *ast.ImportSpec:
+ impPath, _ := strconv.Unquote(node.Path.Value)
+ name := path.Base(impPath)
+ u.scope.objects[name] = &object{
+ name: name,
+ pos: node.Pos(),
+ }
+ case *ast.FuncDecl:
+ u.node(node.Type)
+ if node.Body != nil {
+ u.node(node.Body)
+ }
+ case *ast.FuncType:
+ if node.Params != nil {
+ u.node(node.Params)
+ }
+ if node.Results != nil {
+ u.node(node.Results)
+ }
+ case *ast.FieldList:
+ for _, field := range node.List {
+ u.node(field)
+ }
+ case *ast.Field:
+ u.node(node.Type)
+
+ // statements
+
+ case *ast.BlockStmt:
+ defer u.scoped()()
+ for _, stmt := range node.List {
+ u.node(stmt)
+ }
+ case *ast.DeclStmt:
+ u.node(node.Decl)
+ case *ast.IfStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ u.node(node.Cond)
+ u.node(node.Body)
+ if node.Else != nil {
+ u.node(node.Else)
+ }
+ case *ast.ForStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Cond != nil {
+ u.node(node.Cond)
+ }
+ if node.Post != nil {
+ u.node(node.Post)
+ }
+ u.node(node.Body)
+ case *ast.SwitchStmt:
+ if node.Init != nil {
+ u.node(node.Init)
+ }
+ if node.Tag != nil {
+ u.node(node.Tag)
+ }
+ u.node(node.Body)
+ case *ast.CaseClause:
+ u.exprs(node.List)
+ defer u.scoped()()
+ for _, stmt := range node.Body {
+ u.node(stmt)
+ }
+ case *ast.BranchStmt:
+ case *ast.ExprStmt:
+ u.node(node.X)
+ case *ast.AssignStmt:
+ if node.Tok != token.DEFINE {
+ u.exprs(node.Rhs)
+ u.exprs(node.Lhs)
+ break
+ }
+ lhs := node.Lhs
+ if len(lhs) == 2 && lhs[1].(*ast.Ident).Name == "_" {
+ lhs = lhs[:1]
+ }
+ if len(lhs) != 1 {
+ panic("no support for := with multiple names")
+ }
+
+ name := lhs[0].(*ast.Ident)
+ obj := &object{
+ name: name.Name,
+ pos: name.NamePos,
+ }
+
+ old := u.defining
+ u.defining = obj
+ u.exprs(node.Rhs)
+ u.defining = old
+
+ u.scope.objects[name.Name] = obj
+ case *ast.ReturnStmt:
+ u.exprs(node.Results)
+ case *ast.IncDecStmt:
+ u.node(node.X)
+
+ // expressions
+
+ case *ast.CallExpr:
+ u.node(node.Fun)
+ u.exprs(node.Args)
+ case *ast.SelectorExpr:
+ u.node(node.X)
+ case *ast.UnaryExpr:
+ u.node(node.X)
+ case *ast.BinaryExpr:
+ u.node(node.X)
+ u.node(node.Y)
+ case *ast.StarExpr:
+ u.node(node.X)
+ case *ast.ParenExpr:
+ u.node(node.X)
+ case *ast.IndexExpr:
+ u.node(node.X)
+ u.node(node.Index)
+ case *ast.TypeAssertExpr:
+ u.node(node.X)
+ u.node(node.Type)
+ case *ast.Ident:
+ if obj := u.scope.Lookup(node.Name); obj != nil {
+ obj.numUses++
+ if u.defining != nil {
+ u.defining.used = append(u.defining.used, obj)
+ }
+ }
+ case *ast.BasicLit:
+ case *ast.ValueSpec:
+ u.exprs(node.Values)
+ default:
+ panic(fmt.Sprintf("unhandled node: %T", node))
+ }
+}
+
+// scope keeps track of a certain scope and its declared names, as well as the
+// outer (parent) scope.
+type scope struct {
+ outer *scope // can be nil, if this is the top-level scope
+ objects map[string]*object // indexed by each declared name
+}
+
+func (s *scope) Lookup(name string) *object {
+ if obj := s.objects[name]; obj != nil {
+ return obj
+ }
+ if s.outer == nil {
+ return nil
+ }
+ return s.outer.Lookup(name)
+}
+
+// object keeps track of a declared name, such as a variable or import.
+type object struct {
+ name string
+ pos token.Pos // start position of the node declaring the object
+
+ numUses int // number of times this object is used
+ used []*object // objects that its declaration makes use of
+}
+
+func fprint(w io.Writer, n Node) {
+ switch n := n.(type) {
+ case *File:
+ file := n
+ seenRewrite := make(map[[3]string]string)
+ fmt.Fprintf(w, "// Code generated from gen/%s%s.rules; DO NOT EDIT.\n", n.Arch.name, n.Suffix)
+ fmt.Fprintf(w, "// generated with: cd gen; go run *.go\n")
+ fmt.Fprintf(w, "\npackage ssa\n")
+ for _, path := range append([]string{
+ "fmt",
+ "internal/buildcfg",
+ "math",
+ "cmd/internal/obj",
+ "cmd/compile/internal/base",
+ "cmd/compile/internal/types",
+ }, n.Arch.imports...) {
+ fmt.Fprintf(w, "import %q\n", path)
+ }
+ for _, f := range n.List {
+ f := f.(*Func)
+ fmt.Fprintf(w, "func rewrite%s%s%s%s(", f.Kind, n.Arch.name, n.Suffix, f.Suffix)
+ fmt.Fprintf(w, "%c *%s) bool {\n", strings.ToLower(f.Kind)[0], f.Kind)
+ if f.Kind == "Value" && f.ArgLen > 0 {
+ for i := f.ArgLen - 1; i >= 0; i-- {
+ fmt.Fprintf(w, "v_%d := v.Args[%d]\n", i, i)
+ }
+ }
+ for _, n := range f.List {
+ fprint(w, n)
+
+ if rr, ok := n.(*RuleRewrite); ok {
+ k := [3]string{
+ normalizeMatch(rr.Match, file.Arch),
+ normalizeWhitespace(rr.Cond),
+ normalizeWhitespace(rr.Result),
+ }
+ if prev, ok := seenRewrite[k]; ok {
+ log.Fatalf("duplicate rule %s, previously seen at %s\n", rr.Loc, prev)
+ }
+ seenRewrite[k] = rr.Loc
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ case *Switch:
+ fmt.Fprintf(w, "switch ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, " {\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Case:
+ fmt.Fprintf(w, "case ")
+ fprint(w, n.Expr)
+ fmt.Fprintf(w, ":\n")
+ for _, n := range n.List {
+ fprint(w, n)
+ }
+ case *RuleRewrite:
+ if *addLine {
+ fmt.Fprintf(w, "// %s\n", n.Loc)
+ }
+ fmt.Fprintf(w, "// match: %s\n", n.Match)
+ if n.Cond != "" {
+ fmt.Fprintf(w, "// cond: %s\n", n.Cond)
+ }
+ fmt.Fprintf(w, "// result: %s\n", n.Result)
+ fmt.Fprintf(w, "for %s {\n", n.Check)
+ nCommutative := 0
+ for _, n := range n.List {
+ if b, ok := n.(*CondBreak); ok {
+ b.InsideCommuteLoop = nCommutative > 0
+ }
+ fprint(w, n)
+ if loop, ok := n.(StartCommuteLoop); ok {
+ if nCommutative != loop.Depth {
+ panic("mismatch commute loop depth")
+ }
+ nCommutative++
+ }
+ }
+ fmt.Fprintf(w, "return true\n")
+ for i := 0; i < nCommutative; i++ {
+ fmt.Fprintln(w, "}")
+ }
+ if n.CommuteDepth > 0 && n.CanFail {
+ fmt.Fprint(w, "break\n")
+ }
+ fmt.Fprintf(w, "}\n")
+ case *Declare:
+ fmt.Fprintf(w, "%s := ", n.Name)
+ fprint(w, n.Value)
+ fmt.Fprintln(w)
+ case *CondBreak:
+ fmt.Fprintf(w, "if ")
+ fprint(w, n.Cond)
+ fmt.Fprintf(w, " {\n")
+ if n.InsideCommuteLoop {
+ fmt.Fprintf(w, "continue")
+ } else {
+ fmt.Fprintf(w, "break")
+ }
+ fmt.Fprintf(w, "\n}\n")
+ case ast.Node:
+ printConfig.Fprint(w, emptyFset, n)
+ if _, ok := n.(ast.Stmt); ok {
+ fmt.Fprintln(w)
+ }
+ case StartCommuteLoop:
+ fmt.Fprintf(w, "for _i%[1]d := 0; _i%[1]d <= 1; _i%[1]d, %[2]s_0, %[2]s_1 = _i%[1]d + 1, %[2]s_1, %[2]s_0 {\n", n.Depth, n.V)
+ default:
+ log.Fatalf("cannot print %T", n)
+ }
+}
+
+var printConfig = printer.Config{
+ Mode: printer.RawFormat, // we use go/format later, so skip work here
+}
+
+var emptyFset = token.NewFileSet()
+
+// Node can be a Statement or an ast.Expr.
+type Node interface{}
+
+// Statement can be one of our high-level statement struct types, or an
+// ast.Stmt under some limited circumstances.
+type Statement interface{}
+
+// BodyBase is shared by all of our statement pseudo-node types which can
+// contain other statements.
+type BodyBase struct {
+ List []Statement
+ CanFail bool
+}
+
+func (w *BodyBase) add(node Statement) {
+ var last Statement
+ if len(w.List) > 0 {
+ last = w.List[len(w.List)-1]
+ }
+ if node, ok := node.(*CondBreak); ok {
+ w.CanFail = true
+ if last, ok := last.(*CondBreak); ok {
+ // Add to the previous "if <cond> { break }" via a
+ // logical OR, which will save verbosity.
+ last.Cond = &ast.BinaryExpr{
+ Op: token.LOR,
+ X: last.Cond,
+ Y: node.Cond,
+ }
+ return
+ }
+ }
+
+ w.List = append(w.List, node)
+}
+
+// predeclared contains globally known tokens that should not be redefined.
+var predeclared = map[string]bool{
+ "nil": true,
+ "false": true,
+ "true": true,
+}
+
+// declared reports if the body contains a Declare with the given name.
+func (w *BodyBase) declared(name string) bool {
+ if predeclared[name] {
+ // Treat predeclared names as having already been declared.
+ // This lets us use nil to match an aux field or
+ // true and false to match an auxint field.
+ return true
+ }
+ for _, s := range w.List {
+ if decl, ok := s.(*Declare); ok && decl.Name == name {
+ return true
+ }
+ }
+ return false
+}
+
+// These types define some high-level statement struct types, which can be used
+// as a Statement. This allows us to keep some node structs simpler, and have
+// higher-level nodes such as an entire rule rewrite.
+//
+// Note that ast.Expr is always used as-is; we don't declare our own expression
+// nodes.
+type (
+ File struct {
+ BodyBase // []*Func
+ Arch arch
+ Suffix string
+ }
+ Func struct {
+ BodyBase
+ Kind string // "Value" or "Block"
+ Suffix string
+ ArgLen int32 // if kind == "Value", number of args for this op
+ }
+ Switch struct {
+ BodyBase // []*Case
+ Expr ast.Expr
+ }
+ Case struct {
+ BodyBase
+ Expr ast.Expr
+ }
+ RuleRewrite struct {
+ BodyBase
+ Match, Cond, Result string // top comments
+ Check string // top-level boolean expression
+
+ Alloc int // for unique var names
+ Loc string // file name & line number of the original rule
+ CommuteDepth int // used to track depth of commute loops
+ }
+ Declare struct {
+ Name string
+ Value ast.Expr
+ }
+ CondBreak struct {
+ Cond ast.Expr
+ InsideCommuteLoop bool
+ }
+ StartCommuteLoop struct {
+ Depth int
+ V string
+ }
+)
+
+// exprf parses a Go expression generated from fmt.Sprintf, exiting with a
+// fatal error if parsing fails.
+func exprf(format string, a ...interface{}) ast.Expr {
+ src := fmt.Sprintf(format, a...)
+ expr, err := parser.ParseExpr(src)
+ if err != nil {
+ log.Fatalf("expr parse error on %q: %v", src, err)
+ }
+ return expr
+}
+
+// stmtf parses a Go statement generated from fmt.Sprintf. This function is only
+// meant for simple statements that don't have a custom Statement node declared
+// in this package, such as ast.ReturnStmt or ast.ExprStmt.
+func stmtf(format string, a ...interface{}) Statement {
+ src := fmt.Sprintf(format, a...)
+ fsrc := "package p\nfunc _() {\n" + src + "\n}\n"
+ file, err := parser.ParseFile(token.NewFileSet(), "", fsrc, 0)
+ if err != nil {
+ log.Fatalf("stmt parse error on %q: %v", src, err)
+ }
+ return file.Decls[0].(*ast.FuncDecl).Body.List[0]
+}
+
+var reservedNames = map[string]bool{
+ "v": true, // Values[i], etc
+ "b": true, // v.Block
+ "config": true, // b.Func.Config
+ "fe": true, // b.Func.fe
+ "typ": true, // &b.Func.Config.Types
+}
+
+// declf constructs a simple "name := value" declaration,
+// using exprf for its value.
+//
+// name must not be one of reservedNames.
+// This helps prevent unintended shadowing and name clashes.
+// To declare a reserved name, use declReserved.
+func declf(loc, name, format string, a ...interface{}) *Declare {
+ if reservedNames[name] {
+ log.Fatalf("rule %s uses the reserved name %s", loc, name)
+ }
+ return &Declare{name, exprf(format, a...)}
+}
+
+// declReserved is like declf, but the name must be one of reservedNames.
+// Calls to declReserved should generally be static and top-level.
+func declReserved(name, value string) *Declare {
+ if !reservedNames[name] {
+ panic(fmt.Sprintf("declReserved call does not use a reserved name: %q", name))
+ }
+ return &Declare{name, exprf(value)}
+}
+
+// breakf constructs a simple "if cond { break }" statement, using exprf for its
+// condition.
+func breakf(format string, a ...interface{}) *CondBreak {
+ return &CondBreak{Cond: exprf(format, a...)}
+}
+
+func genBlockRewrite(rule Rule, arch arch, data blockData) *RuleRewrite {
+ rr := &RuleRewrite{Loc: rule.Loc}
+ rr.Match, rr.Cond, rr.Result = rule.parse()
+ _, _, auxint, aux, s := extract(rr.Match) // remove parens, then split
+
+ // check match of control values
+ if len(s) < data.controls {
+ log.Fatalf("incorrect number of arguments in %s, got %v wanted at least %v", rule, len(s), data.controls)
+ }
+ controls := s[:data.controls]
+ pos := make([]string, data.controls)
+ for i, arg := range controls {
+ cname := fmt.Sprintf("b.Controls[%v]", i)
+ if strings.Contains(arg, "(") {
+ vname, expr := splitNameExpr(arg)
+ if vname == "" {
+ vname = fmt.Sprintf("v_%v", i)
+ }
+ rr.add(declf(rr.Loc, vname, cname))
+ p, op := genMatch0(rr, arch, expr, vname, nil, false) // TODO: pass non-nil cnt?
+ if op != "" {
+ check := fmt.Sprintf("%s.Op == %s", cname, op)
+ if rr.Check == "" {
+ rr.Check = check
+ } else {
+ rr.Check += " && " + check
+ }
+ }
+ if p == "" {
+ p = vname + ".Pos"
+ }
+ pos[i] = p
+ } else {
+ rr.add(declf(rr.Loc, arg, cname))
+ pos[i] = arg + ".Pos"
+ }
+ }
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {auxint, "AuxInt", data.auxIntType()},
+ {aux, "Aux", data.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", data.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ rr.add(breakf("%sTo%s(b.%s) != %s", unTitle(e.field), title(e.dclType), e.field, e.name))
+ } else {
+ rr.add(declf(rr.Loc, e.name, "%sTo%s(b.%s)", unTitle(e.field), title(e.dclType), e.field))
+ }
+ }
+ if rr.Cond != "" {
+ rr.add(breakf("!(%s)", rr.Cond))
+ }
+
+ // Rule matches. Generate result.
+ outop, _, auxint, aux, t := extract(rr.Result) // remove parens, then split
+ blockName, outdata := getBlockInfo(outop, arch)
+ if len(t) < outdata.controls {
+ log.Fatalf("incorrect number of output arguments in %s, got %v wanted at least %v", rule, len(s), outdata.controls)
+ }
+
+ // Check if newsuccs is the same set as succs.
+ succs := s[data.controls:]
+ newsuccs := t[outdata.controls:]
+ m := map[string]bool{}
+ for _, succ := range succs {
+ if m[succ] {
+ log.Fatalf("can't have a repeat successor name %s in %s", succ, rule)
+ }
+ m[succ] = true
+ }
+ for _, succ := range newsuccs {
+ if !m[succ] {
+ log.Fatalf("unknown successor %s in %s", succ, rule)
+ }
+ delete(m, succ)
+ }
+ if len(m) != 0 {
+ log.Fatalf("unmatched successors %v in %s", m, rule)
+ }
+
+ var genControls [2]string
+ for i, control := range t[:outdata.controls] {
+ // Select a source position for any new control values.
+ // TODO: does it always make sense to use the source position
+ // of the original control values or should we be using the
+ // block's source position in some cases?
+ newpos := "b.Pos" // default to block's source position
+ if i < len(pos) && pos[i] != "" {
+ // Use the previous control value's source position.
+ newpos = pos[i]
+ }
+
+ // Generate a new control value (or copy an existing value).
+ genControls[i] = genResult0(rr, arch, control, false, false, newpos, nil)
+ }
+ switch outdata.controls {
+ case 0:
+ rr.add(stmtf("b.Reset(%s)", blockName))
+ case 1:
+ rr.add(stmtf("b.resetWithControl(%s, %s)", blockName, genControls[0]))
+ case 2:
+ rr.add(stmtf("b.resetWithControl2(%s, %s, %s)", blockName, genControls[0], genControls[1]))
+ default:
+ log.Fatalf("too many controls: %d", outdata.controls)
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("b.AuxInt = %sToAuxInt(%s)", unTitle(outdata.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("b.Aux = %sToAux(%s)", unTitle(outdata.auxType()), aux))
+ }
+
+ succChanged := false
+ for i := 0; i < len(succs); i++ {
+ if succs[i] != newsuccs[i] {
+ succChanged = true
+ }
+ }
+ if succChanged {
+ if len(succs) != 2 {
+ log.Fatalf("changed successors, len!=2 in %s", rule)
+ }
+ if succs[0] != newsuccs[1] || succs[1] != newsuccs[0] {
+ log.Fatalf("can only handle swapped successors in %s", rule)
+ }
+ rr.add(stmtf("b.swapSuccessors()"))
+ }
+
+ if *genLog {
+ rr.add(stmtf("logRule(%q)", rule.Loc))
+ }
+ return rr
+}
+
+// genMatch returns the variable whose source position should be used for the
+// result (or "" if no opinion), and the Op name that the matched value must have.
+func genMatch(rr *RuleRewrite, arch arch, match string, pregenTop bool) (pos, checkOp string) {
+ cnt := varCount(rr)
+ return genMatch0(rr, arch, match, "v", cnt, pregenTop)
+}
+
+func genMatch0(rr *RuleRewrite, arch arch, match, v string, cnt map[string]int, pregenTop bool) (pos, checkOp string) {
+ if match[0] != '(' || match[len(match)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in genMatch0: %q", rr.Loc, match)
+ }
+ op, oparch, typ, auxint, aux, args := parseValue(match, arch, rr.Loc)
+
+ checkOp = fmt.Sprintf("Op%s%s", oparch, op.name)
+
+ if op.faultOnNilArg0 || op.faultOnNilArg1 {
+ // Prefer the position of an instruction which could fault.
+ pos = v + ".Pos"
+ }
+
+ // If the last argument is ___, it means "don't care about trailing arguments, really"
+ // The likely/intended use is for rewrites that are too tricky to express in the existing pattern language
+ // Do a length check early because long patterns fed short (ultimately not-matching) inputs will
+// cause an indexing error in pattern-matching.
+ if op.argLength == -1 {
+ l := len(args)
+ if l == 0 || args[l-1] != "___" {
+ rr.add(breakf("len(%s.Args) != %d", v, l))
+ } else if l > 1 && args[l-1] == "___" {
+ rr.add(breakf("len(%s.Args) < %d", v, l-1))
+ }
+ }
+
+ for _, e := range []struct {
+ name, field, dclType string
+ }{
+ {typ, "Type", "*types.Type"},
+ {auxint, "AuxInt", op.auxIntType()},
+ {aux, "Aux", op.auxType()},
+ } {
+ if e.name == "" {
+ continue
+ }
+
+ if e.dclType == "" {
+ log.Fatalf("op %s has no declared type for %s", op.name, e.field)
+ }
+ if !token.IsIdentifier(e.name) || rr.declared(e.name) {
+ switch e.field {
+ case "Aux":
+ rr.add(breakf("auxTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "AuxInt":
+ rr.add(breakf("auxIntTo%s(%s.%s) != %s", title(e.dclType), v, e.field, e.name))
+ case "Type":
+ rr.add(breakf("%s.%s != %s", v, e.field, e.name))
+ }
+ } else {
+ switch e.field {
+ case "Aux":
+ rr.add(declf(rr.Loc, e.name, "auxTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "AuxInt":
+ rr.add(declf(rr.Loc, e.name, "auxIntTo%s(%s.%s)", title(e.dclType), v, e.field))
+ case "Type":
+ rr.add(declf(rr.Loc, e.name, "%s.%s", v, e.field))
+ }
+ }
+ }
+
+ commutative := op.commutative
+ if commutative {
+ if args[0] == args[1] {
+ // When we have (Add x x), for any x,
+ // even if there are other uses of x besides these two,
+ // and even if x is not a variable,
+ // we can skip the commutative match.
+ commutative = false
+ }
+ if cnt[args[0]] == 1 && cnt[args[1]] == 1 {
+ // When we have (Add x y) with no other uses
+ // of x and y in the matching rule and condition,
+ // then we can skip the commutative match (Add y x).
+ commutative = false
+ }
+ }
+
+ if !pregenTop {
+ // Access last argument first to minimize bounds checks.
+ for n := len(args) - 1; n > 0; n-- {
+ a := args[n]
+ if a == "_" {
+ continue
+ }
+ if !rr.declared(a) && token.IsIdentifier(a) && !(commutative && len(args) == 2) {
+ rr.add(declf(rr.Loc, a, "%s.Args[%d]", v, n))
+ // delete the last argument so it is not reprocessed
+ args = args[:n]
+ } else {
+ rr.add(stmtf("_ = %s.Args[%d]", v, n))
+ }
+ break
+ }
+ }
+ if commutative && !pregenTop {
+ for i := 0; i <= 1; i++ {
+ vname := fmt.Sprintf("%s_%d", v, i)
+ rr.add(declf(rr.Loc, vname, "%s.Args[%d]", v, i))
+ }
+ }
+ if commutative {
+ rr.add(StartCommuteLoop{rr.CommuteDepth, v})
+ rr.CommuteDepth++
+ }
+ for i, arg := range args {
+ if arg == "_" {
+ continue
+ }
+ var rhs string
+ if (commutative && i < 2) || pregenTop {
+ rhs = fmt.Sprintf("%s_%d", v, i)
+ } else {
+ rhs = fmt.Sprintf("%s.Args[%d]", v, i)
+ }
+ if !strings.Contains(arg, "(") {
+ // leaf variable
+ if rr.declared(arg) {
+ // variable already has a definition. Check whether
+ // the old definition and the new definition match.
+ // For example, (add x x). Equality is just pointer equality
+ // on Values (so cse is important to do before lowering).
+ rr.add(breakf("%s != %s", arg, rhs))
+ } else {
+ if arg != rhs {
+ rr.add(declf(rr.Loc, arg, "%s", rhs))
+ }
+ }
+ continue
+ }
+ // compound sexpr
+ argname, expr := splitNameExpr(arg)
+ if argname == "" {
+ argname = fmt.Sprintf("%s_%d", v, i)
+ }
+ if argname == "b" {
+ log.Fatalf("don't name args 'b', it is ambiguous with blocks")
+ }
+
+ if argname != rhs {
+ rr.add(declf(rr.Loc, argname, "%s", rhs))
+ }
+ bexpr := exprf("%s.Op != addLater", argname)
+ rr.add(&CondBreak{Cond: bexpr})
+ argPos, argCheckOp := genMatch0(rr, arch, expr, argname, cnt, false)
+ bexpr.(*ast.BinaryExpr).Y.(*ast.Ident).Name = argCheckOp
+
+ if argPos != "" {
+ // Keep the argument in preference to the parent, as the
+ // argument is normally earlier in program flow.
+ // Keep the argument in preference to an earlier argument,
+ // as that prefers the memory argument which is also earlier
+ // in the program flow.
+ pos = argPos
+ }
+ }
+
+ return pos, checkOp
+}
+
+func genResult(rr *RuleRewrite, arch arch, result, pos string) {
+ move := result[0] == '@'
+ if move {
+ // parse @block directive
+ s := strings.SplitN(result[1:], " ", 2)
+ rr.add(stmtf("b = %s", s[0]))
+ result = s[1]
+ }
+ cse := make(map[string]string)
+ genResult0(rr, arch, result, true, move, pos, cse)
+}
+
+func genResult0(rr *RuleRewrite, arch arch, result string, top, move bool, pos string, cse map[string]string) string {
+ resname, expr := splitNameExpr(result)
+ result = expr
+ // TODO: when generating a constant result, use f.constVal to avoid
+ // introducing copies just to clean them up again.
+ if result[0] != '(' {
+ // variable
+ if top {
+ // It is not safe in general to move a variable between blocks
+ // (and particularly not a phi node).
+ // Introduce a copy.
+ rr.add(stmtf("v.copyOf(%s)", result))
+ }
+ return result
+ }
+
+ w := normalizeWhitespace(result)
+ if prev := cse[w]; prev != "" {
+ return prev
+ }
+
+ op, oparch, typ, auxint, aux, args := parseValue(result, arch, rr.Loc)
+
+ // Find the type of the variable.
+ typeOverride := typ != ""
+ if typ == "" && op.typ != "" {
+ typ = typeName(op.typ)
+ }
+
+ v := "v"
+ if top && !move {
+ rr.add(stmtf("v.reset(Op%s%s)", oparch, op.name))
+ if typeOverride {
+ rr.add(stmtf("v.Type = %s", typ))
+ }
+ } else {
+ if typ == "" {
+ log.Fatalf("sub-expression %s (op=Op%s%s) at %s must have a type", result, oparch, op.name, rr.Loc)
+ }
+ if resname == "" {
+ v = fmt.Sprintf("v%d", rr.Alloc)
+ } else {
+ v = resname
+ }
+ rr.Alloc++
+ rr.add(declf(rr.Loc, v, "b.NewValue0(%s, Op%s%s, %s)", pos, oparch, op.name, typ))
+ if move && top {
+ // Rewrite original into a copy
+ rr.add(stmtf("v.copyOf(%s)", v))
+ }
+ }
+
+ if auxint != "" {
+ // Make sure auxint value has the right type.
+ rr.add(stmtf("%s.AuxInt = %sToAuxInt(%s)", v, unTitle(op.auxIntType()), auxint))
+ }
+ if aux != "" {
+ // Make sure aux value has the right type.
+ rr.add(stmtf("%s.Aux = %sToAux(%s)", v, unTitle(op.auxType()), aux))
+ }
+ all := new(strings.Builder)
+ for i, arg := range args {
+ x := genResult0(rr, arch, arg, false, move, pos, cse)
+ if i > 0 {
+ all.WriteString(", ")
+ }
+ all.WriteString(x)
+ }
+ switch len(args) {
+ case 0:
+ case 1:
+ rr.add(stmtf("%s.AddArg(%s)", v, all.String()))
+ default:
+ rr.add(stmtf("%s.AddArg%d(%s)", v, len(args), all.String()))
+ }
+
+ if cse != nil {
+ cse[w] = v
+ }
+ return v
+}
+
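+// split breaks s into top-level fields separated by spaces or tabs, keeping
+// anything inside matched (), <>, [], or {} together as a single field.
+// For example, "x (Add y z) [c]" splits into "x", "(Add y z)", and "[c]".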
+func split(s string) []string {
+ var r []string
+
+outer:
+ for s != "" {
+ d := 0 // depth of ({[<
+ var open, close byte // opening and closing markers ({[< or )}]>
+ nonsp := false // found a non-space char so far
+ for i := 0; i < len(s); i++ {
+ switch {
+ case d == 0 && s[i] == '(':
+ open, close = '(', ')'
+ d++
+ case d == 0 && s[i] == '<':
+ open, close = '<', '>'
+ d++
+ case d == 0 && s[i] == '[':
+ open, close = '[', ']'
+ d++
+ case d == 0 && s[i] == '{':
+ open, close = '{', '}'
+ d++
+ case d == 0 && (s[i] == ' ' || s[i] == '\t'):
+ if nonsp {
+ r = append(r, strings.TrimSpace(s[:i]))
+ s = s[i:]
+ continue outer
+ }
+ case d > 0 && s[i] == open:
+ d++
+ case d > 0 && s[i] == close:
+ d--
+ default:
+ nonsp = true
+ }
+ }
+ if d != 0 {
+ log.Fatalf("imbalanced expression: %q", s)
+ }
+ if nonsp {
+ r = append(r, strings.TrimSpace(s))
+ }
+ break
+ }
+ return r
+}
+
+// isBlock reports whether this op is a block opcode.
+func isBlock(name string, arch arch) bool {
+ for _, b := range genericBlocks {
+ if b.name == name {
+ return true
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == name {
+ return true
+ }
+ }
+ return false
+}
+
+func extract(val string) (op, typ, auxint, aux string, args []string) {
+ val = val[1 : len(val)-1] // remove ()
+
+ // Split val up into regions.
+ // Split by spaces/tabs, except those contained in (), {}, [], or <>.
+ s := split(val)
+
+ // Extract restrictions and args.
+ op = s[0]
+ for _, a := range s[1:] {
+ switch a[0] {
+ case '<':
+ typ = a[1 : len(a)-1] // remove <>
+ case '[':
+ auxint = a[1 : len(a)-1] // remove []
+ case '{':
+ aux = a[1 : len(a)-1] // remove {}
+ default:
+ args = append(args, a)
+ }
+ }
+ return
+}
+
+// parseValue parses a parenthesized value from a rule.
+// The value can be from the match or the result side.
+// It returns the op and unparsed strings for typ, auxint, and aux restrictions and for all args.
+// oparch is the architecture that op is located in, or "" for generic.
+func parseValue(val string, arch arch, loc string) (op opData, oparch, typ, auxint, aux string, args []string) {
+ // Resolve the op.
+ var s string
+ s, typ, auxint, aux, args = extract(val)
+
+ // match reports whether x is a good op to select.
+ // If strict is true, rule generation might succeed.
+ // If strict is false, rule generation has failed,
+ // but we're trying to generate a useful error.
+ // Doing strict=true then strict=false allows
+ // precise op matching while retaining good error messages.
+ match := func(x opData, strict bool, archname string) bool {
+ if x.name != s {
+ return false
+ }
+ if x.argLength != -1 && int(x.argLength) != len(args) && (len(args) != 1 || args[0] != "...") {
+ if strict {
+ return false
+ }
+ log.Printf("%s: op %s (%s) should have %d args, has %d", loc, s, archname, x.argLength, len(args))
+ }
+ return true
+ }
+
+ for _, x := range genericOps {
+ if match(x, true, "generic") {
+ op = x
+ break
+ }
+ }
+ for _, x := range arch.ops {
+ if arch.name != "generic" && match(x, true, arch.name) {
+ if op.name != "" {
+ log.Fatalf("%s: matches for op %s found in both generic and %s", loc, op.name, arch.name)
+ }
+ op = x
+ oparch = arch.name
+ break
+ }
+ }
+
+ if op.name == "" {
+ // Failed to find the op.
+ // Run through everything again with strict=false
+ // to generate useful diagnostic messages before failing.
+ for _, x := range genericOps {
+ match(x, false, "generic")
+ }
+ for _, x := range arch.ops {
+ match(x, false, arch.name)
+ }
+ log.Fatalf("%s: unknown op %s", loc, s)
+ }
+
+ // Sanity check aux, auxint.
+ if auxint != "" && !opHasAuxInt(op) {
+ log.Fatalf("%s: op %s %s can't have auxint", loc, op.name, op.aux)
+ }
+ if aux != "" && !opHasAux(op) {
+ log.Fatalf("%s: op %s %s can't have aux", loc, op.name, op.aux)
+ }
+ return
+}
+
+func opHasAuxInt(op opData) bool {
+ switch op.aux {
+ case "Bool", "Int8", "Int16", "Int32", "Int64", "Int128", "UInt8", "Float32", "Float64",
+ "SymOff", "CallOff", "SymValAndOff", "TypSize", "ARM64BitField", "FlagConstant", "CCop":
+ return true
+ }
+ return false
+}
+
+func opHasAux(op opData) bool {
+ switch op.aux {
+ case "String", "Sym", "SymOff", "Call", "CallOff", "SymValAndOff", "Typ", "TypSize",
+ "S390XCCMask", "S390XRotateParams":
+ return true
+ }
+ return false
+}
+
+// splitNameExpr splits s-expr arg, possibly prefixed by "name:",
+// into name and the unprefixed expression.
+// For example, "x:(Foo)" yields "x", "(Foo)",
+// and "(Foo)" yields "", "(Foo)".
+func splitNameExpr(arg string) (name, expr string) {
+ colon := strings.Index(arg, ":")
+ if colon < 0 {
+ return "", arg
+ }
+ openparen := strings.Index(arg, "(")
+ if openparen < 0 {
+ log.Fatalf("splitNameExpr(%q): colon but no open parens", arg)
+ }
+ if colon > openparen {
+ // colon is inside the parens, such as in "(Foo x:(Bar))".
+ return "", arg
+ }
+ return arg[:colon], arg[colon+1:]
+}
+
+func getBlockInfo(op string, arch arch) (name string, data blockData) {
+ for _, b := range genericBlocks {
+ if b.name == op {
+ return "Block" + op, b
+ }
+ }
+ for _, b := range arch.blocks {
+ if b.name == op {
+ return "Block" + arch.name + op, b
+ }
+ }
+ log.Fatalf("could not find block data for %s", op)
+ panic("unreachable")
+}
+
+// typeName returns the string to use to generate a type.
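+// For example, typeName("Int64") is "typ.Int64", typeName("Mem") is
+// "types.TypeMem", and typeName("(Int64,Mem)") is
+// "types.NewTuple(typ.Int64, types.TypeMem)".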
+func typeName(typ string) string {
+ if typ[0] == '(' {
+ ts := strings.Split(typ[1:len(typ)-1], ",")
+ if len(ts) != 2 {
+ log.Fatalf("Tuple expect 2 arguments")
+ }
+ return "types.NewTuple(" + typeName(ts[0]) + ", " + typeName(ts[1]) + ")"
+ }
+ switch typ {
+ case "Flags", "Mem", "Void", "Int128":
+ return "types.Type" + typ
+ default:
+ return "typ." + typ
+ }
+}
+
+// balance returns the number of unclosed '(' characters in s.
+// If a ')' appears without a corresponding '(', balance returns -1.
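+// For example, balance("(Add (Const64") is 2, balance("(Add x y)") is 0,
+// and balance(") (") is -1.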
+func balance(s string) int {
+ balance := 0
+ for _, c := range s {
+ switch c {
+ case '(':
+ balance++
+ case ')':
+ balance--
+ if balance < 0 {
+ // don't allow ")(" to return 0
+ return -1
+ }
+ }
+ }
+ return balance
+}
+
+// findAllOpcode is a function to find the opcode portion of s-expressions.
+var findAllOpcode = regexp.MustCompile(`[(](\w+[|])+\w+[)]`).FindAllStringIndex
+
+// excludeFromExpansion reports whether the substring s[idx[0]:idx[1]] in a rule
+// should be disregarded as a candidate for | expansion.
+// It uses simple syntactic checks to see whether the substring
+// is inside an AuxInt expression or inside the && conditions.
+func excludeFromExpansion(s string, idx []int) bool {
+ left := s[:idx[0]]
+ if strings.LastIndexByte(left, '[') > strings.LastIndexByte(left, ']') {
+ // Inside an AuxInt expression.
+ return true
+ }
+ right := s[idx[1]:]
+ if strings.Contains(left, "&&") && strings.Contains(right, "=>") {
+ // Inside && conditions.
+ return true
+ }
+ return false
+}
+
+// expandOr converts a rule into multiple rules by expanding | ops.
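+// For example, an illustrative rule containing "MOV(B|W)load" on both sides
+// expands into two rules, one using "MOVBload" and the other "MOVWload";
+// every expanded |-form in a rule must have the same number of alternatives.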
+func expandOr(r string) []string {
+ // Find every occurrence of |-separated things.
+ // They look like MOV(B|W|L|Q|SS|SD)load or MOV(Q|L)loadidx(1|8).
+ // Generate rules selecting one case from each |-form.
+
+ // Count width of |-forms. They must match.
+ n := 1
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ s := r[idx[0]:idx[1]]
+ c := strings.Count(s, "|") + 1
+ if c == 1 {
+ continue
+ }
+ if n > 1 && n != c {
+ log.Fatalf("'|' count doesn't match in %s: both %d and %d\n", r, n, c)
+ }
+ n = c
+ }
+ if n == 1 {
+ // No |-form in this rule.
+ return []string{r}
+ }
+ // Build each new rule.
+ res := make([]string, n)
+ for i := 0; i < n; i++ {
+ buf := new(strings.Builder)
+ x := 0
+ for _, idx := range findAllOpcode(r, -1) {
+ if excludeFromExpansion(r, idx) {
+ continue
+ }
+ buf.WriteString(r[x:idx[0]]) // write bytes we've skipped over so far
+ s := r[idx[0]+1 : idx[1]-1] // remove leading "(" and trailing ")"
+ buf.WriteString(strings.Split(s, "|")[i]) // write the op component for this rule
+ x = idx[1] // note that we've written more bytes
+ }
+ buf.WriteString(r[x:])
+ res[i] = buf.String()
+ }
+ return res
+}
+
+// varCount returns a map which counts the number of occurrences of
+// Value variables in the s-expression rr.Match and the Go expression rr.Cond.
+func varCount(rr *RuleRewrite) map[string]int {
+ cnt := map[string]int{}
+ varCount1(rr.Loc, rr.Match, cnt)
+ if rr.Cond != "" {
+ expr, err := parser.ParseExpr(rr.Cond)
+ if err != nil {
+ log.Fatalf("%s: failed to parse cond %q: %v", rr.Loc, rr.Cond, err)
+ }
+ ast.Inspect(expr, func(n ast.Node) bool {
+ if id, ok := n.(*ast.Ident); ok {
+ cnt[id.Name]++
+ }
+ return true
+ })
+ }
+ return cnt
+}
+
+func varCount1(loc, m string, cnt map[string]int) {
+ if m[0] == '<' || m[0] == '[' || m[0] == '{' {
+ return
+ }
+ if token.IsIdentifier(m) {
+ cnt[m]++
+ return
+ }
+ // Split up input.
+ name, expr := splitNameExpr(m)
+ if name != "" {
+ cnt[name]++
+ }
+ if expr[0] != '(' || expr[len(expr)-1] != ')' {
+ log.Fatalf("%s: non-compound expr in varCount1: %q", loc, expr)
+ }
+ s := split(expr[1 : len(expr)-1])
+ for _, arg := range s[1:] {
+ varCount1(loc, arg, cnt)
+ }
+}
+
+// normalizeWhitespace replaces 2+ whitespace sequences with a single space.
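+// It also removes spaces just inside parentheses and brackets, so that, for
+// example, " ( Add  x y ) " normalizes to "(Add x y)".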
+func normalizeWhitespace(x string) string {
+ x = strings.Join(strings.Fields(x), " ")
+ x = strings.Replace(x, "( ", "(", -1)
+ x = strings.Replace(x, " )", ")", -1)
+ x = strings.Replace(x, "[ ", "[", -1)
+ x = strings.Replace(x, " ]", "]", -1)
+ x = strings.Replace(x, ")=>", ") =>", -1)
+ return x
+}
+
+// opIsCommutative reports whether op is commutative.
+func opIsCommutative(op string, arch arch) bool {
+ for _, x := range genericOps {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ if arch.name != "generic" {
+ for _, x := range arch.ops {
+ if op == x.name {
+ if x.commutative {
+ return true
+ }
+ break
+ }
+ }
+ }
+ return false
+}
+
+func normalizeMatch(m string, arch arch) string {
+ if token.IsIdentifier(m) {
+ return m
+ }
+ op, typ, auxint, aux, args := extract(m)
+ if opIsCommutative(op, arch) {
+ if args[1] < args[0] {
+ args[0], args[1] = args[1], args[0]
+ }
+ }
+ s := new(strings.Builder)
+ fmt.Fprintf(s, "%s <%s> [%s] {%s}", op, typ, auxint, aux)
+ for _, arg := range args {
+ prefix, expr := splitNameExpr(arg)
+ fmt.Fprint(s, " ", prefix, normalizeMatch(expr, arch))
+ }
+ return s.String()
+}
+
+func parseEllipsisRules(rules []Rule, arch arch) (newop string, ok bool) {
+ if len(rules) != 1 {
+ for _, r := range rules {
+ if strings.Contains(r.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in rule, but there are other rules with the same op", r.Loc)
+ }
+ }
+ return "", false
+ }
+ rule := rules[0]
+ match, cond, result := rule.parse()
+ if cond != "" || !isEllipsisValue(match) || !isEllipsisValue(result) {
+ if strings.Contains(rule.Rule, "...") {
+ log.Fatalf("%s: found ellipsis in non-ellipsis rule", rule.Loc)
+ }
+ checkEllipsisRuleCandidate(rule, arch)
+ return "", false
+ }
+ op, oparch, _, _, _, _ := parseValue(result, arch, rule.Loc)
+ return fmt.Sprintf("Op%s%s", oparch, op.name), true
+}
+
+// isEllipsisValue reports whether s is of the form (OpX ...).
+func isEllipsisValue(s string) bool {
+ if len(s) < 2 || s[0] != '(' || s[len(s)-1] != ')' {
+ return false
+ }
+ c := split(s[1 : len(s)-1])
+ if len(c) != 2 || c[1] != "..." {
+ return false
+ }
+ return true
+}
+
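+// checkEllipsisRuleCandidate prints a note when a rule could likely be
+// rewritten using the ellipsis form (or a Copy).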
+func checkEllipsisRuleCandidate(rule Rule, arch arch) {
+ match, cond, result := rule.parse()
+ if cond != "" {
+ return
+ }
+ op, _, _, auxint, aux, args := parseValue(match, arch, rule.Loc)
+ var auxint2, aux2 string
+ var args2 []string
+ var usingCopy string
+ var eop opData
+ if result[0] != '(' {
+ // Check for (Foo x) => x, which can be converted to (Foo ...) => (Copy ...).
+ args2 = []string{result}
+ usingCopy = " using Copy"
+ } else {
+ eop, _, _, auxint2, aux2, args2 = parseValue(result, arch, rule.Loc)
+ }
+ // Check that all restrictions in match are reproduced exactly in result.
+ if aux != aux2 || auxint != auxint2 || len(args) != len(args2) {
+ return
+ }
+ if strings.Contains(rule.Rule, "=>") && op.aux != eop.aux {
+ return
+ }
+ for i := range args {
+ if args[i] != args2[i] {
+ return
+ }
+ }
+ switch {
+ case opHasAux(op) && aux == "" && aux2 == "":
+ fmt.Printf("%s: rule silently zeros aux, either copy aux or explicitly zero\n", rule.Loc)
+ case opHasAuxInt(op) && auxint == "" && auxint2 == "":
+ fmt.Printf("%s: rule silently zeros auxint, either copy auxint or explicitly zero\n", rule.Loc)
+ default:
+ fmt.Printf("%s: possible ellipsis rule candidate%s: %q\n", rule.Loc, usingCopy, rule.Rule)
+ }
+}
+
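+// opByName returns the opData for a generated op name such as
+// "OpAdd64" or "OpAMD64ADDQ", stripping the "Op" prefix and, for
+// non-generic arches, the arch prefix.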
+func opByName(arch arch, name string) opData {
+ name = name[2:]
+ for _, x := range genericOps {
+ if name == x.name {
+ return x
+ }
+ }
+ if arch.name != "generic" {
+ name = name[len(arch.name):]
+ for _, x := range arch.ops {
+ if name == x.name {
+ return x
+ }
+ }
+ }
+ log.Fatalf("failed to find op named %s in arch %s", name, arch.name)
+ panic("unreachable")
+}
+
+// auxType returns the Go type that this operation should store in its aux field.
+func (op opData) auxType() string {
+ switch op.aux {
+ case "String":
+ return "string"
+ case "Sym":
+ // Note: a Sym can be an *obj.LSym, a *gc.Node, or nil.
+ return "Sym"
+ case "SymOff":
+ return "Sym"
+ case "Call":
+ return "Call"
+ case "CallOff":
+ return "Call"
+ case "SymValAndOff":
+ return "Sym"
+ case "Typ":
+ return "*types.Type"
+ case "TypSize":
+ return "*types.Type"
+ case "S390XCCMask":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this operation should store in its auxInt field.
+func (op opData) auxIntType() string {
+ switch op.aux {
+ case "Bool":
+ return "bool"
+ case "Int8":
+ return "int8"
+ case "Int16":
+ return "int16"
+ case "Int32":
+ return "int32"
+ case "Int64":
+ return "int64"
+ case "Int128":
+ return "int128"
+ case "UInt8":
+ return "uint8"
+ case "Float32":
+ return "float32"
+ case "Float64":
+ return "float64"
+ case "CallOff":
+ return "int32"
+ case "SymOff":
+ return "int32"
+ case "SymValAndOff":
+ return "ValAndOff"
+ case "TypSize":
+ return "int64"
+ case "CCop":
+ return "Op"
+ case "FlagConstant":
+ return "flagConstant"
+ case "ARM64BitField":
+ return "arm64BitField"
+ default:
+ return "invalid"
+ }
+}
+
+// auxType returns the Go type that this block should store in its aux field.
+func (b blockData) auxType() string {
+ switch b.aux {
+ case "S390XCCMask", "S390XCCMaskInt8", "S390XCCMaskUint8":
+ return "s390x.CCMask"
+ case "S390XRotateParams":
+ return "s390x.RotateParams"
+ default:
+ return "invalid"
+ }
+}
+
+// auxIntType returns the Go type that this block should store in its auxInt field.
+func (b blockData) auxIntType() string {
+ switch b.aux {
+ case "S390XCCMaskInt8":
+ return "int8"
+ case "S390XCCMaskUint8":
+ return "uint8"
+ case "Int64":
+ return "int64"
+ default:
+ return "invalid"
+ }
+}
+
+func title(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.Title(s)
+}
+
+func unTitle(s string) string {
+ if i := strings.Index(s, "."); i >= 0 {
+ switch strings.ToLower(s[:i]) {
+ case "s390x": // keep arch prefix for clarity
+ s = s[:i] + s[i+1:]
+ default:
+ s = s[i+1:]
+ }
+ }
+ return strings.ToLower(s[:1]) + s[1:]
+}
diff --git a/src/cmd/compile/internal/ssa/html.go b/src/cmd/compile/internal/ssa/html.go
new file mode 100644
index 0000000..d9a78b3
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/html.go
@@ -0,0 +1,1316 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/src"
+ "fmt"
+ "html"
+ exec "internal/execabs"
+ "io"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+type HTMLWriter struct {
+ w io.WriteCloser
+ Func *Func
+ path string
+ dot *dotWriter
+ prevHash []byte
+ pendingPhases []string
+ pendingTitles []string
+}
+
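+// NewHTMLWriter returns an HTMLWriter that dumps the SSA form of f to
+// an HTML file at path. cfgMask selects the phases for which a CFG is
+// rendered with dot (see newDotWriter).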
+func NewHTMLWriter(path string, f *Func, cfgMask string) *HTMLWriter {
+ path = strings.Replace(path, "/", string(filepath.Separator), -1)
+ out, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_TRUNC, 0644)
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath := path
+ if !filepath.IsAbs(reportPath) {
+ pwd, err := os.Getwd()
+ if err != nil {
+ f.Fatalf("%v", err)
+ }
+ reportPath = filepath.Join(pwd, path)
+ }
+ html := HTMLWriter{
+ w: out,
+ Func: f,
+ path: reportPath,
+ dot: newDotWriter(cfgMask),
+ }
+ html.start()
+ return &html
+}
+
+// Fatalf reports an error and exits.
+func (w *HTMLWriter) Fatalf(msg string, args ...interface{}) {
+ fe := w.Func.Frontend()
+ fe.Fatalf(src.NoXPos, msg, args...)
+}
+
+// Logf calls the (w *HTMLWriter).Func's Logf method passing along a msg and args.
+func (w *HTMLWriter) Logf(msg string, args ...interface{}) {
+ w.Func.Logf(msg, args...)
+}
+
+func (w *HTMLWriter) start() {
+ if w == nil {
+ return
+ }
+ w.WriteString("<html>")
+ w.WriteString(`<head>
+<meta http-equiv="Content-Type" content="text/html;charset=UTF-8">
+<style>
+
+body {
+ font-size: 14px;
+ font-family: Arial, sans-serif;
+}
+
+h1 {
+ font-size: 18px;
+ display: inline-block;
+ margin: 0 1em .5em 0;
+}
+
+#helplink {
+ display: inline-block;
+}
+
+#help {
+ display: none;
+}
+
+.stats {
+ font-size: 60%;
+}
+
+table {
+ border: 1px solid black;
+ table-layout: fixed;
+ width: 300px;
+}
+
+th, td {
+ border: 1px solid black;
+ overflow: hidden;
+ width: 400px;
+ vertical-align: top;
+ padding: 5px;
+}
+
+td > h2 {
+ cursor: pointer;
+ font-size: 120%;
+ margin: 5px 0px 5px 0px;
+}
+
+td.collapsed {
+ font-size: 12px;
+ width: 12px;
+ border: 1px solid white;
+ padding: 2px;
+ cursor: pointer;
+ background: #fafafa;
+}
+
+td.collapsed div {
+ text-align: right;
+ transform: rotate(180deg);
+ writing-mode: vertical-lr;
+ white-space: pre;
+}
+
+code, pre, .lines, .ast {
+ font-family: Menlo, monospace;
+ font-size: 12px;
+}
+
+pre {
+ -moz-tab-size: 4;
+ -o-tab-size: 4;
+ tab-size: 4;
+}
+
+.allow-x-scroll {
+ overflow-x: scroll;
+}
+
+.lines {
+ float: left;
+ overflow: hidden;
+ text-align: right;
+ margin-top: 7px;
+}
+
+.lines div {
+ padding-right: 10px;
+ color: gray;
+}
+
+div.line-number {
+ font-size: 12px;
+}
+
+.ast {
+ white-space: nowrap;
+}
+
+td.ssa-prog {
+ width: 600px;
+ word-wrap: break-word;
+}
+
+li {
+ list-style-type: none;
+}
+
+li.ssa-long-value {
+ text-indent: -2em; /* indent wrapped lines */
+}
+
+li.ssa-value-list {
+ display: inline;
+}
+
+li.ssa-start-block {
+ padding: 0;
+ margin: 0;
+}
+
+li.ssa-end-block {
+ padding: 0;
+ margin: 0;
+}
+
+ul.ssa-print-func {
+ padding-left: 0;
+}
+
+li.ssa-start-block button {
+ padding: 0 1em;
+ margin: 0;
+ border: none;
+ display: inline;
+ font-size: 14px;
+ float: right;
+}
+
+button:hover {
+ background-color: #eee;
+ cursor: pointer;
+}
+
+dl.ssa-gen {
+ padding-left: 0;
+}
+
+dt.ssa-prog-src {
+ padding: 0;
+ margin: 0;
+ float: left;
+ width: 4em;
+}
+
+dd.ssa-prog {
+ padding: 0;
+ margin-right: 0;
+ margin-left: 4em;
+}
+
+.dead-value {
+ color: gray;
+}
+
+.dead-block {
+ opacity: 0.5;
+}
+
+.depcycle {
+ font-style: italic;
+}
+
+.line-number {
+ font-size: 11px;
+}
+
+.no-line-number {
+ font-size: 11px;
+ color: gray;
+}
+
+.zoom {
+ position: absolute;
+ float: left;
+ white-space: nowrap;
+ background-color: #eee;
+}
+
+.zoom a:link, .zoom a:visited {
+ text-decoration: none;
+ color: blue;
+ font-size: 16px;
+ padding: 4px 2px;
+}
+
+svg {
+ cursor: default;
+ outline: 1px solid #eee;
+ width: 100%;
+}
+
+body.darkmode {
+ background-color: rgb(21, 21, 21);
+ color: rgb(230, 255, 255);
+ opacity: 100%;
+}
+
+td.darkmode {
+ background-color: rgb(21, 21, 21);
+ border: 1px solid gray;
+}
+
+body.darkmode table, th {
+ border: 1px solid gray;
+}
+
+body.darkmode text {
+ fill: white;
+}
+
+body.darkmode svg polygon:first-child {
+ fill: rgb(21, 21, 21);
+}
+
+.highlight-aquamarine { background-color: aquamarine; color: black; }
+.highlight-coral { background-color: coral; color: black; }
+.highlight-lightpink { background-color: lightpink; color: black; }
+.highlight-lightsteelblue { background-color: lightsteelblue; color: black; }
+.highlight-palegreen { background-color: palegreen; color: black; }
+.highlight-skyblue { background-color: skyblue; color: black; }
+.highlight-lightgray { background-color: lightgray; color: black; }
+.highlight-yellow { background-color: yellow; color: black; }
+.highlight-lime { background-color: lime; color: black; }
+.highlight-khaki { background-color: khaki; color: black; }
+.highlight-aqua { background-color: aqua; color: black; }
+.highlight-salmon { background-color: salmon; color: black; }
+
+/* Ensure all dead values/blocks continue to have gray font color in dark mode with highlights */
+.dead-value span.highlight-aquamarine,
+.dead-block.highlight-aquamarine,
+.dead-value span.highlight-coral,
+.dead-block.highlight-coral,
+.dead-value span.highlight-lightpink,
+.dead-block.highlight-lightpink,
+.dead-value span.highlight-lightsteelblue,
+.dead-block.highlight-lightsteelblue,
+.dead-value span.highlight-palegreen,
+.dead-block.highlight-palegreen,
+.dead-value span.highlight-skyblue,
+.dead-block.highlight-skyblue,
+.dead-value span.highlight-lightgray,
+.dead-block.highlight-lightgray,
+.dead-value span.highlight-yellow,
+.dead-block.highlight-yellow,
+.dead-value span.highlight-lime,
+.dead-block.highlight-lime,
+.dead-value span.highlight-khaki,
+.dead-block.highlight-khaki,
+.dead-value span.highlight-aqua,
+.dead-block.highlight-aqua,
+.dead-value span.highlight-salmon,
+.dead-block.highlight-salmon {
+ color: gray;
+}
+
+.outline-blue { outline: #2893ff solid 2px; }
+.outline-red { outline: red solid 2px; }
+.outline-blueviolet { outline: blueviolet solid 2px; }
+.outline-darkolivegreen { outline: darkolivegreen solid 2px; }
+.outline-fuchsia { outline: fuchsia solid 2px; }
+.outline-sienna { outline: sienna solid 2px; }
+.outline-gold { outline: gold solid 2px; }
+.outline-orangered { outline: orangered solid 2px; }
+.outline-teal { outline: teal solid 2px; }
+.outline-maroon { outline: maroon solid 2px; }
+.outline-black { outline: black solid 2px; }
+
+ellipse.outline-blue { stroke-width: 2px; stroke: #2893ff; }
+ellipse.outline-red { stroke-width: 2px; stroke: red; }
+ellipse.outline-blueviolet { stroke-width: 2px; stroke: blueviolet; }
+ellipse.outline-darkolivegreen { stroke-width: 2px; stroke: darkolivegreen; }
+ellipse.outline-fuchsia { stroke-width: 2px; stroke: fuchsia; }
+ellipse.outline-sienna { stroke-width: 2px; stroke: sienna; }
+ellipse.outline-gold { stroke-width: 2px; stroke: gold; }
+ellipse.outline-orangered { stroke-width: 2px; stroke: orangered; }
+ellipse.outline-teal { stroke-width: 2px; stroke: teal; }
+ellipse.outline-maroon { stroke-width: 2px; stroke: maroon; }
+ellipse.outline-black { stroke-width: 2px; stroke: black; }
+
+/* Capture alternative for outline-black and ellipse.outline-black when in dark mode */
+body.darkmode .outline-black { outline: gray solid 2px; }
+body.darkmode ellipse.outline-black { outline: gray solid 2px; }
+
+</style>
+
+<script type="text/javascript">
+
+// Contains phase names which are expanded by default. Other columns are collapsed.
+let expandedDefault = [
+ "start",
+ "deadcode",
+ "opt",
+ "lower",
+ "late-deadcode",
+ "regalloc",
+ "genssa",
+];
+if (history.state === null) {
+ history.pushState({expandedDefault}, "", location.href);
+}
+
+// ordered list of all available highlight colors
+var highlights = [
+ "highlight-aquamarine",
+ "highlight-coral",
+ "highlight-lightpink",
+ "highlight-lightsteelblue",
+ "highlight-palegreen",
+ "highlight-skyblue",
+ "highlight-lightgray",
+ "highlight-yellow",
+ "highlight-lime",
+ "highlight-khaki",
+ "highlight-aqua",
+ "highlight-salmon"
+];
+
+// state: which value is highlighted this color?
+var highlighted = {};
+for (var i = 0; i < highlights.length; i++) {
+ highlighted[highlights[i]] = "";
+}
+
+// ordered list of all available outline colors
+var outlines = [
+ "outline-blue",
+ "outline-red",
+ "outline-blueviolet",
+ "outline-darkolivegreen",
+ "outline-fuchsia",
+ "outline-sienna",
+ "outline-gold",
+ "outline-orangered",
+ "outline-teal",
+ "outline-maroon",
+ "outline-black"
+];
+
+// state: which value is outlined this color?
+var outlined = {};
+for (var i = 0; i < outlines.length; i++) {
+ outlined[outlines[i]] = "";
+}
+
+window.onload = function() {
+ if (history.state !== null) {
+ expandedDefault = history.state.expandedDefault;
+ }
+ if (window.matchMedia && window.matchMedia("(prefers-color-scheme: dark)").matches) {
+ toggleDarkMode();
+ document.getElementById("dark-mode-button").checked = true;
+ }
+
+ var ssaElemClicked = function(elem, event, selections, selected) {
+ event.stopPropagation();
+
+ // find all values with the same name
+ var c = elem.classList.item(0);
+ var x = document.getElementsByClassName(c);
+
+ // if selected, remove selections from all of them
+ // otherwise, attempt to add
+
+ var remove = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == c) {
+ remove = color;
+ break;
+ }
+ }
+
+ if (remove != "") {
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.remove(remove);
+ }
+ selected[remove] = "";
+ return;
+ }
+
+ // we're adding a selection
+ // find first available color
+ var avail = "";
+ for (var i = 0; i < selections.length; i++) {
+ var color = selections[i];
+ if (selected[color] == "") {
+ avail = color;
+ break;
+ }
+ }
+ if (avail == "") {
+ alert("out of selection colors; go add more");
+ return;
+ }
+
+ // set that as the selection
+ for (var i = 0; i < x.length; i++) {
+ x[i].classList.add(avail);
+ }
+ selected[avail] = c;
+ };
+
+ var ssaValueClicked = function(event) {
+ ssaElemClicked(this, event, highlights, highlighted);
+ };
+
+ var ssaBlockClicked = function(event) {
+ ssaElemClicked(this, event, outlines, outlined);
+ };
+
+ var ssavalues = document.getElementsByClassName("ssa-value");
+ for (var i = 0; i < ssavalues.length; i++) {
+ ssavalues[i].addEventListener('click', ssaValueClicked);
+ }
+
+ var ssalongvalues = document.getElementsByClassName("ssa-long-value");
+ for (var i = 0; i < ssalongvalues.length; i++) {
+ // don't attach listeners to li nodes, just the spans they contain
+ if (ssalongvalues[i].nodeName == "SPAN") {
+ ssalongvalues[i].addEventListener('click', ssaValueClicked);
+ }
+ }
+
+ var ssablocks = document.getElementsByClassName("ssa-block");
+ for (var i = 0; i < ssablocks.length; i++) {
+ ssablocks[i].addEventListener('click', ssaBlockClicked);
+ }
+
+ var lines = document.getElementsByClassName("line-number");
+ for (var i = 0; i < lines.length; i++) {
+ lines[i].addEventListener('click', ssaValueClicked);
+ }
+
+
+ function toggler(phase) {
+ return function() {
+ toggle_cell(phase+'-col');
+ toggle_cell(phase+'-exp');
+ const i = expandedDefault.indexOf(phase);
+ if (i !== -1) {
+ expandedDefault.splice(i, 1);
+ } else {
+ expandedDefault.push(phase);
+ }
+ history.pushState({expandedDefault}, "", location.href);
+ };
+ }
+
+ function toggle_cell(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'table-cell') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'table-cell';
+ }
+ }
+
+ // Go through all columns and collapse needed phases.
+ const td = document.getElementsByTagName("td");
+ for (let i = 0; i < td.length; i++) {
+ const id = td[i].id;
+ const phase = id.substr(0, id.length-4);
+ let show = expandedDefault.indexOf(phase) !== -1
+
+ // If show == false, check to see if this is a combined column (multiple phases).
+ // If combined, check each of the phases to see if they are in our expandedDefaults.
+ // If any are found, that entire combined column gets shown.
+ if (!show) {
+ const combined = phase.split('--+--');
+ const len = combined.length;
+ if (len > 1) {
+ for (let i = 0; i < len; i++) {
+ const num = expandedDefault.indexOf(combined[i]);
+ if (num !== -1) {
+ expandedDefault.splice(num, 1);
+ if (expandedDefault.indexOf(phase) === -1) {
+ expandedDefault.push(phase);
+ show = true;
+ }
+ }
+ }
+ }
+ }
+ if (id.endsWith("-exp")) {
+ const h2Els = td[i].getElementsByTagName("h2");
+ const len = h2Els.length;
+ if (len > 0) {
+ for (let i = 0; i < len; i++) {
+ h2Els[i].addEventListener('click', toggler(phase));
+ }
+ }
+ } else {
+ td[i].addEventListener('click', toggler(phase));
+ }
+ if (id.endsWith("-col") && show || id.endsWith("-exp") && !show) {
+ td[i].style.display = 'none';
+ continue;
+ }
+ td[i].style.display = 'table-cell';
+ }
+
+ // find all svg block nodes, add their block classes
+ var nodes = document.querySelectorAll('*[id^="graph_node_"]');
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var block = name.substring(name.lastIndexOf("_")+1);
+ node.classList.remove("node");
+ node.classList.add(block);
+ node.addEventListener('click', ssaBlockClicked);
+ var ellipse = node.getElementsByTagName('ellipse')[0];
+ ellipse.classList.add(block);
+ ellipse.addEventListener('click', ssaBlockClicked);
+ }
+
+ // make big graphs smaller
+ var targetScale = 0.5;
+ var nodes = document.querySelectorAll('*[id^="svg_graph_"]');
+ // TODO: Implement smarter auto-zoom using the viewBox attribute
+ // and in case of big graphs set the width and height of the svg graph to
+ // maximum allowed.
+ for (var i = 0; i < nodes.length; i++) {
+ var node = nodes[i];
+ var name = node.id.toString();
+ var phase = name.substring(name.lastIndexOf("_")+1);
+ var gNode = document.getElementById("g_graph_"+phase);
+ var scale = gNode.transform.baseVal.getItem(0).matrix.a;
+ if (scale > targetScale) {
+ node.width.baseVal.value *= targetScale / scale;
+ node.height.baseVal.value *= targetScale / scale;
+ }
+ }
+};
+
+function toggle_visibility(id) {
+ var e = document.getElementById(id);
+ if (e.style.display == 'block') {
+ e.style.display = 'none';
+ } else {
+ e.style.display = 'block';
+ }
+}
+
+function hideBlock(el) {
+ var es = el.parentNode.parentNode.getElementsByClassName("ssa-value-list");
+ if (es.length===0)
+ return;
+ var e = es[0];
+ if (e.style.display === 'block' || e.style.display === '') {
+ e.style.display = 'none';
+ el.innerHTML = '+';
+ } else {
+ e.style.display = 'block';
+ el.innerHTML = '-';
+ }
+}
+
+// TODO: scale the graph with the viewBox attribute.
+function graphReduce(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 0.9;
+ node.height.baseVal.value *= 0.9;
+ }
+ return false;
+}
+
+function graphEnlarge(id) {
+ var node = document.getElementById(id);
+ if (node) {
+ node.width.baseVal.value *= 1.1;
+ node.height.baseVal.value *= 1.1;
+ }
+ return false;
+}
+
+function makeDraggable(event) {
+ var svg = event.target;
+ if (window.PointerEvent) {
+ svg.addEventListener('pointerdown', startDrag);
+ svg.addEventListener('pointermove', drag);
+ svg.addEventListener('pointerup', endDrag);
+ svg.addEventListener('pointerleave', endDrag);
+ } else {
+ svg.addEventListener('mousedown', startDrag);
+ svg.addEventListener('mousemove', drag);
+ svg.addEventListener('mouseup', endDrag);
+ svg.addEventListener('mouseleave', endDrag);
+ }
+
+ var point = svg.createSVGPoint();
+ var isPointerDown = false;
+ var pointerOrigin;
+ var viewBox = svg.viewBox.baseVal;
+
+ function getPointFromEvent (event) {
+ point.x = event.clientX;
+ point.y = event.clientY;
+
+ // We get the current transformation matrix of the SVG and we inverse it
+ var invertedSVGMatrix = svg.getScreenCTM().inverse();
+ return point.matrixTransform(invertedSVGMatrix);
+ }
+
+ function startDrag(event) {
+ isPointerDown = true;
+ pointerOrigin = getPointFromEvent(event);
+ }
+
+ function drag(event) {
+ if (!isPointerDown) {
+ return;
+ }
+ event.preventDefault();
+
+ var pointerPosition = getPointFromEvent(event);
+ viewBox.x -= (pointerPosition.x - pointerOrigin.x);
+ viewBox.y -= (pointerPosition.y - pointerOrigin.y);
+ }
+
+ function endDrag(event) {
+ isPointerDown = false;
+ }
+}
+
+function toggleDarkMode() {
+ document.body.classList.toggle('darkmode');
+
+ // Collect all of the "collapsed" elements and apply dark mode on each collapsed column
+ const collapsedEls = document.getElementsByClassName('collapsed');
+ const len = collapsedEls.length;
+
+ for (let i = 0; i < len; i++) {
+ collapsedEls[i].classList.toggle('darkmode');
+ }
+
+ // Collect and spread the appropriate elements from all of the svgs on the page into one array
+ const svgParts = [
+ ...document.querySelectorAll('path'),
+ ...document.querySelectorAll('ellipse'),
+ ...document.querySelectorAll('polygon'),
+ ];
+
+ // Iterate over the svgParts specifically looking for white and black fill/stroke to be toggled.
+ // The verbose conditional is intentional here so that we do not mutate any svg path, ellipse, or polygon that is of any color other than white or black.
+ svgParts.forEach(el => {
+ if (el.attributes.stroke.value === 'white') {
+ el.attributes.stroke.value = 'black';
+ } else if (el.attributes.stroke.value === 'black') {
+ el.attributes.stroke.value = 'white';
+ }
+ if (el.attributes.fill.value === 'white') {
+ el.attributes.fill.value = 'black';
+ } else if (el.attributes.fill.value === 'black') {
+ el.attributes.fill.value = 'white';
+ }
+ });
+}
+
+</script>
+
+</head>`)
+ w.WriteString("<body>")
+ w.WriteString("<h1>")
+ w.WriteString(html.EscapeString(w.Func.Name))
+ w.WriteString("</h1>")
+ w.WriteString(`
+<a href="#" onclick="toggle_visibility('help');return false;" id="helplink">help</a>
+<div id="help">
+
+<p>
+Click on a value or block to toggle highlighting of that value/block
+and its uses. (Values and blocks are highlighted by ID, and IDs of
+dead items may be reused, so not all highlights necessarily correspond
+to the clicked item.)
+</p>
+
+<p>
+Faded out values and blocks are dead code that has not been eliminated.
+</p>
+
+<p>
+Values printed in italics have a dependency cycle.
+</p>
+
+<p>
+<b>CFG</b>: Dashed edges mark unlikely branches. Blue marks backward edges.
+An edge with a dot means that the edge follows the order in which blocks were laid out.
+</p>
+
+</div>
+<label for="dark-mode-button" style="margin-left: 15px; cursor: pointer;">darkmode</label>
+<input type="checkbox" onclick="toggleDarkMode();" id="dark-mode-button" style="cursor: pointer" />
+`)
+ w.WriteString("<table>")
+ w.WriteString("<tr>")
+}
+
+func (w *HTMLWriter) Close() {
+ if w == nil {
+ return
+ }
+ io.WriteString(w.w, "</tr>")
+ io.WriteString(w.w, "</table>")
+ io.WriteString(w.w, "</body>")
+ io.WriteString(w.w, "</html>")
+ w.w.Close()
+ fmt.Printf("dumped SSA to %v\n", w.path)
+}
+
+// WritePhase writes f in a column headed by title.
+// phase is used for collapsing columns and should be unique across the table.
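+// Phases that do not change the function are not given their own column;
+// they are merged into the column of the next phase that does change it
+// (see flushPhases).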
+func (w *HTMLWriter) WritePhase(phase, title string) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ hash := hashFunc(w.Func)
+ w.pendingPhases = append(w.pendingPhases, phase)
+ w.pendingTitles = append(w.pendingTitles, title)
+ if !bytes.Equal(hash, w.prevHash) {
+ w.flushPhases()
+ }
+ w.prevHash = hash
+}
+
+// flushPhases collects any pending phases and titles, writes them to the html, and resets the pending slices.
+func (w *HTMLWriter) flushPhases() {
+ phaseLen := len(w.pendingPhases)
+ if phaseLen == 0 {
+ return
+ }
+ phases := strings.Join(w.pendingPhases, " + ")
+ w.WriteMultiTitleColumn(
+ phases,
+ w.pendingTitles,
+ fmt.Sprintf("hash-%x", w.prevHash),
+ w.Func.HTML(w.pendingPhases[phaseLen-1], w.dot),
+ )
+ w.pendingPhases = w.pendingPhases[:0]
+ w.pendingTitles = w.pendingTitles[:0]
+}
+
+// FuncLines contains source code for a function to be displayed
+// in sources column.
+type FuncLines struct {
+ Filename string
+ StartLineno uint
+ Lines []string
+}
+
+// ByTopo sorts topologically: target function is on top,
+// followed by inlined functions sorted by filename and line numbers.
+type ByTopo []*FuncLines
+
+func (x ByTopo) Len() int { return len(x) }
+func (x ByTopo) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x ByTopo) Less(i, j int) bool {
+ a := x[i]
+ b := x[j]
+ if a.Filename == b.Filename {
+ return a.StartLineno < b.StartLineno
+ }
+ return a.Filename < b.Filename
+}
+
+// WriteSources writes lines as source code in a column headed by title.
+// phase is used for collapsing columns and should be unique across the table.
+func (w *HTMLWriter) WriteSources(phase string, all []*FuncLines) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ var buf bytes.Buffer
+ fmt.Fprint(&buf, "<div class=\"lines\" style=\"width: 8%\">")
+ filename := ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ filename = fl.Filename
+ }
+ for i := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, ln)
+ }
+ }
+ fmt.Fprint(&buf, "</div><div style=\"width: 92%\"><pre>")
+ filename = ""
+ for _, fl := range all {
+ fmt.Fprint(&buf, "<div>&nbsp;</div>")
+ if filename != fl.Filename {
+ fmt.Fprintf(&buf, "<div><strong>%v</strong></div>", fl.Filename)
+ filename = fl.Filename
+ }
+ for i, line := range fl.Lines {
+ ln := int(fl.StartLineno) + i
+ var escaped string
+ if strings.TrimSpace(line) == "" {
+ escaped = "&nbsp;"
+ } else {
+ escaped = html.EscapeString(line)
+ }
+ fmt.Fprintf(&buf, "<div class=\"l%v line-number\">%v</div>", ln, escaped)
+ }
+ }
+ fmt.Fprint(&buf, "</pre></div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", buf.String())
+}
+
+func (w *HTMLWriter) WriteAST(phase string, buf *bytes.Buffer) {
+ if w == nil {
+ return // avoid generating HTML just to discard it
+ }
+ lines := strings.Split(buf.String(), "\n")
+ var out bytes.Buffer
+
+ fmt.Fprint(&out, "<div>")
+ for _, l := range lines {
+ l = strings.TrimSpace(l)
+ var escaped string
+ var lineNo string
+ if l == "" {
+ escaped = "&nbsp;"
+ } else {
+ if strings.HasPrefix(l, "buildssa") {
+ escaped = fmt.Sprintf("<b>%v</b>", l)
+ } else {
+ // Parse the line number from the format file:line:col.
+ // See the implementation in ir/fmt.go:dumpNodeHeader.
+ sl := strings.Split(l, ":")
+ if len(sl) >= 3 {
+ if _, err := strconv.Atoi(sl[len(sl)-2]); err == nil {
+ lineNo = sl[len(sl)-2]
+ }
+ }
+ escaped = html.EscapeString(l)
+ }
+ }
+ if lineNo != "" {
+ fmt.Fprintf(&out, "<div class=\"l%v line-number ast\">%v</div>", lineNo, escaped)
+ } else {
+ fmt.Fprintf(&out, "<div class=\"ast\">%v</div>", escaped)
+ }
+ }
+ fmt.Fprint(&out, "</div>")
+ w.WriteColumn(phase, phase, "allow-x-scroll", out.String())
+}
+
+// WriteColumn writes raw HTML in a column headed by title.
+// It is intended for pre- and post-compilation log output.
+func (w *HTMLWriter) WriteColumn(phase, title, class, html string) {
+ w.WriteMultiTitleColumn(phase, []string{title}, class, html)
+}
+
+func (w *HTMLWriter) WriteMultiTitleColumn(phase string, titles []string, class, html string) {
+ if w == nil {
+ return
+ }
+ id := strings.Replace(phase, " ", "-", -1)
+ // collapsed column
+ w.Printf("<td id=\"%v-col\" class=\"collapsed\"><div>%v</div></td>", id, phase)
+
+ if class == "" {
+ w.Printf("<td id=\"%v-exp\">", id)
+ } else {
+ w.Printf("<td id=\"%v-exp\" class=\"%v\">", id, class)
+ }
+ for _, title := range titles {
+ w.WriteString("<h2>" + title + "</h2>")
+ }
+ w.WriteString(html)
+ w.WriteString("</td>\n")
+}
+
+func (w *HTMLWriter) Printf(msg string, v ...interface{}) {
+ if _, err := fmt.Fprintf(w.w, msg, v...); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (w *HTMLWriter) WriteString(s string) {
+ if _, err := io.WriteString(w.w, s); err != nil {
+ w.Fatalf("%v", err)
+ }
+}
+
+func (v *Value) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := v.String()
+ return fmt.Sprintf("<span class=\"%s ssa-value\">%s</span>", s, s)
+}
+
+func (v *Value) LongHTML() string {
+ // TODO: Any intra-value formatting?
+ // I'm wary of adding too much visual noise,
+ // but a little bit might be valuable.
+ // We already have visual noise in the form of punctuation
+ // maybe we could replace some of that with formatting.
+ s := fmt.Sprintf("<span class=\"%s ssa-long-value\">", v.String())
+
+ linenumber := "<span class=\"no-line-number\">(?)</span>"
+ if v.Pos.IsKnown() {
+ linenumber = fmt.Sprintf("<span class=\"l%v line-number\">(%s)</span>", v.Pos.LineNumber(), v.Pos.LineNumberHTML())
+ }
+
+ s += fmt.Sprintf("%s %s = %s", v.HTML(), linenumber, v.Op.String())
+
+ s += " &lt;" + html.EscapeString(v.Type.String()) + "&gt;"
+ s += html.EscapeString(v.auxString())
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %s", a.HTML())
+ }
+ r := v.Block.Func.RegAlloc
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + html.EscapeString(r[v.ID].String())
+ }
+ var names []string
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ if len(names) != 0 {
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+
+ s += "</span>"
+ return s
+}
+
+func (b *Block) HTML() string {
+ // TODO: Using the value ID as the class ignores the fact
+ // that value IDs get recycled and that some values
+ // are transmuted into other values.
+ s := html.EscapeString(b.String())
+ return fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", s, s)
+}
+
+func (b *Block) LongHTML() string {
+ // TODO: improve this for HTML?
+ s := fmt.Sprintf("<span class=\"%s ssa-block\">%s</span>", html.EscapeString(b.String()), html.EscapeString(b.Kind.String()))
+ if b.Aux != nil {
+ s += html.EscapeString(fmt.Sprintf(" {%v}", b.Aux))
+ }
+ if t := b.AuxIntString(); t != "" {
+ s += html.EscapeString(fmt.Sprintf(" [%v]", t))
+ }
+ for _, c := range b.ControlValues() {
+ s += fmt.Sprintf(" %s", c.HTML())
+ }
+ if len(b.Succs) > 0 {
+ s += " &#8594;" // right arrow
+ for _, e := range b.Succs {
+ c := e.b
+ s += " " + c.HTML()
+ }
+ }
+ switch b.Likely {
+ case BranchUnlikely:
+ s += " (unlikely)"
+ case BranchLikely:
+ s += " (likely)"
+ }
+ if b.Pos.IsKnown() {
+ // TODO does not begin to deal with the full complexity of line numbers.
+		// Maybe we want a string/slice of outer-inner positions instead, when inlining.
+ s += fmt.Sprintf(" <span class=\"l%v line-number\">(%s)</span>", b.Pos.LineNumber(), b.Pos.LineNumberHTML())
+ }
+ return s
+}
+
+func (f *Func) HTML(phase string, dot *dotWriter) string {
+ buf := new(bytes.Buffer)
+ if dot != nil {
+ dot.writeFuncSVG(buf, phase, f)
+ }
+ fmt.Fprint(buf, "<code>")
+ p := htmlFuncPrinter{w: buf}
+ fprintFunc(p, f)
+
+ // fprintFunc(&buf, f) // TODO: HTML, not text, <br> for line breaks, etc.
+ fmt.Fprint(buf, "</code>")
+ return buf.String()
+}
+
+func (d *dotWriter) writeFuncSVG(w io.Writer, phase string, f *Func) {
+ if d.broken {
+ return
+ }
+ if _, ok := d.phases[phase]; !ok {
+ return
+ }
+ cmd := exec.Command(d.path, "-Tsvg")
+ pipe, err := cmd.StdinPipe()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ buf := new(bytes.Buffer)
+ cmd.Stdout = buf
+ bufErr := new(bytes.Buffer)
+ cmd.Stderr = bufErr
+ err = cmd.Start()
+ if err != nil {
+ d.broken = true
+ fmt.Println(err)
+ return
+ }
+ fmt.Fprint(pipe, `digraph "" { margin=0; ranksep=.2; `)
+ id := strings.Replace(phase, " ", "-", -1)
+ fmt.Fprintf(pipe, `id="g_graph_%s";`, id)
+ fmt.Fprintf(pipe, `node [style=filled,fillcolor=white,fontsize=16,fontname="Menlo,Times,serif",margin="0.01,0.03"];`)
+ fmt.Fprintf(pipe, `edge [fontsize=16,fontname="Menlo,Times,serif"];`)
+ for i, b := range f.Blocks {
+ if b.Kind == BlockInvalid {
+ continue
+ }
+ layout := ""
+ if f.laidout {
+ layout = fmt.Sprintf(" #%d", i)
+ }
+ fmt.Fprintf(pipe, `%v [label="%v%s\n%v",id="graph_node_%v_%v",tooltip="%v"];`, b, b, layout, b.Kind.String(), id, b, b.LongString())
+ }
+ indexOf := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ indexOf[b.ID] = i
+ }
+ layoutDrawn := make([]bool, f.NumBlocks())
+
+ ponums := make([]int32, f.NumBlocks())
+ _ = postorderWithNumbering(f, ponums)
+ isBackEdge := func(from, to ID) bool {
+ return ponums[from] <= ponums[to]
+ }
+
+ for _, b := range f.Blocks {
+ for i, s := range b.Succs {
+ style := "solid"
+ color := "black"
+ arrow := "vee"
+ if b.unlikelyIndex() == i {
+ style = "dashed"
+ }
+ if f.laidout && indexOf[s.b.ID] == indexOf[b.ID]+1 {
+				// A dot arrowhead marks an ordered (layout-order) edge; it takes precedence over the backedge color.
+ arrow = "dotvee"
+ layoutDrawn[s.b.ID] = true
+ } else if isBackEdge(b.ID, s.b.ID) {
+ color = "#2893ff"
+ }
+ fmt.Fprintf(pipe, `%v -> %v [label=" %d ",style="%s",color="%s",arrowhead="%s"];`, b, s.b, i, style, color, arrow)
+ }
+ }
+ if f.laidout {
+ fmt.Fprintln(pipe, `edge[constraint=false,color=gray,style=solid,arrowhead=dot];`)
+ colors := [...]string{"#eea24f", "#f38385", "#f4d164", "#ca89fc", "gray"}
+ ci := 0
+ for i := 1; i < len(f.Blocks); i++ {
+ if layoutDrawn[f.Blocks[i].ID] {
+ continue
+ }
+ fmt.Fprintf(pipe, `%s -> %s [color="%s"];`, f.Blocks[i-1], f.Blocks[i], colors[ci])
+ ci = (ci + 1) % len(colors)
+ }
+ }
+ fmt.Fprint(pipe, "}")
+ pipe.Close()
+ err = cmd.Wait()
+ if err != nil {
+ d.broken = true
+ fmt.Printf("dot: %v\n%v\n", err, bufErr.String())
+ return
+ }
+
+ svgID := "svg_graph_" + id
+ fmt.Fprintf(w, `<div class="zoom"><button onclick="return graphReduce('%s');">-</button> <button onclick="return graphEnlarge('%s');">+</button></div>`, svgID, svgID)
+ // For now, an awful hack: edit the html as it passes through
+ // our fingers, finding '<svg ' and injecting needed attributes after it.
+ err = d.copyUntil(w, buf, `<svg `)
+ if err != nil {
+ fmt.Printf("injecting attributes: %v\n", err)
+ return
+ }
+ fmt.Fprintf(w, ` id="%s" onload="makeDraggable(evt)" `, svgID)
+ io.Copy(w, buf)
+}
+
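+// unlikelyIndex returns the index in b.Succs of the unlikely successor,
+// or -1 if the branch likelihood is unknown.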
+func (b *Block) unlikelyIndex() int {
+ switch b.Likely {
+ case BranchLikely:
+ return 1
+ case BranchUnlikely:
+ return 0
+ }
+ return -1
+}
+
+func (d *dotWriter) copyUntil(w io.Writer, buf *bytes.Buffer, sep string) error {
+ i := bytes.Index(buf.Bytes(), []byte(sep))
+ if i == -1 {
+ return fmt.Errorf("couldn't find dot sep %q", sep)
+ }
+ _, err := io.CopyN(w, buf, int64(i+len(sep)))
+ return err
+}
+
+type htmlFuncPrinter struct {
+ w io.Writer
+}
+
+func (p htmlFuncPrinter) header(f *Func) {}
+
+func (p htmlFuncPrinter) startBlock(b *Block, reachable bool) {
+ var dead string
+ if !reachable {
+ dead = "dead-block"
+ }
+ fmt.Fprintf(p.w, "<ul class=\"%s ssa-print-func %s\">", b, dead)
+ fmt.Fprintf(p.w, "<li class=\"ssa-start-block\">%s:", b.HTML())
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " &#8592;") // left arrow
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " %s", pred.HTML())
+ }
+ }
+ if len(b.Values) > 0 {
+ io.WriteString(p.w, `<button onclick="hideBlock(this)">-</button>`)
+ }
+ io.WriteString(p.w, "</li>")
+ if len(b.Values) > 0 { // start list of values
+ io.WriteString(p.w, "<li class=\"ssa-value-list\">")
+ io.WriteString(p.w, "<ul>")
+ }
+}
+
+func (p htmlFuncPrinter) endBlock(b *Block, reachable bool) {
+ if len(b.Values) > 0 { // end list of values
+ io.WriteString(p.w, "</ul>")
+ io.WriteString(p.w, "</li>")
+ }
+ io.WriteString(p.w, "<li class=\"ssa-end-block\">")
+ fmt.Fprint(p.w, b.LongHTML())
+ io.WriteString(p.w, "</li>")
+ io.WriteString(p.w, "</ul>")
+}
+
+func (p htmlFuncPrinter) value(v *Value, live bool) {
+ var dead string
+ if !live {
+ dead = "dead-value"
+ }
+ fmt.Fprintf(p.w, "<li class=\"ssa-long-value %s\">", dead)
+ fmt.Fprint(p.w, v.LongHTML())
+ io.WriteString(p.w, "</li>")
+}
+
+func (p htmlFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "<span class=\"depcycle\">")
+}
+
+func (p htmlFuncPrinter) endDepCycle() {
+ fmt.Fprintln(p.w, "</span>")
+}
+
+func (p htmlFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "<li>name %s: ", n)
+ for _, val := range vals {
+ fmt.Fprintf(p.w, "%s ", val.HTML())
+ }
+ fmt.Fprintf(p.w, "</li>")
+}
+
+type dotWriter struct {
+ path string
+ broken bool
+ phases map[string]bool // keys specify phases with CFGs
+}
+
+// newDotWriter returns a non-nil value when mask is valid.
+// dotWriter will generate SVGs only for the phases specified in the mask.
+// mask can contain the following patterns and combinations of them:
+// *   - all of the phases;
+// x-y - x through y, inclusive;
+// x,y - x and y, but not the passes between.
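+// For example, a mask such as "opt" selects a single pass, while
+// "opt-lower" selects every pass from opt through lower.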
+func newDotWriter(mask string) *dotWriter {
+ if mask == "" {
+ return nil
+ }
+ // User can specify phase name with _ instead of spaces.
+ mask = strings.Replace(mask, "_", " ", -1)
+ ph := make(map[string]bool)
+ ranges := strings.Split(mask, ",")
+ for _, r := range ranges {
+ spl := strings.Split(r, "-")
+ if len(spl) > 2 {
+ fmt.Printf("range is not valid: %v\n", mask)
+ return nil
+ }
+ var first, last int
+ if mask == "*" {
+ first = 0
+ last = len(passes) - 1
+ } else {
+ first = passIdxByName(spl[0])
+ last = passIdxByName(spl[len(spl)-1])
+ }
+ if first < 0 || last < 0 || first > last {
+ fmt.Printf("range is not valid: %v\n", r)
+ return nil
+ }
+ for p := first; p <= last; p++ {
+ ph[passes[p].name] = true
+ }
+ }
+
+ path, err := exec.LookPath("dot")
+ if err != nil {
+ fmt.Println(err)
+ return nil
+ }
+ return &dotWriter{path: path, phases: ph}
+}
+
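+// passIdxByName returns the index of the named pass in passes,
+// or -1 if there is no such pass.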
+func passIdxByName(name string) int {
+ for i, p := range passes {
+ if p.name == name {
+ return i
+ }
+ }
+ return -1
+}
diff --git a/src/cmd/compile/internal/ssa/id.go b/src/cmd/compile/internal/ssa/id.go
new file mode 100644
index 0000000..725279e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/id.go
@@ -0,0 +1,28 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+type ID int32
+
+// idAlloc provides an allocator for unique integers.
+type idAlloc struct {
+ last ID
+}
+
+// get allocates an ID and returns it. IDs are always > 0.
+func (a *idAlloc) get() ID {
+ x := a.last
+ x++
+ if x == 1<<31-1 {
+ panic("too many ids for this function")
+ }
+ a.last = x
+ return x
+}
+
+// num returns the maximum ID ever returned + 1.
+func (a *idAlloc) num() int {
+ return int(a.last + 1)
+}
diff --git a/src/cmd/compile/internal/ssa/layout.go b/src/cmd/compile/internal/ssa/layout.go
new file mode 100644
index 0000000..6abdb0d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/layout.go
@@ -0,0 +1,182 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// layout orders basic blocks in f with the goal of minimizing control flow instructions.
+// After this phase returns, the order of f.Blocks matters and is the order
+// in which those blocks will appear in the assembly output.
+func layout(f *Func) {
+ f.Blocks = layoutOrder(f)
+}
+
+// Register allocation may use a different order which has constraints
+// imposed by the linear-scan algorithm.
+func layoutRegallocOrder(f *Func) []*Block {
+ // remnant of an experiment; perhaps there will be another.
+ return layoutOrder(f)
+}
+
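+// layoutOrder computes the block order used by layout: a depth-first
+// topological sort that prefers the likely successor of the most recently
+// scheduled block and pushes exit blocks (and blocks post-dominated by
+// them) to the end.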
+func layoutOrder(f *Func) []*Block {
+ order := make([]*Block, 0, f.NumBlocks())
+ scheduled := make([]bool, f.NumBlocks())
+ idToBlock := make([]*Block, f.NumBlocks())
+ indegree := make([]int, f.NumBlocks())
+ posdegree := f.newSparseSet(f.NumBlocks()) // blocks with positive remaining degree
+ defer f.retSparseSet(posdegree)
+	// blocks with zero remaining degree. Use a slice to simulate a LIFO queue
+	// for the depth-first topological sort.
+ var zerodegree []ID
+ // LIFO queue. Track the successor blocks of the scheduled block so that when we
+ // encounter loops, we choose to schedule the successor block of the most recently
+ // scheduled block.
+ var succs []ID
+ exit := f.newSparseSet(f.NumBlocks()) // exit blocks
+ defer f.retSparseSet(exit)
+
+ // Populate idToBlock and find exit blocks.
+ for _, b := range f.Blocks {
+ idToBlock[b.ID] = b
+ if b.Kind == BlockExit {
+ exit.add(b.ID)
+ }
+ }
+
+ // Expand exit to include blocks post-dominated by exit blocks.
+ for {
+ changed := false
+ for _, id := range exit.contents() {
+ b := idToBlock[id]
+ NextPred:
+ for _, pe := range b.Preds {
+ p := pe.b
+ if exit.contains(p.ID) {
+ continue
+ }
+ for _, s := range p.Succs {
+ if !exit.contains(s.b.ID) {
+ continue NextPred
+ }
+ }
+ // All Succs are in exit; add p.
+ exit.add(p.ID)
+ changed = true
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+
+ // Initialize indegree of each block
+ for _, b := range f.Blocks {
+ if exit.contains(b.ID) {
+ // exit blocks are always scheduled last
+ continue
+ }
+ indegree[b.ID] = len(b.Preds)
+ if len(b.Preds) == 0 {
+ // Push an element to the tail of the queue.
+ zerodegree = append(zerodegree, b.ID)
+ } else {
+ posdegree.add(b.ID)
+ }
+ }
+
+ bid := f.Entry.ID
+blockloop:
+ for {
+ // add block to schedule
+ b := idToBlock[bid]
+ order = append(order, b)
+ scheduled[bid] = true
+ if len(order) == len(f.Blocks) {
+ break
+ }
+
+		// Here, the order in which we traverse b.Succs affects the direction in which the
+		// topological sort advances in depth. Take the following CFG as an example, ignoring
+		// other factors.
+		//           b1
+		//         0/ \1
+		//        b2   b3
+		// Traversing b.Succs in order schedules the right child b3 immediately after b1;
+		// traversing in reverse order schedules the left child b2 immediately after b1.
+		// Test results show that reverse traversal performs a little better.
+		// Note: consider both layout and register allocation when testing performance.
+ for i := len(b.Succs) - 1; i >= 0; i-- {
+ c := b.Succs[i].b
+ indegree[c.ID]--
+ if indegree[c.ID] == 0 {
+ posdegree.remove(c.ID)
+ zerodegree = append(zerodegree, c.ID)
+ } else {
+ succs = append(succs, c.ID)
+ }
+ }
+
+ // Pick the next block to schedule
+ // Pick among the successor blocks that have not been scheduled yet.
+
+ // Use likely direction if we have it.
+ var likely *Block
+ switch b.Likely {
+ case BranchLikely:
+ likely = b.Succs[0].b
+ case BranchUnlikely:
+ likely = b.Succs[1].b
+ }
+ if likely != nil && !scheduled[likely.ID] {
+ bid = likely.ID
+ continue
+ }
+
+ // Use degree for now.
+ bid = 0
+ // TODO: improve this part
+ // No successor of the previously scheduled block works.
+ // Pick a zero-degree block if we can.
+ for len(zerodegree) > 0 {
+ // Pop an element from the tail of the queue.
+ cid := zerodegree[len(zerodegree)-1]
+ zerodegree = zerodegree[:len(zerodegree)-1]
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+
+ // Still nothing, pick the unscheduled successor block encountered most recently.
+ for len(succs) > 0 {
+ // Pop an element from the tail of the queue.
+ cid := succs[len(succs)-1]
+ succs = succs[:len(succs)-1]
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+
+ // Still nothing, pick any non-exit block.
+ for posdegree.size() > 0 {
+ cid := posdegree.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ // Pick any exit block.
+ // TODO: Order these to minimize jump distances?
+ for {
+ cid := exit.pop()
+ if !scheduled[cid] {
+ bid = cid
+ continue blockloop
+ }
+ }
+ }
+ f.laidout = true
+ return order
+ //f.Blocks = order
+}
diff --git a/src/cmd/compile/internal/ssa/lca.go b/src/cmd/compile/internal/ssa/lca.go
new file mode 100644
index 0000000..90daebe
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca.go
@@ -0,0 +1,127 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/bits"
+)
+
+// Code to compute lowest common ancestors in the dominator tree.
+// https://en.wikipedia.org/wiki/Lowest_common_ancestor
+// https://en.wikipedia.org/wiki/Range_minimum_query#Solution_using_constant_time_and_linearithmic_space
+
+// lcaRange is a data structure that can compute lowest common ancestor queries
+// in O(n lg n) precomputed space and O(1) time per query.
+type lcaRange struct {
+ // Additional information about each block (indexed by block ID).
+ blocks []lcaRangeBlock
+
+ // Data structure for range minimum queries.
+ // rangeMin[k][i] contains the ID of the minimum depth block
+	// in the Euler tour from positions i to i+(1<<k)-1, inclusive.
+ rangeMin [][]ID
+}
+
+type lcaRangeBlock struct {
+ b *Block
+ parent ID // parent in dominator tree. 0 = no parent (entry or unreachable)
+ firstChild ID // first child in dominator tree
+ sibling ID // next child of parent
+ pos int32 // an index in the Euler tour where this block appears (any one of its occurrences)
+ depth int32 // depth in dominator tree (root=0, its children=1, etc.)
+}
+
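+// makeLCArange builds the LCA data structure for f: it flattens the
+// dominator tree into an Euler tour and precomputes a sparse table of
+// range-minimum-by-depth queries over that tour.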
+func makeLCArange(f *Func) *lcaRange {
+ dom := f.Idom()
+
+ // Build tree
+ blocks := make([]lcaRangeBlock, f.NumBlocks())
+ for _, b := range f.Blocks {
+ blocks[b.ID].b = b
+ if dom[b.ID] == nil {
+ continue // entry or unreachable
+ }
+ parent := dom[b.ID].ID
+ blocks[b.ID].parent = parent
+ blocks[b.ID].sibling = blocks[parent].firstChild
+ blocks[parent].firstChild = b.ID
+ }
+
+ // Compute euler tour ordering.
+ // Each reachable block will appear #children+1 times in the tour.
+ tour := make([]ID, 0, f.NumBlocks()*2-1)
+ type queueEntry struct {
+ bid ID // block to work on
+ cid ID // child we're already working on (0 = haven't started yet)
+ }
+ q := []queueEntry{{f.Entry.ID, 0}}
+ for len(q) > 0 {
+ n := len(q) - 1
+ bid := q[n].bid
+ cid := q[n].cid
+ q = q[:n]
+
+ // Add block to tour.
+ blocks[bid].pos = int32(len(tour))
+ tour = append(tour, bid)
+
+ // Proceed down next child edge (if any).
+ if cid == 0 {
+ // This is our first visit to b. Set its depth.
+ blocks[bid].depth = blocks[blocks[bid].parent].depth + 1
+ // Then explore its first child.
+ cid = blocks[bid].firstChild
+ } else {
+ // We've seen b before. Explore the next child.
+ cid = blocks[cid].sibling
+ }
+ if cid != 0 {
+ q = append(q, queueEntry{bid, cid}, queueEntry{cid, 0})
+ }
+ }
+
+ // Compute fast range-minimum query data structure
+ rangeMin := make([][]ID, 0, bits.Len64(uint64(len(tour))))
+ rangeMin = append(rangeMin, tour) // 1-size windows are just the tour itself.
+ for logS, s := 1, 2; s < len(tour); logS, s = logS+1, s*2 {
+ r := make([]ID, len(tour)-s+1)
+ for i := 0; i < len(tour)-s+1; i++ {
+ bid := rangeMin[logS-1][i]
+ bid2 := rangeMin[logS-1][i+s/2]
+ if blocks[bid2].depth < blocks[bid].depth {
+ bid = bid2
+ }
+ r[i] = bid
+ }
+ rangeMin = append(rangeMin, r)
+ }
+
+ return &lcaRange{blocks: blocks, rangeMin: rangeMin}
+}
+
+// find returns the lowest common ancestor of a and b.
+func (lca *lcaRange) find(a, b *Block) *Block {
+ if a == b {
+ return a
+ }
+	// Find the positions of a and b in the Euler tour.
+ p1 := lca.blocks[a.ID].pos
+ p2 := lca.blocks[b.ID].pos
+ if p1 > p2 {
+ p1, p2 = p2, p1
+ }
+
+ // The lowest common ancestor is the minimum depth block
+ // on the tour from p1 to p2. We've precomputed minimum
+ // depth blocks for powers-of-two subsequences of the tour.
+ // Combine the right two precomputed values to get the answer.
+ logS := uint(log64(int64(p2 - p1)))
+ bid1 := lca.rangeMin[logS][p1]
+ bid2 := lca.rangeMin[logS][p2-1<<logS+1]
+ if lca.blocks[bid1].depth < lca.blocks[bid2].depth {
+ return lca.blocks[bid1].b
+ }
+ return lca.blocks[bid2].b
+}
diff --git a/src/cmd/compile/internal/ssa/lca_test.go b/src/cmd/compile/internal/ssa/lca_test.go
new file mode 100644
index 0000000..8c8920c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lca_test.go
@@ -0,0 +1,88 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+func testLCAgen(t *testing.T, bg blockGen, size int) {
+ c := testConfig(t)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ if size == 4 {
+ t.Logf(fun.f.String())
+ }
+ lca1 := makeLCArange(fun.f)
+ lca2 := makeLCAeasy(fun.f)
+ for _, b := range fun.f.Blocks {
+ for _, c := range fun.f.Blocks {
+ l1 := lca1.find(b, c)
+ l2 := lca2.find(b, c)
+ if l1 != l2 {
+ t.Errorf("lca(%s,%s)=%s, want %s", b, c, l1, l2)
+ }
+ }
+ }
+}
+
+func TestLCALinear(t *testing.T) {
+ testLCAgen(t, genLinear, 10)
+ testLCAgen(t, genLinear, 100)
+}
+
+func TestLCAFwdBack(t *testing.T) {
+ testLCAgen(t, genFwdBack, 10)
+ testLCAgen(t, genFwdBack, 100)
+}
+
+func TestLCAManyPred(t *testing.T) {
+ testLCAgen(t, genManyPred, 10)
+ testLCAgen(t, genManyPred, 100)
+}
+
+func TestLCAMaxPred(t *testing.T) {
+ testLCAgen(t, genMaxPred, 10)
+ testLCAgen(t, genMaxPred, 100)
+}
+
+func TestLCAMaxPredValue(t *testing.T) {
+ testLCAgen(t, genMaxPredValue, 10)
+ testLCAgen(t, genMaxPredValue, 100)
+}
+
+// Simple implementation of LCA to compare against.
+type lcaEasy struct {
+ parent []*Block
+}
+
+func makeLCAeasy(f *Func) *lcaEasy {
+ return &lcaEasy{parent: dominators(f)}
+}
+
+func (lca *lcaEasy) find(a, b *Block) *Block {
+ da := lca.depth(a)
+ db := lca.depth(b)
+ for da > db {
+ da--
+ a = lca.parent[a.ID]
+ }
+ for da < db {
+ db--
+ b = lca.parent[b.ID]
+ }
+ for a != b {
+ a = lca.parent[a.ID]
+ b = lca.parent[b.ID]
+ }
+ return a
+}
+
+func (lca *lcaEasy) depth(b *Block) int {
+ n := 0
+ for b != nil {
+ b = lca.parent[b.ID]
+ n++
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/likelyadjust.go b/src/cmd/compile/internal/ssa/likelyadjust.go
new file mode 100644
index 0000000..f462bf2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/likelyadjust.go
@@ -0,0 +1,576 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+)
+
+type loop struct {
+ header *Block // The header node of this (reducible) loop
+ outer *loop // loop containing this loop
+
+ // By default, children, exits, and depth are not initialized.
+ children []*loop // loops nested directly within this loop. Initialized by assembleChildren().
+ exits []*Block // exits records blocks reached by exits from this loop. Initialized by findExits().
+
+	// The next three fields are used by regalloc and/or
+	// aid in computing inner-ness and the list of blocks.
+ nBlocks int32 // Number of blocks in this loop but not within inner loops
+ depth int16 // Nesting depth of the loop; 1 is outermost. Initialized by calculateDepths().
+ isInner bool // True if never discovered to contain a loop
+
+ // register allocation uses this.
+ containsUnavoidableCall bool // True if all paths through the loop have a call
+}
+
+// outerinner records that outer contains inner
+func (sdom SparseTree) outerinner(outer, inner *loop) {
+	// There could be other outer loops found in some random order;
+	// locate the new outer loop appropriately among them.
+
+ // Outer loop headers dominate inner loop headers.
+ // Use this to put the "new" "outer" loop in the right place.
+ oldouter := inner.outer
+ for oldouter != nil && sdom.isAncestor(outer.header, oldouter.header) {
+ inner = oldouter
+ oldouter = inner.outer
+ }
+ if outer == oldouter {
+ return
+ }
+ if oldouter != nil {
+ sdom.outerinner(oldouter, outer)
+ }
+
+ inner.outer = outer
+ outer.isInner = false
+}
+
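+// checkContainsCall reports whether bb contains a call
+// (a BlockDefer block counts as containing a call).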
+func checkContainsCall(bb *Block) bool {
+ if bb.Kind == BlockDefer {
+ return true
+ }
+ for _, v := range bb.Values {
+ if opcodeTable[v.Op].call {
+ return true
+ }
+ }
+ return false
+}
+
+type loopnest struct {
+ f *Func
+ b2l []*loop
+ po []*Block
+ sdom SparseTree
+ loops []*loop
+ hasIrreducible bool // TODO current treatment of irreducible loops is very flaky, if accurate loops are needed, must punt at function level.
+
+ // Record which of the lazily initialized fields have actually been initialized.
+ initializedChildren, initializedDepth, initializedExits bool
+}
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+const (
+ blDEFAULT = 0
+ blMin = blDEFAULT
+ blCALL = 1
+ blRET = 2
+ blEXIT = 3
+)
+
+var bllikelies = [4]string{"default", "call", "ret", "exit"}
+
+func describePredictionAgrees(b *Block, prediction BranchPrediction) string {
+ s := ""
+ if prediction == b.Likely {
+ s = " (agrees with previous)"
+ } else if b.Likely != BranchUnknown {
+ s = " (disagrees with previous, ignored)"
+ }
+ return s
+}
+
+func describeBranchPrediction(f *Func, b *Block, likely, not int8, prediction BranchPrediction) {
+ f.Warnl(b.Pos, "Branch prediction rule %s < %s%s",
+ bllikelies[likely-blMin], bllikelies[not-blMin], describePredictionAgrees(b, prediction))
+}
+
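+// likelyadjust fills in missing branch predictions (b.Likely) using loop
+// structure and the relative badness of what is reachable along each
+// successor (calls, returns, exits).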
+func likelyadjust(f *Func) {
+ // The values assigned to certain and local only matter
+ // in their rank order. 0 is default, more positive
+ // is less likely. It's possible to assign a negative
+ // unlikeliness (though not currently the case).
+ certain := make([]int8, f.NumBlocks()) // In the long run, all outcomes are at least this bad. Mainly for Exit
+ local := make([]int8, f.NumBlocks()) // for our immediate predecessors.
+
+ po := f.postorder()
+ nest := f.loopnest()
+ b2l := nest.b2l
+
+ for _, b := range po {
+ switch b.Kind {
+ case BlockExit:
+ // Very unlikely.
+ local[b.ID] = blEXIT
+ certain[b.ID] = blEXIT
+
+ // Ret, it depends.
+ case BlockRet, BlockRetJmp:
+ local[b.ID] = blRET
+ certain[b.ID] = blRET
+
+ // Calls. TODO not all calls are equal, names give useful clues.
+ // Any name-based heuristics are only relative to other calls,
+ // and less influential than inferences from loop structure.
+ case BlockDefer:
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+
+ default:
+ if len(b.Succs) == 1 {
+ certain[b.ID] = certain[b.Succs[0].b.ID]
+ } else if len(b.Succs) == 2 {
+ // If successor is an unvisited backedge, it's in loop and we don't care.
+ // Its default unlikely is also zero which is consistent with favoring loop edges.
+ // Notice that this can act like a "reset" on unlikeliness at loops; the
+ // default "everything returns" unlikeliness is erased by min with the
+ // backedge likeliness; however a loop with calls on every path will be
+ // tagged with call cost. Net effect is that loop entry is favored.
+ b0 := b.Succs[0].b.ID
+ b1 := b.Succs[1].b.ID
+ certain[b.ID] = min8(certain[b0], certain[b1])
+
+ l := b2l[b.ID]
+ l0 := b2l[b0]
+ l1 := b2l[b1]
+
+ prediction := b.Likely
+ // Weak loop heuristic -- both source and at least one dest are in loops,
+ // and there is a difference in the destinations.
+ // TODO what is best arrangement for nested loops?
+ if l != nil && l0 != l1 {
+ noprediction := false
+ switch {
+ // prefer not to exit loops
+ case l1 == nil:
+ prediction = BranchLikely
+ case l0 == nil:
+ prediction = BranchUnlikely
+
+ // prefer to stay in loop, not exit to outer.
+ case l == l0:
+ prediction = BranchLikely
+ case l == l1:
+ prediction = BranchUnlikely
+ default:
+ noprediction = true
+ }
+ if f.pass.debug > 0 && !noprediction {
+ f.Warnl(b.Pos, "Branch prediction rule stay in loop%s",
+ describePredictionAgrees(b, prediction))
+ }
+
+ } else {
+ // Lacking loop structure, fall back on heuristics.
+ if certain[b1] > certain[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b0], certain[b1], prediction)
+ }
+ } else if certain[b0] > certain[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, certain[b1], certain[b0], prediction)
+ }
+ } else if local[b1] > local[b0] {
+ prediction = BranchLikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b0], local[b1], prediction)
+ }
+ } else if local[b0] > local[b1] {
+ prediction = BranchUnlikely
+ if f.pass.debug > 0 {
+ describeBranchPrediction(f, b, local[b1], local[b0], prediction)
+ }
+ }
+ }
+ if b.Likely != prediction {
+ if b.Likely == BranchUnknown {
+ b.Likely = prediction
+ }
+ }
+ }
+ // Look for calls in the block. If there is one, make this block unlikely.
+ for _, v := range b.Values {
+ if opcodeTable[v.Op].call {
+ local[b.ID] = blCALL
+ certain[b.ID] = max8(blCALL, certain[b.Succs[0].b.ID])
+ break
+ }
+ }
+ }
+ if f.pass.debug > 2 {
+ f.Warnl(b.Pos, "BP: Block %s, local=%s, certain=%s", b, bllikelies[local[b.ID]-blMin], bllikelies[certain[b.ID]-blMin])
+ }
+
+ }
+}
+
+func (l *loop) String() string {
+ return fmt.Sprintf("hdr:%s", l.header)
+}
+
+func (l *loop) LongString() string {
+ i := ""
+ o := ""
+ if l.isInner {
+ i = ", INNER"
+ }
+ if l.outer != nil {
+ o = ", o=" + l.outer.header.String()
+ }
+ return fmt.Sprintf("hdr:%s%s%s", l.header, i, o)
+}
+
+func (l *loop) isWithinOrEq(ll *loop) bool {
+ if ll == nil { // nil means whole program
+ return true
+ }
+ for ; l != nil; l = l.outer {
+ if l == ll {
+ return true
+ }
+ }
+ return false
+}
+
+// nearestOuterLoop returns the outer loop of loop most nearly
+// containing block b; the header must dominate b. loop itself
+// is assumed to not be that loop. For acceptable performance,
+// we're relying on loop nests to not be terribly deep.
+func (l *loop) nearestOuterLoop(sdom SparseTree, b *Block) *loop {
+ var o *loop
+ for o = l.outer; o != nil && !sdom.IsAncestorEq(o.header, b); o = o.outer {
+ }
+ return o
+}
+
+func loopnestfor(f *Func) *loopnest {
+ po := f.postorder()
+ sdom := f.Sdom()
+ b2l := make([]*loop, f.NumBlocks())
+ loops := make([]*loop, 0)
+ visited := make([]bool, f.NumBlocks())
+ sawIrred := false
+
+ if f.pass.debug > 2 {
+ fmt.Printf("loop finding in %s\n", f.Name)
+ }
+
+ // Reducible-loop-nest-finding.
+ for _, b := range po {
+ if f.pass != nil && f.pass.debug > 3 {
+ fmt.Printf("loop finding at %s\n", b)
+ }
+
+ var innermost *loop // innermost header reachable from this block
+
+ // IF any successor s of b is in a loop headed by h
+ // AND h dominates b
+ // THEN b is in the loop headed by h.
+ //
+ // Choose the first/innermost such h.
+ //
+ // IF s itself dominates b, then s is a loop header;
+ // and there may be more than one such s.
+ // Since there's at most 2 successors, the inner/outer ordering
+ // between them can be established with simple comparisons.
+ for _, e := range b.Succs {
+ bb := e.b
+ l := b2l[bb.ID]
+
+ if sdom.IsAncestorEq(bb, b) { // Found a loop header
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is header\n", bb.String(), b.String())
+ }
+ if l == nil {
+ l = &loop{header: bb, isInner: true}
+ loops = append(loops, l)
+ b2l[bb.ID] = l
+ }
+ } else if !visited[bb.ID] { // Found an irreducible loop
+ sawIrred = true
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s is IRRED, in %s\n", bb.String(), b.String(), f.Name)
+ }
+ } else if l != nil {
+ // TODO handle case where l is irreducible.
+ // Perhaps a loop header is inherited.
+ // is there any loop containing our successor whose
+ // header dominates b?
+ if !sdom.IsAncestorEq(l.header, b) {
+ l = l.nearestOuterLoop(sdom, b)
+ }
+ if f.pass != nil && f.pass.debug > 4 {
+ if l == nil {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ } else {
+ fmt.Printf("loop finding succ %s of %s provides loop with header %s\n", bb.String(), b.String(), l.header.String())
+ }
+ }
+ } else { // No loop
+ if f.pass != nil && f.pass.debug > 4 {
+ fmt.Printf("loop finding succ %s of %s has no loop\n", bb.String(), b.String())
+ }
+
+ }
+
+ if l == nil || innermost == l {
+ continue
+ }
+
+ if innermost == nil {
+ innermost = l
+ continue
+ }
+
+ if sdom.isAncestor(innermost.header, l.header) {
+ sdom.outerinner(innermost, l)
+ innermost = l
+ } else if sdom.isAncestor(l.header, innermost.header) {
+ sdom.outerinner(l, innermost)
+ }
+ }
+
+ if innermost != nil {
+ b2l[b.ID] = innermost
+ innermost.nBlocks++
+ }
+ visited[b.ID] = true
+ }
+
+ ln := &loopnest{f: f, b2l: b2l, po: po, sdom: sdom, loops: loops, hasIrreducible: sawIrred}
+
+ // Calculate containsUnavoidableCall for regalloc
+ dominatedByCall := make([]bool, f.NumBlocks())
+ for _, b := range po {
+ if checkContainsCall(b) {
+ dominatedByCall[b.ID] = true
+ }
+ }
+	// Run a DFS to find a path through the loop that avoids all calls.
+	// Such a path either escapes the loop or returns to the header.
+	// It isn't enough for the exit to not be dominated by any call, for example:
+ // ... some loop
+ // call1 call2
+ // \ /
+ // exit
+ // ...
+	// exit is not dominated by any call, but there is no call-free path to it.
+ for _, l := range loops {
+ // Header contains call.
+ if dominatedByCall[l.header.ID] {
+ l.containsUnavoidableCall = true
+ continue
+ }
+ callfreepath := false
+ tovisit := make([]*Block, 0, len(l.header.Succs))
+ // Push all non-loop non-exit successors of header onto toVisit.
+ for _, s := range l.header.Succs {
+ nb := s.Block()
+ // This corresponds to loop with zero iterations.
+ if !l.iterationEnd(nb, b2l) {
+ tovisit = append(tovisit, nb)
+ }
+ }
+ for len(tovisit) > 0 {
+ cur := tovisit[len(tovisit)-1]
+ tovisit = tovisit[:len(tovisit)-1]
+ if dominatedByCall[cur.ID] {
+ continue
+ }
+ // Record visited in dominatedByCall.
+ dominatedByCall[cur.ID] = true
+ for _, s := range cur.Succs {
+ nb := s.Block()
+ if l.iterationEnd(nb, b2l) {
+ callfreepath = true
+ }
+ if !dominatedByCall[nb.ID] {
+ tovisit = append(tovisit, nb)
+ }
+
+ }
+ if callfreepath {
+ break
+ }
+ }
+ if !callfreepath {
+ l.containsUnavoidableCall = true
+ }
+ }
+
+ // Curious about the loopiness? "-d=ssa/likelyadjust/stats"
+ if f.pass != nil && f.pass.stats > 0 && len(loops) > 0 {
+ ln.assembleChildren()
+ ln.calculateDepths()
+ ln.findExits()
+
+ // Note stats for non-innermost loops are slightly flawed because
+ // they don't account for inner loop exits that span multiple levels.
+
+ for _, l := range loops {
+ x := len(l.exits)
+ cf := 0
+ if !l.containsUnavoidableCall {
+ cf = 1
+ }
+ inner := 0
+ if l.isInner {
+ inner++
+ }
+
+ f.LogStat("loopstats:",
+ l.depth, "depth", x, "exits",
+ inner, "is_inner", cf, "always_calls", l.nBlocks, "n_blocks")
+ }
+ }
+
+ if f.pass != nil && f.pass.debug > 1 && len(loops) > 0 {
+ fmt.Printf("Loops in %s:\n", f.Name)
+ for _, l := range loops {
+ fmt.Printf("%s, b=", l.LongString())
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == l {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ fmt.Printf("Nonloop blocks in %s:", f.Name)
+ for _, b := range f.Blocks {
+ if b2l[b.ID] == nil {
+ fmt.Printf(" %s", b)
+ }
+ }
+ fmt.Print("\n")
+ }
+ return ln
+}
+
+// assembleChildren initializes the children field of each
+// loop in the nest. Loop A is a child of loop B if A is
+// directly nested within B (based on the reducible-loops
+// detection above).
+func (ln *loopnest) assembleChildren() {
+ if ln.initializedChildren {
+ return
+ }
+ for _, l := range ln.loops {
+ if l.outer != nil {
+ l.outer.children = append(l.outer.children, l)
+ }
+ }
+ ln.initializedChildren = true
+}
+
+// calculateDepths uses the children field of loops
+// to determine the nesting depth (outer=1) of each
+// loop. This is helpful for finding exit edges.
+func (ln *loopnest) calculateDepths() {
+ if ln.initializedDepth {
+ return
+ }
+ ln.assembleChildren()
+ for _, l := range ln.loops {
+ if l.outer == nil {
+ l.setDepth(1)
+ }
+ }
+ ln.initializedDepth = true
+}
+
+// findExits uses loop depth information to find the
+// exits from a loop.
+func (ln *loopnest) findExits() {
+ if ln.initializedExits {
+ return
+ }
+ ln.calculateDepths()
+ b2l := ln.b2l
+ for _, b := range ln.po {
+ l := b2l[b.ID]
+ if l != nil && len(b.Succs) == 2 {
+ sl := b2l[b.Succs[0].b.ID]
+ if recordIfExit(l, sl, b.Succs[0].b) {
+ continue
+ }
+ sl = b2l[b.Succs[1].b.ID]
+ if recordIfExit(l, sl, b.Succs[1].b) {
+ continue
+ }
+ }
+ }
+ ln.initializedExits = true
+}
+
+// depth returns the loop nesting level of block b.
+func (ln *loopnest) depth(b ID) int16 {
+ if l := ln.b2l[b]; l != nil {
+ return l.depth
+ }
+ return 0
+}
+
+// recordIfExit checks sl (the loop containing b) to see if it
+// is outside of loop l, and if so, records b as an exit block
+// from l and returns true.
+func recordIfExit(l, sl *loop, b *Block) bool {
+ if sl != l {
+ if sl == nil || sl.depth <= l.depth {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ // sl is not nil, and is deeper than l
+ // it's possible for this to be a goto into an irreducible loop made from gotos.
+ for sl.depth > l.depth {
+ sl = sl.outer
+ }
+ if sl != l {
+ l.exits = append(l.exits, b)
+ return true
+ }
+ }
+ return false
+}
+
+func (l *loop) setDepth(d int16) {
+ l.depth = d
+ for _, c := range l.children {
+ c.setDepth(d + 1)
+ }
+}
+
+// iterationEnd checks if block b ends iteration of loop l.
+// Ending iteration means either escaping to outer loop/code or
+// going back to the header.
+func (l *loop) iterationEnd(b *Block, b2l []*loop) bool {
+ return b == l.header || b2l[b.ID] == nil || (b2l[b.ID] != l && b2l[b.ID].depth <= l.depth)
+}
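
The rank constants above only interact through min8 and max8: taking the minimum across a branch's two successors models "the best outcome still reachable from here", while taking the maximum against blCALL models "this block costs at least a call". A minimal standalone sketch of that combination (illustrative only; it is not part of the compiler):

package main

import "fmt"

// Unlikeliness ranks, mirroring blDEFAULT < blCALL < blRET < blEXIT above.
const (
	costDefault int8 = iota
	costCall
	costRet
	costExit
)

func min8(a, b int8) int8 {
	if a < b {
		return a
	}
	return b
}

func max8(a, b int8) int8 {
	if a > b {
		return a
	}
	return b
}

func main() {
	// A two-way branch whose successors lead to an exit and to a return:
	// the branch inherits the better (smaller) rank, so the return-side
	// edge is the one predicted likely.
	fmt.Println(min8(costExit, costRet)) // 2, i.e. "ret"

	// A block containing a call is at least as costly as a call,
	// whatever its successor's rank was.
	fmt.Println(max8(costCall, costDefault)) // 1, i.e. "call"
}
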
diff --git a/src/cmd/compile/internal/ssa/location.go b/src/cmd/compile/internal/ssa/location.go
new file mode 100644
index 0000000..b575feb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/location.go
@@ -0,0 +1,109 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// A place that an ssa variable can reside.
+type Location interface {
+ String() string // name to use in assembly templates: AX, 16(SP), ...
+}
+
+// A Register is a machine register, like AX.
+// They are numbered densely from 0 (for each architecture).
+type Register struct {
+ num int32 // dense numbering
+ objNum int16 // register number from cmd/internal/obj/$ARCH
+ gcNum int16 // GC register map number (dense numbering of registers that can contain pointers)
+ name string
+}
+
+func (r *Register) String() string {
+ return r.name
+}
+
+// ObjNum returns the register number from cmd/internal/obj/$ARCH that
+// corresponds to this register.
+func (r *Register) ObjNum() int16 {
+ return r.objNum
+}
+
+// GCNum returns the runtime GC register index of r, or -1 if this
+// register can't contain pointers.
+func (r *Register) GCNum() int16 {
+ return r.gcNum
+}
+
+// A LocalSlot is a location in the stack frame, which identifies and stores
+// part or all of a PPARAM, PPARAMOUT, or PAUTO ONAME node.
+// It can represent a whole variable, part of a larger stack slot, or part of a
+// variable that has been decomposed into multiple stack slots.
+// As an example, a string could have the following configurations:
+//
+// stack layout LocalSlots
+//
+// Optimizations are disabled. s is on the stack and represented in its entirety.
+// [ ------- s string ---- ] { N: s, Type: string, Off: 0 }
+//
+// s was not decomposed, but the SSA operates on its parts individually, so
+// there is a LocalSlot for each of its fields that points into the single stack slot.
+// [ ------- s string ---- ] { N: s, Type: *uint8, Off: 0 }, {N: s, Type: int, Off: 8}
+//
+// s was decomposed. Each of its fields is in its own stack slot and has its own LocalSlot.
+// [ ptr *uint8 ] [ len int] { N: ptr, Type: *uint8, Off: 0, SplitOf: parent, SplitOffset: 0},
+// { N: len, Type: int, Off: 0, SplitOf: parent, SplitOffset: 8}
+// parent = &{N: s, Type: string}
+type LocalSlot struct {
+ N *ir.Name // an ONAME *ir.Name representing a stack location.
+ Type *types.Type // type of slot
+ Off int64 // offset of slot in N
+
+ SplitOf *LocalSlot // slot is a decomposition of SplitOf
+ SplitOffset int64 // .. at this offset.
+}
+
+func (s LocalSlot) String() string {
+ if s.Off == 0 {
+ return fmt.Sprintf("%v[%v]", s.N, s.Type)
+ }
+ return fmt.Sprintf("%v+%d[%v]", s.N, s.Off, s.Type)
+}
+
+type LocPair [2]Location
+
+func (t LocPair) String() string {
+ n0, n1 := "nil", "nil"
+ if t[0] != nil {
+ n0 = t[0].String()
+ }
+ if t[1] != nil {
+ n1 = t[1].String()
+ }
+ return fmt.Sprintf("<%s,%s>", n0, n1)
+}
+
+type LocResults []Location
+
+func (t LocResults) String() string {
+ s := ""
+ a := "<"
+ for _, r := range t {
+ a += s
+ s = ","
+ a += r.String()
+ }
+ a += ">"
+ return a
+}
+
+type Spill struct {
+ Type *types.Type
+ Offset int64
+ Reg int16
+}
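
The Off values in the string example above are just the field offsets of the two-word string header: data pointer at 0, length at 8 on a 64-bit target. A small self-contained illustration of where those numbers come from (the struct below is a stand-in for the stack layout, not the compiler's own representation):

package main

import (
	"fmt"
	"unsafe"
)

// stringHeader stands in for the layout a string occupies on the stack:
// a data pointer followed by a length.
type stringHeader struct {
	ptr *byte
	len int
}

func main() {
	// On a 64-bit target this prints 0 and 8, matching the Off values
	// in the LocalSlot example for a decomposed string.
	fmt.Println(unsafe.Offsetof(stringHeader{}.ptr))
	fmt.Println(unsafe.Offsetof(stringHeader{}.len))
}
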
diff --git a/src/cmd/compile/internal/ssa/loopbce.go b/src/cmd/compile/internal/ssa/loopbce.go
new file mode 100644
index 0000000..fd03efb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopbce.go
@@ -0,0 +1,353 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "math"
+)
+
+type indVarFlags uint8
+
+const (
+ indVarMinExc indVarFlags = 1 << iota // minimum value is exclusive (default: inclusive)
+ indVarMaxInc // maximum value is inclusive (default: exclusive)
+)
+
+type indVar struct {
+ ind *Value // induction variable
+ min *Value // minimum value, inclusive/exclusive depends on flags
+ max *Value // maximum value, inclusive/exclusive depends on flags
+ entry *Block // entry block in the loop.
+ flags indVarFlags
+ // Invariant: for all blocks strictly dominated by entry:
+ // min <= ind < max [if flags == 0]
+ // min < ind < max [if flags == indVarMinExc]
+ // min <= ind <= max [if flags == indVarMaxInc]
+ // min < ind <= max [if flags == indVarMinExc|indVarMaxInc]
+}
+
+// parseIndVar checks whether the SSA value passed as argument is a valid induction
+// variable, and, if so, extracts:
+// * the minimum bound
+// * the increment value
+// * the "next" value (SSA value that is Phi'd into the induction variable every loop)
+// Currently, we detect induction variables that match (Phi min nxt),
+// with nxt being (Add inc ind).
+// If it can't parse the induction variable correctly, it returns (nil, nil, nil).
+func parseIndVar(ind *Value) (min, inc, nxt *Value) {
+ if ind.Op != OpPhi {
+ return
+ }
+
+ if n := ind.Args[0]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[1], n
+ } else if n := ind.Args[1]; n.Op == OpAdd64 && (n.Args[0] == ind || n.Args[1] == ind) {
+ min, nxt = ind.Args[0], n
+ } else {
+ // Not a recognized induction variable.
+ return
+ }
+
+ if nxt.Args[0] == ind { // nxt = ind + inc
+ inc = nxt.Args[1]
+ } else if nxt.Args[1] == ind { // nxt = inc + ind
+ inc = nxt.Args[0]
+ } else {
+ panic("unreachable") // one of the cases must be true from the above.
+ }
+
+ return
+}
+
+// findIndVar finds induction variables in a function.
+//
+// Look for variables and blocks that satisfy the following
+//
+// loop:
+// ind = (Phi min nxt),
+// if ind < max
+// then goto enter_loop
+// else goto exit_loop
+//
+// enter_loop:
+// do something
+// nxt = inc + ind
+// goto loop
+//
+// exit_loop:
+//
+//
+// TODO: handle 32 bit operations
+func findIndVar(f *Func) []indVar {
+ var iv []indVar
+ sdom := f.Sdom()
+
+ for _, b := range f.Blocks {
+ if b.Kind != BlockIf || len(b.Preds) != 2 {
+ continue
+ }
+
+ var flags indVarFlags
+ var ind, max *Value // induction, and maximum
+
+		// Check that the control is either ind </<= max or max >/>= ind.
+ // TODO: Handle 32-bit comparisons.
+ // TODO: Handle unsigned comparisons?
+ c := b.Controls[0]
+ switch c.Op {
+ case OpLeq64:
+ flags |= indVarMaxInc
+ fallthrough
+ case OpLess64:
+ ind, max = c.Args[0], c.Args[1]
+ default:
+ continue
+ }
+
+ // See if this is really an induction variable
+ less := true
+ min, inc, nxt := parseIndVar(ind)
+ if min == nil {
+ // We failed to parse the induction variable. Before punting, we want to check
+ // whether the control op was written with arguments in non-idiomatic order,
+			// so that what we believe to be "max" (the upper bound) is actually the induction
+			// variable itself. This would happen for code like:
+ // for i := 0; len(n) > i; i++
+ min, inc, nxt = parseIndVar(max)
+ if min == nil {
+				// No recognized induction variable on either operand
+ continue
+ }
+
+ // Ok, the arguments were reversed. Swap them, and remember that we're
+ // looking at a ind >/>= loop (so the induction must be decrementing).
+ ind, max = max, ind
+ less = false
+ }
+
+ // Expect the increment to be a nonzero constant.
+ if inc.Op != OpConst64 {
+ continue
+ }
+ step := inc.AuxInt
+ if step == 0 {
+ continue
+ }
+
+ // Increment sign must match comparison direction.
+ // When incrementing, the termination comparison must be ind </<= max.
+ // When decrementing, the termination comparison must be ind >/>= max.
+ // See issue 26116.
+ if step > 0 && !less {
+ continue
+ }
+ if step < 0 && less {
+ continue
+ }
+
+ // If the increment is negative, swap min/max and their flags
+ if step < 0 {
+ min, max = max, min
+ oldf := flags
+ flags = indVarMaxInc
+ if oldf&indVarMaxInc == 0 {
+ flags |= indVarMinExc
+ }
+ step = -step
+ }
+
+ if flags&indVarMaxInc != 0 && max.Op == OpConst64 && max.AuxInt+step < max.AuxInt {
+ // For a <= comparison, we need to make sure that a value equal to
+ // max can be incremented without overflowing.
+ // (For a < comparison, the %step check below ensures no overflow.)
+ continue
+ }
+
+ // Up to now we extracted the induction variable (ind),
+ // the increment delta (inc), the temporary sum (nxt),
+		// the minimum value (min) and the maximum value (max).
+ //
+ // We also know that ind has the form (Phi min nxt) where
+		// nxt is (Add inc ind) which means: 1) inc dominates nxt
+ // and 2) there is a loop starting at inc and containing nxt.
+ //
+ // We need to prove that the induction variable is incremented
+ // only when it's smaller than the maximum value.
+		// The two conditions listed below must hold for ind
+		// to be accepted as an induction variable.
+
+ // First condition: loop entry has a single predecessor, which
+ // is the header block. This implies that b.Succs[0] is
+ // reached iff ind < max.
+ if len(b.Succs[0].b.Preds) != 1 {
+ // b.Succs[1] must exit the loop.
+ continue
+ }
+
+ // Second condition: b.Succs[0] dominates nxt so that
+ // nxt is computed when inc < max, meaning nxt <= max.
+ if !sdom.IsAncestorEq(b.Succs[0].b, nxt.Block) {
+ // inc+ind can only be reached through the branch that enters the loop.
+ continue
+ }
+
+		// We can only guarantee that the loop runs within the limits of the induction variable
+ // if (one of)
+ // (1) the increment is ±1
+ // (2) the limits are constants
+ // (3) loop is of the form k0 upto Known_not_negative-k inclusive, step <= k
+ // (4) loop is of the form k0 upto Known_not_negative-k exclusive, step <= k+1
+ // (5) loop is of the form Known_not_negative downto k0, minint+step < k0
+ if step > 1 {
+ ok := false
+ if min.Op == OpConst64 && max.Op == OpConst64 {
+ if max.AuxInt > min.AuxInt && max.AuxInt%step == min.AuxInt%step { // handle overflow
+ ok = true
+ }
+ }
+ // Handle induction variables of these forms.
+ // KNN is known-not-negative.
+ // SIGNED ARITHMETIC ONLY. (see switch on c above)
+ // Possibilities for KNN are len and cap; perhaps we can infer others.
+ // for i := 0; i <= KNN-k ; i += k
+ // for i := 0; i < KNN-(k-1); i += k
+ // Also handle decreasing.
+
+ // "Proof" copied from https://go-review.googlesource.com/c/go/+/104041/10/src/cmd/compile/internal/ssa/loopbce.go#164
+ //
+ // In the case of
+ // // PC is Positive Constant
+ // L := len(A)-PC
+ // for i := 0; i < L; i = i+PC
+ //
+ // we know:
+ //
+ // 0 + PC does not over/underflow.
+ // len(A)-PC does not over/underflow
+ // maximum value for L is MaxInt-PC
+ // i < L <= MaxInt-PC means i + PC < MaxInt hence no overflow.
+
+ // To match in SSA:
+ // if (a) min.Op == OpConst64(k0)
+ // and (b) k0 >= MININT + step
+ // and (c) max.Op == OpSubtract(Op{StringLen,SliceLen,SliceCap}, k)
+ // or (c) max.Op == OpAdd(Op{StringLen,SliceLen,SliceCap}, -k)
+ // or (c) max.Op == Op{StringLen,SliceLen,SliceCap}
+ // and (d) if upto loop, require indVarMaxInc && step <= k or !indVarMaxInc && step-1 <= k
+
+ if min.Op == OpConst64 && min.AuxInt >= step+math.MinInt64 {
+ knn := max
+ k := int64(0)
+ var kArg *Value
+
+ switch max.Op {
+ case OpSub64:
+ knn = max.Args[0]
+ kArg = max.Args[1]
+
+ case OpAdd64:
+ knn = max.Args[0]
+ kArg = max.Args[1]
+ if knn.Op == OpConst64 {
+ knn, kArg = kArg, knn
+ }
+ }
+ switch knn.Op {
+ case OpSliceLen, OpStringLen, OpSliceCap:
+ default:
+ knn = nil
+ }
+
+ if kArg != nil && kArg.Op == OpConst64 {
+ k = kArg.AuxInt
+ if max.Op == OpAdd64 {
+ k = -k
+ }
+ }
+ if k >= 0 && knn != nil {
+ if inc.AuxInt > 0 { // increasing iteration
+ // The concern for the relation between step and k is to ensure that iv never exceeds knn
+ // i.e., iv < knn-(K-1) ==> iv + K <= knn; iv <= knn-K ==> iv +K < knn
+ if step <= k || flags&indVarMaxInc == 0 && step-1 == k {
+ ok = true
+ }
+ } else { // decreasing iteration
+ // Will be decrementing from max towards min; max is knn-k; will only attempt decrement if
+ // knn-k >[=] min; underflow is only a concern if min-step is not smaller than min.
+ // This all assumes signed integer arithmetic
+ // This is already assured by the test above: min.AuxInt >= step+math.MinInt64
+ ok = true
+ }
+ }
+ }
+
+ // TODO: other unrolling idioms
+ // for i := 0; i < KNN - KNN % k ; i += k
+ // for i := 0; i < KNN&^(k-1) ; i += k // k a power of 2
+ // for i := 0; i < KNN&(-k) ; i += k // k a power of 2
+
+ if !ok {
+ continue
+ }
+ }
+
+ if f.pass.debug >= 1 {
+ printIndVar(b, ind, min, max, step, flags)
+ }
+
+ iv = append(iv, indVar{
+ ind: ind,
+ min: min,
+ max: max,
+ entry: b.Succs[0].b,
+ flags: flags,
+ })
+ b.Logf("found induction variable %v (inc = %v, min = %v, max = %v)\n", ind, inc, min, max)
+ }
+
+ return iv
+}
+
+func dropAdd64(v *Value) (*Value, int64) {
+ if v.Op == OpAdd64 && v.Args[0].Op == OpConst64 {
+ return v.Args[1], v.Args[0].AuxInt
+ }
+ if v.Op == OpAdd64 && v.Args[1].Op == OpConst64 {
+ return v.Args[0], v.Args[1].AuxInt
+ }
+ return v, 0
+}
+
+func printIndVar(b *Block, i, min, max *Value, inc int64, flags indVarFlags) {
+ mb1, mb2 := "[", "]"
+ if flags&indVarMinExc != 0 {
+ mb1 = "("
+ }
+ if flags&indVarMaxInc == 0 {
+ mb2 = ")"
+ }
+
+ mlim1, mlim2 := fmt.Sprint(min.AuxInt), fmt.Sprint(max.AuxInt)
+ if !min.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim1 = fmt.Sprint(min)
+ } else {
+ mlim1 = "?"
+ }
+ }
+ if !max.isGenericIntConst() {
+ if b.Func.pass.debug >= 2 {
+ mlim2 = fmt.Sprint(max)
+ } else {
+ mlim2 = "?"
+ }
+ }
+ extra := ""
+ if b.Func.pass.debug >= 2 {
+ extra = fmt.Sprintf(" (%s)", i)
+ }
+ b.Func.Warnl(b.Pos, "Induction variable: limits %v%v,%v%v, increment %d%s", mb1, mlim1, mlim2, mb2, inc, extra)
+}
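
At the source level, the pattern findIndVar matches is the ordinary counted loop: ind = (Phi min nxt), a Less64/Leq64 control against the bound, and nxt = ind + inc computed only inside the body. A hedged sketch of such a loop and the report it should produce via printIndVar (the -d flag spelling, and that the prove pass is the caller, are assumptions here rather than facts stated in this file):

package p

// sum has the recognized shape: induction variable i with min = 0,
// non-constant max = len(a), and increment 1.
func sum(a []int) int {
	s := 0
	for i := 0; i < len(a); i++ {
		s += a[i]
	}
	return s
}

// Building with something like
//   go build -gcflags='-d=ssa/prove/debug=1' ./...
// is expected to print, per the printIndVar format above:
//   Induction variable: limits [0,?), increment 1
// where "?" stands for the non-constant bound len(a).
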
diff --git a/src/cmd/compile/internal/ssa/loopreschedchecks.go b/src/cmd/compile/internal/ssa/loopreschedchecks.go
new file mode 100644
index 0000000..738c626
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/loopreschedchecks.go
@@ -0,0 +1,500 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+)
+
+// an edgeMem records a backedge, together with the memory
+// phi functions at the target of the backedge that must
+// be updated when a rescheduling check replaces the backedge.
+type edgeMem struct {
+ e Edge
+ m *Value // phi for memory at dest of e
+}
+
+// a rewriteTarget is a value-argindex pair indicating
+// where a rewrite is applied. Note that this is for values,
+// not for block controls, because block controls are not targets
+// for the rewrites performed in inserting rescheduling checks.
+type rewriteTarget struct {
+ v *Value
+ i int
+}
+
+type rewrite struct {
+ before, after *Value // before is the expected value before rewrite, after is the new value installed.
+ rewrites []rewriteTarget // all the targets for this rewrite.
+}
+
+func (r *rewrite) String() string {
+ s := "\n\tbefore=" + r.before.String() + ", after=" + r.after.String()
+ for _, rw := range r.rewrites {
+ s += ", (i=" + fmt.Sprint(rw.i) + ", v=" + rw.v.LongString() + ")"
+ }
+ s += "\n"
+ return s
+}
+
+// insertLoopReschedChecks inserts rescheduling checks on loop backedges.
+func insertLoopReschedChecks(f *Func) {
+ // TODO: when split information is recorded in export data, insert checks only on backedges that can be reached on a split-call-free path.
+
+ // Loop reschedule checks compare the stack pointer with
+ // the per-g stack bound. If the pointer appears invalid,
+ // that means a reschedule check is needed.
+ //
+ // Steps:
+ // 1. locate backedges.
+ // 2. Record memory definitions at block end so that
+ // the SSA graph for mem can be properly modified.
+ // 3. Ensure that phi functions that will-be-needed for mem
+ // are present in the graph, initially with trivial inputs.
+ // 4. Record all to-be-modified uses of mem;
+ // apply modifications (split into two steps to simplify and
+	//      avoid nagging order-dependencies).
+ // 5. Rewrite backedges to include reschedule check,
+ // and modify destination phi function appropriately with new
+ // definitions for mem.
+
+ if f.NoSplit { // nosplit functions don't reschedule.
+ return
+ }
+
+ backedges := backedges(f)
+ if len(backedges) == 0 { // no backedges means no rescheduling checks.
+ return
+ }
+
+ lastMems := findLastMems(f)
+
+ idom := f.Idom()
+ po := f.postorder()
+ // The ordering in the dominator tree matters; it's important that
+ // the walk of the dominator tree also be a preorder (i.e., a node is
+ // visited only after all its non-backedge predecessors have been visited).
+ sdom := newSparseOrderedTree(f, idom, po)
+
+ if f.pass.debug > 1 {
+ fmt.Printf("before %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+
+ tofixBackedges := []edgeMem{}
+
+ for _, e := range backedges { // TODO: could filter here by calls in loops, if declared and inferred nosplit are recorded in export data.
+ tofixBackedges = append(tofixBackedges, edgeMem{e, nil})
+ }
+
+ // It's possible that there is no memory state (no global/pointer loads/stores or calls)
+ if lastMems[f.Entry.ID] == nil {
+ lastMems[f.Entry.ID] = f.Entry.NewValue0(f.Entry.Pos, OpInitMem, types.TypeMem)
+ }
+
+ memDefsAtBlockEnds := make([]*Value, f.NumBlocks()) // For each block, the mem def seen at its bottom. Could be from earlier block.
+
+ // Propagate last mem definitions forward through successor blocks.
+ for i := len(po) - 1; i >= 0; i-- {
+ b := po[i]
+ mem := lastMems[b.ID]
+ for j := 0; mem == nil; j++ { // if there's no def, then there's no phi, so the visible mem is identical in all predecessors.
+ // loop because there might be backedges that haven't been visited yet.
+ mem = memDefsAtBlockEnds[b.Preds[j].b.ID]
+ }
+ memDefsAtBlockEnds[b.ID] = mem
+ if f.pass.debug > 2 {
+ fmt.Printf("memDefsAtBlockEnds[%s] = %s\n", b, mem)
+ }
+ }
+
+ // Maps from block to newly-inserted phi function in block.
+ newmemphis := make(map[*Block]rewrite)
+
+ // Insert phi functions as necessary for future changes to flow graph.
+ for i, emc := range tofixBackedges {
+ e := emc.e
+ h := e.b
+
+ // find the phi function for the memory input at "h", if there is one.
+ var headerMemPhi *Value // look for header mem phi
+
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v.Type.IsMemory() {
+ headerMemPhi = v
+ }
+ }
+
+ if headerMemPhi == nil {
+			// if there is no header mem phi, make a trivial one using the mem def from the dominator
+ mem0 := memDefsAtBlockEnds[idom[h.ID].ID]
+ headerMemPhi = newPhiFor(h, mem0)
+ newmemphis[h] = rewrite{before: mem0, after: headerMemPhi}
+ addDFphis(mem0, h, h, f, memDefsAtBlockEnds, newmemphis, sdom)
+
+ }
+ tofixBackedges[i].m = headerMemPhi
+
+ }
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("before b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // dfPhiTargets notes inputs to phis in dominance frontiers that should not
+ // be rewritten as part of the dominated children of some outer rewrite.
+ dfPhiTargets := make(map[rewriteTarget]bool)
+
+ rewriteNewPhis(f.Entry, f.Entry, f, memDefsAtBlockEnds, newmemphis, dfPhiTargets, sdom)
+
+ if f.pass.debug > 0 {
+ for b, r := range newmemphis {
+ fmt.Printf("after b=%s, rewrite=%s\n", b, r.String())
+ }
+ }
+
+ // Apply collected rewrites.
+ for _, r := range newmemphis {
+ for _, rw := range r.rewrites {
+ rw.v.SetArg(rw.i, r.after)
+ }
+ }
+
+ // Rewrite backedges to include reschedule checks.
+ for _, emc := range tofixBackedges {
+ e := emc.e
+ headerMemPhi := emc.m
+ h := e.b
+ i := e.i
+ p := h.Preds[i]
+ bb := p.b
+ mem0 := headerMemPhi.Args[i]
+ // bb e->p h,
+ // Because we're going to insert a rare-call, make sure the
+ // looping edge still looks likely.
+ likely := BranchLikely
+ if p.i != 0 {
+ likely = BranchUnlikely
+ }
+ if bb.Kind != BlockPlain { // backedges can be unconditional. e.g., if x { something; continue }
+ bb.Likely = likely
+ }
+
+ // rewrite edge to include reschedule check
+ // existing edges:
+ //
+ // bb.Succs[p.i] == Edge{h, i}
+ // h.Preds[i] == p == Edge{bb,p.i}
+ //
+ // new block(s):
+ // test:
+ // if sp < g.limit { goto sched }
+ // goto join
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto join
+ // join:
+ // mem2 := phi(mem0, mem1)
+ // goto h
+ //
+ // and correct arg i of headerMemPhi and headerCtrPhi
+ //
+ // EXCEPT: join block containing only phi functions is bad
+ // for the register allocator. Therefore, there is no
+ // join, and branches targeting join must instead target
+ // the header, and the other phi functions within header are
+ // adjusted for the additional input.
+
+ test := f.NewBlock(BlockIf)
+ sched := f.NewBlock(BlockPlain)
+
+ test.Pos = bb.Pos
+ sched.Pos = bb.Pos
+
+ // if sp < g.limit { goto sched }
+ // goto header
+
+ cfgtypes := &f.Config.Types
+ pt := cfgtypes.Uintptr
+ g := test.NewValue1(bb.Pos, OpGetG, pt, mem0)
+ sp := test.NewValue0(bb.Pos, OpSP, pt)
+ cmpOp := OpLess64U
+ if pt.Size() == 4 {
+ cmpOp = OpLess32U
+ }
+ limaddr := test.NewValue1I(bb.Pos, OpOffPtr, pt, 2*pt.Size(), g)
+ lim := test.NewValue2(bb.Pos, OpLoad, pt, limaddr, mem0)
+ cmp := test.NewValue2(bb.Pos, cmpOp, cfgtypes.Bool, sp, lim)
+ test.SetControl(cmp)
+
+ // if true, goto sched
+ test.AddEdgeTo(sched)
+
+ // if false, rewrite edge to header.
+ // do NOT remove+add, because that will perturb all the other phi functions
+ // as well as messing up other edges to the header.
+ test.Succs = append(test.Succs, Edge{h, i})
+ h.Preds[i] = Edge{test, 1}
+ headerMemPhi.SetArg(i, mem0)
+
+ test.Likely = BranchUnlikely
+
+ // sched:
+ // mem1 := call resched (mem0)
+ // goto header
+ resched := f.fe.Syslook("goschedguarded")
+ // TODO(register args) -- will need more details
+ mem1 := sched.NewValue1A(bb.Pos, OpStaticCall, types.TypeMem, StaticAuxCall(resched, nil), mem0)
+ sched.AddEdgeTo(h)
+ headerMemPhi.AddArg(mem1)
+
+ bb.Succs[p.i] = Edge{test, 0}
+ test.Preds = append(test.Preds, Edge{bb, p.i})
+
+ // Must correct all the other phi functions in the header for new incoming edge.
+ // Except for mem phis, it will be the same value seen on the original
+ // backedge at index i.
+ for _, v := range h.Values {
+ if v.Op == OpPhi && v != headerMemPhi {
+ v.AddArg(v.Args[i])
+ }
+ }
+ }
+
+ f.invalidateCFG()
+
+ if f.pass.debug > 1 {
+ sdom = newSparseTree(f, f.Idom())
+ fmt.Printf("after %s = %s\n", f.Name, sdom.treestructure(f.Entry))
+ }
+}
+
+// newPhiFor inserts a new Phi function into b,
+// with all inputs set to v.
+func newPhiFor(b *Block, v *Value) *Value {
+ phiV := b.NewValue0(b.Pos, OpPhi, v.Type)
+
+ for range b.Preds {
+ phiV.AddArg(v)
+ }
+ return phiV
+}
+
+// rewriteNewPhis updates newphis[h] to record all places where the new phi function inserted
+// in block h will replace a previous definition. Block b is the block currently being processed;
+// if b has its own phi definition then it takes the place of h.
+// defsForUses provides information about other definitions of the variable that are present
+// (and if nil, indicates that the variable is no longer live)
+// sdom must yield a preorder of the flow graph if recursively walked, root-to-children.
+// The result of newSparseOrderedTree with order supplied by a dfs-postorder satisfies this
+// requirement.
+func rewriteNewPhis(h, b *Block, f *Func, defsForUses []*Value, newphis map[*Block]rewrite, dfPhiTargets map[rewriteTarget]bool, sdom SparseTree) {
+ // If b is a block with a new phi, then a new rewrite applies below it in the dominator tree.
+ if _, ok := newphis[b]; ok {
+ h = b
+ }
+ change := newphis[h]
+ x := change.before
+ y := change.after
+
+ // Apply rewrites to this block
+ if x != nil { // don't waste time on the common case of no definition.
+ p := &change.rewrites
+ for _, v := range b.Values {
+ if v == y { // don't rewrite self -- phi inputs are handled below.
+ continue
+ }
+ for i, w := range v.Args {
+ if w != x {
+ continue
+ }
+ tgt := rewriteTarget{v, i}
+
+ // It's possible dominated control flow will rewrite this instead.
+ // Visiting in preorder (a property of how sdom was constructed)
+ // ensures that these are seen in the proper order.
+ if dfPhiTargets[tgt] {
+ continue
+ }
+ *p = append(*p, tgt)
+ if f.pass.debug > 1 {
+ fmt.Printf("added block target for h=%v, b=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, x, y, v, i)
+ }
+ }
+ }
+
+ // Rewrite appropriate inputs of phis reached in successors
+ // in dominance frontier, self, and dominated.
+ // If the variable def reaching uses in b is itself defined in b, then the new phi function
+ // does not reach the successors of b. (This assumes a bit about the structure of the
+ // phi use-def graph, but it's true for memory.)
+ if dfu := defsForUses[b.ID]; dfu != nil && dfu.Block != b {
+ for _, e := range b.Succs {
+ s := e.b
+
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ tgt := rewriteTarget{v, e.i}
+ *p = append(*p, tgt)
+ dfPhiTargets[tgt] = true
+ if f.pass.debug > 1 {
+ fmt.Printf("added phi target for h=%v, b=%v, s=%v, x=%v, y=%v, tgt.v=%s, tgt.i=%d\n",
+ h, b, s, x, y, v.LongString(), e.i)
+ }
+ break
+ }
+ }
+ }
+ }
+ newphis[h] = change
+ }
+
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ rewriteNewPhis(h, c, f, defsForUses, newphis, dfPhiTargets, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// addDFphis creates new trivial phis that are necessary to correctly reflect (within SSA)
+// a new definition for variable "x" inserted at h (usually but not necessarily a phi).
+// These new phis can only occur at the dominance frontier of h; block s is in the dominance
+// frontier of h if h does not strictly dominate s and if s is a successor of a block b where
+// either b = h or h strictly dominates b.
+// These newly created phis are themselves new definitions that may require addition of their
+// own trivial phi functions in their own dominance frontier, and this is handled recursively.
+func addDFphis(x *Value, h, b *Block, f *Func, defForUses []*Value, newphis map[*Block]rewrite, sdom SparseTree) {
+ oldv := defForUses[b.ID]
+ if oldv != x { // either a new definition replacing x, or nil if it is proven that there are no uses reachable from b
+ return
+ }
+ idom := f.Idom()
+outer:
+ for _, e := range b.Succs {
+ s := e.b
+ // check phi functions in the dominance frontier
+ if sdom.isAncestor(h, s) {
+ continue // h dominates s, successor of b, therefore s is not in the frontier.
+ }
+ if _, ok := newphis[s]; ok {
+ continue // successor s of b already has a new phi function, so there is no need to add another.
+ }
+ if x != nil {
+ for _, v := range s.Values {
+ if v.Op == OpPhi && v.Args[e.i] == x {
+ continue outer // successor s of b has an old phi function, so there is no need to add another.
+ }
+ }
+ }
+
+ old := defForUses[idom[s.ID].ID] // new phi function is correct-but-redundant, combining value "old" on all inputs.
+ headerPhi := newPhiFor(s, old)
+ // the new phi will replace "old" in block s and all blocks dominated by s.
+ newphis[s] = rewrite{before: old, after: headerPhi} // record new phi, to have inputs labeled "old" rewritten to "headerPhi"
+ addDFphis(old, s, s, f, defForUses, newphis, sdom) // the new definition may also create new phi functions.
+ }
+ for c := sdom[b.ID].child; c != nil; c = sdom[c.ID].sibling {
+ addDFphis(x, h, c, f, defForUses, newphis, sdom) // TODO: convert to explicit stack from recursion.
+ }
+}
+
+// findLastMems maps block ids to the last memory-output op in each block, if any.
+func findLastMems(f *Func) []*Value {
+
+ var stores []*Value
+ lastMems := make([]*Value, f.NumBlocks())
+ storeUse := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(storeUse)
+ for _, b := range f.Blocks {
+ // Find all the stores in this block. Categorize their uses:
+ // storeUse contains stores which are used by a subsequent store.
+ storeUse.clear()
+ stores = stores[:0]
+ var memPhi *Value
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Type.IsMemory() {
+ memPhi = v
+ }
+ continue
+ }
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ for _, a := range v.Args {
+ if a.Block == b && a.Type.IsMemory() {
+ storeUse.add(a.ID)
+ }
+ }
+ }
+ }
+ if len(stores) == 0 {
+ lastMems[b.ID] = memPhi
+ continue
+ }
+
+ // find last store in the block
+ var last *Value
+ for _, v := range stores {
+ if storeUse.contains(v.ID) {
+ continue
+ }
+ if last != nil {
+ b.Fatalf("two final stores - simultaneous live stores %s %s", last, v)
+ }
+ last = v
+ }
+ if last == nil {
+ b.Fatalf("no last store found - cycle?")
+ }
+ lastMems[b.ID] = last
+ }
+ return lastMems
+}
+
+// markKind tracks the DFS state of a block during the backedge search.
+type markKind uint8
+
+const (
+ notFound markKind = iota // block has not been discovered yet
+ notExplored // discovered and in queue, outedges not processed yet
+ explored // discovered and in queue, outedges processed
+ done // all done, in output ordering
+)
+
+type backedgesState struct {
+ b *Block
+ i int
+}
+
+// backedges returns a slice of successor edges that are back
+// edges. For reducible loops, edge.b is the header.
+func backedges(f *Func) []Edge {
+ edges := []Edge{}
+ mark := make([]markKind, f.NumBlocks())
+ stack := []backedgesState{}
+
+ mark[f.Entry.ID] = notExplored
+ stack = append(stack, backedgesState{f.Entry, 0})
+
+ for len(stack) > 0 {
+ l := len(stack)
+ x := stack[l-1]
+ if x.i < len(x.b.Succs) {
+ e := x.b.Succs[x.i]
+ stack[l-1].i++
+ s := e.b
+ if mark[s.ID] == notFound {
+ mark[s.ID] = notExplored
+ stack = append(stack, backedgesState{s, 0})
+ } else if mark[s.ID] == notExplored {
+ edges = append(edges, e)
+ }
+ } else {
+ mark[x.b.ID] = done
+ stack = stack[0 : l-1]
+ }
+ }
+ return edges
+}
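
The backedges helper above is a plain iterative DFS: a successor edge is a backedge exactly when its target is still on the DFS stack (the notExplored mark). The same idea on a bare adjacency-list graph, as an illustrative sketch:

package main

import "fmt"

type mark uint8

const (
	notSeen  mark = iota // block not discovered yet
	onStack              // discovered, still on the DFS stack
	finished             // all successors processed
)

// backEdges returns the (from, to) pairs whose target is still on the
// DFS stack when the edge is examined -- the same test the pass applies.
func backEdges(succs [][]int, entry int) [][2]int {
	type frame struct{ b, i int }
	marks := make([]mark, len(succs))
	stack := []frame{{entry, 0}}
	marks[entry] = onStack
	var edges [][2]int
	for len(stack) > 0 {
		top := &stack[len(stack)-1]
		if top.i < len(succs[top.b]) {
			s := succs[top.b][top.i]
			top.i++
			switch marks[s] {
			case notSeen:
				marks[s] = onStack
				stack = append(stack, frame{s, 0})
			case onStack:
				edges = append(edges, [2]int{top.b, s})
			}
		} else {
			marks[top.b] = finished
			stack = stack[:len(stack)-1]
		}
	}
	return edges
}

func main() {
	// 0 -> 1, 1 -> 2, 2 -> {1, 3}: the edge 2 -> 1 closes the loop.
	g := [][]int{{1}, {2}, {1, 3}, {}}
	fmt.Println(backEdges(g, 0)) // [[2 1]]
}
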
diff --git a/src/cmd/compile/internal/ssa/looprotate.go b/src/cmd/compile/internal/ssa/looprotate.go
new file mode 100644
index 0000000..35010a7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/looprotate.go
@@ -0,0 +1,111 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// loopRotate converts loops with a check-loop-condition-at-beginning
+// to loops with a check-loop-condition-at-end.
+// This helps loops avoid extra unnecessary jumps.
+//
+// loop:
+// CMPQ ...
+// JGE exit
+// ...
+// JMP loop
+// exit:
+//
+// JMP entry
+// loop:
+// ...
+// entry:
+// CMPQ ...
+// JLT loop
+func loopRotate(f *Func) {
+ loopnest := f.loopnest()
+ if loopnest.hasIrreducible {
+ return
+ }
+ if len(loopnest.loops) == 0 {
+ return
+ }
+
+ idToIdx := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ idToIdx[b.ID] = i
+ }
+
+ // Set of blocks we're moving, by ID.
+ move := map[ID]struct{}{}
+
+ // Map from block ID to the moving blocks that should
+ // come right after it.
+ after := map[ID][]*Block{}
+
+ // Check each loop header and decide if we want to move it.
+ for _, loop := range loopnest.loops {
+ b := loop.header
+ var p *Block // b's in-loop predecessor
+ for _, e := range b.Preds {
+ if e.b.Kind != BlockPlain {
+ continue
+ }
+ if loopnest.b2l[e.b.ID] != loop {
+ continue
+ }
+ p = e.b
+ }
+ if p == nil || p == b {
+ continue
+ }
+ after[p.ID] = []*Block{b}
+ for {
+ nextIdx := idToIdx[b.ID] + 1
+ if nextIdx >= len(f.Blocks) { // reached end of function (maybe impossible?)
+ break
+ }
+ nextb := f.Blocks[nextIdx]
+ if nextb == p { // original loop predecessor is next
+ break
+ }
+ if loopnest.b2l[nextb.ID] == loop {
+ after[p.ID] = append(after[p.ID], nextb)
+ }
+ b = nextb
+ }
+ // Swap b and p so that we'll handle p before b when moving blocks.
+ f.Blocks[idToIdx[loop.header.ID]] = p
+ f.Blocks[idToIdx[p.ID]] = loop.header
+ idToIdx[loop.header.ID], idToIdx[p.ID] = idToIdx[p.ID], idToIdx[loop.header.ID]
+
+ // Place b after p.
+ for _, b := range after[p.ID] {
+ move[b.ID] = struct{}{}
+ }
+ }
+
+ // Move blocks to their destinations in a single pass.
+ // We rely here on the fact that loop headers must come
+ // before the rest of the loop. And that relies on the
+ // fact that we only identify reducible loops.
+ j := 0
+	// Some blocks that are not part of a loop may be placed
+	// between loop blocks. To avoid overwriting these blocks,
+	// use a temporary slice.
+ newOrder := make([]*Block, 0, f.NumBlocks())
+ for _, b := range f.Blocks {
+ if _, ok := move[b.ID]; ok {
+ continue
+ }
+ newOrder = append(newOrder, b)
+ j++
+ for _, a := range after[b.ID] {
+ newOrder = append(newOrder, a)
+ j++
+ }
+ }
+ if j != len(f.Blocks) {
+ f.Fatalf("bad reordering in looprotate")
+ }
+ f.Blocks = newOrder
+}
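
The re-emission loop at the end of loopRotate is easiest to see in isolation: blocks tagged in move are skipped where they originally sat and re-emitted right after the block recorded for them in after. A toy sketch of just that step (it deliberately omits the header/predecessor position swap the real pass also performs, so it is a simplification, not the pass itself):

package main

import "fmt"

func main() {
	// Original layout: loop header b2 precedes b4, the in-loop
	// predecessor whose edge back to b2 is the backedge.
	blocks := []string{"b1", "b2", "b3", "b4", "b5"}

	// Decision phase: the header and loop body should follow b4.
	move := map[string]bool{"b2": true, "b3": true}
	after := map[string][]string{"b4": {"b2", "b3"}}

	// Single-pass re-emission into a fresh slice, as in loopRotate.
	newOrder := make([]string, 0, len(blocks))
	for _, b := range blocks {
		if move[b] {
			continue
		}
		newOrder = append(newOrder, b)
		newOrder = append(newOrder, after[b]...)
	}
	fmt.Println(newOrder) // [b1 b4 b2 b3 b5]
}
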
diff --git a/src/cmd/compile/internal/ssa/lower.go b/src/cmd/compile/internal/ssa/lower.go
new file mode 100644
index 0000000..5760c35
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/lower.go
@@ -0,0 +1,44 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// convert to machine-dependent ops
+func lower(f *Func) {
+ // repeat rewrites until we find no more rewrites
+ applyRewrite(f, f.Config.lowerBlock, f.Config.lowerValue, removeDeadValues)
+}
+
+// checkLower checks for unlowered opcodes and fails if we find one.
+func checkLower(f *Func) {
+ // Needs to be a separate phase because it must run after both
+ // lowering and a subsequent dead code elimination (because lowering
+ // rules may leave dead generic ops behind).
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !opcodeTable[v.Op].generic {
+ continue // lowered
+ }
+ switch v.Op {
+ case OpSP, OpSB, OpInitMem, OpArg, OpArgIntReg, OpArgFloatReg, OpPhi, OpVarDef, OpVarKill, OpVarLive, OpKeepAlive, OpSelect0, OpSelect1, OpSelectN, OpConvert, OpInlMark:
+ continue // ok not to lower
+ case OpMakeResult:
+ if b.Controls[0] == v {
+ continue
+ }
+ case OpGetG:
+ if f.Config.hasGReg {
+ // has hardware g register, regalloc takes care of it
+ continue // ok not to lower
+ }
+ }
+ s := "not lowered: " + v.String() + ", " + v.Op.String() + " " + v.Type.SimpleString()
+
+ for _, a := range v.Args {
+ s += " " + a.Type.SimpleString()
+ }
+ f.Fatalf("%s", s)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/magic.go b/src/cmd/compile/internal/ssa/magic.go
new file mode 100644
index 0000000..93f8801
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic.go
@@ -0,0 +1,424 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "math/bits"
+)
+
+// So you want to compute x / c for some constant c?
+// Machine division instructions are slow, so we try to
+// compute this division with a multiplication + a few
+// other cheap instructions instead.
+// (We assume here that c != 0, +/- 1, or +/- 2^i. Those
+// cases are easy to handle in different ways).
+
+// Technique from https://gmplib.org/~tege/divcnst-pldi94.pdf
+
+// First consider unsigned division.
+// Our strategy is to precompute 1/c then do
+// ⎣x / c⎦ = ⎣x * (1/c)⎦.
+// 1/c is less than 1, so we can't compute it directly in
+// integer arithmetic. Let's instead compute 2^e/c
+// for a value of e TBD (^ = exponentiation). Then
+// ⎣x / c⎦ = ⎣x * (2^e/c) / 2^e⎦.
+// Dividing by 2^e is easy. 2^e/c isn't an integer, unfortunately.
+// So we must approximate it. Let's call its approximation m.
+// We'll then compute
+// ⎣x * m / 2^e⎦
+// Which we want to be equal to ⎣x / c⎦ for 0 <= x < 2^n-1
+// where n is the word size.
+// Setting x = c gives us c * m >= 2^e.
+// We'll choose m = ⎡2^e/c⎤ to satisfy that equation.
+// What remains is to choose e.
+// Let m = 2^e/c + delta, 0 <= delta < 1
+// ⎣x * (2^e/c + delta) / 2^e⎦
+// ⎣x / c + x * delta / 2^e⎦
+// We must have x * delta / 2^e < 1/c so that this
+// additional term never rounds differently than ⎣x / c⎦ does.
+// Rearranging,
+// 2^e > x * delta * c
+// x can be at most 2^n-1 and delta can be at most 1.
+// So it is sufficient to have 2^e >= 2^n*c.
+// So we'll choose e = n + s, with s = ⎡log2(c)⎤.
+//
+// An additional complication arises because m has n+1 bits in it.
+// Hardware restricts us to n bit by n bit multiplies.
+// We divide into 3 cases:
+//
+// Case 1: m is even.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^(n+s-1)⎦
+// ⎣x / c⎦ = ⎣x * (m/2) / 2^n / 2^(s-1)⎦
+// ⎣x / c⎦ = ⎣⎣x * (m/2) / 2^n⎦ / 2^(s-1)⎦
+// multiply + shift
+//
+// Case 2: c is even.
+// ⎣x / c⎦ = ⎣(x/2) / (c/2)⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ / (c/2)⎦
+// This is just the original problem, with x' = ⎣x/2⎦, c' = c/2, n' = n-1.
+// s' = s-1
+// m' = ⎡2^(n'+s')/c'⎤
+// = ⎡2^(n+s-1)/c⎤
+// = ⎡m/2⎤
+// ⎣x / c⎦ = ⎣x' * m' / 2^(n'+s')⎦
+// ⎣x / c⎦ = ⎣⎣x/2⎦ * ⎡m/2⎤ / 2^(n+s-2)⎦
+// ⎣x / c⎦ = ⎣⎣⎣x/2⎦ * ⎡m/2⎤ / 2^n⎦ / 2^(s-2)⎦
+// shift + multiply + shift
+//
+// Case 3: everything else
+// let k = m - 2^n. k fits in n bits.
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣x * (2^n + k) / 2^(n+s)⎦
+// ⎣x / c⎦ = ⎣(x + x * k / 2^n) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣(x + ⎣x * k / 2^n⎦) / 2^s⎦
+// ⎣x / c⎦ = ⎣⎣(x + ⎣x * k / 2^n⎦) / 2⎦ / 2^(s-1)⎦
+// multiply + avg + shift
+//
+// These can be implemented in hardware using:
+// ⎣a * b / 2^n⎦ - aka high n bits of an n-bit by n-bit multiply.
+// ⎣(a+b) / 2⎦ - aka "average" of two n-bit numbers.
+// (Not just a regular add & shift because the intermediate result
+// a+b has n+1 bits in it. Nevertheless, can be done
+// in 2 instructions on x86.)
+
+// umagicOK reports whether we should strength reduce an n-bit divide by c.
+func umagicOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+// umagicOKn reports whether we should strength reduce an unsigned n-bit divide by c.
+// We can strength reduce when c != 0 and c is not a power of two.
+func umagicOK8(c int8) bool { return c&(c-1) != 0 }
+func umagicOK16(c int16) bool { return c&(c-1) != 0 }
+func umagicOK32(c int32) bool { return c&(c-1) != 0 }
+func umagicOK64(c int64) bool { return c&(c-1) != 0 }
+
+type umagicData struct {
+ s int64 // ⎡log2(c)⎤
+ m uint64 // ⎡2^(n+s)/c⎤ - 2^n
+}
+
+// umagic computes the constants needed to strength reduce unsigned n-bit divides by the constant uint64(c).
+// The return values satisfy for all 0 <= x < 2^n
+// floor(x / uint64(c)) = x * (m + 2^n) >> (n+s)
+func umagic(n uint, c int64) umagicData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ C := new(big.Int).SetUint64(d)
+ s := C.BitLen()
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 1 {
+ panic("n+1st bit isn't set")
+ }
+ M.SetBit(M, int(n), 0)
+ m := M.Uint64()
+ return umagicData{s: int64(s), m: m}
+}
+
+func umagic8(c int8) umagicData { return umagic(8, int64(c)) }
+func umagic16(c int16) umagicData { return umagic(16, int64(c)) }
+func umagic32(c int32) umagicData { return umagic(32, int64(c)) }
+func umagic64(c int64) umagicData { return umagic(64, c) }
+
+// For signed division, we use a similar strategy.
+// First, we enforce a positive c.
+// x / c = -(x / (-c))
+// This will require an additional Neg op for c<0.
+//
+// If x is positive we're in a very similar state
+// to the unsigned case above. We define:
+// s = ⎡log2(c)⎤-1
+// m = ⎡2^(n+s)/c⎤
+// Then
+// ⎣x / c⎦ = ⎣x * m / 2^(n+s)⎦
+// If x is negative we have
+// ⎡x / c⎤ = ⎣x * m / 2^(n+s)⎦ + 1
+// (TODO: derivation?)
+//
+// The multiply is a bit odd, as it is a signed n-bit value
+// times an unsigned n-bit value. For n smaller than the
+// word size, we can extend x and m appropriately and use the
+// signed multiply instruction. For n == word size,
+// we must use the signed multiply high and correct
+// the result by adding x*2^n.
+//
+// Adding 1 if x<0 is done by subtracting x>>(n-1).
+
+func smagicOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+// smagicOKn reports whether we should strength reduce a signed n-bit divide by c.
+func smagicOK8(c int8) bool { return smagicOK(8, int64(c)) }
+func smagicOK16(c int16) bool { return smagicOK(16, int64(c)) }
+func smagicOK32(c int32) bool { return smagicOK(32, int64(c)) }
+func smagicOK64(c int64) bool { return smagicOK(64, c) }
+
+type smagicData struct {
+ s int64 // ⎡log2(c)⎤-1
+ m uint64 // ⎡2^(n+s)/c⎤
+}
+
+// smagic computes the constants needed to strength reduce signed n-bit divides by the constant c.
+// Must have c>0.
+// The return values satisfy for all -2^(n-1) <= x < 2^(n-1)
+// trunc(x / c) = x * m >> (n+s) + (x < 0 ? 1 : 0)
+func smagic(n uint, c int64) smagicData {
+ C := new(big.Int).SetInt64(c)
+ s := C.BitLen() - 1
+ M := big.NewInt(1)
+ M.Lsh(M, n+uint(s)) // 2^(n+s)
+ M.Add(M, C) // 2^(n+s)+c
+ M.Sub(M, big.NewInt(1)) // 2^(n+s)+c-1
+ M.Div(M, C) // ⎡2^(n+s)/c⎤
+ if M.Bit(int(n)) != 0 {
+ panic("n+1st bit is set")
+ }
+ if M.Bit(int(n-1)) == 0 {
+ panic("nth bit is not set")
+ }
+ m := M.Uint64()
+ return smagicData{s: int64(s), m: m}
+}
+
+func smagic8(c int8) smagicData { return smagic(8, int64(c)) }
+func smagic16(c int16) smagicData { return smagic(16, int64(c)) }
+func smagic32(c int32) smagicData { return smagic(32, int64(c)) }
+func smagic64(c int64) smagicData { return smagic(64, c) }
+
+// Divisibility x%c == 0 can be checked more efficiently than directly computing
+// the modulus x%c and comparing against 0.
+//
+// The same "Division by invariant integers using multiplication" paper
+// by Granlund and Montgomery referenced above briefly mentions this method
+// and it is further elaborated in "Hacker's Delight" by Warren Section 10-17
+//
+// The first thing to note is that for odd integers, exact division can be computed
+// by using the modular inverse with respect to the word size 2^n.
+//
+// Given c, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from 0, c, 2c, 3c, ... ⎣(2^n - 1)/c⎦ * c the maximum multiple
+// Thus, x*m mod 2^n is 0, 1, 2, 3, ... ⎣(2^n - 1)/c⎦
+// i.e. the quotient takes all values from zero up to max = ⎣(2^n - 1)/c⎦
+//
+// If x is not divisible by c, then x*m mod 2^n must take some larger value than max.
+//
+// This gives x*m mod 2^n <= ⎣(2^n - 1)/c⎦ as a test for divisibility
+// involving one multiplication and compare.
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+// For d0, the test is the same as above. Let m be such that m*d0 mod 2^n == 1
+// Then x*m mod 2^n <= ⎣(2^n - 1)/d0⎦ is the first test.
+// The test for divisibility by 2^k is a check for k trailing zeroes.
+// Note that since d0 is odd, m is odd and thus x*m will have the same number of
+// trailing zeroes as x. So the two tests are,
+//
+// x*m mod 2^n <= ⎣(2^n - 1)/d0⎦
+// and x*m ends in k zero bits
+//
+// These can be combined into a single comparison by the following
+// (theorem ZRU in Hacker's Delight) for unsigned integers.
+//
+// x <= a and x ends in k zero bits if and only if RotRight(x, k) <= ⎣a/(2^k)⎦
+// Where RotRight(x, k) is right rotation of x by k bits.
+//
+// To prove the first direction, x <= a -> ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+// But since x ends in k zeroes all the rotated bits would be zero too.
+// So RotRight(x, k) == ⎣x/(2^k)⎦ <= ⎣a/(2^k)⎦
+//
+// If x does not end in k zero bits, then RotRight(x, k)
+// has some non-zero bits in the k highest bits.
+// ⎣x/(2^k)⎦ has all zeroes in the k highest bits,
+// so RotRight(x, k) > ⎣x/(2^k)⎦
+//
+// Finally, if x > a and has k trailing zero bits, then RotRight(x, k) == ⎣x/(2^k)⎦
+// and ⎣x/(2^k)⎦ must be greater than ⎣a/(2^k)⎦, that is the top n-k bits of x must
+// be greater than the top n-k bits of a because the rest of x bits are zero.
+//
+// So the two conditions above can be replaced with the single test
+//
+// RotRight(x*m mod 2^n, k) <= ⎣(2^n - 1)/c⎦
+//
+// Where d0*2^k was replaced by c on the right hand side.
+
+// udivisibleOK reports whether we should strength reduce an unsigned n-bit divisibility check by c.
+func udivisibleOK(n uint, c int64) bool {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ // Doesn't work for 0.
+ // Don't use for powers of 2.
+ return d&(d-1) != 0
+}
+
+func udivisibleOK8(c int8) bool { return udivisibleOK(8, int64(c)) }
+func udivisibleOK16(c int16) bool { return udivisibleOK(16, int64(c)) }
+func udivisibleOK32(c int32) bool { return udivisibleOK(32, int64(c)) }
+func udivisibleOK64(c int64) bool { return udivisibleOK(64, c) }
+
+type udivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+	max uint64 // ⎣(2^n - 1)/c⎦ max value for divisibility
+}
+
+func udivisible(n uint, c int64) udivisibleData {
+ // Convert from ConstX auxint values to the real uint64 constant they represent.
+ d := uint64(c) << (64 - n) >> (64 - n)
+
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ max := mask / d
+
+ return udivisibleData{
+ k: int64(k),
+ m: m,
+ max: max,
+ }
+}
+
+func udivisible8(c int8) udivisibleData { return udivisible(8, int64(c)) }
+func udivisible16(c int16) udivisibleData { return udivisible(16, int64(c)) }
+func udivisible32(c int32) udivisibleData { return udivisible(32, int64(c)) }
+func udivisible64(c int64) udivisibleData { return udivisible(64, c) }
+
+// For signed integers, a similar method follows.
+//
+// Given c > 1 and odd, compute m such that (c * m) mod 2^n == 1
+// Then if c divides x (x%c ==0), the quotient is given by q = x/c == x*m mod 2^n
+//
+// x can range from ⎡-2^(n-1)/c⎤ * c, ... -c, 0, c, ... ⎣(2^(n-1) - 1)/c⎦ * c
+// Thus, x*m mod 2^n is ⎡-2^(n-1)/c⎤, ... -2, -1, 0, 1, 2, ... ⎣(2^(n-1) - 1)/c⎦
+//
+// So, x is a multiple of c if and only if:
+// ⎡-2^(n-1)/c⎤ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// Since c > 1 and odd, this can be simplified by
+// ⎡-2^(n-1)/c⎤ == ⎡(-2^(n-1) + 1)/c⎤ == -⎣(2^(n-1) - 1)/c⎦
+//
+// -⎣(2^(n-1) - 1)/c⎦ <= x*m mod 2^n <= ⎣(2^(n-1) - 1)/c⎦
+//
+// To extend this to even integers, consider c = d0 * 2^k where d0 is odd.
+// We can test whether x is divisible by both d0 and 2^k.
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n. Then c divides x if:
+//
+// -⎣(2^(n-1) - 1)/d0⎦ <= q <= ⎣(2^(n-1) - 1)/d0⎦ and q ends in at least k 0-bits
+//
+// To transform this to a single comparison, we use the following theorem (ZRS in Hacker's Delight).
+//
+// For a >= 0 the following conditions are equivalent:
+// 1) -a <= x <= a and x ends in at least k 0-bits
+// 2) RotRight(x+a', k) <= ⎣2a'/2^k⎦
+//
+// Where a' = a & -2^k (a with its right k bits set to zero)
+//
+// To see that 1 & 2 are equivalent, note that -a <= x <= a is equivalent to
+// -a' <= x <= a' if and only if x ends in at least k 0-bits. Adding a' to each side gives,
+// 0 <= x + a' <= 2a' and x + a' ends in at least k 0-bits if and only if x does since a' has
+// k 0-bits by definition. We can use theorem ZRU above with x -> x + a' and a -> 2a' giving 1) == 2).
+//
+// Let m be such that (d0 * m) mod 2^n == 1.
+// Let q = x*m mod 2^n.
+// Let a' = ⎣(2^(n-1) - 1)/d0⎦ & -2^k
+//
+// Then the divisibility test is:
+//
+// RotRight(q+a', k) <= ⎣2a'/2^k⎦
+//
+// Note that the calculation is performed using unsigned integers.
+// Since a' can have n-1 bits, 2a' may have n bits and there is no risk of overflow.
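+//
+// For example, for n=8 and c=6 again: k=1, d0=3, m=171, a' = ⎣127/3⎦ & -2 == 42,
+// and ⎣2a'/2^k⎦ == 42. For x=-18, (-18*171 + 42) mod 256 == 36 and
+// RotRight(36, 1) == 18 <= 42, so -18 is divisible by 6. For x=-20,
+// (-20*171 + 42) mod 256 == 206 and RotRight(206, 1) == 103 > 42, so -20 is not.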
+
+// sdivisibleOK reports whether we should strength reduce a signed n-bit divisibility check by c.
+func sdivisibleOK(n uint, c int64) bool {
+ if c < 0 {
+ // Doesn't work for negative c.
+ return false
+ }
+ // Doesn't work for 0.
+ // Don't use it for powers of 2.
+ return c&(c-1) != 0
+}
+
+func sdivisibleOK8(c int8) bool { return sdivisibleOK(8, int64(c)) }
+func sdivisibleOK16(c int16) bool { return sdivisibleOK(16, int64(c)) }
+func sdivisibleOK32(c int32) bool { return sdivisibleOK(32, int64(c)) }
+func sdivisibleOK64(c int64) bool { return sdivisibleOK(64, c) }
+
+type sdivisibleData struct {
+ k int64 // trailingZeros(c)
+ m uint64 // m * (c>>k) mod 2^n == 1 multiplicative inverse of odd portion modulo 2^n
+ a uint64 // ⎣(2^(n-1) - 1)/ (c>>k)⎦ & -(1<<k) additive constant
+	max uint64 // ⎣(2 a) / (1<<k)⎦ max value for divisibility
+}
+
+func sdivisible(n uint, c int64) sdivisibleData {
+ d := uint64(c)
+ k := bits.TrailingZeros64(d)
+ d0 := d >> uint(k) // the odd portion of the divisor
+
+ mask := ^uint64(0) >> (64 - n)
+
+ // Calculate the multiplicative inverse via Newton's method.
+ // Quadratic convergence doubles the number of correct bits per iteration.
+ m := d0 // initial guess correct to 3-bits d0*d0 mod 8 == 1
+ m = m * (2 - m*d0) // 6-bits
+ m = m * (2 - m*d0) // 12-bits
+ m = m * (2 - m*d0) // 24-bits
+ m = m * (2 - m*d0) // 48-bits
+ m = m * (2 - m*d0) // 96-bits >= 64-bits
+ m = m & mask
+
+ a := ((mask >> 1) / d0) & -(1 << uint(k))
+ max := (2 * a) >> uint(k)
+
+ return sdivisibleData{
+ k: int64(k),
+ m: m,
+ a: a,
+ max: max,
+ }
+}
+
+func sdivisible8(c int8) sdivisibleData { return sdivisible(8, int64(c)) }
+func sdivisible16(c int16) sdivisibleData { return sdivisible(16, int64(c)) }
+func sdivisible32(c int32) sdivisibleData { return sdivisible(32, int64(c)) }
+func sdivisible64(c int64) sdivisibleData { return sdivisible(64, c) }
diff --git a/src/cmd/compile/internal/ssa/magic_test.go b/src/cmd/compile/internal/ssa/magic_test.go
new file mode 100644
index 0000000..7c6009d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/magic_test.go
@@ -0,0 +1,410 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math/big"
+ "testing"
+)
+
+func TestMagicExhaustive8(t *testing.T) {
+ testMagicExhaustive(t, 8)
+}
+func TestMagicExhaustive8U(t *testing.T) {
+ testMagicExhaustiveU(t, 8)
+}
+func TestMagicExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustive(t, 16)
+}
+func TestMagicExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testMagicExhaustiveU(t, 16)
+}
+
+// exhaustive test of magic for n bits
+func testMagicExhaustive(t *testing.T, n uint) {
+ min := -int64(1) << (n - 1)
+ max := int64(1) << (n - 1)
+ for c := int64(1); c < max; c++ {
+ if !smagicOK(n, int64(c)) {
+ continue
+ }
+ m := int64(smagic(n, c).m)
+ s := smagic(n, c).s
+ for i := min; i < max; i++ {
+ want := i / c
+ got := (i * m) >> (n + uint(s))
+ if i < 0 {
+ got++
+ }
+ if want != got {
+ t.Errorf("signed magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+func testMagicExhaustiveU(t *testing.T, n uint) {
+ max := uint64(1) << n
+ for c := uint64(1); c < max; c++ {
+ if !umagicOK(n, int64(c)) {
+ continue
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+ for i := uint64(0); i < max; i++ {
+ want := i / c
+ got := (i * (max + m)) >> (n + uint(s))
+ if want != got {
+ t.Errorf("unsigned magic wrong for %d / %d: got %d, want %d (m=%d,s=%d)\n", i, c, got, want, m, s)
+ }
+ }
+ }
+}
+
+func TestMagicUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !umagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := umagic(n, int64(c)).m
+ s := umagic(n, int64(c)).s
+
+ C := new(big.Int).SetUint64(c)
+ M := new(big.Int).SetUint64(m)
+ M.Add(M, TwoN)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("umagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func TestMagicSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !smagicOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ m := smagic(n, int64(c)).m
+ s := smagic(n, int64(c)).s
+
+ C := new(big.Int).SetInt64(c)
+ M := new(big.Int).SetUint64(m)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ Want := new(big.Int).Quo(X, C)
+ Got := new(big.Int).Mul(X, M)
+ Got.Rsh(Got, n+uint(s))
+ if x < 0 {
+ Got.Add(Got, One)
+ }
+ if Want.Cmp(Got) != 0 {
+ t.Errorf("smagic for %d/%d n=%d doesn't work, got=%s, want %s\n", x, c, n, Got, Want)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustiveU(t *testing.T, n uint) {
+ maxU := uint64(1) << n
+ for c := uint64(1); c < maxU; c++ {
+ if !udivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := uint64(0); i < maxU; i++ {
+ want := i%c == 0
+ mul := (i * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", i, c, got, want, k, m, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8U(t *testing.T) {
+ testDivisibleExhaustiveU(t, 8)
+}
+
+func TestDivisibleExhaustive16U(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustiveU(t, 16)
+}
+
+func TestDivisibleUnsigned(t *testing.T) {
+ One := new(big.Int).SetUint64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoN := new(big.Int).Lsh(One, n)
+ Max := new(big.Int).Sub(TwoN, One)
+ for _, c := range [...]uint64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<8 - 1,
+ 1<<8 + 1,
+ 1<<16 - 1,
+ 1<<16 + 1,
+ 1<<32 - 1,
+ 1<<32 + 1,
+ 1<<64 - 1,
+ } {
+ if c>>n != 0 {
+ continue // c too large for the given n.
+ }
+ if !udivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := udivisible(n, int64(c)).k
+ m := udivisible(n, int64(c)).m
+ max := udivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetUint64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Uint64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]uint64{0, 1,
+ c - 1, c, c + 1,
+ 2*c - 1, 2 * c, 2*c + 1,
+ mul - 1, mul, mul + 1,
+ uint64(1)<<n - 1,
+ } {
+ X := new(big.Int).SetUint64(x)
+ if X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (x * m) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("unsigned divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,max=%d)\n", x, c, got, want, k, m, max)
+ }
+ }
+ }
+ }
+}
+
+func testDivisibleExhaustive(t *testing.T, n uint) {
+ minI := -int64(1) << (n - 1)
+ maxI := int64(1) << (n - 1)
+ for c := int64(1); c < maxI; c++ {
+ if !sdivisibleOK(n, int64(c)) {
+ continue
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+ for i := minI; i < maxI; i++ {
+ want := i%c == 0
+ mul := (uint64(i)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", i, c, got, want, k, m, a, max)
+ }
+ }
+ }
+}
+
+func TestDivisibleExhaustive8(t *testing.T) {
+ testDivisibleExhaustive(t, 8)
+}
+
+func TestDivisibleExhaustive16(t *testing.T) {
+ if testing.Short() {
+ t.Skip("slow test; skipping")
+ }
+ testDivisibleExhaustive(t, 16)
+}
+
+func TestDivisibleSigned(t *testing.T) {
+ One := new(big.Int).SetInt64(1)
+ for _, n := range [...]uint{8, 16, 32, 64} {
+ TwoNMinusOne := new(big.Int).Lsh(One, n-1)
+ Max := new(big.Int).Sub(TwoNMinusOne, One)
+ Min := new(big.Int).Neg(TwoNMinusOne)
+ for _, c := range [...]int64{
+ 3,
+ 5,
+ 6,
+ 7,
+ 9,
+ 10,
+ 11,
+ 12,
+ 13,
+ 14,
+ 15,
+ 17,
+ 1<<7 - 1,
+ 1<<7 + 1,
+ 1<<15 - 1,
+ 1<<15 + 1,
+ 1<<31 - 1,
+ 1<<31 + 1,
+ 1<<63 - 1,
+ } {
+ if c>>(n-1) != 0 {
+ continue // not appropriate for the given n.
+ }
+ if !sdivisibleOK(n, int64(c)) {
+ t.Errorf("expected n=%d c=%d to pass\n", n, c)
+ }
+ k := sdivisible(n, int64(c)).k
+ m := sdivisible(n, int64(c)).m
+ a := sdivisible(n, int64(c)).a
+ max := sdivisible(n, int64(c)).max
+ mask := ^uint64(0) >> (64 - n)
+
+ C := new(big.Int).SetInt64(c)
+
+ // Find largest multiple of c.
+ Mul := new(big.Int).Div(Max, C)
+ Mul.Mul(Mul, C)
+ mul := Mul.Int64()
+
+ // Try some input values, mostly around multiples of c.
+ for _, x := range [...]int64{
+ -1, 1,
+ -c - 1, -c, -c + 1, c - 1, c, c + 1,
+ -2*c - 1, -2 * c, -2*c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ -mul - 1, -mul, -mul + 1, mul - 1, mul, mul + 1,
+ int64(1)<<(n-1) - 1, -int64(1) << (n - 1),
+ } {
+ X := new(big.Int).SetInt64(x)
+ if X.Cmp(Min) < 0 || X.Cmp(Max) > 0 {
+ continue
+ }
+ want := x%c == 0
+ mul := (uint64(x)*m + a) & mask
+ rot := (mul>>uint(k) | mul<<(n-uint(k))) & mask
+ got := rot <= max
+ if want != got {
+ t.Errorf("signed divisible wrong for %d %% %d == 0: got %v, want %v (k=%d,m=%d,a=%d,max=%d)\n", x, c, got, want, k, m, a, max)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck.go b/src/cmd/compile/internal/ssa/nilcheck.go
new file mode 100644
index 0000000..14f511a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck.go
@@ -0,0 +1,337 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/internal/src"
+ "internal/buildcfg"
+)
+
+// nilcheckelim eliminates unnecessary nil checks.
+// It runs on machine-independent code.
+func nilcheckelim(f *Func) {
+ // A nil check is redundant if the same nil check was successful in a
+ // dominating block. The efficacy of this pass depends heavily on the
+ // efficacy of the cse pass.
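+	//
+	// For example, in
+	//
+	//	if p != nil {
+	//		_ = *p
+	//	}
+	//
+	// the implicit nil check inserted for *p is dominated by the successful
+	// explicit test of p and can be removed.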
+ sdom := f.Sdom()
+
+ // TODO: Eliminate more nil checks.
+ // We can recursively remove any chain of fixed offset calculations,
+ // i.e. struct fields and array elements, even with non-constant
+ // indices: x is non-nil iff x.a.b[i].c is.
+
+ type walkState int
+ const (
+ Work walkState = iota // process nil checks and traverse to dominees
+ ClearPtr // forget the fact that ptr is nil
+ )
+
+ type bp struct {
+ block *Block // block, or nil in ClearPtr state
+ ptr *Value // if non-nil, ptr that is to be cleared in ClearPtr state
+ op walkState
+ }
+
+ work := make([]bp, 0, 256)
+ work = append(work, bp{block: f.Entry})
+
+ // map from value ID to bool indicating if value is known to be non-nil
+ // in the current dominator path being walked. This slice is updated by
+ // walkStates to maintain the known non-nil values.
+ nonNilValues := make([]bool, f.NumValues())
+
+ // make an initial pass identifying any non-nil values
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // a value resulting from taking the address of a
+ // value, or a value constructed from an offset of a
+ // non-nil ptr (OpAddPtr) implies it is non-nil
+ // We also assume unsafe pointer arithmetic generates non-nil pointers. See #27180.
+ // We assume that SlicePtr is non-nil because we do a bounds check
+ // before the slice access (and all cap>0 slices have a non-nil ptr). See #30366.
+ if v.Op == OpAddr || v.Op == OpLocalAddr || v.Op == OpAddPtr || v.Op == OpOffPtr || v.Op == OpAdd32 || v.Op == OpAdd64 || v.Op == OpSub32 || v.Op == OpSub64 || v.Op == OpSlicePtr {
+ nonNilValues[v.ID] = true
+ }
+ }
+ }
+
+ for changed := true; changed; {
+ changed = false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ // phis whose arguments are all non-nil
+ // are non-nil
+ if v.Op == OpPhi {
+ argsNonNil := true
+ for _, a := range v.Args {
+ if !nonNilValues[a.ID] {
+ argsNonNil = false
+ break
+ }
+ }
+ if argsNonNil {
+ if !nonNilValues[v.ID] {
+ changed = true
+ }
+ nonNilValues[v.ID] = true
+ }
+ }
+ }
+ }
+ }
+
+	// allocate auxiliary data structures for computing store order
+ sset := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ storeNumber := make([]int32, f.NumValues())
+
+	// perform a depth-first walk of the dominator tree
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+
+ switch node.op {
+ case Work:
+ b := node.block
+
+ // First, see if we're dominated by an explicit nil check.
+ if len(b.Preds) == 1 {
+ p := b.Preds[0].b
+ if p.Kind == BlockIf && p.Controls[0].Op == OpIsNonNil && p.Succs[0].b == b {
+ if ptr := p.Controls[0].Args[0]; !nonNilValues[ptr.ID] {
+ nonNilValues[ptr.ID] = true
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ }
+ }
+ }
+
+ // Next, order values in the current block w.r.t. stores.
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+
+ // Next, process values in the block.
+ i := 0
+ for _, v := range b.Values {
+ b.Values[i] = v
+ i++
+ switch v.Op {
+ case OpIsNonNil:
+ ptr := v.Args[0]
+ if nonNilValues[ptr.ID] {
+ if v.Pos.IsStmt() == src.PosIsStmt { // Boolean true is a terrible statement boundary.
+ pendingLines.add(v.Pos)
+ v.Pos = v.Pos.WithNotStmt()
+ }
+ // This is a redundant explicit nil check.
+ v.reset(OpConstBool)
+ v.AuxInt = 1 // true
+ }
+ case OpNilCheck:
+ ptr := v.Args[0]
+ if nonNilValues[ptr.ID] {
+ // This is a redundant implicit nil check.
+ // Logging in the style of the former compiler -- and omit line 1,
+ // which is usually in generated code.
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ if v.Pos.IsStmt() == src.PosIsStmt { // About to lose a statement boundary
+ pendingLines.add(v.Pos)
+ }
+ v.reset(OpUnknown)
+ f.freeValue(v)
+ i--
+ continue
+ }
+ // Record the fact that we know ptr is non nil, and remember to
+ // undo that information when this dominator subtree is done.
+ nonNilValues[ptr.ID] = true
+ work = append(work, bp{op: ClearPtr, ptr: ptr})
+ fallthrough // a non-eliminated nil check might be a good place for a statement boundary.
+ default:
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ }
+ // This reduces the lost statement count in "go" by 5 (out of 500 total).
+ for j := 0; j < i; j++ { // is this an ordering problem?
+ v := b.Values[j]
+ if v.Pos.IsStmt() != src.PosNotStmt && !isPoorStatementOp(v.Op) && pendingLines.contains(v.Pos) {
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ }
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+ b.truncateValues(i)
+
+ // Add all dominated blocks to the work list.
+ for w := sdom[node.block.ID].child; w != nil; w = sdom[w.ID].sibling {
+ work = append(work, bp{op: Work, block: w})
+ }
+
+ case ClearPtr:
+ nonNilValues[node.ptr.ID] = false
+ continue
+ }
+ }
+}
+
+// All platforms are guaranteed to fault if we load/store to anything smaller than this address.
+//
+// This should agree with minLegalPointer in the runtime.
+const minZeroPage = 4096
+
+// faultOnLoad is true if a load from an address below minZeroPage will trigger a SIGSEGV.
+var faultOnLoad = buildcfg.GOOS != "aix"
+
+// nilcheckelim2 eliminates unnecessary nil checks.
+// Runs after lowering and scheduling.
+func nilcheckelim2(f *Func) {
+ unnecessary := f.newSparseMap(f.NumValues()) // map from pointer that will be dereferenced to index of dereferencing value in b.Values[]
+ defer f.retSparseMap(unnecessary)
+
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+
+ for _, b := range f.Blocks {
+ // Walk the block backwards. Find instructions that will fault if their
+ // input pointer is nil. Remove nil checks on those pointers, as the
+ // faulting instruction effectively does the nil check for free.
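+		//
+		// Schematically, given
+		//
+		//	v1 = NilCheck ptr mem
+		//	v2 = Load <t> ptr mem
+		//
+		// the load faults when ptr is nil (for sufficiently small offsets),
+		// so the explicit NilCheck can be dropped.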
+ unnecessary.clear()
+ pendingLines.clear()
+ // Optimization: keep track of removed nilcheck with smallest index
+ firstToRemove := len(b.Values)
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if opcodeTable[v.Op].nilCheck && unnecessary.contains(v.Args[0].ID) {
+ if f.fe.Debug_checknil() && v.Pos.Line() > 1 {
+ f.Warnl(v.Pos, "removed nil check")
+ }
+ // For bug 33724, policy is that we might choose to bump an existing position
+ // off the faulting load/store in favor of the one from the nil check.
+
+ // Iteration order means that first nilcheck in the chain wins, others
+ // are bumped into the ordinary statement preservation algorithm.
+ u := b.Values[unnecessary.get(v.Args[0].ID)]
+ if !u.Pos.SameFileAndLine(v.Pos) {
+ if u.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(u.Pos)
+ }
+ u.Pos = v.Pos
+ } else if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.add(v.Pos)
+ }
+
+ v.reset(OpUnknown)
+ firstToRemove = i
+ continue
+ }
+ if v.Type.IsMemory() || v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ if v.Op == OpVarKill || v.Op == OpVarLive || (v.Op == OpVarDef && !v.Aux.(*ir.Name).Type().HasPointers()) {
+ // These ops don't really change memory.
+ continue
+ // Note: OpVarDef requires that the defined variable not have pointers.
+ // We need to make sure that there's no possible faulting
+ // instruction between a VarDef and that variable being
+ // fully initialized. If there was, then anything scanning
+ // the stack during the handling of that fault will see
+ // a live but uninitialized pointer variable on the stack.
+ //
+ // If we have:
+ //
+ // NilCheck p
+ // VarDef x
+ // x = *p
+ //
+ // We can't rewrite that to
+ //
+ // VarDef x
+ // NilCheck p
+ // x = *p
+ //
+ // Particularly, even though *p faults on p==nil, we still
+ // have to do the explicit nil check before the VarDef.
+ // See issue #32288.
+ }
+ // This op changes memory. Any faulting instruction after v that
+ // we've recorded in the unnecessary map is now obsolete.
+ unnecessary.clear()
+ }
+
+ // Find any pointers that this op is guaranteed to fault on if nil.
+ var ptrstore [2]*Value
+ ptrs := ptrstore[:0]
+ if opcodeTable[v.Op].faultOnNilArg0 && (faultOnLoad || v.Type.IsMemory()) {
+ // On AIX, only writing will fault.
+ ptrs = append(ptrs, v.Args[0])
+ }
+ if opcodeTable[v.Op].faultOnNilArg1 && (faultOnLoad || (v.Type.IsMemory() && v.Op != OpPPC64LoweredMove)) {
+ // On AIX, only writing will fault.
+				// LoweredMove is a special case: it is treated as a "mem" op because it stores to arg0, but arg1 is read as a load and should still be checked.
+ ptrs = append(ptrs, v.Args[1])
+ }
+
+ for _, ptr := range ptrs {
+ // Check to make sure the offset is small.
+ switch opcodeTable[v.Op].auxType {
+ case auxSym:
+ if v.Aux != nil {
+ continue
+ }
+ case auxSymOff:
+ if v.Aux != nil || v.AuxInt < 0 || v.AuxInt >= minZeroPage {
+ continue
+ }
+ case auxSymValAndOff:
+ off := ValAndOff(v.AuxInt).Off()
+ if v.Aux != nil || off < 0 || off >= minZeroPage {
+ continue
+ }
+ case auxInt32:
+ // Mips uses this auxType for atomic add constant. It does not affect the effective address.
+ case auxInt64:
+ // ARM uses this auxType for duffcopy/duffzero/alignment info.
+ // It does not affect the effective address.
+ case auxNone:
+ // offset is zero.
+ default:
+ v.Fatalf("can't handle aux %s (type %d) yet\n", v.auxString(), int(opcodeTable[v.Op].auxType))
+ }
+ // This instruction is guaranteed to fault if ptr is nil.
+ // Any previous nil check op is unnecessary.
+ unnecessary.set(ptr.ID, int32(i), src.NoXPos)
+ }
+ }
+ // Remove values we've clobbered with OpUnknown.
+ i := firstToRemove
+ for j := i; j < len(b.Values); j++ {
+ v := b.Values[j]
+ if v.Op != OpUnknown {
+ if !notStmtBoundary(v.Op) && pendingLines.contains(v.Pos) { // Late in compilation, so any remaining NotStmt values are probably okay now.
+ v.Pos = v.Pos.WithIsStmt()
+ pendingLines.remove(v.Pos)
+ }
+ b.Values[i] = v
+ i++
+ }
+ }
+
+ if pendingLines.contains(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ }
+
+ b.truncateValues(i)
+
+ // TODO: if b.Kind == BlockPlain, start the analysis in the subsequent block to find
+ // more unnecessary nil checks. Would fix test/nilptr3.go:159.
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/nilcheck_test.go b/src/cmd/compile/internal/ssa/nilcheck_test.go
new file mode 100644
index 0000000..2e32afe
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/nilcheck_test.go
@@ -0,0 +1,434 @@
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "strconv"
+ "testing"
+)
+
+func BenchmarkNilCheckDeep1(b *testing.B) { benchmarkNilCheckDeep(b, 1) }
+func BenchmarkNilCheckDeep10(b *testing.B) { benchmarkNilCheckDeep(b, 10) }
+func BenchmarkNilCheckDeep100(b *testing.B) { benchmarkNilCheckDeep(b, 100) }
+func BenchmarkNilCheckDeep1000(b *testing.B) { benchmarkNilCheckDeep(b, 1000) }
+func BenchmarkNilCheckDeep10000(b *testing.B) { benchmarkNilCheckDeep(b, 10000) }
+
+// benchmarkNilCheckDeep is a stress test of nilcheckelim.
+// It uses the worst possible input: A linear string of
+// nil checks, none of which can be eliminated.
+// Run with multiple depths to observe big-O behavior.
+func benchmarkNilCheckDeep(b *testing.B, depth int) {
+ c := testConfig(b)
+ ptrType := c.config.Types.BytePtr
+
+ var blocs []bloc
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto(blockn(0)),
+ ),
+ )
+ for i := 0; i < depth; i++ {
+ blocs = append(blocs,
+ Bloc(blockn(i),
+ Valu(ptrn(i), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(booln(i), OpIsNonNil, c.config.Types.Bool, 0, nil, ptrn(i)),
+ If(booln(i), blockn(i+1), "exit"),
+ ),
+ )
+ }
+ blocs = append(blocs,
+ Bloc(blockn(depth), Goto("exit")),
+ Bloc("exit", Exit("mem")),
+ )
+
+ fun := c.Fun("entry", blocs...)
+
+ CheckFunc(fun.f)
+ b.SetBytes(int64(depth)) // helps for eyeballing linearity
+ b.ResetTimer()
+ b.ReportAllocs()
+
+ for i := 0; i < b.N; i++ {
+ nilcheckelim(fun.f)
+ }
+}
+
+func blockn(n int) string { return "b" + strconv.Itoa(n) }
+func ptrn(n int) string { return "p" + strconv.Itoa(n) }
+func booln(n int) string { return "c" + strconv.Itoa(n) }
+
+func isNilCheck(b *Block) bool {
+ return b.Kind == BlockIf && b.Controls[0].Op == OpIsNonNil
+}
+
+// TestNilcheckSimple verifies that a second repeated nilcheck is removed.
+func TestNilcheckSimple(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckDomOrder ensures that the nil check elimination isn't dependent
+// on the order of the dominees.
+func TestNilcheckDomOrder(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("exit",
+ Exit("mem")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddr verifies that nilchecks of OpAddr constructed values are removed.
+func TestNilcheckAddr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckAddPtr verifies that nilchecks of OpAddPtr constructed values are removed.
+func TestNilcheckAddPtr(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("off", OpConst64, c.config.Types.Int64, 20, nil),
+ Valu("ptr1", OpAddPtr, ptrType, 0, nil, "sb", "off"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckPhi tests that nil checks of phis, for which all values are known to be
+// non-nil are removed.
+func TestNilcheckPhi(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("baddr", OpLocalAddr, c.config.Types.Bool, 0, StringToAux("b"), "sp", "mem"),
+ Valu("bool1", OpLoad, c.config.Types.Bool, 0, nil, "baddr", "mem"),
+ If("bool1", "b1", "b2")),
+ Bloc("b1",
+ Valu("ptr1", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ Bloc("b2",
+ Valu("ptr2", OpAddr, ptrType, 0, nil, "sb"),
+ Goto("checkPtr")),
+ // both ptr1 and ptr2 are guaranteed non-nil here
+ Bloc("checkPtr",
+ Valu("phi", OpPhi, ptrType, 0, nil, "ptr1", "ptr2"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "phi"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["checkPtr"] && isNilCheck(b) {
+ t.Errorf("checkPtr was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckKeepRemove verifies that duplicate checks of the same pointer
+// are removed, but checks of different pointers are not.
+func TestNilcheckKeepRemove(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "differentCheck", "exit")),
+ Bloc("differentCheck",
+ Valu("ptr2", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr2"),
+ If("bool2", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundDifferentCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ if b == fun.blocks["differentCheck"] && isNilCheck(b) {
+ foundDifferentCheck = true
+ }
+ }
+ if !foundDifferentCheck {
+ t.Errorf("removed differentCheck, but shouldn't have")
+ }
+}
+
+// TestNilcheckInFalseBranch tests that nil checks in the false branch of a nilcheck
+// block are *not* removed.
+func TestNilcheckInFalseBranch(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("bool1", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool1", "extra", "secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "thirdCheck")),
+ Bloc("thirdCheck",
+ Valu("bool3", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool3", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ foundThirdCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ if b == fun.blocks["thirdCheck"] && isNilCheck(b) {
+ foundThirdCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("removed secondCheck, but shouldn't have [false branch]")
+ }
+ if !foundThirdCheck {
+ t.Errorf("removed thirdCheck, but shouldn't have [false branch]")
+ }
+}
+
+// TestNilcheckUser verifies that a user nil check that dominates a generated nil check
+// will remove the generated nil check.
+func TestNilcheckUser(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "exit")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ t.Errorf("secondCheck was not eliminated")
+ }
+ }
+}
+
+// TestNilcheckBug reproduces a bug in nilcheckelim found by compiling math/big
+func TestNilcheckBug(t *testing.T) {
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Goto("checkPtr")),
+ Bloc("checkPtr",
+ Valu("ptr1", OpLoad, ptrType, 0, nil, "sb", "mem"),
+ Valu("nilptr", OpConstNil, ptrType, 0, nil),
+ Valu("bool1", OpNeqPtr, c.config.Types.Bool, 0, nil, "ptr1", "nilptr"),
+ If("bool1", "secondCheck", "couldBeNil")),
+ Bloc("couldBeNil",
+ Goto("secondCheck")),
+ Bloc("secondCheck",
+ Valu("bool2", OpIsNonNil, c.config.Types.Bool, 0, nil, "ptr1"),
+ If("bool2", "extra", "exit")),
+ Bloc("extra",
+ // prevent fuse from eliminating this block
+ Valu("store", OpStore, types.TypeMem, 0, ptrType, "ptr1", "nilptr", "mem"),
+ Goto("exit")),
+ Bloc("exit",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "mem", "store"),
+ Exit("phi")))
+
+ CheckFunc(fun.f)
+ // we need the opt here to rewrite the user nilcheck
+ opt(fun.f)
+ nilcheckelim(fun.f)
+
+ // clean up the removed nil check
+ fuse(fun.f, fuseTypePlain)
+ deadcode(fun.f)
+
+ CheckFunc(fun.f)
+ foundSecondCheck := false
+ for _, b := range fun.f.Blocks {
+ if b == fun.blocks["secondCheck"] && isNilCheck(b) {
+ foundSecondCheck = true
+ }
+ }
+ if !foundSecondCheck {
+ t.Errorf("secondCheck was eliminated, but shouldn't have")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/numberlines.go b/src/cmd/compile/internal/ssa/numberlines.go
new file mode 100644
index 0000000..9d6aeca
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/numberlines.go
@@ -0,0 +1,262 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "sort"
+)
+
+func isPoorStatementOp(op Op) bool {
+ switch op {
+ // Note that Nilcheck often vanishes, but when it doesn't, you'd love to start the statement there
+ // so that a debugger-user sees the stop before the panic, and can examine the value.
+ case OpAddr, OpLocalAddr, OpOffPtr, OpStructSelect, OpPhi, OpITab, OpIData,
+ OpIMake, OpStringMake, OpSliceMake, OpStructMake0, OpStructMake1, OpStructMake2, OpStructMake3, OpStructMake4,
+ OpConstBool, OpConst8, OpConst16, OpConst32, OpConst64, OpConst32F, OpConst64F, OpSB, OpSP,
+ OpArgIntReg, OpArgFloatReg:
+ return true
+ }
+ return false
+}
+
+// nextGoodStatementIndex returns an index at i or later that is believed
+// to be a good place to start the statement for b. This decision is
+// based on v's Op, the possibility of a better later operation, and
+// whether the values following i are the same line as v.
+// If a better statement index isn't found, then i is returned.
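+//
+// For example, when a source line begins with an OpOffPtr that merely computes
+// an address and the load using that address follows on the same line, the load
+// is the better value to mark as the statement boundary.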
+func nextGoodStatementIndex(v *Value, i int, b *Block) int {
+ // If the value is the last one in the block, too bad, it will have to do
+ // (this assumes that the value ordering vaguely corresponds to the source
+ // program execution order, which tends to be true directly after ssa is
+// first built).
+ if i >= len(b.Values)-1 {
+ return i
+ }
+ // Skip the likely-ephemeral/fragile opcodes expected to vanish in a rewrite.
+ if !isPoorStatementOp(v.Op) {
+ return i
+ }
+ // Look ahead to see what the line number is on the next thing that could be a boundary.
+ for j := i + 1; j < len(b.Values); j++ {
+ u := b.Values[j]
+ if u.Pos.IsStmt() == src.PosNotStmt { // ignore non-statements
+ continue
+ }
+ if u.Pos.SameFileAndLine(v.Pos) {
+ if isPoorStatementOp(u.Op) {
+ continue // Keep looking, this is also not a good statement op
+ }
+ return j
+ }
+ return i
+ }
+ return i
+}
+
+// notStmtBoundary reports whether a value with opcode op can never be a statement
+// boundary. Such values don't correspond to a user's understanding of a
+// statement boundary.
+func notStmtBoundary(op Op) bool {
+ switch op {
+ case OpCopy, OpPhi, OpVarKill, OpVarDef, OpVarLive, OpUnknown, OpFwdRef, OpArg, OpArgIntReg, OpArgFloatReg:
+ return true
+ }
+ return false
+}
+
+func (b *Block) FirstPossibleStmtValue() *Value {
+ for _, v := range b.Values {
+ if notStmtBoundary(v.Op) {
+ continue
+ }
+ return v
+ }
+ return nil
+}
+
+func flc(p src.XPos) string {
+ if p == src.NoXPos {
+ return "none"
+ }
+ return fmt.Sprintf("(%d):%d:%d", p.FileIndex(), p.Line(), p.Col())
+}
+
+type fileAndPair struct {
+ f int32
+ lp lineRange
+}
+
+type fileAndPairs []fileAndPair
+
+func (fap fileAndPairs) Len() int {
+ return len(fap)
+}
+func (fap fileAndPairs) Less(i, j int) bool {
+ return fap[i].f < fap[j].f
+}
+func (fap fileAndPairs) Swap(i, j int) {
+ fap[i], fap[j] = fap[j], fap[i]
+}
+
+// -d=ssa/number_lines/stats=1 (that bit) for line and file distribution statistics
+// -d=ssa/number_lines/debug for information about why particular values are marked as statements.
+func numberLines(f *Func) {
+ po := f.Postorder()
+ endlines := make(map[ID]src.XPos)
+ ranges := make(map[int]lineRange)
+ note := func(p src.XPos) {
+ line := uint32(p.Line())
+ i := int(p.FileIndex())
+ lp, found := ranges[i]
+ change := false
+ if line < lp.first || !found {
+ lp.first = line
+ change = true
+ }
+ if line > lp.last {
+ lp.last = line
+ change = true
+ }
+ if change {
+ ranges[i] = lp
+ }
+ }
+
+ // Visit in reverse post order so that all non-loop predecessors come first.
+ for j := len(po) - 1; j >= 0; j-- {
+ b := po[j]
+ // Find the first interesting position and check to see if it differs from any predecessor
+ firstPos := src.NoXPos
+ firstPosIndex := -1
+ if b.Pos.IsStmt() != src.PosNotStmt {
+ note(b.Pos)
+ }
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() != src.PosNotStmt {
+ note(v.Pos)
+ // skip ahead to better instruction for this line if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ firstPosIndex = i
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt() // default to default
+ break
+ }
+ }
+
+ if firstPosIndex == -1 { // Effectively empty block, check block's own Pos, consider preds.
+ line := src.NoXPos
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(line) {
+ if line == src.NoXPos {
+ line = endlines[pbi]
+ continue
+ } else {
+ line = src.NoXPos
+ break
+ }
+
+ }
+ }
+ // If the block has no statement itself and is effectively empty, tag it w/ predecessor(s) but not as a statement
+ if b.Pos.IsStmt() == src.PosNotStmt {
+ b.Pos = line
+ endlines[b.ID] = line
+ continue
+ }
+ // If the block differs from its predecessors, mark it as a statement
+ if line == src.NoXPos || !line.SameFileAndLine(b.Pos) {
+ b.Pos = b.Pos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt effectively-empty-block %s %s %s\n", f.Name, b, flc(b.Pos))
+ }
+ }
+ endlines[b.ID] = b.Pos
+ continue
+ }
+ // check predecessors for any difference; if firstPos differs, then it is a boundary.
+ if len(b.Preds) == 0 { // Don't forget the entry block
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt entry-block %s %s %s %s\n", f.Name, b, b.Values[firstPosIndex], flc(firstPos))
+ }
+ } else { // differing pred
+ for _, p := range b.Preds {
+ pbi := p.Block().ID
+ if !endlines[pbi].SameFileAndLine(firstPos) {
+ b.Values[firstPosIndex].Pos = firstPos.WithIsStmt()
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt differing-pred %s %s %s %s, different=%s ending %s\n",
+ f.Name, b, b.Values[firstPosIndex], flc(firstPos), p.Block(), flc(endlines[pbi]))
+ }
+ break
+ }
+ }
+ }
+ // iterate forward setting each new (interesting) position as a statement boundary.
+ for i := firstPosIndex + 1; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if v.Pos.IsStmt() == src.PosNotStmt {
+ continue
+ }
+ note(v.Pos)
+ // skip ahead if possible
+ i = nextGoodStatementIndex(v, i, b)
+ v = b.Values[i]
+ if !v.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt new line %s %s %s %s prev pos = %s\n", f.Name, b, v, flc(v.Pos), flc(firstPos))
+ }
+ firstPos = v.Pos
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ v.Pos = v.Pos.WithDefaultStmt()
+ }
+ }
+ if b.Pos.IsStmt() != src.PosNotStmt && !b.Pos.SameFileAndLine(firstPos) {
+ if f.pass.debug > 0 {
+ fmt.Printf("Mark stmt end of block differs %s %s %s prev pos = %s\n", f.Name, b, flc(b.Pos), flc(firstPos))
+ }
+ b.Pos = b.Pos.WithIsStmt()
+ firstPos = b.Pos
+ }
+ endlines[b.ID] = firstPos
+ }
+ if f.pass.stats&1 != 0 {
+ // Report summary statistics on the shape of the sparse map about to be constructed
+ // TODO use this information to make sparse maps faster.
+ var entries fileAndPairs
+ for k, v := range ranges {
+ entries = append(entries, fileAndPair{int32(k), v})
+ }
+ sort.Sort(entries)
+ total := uint64(0) // sum over files of maxline(file) - minline(file)
+ maxfile := int32(0) // max(file indices)
+ minline := uint32(0xffffffff) // min over files of minline(file)
+ maxline := uint32(0) // max over files of maxline(file)
+ for _, v := range entries {
+ if f.pass.stats > 1 {
+ f.LogStat("file", v.f, "low", v.lp.first, "high", v.lp.last)
+ }
+ total += uint64(v.lp.last - v.lp.first)
+ if maxfile < v.f {
+ maxfile = v.f
+ }
+ if minline > v.lp.first {
+ minline = v.lp.first
+ }
+ if maxline < v.lp.last {
+ maxline = v.lp.last
+ }
+ }
+ f.LogStat("SUM_LINE_RANGE", total, "MAXMIN_LINE_RANGE", maxline-minline, "MAXFILE", maxfile, "NFILES", len(entries))
+ }
+ // cachedLineStarts is an empty sparse map for values that are included within ranges.
+ f.cachedLineStarts = newXposmap(ranges)
+}
diff --git a/src/cmd/compile/internal/ssa/op.go b/src/cmd/compile/internal/ssa/op.go
new file mode 100644
index 0000000..a1835dc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/op.go
@@ -0,0 +1,531 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "fmt"
+ "strings"
+)
+
+// An Op encodes the specific operation that a Value performs.
+// Opcodes' semantics can be modified by the type and aux fields of the Value.
+// For instance, OpAdd can be 32 or 64 bit, signed or unsigned, float or complex, depending on Value.Type.
+// Semantics of each op are described in the opcode files in gen/*Ops.go.
+// There is one file for generic (architecture-independent) ops and one file
+// for each architecture.
+type Op int32
+
+type opInfo struct {
+ name string
+ reg regInfo
+ auxType auxType
+ argLen int32 // the number of arguments, -1 if variable length
+ asm obj.As
+ generic bool // this is a generic (arch-independent) opcode
+ rematerializeable bool // this op is rematerializeable
+ commutative bool // this operation is commutative (e.g. addition)
+ resultInArg0 bool // (first, if a tuple) output of v and v.Args[0] must be allocated to the same register
+ resultNotInArgs bool // outputs must not be allocated to the same registers as inputs
+ clobberFlags bool // this op clobbers flags register
+ call bool // is a function call
+ tailCall bool // is a tail call
+ nilCheck bool // this op is a nil check on arg0
+ faultOnNilArg0 bool // this op will fault if arg0 is nil (and aux encodes a small offset)
+ faultOnNilArg1 bool // this op will fault if arg1 is nil (and aux encodes a small offset)
+ usesScratch bool // this op requires scratch memory space
+ hasSideEffects bool // for "reasons", not to be eliminated. E.g., atomic store, #19182.
+ zeroWidth bool // op never translates into any machine code. example: copy, which may sometimes translate to machine code, is not zero-width.
+ unsafePoint bool // this op is an unsafe point, i.e. not safe for async preemption
+ symEffect SymEffect // effect this op has on symbol in aux
+ scale uint8 // amd64/386 indexed load scale
+}
+
+type inputInfo struct {
+ idx int // index in Args array
+ regs regMask // allowed input registers
+}
+
+type outputInfo struct {
+ idx int // index in output tuple
+ regs regMask // allowed output registers
+}
+
+type regInfo struct {
+ // inputs encodes the register restrictions for an instruction's inputs.
+ // Each entry specifies an allowed register set for a particular input.
+ // They are listed in the order in which regalloc should pick a register
+ // from the register set (most constrained first).
+ // Inputs which do not need registers are not listed.
+ inputs []inputInfo
+ // clobbers encodes the set of registers that are overwritten by
+ // the instruction (other than the output registers).
+ clobbers regMask
+ // outputs is the same as inputs, but for the outputs of the instruction.
+ outputs []outputInfo
+}
+
+func (r *regInfo) String() string {
+ s := ""
+ s += "INS:\n"
+ for _, i := range r.inputs {
+ mask := fmt.Sprintf("%64b", i.regs)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf("%2d |%s|\n", i.idx, mask)
+ }
+ s += "OUTS:\n"
+ for _, i := range r.outputs {
+ mask := fmt.Sprintf("%64b", i.regs)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf("%2d |%s|\n", i.idx, mask)
+ }
+ s += "CLOBBERS:\n"
+ mask := fmt.Sprintf("%64b", r.clobbers)
+ mask = strings.Replace(mask, "0", ".", -1)
+ s += fmt.Sprintf(" |%s|\n", mask)
+ return s
+}
+
+type auxType int8
+
+type AuxNameOffset struct {
+ Name *ir.Name
+ Offset int64
+}
+
+func (a *AuxNameOffset) CanBeAnSSAAux() {}
+func (a *AuxNameOffset) String() string {
+ return fmt.Sprintf("%s+%d", a.Name.Sym().Name, a.Offset)
+}
+
+func (a *AuxNameOffset) FrameOffset() int64 {
+ return a.Name.FrameOffset() + a.Offset
+}
+
+type AuxCall struct {
+ Fn *obj.LSym
+ reg *regInfo // regInfo for this call
+ abiInfo *abi.ABIParamResultInfo
+}
+
+// Reg returns the regInfo for a given call, combining the derived in/out register masks
+// with the machine-specific register information in the input i. (The machine-specific
+// regInfo is much handier at the call site than it is when the AuxCall is being constructed,
+// therefore do this lazily).
+//
+// TODO: there is a Clever Hack that allows pre-generation of a small-ish number of the slices
+// of inputInfo and outputInfo used here, provided that we are willing to reorder the inputs
+// and outputs from calls, so that all integer registers come first, then all floating registers.
+// At this point (active development of register ABI) that is very premature,
+// but if this turns out to be a cost, we could do it.
+func (a *AuxCall) Reg(i *regInfo, c *Config) *regInfo {
+ if a.reg.clobbers != 0 {
+ // Already updated
+ return a.reg
+ }
+ if a.abiInfo.InRegistersUsed()+a.abiInfo.OutRegistersUsed() == 0 {
+ // Shortcut for zero case, also handles old ABI.
+ a.reg = i
+ return a.reg
+ }
+
+ k := len(i.inputs)
+ for _, p := range a.abiInfo.InParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ a.reg.inputs = append(a.reg.inputs, i.inputs...) // These are less constrained, thus should come last
+ k = len(i.outputs)
+ for _, p := range a.abiInfo.OutParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.outputs = append(a.reg.outputs, outputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ a.reg.outputs = append(a.reg.outputs, i.outputs...)
+ a.reg.clobbers = i.clobbers
+ return a.reg
+}
+func (a *AuxCall) ABI() *abi.ABIConfig {
+ return a.abiInfo.Config()
+}
+func (a *AuxCall) ABIInfo() *abi.ABIParamResultInfo {
+ return a.abiInfo
+}
+func (a *AuxCall) ResultReg(c *Config) *regInfo {
+ if a.abiInfo.OutRegistersUsed() == 0 {
+ return a.reg
+ }
+ if len(a.reg.inputs) > 0 {
+ return a.reg
+ }
+ k := 0
+ for _, p := range a.abiInfo.OutParams() {
+ for _, r := range p.Registers {
+ m := archRegForAbiReg(r, c)
+ a.reg.inputs = append(a.reg.inputs, inputInfo{idx: k, regs: (1 << m)})
+ k++
+ }
+ }
+ return a.reg
+}
+
+// For ABI register index r, returns the (dense) register number used in
+// SSA backend.
+func archRegForAbiReg(r abi.RegIndex, c *Config) uint8 {
+ var m int8
+ if int(r) < len(c.intParamRegs) {
+ m = c.intParamRegs[r]
+ } else {
+ m = c.floatParamRegs[int(r)-len(c.intParamRegs)]
+ }
+ return uint8(m)
+}
+
+// For ABI register index r, returns the register number used in the obj
+// package (assembler).
+func ObjRegForAbiReg(r abi.RegIndex, c *Config) int16 {
+ m := archRegForAbiReg(r, c)
+ return c.registers[m].objNum
+}
+
+// ArgWidth returns the amount of stack needed for all the inputs
+// and outputs of a function or method, including ABI-defined parameter
+// slots and ABI-defined spill slots for register-resident parameters.
+//
+// The name is taken from the types package's ArgWidth(<function type>),
+// which predated changes to the ABI; this version handles those changes.
+func (a *AuxCall) ArgWidth() int64 {
+ return a.abiInfo.ArgWidth()
+}
+
+// ParamAssignmentForResult returns the ABI Parameter assignment for result which (indexed 0, 1, etc).
+func (a *AuxCall) ParamAssignmentForResult(which int64) *abi.ABIParamAssignment {
+ return a.abiInfo.OutParam(int(which))
+}
+
+// OffsetOfResult returns the SP offset of result which (indexed 0, 1, etc).
+func (a *AuxCall) OffsetOfResult(which int64) int64 {
+ n := int64(a.abiInfo.OutParam(int(which)).Offset())
+ return n
+}
+
+// OffsetOfArg returns the SP offset of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) OffsetOfArg(which int64) int64 {
+ n := int64(a.abiInfo.InParam(int(which)).Offset())
+ return n
+}
+
+// RegsOfResult returns the register(s) used for result which (indexed 0, 1, etc).
+func (a *AuxCall) RegsOfResult(which int64) []abi.RegIndex {
+ return a.abiInfo.OutParam(int(which)).Registers
+}
+
+// RegsOfArg returns the register(s) used for argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) RegsOfArg(which int64) []abi.RegIndex {
+ return a.abiInfo.InParam(int(which)).Registers
+}
+
+// NameOfResult returns the type of result which (indexed 0, 1, etc).
+func (a *AuxCall) NameOfResult(which int64) *ir.Name {
+ name := a.abiInfo.OutParam(int(which)).Name
+ if name == nil {
+ return nil
+ }
+ return name.(*ir.Name)
+}
+
+// TypeOfResult returns the type of result which (indexed 0, 1, etc).
+func (a *AuxCall) TypeOfResult(which int64) *types.Type {
+ return a.abiInfo.OutParam(int(which)).Type
+}
+
+// TypeOfArg returns the type of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) TypeOfArg(which int64) *types.Type {
+ return a.abiInfo.InParam(int(which)).Type
+}
+
+// SizeOfResult returns the size of result which (indexed 0, 1, etc).
+func (a *AuxCall) SizeOfResult(which int64) int64 {
+ return a.TypeOfResult(which).Size()
+}
+
+// SizeOfArg returns the size of argument which (indexed 0, 1, etc).
+// If the call is to a method, the receiver is the first argument (i.e., index 0)
+func (a *AuxCall) SizeOfArg(which int64) int64 {
+ return a.TypeOfArg(which).Size()
+}
+
+// NResults returns the number of results
+func (a *AuxCall) NResults() int64 {
+ return int64(len(a.abiInfo.OutParams()))
+}
+
+// LateExpansionResultType returns the result type (including trailing mem)
+// for a call that will be expanded later in the SSA phase.
+func (a *AuxCall) LateExpansionResultType() *types.Type {
+ var tys []*types.Type
+ for i := int64(0); i < a.NResults(); i++ {
+ tys = append(tys, a.TypeOfResult(i))
+ }
+ tys = append(tys, types.TypeMem)
+ return types.NewResults(tys)
+}
+
+// NArgs returns the number of arguments (including receiver, if there is one).
+func (a *AuxCall) NArgs() int64 {
+ return int64(len(a.abiInfo.InParams()))
+}
+
+// String returns "AuxCall{<fn>}"
+func (a *AuxCall) String() string {
+ var fn string
+ if a.Fn == nil {
+ fn = "AuxCall{nil" // could be interface/closure etc.
+ } else {
+ fn = fmt.Sprintf("AuxCall{%v", a.Fn)
+ }
+ // TODO how much of the ABI should be printed?
+
+ return fn + "}"
+}
+
+// StaticAuxCall returns an AuxCall for a static call.
+func StaticAuxCall(sym *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ if paramResultInfo == nil {
+ panic(fmt.Errorf("Nil paramResultInfo, sym=%v", sym))
+ }
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: sym, abiInfo: paramResultInfo, reg: reg}
+}
+
+// InterfaceAuxCall returns an AuxCall for an interface call.
+func InterfaceAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg}
+}
+
+// ClosureAuxCall returns an AuxCall for a closure call.
+func ClosureAuxCall(paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: nil, abiInfo: paramResultInfo, reg: reg}
+}
+
+func (*AuxCall) CanBeAnSSAAux() {}
+
+// OwnAuxCall returns a function's own AuxCall
+func OwnAuxCall(fn *obj.LSym, paramResultInfo *abi.ABIParamResultInfo) *AuxCall {
+ // TODO if this remains identical to ClosureAuxCall above after new ABI is done, should deduplicate.
+ var reg *regInfo
+ if paramResultInfo.InRegistersUsed()+paramResultInfo.OutRegistersUsed() > 0 {
+ reg = &regInfo{}
+ }
+ return &AuxCall{Fn: fn, abiInfo: paramResultInfo, reg: reg}
+}
+
+const (
+ auxNone auxType = iota
+ auxBool // auxInt is 0/1 for false/true
+ auxInt8 // auxInt is an 8-bit integer
+ auxInt16 // auxInt is a 16-bit integer
+ auxInt32 // auxInt is a 32-bit integer
+ auxInt64 // auxInt is a 64-bit integer
+ auxInt128 // auxInt represents a 128-bit integer. Always 0.
+ auxUInt8 // auxInt is an 8-bit unsigned integer
+ auxFloat32 // auxInt is a float32 (encoded with math.Float64bits)
+ auxFloat64 // auxInt is a float64 (encoded with math.Float64bits)
+ auxFlagConstant // auxInt is a flagConstant
+ auxNameOffsetInt8 // aux is a &struct{Name ir.Name, Offset int64}; auxInt is index in parameter registers array
+ auxString // aux is a string
+ auxSym // aux is a symbol (a *gc.Node for locals, an *obj.LSym for globals, or nil for none)
+ auxSymOff // aux is a symbol, auxInt is an offset
+ auxSymValAndOff // aux is a symbol, auxInt is a ValAndOff
+ auxTyp // aux is a type
+ auxTypSize // aux is a type, auxInt is a size, must have Aux.(Type).Size() == AuxInt
+ auxCCop // aux is a ssa.Op that represents a flags-to-bool conversion (e.g. LessThan)
+ auxCall // aux is a *ssa.AuxCall
+ auxCallOff // aux is a *ssa.AuxCall, AuxInt is int64 param (in+out) size
+
+ // architecture specific aux types
+ auxARM64BitField // aux is an arm64 bitfield lsb and width packed into auxInt
+ auxS390XRotateParams // aux is a s390x rotate parameters object encoding start bit, end bit and rotate amount
+ auxS390XCCMask // aux is a s390x 4-bit condition code mask
+ auxS390XCCMaskInt8 // aux is a s390x 4-bit condition code mask, auxInt is a int8 immediate
+ auxS390XCCMaskUint8 // aux is a s390x 4-bit condition code mask, auxInt is a uint8 immediate
+)
+
+// A SymEffect describes the effect that an SSA Value has on the variable
+// identified by the symbol in its Aux field.
+type SymEffect int8
+
+const (
+ SymRead SymEffect = 1 << iota
+ SymWrite
+ SymAddr
+
+ SymRdWr = SymRead | SymWrite
+
+ SymNone SymEffect = 0
+)
+
+// A Sym represents a symbolic offset from a base register.
+// Currently a Sym can be one of 3 things:
+// - a *gc.Node, for an offset from SP (the stack pointer)
+// - a *obj.LSym, for an offset from SB (the global pointer)
+// - nil, for no offset
+type Sym interface {
+ CanBeAnSSASym()
+ CanBeAnSSAAux()
+}
+
+// A ValAndOff is used by several opcodes. It holds
+// both a value and a pointer offset.
+// A ValAndOff is intended to be encoded into an AuxInt field.
+// The zero ValAndOff encodes a value of 0 and an offset of 0.
+// The high 32 bits hold a value.
+// The low 32 bits hold a pointer offset.
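+// For example, makeValAndOff(3, 8) encodes to (3<<32)|8, so Val() reports 3 and Off() reports 8.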
+type ValAndOff int64
+
+func (x ValAndOff) Val() int32 { return int32(int64(x) >> 32) }
+func (x ValAndOff) Val64() int64 { return int64(x) >> 32 }
+func (x ValAndOff) Val16() int16 { return int16(int64(x) >> 32) }
+func (x ValAndOff) Val8() int8 { return int8(int64(x) >> 32) }
+
+func (x ValAndOff) Off64() int64 { return int64(int32(x)) }
+func (x ValAndOff) Off() int32 { return int32(x) }
+
+func (x ValAndOff) String() string {
+ return fmt.Sprintf("val=%d,off=%d", x.Val(), x.Off())
+}
+
+// validVal reports whether the value can be used
+// as an argument to makeValAndOff.
+func validVal(val int64) bool {
+ return val == int64(int32(val))
+}
+
+func makeValAndOff(val, off int32) ValAndOff {
+ return ValAndOff(int64(val)<<32 + int64(uint32(off)))
+}
+
+func (x ValAndOff) canAdd32(off int32) bool {
+ newoff := x.Off64() + int64(off)
+ return newoff == int64(int32(newoff))
+}
+func (x ValAndOff) canAdd64(off int64) bool {
+ newoff := x.Off64() + off
+ return newoff == int64(int32(newoff))
+}
+
+func (x ValAndOff) addOffset32(off int32) ValAndOff {
+ if !x.canAdd32(off) {
+ panic("invalid ValAndOff.addOffset32")
+ }
+ return makeValAndOff(x.Val(), x.Off()+off)
+}
+func (x ValAndOff) addOffset64(off int64) ValAndOff {
+ if !x.canAdd64(off) {
+ panic("invalid ValAndOff.addOffset64")
+ }
+ return makeValAndOff(x.Val(), x.Off()+int32(off))
+}
+
+// int128 is a type that stores a 128-bit constant.
+// The only allowed constant right now is 0, so we can cheat quite a bit.
+type int128 int64
+
+type BoundsKind uint8
+
+const (
+ BoundsIndex BoundsKind = iota // indexing operation, 0 <= idx < len failed
+ BoundsIndexU // ... with unsigned idx
+ BoundsSliceAlen // 2-arg slicing operation, 0 <= high <= len failed
+ BoundsSliceAlenU // ... with unsigned high
+ BoundsSliceAcap // 2-arg slicing operation, 0 <= high <= cap failed
+ BoundsSliceAcapU // ... with unsigned high
+ BoundsSliceB // 2-arg slicing operation, 0 <= low <= high failed
+ BoundsSliceBU // ... with unsigned low
+ BoundsSlice3Alen // 3-arg slicing operation, 0 <= max <= len failed
+ BoundsSlice3AlenU // ... with unsigned max
+ BoundsSlice3Acap // 3-arg slicing operation, 0 <= max <= cap failed
+ BoundsSlice3AcapU // ... with unsigned max
+ BoundsSlice3B // 3-arg slicing operation, 0 <= high <= max failed
+ BoundsSlice3BU // ... with unsigned high
+ BoundsSlice3C // 3-arg slicing operation, 0 <= low <= high failed
+ BoundsSlice3CU // ... with unsigned low
+ BoundsConvert // conversion to array pointer failed
+ BoundsKindCount
+)
+
+// boundsABI determines which register arguments a bounds check call should use. For an [a:b:c] slice, we do:
+// CMPQ c, cap
+// JA fail1
+// CMPQ b, c
+// JA fail2
+// CMPQ a, b
+// JA fail3
+//
+// fail1: CALL panicSlice3Acap (c, cap)
+// fail2: CALL panicSlice3B (b, c)
+// fail3: CALL panicSlice3C (a, b)
+//
+// When we register allocate that code, we want the same register to be used for
+// the first arg of panicSlice3Acap and the second arg to panicSlice3B. That way,
+// initializing that register once will satisfy both calls.
+// That desire ends up dividing the set of bounds check calls into 3 sets. This function
+// determines which set to use for a given panic call.
+// The first arg for set 0 should be the second arg for set 1.
+// The first arg for set 1 should be the second arg for set 2.
+func boundsABI(b int64) int {
+ switch BoundsKind(b) {
+ case BoundsSlice3Alen,
+ BoundsSlice3AlenU,
+ BoundsSlice3Acap,
+ BoundsSlice3AcapU,
+ BoundsConvert:
+ return 0
+ case BoundsSliceAlen,
+ BoundsSliceAlenU,
+ BoundsSliceAcap,
+ BoundsSliceAcapU,
+ BoundsSlice3B,
+ BoundsSlice3BU:
+ return 1
+ case BoundsIndex,
+ BoundsIndexU,
+ BoundsSliceB,
+ BoundsSliceBU,
+ BoundsSlice3C,
+ BoundsSlice3CU:
+ return 2
+ default:
+ panic("bad BoundsKind")
+ }
+}
+
+// arm64BitField is the Go type of the ARM64BitField auxInt.
+// If x is an ARM64BitField, then width=x&0xff, lsb=(x>>8)&0xff, and
+// width+lsb<64 for the 64-bit variant, width+lsb<32 for the 32-bit variant.
+// The meaning of width and lsb is instruction-dependent.
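+// For example, auxInt 0x0810 encodes lsb=8 and width=16.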
+type arm64BitField int16
diff --git a/src/cmd/compile/internal/ssa/opGen.go b/src/cmd/compile/internal/ssa/opGen.go
new file mode 100644
index 0000000..81fe5d4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opGen.go
@@ -0,0 +1,37469 @@
+// Code generated from gen/*Ops.go; DO NOT EDIT.
+
+package ssa
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/obj/arm"
+ "cmd/internal/obj/arm64"
+ "cmd/internal/obj/mips"
+ "cmd/internal/obj/ppc64"
+ "cmd/internal/obj/riscv"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/obj/wasm"
+ "cmd/internal/obj/x86"
+)
+
+const (
+ BlockInvalid BlockKind = iota
+
+ Block386EQ
+ Block386NE
+ Block386LT
+ Block386LE
+ Block386GT
+ Block386GE
+ Block386OS
+ Block386OC
+ Block386ULT
+ Block386ULE
+ Block386UGT
+ Block386UGE
+ Block386EQF
+ Block386NEF
+ Block386ORD
+ Block386NAN
+
+ BlockAMD64EQ
+ BlockAMD64NE
+ BlockAMD64LT
+ BlockAMD64LE
+ BlockAMD64GT
+ BlockAMD64GE
+ BlockAMD64OS
+ BlockAMD64OC
+ BlockAMD64ULT
+ BlockAMD64ULE
+ BlockAMD64UGT
+ BlockAMD64UGE
+ BlockAMD64EQF
+ BlockAMD64NEF
+ BlockAMD64ORD
+ BlockAMD64NAN
+
+ BlockARMEQ
+ BlockARMNE
+ BlockARMLT
+ BlockARMLE
+ BlockARMGT
+ BlockARMGE
+ BlockARMULT
+ BlockARMULE
+ BlockARMUGT
+ BlockARMUGE
+ BlockARMLTnoov
+ BlockARMLEnoov
+ BlockARMGTnoov
+ BlockARMGEnoov
+
+ BlockARM64EQ
+ BlockARM64NE
+ BlockARM64LT
+ BlockARM64LE
+ BlockARM64GT
+ BlockARM64GE
+ BlockARM64ULT
+ BlockARM64ULE
+ BlockARM64UGT
+ BlockARM64UGE
+ BlockARM64Z
+ BlockARM64NZ
+ BlockARM64ZW
+ BlockARM64NZW
+ BlockARM64TBZ
+ BlockARM64TBNZ
+ BlockARM64FLT
+ BlockARM64FLE
+ BlockARM64FGT
+ BlockARM64FGE
+ BlockARM64LTnoov
+ BlockARM64LEnoov
+ BlockARM64GTnoov
+ BlockARM64GEnoov
+
+ BlockMIPSEQ
+ BlockMIPSNE
+ BlockMIPSLTZ
+ BlockMIPSLEZ
+ BlockMIPSGTZ
+ BlockMIPSGEZ
+ BlockMIPSFPT
+ BlockMIPSFPF
+
+ BlockMIPS64EQ
+ BlockMIPS64NE
+ BlockMIPS64LTZ
+ BlockMIPS64LEZ
+ BlockMIPS64GTZ
+ BlockMIPS64GEZ
+ BlockMIPS64FPT
+ BlockMIPS64FPF
+
+ BlockPPC64EQ
+ BlockPPC64NE
+ BlockPPC64LT
+ BlockPPC64LE
+ BlockPPC64GT
+ BlockPPC64GE
+ BlockPPC64FLT
+ BlockPPC64FLE
+ BlockPPC64FGT
+ BlockPPC64FGE
+
+ BlockRISCV64BEQ
+ BlockRISCV64BNE
+ BlockRISCV64BLT
+ BlockRISCV64BGE
+ BlockRISCV64BLTU
+ BlockRISCV64BGEU
+ BlockRISCV64BEQZ
+ BlockRISCV64BNEZ
+ BlockRISCV64BLEZ
+ BlockRISCV64BGEZ
+ BlockRISCV64BLTZ
+ BlockRISCV64BGTZ
+
+ BlockS390XBRC
+ BlockS390XCRJ
+ BlockS390XCGRJ
+ BlockS390XCLRJ
+ BlockS390XCLGRJ
+ BlockS390XCIJ
+ BlockS390XCGIJ
+ BlockS390XCLIJ
+ BlockS390XCLGIJ
+
+ BlockPlain
+ BlockIf
+ BlockDefer
+ BlockRet
+ BlockRetJmp
+ BlockExit
+ BlockFirst
+)
+
+var blockString = [...]string{
+ BlockInvalid: "BlockInvalid",
+
+ Block386EQ: "EQ",
+ Block386NE: "NE",
+ Block386LT: "LT",
+ Block386LE: "LE",
+ Block386GT: "GT",
+ Block386GE: "GE",
+ Block386OS: "OS",
+ Block386OC: "OC",
+ Block386ULT: "ULT",
+ Block386ULE: "ULE",
+ Block386UGT: "UGT",
+ Block386UGE: "UGE",
+ Block386EQF: "EQF",
+ Block386NEF: "NEF",
+ Block386ORD: "ORD",
+ Block386NAN: "NAN",
+
+ BlockAMD64EQ: "EQ",
+ BlockAMD64NE: "NE",
+ BlockAMD64LT: "LT",
+ BlockAMD64LE: "LE",
+ BlockAMD64GT: "GT",
+ BlockAMD64GE: "GE",
+ BlockAMD64OS: "OS",
+ BlockAMD64OC: "OC",
+ BlockAMD64ULT: "ULT",
+ BlockAMD64ULE: "ULE",
+ BlockAMD64UGT: "UGT",
+ BlockAMD64UGE: "UGE",
+ BlockAMD64EQF: "EQF",
+ BlockAMD64NEF: "NEF",
+ BlockAMD64ORD: "ORD",
+ BlockAMD64NAN: "NAN",
+
+ BlockARMEQ: "EQ",
+ BlockARMNE: "NE",
+ BlockARMLT: "LT",
+ BlockARMLE: "LE",
+ BlockARMGT: "GT",
+ BlockARMGE: "GE",
+ BlockARMULT: "ULT",
+ BlockARMULE: "ULE",
+ BlockARMUGT: "UGT",
+ BlockARMUGE: "UGE",
+ BlockARMLTnoov: "LTnoov",
+ BlockARMLEnoov: "LEnoov",
+ BlockARMGTnoov: "GTnoov",
+ BlockARMGEnoov: "GEnoov",
+
+ BlockARM64EQ: "EQ",
+ BlockARM64NE: "NE",
+ BlockARM64LT: "LT",
+ BlockARM64LE: "LE",
+ BlockARM64GT: "GT",
+ BlockARM64GE: "GE",
+ BlockARM64ULT: "ULT",
+ BlockARM64ULE: "ULE",
+ BlockARM64UGT: "UGT",
+ BlockARM64UGE: "UGE",
+ BlockARM64Z: "Z",
+ BlockARM64NZ: "NZ",
+ BlockARM64ZW: "ZW",
+ BlockARM64NZW: "NZW",
+ BlockARM64TBZ: "TBZ",
+ BlockARM64TBNZ: "TBNZ",
+ BlockARM64FLT: "FLT",
+ BlockARM64FLE: "FLE",
+ BlockARM64FGT: "FGT",
+ BlockARM64FGE: "FGE",
+ BlockARM64LTnoov: "LTnoov",
+ BlockARM64LEnoov: "LEnoov",
+ BlockARM64GTnoov: "GTnoov",
+ BlockARM64GEnoov: "GEnoov",
+
+ BlockMIPSEQ: "EQ",
+ BlockMIPSNE: "NE",
+ BlockMIPSLTZ: "LTZ",
+ BlockMIPSLEZ: "LEZ",
+ BlockMIPSGTZ: "GTZ",
+ BlockMIPSGEZ: "GEZ",
+ BlockMIPSFPT: "FPT",
+ BlockMIPSFPF: "FPF",
+
+ BlockMIPS64EQ: "EQ",
+ BlockMIPS64NE: "NE",
+ BlockMIPS64LTZ: "LTZ",
+ BlockMIPS64LEZ: "LEZ",
+ BlockMIPS64GTZ: "GTZ",
+ BlockMIPS64GEZ: "GEZ",
+ BlockMIPS64FPT: "FPT",
+ BlockMIPS64FPF: "FPF",
+
+ BlockPPC64EQ: "EQ",
+ BlockPPC64NE: "NE",
+ BlockPPC64LT: "LT",
+ BlockPPC64LE: "LE",
+ BlockPPC64GT: "GT",
+ BlockPPC64GE: "GE",
+ BlockPPC64FLT: "FLT",
+ BlockPPC64FLE: "FLE",
+ BlockPPC64FGT: "FGT",
+ BlockPPC64FGE: "FGE",
+
+ BlockRISCV64BEQ: "BEQ",
+ BlockRISCV64BNE: "BNE",
+ BlockRISCV64BLT: "BLT",
+ BlockRISCV64BGE: "BGE",
+ BlockRISCV64BLTU: "BLTU",
+ BlockRISCV64BGEU: "BGEU",
+ BlockRISCV64BEQZ: "BEQZ",
+ BlockRISCV64BNEZ: "BNEZ",
+ BlockRISCV64BLEZ: "BLEZ",
+ BlockRISCV64BGEZ: "BGEZ",
+ BlockRISCV64BLTZ: "BLTZ",
+ BlockRISCV64BGTZ: "BGTZ",
+
+ BlockS390XBRC: "BRC",
+ BlockS390XCRJ: "CRJ",
+ BlockS390XCGRJ: "CGRJ",
+ BlockS390XCLRJ: "CLRJ",
+ BlockS390XCLGRJ: "CLGRJ",
+ BlockS390XCIJ: "CIJ",
+ BlockS390XCGIJ: "CGIJ",
+ BlockS390XCLIJ: "CLIJ",
+ BlockS390XCLGIJ: "CLGIJ",
+
+ BlockPlain: "Plain",
+ BlockIf: "If",
+ BlockDefer: "Defer",
+ BlockRet: "Ret",
+ BlockRetJmp: "RetJmp",
+ BlockExit: "Exit",
+ BlockFirst: "First",
+}
+
+func (k BlockKind) String() string { return blockString[k] }
+func (k BlockKind) AuxIntType() string {
+ switch k {
+ case BlockARM64TBZ:
+ return "int64"
+ case BlockARM64TBNZ:
+ return "int64"
+ case BlockS390XCIJ:
+ return "int8"
+ case BlockS390XCGIJ:
+ return "int8"
+ case BlockS390XCLIJ:
+ return "uint8"
+ case BlockS390XCLGIJ:
+ return "uint8"
+ }
+ return ""
+}
+
+const (
+ OpInvalid Op = iota
+
+ Op386ADDSS
+ Op386ADDSD
+ Op386SUBSS
+ Op386SUBSD
+ Op386MULSS
+ Op386MULSD
+ Op386DIVSS
+ Op386DIVSD
+ Op386MOVSSload
+ Op386MOVSDload
+ Op386MOVSSconst
+ Op386MOVSDconst
+ Op386MOVSSloadidx1
+ Op386MOVSSloadidx4
+ Op386MOVSDloadidx1
+ Op386MOVSDloadidx8
+ Op386MOVSSstore
+ Op386MOVSDstore
+ Op386MOVSSstoreidx1
+ Op386MOVSSstoreidx4
+ Op386MOVSDstoreidx1
+ Op386MOVSDstoreidx8
+ Op386ADDSSload
+ Op386ADDSDload
+ Op386SUBSSload
+ Op386SUBSDload
+ Op386MULSSload
+ Op386MULSDload
+ Op386DIVSSload
+ Op386DIVSDload
+ Op386ADDL
+ Op386ADDLconst
+ Op386ADDLcarry
+ Op386ADDLconstcarry
+ Op386ADCL
+ Op386ADCLconst
+ Op386SUBL
+ Op386SUBLconst
+ Op386SUBLcarry
+ Op386SUBLconstcarry
+ Op386SBBL
+ Op386SBBLconst
+ Op386MULL
+ Op386MULLconst
+ Op386MULLU
+ Op386HMULL
+ Op386HMULLU
+ Op386MULLQU
+ Op386AVGLU
+ Op386DIVL
+ Op386DIVW
+ Op386DIVLU
+ Op386DIVWU
+ Op386MODL
+ Op386MODW
+ Op386MODLU
+ Op386MODWU
+ Op386ANDL
+ Op386ANDLconst
+ Op386ORL
+ Op386ORLconst
+ Op386XORL
+ Op386XORLconst
+ Op386CMPL
+ Op386CMPW
+ Op386CMPB
+ Op386CMPLconst
+ Op386CMPWconst
+ Op386CMPBconst
+ Op386CMPLload
+ Op386CMPWload
+ Op386CMPBload
+ Op386CMPLconstload
+ Op386CMPWconstload
+ Op386CMPBconstload
+ Op386UCOMISS
+ Op386UCOMISD
+ Op386TESTL
+ Op386TESTW
+ Op386TESTB
+ Op386TESTLconst
+ Op386TESTWconst
+ Op386TESTBconst
+ Op386SHLL
+ Op386SHLLconst
+ Op386SHRL
+ Op386SHRW
+ Op386SHRB
+ Op386SHRLconst
+ Op386SHRWconst
+ Op386SHRBconst
+ Op386SARL
+ Op386SARW
+ Op386SARB
+ Op386SARLconst
+ Op386SARWconst
+ Op386SARBconst
+ Op386ROLLconst
+ Op386ROLWconst
+ Op386ROLBconst
+ Op386ADDLload
+ Op386SUBLload
+ Op386MULLload
+ Op386ANDLload
+ Op386ORLload
+ Op386XORLload
+ Op386ADDLloadidx4
+ Op386SUBLloadidx4
+ Op386MULLloadidx4
+ Op386ANDLloadidx4
+ Op386ORLloadidx4
+ Op386XORLloadidx4
+ Op386NEGL
+ Op386NOTL
+ Op386BSFL
+ Op386BSFW
+ Op386BSRL
+ Op386BSRW
+ Op386BSWAPL
+ Op386SQRTSD
+ Op386SQRTSS
+ Op386SBBLcarrymask
+ Op386SETEQ
+ Op386SETNE
+ Op386SETL
+ Op386SETLE
+ Op386SETG
+ Op386SETGE
+ Op386SETB
+ Op386SETBE
+ Op386SETA
+ Op386SETAE
+ Op386SETO
+ Op386SETEQF
+ Op386SETNEF
+ Op386SETORD
+ Op386SETNAN
+ Op386SETGF
+ Op386SETGEF
+ Op386MOVBLSX
+ Op386MOVBLZX
+ Op386MOVWLSX
+ Op386MOVWLZX
+ Op386MOVLconst
+ Op386CVTTSD2SL
+ Op386CVTTSS2SL
+ Op386CVTSL2SS
+ Op386CVTSL2SD
+ Op386CVTSD2SS
+ Op386CVTSS2SD
+ Op386PXOR
+ Op386LEAL
+ Op386LEAL1
+ Op386LEAL2
+ Op386LEAL4
+ Op386LEAL8
+ Op386MOVBload
+ Op386MOVBLSXload
+ Op386MOVWload
+ Op386MOVWLSXload
+ Op386MOVLload
+ Op386MOVBstore
+ Op386MOVWstore
+ Op386MOVLstore
+ Op386ADDLmodify
+ Op386SUBLmodify
+ Op386ANDLmodify
+ Op386ORLmodify
+ Op386XORLmodify
+ Op386ADDLmodifyidx4
+ Op386SUBLmodifyidx4
+ Op386ANDLmodifyidx4
+ Op386ORLmodifyidx4
+ Op386XORLmodifyidx4
+ Op386ADDLconstmodify
+ Op386ANDLconstmodify
+ Op386ORLconstmodify
+ Op386XORLconstmodify
+ Op386ADDLconstmodifyidx4
+ Op386ANDLconstmodifyidx4
+ Op386ORLconstmodifyidx4
+ Op386XORLconstmodifyidx4
+ Op386MOVBloadidx1
+ Op386MOVWloadidx1
+ Op386MOVWloadidx2
+ Op386MOVLloadidx1
+ Op386MOVLloadidx4
+ Op386MOVBstoreidx1
+ Op386MOVWstoreidx1
+ Op386MOVWstoreidx2
+ Op386MOVLstoreidx1
+ Op386MOVLstoreidx4
+ Op386MOVBstoreconst
+ Op386MOVWstoreconst
+ Op386MOVLstoreconst
+ Op386MOVBstoreconstidx1
+ Op386MOVWstoreconstidx1
+ Op386MOVWstoreconstidx2
+ Op386MOVLstoreconstidx1
+ Op386MOVLstoreconstidx4
+ Op386DUFFZERO
+ Op386REPSTOSL
+ Op386CALLstatic
+ Op386CALLtail
+ Op386CALLclosure
+ Op386CALLinter
+ Op386DUFFCOPY
+ Op386REPMOVSL
+ Op386InvertFlags
+ Op386LoweredGetG
+ Op386LoweredGetClosurePtr
+ Op386LoweredGetCallerPC
+ Op386LoweredGetCallerSP
+ Op386LoweredNilCheck
+ Op386LoweredWB
+ Op386LoweredPanicBoundsA
+ Op386LoweredPanicBoundsB
+ Op386LoweredPanicBoundsC
+ Op386LoweredPanicExtendA
+ Op386LoweredPanicExtendB
+ Op386LoweredPanicExtendC
+ Op386FlagEQ
+ Op386FlagLT_ULT
+ Op386FlagLT_UGT
+ Op386FlagGT_UGT
+ Op386FlagGT_ULT
+ Op386MOVSSconst1
+ Op386MOVSDconst1
+ Op386MOVSSconst2
+ Op386MOVSDconst2
+
+ OpAMD64ADDSS
+ OpAMD64ADDSD
+ OpAMD64SUBSS
+ OpAMD64SUBSD
+ OpAMD64MULSS
+ OpAMD64MULSD
+ OpAMD64DIVSS
+ OpAMD64DIVSD
+ OpAMD64MOVSSload
+ OpAMD64MOVSDload
+ OpAMD64MOVSSconst
+ OpAMD64MOVSDconst
+ OpAMD64MOVSSloadidx1
+ OpAMD64MOVSSloadidx4
+ OpAMD64MOVSDloadidx1
+ OpAMD64MOVSDloadidx8
+ OpAMD64MOVSSstore
+ OpAMD64MOVSDstore
+ OpAMD64MOVSSstoreidx1
+ OpAMD64MOVSSstoreidx4
+ OpAMD64MOVSDstoreidx1
+ OpAMD64MOVSDstoreidx8
+ OpAMD64ADDSSload
+ OpAMD64ADDSDload
+ OpAMD64SUBSSload
+ OpAMD64SUBSDload
+ OpAMD64MULSSload
+ OpAMD64MULSDload
+ OpAMD64DIVSSload
+ OpAMD64DIVSDload
+ OpAMD64ADDSSloadidx1
+ OpAMD64ADDSSloadidx4
+ OpAMD64ADDSDloadidx1
+ OpAMD64ADDSDloadidx8
+ OpAMD64SUBSSloadidx1
+ OpAMD64SUBSSloadidx4
+ OpAMD64SUBSDloadidx1
+ OpAMD64SUBSDloadidx8
+ OpAMD64MULSSloadidx1
+ OpAMD64MULSSloadidx4
+ OpAMD64MULSDloadidx1
+ OpAMD64MULSDloadidx8
+ OpAMD64DIVSSloadidx1
+ OpAMD64DIVSSloadidx4
+ OpAMD64DIVSDloadidx1
+ OpAMD64DIVSDloadidx8
+ OpAMD64ADDQ
+ OpAMD64ADDL
+ OpAMD64ADDQconst
+ OpAMD64ADDLconst
+ OpAMD64ADDQconstmodify
+ OpAMD64ADDLconstmodify
+ OpAMD64SUBQ
+ OpAMD64SUBL
+ OpAMD64SUBQconst
+ OpAMD64SUBLconst
+ OpAMD64MULQ
+ OpAMD64MULL
+ OpAMD64MULQconst
+ OpAMD64MULLconst
+ OpAMD64MULLU
+ OpAMD64MULQU
+ OpAMD64HMULQ
+ OpAMD64HMULL
+ OpAMD64HMULQU
+ OpAMD64HMULLU
+ OpAMD64AVGQU
+ OpAMD64DIVQ
+ OpAMD64DIVL
+ OpAMD64DIVW
+ OpAMD64DIVQU
+ OpAMD64DIVLU
+ OpAMD64DIVWU
+ OpAMD64NEGLflags
+ OpAMD64ADDQcarry
+ OpAMD64ADCQ
+ OpAMD64ADDQconstcarry
+ OpAMD64ADCQconst
+ OpAMD64SUBQborrow
+ OpAMD64SBBQ
+ OpAMD64SUBQconstborrow
+ OpAMD64SBBQconst
+ OpAMD64MULQU2
+ OpAMD64DIVQU2
+ OpAMD64ANDQ
+ OpAMD64ANDL
+ OpAMD64ANDQconst
+ OpAMD64ANDLconst
+ OpAMD64ANDQconstmodify
+ OpAMD64ANDLconstmodify
+ OpAMD64ORQ
+ OpAMD64ORL
+ OpAMD64ORQconst
+ OpAMD64ORLconst
+ OpAMD64ORQconstmodify
+ OpAMD64ORLconstmodify
+ OpAMD64XORQ
+ OpAMD64XORL
+ OpAMD64XORQconst
+ OpAMD64XORLconst
+ OpAMD64XORQconstmodify
+ OpAMD64XORLconstmodify
+ OpAMD64CMPQ
+ OpAMD64CMPL
+ OpAMD64CMPW
+ OpAMD64CMPB
+ OpAMD64CMPQconst
+ OpAMD64CMPLconst
+ OpAMD64CMPWconst
+ OpAMD64CMPBconst
+ OpAMD64CMPQload
+ OpAMD64CMPLload
+ OpAMD64CMPWload
+ OpAMD64CMPBload
+ OpAMD64CMPQconstload
+ OpAMD64CMPLconstload
+ OpAMD64CMPWconstload
+ OpAMD64CMPBconstload
+ OpAMD64CMPQloadidx8
+ OpAMD64CMPQloadidx1
+ OpAMD64CMPLloadidx4
+ OpAMD64CMPLloadidx1
+ OpAMD64CMPWloadidx2
+ OpAMD64CMPWloadidx1
+ OpAMD64CMPBloadidx1
+ OpAMD64CMPQconstloadidx8
+ OpAMD64CMPQconstloadidx1
+ OpAMD64CMPLconstloadidx4
+ OpAMD64CMPLconstloadidx1
+ OpAMD64CMPWconstloadidx2
+ OpAMD64CMPWconstloadidx1
+ OpAMD64CMPBconstloadidx1
+ OpAMD64UCOMISS
+ OpAMD64UCOMISD
+ OpAMD64BTL
+ OpAMD64BTQ
+ OpAMD64BTCL
+ OpAMD64BTCQ
+ OpAMD64BTRL
+ OpAMD64BTRQ
+ OpAMD64BTSL
+ OpAMD64BTSQ
+ OpAMD64BTLconst
+ OpAMD64BTQconst
+ OpAMD64BTCLconst
+ OpAMD64BTCQconst
+ OpAMD64BTRLconst
+ OpAMD64BTRQconst
+ OpAMD64BTSLconst
+ OpAMD64BTSQconst
+ OpAMD64TESTQ
+ OpAMD64TESTL
+ OpAMD64TESTW
+ OpAMD64TESTB
+ OpAMD64TESTQconst
+ OpAMD64TESTLconst
+ OpAMD64TESTWconst
+ OpAMD64TESTBconst
+ OpAMD64SHLQ
+ OpAMD64SHLL
+ OpAMD64SHLQconst
+ OpAMD64SHLLconst
+ OpAMD64SHRQ
+ OpAMD64SHRL
+ OpAMD64SHRW
+ OpAMD64SHRB
+ OpAMD64SHRQconst
+ OpAMD64SHRLconst
+ OpAMD64SHRWconst
+ OpAMD64SHRBconst
+ OpAMD64SARQ
+ OpAMD64SARL
+ OpAMD64SARW
+ OpAMD64SARB
+ OpAMD64SARQconst
+ OpAMD64SARLconst
+ OpAMD64SARWconst
+ OpAMD64SARBconst
+ OpAMD64SHRDQ
+ OpAMD64SHLDQ
+ OpAMD64ROLQ
+ OpAMD64ROLL
+ OpAMD64ROLW
+ OpAMD64ROLB
+ OpAMD64RORQ
+ OpAMD64RORL
+ OpAMD64RORW
+ OpAMD64RORB
+ OpAMD64ROLQconst
+ OpAMD64ROLLconst
+ OpAMD64ROLWconst
+ OpAMD64ROLBconst
+ OpAMD64ADDLload
+ OpAMD64ADDQload
+ OpAMD64SUBQload
+ OpAMD64SUBLload
+ OpAMD64ANDLload
+ OpAMD64ANDQload
+ OpAMD64ORQload
+ OpAMD64ORLload
+ OpAMD64XORQload
+ OpAMD64XORLload
+ OpAMD64ADDLloadidx1
+ OpAMD64ADDLloadidx4
+ OpAMD64ADDLloadidx8
+ OpAMD64ADDQloadidx1
+ OpAMD64ADDQloadidx8
+ OpAMD64SUBLloadidx1
+ OpAMD64SUBLloadidx4
+ OpAMD64SUBLloadidx8
+ OpAMD64SUBQloadidx1
+ OpAMD64SUBQloadidx8
+ OpAMD64ANDLloadidx1
+ OpAMD64ANDLloadidx4
+ OpAMD64ANDLloadidx8
+ OpAMD64ANDQloadidx1
+ OpAMD64ANDQloadidx8
+ OpAMD64ORLloadidx1
+ OpAMD64ORLloadidx4
+ OpAMD64ORLloadidx8
+ OpAMD64ORQloadidx1
+ OpAMD64ORQloadidx8
+ OpAMD64XORLloadidx1
+ OpAMD64XORLloadidx4
+ OpAMD64XORLloadidx8
+ OpAMD64XORQloadidx1
+ OpAMD64XORQloadidx8
+ OpAMD64ADDQmodify
+ OpAMD64SUBQmodify
+ OpAMD64ANDQmodify
+ OpAMD64ORQmodify
+ OpAMD64XORQmodify
+ OpAMD64ADDLmodify
+ OpAMD64SUBLmodify
+ OpAMD64ANDLmodify
+ OpAMD64ORLmodify
+ OpAMD64XORLmodify
+ OpAMD64ADDQmodifyidx1
+ OpAMD64ADDQmodifyidx8
+ OpAMD64SUBQmodifyidx1
+ OpAMD64SUBQmodifyidx8
+ OpAMD64ANDQmodifyidx1
+ OpAMD64ANDQmodifyidx8
+ OpAMD64ORQmodifyidx1
+ OpAMD64ORQmodifyidx8
+ OpAMD64XORQmodifyidx1
+ OpAMD64XORQmodifyidx8
+ OpAMD64ADDLmodifyidx1
+ OpAMD64ADDLmodifyidx4
+ OpAMD64ADDLmodifyidx8
+ OpAMD64SUBLmodifyidx1
+ OpAMD64SUBLmodifyidx4
+ OpAMD64SUBLmodifyidx8
+ OpAMD64ANDLmodifyidx1
+ OpAMD64ANDLmodifyidx4
+ OpAMD64ANDLmodifyidx8
+ OpAMD64ORLmodifyidx1
+ OpAMD64ORLmodifyidx4
+ OpAMD64ORLmodifyidx8
+ OpAMD64XORLmodifyidx1
+ OpAMD64XORLmodifyidx4
+ OpAMD64XORLmodifyidx8
+ OpAMD64ADDQconstmodifyidx1
+ OpAMD64ADDQconstmodifyidx8
+ OpAMD64ANDQconstmodifyidx1
+ OpAMD64ANDQconstmodifyidx8
+ OpAMD64ORQconstmodifyidx1
+ OpAMD64ORQconstmodifyidx8
+ OpAMD64XORQconstmodifyidx1
+ OpAMD64XORQconstmodifyidx8
+ OpAMD64ADDLconstmodifyidx1
+ OpAMD64ADDLconstmodifyidx4
+ OpAMD64ADDLconstmodifyidx8
+ OpAMD64ANDLconstmodifyidx1
+ OpAMD64ANDLconstmodifyidx4
+ OpAMD64ANDLconstmodifyidx8
+ OpAMD64ORLconstmodifyidx1
+ OpAMD64ORLconstmodifyidx4
+ OpAMD64ORLconstmodifyidx8
+ OpAMD64XORLconstmodifyidx1
+ OpAMD64XORLconstmodifyidx4
+ OpAMD64XORLconstmodifyidx8
+ OpAMD64NEGQ
+ OpAMD64NEGL
+ OpAMD64NOTQ
+ OpAMD64NOTL
+ OpAMD64BSFQ
+ OpAMD64BSFL
+ OpAMD64BSRQ
+ OpAMD64BSRL
+ OpAMD64CMOVQEQ
+ OpAMD64CMOVQNE
+ OpAMD64CMOVQLT
+ OpAMD64CMOVQGT
+ OpAMD64CMOVQLE
+ OpAMD64CMOVQGE
+ OpAMD64CMOVQLS
+ OpAMD64CMOVQHI
+ OpAMD64CMOVQCC
+ OpAMD64CMOVQCS
+ OpAMD64CMOVLEQ
+ OpAMD64CMOVLNE
+ OpAMD64CMOVLLT
+ OpAMD64CMOVLGT
+ OpAMD64CMOVLLE
+ OpAMD64CMOVLGE
+ OpAMD64CMOVLLS
+ OpAMD64CMOVLHI
+ OpAMD64CMOVLCC
+ OpAMD64CMOVLCS
+ OpAMD64CMOVWEQ
+ OpAMD64CMOVWNE
+ OpAMD64CMOVWLT
+ OpAMD64CMOVWGT
+ OpAMD64CMOVWLE
+ OpAMD64CMOVWGE
+ OpAMD64CMOVWLS
+ OpAMD64CMOVWHI
+ OpAMD64CMOVWCC
+ OpAMD64CMOVWCS
+ OpAMD64CMOVQEQF
+ OpAMD64CMOVQNEF
+ OpAMD64CMOVQGTF
+ OpAMD64CMOVQGEF
+ OpAMD64CMOVLEQF
+ OpAMD64CMOVLNEF
+ OpAMD64CMOVLGTF
+ OpAMD64CMOVLGEF
+ OpAMD64CMOVWEQF
+ OpAMD64CMOVWNEF
+ OpAMD64CMOVWGTF
+ OpAMD64CMOVWGEF
+ OpAMD64BSWAPQ
+ OpAMD64BSWAPL
+ OpAMD64POPCNTQ
+ OpAMD64POPCNTL
+ OpAMD64SQRTSD
+ OpAMD64SQRTSS
+ OpAMD64ROUNDSD
+ OpAMD64VFMADD231SD
+ OpAMD64SBBQcarrymask
+ OpAMD64SBBLcarrymask
+ OpAMD64SETEQ
+ OpAMD64SETNE
+ OpAMD64SETL
+ OpAMD64SETLE
+ OpAMD64SETG
+ OpAMD64SETGE
+ OpAMD64SETB
+ OpAMD64SETBE
+ OpAMD64SETA
+ OpAMD64SETAE
+ OpAMD64SETO
+ OpAMD64SETEQstore
+ OpAMD64SETNEstore
+ OpAMD64SETLstore
+ OpAMD64SETLEstore
+ OpAMD64SETGstore
+ OpAMD64SETGEstore
+ OpAMD64SETBstore
+ OpAMD64SETBEstore
+ OpAMD64SETAstore
+ OpAMD64SETAEstore
+ OpAMD64SETEQF
+ OpAMD64SETNEF
+ OpAMD64SETORD
+ OpAMD64SETNAN
+ OpAMD64SETGF
+ OpAMD64SETGEF
+ OpAMD64MOVBQSX
+ OpAMD64MOVBQZX
+ OpAMD64MOVWQSX
+ OpAMD64MOVWQZX
+ OpAMD64MOVLQSX
+ OpAMD64MOVLQZX
+ OpAMD64MOVLconst
+ OpAMD64MOVQconst
+ OpAMD64CVTTSD2SL
+ OpAMD64CVTTSD2SQ
+ OpAMD64CVTTSS2SL
+ OpAMD64CVTTSS2SQ
+ OpAMD64CVTSL2SS
+ OpAMD64CVTSL2SD
+ OpAMD64CVTSQ2SS
+ OpAMD64CVTSQ2SD
+ OpAMD64CVTSD2SS
+ OpAMD64CVTSS2SD
+ OpAMD64MOVQi2f
+ OpAMD64MOVQf2i
+ OpAMD64MOVLi2f
+ OpAMD64MOVLf2i
+ OpAMD64PXOR
+ OpAMD64LEAQ
+ OpAMD64LEAL
+ OpAMD64LEAW
+ OpAMD64LEAQ1
+ OpAMD64LEAL1
+ OpAMD64LEAW1
+ OpAMD64LEAQ2
+ OpAMD64LEAL2
+ OpAMD64LEAW2
+ OpAMD64LEAQ4
+ OpAMD64LEAL4
+ OpAMD64LEAW4
+ OpAMD64LEAQ8
+ OpAMD64LEAL8
+ OpAMD64LEAW8
+ OpAMD64MOVBload
+ OpAMD64MOVBQSXload
+ OpAMD64MOVWload
+ OpAMD64MOVWQSXload
+ OpAMD64MOVLload
+ OpAMD64MOVLQSXload
+ OpAMD64MOVQload
+ OpAMD64MOVBstore
+ OpAMD64MOVWstore
+ OpAMD64MOVLstore
+ OpAMD64MOVQstore
+ OpAMD64MOVOload
+ OpAMD64MOVOstore
+ OpAMD64MOVBloadidx1
+ OpAMD64MOVWloadidx1
+ OpAMD64MOVWloadidx2
+ OpAMD64MOVLloadidx1
+ OpAMD64MOVLloadidx4
+ OpAMD64MOVLloadidx8
+ OpAMD64MOVQloadidx1
+ OpAMD64MOVQloadidx8
+ OpAMD64MOVBstoreidx1
+ OpAMD64MOVWstoreidx1
+ OpAMD64MOVWstoreidx2
+ OpAMD64MOVLstoreidx1
+ OpAMD64MOVLstoreidx4
+ OpAMD64MOVLstoreidx8
+ OpAMD64MOVQstoreidx1
+ OpAMD64MOVQstoreidx8
+ OpAMD64MOVBstoreconst
+ OpAMD64MOVWstoreconst
+ OpAMD64MOVLstoreconst
+ OpAMD64MOVQstoreconst
+ OpAMD64MOVOstoreconst
+ OpAMD64MOVBstoreconstidx1
+ OpAMD64MOVWstoreconstidx1
+ OpAMD64MOVWstoreconstidx2
+ OpAMD64MOVLstoreconstidx1
+ OpAMD64MOVLstoreconstidx4
+ OpAMD64MOVQstoreconstidx1
+ OpAMD64MOVQstoreconstidx8
+ OpAMD64DUFFZERO
+ OpAMD64REPSTOSQ
+ OpAMD64CALLstatic
+ OpAMD64CALLtail
+ OpAMD64CALLclosure
+ OpAMD64CALLinter
+ OpAMD64DUFFCOPY
+ OpAMD64REPMOVSQ
+ OpAMD64InvertFlags
+ OpAMD64LoweredGetG
+ OpAMD64LoweredGetClosurePtr
+ OpAMD64LoweredGetCallerPC
+ OpAMD64LoweredGetCallerSP
+ OpAMD64LoweredNilCheck
+ OpAMD64LoweredWB
+ OpAMD64LoweredHasCPUFeature
+ OpAMD64LoweredPanicBoundsA
+ OpAMD64LoweredPanicBoundsB
+ OpAMD64LoweredPanicBoundsC
+ OpAMD64FlagEQ
+ OpAMD64FlagLT_ULT
+ OpAMD64FlagLT_UGT
+ OpAMD64FlagGT_UGT
+ OpAMD64FlagGT_ULT
+ OpAMD64MOVBatomicload
+ OpAMD64MOVLatomicload
+ OpAMD64MOVQatomicload
+ OpAMD64XCHGB
+ OpAMD64XCHGL
+ OpAMD64XCHGQ
+ OpAMD64XADDLlock
+ OpAMD64XADDQlock
+ OpAMD64AddTupleFirst32
+ OpAMD64AddTupleFirst64
+ OpAMD64CMPXCHGLlock
+ OpAMD64CMPXCHGQlock
+ OpAMD64ANDBlock
+ OpAMD64ANDLlock
+ OpAMD64ORBlock
+ OpAMD64ORLlock
+ OpAMD64PrefetchT0
+ OpAMD64PrefetchNTA
+ OpAMD64ANDNQ
+ OpAMD64ANDNL
+ OpAMD64BLSIQ
+ OpAMD64BLSIL
+ OpAMD64BLSMSKQ
+ OpAMD64BLSMSKL
+ OpAMD64BLSRQ
+ OpAMD64BLSRL
+ OpAMD64TZCNTQ
+ OpAMD64TZCNTL
+ OpAMD64MOVBELload
+ OpAMD64MOVBELstore
+ OpAMD64MOVBEQload
+ OpAMD64MOVBEQstore
+
+ OpARMADD
+ OpARMADDconst
+ OpARMSUB
+ OpARMSUBconst
+ OpARMRSB
+ OpARMRSBconst
+ OpARMMUL
+ OpARMHMUL
+ OpARMHMULU
+ OpARMCALLudiv
+ OpARMADDS
+ OpARMADDSconst
+ OpARMADC
+ OpARMADCconst
+ OpARMSUBS
+ OpARMSUBSconst
+ OpARMRSBSconst
+ OpARMSBC
+ OpARMSBCconst
+ OpARMRSCconst
+ OpARMMULLU
+ OpARMMULA
+ OpARMMULS
+ OpARMADDF
+ OpARMADDD
+ OpARMSUBF
+ OpARMSUBD
+ OpARMMULF
+ OpARMMULD
+ OpARMNMULF
+ OpARMNMULD
+ OpARMDIVF
+ OpARMDIVD
+ OpARMMULAF
+ OpARMMULAD
+ OpARMMULSF
+ OpARMMULSD
+ OpARMFMULAD
+ OpARMAND
+ OpARMANDconst
+ OpARMOR
+ OpARMORconst
+ OpARMXOR
+ OpARMXORconst
+ OpARMBIC
+ OpARMBICconst
+ OpARMBFX
+ OpARMBFXU
+ OpARMMVN
+ OpARMNEGF
+ OpARMNEGD
+ OpARMSQRTD
+ OpARMSQRTF
+ OpARMABSD
+ OpARMCLZ
+ OpARMREV
+ OpARMREV16
+ OpARMRBIT
+ OpARMSLL
+ OpARMSLLconst
+ OpARMSRL
+ OpARMSRLconst
+ OpARMSRA
+ OpARMSRAconst
+ OpARMSRR
+ OpARMSRRconst
+ OpARMADDshiftLL
+ OpARMADDshiftRL
+ OpARMADDshiftRA
+ OpARMSUBshiftLL
+ OpARMSUBshiftRL
+ OpARMSUBshiftRA
+ OpARMRSBshiftLL
+ OpARMRSBshiftRL
+ OpARMRSBshiftRA
+ OpARMANDshiftLL
+ OpARMANDshiftRL
+ OpARMANDshiftRA
+ OpARMORshiftLL
+ OpARMORshiftRL
+ OpARMORshiftRA
+ OpARMXORshiftLL
+ OpARMXORshiftRL
+ OpARMXORshiftRA
+ OpARMXORshiftRR
+ OpARMBICshiftLL
+ OpARMBICshiftRL
+ OpARMBICshiftRA
+ OpARMMVNshiftLL
+ OpARMMVNshiftRL
+ OpARMMVNshiftRA
+ OpARMADCshiftLL
+ OpARMADCshiftRL
+ OpARMADCshiftRA
+ OpARMSBCshiftLL
+ OpARMSBCshiftRL
+ OpARMSBCshiftRA
+ OpARMRSCshiftLL
+ OpARMRSCshiftRL
+ OpARMRSCshiftRA
+ OpARMADDSshiftLL
+ OpARMADDSshiftRL
+ OpARMADDSshiftRA
+ OpARMSUBSshiftLL
+ OpARMSUBSshiftRL
+ OpARMSUBSshiftRA
+ OpARMRSBSshiftLL
+ OpARMRSBSshiftRL
+ OpARMRSBSshiftRA
+ OpARMADDshiftLLreg
+ OpARMADDshiftRLreg
+ OpARMADDshiftRAreg
+ OpARMSUBshiftLLreg
+ OpARMSUBshiftRLreg
+ OpARMSUBshiftRAreg
+ OpARMRSBshiftLLreg
+ OpARMRSBshiftRLreg
+ OpARMRSBshiftRAreg
+ OpARMANDshiftLLreg
+ OpARMANDshiftRLreg
+ OpARMANDshiftRAreg
+ OpARMORshiftLLreg
+ OpARMORshiftRLreg
+ OpARMORshiftRAreg
+ OpARMXORshiftLLreg
+ OpARMXORshiftRLreg
+ OpARMXORshiftRAreg
+ OpARMBICshiftLLreg
+ OpARMBICshiftRLreg
+ OpARMBICshiftRAreg
+ OpARMMVNshiftLLreg
+ OpARMMVNshiftRLreg
+ OpARMMVNshiftRAreg
+ OpARMADCshiftLLreg
+ OpARMADCshiftRLreg
+ OpARMADCshiftRAreg
+ OpARMSBCshiftLLreg
+ OpARMSBCshiftRLreg
+ OpARMSBCshiftRAreg
+ OpARMRSCshiftLLreg
+ OpARMRSCshiftRLreg
+ OpARMRSCshiftRAreg
+ OpARMADDSshiftLLreg
+ OpARMADDSshiftRLreg
+ OpARMADDSshiftRAreg
+ OpARMSUBSshiftLLreg
+ OpARMSUBSshiftRLreg
+ OpARMSUBSshiftRAreg
+ OpARMRSBSshiftLLreg
+ OpARMRSBSshiftRLreg
+ OpARMRSBSshiftRAreg
+ OpARMCMP
+ OpARMCMPconst
+ OpARMCMN
+ OpARMCMNconst
+ OpARMTST
+ OpARMTSTconst
+ OpARMTEQ
+ OpARMTEQconst
+ OpARMCMPF
+ OpARMCMPD
+ OpARMCMPshiftLL
+ OpARMCMPshiftRL
+ OpARMCMPshiftRA
+ OpARMCMNshiftLL
+ OpARMCMNshiftRL
+ OpARMCMNshiftRA
+ OpARMTSTshiftLL
+ OpARMTSTshiftRL
+ OpARMTSTshiftRA
+ OpARMTEQshiftLL
+ OpARMTEQshiftRL
+ OpARMTEQshiftRA
+ OpARMCMPshiftLLreg
+ OpARMCMPshiftRLreg
+ OpARMCMPshiftRAreg
+ OpARMCMNshiftLLreg
+ OpARMCMNshiftRLreg
+ OpARMCMNshiftRAreg
+ OpARMTSTshiftLLreg
+ OpARMTSTshiftRLreg
+ OpARMTSTshiftRAreg
+ OpARMTEQshiftLLreg
+ OpARMTEQshiftRLreg
+ OpARMTEQshiftRAreg
+ OpARMCMPF0
+ OpARMCMPD0
+ OpARMMOVWconst
+ OpARMMOVFconst
+ OpARMMOVDconst
+ OpARMMOVWaddr
+ OpARMMOVBload
+ OpARMMOVBUload
+ OpARMMOVHload
+ OpARMMOVHUload
+ OpARMMOVWload
+ OpARMMOVFload
+ OpARMMOVDload
+ OpARMMOVBstore
+ OpARMMOVHstore
+ OpARMMOVWstore
+ OpARMMOVFstore
+ OpARMMOVDstore
+ OpARMMOVWloadidx
+ OpARMMOVWloadshiftLL
+ OpARMMOVWloadshiftRL
+ OpARMMOVWloadshiftRA
+ OpARMMOVBUloadidx
+ OpARMMOVBloadidx
+ OpARMMOVHUloadidx
+ OpARMMOVHloadidx
+ OpARMMOVWstoreidx
+ OpARMMOVWstoreshiftLL
+ OpARMMOVWstoreshiftRL
+ OpARMMOVWstoreshiftRA
+ OpARMMOVBstoreidx
+ OpARMMOVHstoreidx
+ OpARMMOVBreg
+ OpARMMOVBUreg
+ OpARMMOVHreg
+ OpARMMOVHUreg
+ OpARMMOVWreg
+ OpARMMOVWnop
+ OpARMMOVWF
+ OpARMMOVWD
+ OpARMMOVWUF
+ OpARMMOVWUD
+ OpARMMOVFW
+ OpARMMOVDW
+ OpARMMOVFWU
+ OpARMMOVDWU
+ OpARMMOVFD
+ OpARMMOVDF
+ OpARMCMOVWHSconst
+ OpARMCMOVWLSconst
+ OpARMSRAcond
+ OpARMCALLstatic
+ OpARMCALLtail
+ OpARMCALLclosure
+ OpARMCALLinter
+ OpARMLoweredNilCheck
+ OpARMEqual
+ OpARMNotEqual
+ OpARMLessThan
+ OpARMLessEqual
+ OpARMGreaterThan
+ OpARMGreaterEqual
+ OpARMLessThanU
+ OpARMLessEqualU
+ OpARMGreaterThanU
+ OpARMGreaterEqualU
+ OpARMDUFFZERO
+ OpARMDUFFCOPY
+ OpARMLoweredZero
+ OpARMLoweredMove
+ OpARMLoweredGetClosurePtr
+ OpARMLoweredGetCallerSP
+ OpARMLoweredGetCallerPC
+ OpARMLoweredPanicBoundsA
+ OpARMLoweredPanicBoundsB
+ OpARMLoweredPanicBoundsC
+ OpARMLoweredPanicExtendA
+ OpARMLoweredPanicExtendB
+ OpARMLoweredPanicExtendC
+ OpARMFlagConstant
+ OpARMInvertFlags
+ OpARMLoweredWB
+
+ OpARM64ADCSflags
+ OpARM64ADCzerocarry
+ OpARM64ADD
+ OpARM64ADDconst
+ OpARM64ADDSconstflags
+ OpARM64ADDSflags
+ OpARM64SUB
+ OpARM64SUBconst
+ OpARM64SBCSflags
+ OpARM64SUBSflags
+ OpARM64MUL
+ OpARM64MULW
+ OpARM64MNEG
+ OpARM64MNEGW
+ OpARM64MULH
+ OpARM64UMULH
+ OpARM64MULL
+ OpARM64UMULL
+ OpARM64DIV
+ OpARM64UDIV
+ OpARM64DIVW
+ OpARM64UDIVW
+ OpARM64MOD
+ OpARM64UMOD
+ OpARM64MODW
+ OpARM64UMODW
+ OpARM64FADDS
+ OpARM64FADDD
+ OpARM64FSUBS
+ OpARM64FSUBD
+ OpARM64FMULS
+ OpARM64FMULD
+ OpARM64FNMULS
+ OpARM64FNMULD
+ OpARM64FDIVS
+ OpARM64FDIVD
+ OpARM64AND
+ OpARM64ANDconst
+ OpARM64OR
+ OpARM64ORconst
+ OpARM64XOR
+ OpARM64XORconst
+ OpARM64BIC
+ OpARM64EON
+ OpARM64ORN
+ OpARM64LoweredMuluhilo
+ OpARM64MVN
+ OpARM64NEG
+ OpARM64NEGSflags
+ OpARM64NGCzerocarry
+ OpARM64FABSD
+ OpARM64FNEGS
+ OpARM64FNEGD
+ OpARM64FSQRTD
+ OpARM64FSQRTS
+ OpARM64REV
+ OpARM64REVW
+ OpARM64REV16
+ OpARM64REV16W
+ OpARM64RBIT
+ OpARM64RBITW
+ OpARM64CLZ
+ OpARM64CLZW
+ OpARM64VCNT
+ OpARM64VUADDLV
+ OpARM64LoweredRound32F
+ OpARM64LoweredRound64F
+ OpARM64FMADDS
+ OpARM64FMADDD
+ OpARM64FNMADDS
+ OpARM64FNMADDD
+ OpARM64FMSUBS
+ OpARM64FMSUBD
+ OpARM64FNMSUBS
+ OpARM64FNMSUBD
+ OpARM64MADD
+ OpARM64MADDW
+ OpARM64MSUB
+ OpARM64MSUBW
+ OpARM64SLL
+ OpARM64SLLconst
+ OpARM64SRL
+ OpARM64SRLconst
+ OpARM64SRA
+ OpARM64SRAconst
+ OpARM64ROR
+ OpARM64RORW
+ OpARM64RORconst
+ OpARM64RORWconst
+ OpARM64EXTRconst
+ OpARM64EXTRWconst
+ OpARM64CMP
+ OpARM64CMPconst
+ OpARM64CMPW
+ OpARM64CMPWconst
+ OpARM64CMN
+ OpARM64CMNconst
+ OpARM64CMNW
+ OpARM64CMNWconst
+ OpARM64TST
+ OpARM64TSTconst
+ OpARM64TSTW
+ OpARM64TSTWconst
+ OpARM64FCMPS
+ OpARM64FCMPD
+ OpARM64FCMPS0
+ OpARM64FCMPD0
+ OpARM64MVNshiftLL
+ OpARM64MVNshiftRL
+ OpARM64MVNshiftRA
+ OpARM64MVNshiftRO
+ OpARM64NEGshiftLL
+ OpARM64NEGshiftRL
+ OpARM64NEGshiftRA
+ OpARM64ADDshiftLL
+ OpARM64ADDshiftRL
+ OpARM64ADDshiftRA
+ OpARM64SUBshiftLL
+ OpARM64SUBshiftRL
+ OpARM64SUBshiftRA
+ OpARM64ANDshiftLL
+ OpARM64ANDshiftRL
+ OpARM64ANDshiftRA
+ OpARM64ANDshiftRO
+ OpARM64ORshiftLL
+ OpARM64ORshiftRL
+ OpARM64ORshiftRA
+ OpARM64ORshiftRO
+ OpARM64XORshiftLL
+ OpARM64XORshiftRL
+ OpARM64XORshiftRA
+ OpARM64XORshiftRO
+ OpARM64BICshiftLL
+ OpARM64BICshiftRL
+ OpARM64BICshiftRA
+ OpARM64BICshiftRO
+ OpARM64EONshiftLL
+ OpARM64EONshiftRL
+ OpARM64EONshiftRA
+ OpARM64EONshiftRO
+ OpARM64ORNshiftLL
+ OpARM64ORNshiftRL
+ OpARM64ORNshiftRA
+ OpARM64ORNshiftRO
+ OpARM64CMPshiftLL
+ OpARM64CMPshiftRL
+ OpARM64CMPshiftRA
+ OpARM64CMNshiftLL
+ OpARM64CMNshiftRL
+ OpARM64CMNshiftRA
+ OpARM64TSTshiftLL
+ OpARM64TSTshiftRL
+ OpARM64TSTshiftRA
+ OpARM64TSTshiftRO
+ OpARM64BFI
+ OpARM64BFXIL
+ OpARM64SBFIZ
+ OpARM64SBFX
+ OpARM64UBFIZ
+ OpARM64UBFX
+ OpARM64MOVDconst
+ OpARM64FMOVSconst
+ OpARM64FMOVDconst
+ OpARM64MOVDaddr
+ OpARM64MOVBload
+ OpARM64MOVBUload
+ OpARM64MOVHload
+ OpARM64MOVHUload
+ OpARM64MOVWload
+ OpARM64MOVWUload
+ OpARM64MOVDload
+ OpARM64FMOVSload
+ OpARM64FMOVDload
+ OpARM64MOVDloadidx
+ OpARM64MOVWloadidx
+ OpARM64MOVWUloadidx
+ OpARM64MOVHloadidx
+ OpARM64MOVHUloadidx
+ OpARM64MOVBloadidx
+ OpARM64MOVBUloadidx
+ OpARM64FMOVSloadidx
+ OpARM64FMOVDloadidx
+ OpARM64MOVHloadidx2
+ OpARM64MOVHUloadidx2
+ OpARM64MOVWloadidx4
+ OpARM64MOVWUloadidx4
+ OpARM64MOVDloadidx8
+ OpARM64FMOVSloadidx4
+ OpARM64FMOVDloadidx8
+ OpARM64MOVBstore
+ OpARM64MOVHstore
+ OpARM64MOVWstore
+ OpARM64MOVDstore
+ OpARM64STP
+ OpARM64FMOVSstore
+ OpARM64FMOVDstore
+ OpARM64MOVBstoreidx
+ OpARM64MOVHstoreidx
+ OpARM64MOVWstoreidx
+ OpARM64MOVDstoreidx
+ OpARM64FMOVSstoreidx
+ OpARM64FMOVDstoreidx
+ OpARM64MOVHstoreidx2
+ OpARM64MOVWstoreidx4
+ OpARM64MOVDstoreidx8
+ OpARM64FMOVSstoreidx4
+ OpARM64FMOVDstoreidx8
+ OpARM64MOVBstorezero
+ OpARM64MOVHstorezero
+ OpARM64MOVWstorezero
+ OpARM64MOVDstorezero
+ OpARM64MOVQstorezero
+ OpARM64MOVBstorezeroidx
+ OpARM64MOVHstorezeroidx
+ OpARM64MOVWstorezeroidx
+ OpARM64MOVDstorezeroidx
+ OpARM64MOVHstorezeroidx2
+ OpARM64MOVWstorezeroidx4
+ OpARM64MOVDstorezeroidx8
+ OpARM64FMOVDgpfp
+ OpARM64FMOVDfpgp
+ OpARM64FMOVSgpfp
+ OpARM64FMOVSfpgp
+ OpARM64MOVBreg
+ OpARM64MOVBUreg
+ OpARM64MOVHreg
+ OpARM64MOVHUreg
+ OpARM64MOVWreg
+ OpARM64MOVWUreg
+ OpARM64MOVDreg
+ OpARM64MOVDnop
+ OpARM64SCVTFWS
+ OpARM64SCVTFWD
+ OpARM64UCVTFWS
+ OpARM64UCVTFWD
+ OpARM64SCVTFS
+ OpARM64SCVTFD
+ OpARM64UCVTFS
+ OpARM64UCVTFD
+ OpARM64FCVTZSSW
+ OpARM64FCVTZSDW
+ OpARM64FCVTZUSW
+ OpARM64FCVTZUDW
+ OpARM64FCVTZSS
+ OpARM64FCVTZSD
+ OpARM64FCVTZUS
+ OpARM64FCVTZUD
+ OpARM64FCVTSD
+ OpARM64FCVTDS
+ OpARM64FRINTAD
+ OpARM64FRINTMD
+ OpARM64FRINTND
+ OpARM64FRINTPD
+ OpARM64FRINTZD
+ OpARM64CSEL
+ OpARM64CSEL0
+ OpARM64CSINC
+ OpARM64CSINV
+ OpARM64CSNEG
+ OpARM64CSETM
+ OpARM64CALLstatic
+ OpARM64CALLtail
+ OpARM64CALLclosure
+ OpARM64CALLinter
+ OpARM64LoweredNilCheck
+ OpARM64Equal
+ OpARM64NotEqual
+ OpARM64LessThan
+ OpARM64LessEqual
+ OpARM64GreaterThan
+ OpARM64GreaterEqual
+ OpARM64LessThanU
+ OpARM64LessEqualU
+ OpARM64GreaterThanU
+ OpARM64GreaterEqualU
+ OpARM64LessThanF
+ OpARM64LessEqualF
+ OpARM64GreaterThanF
+ OpARM64GreaterEqualF
+ OpARM64NotLessThanF
+ OpARM64NotLessEqualF
+ OpARM64NotGreaterThanF
+ OpARM64NotGreaterEqualF
+ OpARM64DUFFZERO
+ OpARM64LoweredZero
+ OpARM64DUFFCOPY
+ OpARM64LoweredMove
+ OpARM64LoweredGetClosurePtr
+ OpARM64LoweredGetCallerSP
+ OpARM64LoweredGetCallerPC
+ OpARM64FlagConstant
+ OpARM64InvertFlags
+ OpARM64LDAR
+ OpARM64LDARB
+ OpARM64LDARW
+ OpARM64STLRB
+ OpARM64STLR
+ OpARM64STLRW
+ OpARM64LoweredAtomicExchange64
+ OpARM64LoweredAtomicExchange32
+ OpARM64LoweredAtomicExchange64Variant
+ OpARM64LoweredAtomicExchange32Variant
+ OpARM64LoweredAtomicAdd64
+ OpARM64LoweredAtomicAdd32
+ OpARM64LoweredAtomicAdd64Variant
+ OpARM64LoweredAtomicAdd32Variant
+ OpARM64LoweredAtomicCas64
+ OpARM64LoweredAtomicCas32
+ OpARM64LoweredAtomicCas64Variant
+ OpARM64LoweredAtomicCas32Variant
+ OpARM64LoweredAtomicAnd8
+ OpARM64LoweredAtomicAnd32
+ OpARM64LoweredAtomicOr8
+ OpARM64LoweredAtomicOr32
+ OpARM64LoweredAtomicAnd8Variant
+ OpARM64LoweredAtomicAnd32Variant
+ OpARM64LoweredAtomicOr8Variant
+ OpARM64LoweredAtomicOr32Variant
+ OpARM64LoweredWB
+ OpARM64LoweredPanicBoundsA
+ OpARM64LoweredPanicBoundsB
+ OpARM64LoweredPanicBoundsC
+ OpARM64PRFM
+ OpARM64DMB
+
+ OpMIPSADD
+ OpMIPSADDconst
+ OpMIPSSUB
+ OpMIPSSUBconst
+ OpMIPSMUL
+ OpMIPSMULT
+ OpMIPSMULTU
+ OpMIPSDIV
+ OpMIPSDIVU
+ OpMIPSADDF
+ OpMIPSADDD
+ OpMIPSSUBF
+ OpMIPSSUBD
+ OpMIPSMULF
+ OpMIPSMULD
+ OpMIPSDIVF
+ OpMIPSDIVD
+ OpMIPSAND
+ OpMIPSANDconst
+ OpMIPSOR
+ OpMIPSORconst
+ OpMIPSXOR
+ OpMIPSXORconst
+ OpMIPSNOR
+ OpMIPSNORconst
+ OpMIPSNEG
+ OpMIPSNEGF
+ OpMIPSNEGD
+ OpMIPSSQRTD
+ OpMIPSSQRTF
+ OpMIPSSLL
+ OpMIPSSLLconst
+ OpMIPSSRL
+ OpMIPSSRLconst
+ OpMIPSSRA
+ OpMIPSSRAconst
+ OpMIPSCLZ
+ OpMIPSSGT
+ OpMIPSSGTconst
+ OpMIPSSGTzero
+ OpMIPSSGTU
+ OpMIPSSGTUconst
+ OpMIPSSGTUzero
+ OpMIPSCMPEQF
+ OpMIPSCMPEQD
+ OpMIPSCMPGEF
+ OpMIPSCMPGED
+ OpMIPSCMPGTF
+ OpMIPSCMPGTD
+ OpMIPSMOVWconst
+ OpMIPSMOVFconst
+ OpMIPSMOVDconst
+ OpMIPSMOVWaddr
+ OpMIPSMOVBload
+ OpMIPSMOVBUload
+ OpMIPSMOVHload
+ OpMIPSMOVHUload
+ OpMIPSMOVWload
+ OpMIPSMOVFload
+ OpMIPSMOVDload
+ OpMIPSMOVBstore
+ OpMIPSMOVHstore
+ OpMIPSMOVWstore
+ OpMIPSMOVFstore
+ OpMIPSMOVDstore
+ OpMIPSMOVBstorezero
+ OpMIPSMOVHstorezero
+ OpMIPSMOVWstorezero
+ OpMIPSMOVBreg
+ OpMIPSMOVBUreg
+ OpMIPSMOVHreg
+ OpMIPSMOVHUreg
+ OpMIPSMOVWreg
+ OpMIPSMOVWnop
+ OpMIPSCMOVZ
+ OpMIPSCMOVZzero
+ OpMIPSMOVWF
+ OpMIPSMOVWD
+ OpMIPSTRUNCFW
+ OpMIPSTRUNCDW
+ OpMIPSMOVFD
+ OpMIPSMOVDF
+ OpMIPSCALLstatic
+ OpMIPSCALLtail
+ OpMIPSCALLclosure
+ OpMIPSCALLinter
+ OpMIPSLoweredAtomicLoad8
+ OpMIPSLoweredAtomicLoad32
+ OpMIPSLoweredAtomicStore8
+ OpMIPSLoweredAtomicStore32
+ OpMIPSLoweredAtomicStorezero
+ OpMIPSLoweredAtomicExchange
+ OpMIPSLoweredAtomicAdd
+ OpMIPSLoweredAtomicAddconst
+ OpMIPSLoweredAtomicCas
+ OpMIPSLoweredAtomicAnd
+ OpMIPSLoweredAtomicOr
+ OpMIPSLoweredZero
+ OpMIPSLoweredMove
+ OpMIPSLoweredNilCheck
+ OpMIPSFPFlagTrue
+ OpMIPSFPFlagFalse
+ OpMIPSLoweredGetClosurePtr
+ OpMIPSLoweredGetCallerSP
+ OpMIPSLoweredGetCallerPC
+ OpMIPSLoweredWB
+ OpMIPSLoweredPanicBoundsA
+ OpMIPSLoweredPanicBoundsB
+ OpMIPSLoweredPanicBoundsC
+ OpMIPSLoweredPanicExtendA
+ OpMIPSLoweredPanicExtendB
+ OpMIPSLoweredPanicExtendC
+
+ OpMIPS64ADDV
+ OpMIPS64ADDVconst
+ OpMIPS64SUBV
+ OpMIPS64SUBVconst
+ OpMIPS64MULV
+ OpMIPS64MULVU
+ OpMIPS64DIVV
+ OpMIPS64DIVVU
+ OpMIPS64ADDF
+ OpMIPS64ADDD
+ OpMIPS64SUBF
+ OpMIPS64SUBD
+ OpMIPS64MULF
+ OpMIPS64MULD
+ OpMIPS64DIVF
+ OpMIPS64DIVD
+ OpMIPS64AND
+ OpMIPS64ANDconst
+ OpMIPS64OR
+ OpMIPS64ORconst
+ OpMIPS64XOR
+ OpMIPS64XORconst
+ OpMIPS64NOR
+ OpMIPS64NORconst
+ OpMIPS64NEGV
+ OpMIPS64NEGF
+ OpMIPS64NEGD
+ OpMIPS64SQRTD
+ OpMIPS64SQRTF
+ OpMIPS64SLLV
+ OpMIPS64SLLVconst
+ OpMIPS64SRLV
+ OpMIPS64SRLVconst
+ OpMIPS64SRAV
+ OpMIPS64SRAVconst
+ OpMIPS64SGT
+ OpMIPS64SGTconst
+ OpMIPS64SGTU
+ OpMIPS64SGTUconst
+ OpMIPS64CMPEQF
+ OpMIPS64CMPEQD
+ OpMIPS64CMPGEF
+ OpMIPS64CMPGED
+ OpMIPS64CMPGTF
+ OpMIPS64CMPGTD
+ OpMIPS64MOVVconst
+ OpMIPS64MOVFconst
+ OpMIPS64MOVDconst
+ OpMIPS64MOVVaddr
+ OpMIPS64MOVBload
+ OpMIPS64MOVBUload
+ OpMIPS64MOVHload
+ OpMIPS64MOVHUload
+ OpMIPS64MOVWload
+ OpMIPS64MOVWUload
+ OpMIPS64MOVVload
+ OpMIPS64MOVFload
+ OpMIPS64MOVDload
+ OpMIPS64MOVBstore
+ OpMIPS64MOVHstore
+ OpMIPS64MOVWstore
+ OpMIPS64MOVVstore
+ OpMIPS64MOVFstore
+ OpMIPS64MOVDstore
+ OpMIPS64MOVBstorezero
+ OpMIPS64MOVHstorezero
+ OpMIPS64MOVWstorezero
+ OpMIPS64MOVVstorezero
+ OpMIPS64MOVBreg
+ OpMIPS64MOVBUreg
+ OpMIPS64MOVHreg
+ OpMIPS64MOVHUreg
+ OpMIPS64MOVWreg
+ OpMIPS64MOVWUreg
+ OpMIPS64MOVVreg
+ OpMIPS64MOVVnop
+ OpMIPS64MOVWF
+ OpMIPS64MOVWD
+ OpMIPS64MOVVF
+ OpMIPS64MOVVD
+ OpMIPS64TRUNCFW
+ OpMIPS64TRUNCDW
+ OpMIPS64TRUNCFV
+ OpMIPS64TRUNCDV
+ OpMIPS64MOVFD
+ OpMIPS64MOVDF
+ OpMIPS64CALLstatic
+ OpMIPS64CALLtail
+ OpMIPS64CALLclosure
+ OpMIPS64CALLinter
+ OpMIPS64DUFFZERO
+ OpMIPS64DUFFCOPY
+ OpMIPS64LoweredZero
+ OpMIPS64LoweredMove
+ OpMIPS64LoweredAtomicLoad8
+ OpMIPS64LoweredAtomicLoad32
+ OpMIPS64LoweredAtomicLoad64
+ OpMIPS64LoweredAtomicStore8
+ OpMIPS64LoweredAtomicStore32
+ OpMIPS64LoweredAtomicStore64
+ OpMIPS64LoweredAtomicStorezero32
+ OpMIPS64LoweredAtomicStorezero64
+ OpMIPS64LoweredAtomicExchange32
+ OpMIPS64LoweredAtomicExchange64
+ OpMIPS64LoweredAtomicAdd32
+ OpMIPS64LoweredAtomicAdd64
+ OpMIPS64LoweredAtomicAddconst32
+ OpMIPS64LoweredAtomicAddconst64
+ OpMIPS64LoweredAtomicCas32
+ OpMIPS64LoweredAtomicCas64
+ OpMIPS64LoweredNilCheck
+ OpMIPS64FPFlagTrue
+ OpMIPS64FPFlagFalse
+ OpMIPS64LoweredGetClosurePtr
+ OpMIPS64LoweredGetCallerSP
+ OpMIPS64LoweredGetCallerPC
+ OpMIPS64LoweredWB
+ OpMIPS64LoweredPanicBoundsA
+ OpMIPS64LoweredPanicBoundsB
+ OpMIPS64LoweredPanicBoundsC
+
+ OpPPC64ADD
+ OpPPC64ADDconst
+ OpPPC64FADD
+ OpPPC64FADDS
+ OpPPC64SUB
+ OpPPC64SUBFCconst
+ OpPPC64FSUB
+ OpPPC64FSUBS
+ OpPPC64MULLD
+ OpPPC64MULLW
+ OpPPC64MULLDconst
+ OpPPC64MULLWconst
+ OpPPC64MADDLD
+ OpPPC64MULHD
+ OpPPC64MULHW
+ OpPPC64MULHDU
+ OpPPC64MULHWU
+ OpPPC64LoweredMuluhilo
+ OpPPC64FMUL
+ OpPPC64FMULS
+ OpPPC64FMADD
+ OpPPC64FMADDS
+ OpPPC64FMSUB
+ OpPPC64FMSUBS
+ OpPPC64SRAD
+ OpPPC64SRAW
+ OpPPC64SRD
+ OpPPC64SRW
+ OpPPC64SLD
+ OpPPC64SLW
+ OpPPC64ROTL
+ OpPPC64ROTLW
+ OpPPC64RLDICL
+ OpPPC64CLRLSLWI
+ OpPPC64CLRLSLDI
+ OpPPC64LoweredAdd64Carry
+ OpPPC64SRADconst
+ OpPPC64SRAWconst
+ OpPPC64SRDconst
+ OpPPC64SRWconst
+ OpPPC64SLDconst
+ OpPPC64SLWconst
+ OpPPC64ROTLconst
+ OpPPC64ROTLWconst
+ OpPPC64EXTSWSLconst
+ OpPPC64RLWINM
+ OpPPC64RLWNM
+ OpPPC64RLWMI
+ OpPPC64CNTLZD
+ OpPPC64CNTLZW
+ OpPPC64CNTTZD
+ OpPPC64CNTTZW
+ OpPPC64POPCNTD
+ OpPPC64POPCNTW
+ OpPPC64POPCNTB
+ OpPPC64FDIV
+ OpPPC64FDIVS
+ OpPPC64DIVD
+ OpPPC64DIVW
+ OpPPC64DIVDU
+ OpPPC64DIVWU
+ OpPPC64MODUD
+ OpPPC64MODSD
+ OpPPC64MODUW
+ OpPPC64MODSW
+ OpPPC64FCTIDZ
+ OpPPC64FCTIWZ
+ OpPPC64FCFID
+ OpPPC64FCFIDS
+ OpPPC64FRSP
+ OpPPC64MFVSRD
+ OpPPC64MTVSRD
+ OpPPC64AND
+ OpPPC64ANDN
+ OpPPC64ANDCC
+ OpPPC64OR
+ OpPPC64ORN
+ OpPPC64ORCC
+ OpPPC64NOR
+ OpPPC64XOR
+ OpPPC64XORCC
+ OpPPC64EQV
+ OpPPC64NEG
+ OpPPC64FNEG
+ OpPPC64FSQRT
+ OpPPC64FSQRTS
+ OpPPC64FFLOOR
+ OpPPC64FCEIL
+ OpPPC64FTRUNC
+ OpPPC64FROUND
+ OpPPC64FABS
+ OpPPC64FNABS
+ OpPPC64FCPSGN
+ OpPPC64ORconst
+ OpPPC64XORconst
+ OpPPC64ANDconst
+ OpPPC64ANDCCconst
+ OpPPC64MOVBreg
+ OpPPC64MOVBZreg
+ OpPPC64MOVHreg
+ OpPPC64MOVHZreg
+ OpPPC64MOVWreg
+ OpPPC64MOVWZreg
+ OpPPC64MOVBZload
+ OpPPC64MOVHload
+ OpPPC64MOVHZload
+ OpPPC64MOVWload
+ OpPPC64MOVWZload
+ OpPPC64MOVDload
+ OpPPC64MOVDBRload
+ OpPPC64MOVWBRload
+ OpPPC64MOVHBRload
+ OpPPC64MOVBZloadidx
+ OpPPC64MOVHloadidx
+ OpPPC64MOVHZloadidx
+ OpPPC64MOVWloadidx
+ OpPPC64MOVWZloadidx
+ OpPPC64MOVDloadidx
+ OpPPC64MOVHBRloadidx
+ OpPPC64MOVWBRloadidx
+ OpPPC64MOVDBRloadidx
+ OpPPC64FMOVDloadidx
+ OpPPC64FMOVSloadidx
+ OpPPC64DCBT
+ OpPPC64MOVDBRstore
+ OpPPC64MOVWBRstore
+ OpPPC64MOVHBRstore
+ OpPPC64FMOVDload
+ OpPPC64FMOVSload
+ OpPPC64MOVBstore
+ OpPPC64MOVHstore
+ OpPPC64MOVWstore
+ OpPPC64MOVDstore
+ OpPPC64FMOVDstore
+ OpPPC64FMOVSstore
+ OpPPC64MOVBstoreidx
+ OpPPC64MOVHstoreidx
+ OpPPC64MOVWstoreidx
+ OpPPC64MOVDstoreidx
+ OpPPC64FMOVDstoreidx
+ OpPPC64FMOVSstoreidx
+ OpPPC64MOVHBRstoreidx
+ OpPPC64MOVWBRstoreidx
+ OpPPC64MOVDBRstoreidx
+ OpPPC64MOVBstorezero
+ OpPPC64MOVHstorezero
+ OpPPC64MOVWstorezero
+ OpPPC64MOVDstorezero
+ OpPPC64MOVDaddr
+ OpPPC64MOVDconst
+ OpPPC64FMOVDconst
+ OpPPC64FMOVSconst
+ OpPPC64FCMPU
+ OpPPC64CMP
+ OpPPC64CMPU
+ OpPPC64CMPW
+ OpPPC64CMPWU
+ OpPPC64CMPconst
+ OpPPC64CMPUconst
+ OpPPC64CMPWconst
+ OpPPC64CMPWUconst
+ OpPPC64ISEL
+ OpPPC64ISELB
+ OpPPC64Equal
+ OpPPC64NotEqual
+ OpPPC64LessThan
+ OpPPC64FLessThan
+ OpPPC64LessEqual
+ OpPPC64FLessEqual
+ OpPPC64GreaterThan
+ OpPPC64FGreaterThan
+ OpPPC64GreaterEqual
+ OpPPC64FGreaterEqual
+ OpPPC64LoweredGetClosurePtr
+ OpPPC64LoweredGetCallerSP
+ OpPPC64LoweredGetCallerPC
+ OpPPC64LoweredNilCheck
+ OpPPC64LoweredRound32F
+ OpPPC64LoweredRound64F
+ OpPPC64CALLstatic
+ OpPPC64CALLtail
+ OpPPC64CALLclosure
+ OpPPC64CALLinter
+ OpPPC64LoweredZero
+ OpPPC64LoweredZeroShort
+ OpPPC64LoweredQuadZeroShort
+ OpPPC64LoweredQuadZero
+ OpPPC64LoweredMove
+ OpPPC64LoweredMoveShort
+ OpPPC64LoweredQuadMove
+ OpPPC64LoweredQuadMoveShort
+ OpPPC64LoweredAtomicStore8
+ OpPPC64LoweredAtomicStore32
+ OpPPC64LoweredAtomicStore64
+ OpPPC64LoweredAtomicLoad8
+ OpPPC64LoweredAtomicLoad32
+ OpPPC64LoweredAtomicLoad64
+ OpPPC64LoweredAtomicLoadPtr
+ OpPPC64LoweredAtomicAdd32
+ OpPPC64LoweredAtomicAdd64
+ OpPPC64LoweredAtomicExchange32
+ OpPPC64LoweredAtomicExchange64
+ OpPPC64LoweredAtomicCas64
+ OpPPC64LoweredAtomicCas32
+ OpPPC64LoweredAtomicAnd8
+ OpPPC64LoweredAtomicAnd32
+ OpPPC64LoweredAtomicOr8
+ OpPPC64LoweredAtomicOr32
+ OpPPC64LoweredWB
+ OpPPC64LoweredPanicBoundsA
+ OpPPC64LoweredPanicBoundsB
+ OpPPC64LoweredPanicBoundsC
+ OpPPC64InvertFlags
+ OpPPC64FlagEQ
+ OpPPC64FlagLT
+ OpPPC64FlagGT
+
+ OpRISCV64ADD
+ OpRISCV64ADDI
+ OpRISCV64ADDIW
+ OpRISCV64NEG
+ OpRISCV64NEGW
+ OpRISCV64SUB
+ OpRISCV64SUBW
+ OpRISCV64MUL
+ OpRISCV64MULW
+ OpRISCV64MULH
+ OpRISCV64MULHU
+ OpRISCV64LoweredMuluhilo
+ OpRISCV64LoweredMuluover
+ OpRISCV64DIV
+ OpRISCV64DIVU
+ OpRISCV64DIVW
+ OpRISCV64DIVUW
+ OpRISCV64REM
+ OpRISCV64REMU
+ OpRISCV64REMW
+ OpRISCV64REMUW
+ OpRISCV64MOVaddr
+ OpRISCV64MOVDconst
+ OpRISCV64MOVBload
+ OpRISCV64MOVHload
+ OpRISCV64MOVWload
+ OpRISCV64MOVDload
+ OpRISCV64MOVBUload
+ OpRISCV64MOVHUload
+ OpRISCV64MOVWUload
+ OpRISCV64MOVBstore
+ OpRISCV64MOVHstore
+ OpRISCV64MOVWstore
+ OpRISCV64MOVDstore
+ OpRISCV64MOVBstorezero
+ OpRISCV64MOVHstorezero
+ OpRISCV64MOVWstorezero
+ OpRISCV64MOVDstorezero
+ OpRISCV64MOVBreg
+ OpRISCV64MOVHreg
+ OpRISCV64MOVWreg
+ OpRISCV64MOVDreg
+ OpRISCV64MOVBUreg
+ OpRISCV64MOVHUreg
+ OpRISCV64MOVWUreg
+ OpRISCV64MOVDnop
+ OpRISCV64SLL
+ OpRISCV64SRA
+ OpRISCV64SRL
+ OpRISCV64SLLI
+ OpRISCV64SRAI
+ OpRISCV64SRLI
+ OpRISCV64XOR
+ OpRISCV64XORI
+ OpRISCV64OR
+ OpRISCV64ORI
+ OpRISCV64AND
+ OpRISCV64ANDI
+ OpRISCV64NOT
+ OpRISCV64SEQZ
+ OpRISCV64SNEZ
+ OpRISCV64SLT
+ OpRISCV64SLTI
+ OpRISCV64SLTU
+ OpRISCV64SLTIU
+ OpRISCV64MOVconvert
+ OpRISCV64CALLstatic
+ OpRISCV64CALLtail
+ OpRISCV64CALLclosure
+ OpRISCV64CALLinter
+ OpRISCV64DUFFZERO
+ OpRISCV64DUFFCOPY
+ OpRISCV64LoweredZero
+ OpRISCV64LoweredMove
+ OpRISCV64LoweredAtomicLoad8
+ OpRISCV64LoweredAtomicLoad32
+ OpRISCV64LoweredAtomicLoad64
+ OpRISCV64LoweredAtomicStore8
+ OpRISCV64LoweredAtomicStore32
+ OpRISCV64LoweredAtomicStore64
+ OpRISCV64LoweredAtomicExchange32
+ OpRISCV64LoweredAtomicExchange64
+ OpRISCV64LoweredAtomicAdd32
+ OpRISCV64LoweredAtomicAdd64
+ OpRISCV64LoweredAtomicCas32
+ OpRISCV64LoweredAtomicCas64
+ OpRISCV64LoweredAtomicAnd32
+ OpRISCV64LoweredAtomicOr32
+ OpRISCV64LoweredNilCheck
+ OpRISCV64LoweredGetClosurePtr
+ OpRISCV64LoweredGetCallerSP
+ OpRISCV64LoweredGetCallerPC
+ OpRISCV64LoweredWB
+ OpRISCV64LoweredPanicBoundsA
+ OpRISCV64LoweredPanicBoundsB
+ OpRISCV64LoweredPanicBoundsC
+ OpRISCV64FADDS
+ OpRISCV64FSUBS
+ OpRISCV64FMULS
+ OpRISCV64FDIVS
+ OpRISCV64FSQRTS
+ OpRISCV64FNEGS
+ OpRISCV64FMVSX
+ OpRISCV64FCVTSW
+ OpRISCV64FCVTSL
+ OpRISCV64FCVTWS
+ OpRISCV64FCVTLS
+ OpRISCV64FMOVWload
+ OpRISCV64FMOVWstore
+ OpRISCV64FEQS
+ OpRISCV64FNES
+ OpRISCV64FLTS
+ OpRISCV64FLES
+ OpRISCV64FADDD
+ OpRISCV64FSUBD
+ OpRISCV64FMULD
+ OpRISCV64FDIVD
+ OpRISCV64FMADDD
+ OpRISCV64FMSUBD
+ OpRISCV64FNMADDD
+ OpRISCV64FNMSUBD
+ OpRISCV64FSQRTD
+ OpRISCV64FNEGD
+ OpRISCV64FABSD
+ OpRISCV64FSGNJD
+ OpRISCV64FMVDX
+ OpRISCV64FCVTDW
+ OpRISCV64FCVTDL
+ OpRISCV64FCVTWD
+ OpRISCV64FCVTLD
+ OpRISCV64FCVTDS
+ OpRISCV64FCVTSD
+ OpRISCV64FMOVDload
+ OpRISCV64FMOVDstore
+ OpRISCV64FEQD
+ OpRISCV64FNED
+ OpRISCV64FLTD
+ OpRISCV64FLED
+
+ OpS390XFADDS
+ OpS390XFADD
+ OpS390XFSUBS
+ OpS390XFSUB
+ OpS390XFMULS
+ OpS390XFMUL
+ OpS390XFDIVS
+ OpS390XFDIV
+ OpS390XFNEGS
+ OpS390XFNEG
+ OpS390XFMADDS
+ OpS390XFMADD
+ OpS390XFMSUBS
+ OpS390XFMSUB
+ OpS390XLPDFR
+ OpS390XLNDFR
+ OpS390XCPSDR
+ OpS390XFIDBR
+ OpS390XFMOVSload
+ OpS390XFMOVDload
+ OpS390XFMOVSconst
+ OpS390XFMOVDconst
+ OpS390XFMOVSloadidx
+ OpS390XFMOVDloadidx
+ OpS390XFMOVSstore
+ OpS390XFMOVDstore
+ OpS390XFMOVSstoreidx
+ OpS390XFMOVDstoreidx
+ OpS390XADD
+ OpS390XADDW
+ OpS390XADDconst
+ OpS390XADDWconst
+ OpS390XADDload
+ OpS390XADDWload
+ OpS390XSUB
+ OpS390XSUBW
+ OpS390XSUBconst
+ OpS390XSUBWconst
+ OpS390XSUBload
+ OpS390XSUBWload
+ OpS390XMULLD
+ OpS390XMULLW
+ OpS390XMULLDconst
+ OpS390XMULLWconst
+ OpS390XMULLDload
+ OpS390XMULLWload
+ OpS390XMULHD
+ OpS390XMULHDU
+ OpS390XDIVD
+ OpS390XDIVW
+ OpS390XDIVDU
+ OpS390XDIVWU
+ OpS390XMODD
+ OpS390XMODW
+ OpS390XMODDU
+ OpS390XMODWU
+ OpS390XAND
+ OpS390XANDW
+ OpS390XANDconst
+ OpS390XANDWconst
+ OpS390XANDload
+ OpS390XANDWload
+ OpS390XOR
+ OpS390XORW
+ OpS390XORconst
+ OpS390XORWconst
+ OpS390XORload
+ OpS390XORWload
+ OpS390XXOR
+ OpS390XXORW
+ OpS390XXORconst
+ OpS390XXORWconst
+ OpS390XXORload
+ OpS390XXORWload
+ OpS390XADDC
+ OpS390XADDCconst
+ OpS390XADDE
+ OpS390XSUBC
+ OpS390XSUBE
+ OpS390XCMP
+ OpS390XCMPW
+ OpS390XCMPU
+ OpS390XCMPWU
+ OpS390XCMPconst
+ OpS390XCMPWconst
+ OpS390XCMPUconst
+ OpS390XCMPWUconst
+ OpS390XFCMPS
+ OpS390XFCMP
+ OpS390XLTDBR
+ OpS390XLTEBR
+ OpS390XSLD
+ OpS390XSLW
+ OpS390XSLDconst
+ OpS390XSLWconst
+ OpS390XSRD
+ OpS390XSRW
+ OpS390XSRDconst
+ OpS390XSRWconst
+ OpS390XSRAD
+ OpS390XSRAW
+ OpS390XSRADconst
+ OpS390XSRAWconst
+ OpS390XRLLG
+ OpS390XRLL
+ OpS390XRLLconst
+ OpS390XRXSBG
+ OpS390XRISBGZ
+ OpS390XNEG
+ OpS390XNEGW
+ OpS390XNOT
+ OpS390XNOTW
+ OpS390XFSQRT
+ OpS390XFSQRTS
+ OpS390XLOCGR
+ OpS390XMOVBreg
+ OpS390XMOVBZreg
+ OpS390XMOVHreg
+ OpS390XMOVHZreg
+ OpS390XMOVWreg
+ OpS390XMOVWZreg
+ OpS390XMOVDconst
+ OpS390XLDGR
+ OpS390XLGDR
+ OpS390XCFDBRA
+ OpS390XCGDBRA
+ OpS390XCFEBRA
+ OpS390XCGEBRA
+ OpS390XCEFBRA
+ OpS390XCDFBRA
+ OpS390XCEGBRA
+ OpS390XCDGBRA
+ OpS390XCLFEBR
+ OpS390XCLFDBR
+ OpS390XCLGEBR
+ OpS390XCLGDBR
+ OpS390XCELFBR
+ OpS390XCDLFBR
+ OpS390XCELGBR
+ OpS390XCDLGBR
+ OpS390XLEDBR
+ OpS390XLDEBR
+ OpS390XMOVDaddr
+ OpS390XMOVDaddridx
+ OpS390XMOVBZload
+ OpS390XMOVBload
+ OpS390XMOVHZload
+ OpS390XMOVHload
+ OpS390XMOVWZload
+ OpS390XMOVWload
+ OpS390XMOVDload
+ OpS390XMOVWBR
+ OpS390XMOVDBR
+ OpS390XMOVHBRload
+ OpS390XMOVWBRload
+ OpS390XMOVDBRload
+ OpS390XMOVBstore
+ OpS390XMOVHstore
+ OpS390XMOVWstore
+ OpS390XMOVDstore
+ OpS390XMOVHBRstore
+ OpS390XMOVWBRstore
+ OpS390XMOVDBRstore
+ OpS390XMVC
+ OpS390XMOVBZloadidx
+ OpS390XMOVBloadidx
+ OpS390XMOVHZloadidx
+ OpS390XMOVHloadidx
+ OpS390XMOVWZloadidx
+ OpS390XMOVWloadidx
+ OpS390XMOVDloadidx
+ OpS390XMOVHBRloadidx
+ OpS390XMOVWBRloadidx
+ OpS390XMOVDBRloadidx
+ OpS390XMOVBstoreidx
+ OpS390XMOVHstoreidx
+ OpS390XMOVWstoreidx
+ OpS390XMOVDstoreidx
+ OpS390XMOVHBRstoreidx
+ OpS390XMOVWBRstoreidx
+ OpS390XMOVDBRstoreidx
+ OpS390XMOVBstoreconst
+ OpS390XMOVHstoreconst
+ OpS390XMOVWstoreconst
+ OpS390XMOVDstoreconst
+ OpS390XCLEAR
+ OpS390XCALLstatic
+ OpS390XCALLtail
+ OpS390XCALLclosure
+ OpS390XCALLinter
+ OpS390XInvertFlags
+ OpS390XLoweredGetG
+ OpS390XLoweredGetClosurePtr
+ OpS390XLoweredGetCallerSP
+ OpS390XLoweredGetCallerPC
+ OpS390XLoweredNilCheck
+ OpS390XLoweredRound32F
+ OpS390XLoweredRound64F
+ OpS390XLoweredWB
+ OpS390XLoweredPanicBoundsA
+ OpS390XLoweredPanicBoundsB
+ OpS390XLoweredPanicBoundsC
+ OpS390XFlagEQ
+ OpS390XFlagLT
+ OpS390XFlagGT
+ OpS390XFlagOV
+ OpS390XSYNC
+ OpS390XMOVBZatomicload
+ OpS390XMOVWZatomicload
+ OpS390XMOVDatomicload
+ OpS390XMOVBatomicstore
+ OpS390XMOVWatomicstore
+ OpS390XMOVDatomicstore
+ OpS390XLAA
+ OpS390XLAAG
+ OpS390XAddTupleFirst32
+ OpS390XAddTupleFirst64
+ OpS390XLAN
+ OpS390XLANfloor
+ OpS390XLAO
+ OpS390XLAOfloor
+ OpS390XLoweredAtomicCas32
+ OpS390XLoweredAtomicCas64
+ OpS390XLoweredAtomicExchange32
+ OpS390XLoweredAtomicExchange64
+ OpS390XFLOGR
+ OpS390XPOPCNT
+ OpS390XMLGR
+ OpS390XSumBytes2
+ OpS390XSumBytes4
+ OpS390XSumBytes8
+ OpS390XSTMG2
+ OpS390XSTMG3
+ OpS390XSTMG4
+ OpS390XSTM2
+ OpS390XSTM3
+ OpS390XSTM4
+ OpS390XLoweredMove
+ OpS390XLoweredZero
+
+ OpWasmLoweredStaticCall
+ OpWasmLoweredTailCall
+ OpWasmLoweredClosureCall
+ OpWasmLoweredInterCall
+ OpWasmLoweredAddr
+ OpWasmLoweredMove
+ OpWasmLoweredZero
+ OpWasmLoweredGetClosurePtr
+ OpWasmLoweredGetCallerPC
+ OpWasmLoweredGetCallerSP
+ OpWasmLoweredNilCheck
+ OpWasmLoweredWB
+ OpWasmLoweredConvert
+ OpWasmSelect
+ OpWasmI64Load8U
+ OpWasmI64Load8S
+ OpWasmI64Load16U
+ OpWasmI64Load16S
+ OpWasmI64Load32U
+ OpWasmI64Load32S
+ OpWasmI64Load
+ OpWasmI64Store8
+ OpWasmI64Store16
+ OpWasmI64Store32
+ OpWasmI64Store
+ OpWasmF32Load
+ OpWasmF64Load
+ OpWasmF32Store
+ OpWasmF64Store
+ OpWasmI64Const
+ OpWasmF32Const
+ OpWasmF64Const
+ OpWasmI64Eqz
+ OpWasmI64Eq
+ OpWasmI64Ne
+ OpWasmI64LtS
+ OpWasmI64LtU
+ OpWasmI64GtS
+ OpWasmI64GtU
+ OpWasmI64LeS
+ OpWasmI64LeU
+ OpWasmI64GeS
+ OpWasmI64GeU
+ OpWasmF32Eq
+ OpWasmF32Ne
+ OpWasmF32Lt
+ OpWasmF32Gt
+ OpWasmF32Le
+ OpWasmF32Ge
+ OpWasmF64Eq
+ OpWasmF64Ne
+ OpWasmF64Lt
+ OpWasmF64Gt
+ OpWasmF64Le
+ OpWasmF64Ge
+ OpWasmI64Add
+ OpWasmI64AddConst
+ OpWasmI64Sub
+ OpWasmI64Mul
+ OpWasmI64DivS
+ OpWasmI64DivU
+ OpWasmI64RemS
+ OpWasmI64RemU
+ OpWasmI64And
+ OpWasmI64Or
+ OpWasmI64Xor
+ OpWasmI64Shl
+ OpWasmI64ShrS
+ OpWasmI64ShrU
+ OpWasmF32Neg
+ OpWasmF32Add
+ OpWasmF32Sub
+ OpWasmF32Mul
+ OpWasmF32Div
+ OpWasmF64Neg
+ OpWasmF64Add
+ OpWasmF64Sub
+ OpWasmF64Mul
+ OpWasmF64Div
+ OpWasmI64TruncSatF64S
+ OpWasmI64TruncSatF64U
+ OpWasmI64TruncSatF32S
+ OpWasmI64TruncSatF32U
+ OpWasmF32ConvertI64S
+ OpWasmF32ConvertI64U
+ OpWasmF64ConvertI64S
+ OpWasmF64ConvertI64U
+ OpWasmF32DemoteF64
+ OpWasmF64PromoteF32
+ OpWasmI64Extend8S
+ OpWasmI64Extend16S
+ OpWasmI64Extend32S
+ OpWasmF32Sqrt
+ OpWasmF32Trunc
+ OpWasmF32Ceil
+ OpWasmF32Floor
+ OpWasmF32Nearest
+ OpWasmF32Abs
+ OpWasmF32Copysign
+ OpWasmF64Sqrt
+ OpWasmF64Trunc
+ OpWasmF64Ceil
+ OpWasmF64Floor
+ OpWasmF64Nearest
+ OpWasmF64Abs
+ OpWasmF64Copysign
+ OpWasmI64Ctz
+ OpWasmI64Clz
+ OpWasmI32Rotl
+ OpWasmI64Rotl
+ OpWasmI64Popcnt
+
+ OpAdd8
+ OpAdd16
+ OpAdd32
+ OpAdd64
+ OpAddPtr
+ OpAdd32F
+ OpAdd64F
+ OpSub8
+ OpSub16
+ OpSub32
+ OpSub64
+ OpSubPtr
+ OpSub32F
+ OpSub64F
+ OpMul8
+ OpMul16
+ OpMul32
+ OpMul64
+ OpMul32F
+ OpMul64F
+ OpDiv32F
+ OpDiv64F
+ OpHmul32
+ OpHmul32u
+ OpHmul64
+ OpHmul64u
+ OpMul32uhilo
+ OpMul64uhilo
+ OpMul32uover
+ OpMul64uover
+ OpAvg32u
+ OpAvg64u
+ OpDiv8
+ OpDiv8u
+ OpDiv16
+ OpDiv16u
+ OpDiv32
+ OpDiv32u
+ OpDiv64
+ OpDiv64u
+ OpDiv128u
+ OpMod8
+ OpMod8u
+ OpMod16
+ OpMod16u
+ OpMod32
+ OpMod32u
+ OpMod64
+ OpMod64u
+ OpAnd8
+ OpAnd16
+ OpAnd32
+ OpAnd64
+ OpOr8
+ OpOr16
+ OpOr32
+ OpOr64
+ OpXor8
+ OpXor16
+ OpXor32
+ OpXor64
+ OpLsh8x8
+ OpLsh8x16
+ OpLsh8x32
+ OpLsh8x64
+ OpLsh16x8
+ OpLsh16x16
+ OpLsh16x32
+ OpLsh16x64
+ OpLsh32x8
+ OpLsh32x16
+ OpLsh32x32
+ OpLsh32x64
+ OpLsh64x8
+ OpLsh64x16
+ OpLsh64x32
+ OpLsh64x64
+ OpRsh8x8
+ OpRsh8x16
+ OpRsh8x32
+ OpRsh8x64
+ OpRsh16x8
+ OpRsh16x16
+ OpRsh16x32
+ OpRsh16x64
+ OpRsh32x8
+ OpRsh32x16
+ OpRsh32x32
+ OpRsh32x64
+ OpRsh64x8
+ OpRsh64x16
+ OpRsh64x32
+ OpRsh64x64
+ OpRsh8Ux8
+ OpRsh8Ux16
+ OpRsh8Ux32
+ OpRsh8Ux64
+ OpRsh16Ux8
+ OpRsh16Ux16
+ OpRsh16Ux32
+ OpRsh16Ux64
+ OpRsh32Ux8
+ OpRsh32Ux16
+ OpRsh32Ux32
+ OpRsh32Ux64
+ OpRsh64Ux8
+ OpRsh64Ux16
+ OpRsh64Ux32
+ OpRsh64Ux64
+ OpEq8
+ OpEq16
+ OpEq32
+ OpEq64
+ OpEqPtr
+ OpEqInter
+ OpEqSlice
+ OpEq32F
+ OpEq64F
+ OpNeq8
+ OpNeq16
+ OpNeq32
+ OpNeq64
+ OpNeqPtr
+ OpNeqInter
+ OpNeqSlice
+ OpNeq32F
+ OpNeq64F
+ OpLess8
+ OpLess8U
+ OpLess16
+ OpLess16U
+ OpLess32
+ OpLess32U
+ OpLess64
+ OpLess64U
+ OpLess32F
+ OpLess64F
+ OpLeq8
+ OpLeq8U
+ OpLeq16
+ OpLeq16U
+ OpLeq32
+ OpLeq32U
+ OpLeq64
+ OpLeq64U
+ OpLeq32F
+ OpLeq64F
+ OpCondSelect
+ OpAndB
+ OpOrB
+ OpEqB
+ OpNeqB
+ OpNot
+ OpNeg8
+ OpNeg16
+ OpNeg32
+ OpNeg64
+ OpNeg32F
+ OpNeg64F
+ OpCom8
+ OpCom16
+ OpCom32
+ OpCom64
+ OpCtz8
+ OpCtz16
+ OpCtz32
+ OpCtz64
+ OpCtz8NonZero
+ OpCtz16NonZero
+ OpCtz32NonZero
+ OpCtz64NonZero
+ OpBitLen8
+ OpBitLen16
+ OpBitLen32
+ OpBitLen64
+ OpBswap32
+ OpBswap64
+ OpBitRev8
+ OpBitRev16
+ OpBitRev32
+ OpBitRev64
+ OpPopCount8
+ OpPopCount16
+ OpPopCount32
+ OpPopCount64
+ OpRotateLeft8
+ OpRotateLeft16
+ OpRotateLeft32
+ OpRotateLeft64
+ OpSqrt
+ OpSqrt32
+ OpFloor
+ OpCeil
+ OpTrunc
+ OpRound
+ OpRoundToEven
+ OpAbs
+ OpCopysign
+ OpFMA
+ OpPhi
+ OpCopy
+ OpConvert
+ OpConstBool
+ OpConstString
+ OpConstNil
+ OpConst8
+ OpConst16
+ OpConst32
+ OpConst64
+ OpConst32F
+ OpConst64F
+ OpConstInterface
+ OpConstSlice
+ OpInitMem
+ OpArg
+ OpArgIntReg
+ OpArgFloatReg
+ OpAddr
+ OpLocalAddr
+ OpSP
+ OpSB
+ OpLoad
+ OpDereference
+ OpStore
+ OpMove
+ OpZero
+ OpStoreWB
+ OpMoveWB
+ OpZeroWB
+ OpWB
+ OpHasCPUFeature
+ OpPanicBounds
+ OpPanicExtend
+ OpClosureCall
+ OpStaticCall
+ OpInterCall
+ OpTailCall
+ OpClosureLECall
+ OpStaticLECall
+ OpInterLECall
+ OpTailLECall
+ OpSignExt8to16
+ OpSignExt8to32
+ OpSignExt8to64
+ OpSignExt16to32
+ OpSignExt16to64
+ OpSignExt32to64
+ OpZeroExt8to16
+ OpZeroExt8to32
+ OpZeroExt8to64
+ OpZeroExt16to32
+ OpZeroExt16to64
+ OpZeroExt32to64
+ OpTrunc16to8
+ OpTrunc32to8
+ OpTrunc32to16
+ OpTrunc64to8
+ OpTrunc64to16
+ OpTrunc64to32
+ OpCvt32to32F
+ OpCvt32to64F
+ OpCvt64to32F
+ OpCvt64to64F
+ OpCvt32Fto32
+ OpCvt32Fto64
+ OpCvt64Fto32
+ OpCvt64Fto64
+ OpCvt32Fto64F
+ OpCvt64Fto32F
+ OpCvtBoolToUint8
+ OpRound32F
+ OpRound64F
+ OpIsNonNil
+ OpIsInBounds
+ OpIsSliceInBounds
+ OpNilCheck
+ OpGetG
+ OpGetClosurePtr
+ OpGetCallerPC
+ OpGetCallerSP
+ OpPtrIndex
+ OpOffPtr
+ OpSliceMake
+ OpSlicePtr
+ OpSliceLen
+ OpSliceCap
+ OpSlicePtrUnchecked
+ OpComplexMake
+ OpComplexReal
+ OpComplexImag
+ OpStringMake
+ OpStringPtr
+ OpStringLen
+ OpIMake
+ OpITab
+ OpIData
+ OpStructMake0
+ OpStructMake1
+ OpStructMake2
+ OpStructMake3
+ OpStructMake4
+ OpStructSelect
+ OpArrayMake0
+ OpArrayMake1
+ OpArraySelect
+ OpStoreReg
+ OpLoadReg
+ OpFwdRef
+ OpUnknown
+ OpVarDef
+ OpVarKill
+ OpVarLive
+ OpKeepAlive
+ OpInlMark
+ OpInt64Make
+ OpInt64Hi
+ OpInt64Lo
+ OpAdd32carry
+ OpAdd32withcarry
+ OpSub32carry
+ OpSub32withcarry
+ OpAdd64carry
+ OpSub64borrow
+ OpSignmask
+ OpZeromask
+ OpSlicemask
+ OpSpectreIndex
+ OpSpectreSliceIndex
+ OpCvt32Uto32F
+ OpCvt32Uto64F
+ OpCvt32Fto32U
+ OpCvt64Fto32U
+ OpCvt64Uto32F
+ OpCvt64Uto64F
+ OpCvt32Fto64U
+ OpCvt64Fto64U
+ OpSelect0
+ OpSelect1
+ OpSelectN
+ OpSelectNAddr
+ OpMakeResult
+ OpAtomicLoad8
+ OpAtomicLoad32
+ OpAtomicLoad64
+ OpAtomicLoadPtr
+ OpAtomicLoadAcq32
+ OpAtomicLoadAcq64
+ OpAtomicStore8
+ OpAtomicStore32
+ OpAtomicStore64
+ OpAtomicStorePtrNoWB
+ OpAtomicStoreRel32
+ OpAtomicStoreRel64
+ OpAtomicExchange32
+ OpAtomicExchange64
+ OpAtomicAdd32
+ OpAtomicAdd64
+ OpAtomicCompareAndSwap32
+ OpAtomicCompareAndSwap64
+ OpAtomicCompareAndSwapRel32
+ OpAtomicAnd8
+ OpAtomicAnd32
+ OpAtomicOr8
+ OpAtomicOr32
+ OpAtomicAdd32Variant
+ OpAtomicAdd64Variant
+ OpAtomicExchange32Variant
+ OpAtomicExchange64Variant
+ OpAtomicCompareAndSwap32Variant
+ OpAtomicCompareAndSwap64Variant
+ OpAtomicAnd8Variant
+ OpAtomicAnd32Variant
+ OpAtomicOr8Variant
+ OpAtomicOr32Variant
+ OpPubBarrier
+ OpClobber
+ OpClobberReg
+ OpPrefetchCache
+ OpPrefetchCacheStreamed
+)
+
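+// opcodeTable has one opInfo entry per Op constant above, in the same order,
+// so it can be indexed directly by Op value. Within each regInfo the integer
+// is a bit mask over the architecture's register numbering; the trailing
+// comment spells out which registers the mask allows (e.g. 239 = AX CX DX BX BP SI DI).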
+var opcodeTable = [...]opInfo{
+ {name: "OpInvalid"},
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 239}, // AX CX DX BX BP SI DI
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCL",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADCLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AADCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLcarry",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBL",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SBBLconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MULLQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "AVGLU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MODL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 251}, // AX CX BX SP BP SI DI
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MULLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {1, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSFW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSRW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SQRTSS",
+ argLen: 1,
+ asm: x86.ASQRTSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 238}, // CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSX",
+ argLen: 1,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSX",
+ argLen: 1,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ {1, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWLSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {2, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 255}, // AX CX DX BX SP BP SI DI
+ {0, 65791}, // AX CX DX BX SP BP SI DI SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "REPSTOSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ clobbers: 65519, // AX CX DX BX BP SI DI X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "REPMOVSL",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 255}, // AX CX DX BX SP BP SI DI
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 1}, // AX
+ },
+ clobbers: 65280, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 4}, // DX
+ {2, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 2}, // CX
+ {2, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // SI
+ {1, 1}, // AX
+ {2, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVSSconst1",
+ auxType: auxFloat32,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSDconst1",
+ auxType: auxFloat64,
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ },
+ },
+ {
+ name: "MOVSSconst2",
+ argLen: 1,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+ {
+ name: "MOVSDconst2",
+ argLen: 1,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 239}, // AX CX DX BX BP SI DI
+ },
+ outputs: []outputInfo{
+ {0, 65280}, // X0 X1 X2 X3 X4 X5 X6 X7
+ },
+ },
+ },
+
+ {
+ name: "ADDSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSD",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVSSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSSstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVSDstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AADDSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SUBSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ASUBSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MULSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMULSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSSloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSS,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "DIVSDloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ symEffect: SymRead,
+ asm: x86.ADIVSD,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ADDQ",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDL",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3Q,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.AIMUL3L,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "MULQU",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 4, // DX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "HMULQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "HMULLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "AVGQU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "DIVQ",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVL",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ auxType: auxBool,
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AIDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVQU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVLU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49147}, // AX CX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "NEGLflags",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQcarry",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADCQ",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQconstcarry",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADCQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.AADCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQborrow",
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQconstborrow",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBQconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MULQU2",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: x86.AMULQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ },
+ },
+ },
+ {
+ name: "DIVQU2",
+ argLen: 3,
+ clobberFlags: true,
+ asm: x86.ADIVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 1}, // AX
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 1}, // AX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "ANDQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQ",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodify",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQ",
+ argLen: 2,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPL",
+ argLen: 2,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPB",
+ argLen: 2,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstload",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPQconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPLconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPWconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "CMPBconstloadidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.ACMPB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "UCOMISS",
+ argLen: 2,
+ asm: x86.AUCOMISS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "UCOMISD",
+ argLen: 2,
+ asm: x86.AUCOMISD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "BTL",
+ argLen: 2,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTQ",
+ argLen: 2,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ABTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTCQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTCQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BTSQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABTSQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTQ",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTL",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTW",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTB",
+ argLen: 2,
+ commutative: true,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: x86.ATESTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTWconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: x86.ATESTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TESTBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.ATESTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SARBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHRDQ",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SHLDQ",
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ASHLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORQ",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORL",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "RORB",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ARORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // CX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLQconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLLconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLWconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ROLBconst",
+ auxType: auxInt8,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.AROLB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SUBQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORLloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XORQloadidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ resultInArg0: true,
+ clobberFlags: true,
+ symEffect: SymRead,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ADDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodify",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SUBLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.ASUBL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLmodifyidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORQconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ADDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AADDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AANDL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "XORLconstmodifyidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymRead | SymWrite,
+ asm: x86.AXORL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "NEGQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NEGL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ANEGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NOTQ",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "NOTL",
+ argLen: 1,
+ resultInArg0: true,
+ asm: x86.ANOTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSFQ",
+ argLen: 1,
+ asm: x86.ABSFQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSFL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSFL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSRQ",
+ argLen: 1,
+ asm: x86.ABSRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGT",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWLS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWHI",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCC",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWCS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVQGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVQCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVLGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVLCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWEQF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWNEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGTF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMOVWGEF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.ACMOVWCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPQ",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BSWAPL",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: x86.ABSWAPL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "POPCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.APOPCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SQRTSD",
+ argLen: 1,
+ asm: x86.ASQRTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SQRTSS",
+ argLen: 1,
+ asm: x86.ASQRTSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "ROUNDSD",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: x86.AROUNDSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "VFMADD231SD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: x86.AVFMADD231SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {2, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "SBBQcarrymask",
+ argLen: 1,
+ asm: x86.ASBBQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SBBLcarrymask",
+ argLen: 1,
+ asm: x86.ASBBL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETEQ",
+ argLen: 1,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNE",
+ argLen: 1,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETL",
+ argLen: 1,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETLE",
+ argLen: 1,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETG",
+ argLen: 1,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGE",
+ argLen: 1,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETB",
+ argLen: 1,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETBE",
+ argLen: 1,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETA",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETAE",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETO",
+ argLen: 1,
+ asm: x86.ASETOS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETEQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETNEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETLEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETGEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETGE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETBEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETAEstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "SETEQF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETEQ,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNEF",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ASETNE,
+ reg: regInfo{
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {0, 49134}, // CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETORD",
+ argLen: 1,
+ asm: x86.ASETPC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETNAN",
+ argLen: 1,
+ asm: x86.ASETPS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGF",
+ argLen: 1,
+ asm: x86.ASETHI,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "SETGEF",
+ argLen: 1,
+ asm: x86.ASETCC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSX",
+ argLen: 1,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQZX",
+ argLen: 1,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSX",
+ argLen: 1,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQZX",
+ argLen: 1,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSX",
+ argLen: 1,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQZX",
+ argLen: 1,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SL",
+ argLen: 1,
+ asm: x86.ACVTTSD2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSD2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSD2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SL",
+ argLen: 1,
+ asm: x86.ACVTTSS2SL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTTSS2SQ",
+ argLen: 1,
+ asm: x86.ACVTTSS2SQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CVTSL2SS",
+ argLen: 1,
+ asm: x86.ACVTSL2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSL2SD",
+ argLen: 1,
+ asm: x86.ACVTSL2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SS",
+ argLen: 1,
+ asm: x86.ACVTSQ2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSQ2SD",
+ argLen: 1,
+ asm: x86.ACVTSQ2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSD2SS",
+ argLen: 1,
+ asm: x86.ACVTSD2SS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "CVTSS2SD",
+ argLen: 1,
+ asm: x86.ACVTSS2SD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVQi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVQf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLi2f",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVLf2i",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "PXOR",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: x86.APXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "LEAQ",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW1",
+ auxType: auxSymOff,
+ argLen: 2,
+ commutative: true,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW2",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW4",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAQ8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAL8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LEAW8",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymAddr,
+ asm: x86.ALEAW,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLQSXload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVLQSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVOload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ },
+ {
+ name: "MOVOstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2147418112}, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ {0, 4295016447}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx2",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVWLZX,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx4",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx1",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQloadidx8",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx2",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx4",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx1",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreidx8",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVOstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVUPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVB,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconstidx2",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVW,
+ scale: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVLstoreconstidx4",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVL,
+ scale: 4,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx1",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVQstoreconstidx8",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ symEffect: SymWrite,
+ asm: x86.AMOVQ,
+ scale: 8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ },
+ clobbers: 128, // DI
+ },
+ },
+ {
+ name: "REPSTOSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 2}, // CX
+ {2, 1}, // AX
+ },
+ clobbers: 130, // CX DI
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4}, // DX
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 2147483631, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 g R15 X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ },
+ clobbers: 65728, // SI DI X0
+ },
+ },
+ {
+ name: "REPMOVSQ",
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 64}, // SI
+ {2, 2}, // CX
+ },
+ clobbers: 194, // CX SI DI
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 128}, // DI
+ {1, 879}, // AX CX DX BX BP SI R8 R9
+ },
+ clobbers: 2147418112, // X0 X1 X2 X3 X4 X5 X6 X7 X8 X9 X10 X11 X12 X13 X14
+ },
+ },
+ {
+ name: "LoweredHasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ rematerializeable: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // DX
+ {1, 8}, // BX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // CX
+ {1, 4}, // DX
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // AX
+ {1, 2}, // CX
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_UGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT_ULT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVLatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVQatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGB",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGL",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XCHGQ",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XADDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "XADDQlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AXADDQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "CMPXCHGLlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "CMPXCHGQlock",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.ACMPXCHGQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // AX
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {2, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ clobbers: 1, // AX
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AANDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORBlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ORLlock",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: x86.AORL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "PrefetchT0",
+ argLen: 2,
+ hasSideEffects: true,
+ asm: x86.APREFETCHT0,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "PrefetchNTA",
+ argLen: 2,
+ hasSideEffects: true,
+ asm: x86.APREFETCHNTA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "ANDNQ",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AANDNQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "ANDNL",
+ argLen: 2,
+ clobberFlags: true,
+ asm: x86.AANDNL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {1, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSIQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSIQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSIL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSIL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSMSKQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSMSKQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSMSKL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSMSKL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSRQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSRQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "BLSRL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ABLSRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TZCNTQ",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ATZCNTQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "TZCNTL",
+ argLen: 1,
+ clobberFlags: true,
+ asm: x86.ATZCNTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBELstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+ {
+ name: "MOVBEQload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: x86.AMOVBEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ outputs: []outputInfo{
+ {0, 49135}, // AX CX DX BX BP SI DI R8 R9 R10 R11 R12 R13 R15
+ },
+ },
+ },
+ {
+ name: "MOVBEQstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: x86.AMOVBEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 49151}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 R15
+ {0, 4295032831}, // AX CX DX BX SP BP SI DI R8 R9 R10 R11 R12 R13 g R15 SB
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 30719}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSB",
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "HMULU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLudiv",
+ argLen: 2,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 20492, // R2 R3 R12 R14
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "ADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADC",
+ argLen: 3,
+ commutative: true,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBS",
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBC",
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCconst",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLU",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULLU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULA",
+ argLen: 3,
+ asm: arm.AMULA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULS",
+ argLen: 3,
+ asm: arm.AMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: arm.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: arm.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULF",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ANMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: arm.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: arm.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSF",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MULSD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AMULSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULAD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: arm.AFMULAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFX",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BFXU",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ABFXU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: arm.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: arm.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: arm.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: arm.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ABSD",
+ argLen: 1,
+ asm: arm.AABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "REV16",
+ argLen: 1,
+ asm: arm.AREV16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRR",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRRconst",
+ auxType: auxInt32,
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRR",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftLLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRLreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDshiftRAreg",
+ argLen: 3,
+ asm: arm.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftLLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRLreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORshiftRAreg",
+ argLen: 3,
+ asm: arm.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftLLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRLreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORshiftRAreg",
+ argLen: 3,
+ asm: arm.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftLLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRLreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "BICshiftRAreg",
+ argLen: 3,
+ asm: arm.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftLLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRLreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MVNshiftRAreg",
+ argLen: 2,
+ asm: arm.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftLLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRLreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADCshiftRAreg",
+ argLen: 4,
+ asm: arm.AADC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftLLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRLreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SBCshiftRAreg",
+ argLen: 4,
+ asm: arm.ASBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftLLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRLreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSCshiftRAreg",
+ argLen: 4,
+ asm: arm.ARSC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftLLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRLreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDSshiftRAreg",
+ argLen: 3,
+ asm: arm.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftLLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRLreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "RSBSshiftRAreg",
+ argLen: 3,
+ asm: arm.ARSB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQ",
+ argLen: 2,
+ commutative: true,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF",
+ argLen: 2,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD",
+ argLen: 2,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRL",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRA",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftLLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRLreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMNshiftRAreg",
+ argLen: 3,
+ asm: arm.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftLLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRLreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TSTshiftRAreg",
+ argLen: 3,
+ asm: arm.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftLLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRLreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "TEQshiftRAreg",
+ argLen: 3,
+ asm: arm.ATEQ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMPF0",
+ argLen: 1,
+ asm: arm.ACMPF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMPD0",
+ argLen: 1,
+ asm: arm.ACMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294975488}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftLL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadshiftRA",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftLL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRL",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreshiftRA",
+ auxType: auxInt32,
+ argLen: 4,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {2, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ {0, 4294998015}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 SP R14 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm.AMOVBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm.AMOVHS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUF",
+ argLen: 1,
+ asm: arm.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVWUD",
+ argLen: 1,
+ asm: arm.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVFW",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDW",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFWU",
+ argLen: 1,
+ asm: arm.AMOVFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDWU",
+ argLen: 1,
+ asm: arm.AMOVDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ clobbers: 2147483648, // F15
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: arm.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: arm.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CMOVWHSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMOVWLSconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAcond",
+ argLen: 3,
+ asm: arm.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 128}, // R7
+ {0, 29695}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 SP R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 4294924287, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 22527}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 g R12 R14
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 1}, // R0
+ },
+ clobbers: 20482, // R1 R12 R14
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 20487, // R0 R1 R2 R12 R14
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 128}, // R7
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 1}, // R0
+ {2, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 4294922240, // R12 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+
+ {
+ name: "ADCSflags",
+ argLen: 3,
+ commutative: true,
+ asm: arm64.AADCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADCzerocarry",
+ argLen: 1,
+ asm: arm64.AADC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1878786047}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSconstflags",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDSflags",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBCSflags",
+ argLen: 3,
+ asm: arm64.ASBCS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBSflags",
+ argLen: 2,
+ asm: arm64.ASUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEG",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MNEGW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AMNEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULH",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ASMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMULL",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AUMULL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: arm64.ASDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIV",
+ argLen: 2,
+ asm: arm64.AUDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: arm64.ASDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UDIVW",
+ argLen: 2,
+ asm: arm64.AUDIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOD",
+ argLen: 2,
+ asm: arm64.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMOD",
+ argLen: 2,
+ asm: arm64.AUREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ asm: arm64.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UMODW",
+ argLen: 2,
+ asm: arm64.AUREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: arm64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: arm64.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULS",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMULD",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AFNMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: arm64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: arm64.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BIC",
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EON",
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVN",
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGSflags",
+ argLen: 1,
+ asm: arm64.ANEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NGCzerocarry",
+ argLen: 1,
+ asm: arm64.ANGC,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FABSD",
+ argLen: 1,
+ asm: arm64.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: arm64.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: arm64.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: arm64.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: arm64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "REV",
+ argLen: 1,
+ asm: arm64.AREV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REVW",
+ argLen: 1,
+ asm: arm64.AREVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REV16",
+ argLen: 1,
+ asm: arm64.AREV16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "REV16W",
+ argLen: 1,
+ asm: arm64.AREV16W,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBIT",
+ argLen: 1,
+ asm: arm64.ARBIT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RBITW",
+ argLen: 1,
+ asm: arm64.ARBITW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: arm64.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CLZW",
+ argLen: 1,
+ asm: arm64.ACLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "VCNT",
+ argLen: 1,
+ asm: arm64.AVCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "VUADDLV",
+ argLen: 1,
+ asm: arm64.AVUADDLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: arm64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDD",
+ argLen: 3,
+ asm: arm64.AFMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDS",
+ argLen: 3,
+ asm: arm64.AFNMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDD",
+ argLen: 3,
+ asm: arm64.AFNMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: arm64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBD",
+ argLen: 3,
+ asm: arm64.AFMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBS",
+ argLen: 3,
+ asm: arm64.AFNMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBD",
+ argLen: 3,
+ asm: arm64.AFNMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MADD",
+ argLen: 3,
+ asm: arm64.AMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MADDW",
+ argLen: 3,
+ asm: arm64.AMADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUB",
+ argLen: 3,
+ asm: arm64.AMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MSUBW",
+ argLen: 3,
+ asm: arm64.AMSUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ALSR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AASR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ROR",
+ argLen: 2,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORW",
+ argLen: 2,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AROR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "RORWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ARORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EXTRWconst",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEXTRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMN",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ACMNW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TST",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTW",
+ argLen: 2,
+ commutative: true,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: arm64.ATSTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD",
+ argLen: 2,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPS0",
+ argLen: 1,
+ asm: arm64.AFCMPS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCMPD0",
+ argLen: 1,
+ asm: arm64.AFCMPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MVNshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MVNshiftRO",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.AMVN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftLL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRL",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NEGshiftRA",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: arm64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ADDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SUBshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ANDshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "XORshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BICshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ABIC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "EONshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AEON,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "ORNshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMPshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "CMNshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ACMN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftLL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRL",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRA",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "TSTshiftRO",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: arm64.ATST,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "BFI",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "BFXIL",
+ auxType: auxARM64BitField,
+ argLen: 2,
+ resultInArg0: true,
+ asm: arm64.ABFXIL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.ASBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFIZ",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "UBFX",
+ auxType: auxARM64BitField,
+ argLen: 1,
+ asm: arm64.AUBFX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037928517632}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUloadidx",
+ argLen: 3,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUloadidx2",
+ argLen: 3,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUloadidx4",
+ argLen: 3,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx4",
+ argLen: 3,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx8",
+ argLen: 3,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STP",
+ auxType: auxSymOff,
+ argLen: 4,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx2",
+ argLen: 4,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx4",
+ argLen: 4,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx8",
+ argLen: 4,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx4",
+ argLen: 4,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx8",
+ argLen: 4,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVQstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: arm64.ASTP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezeroidx2",
+ argLen: 3,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezeroidx4",
+ argLen: 3,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezeroidx8",
+ argLen: 3,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "FMOVDgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FMOVSgpfp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVSfpgp",
+ argLen: 1,
+ asm: arm64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: arm64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: arm64.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: arm64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: arm64.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: arm64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: arm64.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: arm64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "SCVTFWS",
+ argLen: 1,
+ asm: arm64.ASCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFWD",
+ argLen: 1,
+ asm: arm64.ASCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWS",
+ argLen: 1,
+ asm: arm64.AUCVTFWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFWD",
+ argLen: 1,
+ asm: arm64.AUCVTFWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFS",
+ argLen: 1,
+ asm: arm64.ASCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SCVTFD",
+ argLen: 1,
+ asm: arm64.ASCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFS",
+ argLen: 1,
+ asm: arm64.AUCVTFS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "UCVTFD",
+ argLen: 1,
+ asm: arm64.AUCVTFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTZSSW",
+ argLen: 1,
+ asm: arm64.AFCVTZSSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSDW",
+ argLen: 1,
+ asm: arm64.AFCVTZSDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUSW",
+ argLen: 1,
+ asm: arm64.AFCVTZUSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUDW",
+ argLen: 1,
+ asm: arm64.AFCVTZUDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSS",
+ argLen: 1,
+ asm: arm64.AFCVTZSS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZSD",
+ argLen: 1,
+ asm: arm64.AFCVTZSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUS",
+ argLen: 1,
+ asm: arm64.AFCVTZUS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTZUD",
+ argLen: 1,
+ asm: arm64.AFCVTZUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: arm64.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: arm64.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTAD",
+ argLen: 1,
+ asm: arm64.AFRINTAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTMD",
+ argLen: 1,
+ asm: arm64.AFRINTMD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTND",
+ argLen: 1,
+ asm: arm64.AFRINTND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTPD",
+ argLen: 1,
+ asm: arm64.AFRINTPD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FRINTZD",
+ argLen: 1,
+ asm: arm64.AFRINTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CSEL",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSEL0",
+ auxType: auxCCop,
+ argLen: 2,
+ asm: arm64.ACSEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSINC",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSINC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSINV",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSINV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSNEG",
+ auxType: auxCCop,
+ argLen: 3,
+ asm: arm64.ACSNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CSETM",
+ auxType: auxCCop,
+ argLen: 1,
+ asm: arm64.ACSETM,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 67108864}, // R26
+ {0, 1744568319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30 SP
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 9223372035512336383, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualU",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "GreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotLessEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterThanF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "NotGreaterEqualF",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 538116096, // R16 R17 R20 R30
+ },
+ },
+ {
+ name: "LoweredZero",
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65536}, // R16
+ {1, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 65536, // R16
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2097152}, // R21
+ {1, 1048576}, // R20
+ },
+ clobbers: 607322112, // R16 R17 R20 R21 R26 R30
+ },
+ },
+ {
+ name: "LoweredMove",
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 131072}, // R17
+ {1, 65536}, // R16
+ {2, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ clobbers: 196608, // R16 R17
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 67108864}, // R26
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "FlagConstant",
+ auxType: auxFlagConstant,
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LDAR",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARB",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LDARW",
+ argLen: 2,
+ faultOnNilArg0: true,
+ asm: arm64.ALDARW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "STLRB",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLR",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "STLRW",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: arm64.ASTLRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32Variant",
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {2, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: arm64.AORR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32Variant",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 805044223}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 670826495}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 R30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 9223372035244359680, // R16 R17 R30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "PRFM",
+ auxType: auxInt64,
+ argLen: 2,
+ hasSideEffects: true,
+ asm: arm64.APRFM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372038733561855}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R19 R20 R21 R22 R23 R24 R25 R26 g R30 SP SB
+ },
+ },
+ },
+ {
+ name: "DMB",
+ auxType: auxInt64,
+ argLen: 1,
+ hasSideEffects: true,
+ asm: arm64.ADMB,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AADDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 536870910}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASUBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ clobbers: 105553116266496, // HI LO
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MULT",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "MULTU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: mips.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: mips.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 35184372088832}, // HI
+ {1, 70368744177664}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: mips.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SLLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRLconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SRAconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CLZ",
+ argLen: 1,
+ asm: mips.ACLZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTzero",
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "SGTUzero",
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWconst",
+ auxType: auxInt32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140737555464192}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZ",
+ argLen: 3,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "CMOVZzero",
+ argLen: 2,
+ resultInArg0: true,
+ asm: mips.ACMOVZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ outputs: []outputInfo{
+ {0, 35183835217920}, // F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 402653182}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 140737421246462, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {2, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ {0, 140738025226238}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt32,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt32,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 469762046}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 335544318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R28 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 140737219919872, // R31 F0 F2 F4 F6 F8 F10 F12 F14 F16 F18 F20 F22 F24 F26 F28 F30 HI LO
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendA",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 8}, // R3
+ {2, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendB",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 4}, // R2
+ {2, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicExtendC",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 2}, // R1
+ {2, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADDV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ADDVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AADDVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 268435454}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBV",
+ argLen: 2,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SUBVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASUBVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MULV",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "MULVU",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVV",
+ argLen: 2,
+ asm: mips.ADIVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "DIVVU",
+ argLen: 2,
+ asm: mips.ADIVVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504606846976}, // HI
+ {1, 2305843009213693952}, // LO
+ },
+ },
+ },
+ {
+ name: "ADDF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "ADDD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBF",
+ argLen: 2,
+ asm: mips.ASUBF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SUBD",
+ argLen: 2,
+ asm: mips.ASUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULF",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MULD",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVF",
+ argLen: 2,
+ asm: mips.ADIVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: mips.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGV",
+ argLen: 1,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "NEGF",
+ argLen: 1,
+ asm: mips.ANEGF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "NEGD",
+ argLen: 1,
+ asm: mips.ANEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTD",
+ argLen: 1,
+ asm: mips.ASQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SQRTF",
+ argLen: 1,
+ asm: mips.ASQRTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "SLLV",
+ argLen: 2,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SLLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASLLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLV",
+ argLen: 2,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRLVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRLV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAV",
+ argLen: 2,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SRAVconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASRAV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGT",
+ argLen: 2,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTU",
+ argLen: 2,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "SGTUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: mips.ASGTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "CMPEQF",
+ argLen: 2,
+ asm: mips.ACMPEQF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPEQD",
+ argLen: 2,
+ asm: mips.ACMPEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGEF",
+ argLen: 2,
+ asm: mips.ACMPGEF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGED",
+ argLen: 2,
+ asm: mips.ACMPGED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTF",
+ argLen: 2,
+ asm: mips.ACMPGTF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CMPGTD",
+ argLen: 2,
+ asm: mips.ACMPGTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018460942336}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVFload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVFstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ {1, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVVstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: mips.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: mips.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: mips.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: mips.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: mips.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: mips.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVreg",
+ argLen: 1,
+ asm: mips.AMOVV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVVnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "MOVWF",
+ argLen: 1,
+ asm: mips.AMOVWF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVWD",
+ argLen: 1,
+ asm: mips.AMOVWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVF",
+ argLen: 1,
+ asm: mips.AMOVVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVVD",
+ argLen: 1,
+ asm: mips.AMOVVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFW",
+ argLen: 1,
+ asm: mips.ATRUNCFW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDW",
+ argLen: 1,
+ asm: mips.ATRUNCDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCFV",
+ argLen: 1,
+ asm: mips.ATRUNCFV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "TRUNCDV",
+ argLen: 1,
+ asm: mips.ATRUNCDV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVFD",
+ argLen: 1,
+ asm: mips.AMOVFD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "MOVDF",
+ argLen: 1,
+ asm: mips.AMOVDF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1152921504338411520}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4194304}, // R22
+ {0, 201326590}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 4611686018393833470, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 134217730, // R1 R31
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ },
+ clobbers: 134217734, // R1 R2 R31
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 2}, // R1
+ {2, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStorezero64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst32",
+ auxType: auxInt32,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAddconst64",
+ auxType: auxInt64,
+ argLen: 2,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {2, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ {0, 4611686018695823358}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 SP g R31 SB
+ },
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 234881022}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 g R31
+ },
+ },
+ },
+ {
+ name: "FPFlagTrue",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "FPFlagFalse",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4194304}, // R22
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 167772158}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 R16 R17 R18 R19 R20 R21 R22 R24 R25 R31
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 4611686018293170176, // R31 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 HI LO
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: ppc64.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SUBFCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ asm: ppc64.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: ppc64.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MADDLD",
+ argLen: 3,
+ asm: ppc64.AMADDLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHW",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MULHWU",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AMULHWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ asm: ppc64.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ asm: ppc64.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ asm: ppc64.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ asm: ppc64.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTL",
+ argLen: 2,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLW",
+ argLen: 2,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLDICL",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ARLDICL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLWI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLWI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CLRLSLDI",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACLRLSLDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAdd64Carry",
+ argLen: 3,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ROTLWconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AROTLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EXTSWSLconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AEXTSWSLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWINM",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWNM",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: ppc64.ARLWNM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "RLWMI",
+ auxType: auxInt64,
+ argLen: 2,
+ resultInArg0: true,
+ asm: ppc64.ARLWMI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZD",
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.ACNTLZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTLZW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.ACNTLZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZD",
+ argLen: 1,
+ asm: ppc64.ACNTTZD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CNTTZW",
+ argLen: 1,
+ asm: ppc64.ACNTTZW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTD",
+ argLen: 1,
+ asm: ppc64.APOPCNTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTW",
+ argLen: 1,
+ asm: ppc64.APOPCNTW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "POPCNTB",
+ argLen: 1,
+ asm: ppc64.APOPCNTB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ asm: ppc64.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: ppc64.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ asm: ppc64.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: ppc64.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ asm: ppc64.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ asm: ppc64.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUD",
+ argLen: 2,
+ asm: ppc64.AMODUD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSD",
+ argLen: 2,
+ asm: ppc64.AMODSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODUW",
+ argLen: 2,
+ asm: ppc64.AMODUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MODSW",
+ argLen: 2,
+ asm: ppc64.AMODSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FCTIDZ",
+ argLen: 1,
+ asm: ppc64.AFCTIDZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCTIWZ",
+ argLen: 1,
+ asm: ppc64.AFCTIWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCFID",
+ argLen: 1,
+ asm: ppc64.AFCFID,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCFIDS",
+ argLen: 1,
+ asm: ppc64.AFCFIDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FRSP",
+ argLen: 1,
+ asm: ppc64.AFRSP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MFVSRD",
+ argLen: 1,
+ asm: ppc64.AMFVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MTVSRD",
+ argLen: 1,
+ asm: ppc64.AMTVSRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDN",
+ argLen: 2,
+ asm: ppc64.AANDN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORN",
+ argLen: 2,
+ asm: ppc64.AORN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.ANOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORCC",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AXORCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "EQV",
+ argLen: 2,
+ commutative: true,
+ asm: ppc64.AEQV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: ppc64.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ asm: ppc64.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: ppc64.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: ppc64.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FFLOOR",
+ argLen: 1,
+ asm: ppc64.AFRIM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCEIL",
+ argLen: 1,
+ asm: ppc64.AFRIP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FTRUNC",
+ argLen: 1,
+ asm: ppc64.AFRIZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FROUND",
+ argLen: 1,
+ asm: ppc64.AFRIN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FABS",
+ argLen: 1,
+ asm: ppc64.AFABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FNABS",
+ argLen: 1,
+ asm: ppc64.AFNABS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCPSGN",
+ argLen: 2,
+ asm: ppc64.AFCPSGN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ clobberFlags: true,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ANDCCconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.AANDCC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ argLen: 3,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ argLen: 3,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "DCBT",
+ auxType: auxInt64,
+ argLen: 2,
+ hasSideEffects: true,
+ asm: ppc64.ADCBT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ auxType: auxSym,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ argLen: 4,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {2, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ argLen: 4,
+ asm: ppc64.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: ppc64.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "FCMPU",
+ argLen: 2,
+ asm: ppc64.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ {1, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: ppc64.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: ppc64.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISEL",
+ auxType: auxInt32,
+ argLen: 3,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "ISELB",
+ auxType: auxInt32,
+ argLen: 2,
+ asm: ppc64.AISEL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "Equal",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "NotEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FLessEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterThan",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "GreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "FGreaterEqual",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 2048}, // R11
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ clobbers: 2147483648, // R31
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ outputs: []outputInfo{
+ {0, 576460743713488896}, // F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ {1, 2048}, // R11
+ },
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: -1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4096}, // R12
+ },
+ clobbers: 576460745860964344, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29 g F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZeroShort",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadZero",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ },
+ clobbers: 1048576, // R20
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredQuadMove",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 3145728, // R20 R21
+ },
+ },
+ {
+ name: "LoweredQuadMoveShort",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoadPtr",
+ auxType: auxInt64,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxInt64,
+ argLen: 4,
+ resultNotInArgs: true,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {2, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ outputs: []outputInfo{
+ {0, 1073733624}, // R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: ppc64.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ {1, 1073733630}, // SP SB R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R14 R15 R16 R17 R18 R19 R20 R21 R22 R23 R24 R25 R26 R27 R28 R29
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1048576}, // R20
+ {1, 2097152}, // R21
+ },
+ clobbers: 576460746931312640, // R11 R12 R18 R19 R22 R23 R24 R25 R26 R27 R28 R29 R31 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // R5
+ {1, 64}, // R6
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // R4
+ {1, 32}, // R5
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 8}, // R3
+ {1, 16}, // R4
+ },
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ADDIW",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AADDIW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ asm: riscv.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ asm: riscv.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ asm: riscv.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ asm: riscv.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MUL",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULW",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULH",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MULHU",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AMULHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluhilo",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredMuluover",
+ argLen: 2,
+ resultNotInArgs: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIV",
+ argLen: 2,
+ asm: riscv.ADIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVU",
+ argLen: 2,
+ asm: riscv.ADIVU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ asm: riscv.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "DIVUW",
+ argLen: 2,
+ asm: riscv.ADIVUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REM",
+ argLen: 2,
+ asm: riscv.AREM,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMU",
+ argLen: 2,
+ asm: riscv.AREMU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMW",
+ argLen: 2,
+ asm: riscv.AREMW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "REMUW",
+ argLen: 2,
+ asm: riscv.AREMUW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymRdWr,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVHstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVWstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVDstorezero",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: riscv.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: riscv.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: riscv.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDreg",
+ argLen: 1,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVBUreg",
+ argLen: 1,
+ asm: riscv.AMOVBU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVHUreg",
+ argLen: 1,
+ asm: riscv.AMOVHU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVWUreg",
+ argLen: 1,
+ asm: riscv.AMOVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVDnop",
+ argLen: 1,
+ resultInArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLL",
+ argLen: 2,
+ asm: riscv.ASLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRA",
+ argLen: 2,
+ asm: riscv.ASRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRL",
+ argLen: 2,
+ asm: riscv.ASRL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRAI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRAI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SRLI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASRLI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "XORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AXORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ORI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AORI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "ANDI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.AANDI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ asm: riscv.ANOT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SEQZ",
+ argLen: 1,
+ asm: riscv.ASEQZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SNEZ",
+ argLen: 1,
+ asm: riscv.ASNEZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLT",
+ argLen: 2,
+ asm: riscv.ASLT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTI",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTI,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTU",
+ argLen: 2,
+ asm: riscv.ASLTU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "SLTIU",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: riscv.ASLTIU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "MOVconvert",
+ argLen: 2,
+ asm: riscv.AMOV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 524288}, // X20
+ {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 9223372035781033968, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "DUFFZERO",
+ auxType: auxInt64,
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 512}, // X10
+ },
+ clobbers: 512, // X10
+ },
+ },
+ {
+ name: "DUFFCOPY",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1024}, // X11
+ {1, 512}, // X10
+ },
+ clobbers: 1536, // X10 X11
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 16, // X5
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ {2, 1006632880}, // X5 X6 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ clobbers: 112, // X5 X6 X7
+ },
+ },
+ {
+ name: "LoweredAtomicLoad8",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad32",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicLoad64",
+ argLen: 2,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore8",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicStore64",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd32",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAdd64",
+ argLen: 3,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ argLen: 4,
+ resultNotInArgs: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ unsafePoint: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {2, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicAnd32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: riscv.AAMOANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicOr32",
+ argLen: 3,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ asm: riscv.AAMOORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1073741808}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30
+ {0, 9223372037928517618}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 g X28 X29 X30 SB
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632946}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 524288}, // X20
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ clobbers: 9223372034707292160, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 64}, // X7
+ {1, 134217728}, // X28
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 32}, // X6
+ {1, 64}, // X7
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 16}, // X5
+ {1, 32}, // X6
+ },
+ },
+ },
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ asm: riscv.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ asm: riscv.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: riscv.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ asm: riscv.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVSX",
+ argLen: 1,
+ asm: riscv.AFMVSX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSW",
+ argLen: 1,
+ asm: riscv.AFCVTSW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSL",
+ argLen: 1,
+ asm: riscv.AFCVTSL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWS",
+ argLen: 1,
+ asm: riscv.AFCVTWS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLS",
+ argLen: 1,
+ asm: riscv.AFCVTLS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FMOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVF,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQS",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNES",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTS",
+ argLen: 2,
+ asm: riscv.AFLTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLES",
+ argLen: 2,
+ asm: riscv.AFLES,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FADDD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSUBD",
+ argLen: 2,
+ asm: riscv.AFSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMULD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFMULD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FDIVD",
+ argLen: 2,
+ asm: riscv.AFDIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMADDD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMSUBD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMADDD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMADDD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNMSUBD",
+ argLen: 3,
+ commutative: true,
+ asm: riscv.AFNMSUBD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {2, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSQRTD",
+ argLen: 1,
+ asm: riscv.AFSQRTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FNEGD",
+ argLen: 1,
+ asm: riscv.AFNEGD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FABSD",
+ argLen: 1,
+ asm: riscv.AFABSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FSGNJD",
+ argLen: 2,
+ asm: riscv.AFSGNJD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMVDX",
+ argLen: 1,
+ asm: riscv.AFMVDX,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDW",
+ argLen: 1,
+ asm: riscv.AFCVTDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTDL",
+ argLen: 1,
+ asm: riscv.AFCVTDL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTWD",
+ argLen: 1,
+ asm: riscv.AFCVTWD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTLD",
+ argLen: 1,
+ asm: riscv.AFCVTLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FCVTDS",
+ argLen: 1,
+ asm: riscv.AFCVTDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FCVTSD",
+ argLen: 1,
+ asm: riscv.AFCVTSD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ },
+ outputs: []outputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: riscv.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372037861408754}, // SP X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30 SB
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "FEQD",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFEQD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FNED",
+ argLen: 2,
+ commutative: true,
+ asm: riscv.AFNED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLTD",
+ argLen: 2,
+ asm: riscv.AFLTD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+ {
+ name: "FLED",
+ argLen: 2,
+ asm: riscv.AFLED,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 9223372034707292160}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 1006632944}, // X5 X6 X7 X8 X9 X10 X11 X12 X13 X14 X15 X16 X17 X18 X19 X20 X21 X22 X23 X24 X25 X26 X28 X29 X30
+ },
+ },
+ },
+
+ {
+ name: "FADDS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FADD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUBS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSUB",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMULS",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMULS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMUL",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AFMUL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIVS",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FDIV",
+ argLen: 2,
+ resultInArg0: true,
+ asm: s390x.AFDIV,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEGS",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEGS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FNEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFNEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADDS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADDS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMADD",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUBS",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUBS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMSUB",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.AFMSUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LPDFR",
+ argLen: 1,
+ asm: s390x.ALPDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LNDFR",
+ argLen: 1,
+ asm: s390x.ALNDFR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CPSDR",
+ argLen: 2,
+ asm: s390x.ACPSDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FIDBR",
+ auxType: auxInt8,
+ argLen: 1,
+ asm: s390x.AFIDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSconst",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDconst",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ symEffect: SymRead,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVSstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FMOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ symEffect: SymWrite,
+ asm: s390x.AFMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "ADD",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AADDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUB",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.ASUBW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLW",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULLWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AMULLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHD",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MULHDU",
+ argLen: 2,
+ commutative: true,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMULHDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "DIVWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ADIVWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODD",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODW",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODDU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODDU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "MODWU",
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AMODWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ {1, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ clobbers: 2048, // R11
+ outputs: []outputInfo{
+ {0, 21503}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R12 R14
+ },
+ },
+ },
+ {
+ name: "AND",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AAND,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ANDWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AANDW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "OR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XOR",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORW",
+ argLen: 2,
+ commutative: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORconst",
+ auxType: auxInt64,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXOR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "XORWload",
+ auxType: auxSymOff,
+ argLen: 3,
+ resultInArg0: true,
+ clobberFlags: true,
+ faultOnNilArg1: true,
+ symEffect: SymRead,
+ asm: s390x.AXORW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDC",
+ argLen: 2,
+ commutative: true,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDCconst",
+ auxType: auxInt16,
+ argLen: 1,
+ asm: s390x.AADDC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "ADDE",
+ argLen: 3,
+ commutative: true,
+ resultInArg0: true,
+ asm: s390x.AADDE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBC",
+ argLen: 2,
+ asm: s390x.ASUBC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SUBE",
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ASUBE,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CMP",
+ argLen: 2,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPW",
+ argLen: 2,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPU",
+ argLen: 2,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWU",
+ argLen: 2,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMP,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "CMPWUconst",
+ auxType: auxInt32,
+ argLen: 1,
+ asm: s390x.ACMPWU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "FCMPS",
+ argLen: 2,
+ asm: s390x.ACEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FCMP",
+ argLen: 2,
+ asm: s390x.AFCMPU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTDBR",
+ argLen: 1,
+ asm: s390x.ALTDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LTEBR",
+ argLen: 1,
+ asm: s390x.ALTEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "SLD",
+ argLen: 2,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLW",
+ argLen: 2,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SLWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASLW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRD",
+ argLen: 2,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRW",
+ argLen: 2,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRDconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ASRW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAD",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAW",
+ argLen: 2,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRADconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "SRAWconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ASRAW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLG",
+ argLen: 2,
+ asm: s390x.ARLLG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLL",
+ argLen: 2,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RLLconst",
+ auxType: auxUInt8,
+ argLen: 1,
+ asm: s390x.ARLL,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RXSBG",
+ auxType: auxS390XRotateParams,
+ argLen: 2,
+ resultInArg0: true,
+ clobberFlags: true,
+ asm: s390x.ARXSBG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "RISBGZ",
+ auxType: auxS390XRotateParams,
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ARISBGZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEG",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NEGW",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ANEGW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOT",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "NOTW",
+ argLen: 1,
+ resultInArg0: true,
+ clobberFlags: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "FSQRT",
+ argLen: 1,
+ asm: s390x.AFSQRT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "FSQRTS",
+ argLen: 1,
+ asm: s390x.AFSQRTS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LOCGR",
+ auxType: auxS390XCCMask,
+ argLen: 3,
+ resultInArg0: true,
+ asm: s390x.ALOCGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ {1, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBreg",
+ argLen: 1,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZreg",
+ argLen: 1,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHreg",
+ argLen: 1,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZreg",
+ argLen: 1,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWreg",
+ argLen: 1,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZreg",
+ argLen: 1,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDconst",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LDGR",
+ argLen: 1,
+ asm: s390x.ALDGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LGDR",
+ argLen: 1,
+ asm: s390x.ALGDR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGDBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGDBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CFEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACFEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CGEBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACGEBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CEFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDFBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDFBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CEGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACEGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDGBRA",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDGBRA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CLFEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLFDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLFDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGEBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CLGDBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACLGDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CELFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLFBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLFBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CELGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACELGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "CDLGBR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.ACDLGBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LEDBR",
+ argLen: 1,
+ asm: s390x.ALEDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LDEBR",
+ argLen: 1,
+ asm: s390x.ALDEBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "MOVDaddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymRead,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDaddridx",
+ auxType: auxSymOff,
+ argLen: 2,
+ symEffect: SymRead,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295000064}, // SP SB
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBR",
+ argLen: 1,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBR",
+ argLen: 1,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MVC",
+ auxType: auxSymValAndOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ symEffect: SymNone,
+ asm: s390x.AMVC,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVHBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDBRloadidx",
+ auxType: auxSymOff,
+ argLen: 3,
+ commutative: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVHBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVHBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVWBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDBRstoreidx",
+ auxType: auxSymOff,
+ argLen: 4,
+ commutative: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVDBR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVBstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVHstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVH,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVWstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "MOVDstoreconst",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ },
+ },
+ {
+ name: "CLEAR",
+ auxType: auxSymValAndOff,
+ argLen: 2,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ACLEAR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "CALLstatic",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLtail",
+ auxType: auxCallOff,
+ argLen: 1,
+ clobberFlags: true,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLclosure",
+ auxType: auxCallOff,
+ argLen: 3,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4096}, // R12
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "CALLinter",
+ auxType: auxCallOff,
+ argLen: 2,
+ clobberFlags: true,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23550}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 4294933503, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 g R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "InvertFlags",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "LoweredGetG",
+ argLen: 1,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ zeroWidth: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4096}, // R12
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ clobberFlags: true,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredRound32F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredRound64F",
+ argLen: 1,
+ resultInArg0: true,
+ zeroWidth: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ clobberFlags: true,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ clobbers: 4294918146, // R1 R14 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ {
+ name: "LoweredPanicBoundsA",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsB",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ },
+ },
+ },
+ {
+ name: "LoweredPanicBoundsC",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1}, // R0
+ {1, 2}, // R1
+ },
+ },
+ },
+ {
+ name: "FlagEQ",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagLT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagGT",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "FlagOV",
+ argLen: 0,
+ reg: regInfo{},
+ },
+ {
+ name: "SYNC",
+ argLen: 1,
+ asm: s390x.ASYNC,
+ reg: regInfo{},
+ },
+ {
+ name: "MOVBZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVBZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVWZatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVWZ,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVDatomicload",
+ auxType: auxSymOff,
+ argLen: 2,
+ faultOnNilArg0: true,
+ symEffect: SymRead,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MOVBatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVB,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVWatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVW,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "MOVDatomicstore",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymWrite,
+ asm: s390x.AMOVD,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAA",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAA,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LAAG",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ALAAG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "AddTupleFirst32",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "AddTupleFirst64",
+ argLen: 2,
+ reg: regInfo{},
+ },
+ {
+ name: "LAN",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LANfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAN,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LAO",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4295023614}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP SB
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LAOfloor",
+ argLen: 3,
+ clobberFlags: true,
+ hasSideEffects: true,
+ asm: s390x.ALAO,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+ {
+ name: "LoweredAtomicCas32",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicCas64",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 1}, // R0
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 1, // R0
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange32",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "LoweredAtomicExchange64",
+ auxType: auxSymOff,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ hasSideEffects: true,
+ symEffect: SymRdWr,
+ asm: s390x.ACSG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ {1, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ outputs: []outputInfo{
+ {1, 0},
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "FLOGR",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.AFLOGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ clobbers: 2, // R1
+ outputs: []outputInfo{
+ {0, 1}, // R0
+ },
+ },
+ },
+ {
+ name: "POPCNT",
+ argLen: 1,
+ clobberFlags: true,
+ asm: s390x.APOPCNT,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ },
+ },
+ {
+ name: "MLGR",
+ argLen: 2,
+ asm: s390x.AMLGR,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 8}, // R3
+ {0, 23551}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14
+ },
+ outputs: []outputInfo{
+ {0, 4}, // R2
+ {1, 8}, // R3
+ },
+ },
+ },
+ {
+ name: "SumBytes2",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes4",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "SumBytes8",
+ argLen: 1,
+ reg: regInfo{},
+ },
+ {
+ name: "STMG2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STMG4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMG,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM2",
+ auxType: auxSymOff,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM3",
+ auxType: auxSymOff,
+ argLen: 5,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "STM4",
+ auxType: auxSymOff,
+ argLen: 6,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ symEffect: SymWrite,
+ asm: s390x.ASTMY,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 2}, // R1
+ {2, 4}, // R2
+ {3, 8}, // R3
+ {4, 16}, // R4
+ {0, 56318}, // R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 4,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ faultOnNilArg1: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 4}, // R2
+ {2, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 6, // R1 R2
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 3,
+ clobberFlags: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 2}, // R1
+ {1, 56319}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R11 R12 R14 SP
+ },
+ clobbers: 2, // R1
+ },
+ },
+
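+	// The entries below are for the wasm architecture (their asm constants come
+	// from the wasm package). In the register-mask comments, R0-R15 are the
+	// integer registers; judging from this table, float32 ops use F0-F15 and
+	// float64 ops use F16-F31, with SP, SB and g appearing only in masks of ops
+	// that may read or clobber them.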
+ {
+ name: "LoweredStaticCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredTailCall",
+ auxType: auxCallOff,
+ argLen: 1,
+ call: true,
+ tailCall: true,
+ reg: regInfo{
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredClosureCall",
+ auxType: auxCallOff,
+ argLen: 3,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredInterCall",
+ auxType: auxCallOff,
+ argLen: 2,
+ call: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ clobbers: 844424930131967, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15 F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31 g
+ },
+ },
+ {
+ name: "LoweredAddr",
+ auxType: auxSymOff,
+ argLen: 1,
+ rematerializeable: true,
+ symEffect: SymAddr,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredMove",
+ auxType: auxInt64,
+ argLen: 3,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredZero",
+ auxType: auxInt64,
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetClosurePtr",
+ argLen: 0,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerPC",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredGetCallerSP",
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredNilCheck",
+ argLen: 2,
+ nilCheck: true,
+ faultOnNilArg0: true,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredWB",
+ auxType: auxSym,
+ argLen: 3,
+ symEffect: SymNone,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ {1, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "LoweredConvert",
+ argLen: 2,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "Select",
+ argLen: 3,
+ asm: wasm.ASelect,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {2, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load8S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load16S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32U",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load32S",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AI64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Store8",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store8,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store16",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store16,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store32",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AI64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F32Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF32Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Load",
+ auxType: auxInt64,
+ argLen: 2,
+ asm: wasm.AF64Load,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF32Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "F64Store",
+ auxType: auxInt64,
+ argLen: 3,
+ asm: wasm.AF64Store,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {0, 1407374883618815}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP SB
+ },
+ },
+ },
+ {
+ name: "I64Const",
+ auxType: auxInt64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Const",
+ auxType: auxFloat32,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Const",
+ auxType: auxFloat64,
+ argLen: 0,
+ rematerializeable: true,
+ reg: regInfo{
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Eqz",
+ argLen: 1,
+ asm: wasm.AI64Eqz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Eq",
+ argLen: 2,
+ asm: wasm.AI64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Ne",
+ argLen: 2,
+ asm: wasm.AI64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtS",
+ argLen: 2,
+ asm: wasm.AI64LtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LtU",
+ argLen: 2,
+ asm: wasm.AI64LtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtS",
+ argLen: 2,
+ asm: wasm.AI64GtS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GtU",
+ argLen: 2,
+ asm: wasm.AI64GtU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeS",
+ argLen: 2,
+ asm: wasm.AI64LeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64LeU",
+ argLen: 2,
+ asm: wasm.AI64LeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeS",
+ argLen: 2,
+ asm: wasm.AI64GeS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64GeU",
+ argLen: 2,
+ asm: wasm.AI64GeU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Eq",
+ argLen: 2,
+ asm: wasm.AF32Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ne",
+ argLen: 2,
+ asm: wasm.AF32Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Lt",
+ argLen: 2,
+ asm: wasm.AF32Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Gt",
+ argLen: 2,
+ asm: wasm.AF32Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Le",
+ argLen: 2,
+ asm: wasm.AF32Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Ge",
+ argLen: 2,
+ asm: wasm.AF32Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Eq",
+ argLen: 2,
+ asm: wasm.AF64Eq,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ne",
+ argLen: 2,
+ asm: wasm.AF64Ne,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Lt",
+ argLen: 2,
+ asm: wasm.AF64Lt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Gt",
+ argLen: 2,
+ asm: wasm.AF64Gt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Le",
+ argLen: 2,
+ asm: wasm.AF64Le,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F64Ge",
+ argLen: 2,
+ asm: wasm.AF64Ge,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Add",
+ argLen: 2,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64AddConst",
+ auxType: auxInt64,
+ argLen: 1,
+ asm: wasm.AI64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Sub",
+ argLen: 2,
+ asm: wasm.AI64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Mul",
+ argLen: 2,
+ asm: wasm.AI64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivS",
+ argLen: 2,
+ asm: wasm.AI64DivS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64DivU",
+ argLen: 2,
+ asm: wasm.AI64DivU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemS",
+ argLen: 2,
+ asm: wasm.AI64RemS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64RemU",
+ argLen: 2,
+ asm: wasm.AI64RemU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64And",
+ argLen: 2,
+ asm: wasm.AI64And,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Or",
+ argLen: 2,
+ asm: wasm.AI64Or,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Xor",
+ argLen: 2,
+ asm: wasm.AI64Xor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Shl",
+ argLen: 2,
+ asm: wasm.AI64Shl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrS",
+ argLen: 2,
+ asm: wasm.AI64ShrS,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64ShrU",
+ argLen: 2,
+ asm: wasm.AI64ShrU,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Neg",
+ argLen: 1,
+ asm: wasm.AF32Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Add",
+ argLen: 2,
+ asm: wasm.AF32Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Sub",
+ argLen: 2,
+ asm: wasm.AF32Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Mul",
+ argLen: 2,
+ asm: wasm.AF32Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Div",
+ argLen: 2,
+ asm: wasm.AF32Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Neg",
+ argLen: 1,
+ asm: wasm.AF64Neg,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Add",
+ argLen: 2,
+ asm: wasm.AF64Add,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Sub",
+ argLen: 2,
+ asm: wasm.AF64Sub,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Mul",
+ argLen: 2,
+ asm: wasm.AF64Mul,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Div",
+ argLen: 2,
+ asm: wasm.AF64Div,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF64U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32S",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64TruncSatF32U",
+ argLen: 1,
+ asm: wasm.AI64TruncSatF32U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF32ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64S",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64ConvertI64U",
+ argLen: 1,
+ asm: wasm.AF64ConvertI64U,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F32DemoteF64",
+ argLen: 1,
+ asm: wasm.AF32DemoteF64,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64PromoteF32",
+ argLen: 1,
+ asm: wasm.AF64PromoteF32,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Extend8S",
+ argLen: 1,
+ asm: wasm.AI64Extend8S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend16S",
+ argLen: 1,
+ asm: wasm.AI64Extend16S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Extend32S",
+ argLen: 1,
+ asm: wasm.AI64Extend32S,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "F32Sqrt",
+ argLen: 1,
+ asm: wasm.AF32Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Trunc",
+ argLen: 1,
+ asm: wasm.AF32Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Ceil",
+ argLen: 1,
+ asm: wasm.AF32Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Floor",
+ argLen: 1,
+ asm: wasm.AF32Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Nearest",
+ argLen: 1,
+ asm: wasm.AF32Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Abs",
+ argLen: 1,
+ asm: wasm.AF32Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F32Copysign",
+ argLen: 2,
+ asm: wasm.AF32Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ {1, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ outputs: []outputInfo{
+ {0, 4294901760}, // F0 F1 F2 F3 F4 F5 F6 F7 F8 F9 F10 F11 F12 F13 F14 F15
+ },
+ },
+ },
+ {
+ name: "F64Sqrt",
+ argLen: 1,
+ asm: wasm.AF64Sqrt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Trunc",
+ argLen: 1,
+ asm: wasm.AF64Trunc,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Ceil",
+ argLen: 1,
+ asm: wasm.AF64Ceil,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Floor",
+ argLen: 1,
+ asm: wasm.AF64Floor,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Nearest",
+ argLen: 1,
+ asm: wasm.AF64Nearest,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Abs",
+ argLen: 1,
+ asm: wasm.AF64Abs,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "F64Copysign",
+ argLen: 2,
+ asm: wasm.AF64Copysign,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ {1, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ outputs: []outputInfo{
+ {0, 281470681743360}, // F16 F17 F18 F19 F20 F21 F22 F23 F24 F25 F26 F27 F28 F29 F30 F31
+ },
+ },
+ },
+ {
+ name: "I64Ctz",
+ argLen: 1,
+ asm: wasm.AI64Ctz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Clz",
+ argLen: 1,
+ asm: wasm.AI64Clz,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I32Rotl",
+ argLen: 2,
+ asm: wasm.AI32Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Rotl",
+ argLen: 2,
+ asm: wasm.AI64Rotl,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ {1, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+ {
+ name: "I64Popcnt",
+ argLen: 1,
+ asm: wasm.AI64Popcnt,
+ reg: regInfo{
+ inputs: []inputInfo{
+ {0, 281474976776191}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15 SP
+ },
+ outputs: []outputInfo{
+ {0, 65535}, // R0 R1 R2 R3 R4 R5 R6 R7 R8 R9 R10 R11 R12 R13 R14 R15
+ },
+ },
+ },
+
+ {
+ name: "Add8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "AddPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Add32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SubPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mul8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Div32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Hmul32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul32u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Hmul64u",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uhilo",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul32uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Mul64uover",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Avg32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Avg64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Div128u",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Mod8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod8u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod16u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod32u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Mod64u",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "And8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "And64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Or64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Xor64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Lsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Lsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64x64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh8Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh16Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh32Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux8",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux16",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux32",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Rsh64Ux64",
+ auxType: auxBool,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "EqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Eq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Eq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq8",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq16",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq32",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqPtr",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqInter",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NeqSlice",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Neq32F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Neq64F",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Less8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Less64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq8U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq16U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64U",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq32F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Leq64F",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "CondSelect",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "AndB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "OrB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "EqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "NeqB",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Not",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Neg64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Com64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz8NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz16NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz32NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ctz64NonZero",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitLen64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Bswap64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "BitRev64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "PopCount64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RotateLeft8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft16",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "RotateLeft64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sqrt",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Sqrt32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Floor",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Ceil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "RoundToEven",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Abs",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Copysign",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "FMA",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Phi",
+ argLen: -1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Copy",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Convert",
+ argLen: 2,
+ resultInArg0: true,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "ConstBool",
+ auxType: auxBool,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstString",
+ auxType: auxString,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstNil",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const8",
+ auxType: auxInt8,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const16",
+ auxType: auxInt16,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32",
+ auxType: auxInt32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64",
+ auxType: auxInt64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const32F",
+ auxType: auxFloat32,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "Const64F",
+ auxType: auxFloat64,
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstInterface",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ConstSlice",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "InitMem",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Arg",
+ auxType: auxSymOff,
+ argLen: 0,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "ArgIntReg",
+ auxType: auxNameOffsetInt8,
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "ArgFloatReg",
+ auxType: auxNameOffsetInt8,
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Addr",
+ auxType: auxSym,
+ argLen: 1,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "LocalAddr",
+ auxType: auxSym,
+ argLen: 2,
+ symEffect: SymAddr,
+ generic: true,
+ },
+ {
+ name: "SP",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SB",
+ argLen: 0,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Load",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Dereference",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Store",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Move",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Zero",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StoreWB",
+ auxType: auxTyp,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "MoveWB",
+ auxType: auxTypSize,
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "ZeroWB",
+ auxType: auxTypSize,
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "WB",
+ auxType: auxSym,
+ argLen: 3,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "HasCPUFeature",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "PanicBounds",
+ auxType: auxInt64,
+ argLen: 3,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "PanicExtend",
+ auxType: auxInt64,
+ argLen: 4,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "TailCall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "ClosureLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "StaticLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "InterLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "TailLECall",
+ auxType: auxCallOff,
+ argLen: -1,
+ call: true,
+ generic: true,
+ },
+ {
+ name: "SignExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SignExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt8to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt16to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ZeroExt32to64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc16to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc32to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to16",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Trunc64to32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64to64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "CvtBoolToUint8",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Round64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsNonNil",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IsInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "IsSliceInBounds",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "NilCheck",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "GetG",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "GetClosurePtr",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerPC",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "GetCallerSP",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "PtrIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "OffPtr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceMake",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "SlicePtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SliceCap",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SlicePtrUnchecked",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ComplexReal",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ComplexImag",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StringPtr",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StringLen",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IMake",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "ITab",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "IData",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "StructMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StructMake2",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "StructMake3",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "StructMake4",
+ argLen: 4,
+ generic: true,
+ },
+ {
+ name: "StructSelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArrayMake0",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "ArrayMake1",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "ArraySelect",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "StoreReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "LoadReg",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "FwdRef",
+ auxType: auxSym,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "Unknown",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "VarDef",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "VarKill",
+ auxType: auxSym,
+ argLen: 1,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "VarLive",
+ auxType: auxSym,
+ argLen: 1,
+ zeroWidth: true,
+ symEffect: SymRead,
+ generic: true,
+ },
+ {
+ name: "KeepAlive",
+ argLen: 2,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "InlMark",
+ auxType: auxInt32,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Make",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Int64Hi",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Int64Lo",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Add32carry",
+ argLen: 2,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Add32withcarry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub32carry",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Sub32withcarry",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Add64carry",
+ argLen: 3,
+ commutative: true,
+ generic: true,
+ },
+ {
+ name: "Sub64borrow",
+ argLen: 3,
+ generic: true,
+ },
+ {
+ name: "Signmask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Zeromask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Slicemask",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SpectreIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "SpectreSliceIndex",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto32U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto32F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Uto64F",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt32Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Cvt64Fto64U",
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "Select0",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "Select1",
+ argLen: 1,
+ zeroWidth: true,
+ generic: true,
+ },
+ {
+ name: "SelectN",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "SelectNAddr",
+ auxType: auxInt64,
+ argLen: 1,
+ generic: true,
+ },
+ {
+ name: "MakeResult",
+ argLen: -1,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad8",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoad64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadPtr",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq32",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicLoadAcq64",
+ argLen: 2,
+ generic: true,
+ },
+ {
+ name: "AtomicStore8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStore64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStorePtrNoWB",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicStoreRel64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwapRel32",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAdd64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicExchange64Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap32Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicCompareAndSwap64Variant",
+ argLen: 4,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicAnd32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr8Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "AtomicOr32Variant",
+ argLen: 3,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "PubBarrier",
+ argLen: 1,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "Clobber",
+ auxType: auxSymOff,
+ argLen: 0,
+ symEffect: SymNone,
+ generic: true,
+ },
+ {
+ name: "ClobberReg",
+ argLen: 0,
+ generic: true,
+ },
+ {
+ name: "PrefetchCache",
+ argLen: 2,
+ hasSideEffects: true,
+ generic: true,
+ },
+ {
+ name: "PrefetchCacheStreamed",
+ argLen: 2,
+ hasSideEffects: true,
+ generic: true,
+ },
+}
+
+func (o Op) Asm() obj.As { return opcodeTable[o].asm }
+func (o Op) Scale() int16 { return int16(opcodeTable[o].scale) }
+func (o Op) String() string { return opcodeTable[o].name }
+func (o Op) SymEffect() SymEffect { return opcodeTable[o].symEffect }
+func (o Op) IsCall() bool { return opcodeTable[o].call }
+func (o Op) IsTailCall() bool { return opcodeTable[o].tailCall }
+func (o Op) HasSideEffects() bool { return opcodeTable[o].hasSideEffects }
+func (o Op) UnsafePoint() bool { return opcodeTable[o].unsafePoint }
+func (o Op) ResultInArg0() bool { return opcodeTable[o].resultInArg0 }
+
+var registers386 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_X0, -1, "X0"},
+ {9, x86.REG_X1, -1, "X1"},
+ {10, x86.REG_X2, -1, "X2"},
+ {11, x86.REG_X3, -1, "X3"},
+ {12, x86.REG_X4, -1, "X4"},
+ {13, x86.REG_X5, -1, "X5"},
+ {14, x86.REG_X6, -1, "X6"},
+ {15, x86.REG_X7, -1, "X7"},
+ {16, 0, -1, "SB"},
+}
+var paramIntReg386 = []int8(nil)
+var paramFloatReg386 = []int8(nil)
+var gpRegMask386 = regMask(239)
+var fpRegMask386 = regMask(65280)
+var specialRegMask386 = regMask(0)
+var framepointerReg386 = int8(5)
+var linkReg386 = int8(-1)
+var registersAMD64 = [...]Register{
+ {0, x86.REG_AX, 0, "AX"},
+ {1, x86.REG_CX, 1, "CX"},
+ {2, x86.REG_DX, 2, "DX"},
+ {3, x86.REG_BX, 3, "BX"},
+ {4, x86.REGSP, -1, "SP"},
+ {5, x86.REG_BP, 4, "BP"},
+ {6, x86.REG_SI, 5, "SI"},
+ {7, x86.REG_DI, 6, "DI"},
+ {8, x86.REG_R8, 7, "R8"},
+ {9, x86.REG_R9, 8, "R9"},
+ {10, x86.REG_R10, 9, "R10"},
+ {11, x86.REG_R11, 10, "R11"},
+ {12, x86.REG_R12, 11, "R12"},
+ {13, x86.REG_R13, 12, "R13"},
+ {14, x86.REGG, -1, "g"},
+ {15, x86.REG_R15, 13, "R15"},
+ {16, x86.REG_X0, -1, "X0"},
+ {17, x86.REG_X1, -1, "X1"},
+ {18, x86.REG_X2, -1, "X2"},
+ {19, x86.REG_X3, -1, "X3"},
+ {20, x86.REG_X4, -1, "X4"},
+ {21, x86.REG_X5, -1, "X5"},
+ {22, x86.REG_X6, -1, "X6"},
+ {23, x86.REG_X7, -1, "X7"},
+ {24, x86.REG_X8, -1, "X8"},
+ {25, x86.REG_X9, -1, "X9"},
+ {26, x86.REG_X10, -1, "X10"},
+ {27, x86.REG_X11, -1, "X11"},
+ {28, x86.REG_X12, -1, "X12"},
+ {29, x86.REG_X13, -1, "X13"},
+ {30, x86.REG_X14, -1, "X14"},
+ {31, x86.REG_X15, -1, "X15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegAMD64 = []int8{0, 3, 1, 7, 6, 8, 9, 10, 11}
+var paramFloatRegAMD64 = []int8{16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}
+var gpRegMaskAMD64 = regMask(49135)
+var fpRegMaskAMD64 = regMask(2147418112)
+var specialRegMaskAMD64 = regMask(2147483648)
+var framepointerRegAMD64 = int8(5)
+var linkRegAMD64 = int8(-1)
+var registersARM = [...]Register{
+ {0, arm.REG_R0, 0, "R0"},
+ {1, arm.REG_R1, 1, "R1"},
+ {2, arm.REG_R2, 2, "R2"},
+ {3, arm.REG_R3, 3, "R3"},
+ {4, arm.REG_R4, 4, "R4"},
+ {5, arm.REG_R5, 5, "R5"},
+ {6, arm.REG_R6, 6, "R6"},
+ {7, arm.REG_R7, 7, "R7"},
+ {8, arm.REG_R8, 8, "R8"},
+ {9, arm.REG_R9, 9, "R9"},
+ {10, arm.REGG, -1, "g"},
+ {11, arm.REG_R11, -1, "R11"},
+ {12, arm.REG_R12, 10, "R12"},
+ {13, arm.REGSP, -1, "SP"},
+ {14, arm.REG_R14, 11, "R14"},
+ {15, arm.REG_R15, -1, "R15"},
+ {16, arm.REG_F0, -1, "F0"},
+ {17, arm.REG_F1, -1, "F1"},
+ {18, arm.REG_F2, -1, "F2"},
+ {19, arm.REG_F3, -1, "F3"},
+ {20, arm.REG_F4, -1, "F4"},
+ {21, arm.REG_F5, -1, "F5"},
+ {22, arm.REG_F6, -1, "F6"},
+ {23, arm.REG_F7, -1, "F7"},
+ {24, arm.REG_F8, -1, "F8"},
+ {25, arm.REG_F9, -1, "F9"},
+ {26, arm.REG_F10, -1, "F10"},
+ {27, arm.REG_F11, -1, "F11"},
+ {28, arm.REG_F12, -1, "F12"},
+ {29, arm.REG_F13, -1, "F13"},
+ {30, arm.REG_F14, -1, "F14"},
+ {31, arm.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegARM = []int8(nil)
+var paramFloatRegARM = []int8(nil)
+var gpRegMaskARM = regMask(21503)
+var fpRegMaskARM = regMask(4294901760)
+var specialRegMaskARM = regMask(0)
+var framepointerRegARM = int8(-1)
+var linkRegARM = int8(14)
+var registersARM64 = [...]Register{
+ {0, arm64.REG_R0, 0, "R0"},
+ {1, arm64.REG_R1, 1, "R1"},
+ {2, arm64.REG_R2, 2, "R2"},
+ {3, arm64.REG_R3, 3, "R3"},
+ {4, arm64.REG_R4, 4, "R4"},
+ {5, arm64.REG_R5, 5, "R5"},
+ {6, arm64.REG_R6, 6, "R6"},
+ {7, arm64.REG_R7, 7, "R7"},
+ {8, arm64.REG_R8, 8, "R8"},
+ {9, arm64.REG_R9, 9, "R9"},
+ {10, arm64.REG_R10, 10, "R10"},
+ {11, arm64.REG_R11, 11, "R11"},
+ {12, arm64.REG_R12, 12, "R12"},
+ {13, arm64.REG_R13, 13, "R13"},
+ {14, arm64.REG_R14, 14, "R14"},
+ {15, arm64.REG_R15, 15, "R15"},
+ {16, arm64.REG_R16, 16, "R16"},
+ {17, arm64.REG_R17, 17, "R17"},
+ {18, arm64.REG_R18, -1, "R18"},
+ {19, arm64.REG_R19, 18, "R19"},
+ {20, arm64.REG_R20, 19, "R20"},
+ {21, arm64.REG_R21, 20, "R21"},
+ {22, arm64.REG_R22, 21, "R22"},
+ {23, arm64.REG_R23, 22, "R23"},
+ {24, arm64.REG_R24, 23, "R24"},
+ {25, arm64.REG_R25, 24, "R25"},
+ {26, arm64.REG_R26, 25, "R26"},
+ {27, arm64.REGG, -1, "g"},
+ {28, arm64.REG_R29, -1, "R29"},
+ {29, arm64.REG_R30, 26, "R30"},
+ {30, arm64.REGSP, -1, "SP"},
+ {31, arm64.REG_F0, -1, "F0"},
+ {32, arm64.REG_F1, -1, "F1"},
+ {33, arm64.REG_F2, -1, "F2"},
+ {34, arm64.REG_F3, -1, "F3"},
+ {35, arm64.REG_F4, -1, "F4"},
+ {36, arm64.REG_F5, -1, "F5"},
+ {37, arm64.REG_F6, -1, "F6"},
+ {38, arm64.REG_F7, -1, "F7"},
+ {39, arm64.REG_F8, -1, "F8"},
+ {40, arm64.REG_F9, -1, "F9"},
+ {41, arm64.REG_F10, -1, "F10"},
+ {42, arm64.REG_F11, -1, "F11"},
+ {43, arm64.REG_F12, -1, "F12"},
+ {44, arm64.REG_F13, -1, "F13"},
+ {45, arm64.REG_F14, -1, "F14"},
+ {46, arm64.REG_F15, -1, "F15"},
+ {47, arm64.REG_F16, -1, "F16"},
+ {48, arm64.REG_F17, -1, "F17"},
+ {49, arm64.REG_F18, -1, "F18"},
+ {50, arm64.REG_F19, -1, "F19"},
+ {51, arm64.REG_F20, -1, "F20"},
+ {52, arm64.REG_F21, -1, "F21"},
+ {53, arm64.REG_F22, -1, "F22"},
+ {54, arm64.REG_F23, -1, "F23"},
+ {55, arm64.REG_F24, -1, "F24"},
+ {56, arm64.REG_F25, -1, "F25"},
+ {57, arm64.REG_F26, -1, "F26"},
+ {58, arm64.REG_F27, -1, "F27"},
+ {59, arm64.REG_F28, -1, "F28"},
+ {60, arm64.REG_F29, -1, "F29"},
+ {61, arm64.REG_F30, -1, "F30"},
+ {62, arm64.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var paramIntRegARM64 = []int8{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+var paramFloatRegARM64 = []int8{31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46}
+var gpRegMaskARM64 = regMask(670826495)
+var fpRegMaskARM64 = regMask(9223372034707292160)
+var specialRegMaskARM64 = regMask(0)
+var framepointerRegARM64 = int8(-1)
+var linkRegARM64 = int8(29)
+var registersMIPS = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REG_R28, 24, "R28"},
+ {26, mips.REGSP, -1, "SP"},
+ {27, mips.REGG, -1, "g"},
+ {28, mips.REG_R31, 25, "R31"},
+ {29, mips.REG_F0, -1, "F0"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F4, -1, "F4"},
+ {32, mips.REG_F6, -1, "F6"},
+ {33, mips.REG_F8, -1, "F8"},
+ {34, mips.REG_F10, -1, "F10"},
+ {35, mips.REG_F12, -1, "F12"},
+ {36, mips.REG_F14, -1, "F14"},
+ {37, mips.REG_F16, -1, "F16"},
+ {38, mips.REG_F18, -1, "F18"},
+ {39, mips.REG_F20, -1, "F20"},
+ {40, mips.REG_F22, -1, "F22"},
+ {41, mips.REG_F24, -1, "F24"},
+ {42, mips.REG_F26, -1, "F26"},
+ {43, mips.REG_F28, -1, "F28"},
+ {44, mips.REG_F30, -1, "F30"},
+ {45, mips.REG_HI, -1, "HI"},
+ {46, mips.REG_LO, -1, "LO"},
+ {47, 0, -1, "SB"},
+}
+var paramIntRegMIPS = []int8(nil)
+var paramFloatRegMIPS = []int8(nil)
+var gpRegMaskMIPS = regMask(335544318)
+var fpRegMaskMIPS = regMask(35183835217920)
+var specialRegMaskMIPS = regMask(105553116266496)
+var framepointerRegMIPS = int8(-1)
+var linkRegMIPS = int8(28)
+var registersMIPS64 = [...]Register{
+ {0, mips.REG_R0, -1, "R0"},
+ {1, mips.REG_R1, 0, "R1"},
+ {2, mips.REG_R2, 1, "R2"},
+ {3, mips.REG_R3, 2, "R3"},
+ {4, mips.REG_R4, 3, "R4"},
+ {5, mips.REG_R5, 4, "R5"},
+ {6, mips.REG_R6, 5, "R6"},
+ {7, mips.REG_R7, 6, "R7"},
+ {8, mips.REG_R8, 7, "R8"},
+ {9, mips.REG_R9, 8, "R9"},
+ {10, mips.REG_R10, 9, "R10"},
+ {11, mips.REG_R11, 10, "R11"},
+ {12, mips.REG_R12, 11, "R12"},
+ {13, mips.REG_R13, 12, "R13"},
+ {14, mips.REG_R14, 13, "R14"},
+ {15, mips.REG_R15, 14, "R15"},
+ {16, mips.REG_R16, 15, "R16"},
+ {17, mips.REG_R17, 16, "R17"},
+ {18, mips.REG_R18, 17, "R18"},
+ {19, mips.REG_R19, 18, "R19"},
+ {20, mips.REG_R20, 19, "R20"},
+ {21, mips.REG_R21, 20, "R21"},
+ {22, mips.REG_R22, 21, "R22"},
+ {23, mips.REG_R24, 22, "R24"},
+ {24, mips.REG_R25, 23, "R25"},
+ {25, mips.REGSP, -1, "SP"},
+ {26, mips.REGG, -1, "g"},
+ {27, mips.REG_R31, 24, "R31"},
+ {28, mips.REG_F0, -1, "F0"},
+ {29, mips.REG_F1, -1, "F1"},
+ {30, mips.REG_F2, -1, "F2"},
+ {31, mips.REG_F3, -1, "F3"},
+ {32, mips.REG_F4, -1, "F4"},
+ {33, mips.REG_F5, -1, "F5"},
+ {34, mips.REG_F6, -1, "F6"},
+ {35, mips.REG_F7, -1, "F7"},
+ {36, mips.REG_F8, -1, "F8"},
+ {37, mips.REG_F9, -1, "F9"},
+ {38, mips.REG_F10, -1, "F10"},
+ {39, mips.REG_F11, -1, "F11"},
+ {40, mips.REG_F12, -1, "F12"},
+ {41, mips.REG_F13, -1, "F13"},
+ {42, mips.REG_F14, -1, "F14"},
+ {43, mips.REG_F15, -1, "F15"},
+ {44, mips.REG_F16, -1, "F16"},
+ {45, mips.REG_F17, -1, "F17"},
+ {46, mips.REG_F18, -1, "F18"},
+ {47, mips.REG_F19, -1, "F19"},
+ {48, mips.REG_F20, -1, "F20"},
+ {49, mips.REG_F21, -1, "F21"},
+ {50, mips.REG_F22, -1, "F22"},
+ {51, mips.REG_F23, -1, "F23"},
+ {52, mips.REG_F24, -1, "F24"},
+ {53, mips.REG_F25, -1, "F25"},
+ {54, mips.REG_F26, -1, "F26"},
+ {55, mips.REG_F27, -1, "F27"},
+ {56, mips.REG_F28, -1, "F28"},
+ {57, mips.REG_F29, -1, "F29"},
+ {58, mips.REG_F30, -1, "F30"},
+ {59, mips.REG_F31, -1, "F31"},
+ {60, mips.REG_HI, -1, "HI"},
+ {61, mips.REG_LO, -1, "LO"},
+ {62, 0, -1, "SB"},
+}
+var paramIntRegMIPS64 = []int8(nil)
+var paramFloatRegMIPS64 = []int8(nil)
+var gpRegMaskMIPS64 = regMask(167772158)
+var fpRegMaskMIPS64 = regMask(1152921504338411520)
+var specialRegMaskMIPS64 = regMask(3458764513820540928)
+var framepointerRegMIPS64 = int8(-1)
+var linkRegMIPS64 = int8(27)
+var registersPPC64 = [...]Register{
+ {0, ppc64.REG_R0, -1, "R0"},
+ {1, ppc64.REGSP, -1, "SP"},
+ {2, 0, -1, "SB"},
+ {3, ppc64.REG_R3, 0, "R3"},
+ {4, ppc64.REG_R4, 1, "R4"},
+ {5, ppc64.REG_R5, 2, "R5"},
+ {6, ppc64.REG_R6, 3, "R6"},
+ {7, ppc64.REG_R7, 4, "R7"},
+ {8, ppc64.REG_R8, 5, "R8"},
+ {9, ppc64.REG_R9, 6, "R9"},
+ {10, ppc64.REG_R10, 7, "R10"},
+ {11, ppc64.REG_R11, 8, "R11"},
+ {12, ppc64.REG_R12, 9, "R12"},
+ {13, ppc64.REG_R13, -1, "R13"},
+ {14, ppc64.REG_R14, 10, "R14"},
+ {15, ppc64.REG_R15, 11, "R15"},
+ {16, ppc64.REG_R16, 12, "R16"},
+ {17, ppc64.REG_R17, 13, "R17"},
+ {18, ppc64.REG_R18, 14, "R18"},
+ {19, ppc64.REG_R19, 15, "R19"},
+ {20, ppc64.REG_R20, 16, "R20"},
+ {21, ppc64.REG_R21, 17, "R21"},
+ {22, ppc64.REG_R22, 18, "R22"},
+ {23, ppc64.REG_R23, 19, "R23"},
+ {24, ppc64.REG_R24, 20, "R24"},
+ {25, ppc64.REG_R25, 21, "R25"},
+ {26, ppc64.REG_R26, 22, "R26"},
+ {27, ppc64.REG_R27, 23, "R27"},
+ {28, ppc64.REG_R28, 24, "R28"},
+ {29, ppc64.REG_R29, 25, "R29"},
+ {30, ppc64.REGG, -1, "g"},
+ {31, ppc64.REG_R31, -1, "R31"},
+ {32, ppc64.REG_F0, -1, "F0"},
+ {33, ppc64.REG_F1, -1, "F1"},
+ {34, ppc64.REG_F2, -1, "F2"},
+ {35, ppc64.REG_F3, -1, "F3"},
+ {36, ppc64.REG_F4, -1, "F4"},
+ {37, ppc64.REG_F5, -1, "F5"},
+ {38, ppc64.REG_F6, -1, "F6"},
+ {39, ppc64.REG_F7, -1, "F7"},
+ {40, ppc64.REG_F8, -1, "F8"},
+ {41, ppc64.REG_F9, -1, "F9"},
+ {42, ppc64.REG_F10, -1, "F10"},
+ {43, ppc64.REG_F11, -1, "F11"},
+ {44, ppc64.REG_F12, -1, "F12"},
+ {45, ppc64.REG_F13, -1, "F13"},
+ {46, ppc64.REG_F14, -1, "F14"},
+ {47, ppc64.REG_F15, -1, "F15"},
+ {48, ppc64.REG_F16, -1, "F16"},
+ {49, ppc64.REG_F17, -1, "F17"},
+ {50, ppc64.REG_F18, -1, "F18"},
+ {51, ppc64.REG_F19, -1, "F19"},
+ {52, ppc64.REG_F20, -1, "F20"},
+ {53, ppc64.REG_F21, -1, "F21"},
+ {54, ppc64.REG_F22, -1, "F22"},
+ {55, ppc64.REG_F23, -1, "F23"},
+ {56, ppc64.REG_F24, -1, "F24"},
+ {57, ppc64.REG_F25, -1, "F25"},
+ {58, ppc64.REG_F26, -1, "F26"},
+ {59, ppc64.REG_F27, -1, "F27"},
+ {60, ppc64.REG_F28, -1, "F28"},
+ {61, ppc64.REG_F29, -1, "F29"},
+ {62, ppc64.REG_F30, -1, "F30"},
+ {63, ppc64.REG_F31, -1, "F31"},
+}
+var paramIntRegPPC64 = []int8{3, 4, 5, 6, 7, 8, 9, 10, 14, 15, 16, 17}
+var paramFloatRegPPC64 = []int8{33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44}
+var gpRegMaskPPC64 = regMask(1073733624)
+var fpRegMaskPPC64 = regMask(576460743713488896)
+var specialRegMaskPPC64 = regMask(0)
+var framepointerRegPPC64 = int8(-1)
+var linkRegPPC64 = int8(-1)
+var registersRISCV64 = [...]Register{
+ {0, riscv.REG_X0, -1, "X0"},
+ {1, riscv.REGSP, -1, "SP"},
+ {2, riscv.REG_X3, -1, "X3"},
+ {3, riscv.REG_X4, -1, "X4"},
+ {4, riscv.REG_X5, 0, "X5"},
+ {5, riscv.REG_X6, 1, "X6"},
+ {6, riscv.REG_X7, 2, "X7"},
+ {7, riscv.REG_X8, 3, "X8"},
+ {8, riscv.REG_X9, 4, "X9"},
+ {9, riscv.REG_X10, 5, "X10"},
+ {10, riscv.REG_X11, 6, "X11"},
+ {11, riscv.REG_X12, 7, "X12"},
+ {12, riscv.REG_X13, 8, "X13"},
+ {13, riscv.REG_X14, 9, "X14"},
+ {14, riscv.REG_X15, 10, "X15"},
+ {15, riscv.REG_X16, 11, "X16"},
+ {16, riscv.REG_X17, 12, "X17"},
+ {17, riscv.REG_X18, 13, "X18"},
+ {18, riscv.REG_X19, 14, "X19"},
+ {19, riscv.REG_X20, 15, "X20"},
+ {20, riscv.REG_X21, 16, "X21"},
+ {21, riscv.REG_X22, 17, "X22"},
+ {22, riscv.REG_X23, 18, "X23"},
+ {23, riscv.REG_X24, 19, "X24"},
+ {24, riscv.REG_X25, 20, "X25"},
+ {25, riscv.REG_X26, 21, "X26"},
+ {26, riscv.REGG, -1, "g"},
+ {27, riscv.REG_X28, 22, "X28"},
+ {28, riscv.REG_X29, 23, "X29"},
+ {29, riscv.REG_X30, 24, "X30"},
+ {30, riscv.REG_X31, -1, "X31"},
+ {31, riscv.REG_F0, -1, "F0"},
+ {32, riscv.REG_F1, -1, "F1"},
+ {33, riscv.REG_F2, -1, "F2"},
+ {34, riscv.REG_F3, -1, "F3"},
+ {35, riscv.REG_F4, -1, "F4"},
+ {36, riscv.REG_F5, -1, "F5"},
+ {37, riscv.REG_F6, -1, "F6"},
+ {38, riscv.REG_F7, -1, "F7"},
+ {39, riscv.REG_F8, -1, "F8"},
+ {40, riscv.REG_F9, -1, "F9"},
+ {41, riscv.REG_F10, -1, "F10"},
+ {42, riscv.REG_F11, -1, "F11"},
+ {43, riscv.REG_F12, -1, "F12"},
+ {44, riscv.REG_F13, -1, "F13"},
+ {45, riscv.REG_F14, -1, "F14"},
+ {46, riscv.REG_F15, -1, "F15"},
+ {47, riscv.REG_F16, -1, "F16"},
+ {48, riscv.REG_F17, -1, "F17"},
+ {49, riscv.REG_F18, -1, "F18"},
+ {50, riscv.REG_F19, -1, "F19"},
+ {51, riscv.REG_F20, -1, "F20"},
+ {52, riscv.REG_F21, -1, "F21"},
+ {53, riscv.REG_F22, -1, "F22"},
+ {54, riscv.REG_F23, -1, "F23"},
+ {55, riscv.REG_F24, -1, "F24"},
+ {56, riscv.REG_F25, -1, "F25"},
+ {57, riscv.REG_F26, -1, "F26"},
+ {58, riscv.REG_F27, -1, "F27"},
+ {59, riscv.REG_F28, -1, "F28"},
+ {60, riscv.REG_F29, -1, "F29"},
+ {61, riscv.REG_F30, -1, "F30"},
+ {62, riscv.REG_F31, -1, "F31"},
+ {63, 0, -1, "SB"},
+}
+var paramIntRegRISCV64 = []int8(nil)
+var paramFloatRegRISCV64 = []int8(nil)
+var gpRegMaskRISCV64 = regMask(1006632944)
+var fpRegMaskRISCV64 = regMask(9223372034707292160)
+var specialRegMaskRISCV64 = regMask(0)
+var framepointerRegRISCV64 = int8(-1)
+var linkRegRISCV64 = int8(0)
+var registersS390X = [...]Register{
+ {0, s390x.REG_R0, 0, "R0"},
+ {1, s390x.REG_R1, 1, "R1"},
+ {2, s390x.REG_R2, 2, "R2"},
+ {3, s390x.REG_R3, 3, "R3"},
+ {4, s390x.REG_R4, 4, "R4"},
+ {5, s390x.REG_R5, 5, "R5"},
+ {6, s390x.REG_R6, 6, "R6"},
+ {7, s390x.REG_R7, 7, "R7"},
+ {8, s390x.REG_R8, 8, "R8"},
+ {9, s390x.REG_R9, 9, "R9"},
+ {10, s390x.REG_R10, -1, "R10"},
+ {11, s390x.REG_R11, 10, "R11"},
+ {12, s390x.REG_R12, 11, "R12"},
+ {13, s390x.REGG, -1, "g"},
+ {14, s390x.REG_R14, 12, "R14"},
+ {15, s390x.REGSP, -1, "SP"},
+ {16, s390x.REG_F0, -1, "F0"},
+ {17, s390x.REG_F1, -1, "F1"},
+ {18, s390x.REG_F2, -1, "F2"},
+ {19, s390x.REG_F3, -1, "F3"},
+ {20, s390x.REG_F4, -1, "F4"},
+ {21, s390x.REG_F5, -1, "F5"},
+ {22, s390x.REG_F6, -1, "F6"},
+ {23, s390x.REG_F7, -1, "F7"},
+ {24, s390x.REG_F8, -1, "F8"},
+ {25, s390x.REG_F9, -1, "F9"},
+ {26, s390x.REG_F10, -1, "F10"},
+ {27, s390x.REG_F11, -1, "F11"},
+ {28, s390x.REG_F12, -1, "F12"},
+ {29, s390x.REG_F13, -1, "F13"},
+ {30, s390x.REG_F14, -1, "F14"},
+ {31, s390x.REG_F15, -1, "F15"},
+ {32, 0, -1, "SB"},
+}
+var paramIntRegS390X = []int8(nil)
+var paramFloatRegS390X = []int8(nil)
+var gpRegMaskS390X = regMask(23551)
+var fpRegMaskS390X = regMask(4294901760)
+var specialRegMaskS390X = regMask(0)
+var framepointerRegS390X = int8(-1)
+var linkRegS390X = int8(14)
+var registersWasm = [...]Register{
+ {0, wasm.REG_R0, 0, "R0"},
+ {1, wasm.REG_R1, 1, "R1"},
+ {2, wasm.REG_R2, 2, "R2"},
+ {3, wasm.REG_R3, 3, "R3"},
+ {4, wasm.REG_R4, 4, "R4"},
+ {5, wasm.REG_R5, 5, "R5"},
+ {6, wasm.REG_R6, 6, "R6"},
+ {7, wasm.REG_R7, 7, "R7"},
+ {8, wasm.REG_R8, 8, "R8"},
+ {9, wasm.REG_R9, 9, "R9"},
+ {10, wasm.REG_R10, 10, "R10"},
+ {11, wasm.REG_R11, 11, "R11"},
+ {12, wasm.REG_R12, 12, "R12"},
+ {13, wasm.REG_R13, 13, "R13"},
+ {14, wasm.REG_R14, 14, "R14"},
+ {15, wasm.REG_R15, 15, "R15"},
+ {16, wasm.REG_F0, -1, "F0"},
+ {17, wasm.REG_F1, -1, "F1"},
+ {18, wasm.REG_F2, -1, "F2"},
+ {19, wasm.REG_F3, -1, "F3"},
+ {20, wasm.REG_F4, -1, "F4"},
+ {21, wasm.REG_F5, -1, "F5"},
+ {22, wasm.REG_F6, -1, "F6"},
+ {23, wasm.REG_F7, -1, "F7"},
+ {24, wasm.REG_F8, -1, "F8"},
+ {25, wasm.REG_F9, -1, "F9"},
+ {26, wasm.REG_F10, -1, "F10"},
+ {27, wasm.REG_F11, -1, "F11"},
+ {28, wasm.REG_F12, -1, "F12"},
+ {29, wasm.REG_F13, -1, "F13"},
+ {30, wasm.REG_F14, -1, "F14"},
+ {31, wasm.REG_F15, -1, "F15"},
+ {32, wasm.REG_F16, -1, "F16"},
+ {33, wasm.REG_F17, -1, "F17"},
+ {34, wasm.REG_F18, -1, "F18"},
+ {35, wasm.REG_F19, -1, "F19"},
+ {36, wasm.REG_F20, -1, "F20"},
+ {37, wasm.REG_F21, -1, "F21"},
+ {38, wasm.REG_F22, -1, "F22"},
+ {39, wasm.REG_F23, -1, "F23"},
+ {40, wasm.REG_F24, -1, "F24"},
+ {41, wasm.REG_F25, -1, "F25"},
+ {42, wasm.REG_F26, -1, "F26"},
+ {43, wasm.REG_F27, -1, "F27"},
+ {44, wasm.REG_F28, -1, "F28"},
+ {45, wasm.REG_F29, -1, "F29"},
+ {46, wasm.REG_F30, -1, "F30"},
+ {47, wasm.REG_F31, -1, "F31"},
+ {48, wasm.REGSP, -1, "SP"},
+ {49, wasm.REGG, -1, "g"},
+ {50, 0, -1, "SB"},
+}
+var paramIntRegWasm = []int8(nil)
+var paramFloatRegWasm = []int8(nil)
+var gpRegMaskWasm = regMask(65535)
+var fpRegMaskWasm = regMask(281474976645120)
+var fp32RegMaskWasm = regMask(4294901760)
+var fp64RegMaskWasm = regMask(281470681743360)
+var specialRegMaskWasm = regMask(0)
+var framepointerRegWasm = int8(-1)
+var linkRegWasm = int8(-1)
diff --git a/src/cmd/compile/internal/ssa/opt.go b/src/cmd/compile/internal/ssa/opt.go
new file mode 100644
index 0000000..128e614
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/opt.go
@@ -0,0 +1,10 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// machine-independent optimization
+func opt(f *Func) {
+ applyRewrite(f, rewriteBlockgeneric, rewriteValuegeneric, removeDeadValues)
+}
diff --git a/src/cmd/compile/internal/ssa/passbm_test.go b/src/cmd/compile/internal/ssa/passbm_test.go
new file mode 100644
index 0000000..3fd3eb5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/passbm_test.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "fmt"
+ "testing"
+)
+
+const (
+ blockCount = 1000
+ passCount = 15000
+)
+
+type passFunc func(*Func)
+
+func BenchmarkDSEPass(b *testing.B) { benchFnPass(b, dse, blockCount, genFunction) }
+func BenchmarkDSEPassBlock(b *testing.B) { benchFnBlock(b, dse, genFunction) }
+func BenchmarkCSEPass(b *testing.B) { benchFnPass(b, cse, blockCount, genFunction) }
+func BenchmarkCSEPassBlock(b *testing.B) { benchFnBlock(b, cse, genFunction) }
+func BenchmarkDeadcodePass(b *testing.B) { benchFnPass(b, deadcode, blockCount, genFunction) }
+func BenchmarkDeadcodePassBlock(b *testing.B) { benchFnBlock(b, deadcode, genFunction) }
+
+func multi(f *Func) {
+ cse(f)
+ dse(f)
+ deadcode(f)
+}
+func BenchmarkMultiPass(b *testing.B) { benchFnPass(b, multi, blockCount, genFunction) }
+func BenchmarkMultiPassBlock(b *testing.B) { benchFnBlock(b, multi, genFunction) }
+
+// benchFnPass runs passFunc b.N times across a single function.
+func benchFnPass(b *testing.B, fn passFunc, size int, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(size)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < b.N; i++ {
+ fn(fun.f)
+ b.StopTimer()
+ CheckFunc(fun.f)
+ b.StartTimer()
+ }
+}
+
+// benchFnBlock runs passFunc passCount times across a function with b.N blocks.
+func benchFnBlock(b *testing.B, fn passFunc, bg blockGen) {
+ b.ReportAllocs()
+ c := testConfig(b)
+ fun := c.Fun("entry", bg(b.N)...)
+ CheckFunc(fun.f)
+ b.ResetTimer()
+ for i := 0; i < passCount; i++ {
+ fn(fun.f)
+ }
+ b.StopTimer()
+}
+
+func genFunction(size int) []bloc {
+ var blocs []bloc
+ elemType := types.Types[types.TINT64]
+ ptrType := elemType.PtrTo()
+
+ valn := func(s string, m, n int) string { return fmt.Sprintf("%s%d-%d", s, m, n) }
+ blocs = append(blocs,
+ Bloc("entry",
+ Valu(valn("store", 0, 4), OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, types.Types[types.TUINTPTR], 0, nil),
+ Goto(blockn(1)),
+ ),
+ )
+ for i := 1; i < size+1; i++ {
+ blocs = append(blocs, Bloc(blockn(i),
+ Valu(valn("v", i, 0), OpConstBool, types.Types[types.TBOOL], 1, nil),
+ Valu(valn("addr", i, 1), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 2), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("addr", i, 3), OpAddr, ptrType, 0, nil, "sb"),
+ Valu(valn("zero", i, 1), OpZero, types.TypeMem, 8, elemType, valn("addr", i, 3),
+ valn("store", i-1, 4)),
+ Valu(valn("store", i, 1), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("zero", i, 1)),
+ Valu(valn("store", i, 2), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 2),
+ valn("v", i, 0), valn("store", i, 1)),
+ Valu(valn("store", i, 3), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 1),
+ valn("v", i, 0), valn("store", i, 2)),
+ Valu(valn("store", i, 4), OpStore, types.TypeMem, 0, elemType, valn("addr", i, 3),
+ valn("v", i, 0), valn("store", i, 3)),
+ Goto(blockn(i+1))))
+ }
+
+ blocs = append(blocs,
+ Bloc(blockn(size+1), Goto("exit")),
+ Bloc("exit", Exit("store0-4")),
+ )
+
+ return blocs
+}
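// Illustrative sketch (not part of the file above): how these helpers compose
// outside the benchmark harness. It assumes testConfig accepts any testing.TB,
// as its use with *testing.B above suggests, and it would sit next to the
// benchmarks in a _test.go file of package ssa.
func examplePassOnGeneratedFunc(t *testing.T) {
	c := testConfig(t)
	fun := c.Fun("entry", genFunction(10)...) // build a synthetic 10-block function
	CheckFunc(fun.f)                          // validate the input SSA
	dse(fun.f)                                // run a single pass by hand
	CheckFunc(fun.f)                          // validate the result
}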
diff --git a/src/cmd/compile/internal/ssa/phielim.go b/src/cmd/compile/internal/ssa/phielim.go
new file mode 100644
index 0000000..761cb7a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phielim.go
@@ -0,0 +1,69 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phielim eliminates redundant phi values from f.
+// A phi is redundant if its arguments are all equal. For
+// purposes of counting, ignore the phi itself. Both of
+// these phis are redundant:
+// v = phi(x,x,x)
+// v = phi(x,v,x,v)
+// We repeat this process to also catch situations like:
+// v = phi(x, phi(x, x), phi(x, v))
+// TODO: Can we also simplify cases like:
+// v = phi(v, w, x)
+// w = phi(v, w, x)
+// and would that be useful?
+func phielim(f *Func) {
+ for {
+ change := false
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ copyelimValue(v)
+ change = phielimValue(v) || change
+ }
+ }
+ if !change {
+ break
+ }
+ }
+}
+
+// phielimValue tries to convert the phi v to a copy.
+func phielimValue(v *Value) bool {
+ if v.Op != OpPhi {
+ return false
+ }
+
+ // If there are two distinct args of v which
+ // are not v itself, then the phi must remain.
+ // Otherwise, we can replace it with a copy.
+ var w *Value
+ for _, x := range v.Args {
+ if x == v {
+ continue
+ }
+ if x == w {
+ continue
+ }
+ if w != nil {
+ return false
+ }
+ w = x
+ }
+
+ if w == nil {
+ // v references only itself. It must be in
+ // a dead code loop. Don't bother modifying it.
+ return false
+ }
+ v.Op = OpCopy
+ v.SetArgs1(w)
+ f := v.Block.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Pos, "eliminated phi")
+ }
+ return true
+}
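// Self-contained sketch of the rule phielimValue applies, using plain string
// labels instead of *Value nodes (purely illustrative, not compiler code):
// ignoring references to the phi itself, a phi with at most one distinct
// argument w can be replaced by a copy of w.
func redundantPhiArg(self string, args []string) (w string, ok bool) {
	for _, x := range args {
		if x == self || x == w {
			continue // self-references and repeats of the candidate don't count
		}
		if w != "" {
			return "", false // two distinct non-self arguments: the phi must stay
		}
		w = x
	}
	return w, w != "" // w == "" means the phi references only itself
}

// For example, redundantPhiArg("v", []string{"x", "v", "x"}) yields ("x", true),
// matching the v = phi(x,v,x) case listed in the comment above.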
diff --git a/src/cmd/compile/internal/ssa/phiopt.go b/src/cmd/compile/internal/ssa/phiopt.go
new file mode 100644
index 0000000..745c61c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/phiopt.go
@@ -0,0 +1,323 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// phiopt eliminates boolean Phis based on the previous if.
+//
+// Main use case is to transform:
+// x := false
+// if b {
+// x = true
+// }
+// into x = b.
+//
+// In SSA code this appears as
+//
+// b0
+// If b -> b1 b2
+// b1
+// Plain -> b2
+// b2
+// x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+//
+// In this case we can replace x with a copy of b.
+func phiopt(f *Func) {
+ sdom := f.Sdom()
+ for _, b := range f.Blocks {
+ if len(b.Preds) != 2 || len(b.Values) == 0 {
+ // TODO: handle more than 2 predecessors, e.g. a || b || c.
+ continue
+ }
+
+ pb0, b0 := b, b.Preds[0].b
+ for len(b0.Succs) == 1 && len(b0.Preds) == 1 {
+ pb0, b0 = b0, b0.Preds[0].b
+ }
+ if b0.Kind != BlockIf {
+ continue
+ }
+ pb1, b1 := b, b.Preds[1].b
+ for len(b1.Succs) == 1 && len(b1.Preds) == 1 {
+ pb1, b1 = b1, b1.Preds[0].b
+ }
+ if b1 != b0 {
+ continue
+ }
+ // b0 is the if block giving the boolean value.
+ // reverse is the predecessor from which the truth value comes.
+ var reverse int
+ if b0.Succs[0].b == pb0 && b0.Succs[1].b == pb1 {
+ reverse = 0
+ } else if b0.Succs[0].b == pb1 && b0.Succs[1].b == pb0 {
+ reverse = 1
+ } else {
+ b.Fatalf("invalid predecessors\n")
+ }
+
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+
+ // Look for conversions from bool to 0/1.
+ if v.Type.IsInteger() {
+ phioptint(v, b0, reverse)
+ }
+
+ if !v.Type.IsBoolean() {
+ continue
+ }
+
+ // Replaces
+ // if a { x = true } else { x = false } with x = a
+ // and
+ // if a { x = false } else { x = true } with x = !a
+ if v.Args[0].Op == OpConstBool && v.Args[1].Op == OpConstBool {
+ if v.Args[reverse].AuxInt != v.Args[1-reverse].AuxInt {
+ ops := [2]Op{OpNot, OpCopy}
+ v.reset(ops[v.Args[reverse].AuxInt])
+ v.AddArg(b0.Controls[0])
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = true } else { x = value } with x = a || value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed. This guarantees that the side effects
+ // of value are not seen if a is false.
+ if v.Args[reverse].Op == OpConstBool && v.Args[reverse].AuxInt == 1 {
+ if tmp := v.Args[1-reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpOrB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+
+ // Replaces
+ // if a { x = value } else { x = false } with x = a && value.
+ // Requires that value dominates x, meaning that regardless of a,
+ // value is always computed. This guarantees that the side effects
+ // of value are not seen if a is false.
+ if v.Args[1-reverse].Op == OpConstBool && v.Args[1-reverse].AuxInt == 0 {
+ if tmp := v.Args[reverse]; sdom.IsAncestorEq(tmp.Block, b) {
+ v.reset(OpAndB)
+ v.SetArgs2(b0.Controls[0], tmp)
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+ continue
+ }
+ }
+ }
+ }
+ // strengthen phi optimization.
+ // Main use case is to transform:
+ // x := false
+ // if c {
+ // x = true
+ // ...
+ // }
+ // into
+ // x := c
+ // if x { ... }
+ //
+ // For example, in SSA code a case appears as
+ // b0
+ // If c -> b, sb0
+ // sb0
+ // If d -> sd0, sd1
+ // sd1
+ // ...
+ // sd0
+ // Plain -> b
+ // b
+ // x = (OpPhi (ConstBool [true]) (ConstBool [false]))
+ //
+ // In this case we can also replace x with a copy of c.
+ //
+ // The optimization idea:
+ // 1. block b has a phi value x, x = OpPhi (ConstBool [true]) (ConstBool [false]),
+ // and len(b.Preds) is equal to 2.
+ // 2. find the common dominator(b0) of the predecessors(pb0, pb1) of block b, and the
+	//     dominator(b0) is an If block.
+ // Special case: one of the predecessors(pb0 or pb1) is the dominator(b0).
+ // 3. the successors(sb0, sb1) of the dominator need to dominate the predecessors(pb0, pb1)
+ // of block b respectively.
+ // 4. replace this boolean Phi based on dominator block.
+ //
+ // b0(pb0) b0(pb1) b0
+ // | \ / | / \
+ // | sb1 sb0 | sb0 sb1
+ // | ... ... | ... ...
+ // | pb1 pb0 | pb0 pb1
+ // | / \ | \ /
+ // b b b
+ //
+ var lca *lcaRange
+ for _, b := range f.Blocks {
+ if len(b.Preds) != 2 || len(b.Values) == 0 {
+ // TODO: handle more than 2 predecessors, e.g. a || b || c.
+ continue
+ }
+
+ for _, v := range b.Values {
+ // find a phi value v = OpPhi (ConstBool [true]) (ConstBool [false]).
+ // TODO: v = OpPhi (ConstBool [true]) (Arg <bool> {value})
+ if v.Op != OpPhi {
+ continue
+ }
+ if v.Args[0].Op != OpConstBool || v.Args[1].Op != OpConstBool {
+ continue
+ }
+ if v.Args[0].AuxInt == v.Args[1].AuxInt {
+ continue
+ }
+
+ pb0 := b.Preds[0].b
+ pb1 := b.Preds[1].b
+ if pb0.Kind == BlockIf && pb0 == sdom.Parent(b) {
+ // special case: pb0 is the dominator block b0.
+ // b0(pb0)
+ // | \
+ // | sb1
+ // | ...
+ // | pb1
+ // | /
+ // b
+ // if another successor sb1 of b0(pb0) dominates pb1, do replace.
+ ei := b.Preds[0].i
+ sb1 := pb0.Succs[1-ei].b
+ if sdom.IsAncestorEq(sb1, pb1) {
+ convertPhi(pb0, v, ei)
+ break
+ }
+ } else if pb1.Kind == BlockIf && pb1 == sdom.Parent(b) {
+ // special case: pb1 is the dominator block b0.
+ // b0(pb1)
+ // / |
+ // sb0 |
+ // ... |
+ // pb0 |
+ // \ |
+ // b
+				// if another successor sb0 of b0(pb1) dominates pb0, do replace.
+ ei := b.Preds[1].i
+ sb0 := pb1.Succs[1-ei].b
+ if sdom.IsAncestorEq(sb0, pb0) {
+ convertPhi(pb1, v, 1-ei)
+ break
+ }
+ } else {
+ // b0
+ // / \
+ // sb0 sb1
+ // ... ...
+ // pb0 pb1
+ // \ /
+ // b
+ //
+ // Build data structure for fast least-common-ancestor queries.
+ if lca == nil {
+ lca = makeLCArange(f)
+ }
+ b0 := lca.find(pb0, pb1)
+ if b0.Kind != BlockIf {
+ break
+ }
+ sb0 := b0.Succs[0].b
+ sb1 := b0.Succs[1].b
+ var reverse int
+ if sdom.IsAncestorEq(sb0, pb0) && sdom.IsAncestorEq(sb1, pb1) {
+ reverse = 0
+ } else if sdom.IsAncestorEq(sb1, pb0) && sdom.IsAncestorEq(sb0, pb1) {
+ reverse = 1
+ } else {
+ break
+ }
+ if len(sb0.Preds) != 1 || len(sb1.Preds) != 1 {
+					// we cannot replace the phi value x in cases like:
+					//   if gp == nil || sp < lo { x = true }
+					//   if a || b { x = true }
+					// because the if statement must have exactly one condition.
+ break
+ }
+ convertPhi(b0, v, reverse)
+ }
+ }
+ }
+}
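// Source-level sketch of the shapes phiopt targets (hand-written Go for
// illustration, not compiler output); the intent of the pass is that each
// "before" form ends up with SSA equivalent to its "after" form.
func phioptBoolBefore(b bool) bool {
	x := false
	if b {
		x = true
	}
	return x // becomes phi(ConstBool [true], ConstBool [false]) selected on b
}

func phioptBoolAfter(b bool) bool {
	return b // the phi collapses to a copy of the condition
}

func phioptOrBefore(a, b bool) bool {
	x := b // "value" is computed regardless of a, as required above
	if a {
		x = true
	}
	return x
}

func phioptOrAfter(a, b bool) bool {
	return a || b // the phi(true, value) form becomes OpOrB
}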
+
+func phioptint(v *Value, b0 *Block, reverse int) {
+ a0 := v.Args[0]
+ a1 := v.Args[1]
+ if a0.Op != a1.Op {
+ return
+ }
+
+ switch a0.Op {
+ case OpConst8, OpConst16, OpConst32, OpConst64:
+ default:
+ return
+ }
+
+ negate := false
+ switch {
+ case a0.AuxInt == 0 && a1.AuxInt == 1:
+ negate = true
+ case a0.AuxInt == 1 && a1.AuxInt == 0:
+ default:
+ return
+ }
+
+ if reverse == 1 {
+ negate = !negate
+ }
+
+ a := b0.Controls[0]
+ if negate {
+ a = v.Block.NewValue1(v.Pos, OpNot, a.Type, a)
+ }
+ v.AddArg(a)
+
+ cvt := v.Block.NewValue1(v.Pos, OpCvtBoolToUint8, v.Block.Func.Config.Types.UInt8, a)
+ switch v.Type.Size() {
+ case 1:
+ v.reset(OpCopy)
+ case 2:
+ v.reset(OpZeroExt8to16)
+ case 4:
+ v.reset(OpZeroExt8to32)
+ case 8:
+ v.reset(OpZeroExt8to64)
+ default:
+ v.Fatalf("bad int size %d", v.Type.Size())
+ }
+ v.AddArg(cvt)
+
+ f := b0.Func
+ if f.pass.debug > 0 {
+ f.Warnl(v.Block.Pos, "converted OpPhi bool -> int%d", v.Type.Size()*8)
+ }
+}
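// Companion sketch for phioptint (illustrative Go, not compiler output): an
// integer phi whose arguments are the constants 0 and 1 is rewritten into a
// bool-to-uint8 conversion plus a zero extension, so a shape like this no
// longer needs a branch to materialize its 0/1 result.
func boolToInt64(b bool) int64 {
	var x int64
	if b {
		x = 1 // phi(Const64 [1], Const64 [0]) selected on b
	}
	return x // after phioptint: ZeroExt8to64(CvtBoolToUint8(b))
}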
+
+// b is the If block giving the boolean value.
+// v is the phi value v = (OpPhi (ConstBool [true]) (ConstBool [false])).
+// reverse is the predecessor from which the truth value comes.
+func convertPhi(b *Block, v *Value, reverse int) {
+ f := b.Func
+ ops := [2]Op{OpNot, OpCopy}
+ v.reset(ops[v.Args[reverse].AuxInt])
+ v.AddArg(b.Controls[0])
+ if f.pass.debug > 0 {
+ f.Warnl(b.Pos, "converted OpPhi to %v", v.Op)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/poset.go b/src/cmd/compile/internal/ssa/poset.go
new file mode 100644
index 0000000..ee884ca
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset.go
@@ -0,0 +1,1359 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "os"
+)
+
+// If true, check poset integrity after every mutation
+var debugPoset = false
+
+const uintSize = 32 << (^uint(0) >> 63) // 32 or 64
+
+// bitset is a bit array for dense indexes.
+type bitset []uint
+
+func newBitset(n int) bitset {
+ return make(bitset, (n+uintSize-1)/uintSize)
+}
+
+func (bs bitset) Reset() {
+ for i := range bs {
+ bs[i] = 0
+ }
+}
+
+func (bs bitset) Set(idx uint32) {
+ bs[idx/uintSize] |= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Clear(idx uint32) {
+ bs[idx/uintSize] &^= 1 << (idx % uintSize)
+}
+
+func (bs bitset) Test(idx uint32) bool {
+ return bs[idx/uintSize]&(1<<(idx%uintSize)) != 0
+}
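// Minimal usage sketch for the bitset helpers above (illustrative only):
// size it for the largest dense index expected, then Set/Test/Clear by index.
func exampleBitset() {
	bs := newBitset(100) // room for indexes 0..99, rounded up to whole words
	bs.Set(3)
	bs.Set(64)
	_ = bs.Test(3)  // true
	_ = bs.Test(5)  // false
	bs.Clear(64)
	_ = bs.Test(64) // false
	bs.Reset()      // clear every bit
}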
+
+type undoType uint8
+
+const (
+ undoInvalid undoType = iota
+ undoCheckpoint // a checkpoint to group undo passes
+ undoSetChl // change back left child of undo.idx to undo.edge
+ undoSetChr // change back right child of undo.idx to undo.edge
+ undoNonEqual // forget that SSA value undo.ID is non-equal to undo.idx (another ID)
+ undoNewNode // remove new node created for SSA value undo.ID
+ undoNewConstant // remove the constant node idx from the constants map
+ undoAliasNode // unalias SSA value undo.ID so that it points back to node index undo.idx
+ undoNewRoot // remove node undo.idx from root list
+ undoChangeRoot // remove node undo.idx from root list, and put back undo.edge.Target instead
+ undoMergeRoot // remove node undo.idx from root list, and put back its children instead
+)
+
+// posetUndo represents an undo pass to be performed.
+// It's a union of fields that can be used to store information,
+// and typ is the discriminant that specifies which kind
+// of operation must be performed. Not all fields are always used.
+type posetUndo struct {
+ typ undoType
+ idx uint32
+ ID ID
+ edge posetEdge
+}
+
+const (
+ // Make poset handle constants as unsigned numbers.
+ posetFlagUnsigned = 1 << iota
+)
+
+// A poset edge. The zero value is the null/empty edge.
+// Packs target node index (31 bits) and strict flag (1 bit).
+type posetEdge uint32
+
+func newedge(t uint32, strict bool) posetEdge {
+ s := uint32(0)
+ if strict {
+ s = 1
+ }
+ return posetEdge(t<<1 | s)
+}
+func (e posetEdge) Target() uint32 { return uint32(e) >> 1 }
+func (e posetEdge) Strict() bool { return uint32(e)&1 != 0 }
+func (e posetEdge) String() string {
+ s := fmt.Sprint(e.Target())
+ if e.Strict() {
+ s += "*"
+ }
+ return s
+}
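// Round-trip sketch of the edge encoding (illustrative only): the target node
// index lives in the upper 31 bits, the strict flag in bit 0, and the zero
// value is the null edge.
func exampleEdgeEncoding() {
	e := newedge(7, true)
	_ = e.Target() // 7
	_ = e.Strict() // true
	_ = e.String() // "7*"; the trailing '*' marks a strict edge
	var null posetEdge
	_ = null == 0 // true: no edge
}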
+
+// posetNode is a node of a DAG within the poset.
+type posetNode struct {
+ l, r posetEdge
+}
+
+// poset is a union-find data structure that can represent a partially ordered set
+// of SSA values. Given a binary relation that creates a partial order (eg: '<'),
+// clients can record relations between SSA values using SetOrder, and later
+// check relations (in the transitive closure) with Ordered. For instance,
+// if SetOrder is called to record that A<B and B<C, Ordered will later confirm
+// that A<C.
+//
+// It is possible to record equality relations between SSA values with SetEqual and check
+// equality with Equal. Equality propagates into the transitive closure for the partial
+// order so that if we know that A<B<C and later learn that A==D, Ordered will return
+// true for D<C.
+//
+// It is also possible to record inequality relations between nodes with SetNonEqual;
+// non-equality relations are not transitive, but they can still be useful: for instance
+// if we know that A<=B and later we learn that A!=B, we can deduce that A<B.
+// NonEqual can be used to check whether it is known that the nodes are different, either
+// because SetNonEqual was called before, or because we know that they are strictly ordered.
+//
+// poset will refuse to record new relations that contradict existing relations:
+// for instance if A<B<C, calling SetOrder for C<A will fail returning false; also
+// calling SetEqual for C==A will fail.
+//
+// poset is implemented as a forest of DAGs; in each DAG, if there is a path (directed)
+// from node A to B, it means that A<B (or A<=B). Equality is represented by mapping
+// two SSA values to the same DAG node; when a new equality relation is recorded
+// between two existing nodes, the nodes are merged, adjusting incoming and outgoing edges.
+//
+// Constants are specially treated. When a constant is added to the poset, it is
+// immediately linked to other constants already present; so for instance if the
+// poset knows that x<=3, and then x is tested against 5, 5 is first added and linked
+// to 3 (using 3<5), so that the poset knows that x<=3<5; at that point, it is able
+// to answer x<5 correctly. This means that all constants are always within the same
+// DAG; as an implementation detail, we enforce that the DAG containing the constants
+// is always the first in the forest.
+//
+// poset is designed to be memory efficient and to allocate little during normal usage.
+// Most internal data structures are pre-allocated and flat, so for instance adding a
+// new relation does not cause any allocation. For performance reasons,
+// each node has only up to two outgoing edges (like a binary tree), so intermediate
+// "extra" nodes are required to represent more than two relations. For instance,
+// to record that A<I, A<J, A<K (with no known relation between I,J,K), we create the
+// following DAG:
+//
+// A
+// / \
+// I extra
+// / \
+// J K
+//
+type poset struct {
+ lastidx uint32 // last generated dense index
+ flags uint8 // internal flags
+ values map[ID]uint32 // map SSA values to dense indexes
+ constants map[int64]uint32 // record SSA constants together with their value
+ nodes []posetNode // nodes (in all DAGs)
+ roots []uint32 // list of root nodes (forest)
+ noneq map[uint32]bitset // non-equal relations
+ undo []posetUndo // undo chain
+}
+
+func newPoset() *poset {
+ return &poset{
+ values: make(map[ID]uint32),
+ constants: make(map[int64]uint32, 8),
+ nodes: make([]posetNode, 1, 16),
+ roots: make([]uint32, 0, 4),
+ noneq: make(map[uint32]bitset),
+ undo: make([]posetUndo, 0, 4),
+ }
+}
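// Usage sketch for the API documented above. The Values are hand-built
// stand-ins with distinct IDs, in the same spirit as the setup used by
// poset_test.go later in this patch; they are not real compiler-produced SSA.
func examplePosetUsage() {
	x := &Value{ID: 1}
	y := &Value{ID: 2}
	three := &Value{ID: 3, Op: OpConst64, AuxInt: 3}
	five := &Value{ID: 4, Op: OpConst64, AuxInt: 5}

	po := newPoset()
	po.SetOrder(x, y)            // record x < y
	po.SetOrderOrEqual(y, three) // record y <= 3
	_ = po.Ordered(x, y)         // true
	_ = po.Ordered(x, five)      // true: x < y <= 3, and 3 < 5 is linked in
	                             // automatically when the constant 5 is seen
	_ = po.SetOrder(y, x)        // false: contradicts the recorded x < y
}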
+
+func (po *poset) SetUnsigned(uns bool) {
+ if uns {
+ po.flags |= posetFlagUnsigned
+ } else {
+ po.flags &^= posetFlagUnsigned
+ }
+}
+
+// Handle children
+func (po *poset) setchl(i uint32, l posetEdge) { po.nodes[i].l = l }
+func (po *poset) setchr(i uint32, r posetEdge) { po.nodes[i].r = r }
+func (po *poset) chl(i uint32) uint32 { return po.nodes[i].l.Target() }
+func (po *poset) chr(i uint32) uint32 { return po.nodes[i].r.Target() }
+func (po *poset) children(i uint32) (posetEdge, posetEdge) {
+ return po.nodes[i].l, po.nodes[i].r
+}
+
+// upush records a new undo step. It can be used for simple
+// undo passes that record up to one index and one edge.
+func (po *poset) upush(typ undoType, p uint32, e posetEdge) {
+ po.undo = append(po.undo, posetUndo{typ: typ, idx: p, edge: e})
+}
+
+// upushnew pushes an undo pass for a new node
+func (po *poset) upushnew(id ID, idx uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewNode, ID: id, idx: idx})
+}
+
+// upushneq pushes a new undo pass for a nonequal relation
+func (po *poset) upushneq(idx1 uint32, idx2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNonEqual, ID: ID(idx1), idx: idx2})
+}
+
+// upushalias pushes a new undo pass for aliasing two nodes
+func (po *poset) upushalias(id ID, i2 uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoAliasNode, ID: id, idx: i2})
+}
+
+// upushconst pushes a new undo pass for a new constant
+func (po *poset) upushconst(idx uint32, old uint32) {
+ po.undo = append(po.undo, posetUndo{typ: undoNewConstant, idx: idx, ID: ID(old)})
+}
+
+// addchild adds i2 as direct child of i1.
+func (po *poset) addchild(i1, i2 uint32, strict bool) {
+ i1l, i1r := po.children(i1)
+ e2 := newedge(i2, strict)
+
+ if i1l == 0 {
+ po.setchl(i1, e2)
+ po.upush(undoSetChl, i1, 0)
+ } else if i1r == 0 {
+ po.setchr(i1, e2)
+ po.upush(undoSetChr, i1, 0)
+ } else {
+ // If n1 already has two children, add an intermediate extra
+ // node to record the relation correctly (without relating
+ // n2 to other existing nodes). Use a non-deterministic value
+ // to decide whether to append on the left or the right, to avoid
+		// creating degenerate chains.
+ //
+ // n1
+ // / \
+ // i1l extra
+ // / \
+ // i1r n2
+ //
+ extra := po.newnode(nil)
+ if (i1^i2)&1 != 0 { // non-deterministic
+ po.setchl(extra, i1r)
+ po.setchr(extra, e2)
+ po.setchr(i1, newedge(extra, false))
+ po.upush(undoSetChr, i1, i1r)
+ } else {
+ po.setchl(extra, i1l)
+ po.setchr(extra, e2)
+ po.setchl(i1, newedge(extra, false))
+ po.upush(undoSetChl, i1, i1l)
+ }
+ }
+}
+
+// newnode allocates a new node bound to SSA value n.
+// If n is nil, this is an extra node (= only used internally).
+func (po *poset) newnode(n *Value) uint32 {
+ i := po.lastidx + 1
+ po.lastidx++
+ po.nodes = append(po.nodes, posetNode{})
+ if n != nil {
+ if po.values[n.ID] != 0 {
+ panic("newnode for Value already inserted")
+ }
+ po.values[n.ID] = i
+ po.upushnew(n.ID, i)
+ } else {
+ po.upushnew(0, i)
+ }
+ return i
+}
+
+// lookup searches for an SSA value in the forest of DAGs, and returns its node.
+// Constants are materialized on the fly during lookup.
+func (po *poset) lookup(n *Value) (uint32, bool) {
+ i, f := po.values[n.ID]
+ if !f && n.isGenericIntConst() {
+ po.newconst(n)
+ i, f = po.values[n.ID]
+ }
+ return i, f
+}
+
+// newconst creates a node for a constant. It links it to other constants, so
+// that n<=5 is detected true when n<=3 is known to be true.
+// TODO: this is O(N), fix it.
+func (po *poset) newconst(n *Value) {
+ if !n.isGenericIntConst() {
+ panic("newconst on non-constant")
+ }
+
+ // If the same constant is already present in the poset through a different
+ // Value, just alias to it without allocating a new node.
+ val := n.AuxInt
+ if po.flags&posetFlagUnsigned != 0 {
+ val = int64(n.AuxUnsigned())
+ }
+ if c, found := po.constants[val]; found {
+ po.values[n.ID] = c
+ po.upushalias(n.ID, 0)
+ return
+ }
+
+ // Create the new node for this constant
+ i := po.newnode(n)
+
+	// If this is the first constant, make it a new root: there is
+	// no existing relation to record, so there is no specific DAG
+	// to add it to. Notice that we want all
+ // constants to be in root #0, so make sure the new root
+ // goes there.
+ if len(po.constants) == 0 {
+ idx := len(po.roots)
+ po.roots = append(po.roots, i)
+ po.roots[0], po.roots[idx] = po.roots[idx], po.roots[0]
+ po.upush(undoNewRoot, i, 0)
+ po.constants[val] = i
+ po.upushconst(i, 0)
+ return
+ }
+
+ // Find the lower and upper bound among existing constants. That is,
+ // find the higher constant that is lower than the one that we're adding,
+ // and the lower constant that is higher.
+ // The loop is duplicated to handle signed and unsigned comparison,
+ // depending on how the poset was configured.
+ var lowerptr, higherptr uint32
+
+ if po.flags&posetFlagUnsigned != 0 {
+ var lower, higher uint64
+ val1 := n.AuxUnsigned()
+ for val2, ptr := range po.constants {
+ val2 := uint64(val2)
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ } else {
+ var lower, higher int64
+ val1 := n.AuxInt
+ for val2, ptr := range po.constants {
+ if val1 == val2 {
+ panic("unreachable")
+ }
+ if val2 < val1 && (lowerptr == 0 || val2 > lower) {
+ lower = val2
+ lowerptr = ptr
+ } else if val2 > val1 && (higherptr == 0 || val2 < higher) {
+ higher = val2
+ higherptr = ptr
+ }
+ }
+ }
+
+ if lowerptr == 0 && higherptr == 0 {
+ // This should not happen, as at least one
+ // other constant must exist if we get here.
+ panic("no constant found")
+ }
+
+ // Create the new node and connect it to the bounds, so that
+ // lower < n < higher. We could have found both bounds or only one
+ // of them, depending on what other constants are present in the poset.
+ // Notice that we always link constants together, so they
+ // are always part of the same DAG.
+ switch {
+ case lowerptr != 0 && higherptr != 0:
+ // Both bounds are present, record lower < n < higher.
+ po.addchild(lowerptr, i, true)
+ po.addchild(i, higherptr, true)
+
+ case lowerptr != 0:
+ // Lower bound only, record lower < n.
+ po.addchild(lowerptr, i, true)
+
+ case higherptr != 0:
+ // Higher bound only. To record n < higher, we need
+ // an extra root:
+ //
+ // extra
+ // / \
+ // root \
+ // / n
+ // .... /
+ // \ /
+ // higher
+ //
+ i2 := higherptr
+ r2 := po.findroot(i2)
+ if r2 != po.roots[0] { // all constants should be in root #0
+ panic("constant not in root #0")
+ }
+ extra := po.newnode(nil)
+ po.changeroot(r2, extra)
+ po.upush(undoChangeRoot, extra, newedge(r2, false))
+ po.addchild(extra, r2, false)
+ po.addchild(extra, i, false)
+ po.addchild(i, i2, true)
+ }
+
+ po.constants[val] = i
+ po.upushconst(i, 0)
+}
+
+// aliasnewnode records that a single node n2 (not in the poset yet) is an alias
+// of the master node n1.
+func (po *poset) aliasnewnode(n1, n2 *Value) {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if i1 == 0 || i2 != 0 {
+ panic("aliasnewnode invalid arguments")
+ }
+
+ po.values[n2.ID] = i1
+ po.upushalias(n2.ID, 0)
+}
+
+// aliasnodes records that all the nodes i2s are aliases of a single master node n1.
+// aliasnodes takes care of rearranging the DAG, changing references of parent/children
+// of nodes in i2s, so that they point to n1 instead.
+// Complexity is O(n) (with n being the total number of nodes in the poset, not just
+// the number of nodes being aliased).
+func (po *poset) aliasnodes(n1 *Value, i2s bitset) {
+ i1 := po.values[n1.ID]
+ if i1 == 0 {
+ panic("aliasnode for non-existing node")
+ }
+ if i2s.Test(i1) {
+ panic("aliasnode i2s contains n1 node")
+ }
+
+	// Go through all the nodes to adjust parent/children of nodes in i2s
+ for idx, n := range po.nodes {
+ // Do not touch i1 itself, otherwise we can create useless self-loops
+ if uint32(idx) == i1 {
+ continue
+ }
+ l, r := n.l, n.r
+
+ // Rename all references to i2s into i1
+ if i2s.Test(l.Target()) {
+ po.setchl(uint32(idx), newedge(i1, l.Strict()))
+ po.upush(undoSetChl, uint32(idx), l)
+ }
+ if i2s.Test(r.Target()) {
+ po.setchr(uint32(idx), newedge(i1, r.Strict()))
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+
+		// Connect all children of i2s to i1 (unless those children
+ // are in i2s as well, in which case it would be useless)
+ if i2s.Test(uint32(idx)) {
+ if l != 0 && !i2s.Test(l.Target()) {
+ po.addchild(i1, l.Target(), l.Strict())
+ }
+ if r != 0 && !i2s.Test(r.Target()) {
+ po.addchild(i1, r.Target(), r.Strict())
+ }
+ po.setchl(uint32(idx), 0)
+ po.setchr(uint32(idx), 0)
+ po.upush(undoSetChl, uint32(idx), l)
+ po.upush(undoSetChr, uint32(idx), r)
+ }
+ }
+
+ // Reassign all existing IDs that point to i2 to i1.
+ // This includes n2.ID.
+ for k, v := range po.values {
+ if i2s.Test(v) {
+ po.values[k] = i1
+ po.upushalias(k, v)
+ }
+ }
+
+ // If one of the aliased nodes is a constant, then make sure
+ // po.constants is updated to point to the master node.
+ for val, idx := range po.constants {
+ if i2s.Test(idx) {
+ po.constants[val] = i1
+ po.upushconst(i1, idx)
+ }
+ }
+}
+
+func (po *poset) isroot(r uint32) bool {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ return true
+ }
+ }
+ return false
+}
+
+func (po *poset) changeroot(oldr, newr uint32) {
+ for i := range po.roots {
+ if po.roots[i] == oldr {
+ po.roots[i] = newr
+ return
+ }
+ }
+ panic("changeroot on non-root")
+}
+
+func (po *poset) removeroot(r uint32) {
+ for i := range po.roots {
+ if po.roots[i] == r {
+ po.roots = append(po.roots[:i], po.roots[i+1:]...)
+ return
+ }
+ }
+ panic("removeroot on non-root")
+}
+
+// dfs performs a depth-first search within the DAG whose root is r.
+// f is the visit function called for each node; if it returns true,
+// the search is aborted and true is returned. The root node is
+// visited too.
+// If strict, ignore edges across a path until at least one
+// strict edge is found. For instance, for a chain A<=B<=C<D<=E<F,
+// a strict walk visits D,E,F.
+// If the visit ends, false is returned.
+func (po *poset) dfs(r uint32, strict bool, f func(i uint32) bool) bool {
+ closed := newBitset(int(po.lastidx + 1))
+ open := make([]uint32, 1, 64)
+ open[0] = r
+
+ if strict {
+ // Do a first DFS; walk all paths and stop when we find a strict
+ // edge, building a "next" list of nodes reachable through strict
+ // edges. This will be the bootstrap open list for the real DFS.
+ next := make([]uint32, 0, 64)
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ // Don't visit the same node twice. Notice that all nodes
+ // across non-strict paths are still visited at least once, so
+ // a non-strict path can never obscure a strict path to the
+ // same node.
+ if !closed.Test(i) {
+ closed.Set(i)
+
+ l, r := po.children(i)
+ if l != 0 {
+ if l.Strict() {
+ next = append(next, l.Target())
+ } else {
+ open = append(open, l.Target())
+ }
+ }
+ if r != 0 {
+ if r.Strict() {
+ next = append(next, r.Target())
+ } else {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ }
+ open = next
+ closed.Reset()
+ }
+
+ for len(open) > 0 {
+ i := open[len(open)-1]
+ open = open[:len(open)-1]
+
+ if !closed.Test(i) {
+ if f(i) {
+ return true
+ }
+ closed.Set(i)
+ l, r := po.children(i)
+ if l != 0 {
+ open = append(open, l.Target())
+ }
+ if r != 0 {
+ open = append(open, r.Target())
+ }
+ }
+ }
+ return false
+}
+
+// Returns true if there is a path from i1 to i2.
+// If strict == true: if the function returns true, then i1 < i2.
+// If strict == false: if the function returns true, then i1 <= i2.
+// If the function returns false, no relation is known.
+func (po *poset) reaches(i1, i2 uint32, strict bool) bool {
+ return po.dfs(i1, strict, func(n uint32) bool {
+ return n == i2
+ })
+}
+
+// findroot finds i's root, that is which DAG contains i.
+// Returns the root; if i is itself a root, it is returned.
+// Panics if i is not in any DAG.
+func (po *poset) findroot(i uint32) uint32 {
+ // TODO(rasky): if needed, a way to speed up this search is
+ // storing a bitset for each root using it as a mini bloom filter
+ // of nodes present under that root.
+ for _, r := range po.roots {
+ if po.reaches(r, i, false) {
+ return r
+ }
+ }
+ panic("findroot didn't find any root")
+}
+
+// mergeroot merges two DAGs into one DAG by creating a new extra root
+func (po *poset) mergeroot(r1, r2 uint32) uint32 {
+ // Root #0 is special as it contains all constants. Since mergeroot
+ // discards r2 as root and keeps r1, make sure that r2 is not root #0,
+ // otherwise constants would move to a different root.
+ if r2 == po.roots[0] {
+ r1, r2 = r2, r1
+ }
+ r := po.newnode(nil)
+ po.setchl(r, newedge(r1, false))
+ po.setchr(r, newedge(r2, false))
+ po.changeroot(r1, r)
+ po.removeroot(r2)
+ po.upush(undoMergeRoot, r, 0)
+ return r
+}
+
+// collapsepath marks n1 and n2 as equal and collapses as equal all
+// nodes across all paths between n1 and n2. If a strict edge is
+// found, the function does not modify the DAG and returns false.
+// Complexity is O(n).
+func (po *poset) collapsepath(n1, n2 *Value) bool {
+ i1, i2 := po.values[n1.ID], po.values[n2.ID]
+ if po.reaches(i1, i2, true) {
+ return false
+ }
+
+ // Find all the paths from i1 to i2
+ paths := po.findpaths(i1, i2)
+ // Mark all nodes in all the paths as aliases of n1
+ // (excluding n1 itself)
+ paths.Clear(i1)
+ po.aliasnodes(n1, paths)
+ return true
+}
+
+// findpaths is a recursive function that calculates all paths from cur to dst
+// and returns them as a bitset (the index of a node is set in the bitset if
+// that node is on at least one path from cur to dst).
+// We do a DFS from cur (stopping going deep any time we reach dst, if ever),
+// and mark as part of the paths any node that has a child which is already
+// part of the path (or is dst itself).
+func (po *poset) findpaths(cur, dst uint32) bitset {
+ seen := newBitset(int(po.lastidx + 1))
+ path := newBitset(int(po.lastidx + 1))
+ path.Set(dst)
+ po.findpaths1(cur, dst, seen, path)
+ return path
+}
+
+func (po *poset) findpaths1(cur, dst uint32, seen bitset, path bitset) {
+ if cur == dst {
+ return
+ }
+ seen.Set(cur)
+ l, r := po.chl(cur), po.chr(cur)
+ if !seen.Test(l) {
+ po.findpaths1(l, dst, seen, path)
+ }
+ if !seen.Test(r) {
+ po.findpaths1(r, dst, seen, path)
+ }
+ if path.Test(l) || path.Test(r) {
+ path.Set(cur)
+ }
+}
+
+// Check whether it is recorded that i1!=i2
+func (po *poset) isnoneq(i1, i2 uint32) bool {
+ if i1 == i2 {
+ return false
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+
+ // Check if we recorded a non-equal relation before
+ if bs, ok := po.noneq[i1]; ok && bs.Test(i2) {
+ return true
+ }
+ return false
+}
+
+// Record that i1!=i2
+func (po *poset) setnoneq(n1, n2 *Value) {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If any of the nodes do not exist in the poset, allocate them. Since
+ // we don't know any relation (in the partial order) about them, they must
+ // become independent roots.
+ if !f1 {
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ }
+ if !f2 {
+ i2 = po.newnode(n2)
+ po.roots = append(po.roots, i2)
+ po.upush(undoNewRoot, i2, 0)
+ }
+
+ if i1 == i2 {
+ panic("setnoneq on same node")
+ }
+ if i1 < i2 {
+ i1, i2 = i2, i1
+ }
+ bs := po.noneq[i1]
+ if bs == nil {
+ // Given that we record non-equality relations using the
+		// higher index as a key, the bitset will never need to change size.
+		// TODO(rasky): if memory is a problem, consider allocating
+		// a small bitset and lazily growing it when higher indices arrive.
+ bs = newBitset(int(i1))
+ po.noneq[i1] = bs
+ } else if bs.Test(i2) {
+ // Already recorded
+ return
+ }
+ bs.Set(i2)
+ po.upushneq(i1, i2)
+}
+
+// CheckIntegrity verifies internal integrity of a poset. It is intended
+// for debugging purposes.
+func (po *poset) CheckIntegrity() {
+ // Record which index is a constant
+ constants := newBitset(int(po.lastidx + 1))
+ for _, c := range po.constants {
+ constants.Set(c)
+ }
+
+ // Verify that each node appears in a single DAG, and that
+ // all constants are within the first DAG
+ seen := newBitset(int(po.lastidx + 1))
+ for ridx, r := range po.roots {
+ if r == 0 {
+ panic("empty root")
+ }
+
+ po.dfs(r, false, func(i uint32) bool {
+ if seen.Test(i) {
+ panic("duplicate node")
+ }
+ seen.Set(i)
+ if constants.Test(i) {
+ if ridx != 0 {
+ panic("constants not in the first DAG")
+ }
+ }
+ return false
+ })
+ }
+
+ // Verify that values contain the minimum set
+ for id, idx := range po.values {
+ if !seen.Test(idx) {
+ panic(fmt.Errorf("spurious value [%d]=%d", id, idx))
+ }
+ }
+
+ // Verify that only existing nodes have non-zero children
+ for i, n := range po.nodes {
+ if n.l|n.r != 0 {
+ if !seen.Test(uint32(i)) {
+ panic(fmt.Errorf("children of unknown node %d->%v", i, n))
+ }
+ if n.l.Target() == uint32(i) || n.r.Target() == uint32(i) {
+ panic(fmt.Errorf("self-loop on node %d", i))
+ }
+ }
+ }
+}
+
+// CheckEmpty checks that a poset is completely empty.
+// It can be used for debugging purposes, as a poset is supposed to
+// be empty after it's fully rolled back through Undo.
+func (po *poset) CheckEmpty() error {
+ if len(po.nodes) != 1 {
+ return fmt.Errorf("non-empty nodes list: %v", po.nodes)
+ }
+ if len(po.values) != 0 {
+ return fmt.Errorf("non-empty value map: %v", po.values)
+ }
+ if len(po.roots) != 0 {
+ return fmt.Errorf("non-empty root list: %v", po.roots)
+ }
+ if len(po.constants) != 0 {
+ return fmt.Errorf("non-empty constants: %v", po.constants)
+ }
+ if len(po.undo) != 0 {
+ return fmt.Errorf("non-empty undo list: %v", po.undo)
+ }
+ if po.lastidx != 0 {
+ return fmt.Errorf("lastidx index is not zero: %v", po.lastidx)
+ }
+ for _, bs := range po.noneq {
+ for _, x := range bs {
+ if x != 0 {
+ return fmt.Errorf("non-empty noneq map")
+ }
+ }
+ }
+ return nil
+}
+
+// DotDump dumps the poset in graphviz format to file fn, with the specified title.
+func (po *poset) DotDump(fn string, title string) error {
+ f, err := os.Create(fn)
+ if err != nil {
+ return err
+ }
+ defer f.Close()
+
+ // Create reverse index mapping (taking aliases into account)
+ names := make(map[uint32]string)
+ for id, i := range po.values {
+ s := names[i]
+ if s == "" {
+ s = fmt.Sprintf("v%d", id)
+ } else {
+ s += fmt.Sprintf(", v%d", id)
+ }
+ names[i] = s
+ }
+
+ // Create reverse constant mapping
+ consts := make(map[uint32]int64)
+ for val, idx := range po.constants {
+ consts[idx] = val
+ }
+
+ fmt.Fprintf(f, "digraph poset {\n")
+ fmt.Fprintf(f, "\tedge [ fontsize=10 ]\n")
+ for ridx, r := range po.roots {
+ fmt.Fprintf(f, "\tsubgraph root%d {\n", ridx)
+ po.dfs(r, false, func(i uint32) bool {
+ if val, ok := consts[i]; ok {
+ // Constant
+ var vals string
+ if po.flags&posetFlagUnsigned != 0 {
+ vals = fmt.Sprint(uint64(val))
+ } else {
+ vals = fmt.Sprint(int64(val))
+ }
+ fmt.Fprintf(f, "\t\tnode%d [shape=box style=filled fillcolor=cadetblue1 label=<%s <font point-size=\"6\">%s [%d]</font>>]\n",
+ i, vals, names[i], i)
+ } else {
+ // Normal SSA value
+ fmt.Fprintf(f, "\t\tnode%d [label=<%s <font point-size=\"6\">[%d]</font>>]\n", i, names[i], i)
+ }
+ chl, chr := po.children(i)
+ for _, ch := range []posetEdge{chl, chr} {
+ if ch != 0 {
+ if ch.Strict() {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <\" color=\"red\"]\n", i, ch.Target())
+ } else {
+ fmt.Fprintf(f, "\t\tnode%d -> node%d [label=\" <=\" color=\"green\"]\n", i, ch.Target())
+ }
+ }
+ }
+ return false
+ })
+ fmt.Fprintf(f, "\t}\n")
+ }
+ fmt.Fprintf(f, "\tlabelloc=\"t\"\n")
+ fmt.Fprintf(f, "\tlabeldistance=\"3.0\"\n")
+ fmt.Fprintf(f, "\tlabel=%q\n", title)
+ fmt.Fprintf(f, "}\n")
+ return nil
+}
+
+// Ordered reports whether n1<n2. It returns false either when it is
+// certain that n1<n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) Ordered(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 != i2 && po.reaches(i1, i2, true)
+}
+
+// OrderedOrEqual reports whether n1<=n2. It returns false either when it is
+// certain that n1<=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n).
+func (po *poset) OrderedOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Ordered with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+ return i1 == i2 || po.reaches(i1, i2, false)
+}
+
+// Equal reports whether n1==n2. It returns false either when it is
+// certain that n1==n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(1).
+func (po *poset) Equal(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Equal with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ return f1 && f2 && i1 == i2
+}
+
+// NonEqual reports whether n1!=n2. It returns false either when it is
+// certain that n1!=n2 is false, or if there is not enough information
+// to tell.
+// Complexity is O(n) (because it internally calls Ordered to see if we
+// can infer n1!=n2 from n1<n2 or n2<n1).
+func (po *poset) NonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call NonEqual with n1==n2")
+ }
+
+ // If we never saw the nodes before, we don't
+ // have a recorded non-equality.
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+ if !f1 || !f2 {
+ return false
+ }
+
+	// Check if we recorded an inequality
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if n1<n2 or n2<n1, in which case we can infer that n1!=n2
+ if po.Ordered(n1, n2) || po.Ordered(n2, n1) {
+ return true
+ }
+
+ return false
+}
+
+// setOrder records that n1<n2 or n1<=n2 (depending on strict). Returns false
+// if this is a contradiction.
+// Implements SetOrder() and SetOrderOrEqual()
+func (po *poset) setOrder(n1, n2 *Value, strict bool) bool {
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ // Neither n1 nor n2 are in the poset, so they are not related
+ // in any way to existing nodes.
+ // Create a new DAG to record the relation.
+ i1, i2 = po.newnode(n1), po.newnode(n2)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.addchild(i1, i2, strict)
+
+ case f1 && !f2:
+		// n1 is in one of the DAGs, while n2 is not. Add n2 as a child
+ // of n1.
+ i2 = po.newnode(n2)
+ po.addchild(i1, i2, strict)
+
+ case !f1 && f2:
+ // n1 is not in any DAG but n2 is. If n2 is a root, we can put
+ // n1 in its place as a root; otherwise, we need to create a new
+ // extra root to record the relation.
+ i1 = po.newnode(n1)
+
+ if po.isroot(i2) {
+ po.changeroot(i2, i1)
+ po.upush(undoChangeRoot, i1, newedge(i2, strict))
+ po.addchild(i1, i2, strict)
+ return true
+ }
+
+ // Search for i2's root; this requires a O(n) search on all
+ // DAGs
+ r := po.findroot(i2)
+
+ // Re-parent as follows:
+ //
+ // extra
+ // r / \
+ // \ ===> r i1
+ // i2 \ /
+ // i2
+ //
+ extra := po.newnode(nil)
+ po.changeroot(r, extra)
+ po.upush(undoChangeRoot, extra, newedge(r, false))
+ po.addchild(extra, r, false)
+ po.addchild(extra, i1, false)
+ po.addchild(i1, i2, strict)
+
+ case f1 && f2:
+ // If the nodes are aliased, fail only if we're setting a strict order
+ // (that is, we cannot set n1<n2 if n1==n2).
+ if i1 == i2 {
+ return !strict
+ }
+
+ // If we are trying to record n1<=n2 but we learned that n1!=n2,
+ // record n1<n2, as it provides more information.
+ if !strict && po.isnoneq(i1, i2) {
+ strict = true
+ }
+
+ // Both n1 and n2 are in the poset. This is the complex part of the algorithm
+ // as we need to find many different cases and DAG shapes.
+
+ // Check if n1 somehow reaches n2
+ if po.reaches(i1, i2, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #1: N1<=X<=N2 | N1<=N2 | do nothing
+ // #2: N1<=X<=N2 | N1<N2 | add strict edge (N1<N2)
+ // #3: N1<X<N2 | N1<=N2 | do nothing (we already know more)
+ // #4: N1<X<N2 | N1<N2 | do nothing
+
+ // Check if we're in case #2
+ if strict && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ return true
+ }
+
+			// Case #1, #3 or #4: nothing to do
+ return true
+ }
+
+ // Check if n2 somehow reaches n1
+ if po.reaches(i2, i1, false) {
+ // This is the table of all cases we need to handle:
+ //
+ // DAG New Action
+ // ---------------------------------------------------
+ // #5: N2<=X<=N1 | N1<=N2 | collapse path (learn that N1=X=N2)
+ // #6: N2<=X<=N1 | N1<N2 | contradiction
+ // #7: N2<X<N1 | N1<=N2 | contradiction in the path
+ // #8: N2<X<N1 | N1<N2 | contradiction
+
+ if strict {
+ // Cases #6 and #8: contradiction
+ return false
+ }
+
+ // We're in case #5 or #7. Try to collapse path, and that will
+ // fail if it realizes that we are in case #7.
+ return po.collapsepath(n2, n1)
+ }
+
+ // We don't know of any existing relation between n1 and n2. They could
+ // be part of the same DAG or not.
+ // Find their roots to check whether they are in the same DAG.
+ r1, r2 := po.findroot(i1), po.findroot(i2)
+ if r1 != r2 {
+ // We need to merge the two DAGs to record a relation between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+ // Connect n1 and n2
+ po.addchild(i1, i2, strict)
+ }
+
+ return true
+}
+
+// SetOrder records that n1<n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrder(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, true)
+}
+
+// SetOrderOrEqual records that n1<=n2. Returns false if this is a contradiction
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetOrderOrEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetOrder with n1==n2")
+ }
+ return po.setOrder(n1, n2, false)
+}
+
+// SetEqual records that n1==n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1<n2 or n2<n1).
+// Complexity is O(1) if n2 was never seen before, or O(n) otherwise.
+func (po *poset) SetEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call Add with n1==n2")
+ }
+
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ switch {
+ case !f1 && !f2:
+ i1 = po.newnode(n1)
+ po.roots = append(po.roots, i1)
+ po.upush(undoNewRoot, i1, 0)
+ po.aliasnewnode(n1, n2)
+ case f1 && !f2:
+ po.aliasnewnode(n1, n2)
+ case !f1 && f2:
+ po.aliasnewnode(n2, n1)
+ case f1 && f2:
+ if i1 == i2 {
+ // Already aliased, ignore
+ return true
+ }
+
+ // If we recorded that n1!=n2, this is a contradiction.
+ if po.isnoneq(i1, i2) {
+ return false
+ }
+
+ // If we already knew that n1<=n2, we can collapse the path to
+		// record n1==n2 (and vice versa).
+ if po.reaches(i1, i2, false) {
+ return po.collapsepath(n1, n2)
+ }
+ if po.reaches(i2, i1, false) {
+ return po.collapsepath(n2, n1)
+ }
+
+ r1 := po.findroot(i1)
+ r2 := po.findroot(i2)
+ if r1 != r2 {
+ // Merge the two DAGs so we can record relations between the nodes
+ po.mergeroot(r1, r2)
+ }
+
+ // Set n2 as alias of n1. This will also update all the references
+ // to n2 to become references to n1
+ i2s := newBitset(int(po.lastidx) + 1)
+ i2s.Set(i2)
+ po.aliasnodes(n1, i2s)
+ }
+ return true
+}
+
+// SetNonEqual records that n1!=n2. Returns false if this is a contradiction
+// (that is, if it is already recorded that n1==n2).
+// Complexity is O(n).
+func (po *poset) SetNonEqual(n1, n2 *Value) bool {
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+ if n1.ID == n2.ID {
+ panic("should not call SetNonEqual with n1==n2")
+ }
+
+ // Check whether the nodes are already in the poset
+ i1, f1 := po.lookup(n1)
+ i2, f2 := po.lookup(n2)
+
+ // If either node wasn't present, we just record the new relation
+ // and exit.
+ if !f1 || !f2 {
+ po.setnoneq(n1, n2)
+ return true
+ }
+
+ // See if we already know this, in which case there's nothing to do.
+ if po.isnoneq(i1, i2) {
+ return true
+ }
+
+ // Check if we're contradicting an existing equality relation
+ if po.Equal(n1, n2) {
+ return false
+ }
+
+ // Record non-equality
+ po.setnoneq(n1, n2)
+
+ // If we know that i1<=i2 but not i1<i2, learn that as we
+ // now know that they are not equal. Do the same for i2<=i1.
+ // Do this check only if both nodes were already in the DAG,
+ // otherwise there cannot be an existing relation.
+ if po.reaches(i1, i2, false) && !po.reaches(i1, i2, true) {
+ po.addchild(i1, i2, true)
+ }
+ if po.reaches(i2, i1, false) && !po.reaches(i2, i1, true) {
+ po.addchild(i2, i1, true)
+ }
+
+ return true
+}
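// Sketch of the strengthening step above (illustrative only): once a<=b is
// recorded, learning a!=b lets the poset conclude a<b.
func exampleNonEqualStrengthening() {
	a := &Value{ID: 1}
	b := &Value{ID: 2}
	po := newPoset()
	po.SetOrderOrEqual(a, b) // a <= b
	_ = po.Ordered(a, b)     // false: only a <= b is known so far
	po.SetNonEqual(a, b)     // a != b, so the a <= b edge is upgraded to a < b
	_ = po.Ordered(a, b)     // true
}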
+
+// Checkpoint saves the current state of the DAG so that it's possible
+// to later undo this state.
+// Complexity is O(1).
+func (po *poset) Checkpoint() {
+ po.undo = append(po.undo, posetUndo{typ: undoCheckpoint})
+}
+
+// Undo restores the state of the poset to the previous checkpoint.
+// Complexity depends on the type of operations that were performed
+// since the last checkpoint; each Set* operation creates an undo
+// pass which Undo has to revert with a worst-case complexity of O(n).
+func (po *poset) Undo() {
+ if len(po.undo) == 0 {
+ panic("empty undo stack")
+ }
+ if debugPoset {
+ defer po.CheckIntegrity()
+ }
+
+ for len(po.undo) > 0 {
+ pass := po.undo[len(po.undo)-1]
+ po.undo = po.undo[:len(po.undo)-1]
+
+ switch pass.typ {
+ case undoCheckpoint:
+ return
+
+ case undoSetChl:
+ po.setchl(pass.idx, pass.edge)
+
+ case undoSetChr:
+ po.setchr(pass.idx, pass.edge)
+
+ case undoNonEqual:
+ po.noneq[uint32(pass.ID)].Clear(pass.idx)
+
+ case undoNewNode:
+ if pass.idx != po.lastidx {
+ panic("invalid newnode index")
+ }
+ if pass.ID != 0 {
+ if po.values[pass.ID] != pass.idx {
+ panic("invalid newnode undo pass")
+ }
+ delete(po.values, pass.ID)
+ }
+ po.setchl(pass.idx, 0)
+ po.setchr(pass.idx, 0)
+ po.nodes = po.nodes[:pass.idx]
+ po.lastidx--
+
+ case undoNewConstant:
+ // FIXME: remove this O(n) loop
+ var val int64
+ var i uint32
+ for val, i = range po.constants {
+ if i == pass.idx {
+ break
+ }
+ }
+ if i != pass.idx {
+ panic("constant not found in undo pass")
+ }
+ if pass.ID == 0 {
+ delete(po.constants, val)
+ } else {
+ // Restore previous index as constant node
+ // (also restoring the invariant on correct bounds)
+ oldidx := uint32(pass.ID)
+ po.constants[val] = oldidx
+ }
+
+ case undoAliasNode:
+ ID, prev := pass.ID, pass.idx
+ cur := po.values[ID]
+ if prev == 0 {
+ // Born as an alias, die as an alias
+ delete(po.values, ID)
+ } else {
+ if cur == prev {
+ panic("invalid aliasnode undo pass")
+ }
+ // Give it back previous value
+ po.values[ID] = prev
+ }
+
+ case undoNewRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo newroot")
+ }
+ po.removeroot(i)
+
+ case undoChangeRoot:
+ i := pass.idx
+ l, r := po.children(i)
+ if l|r != 0 {
+ panic("non-empty root in undo changeroot")
+ }
+ po.changeroot(i, pass.edge.Target())
+
+ case undoMergeRoot:
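+			// mergeroot created node i as a new root whose children are the
+			// two old roots; make its left child the root again and restore
+			// its right child as a separate root.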
+ i := pass.idx
+ l, r := po.children(i)
+ po.changeroot(i, l.Target())
+ po.roots = append(po.roots, r.Target())
+
+ default:
+ panic(pass.typ)
+ }
+ }
+
+ if debugPoset && po.CheckEmpty() != nil {
+ panic("poset not empty at the end of undo")
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/poset_test.go b/src/cmd/compile/internal/ssa/poset_test.go
new file mode 100644
index 0000000..a6db1d1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/poset_test.go
@@ -0,0 +1,800 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "testing"
+)
+
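+// Names of the poset operations exercised by testPosetOps.
+// The _Fail variants assert that the corresponding call reports false.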
+const (
+ SetOrder = "SetOrder"
+ SetOrder_Fail = "SetOrder_Fail"
+ SetOrderOrEqual = "SetOrderOrEqual"
+ SetOrderOrEqual_Fail = "SetOrderOrEqual_Fail"
+ Ordered = "Ordered"
+ Ordered_Fail = "Ordered_Fail"
+ OrderedOrEqual = "OrderedOrEqual"
+ OrderedOrEqual_Fail = "OrderedOrEqual_Fail"
+ SetEqual = "SetEqual"
+ SetEqual_Fail = "SetEqual_Fail"
+ Equal = "Equal"
+ Equal_Fail = "Equal_Fail"
+ SetNonEqual = "SetNonEqual"
+ SetNonEqual_Fail = "SetNonEqual_Fail"
+ NonEqual = "NonEqual"
+ NonEqual_Fail = "NonEqual_Fail"
+ Checkpoint = "Checkpoint"
+ Undo = "Undo"
+)
+
+type posetTestOp struct {
+ typ string
+ a, b int
+}
+
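+// vconst returns the index, in the test value array built by testPosetOps,
+// of an OpConst64 value whose AuxInt is i.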
+func vconst(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 128 + i
+}
+
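+// vconst2 returns the index of a second, distinct OpConst64 value with the
+// same AuxInt i, used to check how the poset relates two different Values
+// holding the same constant.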
+func vconst2(i int) int {
+ if i < -128 || i >= 128 {
+ panic("invalid const")
+ }
+ return 1000 + 256 + i
+}
+
+func testPosetOps(t *testing.T, unsigned bool, ops []posetTestOp) {
+ var v [1512]*Value
+ for i := range v {
+ v[i] = new(Value)
+ v[i].ID = ID(i)
+ if i >= 1000 && i < 1256 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 128)
+ }
+ if i >= 1256 && i < 1512 {
+ v[i].Op = OpConst64
+ v[i].AuxInt = int64(i - 1000 - 256)
+ }
+ }
+
+ po := newPoset()
+ po.SetUnsigned(unsigned)
+ for idx, op := range ops {
+ t.Logf("op%d%v", idx, op)
+ switch op.typ {
+ case SetOrder:
+ if !po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrder_Fail:
+ if po.SetOrder(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetOrderOrEqual:
+ if !po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetOrderOrEqual_Fail:
+ if po.SetOrderOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Ordered:
+ if !po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Ordered_Fail:
+ if po.Ordered(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case OrderedOrEqual:
+ if !po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case OrderedOrEqual_Fail:
+ if po.OrderedOrEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetEqual:
+ if !po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetEqual_Fail:
+ if po.SetEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Equal:
+ if !po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case Equal_Fail:
+ if po.Equal(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case SetNonEqual:
+ if !po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case SetNonEqual_Fail:
+ if po.SetNonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case NonEqual:
+ if !po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v failed", idx, op)
+ }
+ case NonEqual_Fail:
+ if po.NonEqual(v[op.a], v[op.b]) {
+ t.Errorf("FAILED: op%d%v passed", idx, op)
+ }
+ case Checkpoint:
+ po.Checkpoint()
+ case Undo:
+ t.Log("Undo stack", po.undo)
+ po.Undo()
+ default:
+ panic("unimplemented")
+ }
+
+ if false {
+ po.DotDump(fmt.Sprintf("op%d.dot", idx), fmt.Sprintf("Last op: %v", op))
+ }
+
+ po.CheckIntegrity()
+ }
+
+ // Check that the poset is completely empty
+ if err := po.CheckEmpty(); err != nil {
+ t.Error(err)
+ }
+}
+
+func TestPoset(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Ordered_Fail, 123, 124},
+
+ // Dag #0: 100<101
+ {Checkpoint, 0, 0},
+ {SetOrder, 100, 101},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 101, 100},
+ {SetOrder_Fail, 101, 100},
+ {SetOrder, 100, 101}, // repeat
+ {NonEqual, 100, 101},
+ {NonEqual, 101, 100},
+ {SetEqual_Fail, 100, 101},
+
+ // Dag #1: 4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder, 7, 12},
+ {Ordered, 7, 12},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 12, 4},
+ {NonEqual, 4, 12},
+ {NonEqual, 12, 4},
+ {NonEqual_Fail, 4, 100},
+ {OrderedOrEqual, 4, 12},
+ {OrderedOrEqual_Fail, 12, 4},
+ {OrderedOrEqual, 4, 7},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Dag #1: 1<4<=7<12
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, 4},
+ {Ordered, 1, 4},
+ {Ordered, 1, 12},
+ {Ordered_Fail, 12, 1},
+
+ // Dag #1: 1<4<=7<12, 6<7
+ {Checkpoint, 0, 0},
+ {SetOrder, 6, 7},
+ {Ordered, 6, 7},
+ {Ordered, 6, 12},
+ {SetOrder_Fail, 7, 4},
+ {SetOrder_Fail, 7, 6},
+ {SetOrder_Fail, 7, 1},
+
+ // Dag #1: 1<4<=7<12, 1<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 1, 6},
+ {SetOrder, 1, 6},
+ {Ordered, 1, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Dag #1: 1<4<=7<12, 1<4<6<7
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 4, 6},
+ {Ordered_Fail, 4, 7},
+ {SetOrder, 4, 6},
+ {Ordered, 4, 6},
+ {OrderedOrEqual, 4, 6},
+ {Ordered, 4, 7},
+ {OrderedOrEqual, 4, 7},
+ {SetOrder_Fail, 6, 4},
+ {Ordered_Fail, 7, 6},
+ {Ordered_Fail, 7, 4},
+ {OrderedOrEqual_Fail, 7, 6},
+ {OrderedOrEqual_Fail, 7, 4},
+
+ // Merge: 1<4<6, 4<=7<12, 6<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 101},
+ {SetOrder, 6, 101},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Merge: 1<4<6, 4<=7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered_Fail, 6, 100},
+ {SetOrder, 6, 100},
+ {Ordered, 1, 100},
+
+ // Undo: 1<4<6<7<12, 6<101
+ {Ordered, 100, 101},
+ {Undo, 0, 0},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo: 1<4<6<7<12, 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 100},
+ {Ordered_Fail, 1, 101},
+ {Ordered_Fail, 6, 100},
+ {Ordered_Fail, 6, 101},
+
+ // Merge: 1<4<6<7<12, 6<100<101
+ {Checkpoint, 0, 0},
+ {Ordered, 100, 101},
+ {SetOrder, 6, 100},
+ {Ordered, 6, 100},
+ {Ordered, 6, 101},
+ {Ordered, 1, 101},
+
+ // Undo 2 times: 1<4<7<12, 1<6<7
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 6},
+ {Ordered, 4, 12},
+ {Ordered_Fail, 4, 6},
+ {SetOrder_Fail, 6, 1},
+
+ // Undo 2 times: 1<4<7<12
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Ordered, 1, 12},
+ {Ordered, 7, 12},
+ {Ordered_Fail, 1, 6},
+ {Ordered_Fail, 6, 7},
+ {Ordered, 100, 101},
+ {Ordered_Fail, 1, 101},
+
+ // Undo: 4<7<12
+ {Undo, 0, 0},
+ {Ordered_Fail, 1, 12},
+ {Ordered_Fail, 1, 4},
+ {Ordered, 4, 12},
+ {Ordered, 100, 101},
+
+ // Undo: 100<101
+ {Undo, 0, 0},
+ {Ordered_Fail, 4, 7},
+ {Ordered_Fail, 7, 12},
+ {Ordered, 100, 101},
+
+		// Recreate DAG #1 from scratch, reusing the same nodes.
+ // This also stresses that Undo has done its job correctly.
+ // DAG: 1<2<(5|6), 101<102<(105|106<107)
+ {Checkpoint, 0, 0},
+ {SetOrder, 101, 102},
+ {SetOrder, 102, 105},
+ {SetOrder, 102, 106},
+ {SetOrder, 106, 107},
+ {SetOrder, 1, 2},
+ {SetOrder, 2, 5},
+ {SetOrder, 2, 6},
+ {SetEqual_Fail, 1, 6},
+ {SetEqual_Fail, 107, 102},
+
+ // Now Set 2 == 102
+ // New DAG: (1|101)<2==102<(5|6|105|106<107)
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 102},
+ {Equal, 2, 102},
+ {SetEqual, 2, 102}, // trivially pass
+ {SetNonEqual_Fail, 2, 102}, // trivially fail
+ {Ordered, 1, 107},
+ {Ordered, 101, 6},
+ {Ordered, 101, 105},
+ {Ordered, 2, 106},
+ {Ordered, 102, 6},
+
+ // Undo SetEqual
+ {Undo, 0, 0},
+ {Equal_Fail, 2, 102},
+ {Ordered_Fail, 2, 102},
+ {Ordered_Fail, 1, 107},
+ {Ordered_Fail, 101, 6},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 100},
+ {Ordered, 1, 107},
+ {Ordered, 100, 6},
+
+ // SetEqual with new node
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, 2, 400},
+ {SetEqual, 401, 2},
+ {Equal, 400, 401},
+ {Ordered, 1, 400},
+ {Ordered, 400, 6},
+ {Ordered, 1, 401},
+ {Ordered, 401, 6},
+ {Ordered_Fail, 2, 401},
+
+ // SetEqual unseen nodes and then connect
+ {Checkpoint, 0, 0},
+ {SetEqual, 500, 501},
+ {SetEqual, 102, 501},
+ {Equal, 500, 102},
+ {Ordered, 501, 106},
+ {Ordered, 100, 500},
+ {SetEqual, 500, 501},
+ {Ordered_Fail, 500, 501},
+ {Ordered_Fail, 102, 501},
+
+ // SetNonEqual relations
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 600, 601},
+ {NonEqual, 600, 601},
+ {SetNonEqual, 601, 602},
+ {NonEqual, 601, 602},
+ {NonEqual_Fail, 600, 602}, // non-transitive
+ {SetEqual_Fail, 601, 602},
+
+ // Undo back to beginning, leave the poset empty
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetStrict(t *testing.T) {
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Build: 20!=30, 10<20<=30<40. The 20<=30 will become 20<30.
+ {SetNonEqual, 20, 30},
+ {SetOrder, 10, 20},
+ {SetOrderOrEqual, 20, 30}, // this is affected by 20!=30
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Undo, 0, 0},
+
+ // Now do the opposite: first build the DAG and then learn non-equality
+ {Checkpoint, 0, 0},
+ {SetOrder, 10, 20},
+		{SetOrderOrEqual, 20, 30}, // this will later be affected by 20!=30
+ {SetOrder, 30, 40},
+
+ {Ordered, 10, 30},
+ {Ordered_Fail, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 20, 30},
+ {Ordered, 10, 30},
+ {Ordered, 20, 30},
+ {Ordered, 10, 40},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual, 20, 30},
+ {OrderedOrEqual, 10, 40},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 35},
+ {OrderedOrEqual, 20, 35},
+ {Ordered_Fail, 20, 35},
+ {SetNonEqual, 20, 35},
+ {Ordered, 20, 35},
+ {Undo, 0, 0},
+
+ // Learn <= and >=
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 50, 60},
+ {SetOrderOrEqual, 60, 50},
+ {OrderedOrEqual, 50, 60},
+ {OrderedOrEqual, 60, 50},
+ {Ordered_Fail, 50, 60},
+ {Ordered_Fail, 60, 50},
+ {Equal, 50, 60},
+ {Equal, 60, 50},
+ {NonEqual_Fail, 50, 60},
+ {NonEqual_Fail, 60, 50},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetCollapse(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ // Create a complex graph of <= relations among nodes between 10 and 25.
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, vconst(20)},
+ {SetOrderOrEqual, vconst(20), 25},
+ {SetOrderOrEqual, 10, 12},
+ {SetOrderOrEqual, 12, 16},
+ {SetOrderOrEqual, 16, vconst(20)},
+ {SetOrderOrEqual, 10, 17},
+ {SetOrderOrEqual, 17, 25},
+ {SetOrderOrEqual, 15, 18},
+ {SetOrderOrEqual, 18, vconst(20)},
+ {SetOrderOrEqual, 15, 19},
+ {SetOrderOrEqual, 19, 25},
+
+ // These are other paths not part of the main collapsing path
+ {SetOrderOrEqual, 10, 11},
+ {SetOrderOrEqual, 11, 26},
+ {SetOrderOrEqual, 13, 25},
+ {SetOrderOrEqual, 100, 25},
+ {SetOrderOrEqual, 101, 15},
+ {SetOrderOrEqual, 102, 10},
+ {SetOrderOrEqual, 25, 103},
+ {SetOrderOrEqual, 20, 104},
+
+ {Checkpoint, 0, 0},
+ // Collapse everything by setting 10 >= 25: this should make everything equal
+ {SetOrderOrEqual, 25, 10},
+
+ // Check that all nodes are pairwise equal now
+ {Equal, 10, 12},
+ {Equal, 10, 15},
+ {Equal, 10, 16},
+ {Equal, 10, 17},
+ {Equal, 10, 18},
+ {Equal, 10, 19},
+ {Equal, 10, vconst(20)},
+ {Equal, 10, vconst2(20)},
+ {Equal, 10, 25},
+
+ {Equal, 12, 15},
+ {Equal, 12, 16},
+ {Equal, 12, 17},
+ {Equal, 12, 18},
+ {Equal, 12, 19},
+ {Equal, 12, vconst(20)},
+ {Equal, 12, vconst2(20)},
+ {Equal, 12, 25},
+
+ {Equal, 15, 16},
+ {Equal, 15, 17},
+ {Equal, 15, 18},
+ {Equal, 15, 19},
+ {Equal, 15, vconst(20)},
+ {Equal, 15, vconst2(20)},
+ {Equal, 15, 25},
+
+ {Equal, 16, 17},
+ {Equal, 16, 18},
+ {Equal, 16, 19},
+ {Equal, 16, vconst(20)},
+ {Equal, 16, vconst2(20)},
+ {Equal, 16, 25},
+
+ {Equal, 17, 18},
+ {Equal, 17, 19},
+ {Equal, 17, vconst(20)},
+ {Equal, 17, vconst2(20)},
+ {Equal, 17, 25},
+
+ {Equal, 18, 19},
+ {Equal, 18, vconst(20)},
+ {Equal, 18, vconst2(20)},
+ {Equal, 18, 25},
+
+ {Equal, 19, vconst(20)},
+ {Equal, 19, vconst2(20)},
+ {Equal, 19, 25},
+
+ {Equal, vconst(20), vconst2(20)},
+ {Equal, vconst(20), 25},
+
+ {Equal, vconst2(20), 25},
+
+ // ... but not 11/26/100/101/102, which were on a different path
+ {Equal_Fail, 10, 11},
+ {Equal_Fail, 10, 26},
+ {Equal_Fail, 10, 100},
+ {Equal_Fail, 10, 101},
+ {Equal_Fail, 10, 102},
+ {OrderedOrEqual, 10, 26},
+ {OrderedOrEqual, 25, 26},
+ {OrderedOrEqual, 13, 25},
+ {OrderedOrEqual, 13, 10},
+
+ {Undo, 0, 0},
+ {OrderedOrEqual, 10, 25},
+ {Equal_Fail, 10, 12},
+ {Equal_Fail, 10, 15},
+ {Equal_Fail, 10, 25},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 15},
+ {SetOrderOrEqual, 15, 20},
+ {SetOrderOrEqual, 20, 25},
+ {SetOrder, 10, 16},
+ {SetOrderOrEqual, 16, 20},
+ // Check that we cannot collapse here because of the strict relation 10<16
+ {SetOrderOrEqual_Fail, 20, 10},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetSetEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ // 10<=20<=30<40, 20<=100<110
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 30},
+ {SetOrder, 30, 40},
+ {SetOrderOrEqual, 20, 100},
+ {SetOrder, 100, 110},
+ {OrderedOrEqual, 10, 30},
+ {OrderedOrEqual_Fail, 30, 10},
+ {Ordered_Fail, 10, 30},
+ {Ordered_Fail, 30, 10},
+ {Ordered, 10, 40},
+ {Ordered_Fail, 40, 10},
+
+ // Try learning 10==20.
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {SetOrderOrEqual, 10, 20},
+ {SetOrderOrEqual, 20, 10},
+ {SetOrder_Fail, 10, 20},
+ {SetOrder_Fail, 20, 10},
+ {Undo, 0, 0},
+
+ // Try learning 20==10.
+ {Checkpoint, 0, 0},
+ {SetEqual, 20, 10},
+ {OrderedOrEqual, 10, 20},
+ {Ordered_Fail, 10, 20},
+ {Equal, 10, 20},
+ {Undo, 0, 0},
+
+ // Try learning 10==40 or 30==40 or 10==110.
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 30, 40},
+ {SetEqual_Fail, 40, 30},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+		// Try learning 40==110, and then 10==40 or 10==110
+ {Checkpoint, 0, 0},
+ {SetEqual, 40, 110},
+ {SetEqual_Fail, 10, 40},
+ {SetEqual_Fail, 40, 10},
+ {SetEqual_Fail, 10, 110},
+ {SetEqual_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 40<20 or 30<20 or 110<10
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, 40, 20},
+ {SetOrder_Fail, 30, 20},
+ {SetOrder_Fail, 110, 10},
+ {Undo, 0, 0},
+
+ // Try learning 30<=20
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 30, 20},
+ {Equal, 30, 20},
+ {OrderedOrEqual, 30, 100},
+ {Ordered, 30, 110},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetConst(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(120)},
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(120)},
+ {OrderedOrEqual, 1, vconst(120)},
+ {OrderedOrEqual, 100, vconst(120)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, 20, vconst(20)},
+ {SetEqual, 25, vconst(25)},
+ {Ordered, 1, 20},
+ {Ordered, 1, vconst(30)},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, 5},
+ {SetOrderOrEqual, 5, 25},
+ {SetEqual, vconst(-20), 5},
+ {SetEqual, vconst(-25), 1},
+ {Ordered, 1, 5},
+ {Ordered, vconst(-30), 1},
+ {Undo, 0, 0},
+
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 1, vconst(4)},
+ {SetNonEqual, 1, vconst(6)},
+ {NonEqual, 1, vconst(4)},
+ {NonEqual_Fail, 1, vconst(5)},
+ {NonEqual, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(4)},
+ {Equal_Fail, 1, vconst(5)},
+ {Equal_Fail, 1, vconst(6)},
+ {Equal_Fail, 1, vconst(7)},
+ {Undo, 0, 0},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, true, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrder, 1, vconst(15)},
+ {SetOrderOrEqual, 100, vconst(-5)}, // -5 is a very big number in unsigned
+ {Ordered, 1, vconst(15)},
+ {Ordered, 1, vconst(-5)},
+ {OrderedOrEqual, 1, vconst(-5)},
+ {OrderedOrEqual, 100, vconst(-5)},
+ {Ordered_Fail, 100, vconst(15)},
+ {Ordered_Fail, vconst(15), 100},
+
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, 1, vconst(3)},
+ {SetNonEqual, 1, vconst(0)},
+ {Ordered_Fail, 1, vconst(0)},
+ {Undo, 0, 0},
+ })
+
+ testPosetOps(t, false, []posetTestOp{
+ // Check relations of a constant with itself
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder_Fail, vconst(3), vconst2(3)},
+ {Undo, 0, 0},
+
+		// Check relations between two constants, using
+		// different instances of the same constant
+ {Checkpoint, 0, 0},
+ {SetOrderOrEqual, vconst(3), vconst(4)},
+ {OrderedOrEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetOrder, vconst(3), vconst(4)},
+ {Ordered, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetEqual_Fail, vconst(3), vconst(4)},
+ {SetEqual_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {NonEqual, vconst(3), vconst(4)},
+ {NonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {Equal_Fail, vconst(3), vconst(4)},
+ {Equal_Fail, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ {Checkpoint, 0, 0},
+ {SetNonEqual, vconst(3), vconst(4)},
+ {SetNonEqual, vconst(3), vconst2(4)},
+ {Undo, 0, 0},
+ })
+}
+
+func TestPosetNonEqual(t *testing.T) {
+ testPosetOps(t, false, []posetTestOp{
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Learn again 10!=20
+ {Checkpoint, 0, 0},
+ {SetNonEqual, 10, 20},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+
+ // Undo. We still know 10!=20
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual, 10, 20},
+ {SetEqual_Fail, 10, 20},
+
+ // Undo again. Now we know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+
+ // Learn 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Learn again 10==20
+ {Checkpoint, 0, 0},
+ {SetEqual, 10, 20},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We still know 10==20
+ {Undo, 0, 0},
+ {Equal, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ {SetNonEqual_Fail, 10, 20},
+
+ // Undo. We know nothing
+ {Undo, 0, 0},
+ {Equal_Fail, 10, 20},
+ {NonEqual_Fail, 10, 20},
+ })
+}
diff --git a/src/cmd/compile/internal/ssa/print.go b/src/cmd/compile/internal/ssa/print.go
new file mode 100644
index 0000000..96cd2c7
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/print.go
@@ -0,0 +1,191 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "bytes"
+ "cmd/internal/src"
+ "crypto/sha256"
+ "fmt"
+ "io"
+)
+
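+// printFunc logs the whole function f, including dead blocks and values.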
+func printFunc(f *Func) {
+ f.Logf("%s", f)
+}
+
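+// hashFunc returns a SHA-256 hash of the printed form of f, including dead code.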
+func hashFunc(f *Func) []byte {
+ h := sha256.New()
+ p := stringFuncPrinter{w: h, printDead: true}
+ fprintFunc(p, f)
+ return h.Sum(nil)
+}
+
+func (f *Func) String() string {
+ var buf bytes.Buffer
+ p := stringFuncPrinter{w: &buf, printDead: true}
+ fprintFunc(p, f)
+ return buf.String()
+}
+
+// rewriteHash returns a hash of f suitable for detecting rewrite cycles.
+func (f *Func) rewriteHash() string {
+ h := sha256.New()
+ p := stringFuncPrinter{w: h, printDead: false}
+ fprintFunc(p, f)
+ return fmt.Sprintf("%x", h.Sum(nil))
+}
+
+type funcPrinter interface {
+ header(f *Func)
+ startBlock(b *Block, reachable bool)
+ endBlock(b *Block, reachable bool)
+ value(v *Value, live bool)
+ startDepCycle()
+ endDepCycle()
+ named(n LocalSlot, vals []*Value)
+}
+
+type stringFuncPrinter struct {
+ w io.Writer
+ printDead bool
+}
+
+func (p stringFuncPrinter) header(f *Func) {
+ fmt.Fprint(p.w, f.Name)
+ fmt.Fprint(p.w, " ")
+ fmt.Fprintln(p.w, f.Type)
+}
+
+func (p stringFuncPrinter) startBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
+ fmt.Fprintf(p.w, " b%d:", b.ID)
+ if len(b.Preds) > 0 {
+ io.WriteString(p.w, " <-")
+ for _, e := range b.Preds {
+ pred := e.b
+ fmt.Fprintf(p.w, " b%d", pred.ID)
+ }
+ }
+ if !reachable {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ io.WriteString(p.w, "\n")
+}
+
+func (p stringFuncPrinter) endBlock(b *Block, reachable bool) {
+ if !p.printDead && !reachable {
+ return
+ }
+ fmt.Fprintln(p.w, " "+b.LongString())
+}
+
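+// StmtString returns a formatted source line number for p: the line number is
+// prefixed with "+" if p is marked as a statement boundary, with "-" if it is
+// marked as not a statement, and the result is "(?) " if p is unknown.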
+func StmtString(p src.XPos) string {
+ linenumber := "(?) "
+ if p.IsKnown() {
+ pfx := ""
+ if p.IsStmt() == src.PosIsStmt {
+ pfx = "+"
+ }
+ if p.IsStmt() == src.PosNotStmt {
+ pfx = "-"
+ }
+ linenumber = fmt.Sprintf("(%s%d) ", pfx, p.Line())
+ }
+ return linenumber
+}
+
+func (p stringFuncPrinter) value(v *Value, live bool) {
+ if !p.printDead && !live {
+ return
+ }
+ fmt.Fprintf(p.w, " %s", StmtString(v.Pos))
+ fmt.Fprint(p.w, v.LongString())
+ if !live {
+ fmt.Fprint(p.w, " DEAD")
+ }
+ fmt.Fprintln(p.w)
+}
+
+func (p stringFuncPrinter) startDepCycle() {
+ fmt.Fprintln(p.w, "dependency cycle!")
+}
+
+func (p stringFuncPrinter) endDepCycle() {}
+
+func (p stringFuncPrinter) named(n LocalSlot, vals []*Value) {
+ fmt.Fprintf(p.w, "name %s: %v\n", n, vals)
+}
+
+func fprintFunc(p funcPrinter, f *Func) {
+ reachable, live := findlive(f)
+ defer f.retDeadcodeLive(live)
+ p.header(f)
+ printed := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ p.startBlock(b, reachable[b.ID])
+
+ if f.scheduled {
+ // Order of Values has been decided - print in that order.
+ for _, v := range b.Values {
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ }
+ p.endBlock(b, reachable[b.ID])
+ continue
+ }
+
+ // print phis first since all value cycles contain a phi
+ n := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+
+ // print rest of values in dependency order
+ for n < len(b.Values) {
+ m := n
+ outer:
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ for _, w := range v.Args {
+ // w == nil shouldn't happen, but if it does,
+ // don't panic; we'll get a better diagnosis later.
+ if w != nil && w.Block == b && !printed[w.ID] {
+ continue outer
+ }
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+ if m == n {
+ p.startDepCycle()
+ for _, v := range b.Values {
+ if printed[v.ID] {
+ continue
+ }
+ p.value(v, live[v.ID])
+ printed[v.ID] = true
+ n++
+ }
+ p.endDepCycle()
+ }
+ }
+
+ p.endBlock(b, reachable[b.ID])
+ }
+ for _, name := range f.Names {
+ p.named(*name, f.NamedValues[*name])
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/prove.go b/src/cmd/compile/internal/ssa/prove.go
new file mode 100644
index 0000000..b203584
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/prove.go
@@ -0,0 +1,1442 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+ "math"
+)
+
+type branch int
+
+const (
+ unknown branch = iota
+ positive
+ negative
+)
+
+// relation represents the set of possible relations between
+// pairs of variables (v, w). Without a priori knowledge the
+// mask is lt | eq | gt meaning v can be less than, equal to or
+// greater than w. When the execution path branches on the condition
+// `v op w` the set of relations is updated to exclude any
+// relation not possible due to `v op w` being true (or false).
+//
+// E.g.
+//
+// r := relation(...)
+//
+// if v < w {
+// newR := r & lt
+// }
+// if v >= w {
+// newR := r & (eq|gt)
+// }
+// if v != w {
+// newR := r & (lt|gt)
+// }
+type relation uint
+
+const (
+ lt relation = 1 << iota
+ eq
+ gt
+)
+
+var relationStrings = [...]string{
+ 0: "none", lt: "<", eq: "==", lt | eq: "<=",
+ gt: ">", gt | lt: "!=", gt | eq: ">=", gt | eq | lt: "any",
+}
+
+func (r relation) String() string {
+ if r < relation(len(relationStrings)) {
+ return relationStrings[r]
+ }
+ return fmt.Sprintf("relation(%d)", uint(r))
+}
+
+// domain represents the domain of a variable pair in which a set
+// of relations is known. For example, relations learned for unsigned
+// pairs cannot be transferred to signed pairs because the same bit
+// representation can mean something else.
+type domain uint
+
+const (
+ signed domain = 1 << iota
+ unsigned
+ pointer
+ boolean
+)
+
+var domainStrings = [...]string{
+ "signed", "unsigned", "pointer", "boolean",
+}
+
+func (d domain) String() string {
+ s := ""
+ for i, ds := range domainStrings {
+ if d&(1<<uint(i)) != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += ds
+ d &^= 1 << uint(i)
+ }
+ }
+ if d != 0 {
+ if len(s) != 0 {
+ s += "|"
+ }
+ s += fmt.Sprintf("0x%x", uint(d))
+ }
+ return s
+}
+
+type pair struct {
+ v, w *Value // a pair of values, ordered by ID.
+ // v can be nil, to mean the zero value.
+ // for booleans the zero value (v == nil) is false.
+ d domain
+}
+
+// fact is a pair plus a relation for that pair.
+type fact struct {
+ p pair
+ r relation
+}
+
+// a limit records known upper and lower bounds for a value.
+type limit struct {
+ min, max int64 // min <= value <= max, signed
+ umin, umax uint64 // umin <= value <= umax, unsigned
+}
+
+func (l limit) String() string {
+ return fmt.Sprintf("sm,SM,um,UM=%d,%d,%d,%d", l.min, l.max, l.umin, l.umax)
+}
+
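+// intersect returns the tightest limit implied by both l and l2.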
+func (l limit) intersect(l2 limit) limit {
+ if l.min < l2.min {
+ l.min = l2.min
+ }
+ if l.umin < l2.umin {
+ l.umin = l2.umin
+ }
+ if l.max > l2.max {
+ l.max = l2.max
+ }
+ if l.umax > l2.umax {
+ l.umax = l2.umax
+ }
+ return l
+}
+
+var noLimit = limit{math.MinInt64, math.MaxInt64, 0, math.MaxUint64}
+
+// a limitFact is a limit known for a particular value.
+type limitFact struct {
+ vid ID
+ limit limit
+}
+
+// factsTable keeps track of relations between pairs of values.
+//
+// The fact table logic is sound, but incomplete. Outside of a few
+// special cases, it performs no deduction or arithmetic. While there
+// are known decision procedures for this, the ad hoc approach taken
+// by the facts table is effective for real code while remaining very
+// efficient.
+type factsTable struct {
+ // unsat is true if facts contains a contradiction.
+ //
+ // Note that the factsTable logic is incomplete, so if unsat
+ // is false, the assertions in factsTable could be satisfiable
+ // *or* unsatisfiable.
+ unsat bool // true if facts contains a contradiction
+ unsatDepth int // number of unsat checkpoints
+
+	facts map[pair]relation // current known set of relations
+ stack []fact // previous sets of relations
+
+	// orderS and orderU are partial order sets that record information
+	// about relations between SSA values in the signed and unsigned
+	// domains, respectively.
+ orderS *poset
+ orderU *poset
+
+ // known lower and upper bounds on individual values.
+ limits map[ID]limit
+ limitStack []limitFact // previous entries
+
+ // For each slice s, a map from s to a len(s)/cap(s) value (if any)
+ // TODO: check if there are cases that matter where we have
+ // more than one len(s) for a slice. We could keep a list if necessary.
+ lens map[ID]*Value
+ caps map[ID]*Value
+
+ // zero is a zero-valued constant
+ zero *Value
+}
+
+// checkpointFact and checkpointBound are sentinel values used for
+// checkpointing and restoring the factsTable stacks.
+var checkpointFact = fact{}
+var checkpointBound = limitFact{}
+
+func newFactsTable(f *Func) *factsTable {
+ ft := &factsTable{}
+ ft.orderS = f.newPoset()
+ ft.orderU = f.newPoset()
+ ft.orderS.SetUnsigned(false)
+ ft.orderU.SetUnsigned(true)
+ ft.facts = make(map[pair]relation)
+ ft.stack = make([]fact, 4)
+ ft.limits = make(map[ID]limit)
+ ft.limitStack = make([]limitFact, 4)
+ ft.zero = f.ConstInt64(f.Config.Types.Int64, 0)
+ return ft
+}
+
+// update updates the set of relations between v and w in domain d
+// restricting it to r.
+func (ft *factsTable) update(parent *Block, v, w *Value, d domain, r relation) {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "parent=%s, update %s %s %s", parent, v, w, r)
+ }
+ // No need to do anything else if we already found unsat.
+ if ft.unsat {
+ return
+ }
+
+ // Self-fact. It's wasteful to register it into the facts
+ // table, so just note whether it's satisfiable
+ if v == w {
+ if r&eq == 0 {
+ ft.unsat = true
+ }
+ return
+ }
+
+ if d == signed || d == unsigned {
+ var ok bool
+ order := ft.orderS
+ if d == unsigned {
+ order = ft.orderU
+ }
+ switch r {
+ case lt:
+ ok = order.SetOrder(v, w)
+ case gt:
+ ok = order.SetOrder(w, v)
+ case lt | eq:
+ ok = order.SetOrderOrEqual(v, w)
+ case gt | eq:
+ ok = order.SetOrderOrEqual(w, v)
+ case eq:
+ ok = order.SetEqual(v, w)
+ case lt | gt:
+ ok = order.SetNonEqual(v, w)
+ default:
+ panic("unknown relation")
+ }
+ if !ok {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ } else {
+ if lessByID(w, v) {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+
+ p := pair{v, w, d}
+ oldR, ok := ft.facts[p]
+ if !ok {
+ if v == w {
+ oldR = eq
+ } else {
+ oldR = lt | eq | gt
+ }
+ }
+ // No changes compared to information already in facts table.
+ if oldR == r {
+ return
+ }
+ ft.stack = append(ft.stack, fact{p, oldR})
+ ft.facts[p] = oldR & r
+ // If this relation is not satisfiable, mark it and exit right away
+ if oldR&r == 0 {
+ if parent.Func.pass.debug > 2 {
+ parent.Func.Warnl(parent.Pos, "unsat %s %s %s", v, w, r)
+ }
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Extract bounds when comparing against constants
+ if v.isGenericIntConst() {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ if v != nil && w.isGenericIntConst() {
+ // Note: all the +1/-1 below could overflow/underflow. Either will
+ // still generate correct results, it will just lead to imprecision.
+ // In fact if there is overflow/underflow, the corresponding
+ // code is unreachable because the known range is outside the range
+ // of the value's type.
+ old, ok := ft.limits[v.ID]
+ if !ok {
+ old = noLimit
+ if v.isGenericIntConst() {
+ switch d {
+ case signed:
+ old.min, old.max = v.AuxInt, v.AuxInt
+ if v.AuxInt >= 0 {
+ old.umin, old.umax = uint64(v.AuxInt), uint64(v.AuxInt)
+ }
+ case unsigned:
+ old.umin = v.AuxUnsigned()
+ old.umax = old.umin
+ if int64(old.umin) >= 0 {
+ old.min, old.max = int64(old.umin), int64(old.umin)
+ }
+ }
+ }
+ }
+ lim := noLimit
+ switch d {
+ case signed:
+ c := w.AuxInt
+ switch r {
+ case lt:
+ lim.max = c - 1
+ case lt | eq:
+ lim.max = c
+ case gt | eq:
+ lim.min = c
+ case gt:
+ lim.min = c + 1
+ case lt | gt:
+ lim = old
+ if c == lim.min {
+ lim.min++
+ }
+ if c == lim.max {
+ lim.max--
+ }
+ case eq:
+ lim.min = c
+ lim.max = c
+ }
+ if lim.min >= 0 {
+ // int(x) >= 0 && int(x) >= N ⇒ uint(x) >= N
+ lim.umin = uint64(lim.min)
+ }
+ if lim.max != noLimit.max && old.min >= 0 && lim.max >= 0 {
+ // 0 <= int(x) <= N ⇒ 0 <= uint(x) <= N
+ // This is for a max update, so the lower bound
+ // comes from what we already know (old).
+ lim.umax = uint64(lim.max)
+ }
+ case unsigned:
+ uc := w.AuxUnsigned()
+ switch r {
+ case lt:
+ lim.umax = uc - 1
+ case lt | eq:
+ lim.umax = uc
+ case gt | eq:
+ lim.umin = uc
+ case gt:
+ lim.umin = uc + 1
+ case lt | gt:
+ lim = old
+ if uc == lim.umin {
+ lim.umin++
+ }
+ if uc == lim.umax {
+ lim.umax--
+ }
+ case eq:
+ lim.umin = uc
+ lim.umax = uc
+ }
+ // We could use the contrapositives of the
+ // signed implications to derive signed facts,
+ // but it turns out not to matter.
+ }
+ ft.limitStack = append(ft.limitStack, limitFact{v.ID, old})
+ lim = old.intersect(lim)
+ ft.limits[v.ID] = lim
+ if v.Block.Func.pass.debug > 2 {
+ v.Block.Func.Warnl(parent.Pos, "parent=%s, new limits %s %s %s %s", parent, v, w, r, lim.String())
+ }
+ if lim.min > lim.max || lim.umin > lim.umax {
+ ft.unsat = true
+ return
+ }
+ }
+
+ // Derived facts below here are only about numbers.
+ if d != signed && d != unsigned {
+ return
+ }
+
+ // Additional facts we know given the relationship between len and cap.
+ //
+ // TODO: Since prove now derives transitive relations, it
+ // should be sufficient to learn that len(w) <= cap(w) at the
+ // beginning of prove where we look for all len/cap ops.
+ if v.Op == OpSliceLen && r&lt == 0 && ft.caps[v.Args[0].ID] != nil {
+ // len(s) > w implies cap(s) > w
+ // len(s) >= w implies cap(s) >= w
+ // len(s) == w implies cap(s) >= w
+ ft.update(parent, ft.caps[v.Args[0].ID], w, d, r|gt)
+ }
+ if w.Op == OpSliceLen && r&gt == 0 && ft.caps[w.Args[0].ID] != nil {
+ // same, length on the RHS.
+ ft.update(parent, v, ft.caps[w.Args[0].ID], d, r|lt)
+ }
+ if v.Op == OpSliceCap && r&gt == 0 && ft.lens[v.Args[0].ID] != nil {
+ // cap(s) < w implies len(s) < w
+ // cap(s) <= w implies len(s) <= w
+ // cap(s) == w implies len(s) <= w
+ ft.update(parent, ft.lens[v.Args[0].ID], w, d, r|lt)
+ }
+ if w.Op == OpSliceCap && r&lt == 0 && ft.lens[w.Args[0].ID] != nil {
+ // same, capacity on the RHS.
+ ft.update(parent, v, ft.lens[w.Args[0].ID], d, r|gt)
+ }
+
+ // Process fence-post implications.
+ //
+ // First, make the condition > or >=.
+ if r == lt || r == lt|eq {
+ v, w = w, v
+ r = reverseBits[r]
+ }
+ switch r {
+ case gt:
+ if x, delta := isConstDelta(v); x != nil && delta == 1 {
+ // x+1 > w ⇒ x >= w
+ //
+ // This is useful for eliminating the
+ // growslice branch of append.
+ ft.update(parent, x, w, d, gt|eq)
+ } else if x, delta := isConstDelta(w); x != nil && delta == -1 {
+ // v > x-1 ⇒ v >= x
+ ft.update(parent, v, x, d, gt|eq)
+ }
+ case gt | eq:
+ if x, delta := isConstDelta(v); x != nil && delta == -1 {
+ // x-1 >= w && x > min ⇒ x > w
+ //
+ // Useful for i > 0; s[i-1].
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.min > opMin[v.Op]) || (d == unsigned && lim.umin > 0)) {
+ ft.update(parent, x, w, d, gt)
+ }
+ } else if x, delta := isConstDelta(w); x != nil && delta == 1 {
+ // v >= x+1 && x < max ⇒ v > x
+ lim, ok := ft.limits[x.ID]
+ if ok && ((d == signed && lim.max < opMax[w.Op]) || (d == unsigned && lim.umax < opUMax[w.Op])) {
+ ft.update(parent, v, x, d, gt)
+ }
+ }
+ }
+
+ // Process: x+delta > w (with delta constant)
+ // Only signed domain for now (useful for accesses to slices in loops).
+ if r == gt || r == gt|eq {
+ if x, delta := isConstDelta(v); x != nil && d == signed {
+ if parent.Func.pass.debug > 1 {
+ parent.Func.Warnl(parent.Pos, "x+d %s w; x:%v %v delta:%v w:%v d:%v", r, x, parent.String(), delta, w.AuxInt, d)
+ }
+ if !w.isGenericIntConst() {
+ // If we know that x+delta > w but w is not constant, we can derive:
+ // if delta < 0 and x > MinInt - delta, then x > w (because x+delta cannot underflow)
+ // This is useful for loops with bounds "len(slice)-K" (delta = -K)
+ if l, has := ft.limits[x.ID]; has && delta < 0 {
+ if (x.Type.Size() == 8 && l.min >= math.MinInt64-delta) ||
+ (x.Type.Size() == 4 && l.min >= math.MinInt32-delta) {
+ ft.update(parent, x, w, signed, r)
+ }
+ }
+ } else {
+ // With w,delta constants, we want to derive: x+delta > w ⇒ x > w-delta
+ //
+ // We compute (using integers of the correct size):
+ // min = w - delta
+ // max = MaxInt - delta
+ //
+ // And we prove that:
+ // if min<max: min < x AND x <= max
+ // if min>max: min < x OR x <= max
+ //
+ // This is always correct, even in case of overflow.
+ //
+ // If the initial fact is x+delta >= w instead, the derived conditions are:
+ // if min<max: min <= x AND x <= max
+ // if min>max: min <= x OR x <= max
+ //
+ // Notice the conditions for max are still <=, as they handle overflows.
+ var min, max int64
+ var vmin, vmax *Value
+ switch x.Type.Size() {
+ case 8:
+ min = w.AuxInt - delta
+ max = int64(^uint64(0)>>1) - delta
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst64, parent.Func.Config.Types.Int64, max)
+
+ case 4:
+ min = int64(int32(w.AuxInt) - int32(delta))
+ max = int64(int32(^uint32(0)>>1) - int32(delta))
+
+ vmin = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, min)
+ vmax = parent.NewValue0I(parent.Pos, OpConst32, parent.Func.Config.Types.Int32, max)
+
+ default:
+ panic("unimplemented")
+ }
+
+ if min < max {
+ // Record that x > min and max >= x
+ ft.update(parent, x, vmin, d, r)
+ ft.update(parent, vmax, x, d, r|eq)
+ } else {
+ // We know that either x>min OR x<=max. factsTable cannot record OR conditions,
+ // so let's see if we can already prove that one of them is false, in which case
+ // the other must be true
+ if l, has := ft.limits[x.ID]; has {
+ if l.max <= min {
+ if r&eq == 0 || l.max < min {
+ // x>min (x>=min) is impossible, so it must be x<=max
+ ft.update(parent, vmax, x, d, r|eq)
+ }
+ } else if l.min > max {
+ // x<=max is impossible, so it must be x>min
+ ft.update(parent, x, vmin, d, r)
+ }
+ }
+ }
+ }
+ }
+ }
+
+ // Look through value-preserving extensions.
+ // If the domain is appropriate for the pre-extension Type,
+ // repeat the update with the pre-extension Value.
+ if isCleanExt(v) {
+ switch {
+ case d == signed && v.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !v.Args[0].Type.IsSigned():
+ ft.update(parent, v.Args[0], w, d, r)
+ }
+ }
+ if isCleanExt(w) {
+ switch {
+ case d == signed && w.Args[0].Type.IsSigned():
+ fallthrough
+ case d == unsigned && !w.Args[0].Type.IsSigned():
+ ft.update(parent, v, w.Args[0], d, r)
+ }
+ }
+}
+
+var opMin = map[Op]int64{
+ OpAdd64: math.MinInt64, OpSub64: math.MinInt64,
+ OpAdd32: math.MinInt32, OpSub32: math.MinInt32,
+}
+
+var opMax = map[Op]int64{
+ OpAdd64: math.MaxInt64, OpSub64: math.MaxInt64,
+ OpAdd32: math.MaxInt32, OpSub32: math.MaxInt32,
+}
+
+var opUMax = map[Op]uint64{
+ OpAdd64: math.MaxUint64, OpSub64: math.MaxUint64,
+ OpAdd32: math.MaxUint32, OpSub32: math.MaxUint32,
+}
+
+// isNonNegative reports whether v is known to be non-negative.
+func (ft *factsTable) isNonNegative(v *Value) bool {
+ if isNonNegative(v) {
+ return true
+ }
+
+ var max int64
+ switch v.Type.Size() {
+ case 1:
+ max = math.MaxInt8
+ case 2:
+ max = math.MaxInt16
+ case 4:
+ max = math.MaxInt32
+ case 8:
+ max = math.MaxInt64
+ default:
+ panic("unexpected integer size")
+ }
+
+ // Check if the recorded limits can prove that the value is positive
+
+ if l, has := ft.limits[v.ID]; has && (l.min >= 0 || l.umax <= uint64(max)) {
+ return true
+ }
+
+ // Check if v = x+delta, and we can use x's limits to prove that it's positive
+ if x, delta := isConstDelta(v); x != nil {
+ if l, has := ft.limits[x.ID]; has {
+ if delta > 0 && l.min >= -delta && l.max <= max-delta {
+ return true
+ }
+ if delta < 0 && l.min >= -delta {
+ return true
+ }
+ }
+ }
+
+ // Check if v is a value-preserving extension of a non-negative value.
+ if isCleanExt(v) && ft.isNonNegative(v.Args[0]) {
+ return true
+ }
+
+ // Check if the signed poset can prove that the value is >= 0
+ return ft.orderS.OrderedOrEqual(ft.zero, v)
+}
+
+// checkpoint saves the current state of known relations.
+// Called when descending on a branch.
+func (ft *factsTable) checkpoint() {
+ if ft.unsat {
+ ft.unsatDepth++
+ }
+ ft.stack = append(ft.stack, checkpointFact)
+ ft.limitStack = append(ft.limitStack, checkpointBound)
+ ft.orderS.Checkpoint()
+ ft.orderU.Checkpoint()
+}
+
+// restore restores known relation to the state just
+// before the previous checkpoint.
+// Called when backing up on a branch.
+func (ft *factsTable) restore() {
+ if ft.unsatDepth > 0 {
+ ft.unsatDepth--
+ } else {
+ ft.unsat = false
+ }
+ for {
+ old := ft.stack[len(ft.stack)-1]
+ ft.stack = ft.stack[:len(ft.stack)-1]
+ if old == checkpointFact {
+ break
+ }
+ if old.r == lt|eq|gt {
+ delete(ft.facts, old.p)
+ } else {
+ ft.facts[old.p] = old.r
+ }
+ }
+ for {
+ old := ft.limitStack[len(ft.limitStack)-1]
+ ft.limitStack = ft.limitStack[:len(ft.limitStack)-1]
+ if old.vid == 0 { // checkpointBound
+ break
+ }
+ if old.limit == noLimit {
+ delete(ft.limits, old.vid)
+ } else {
+ ft.limits[old.vid] = old.limit
+ }
+ }
+ ft.orderS.Undo()
+ ft.orderU.Undo()
+}
+
+func lessByID(v, w *Value) bool {
+ if v == nil && w == nil {
+ // Should not happen, but just in case.
+ return false
+ }
+ if v == nil {
+ return true
+ }
+ return w != nil && v.ID < w.ID
+}
+
+var (
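+	// reverseBits maps a relation on (v, w) to the equivalent relation on
+	// (w, v): the lt and gt bits are swapped and eq is preserved, so for
+	// example reverseBits[lt|eq] == gt|eq.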
+ reverseBits = [...]relation{0, 4, 2, 6, 1, 5, 3, 7}
+
+ // maps what we learn when the positive branch is taken.
+ // For example:
+ // OpLess8: {signed, lt},
+ // v1 = (OpLess8 v2 v3).
+	// If the v1 branch is taken, then we learn that the relation
+	// between v2 and v3 can be at most lt (that is, v2 < v3).
+ domainRelationTable = map[Op]struct {
+ d domain
+ r relation
+ }{
+ OpEq8: {signed | unsigned, eq},
+ OpEq16: {signed | unsigned, eq},
+ OpEq32: {signed | unsigned, eq},
+ OpEq64: {signed | unsigned, eq},
+ OpEqPtr: {pointer, eq},
+
+ OpNeq8: {signed | unsigned, lt | gt},
+ OpNeq16: {signed | unsigned, lt | gt},
+ OpNeq32: {signed | unsigned, lt | gt},
+ OpNeq64: {signed | unsigned, lt | gt},
+ OpNeqPtr: {pointer, lt | gt},
+
+ OpLess8: {signed, lt},
+ OpLess8U: {unsigned, lt},
+ OpLess16: {signed, lt},
+ OpLess16U: {unsigned, lt},
+ OpLess32: {signed, lt},
+ OpLess32U: {unsigned, lt},
+ OpLess64: {signed, lt},
+ OpLess64U: {unsigned, lt},
+
+ OpLeq8: {signed, lt | eq},
+ OpLeq8U: {unsigned, lt | eq},
+ OpLeq16: {signed, lt | eq},
+ OpLeq16U: {unsigned, lt | eq},
+ OpLeq32: {signed, lt | eq},
+ OpLeq32U: {unsigned, lt | eq},
+ OpLeq64: {signed, lt | eq},
+ OpLeq64U: {unsigned, lt | eq},
+
+ // For these ops, the negative branch is different: we can only
+ // prove signed/GE (signed/GT) if we can prove that arg0 is non-negative.
+ // See the special case in addBranchRestrictions.
+ OpIsInBounds: {signed | unsigned, lt}, // 0 <= arg0 < arg1
+ OpIsSliceInBounds: {signed | unsigned, lt | eq}, // 0 <= arg0 <= arg1
+ }
+)
+
+// cleanup returns the posets to the free list
+func (ft *factsTable) cleanup(f *Func) {
+ for _, po := range []*poset{ft.orderS, ft.orderU} {
+ // Make sure it's empty as it should be. A non-empty poset
+ // might cause errors and miscompilations if reused.
+ if checkEnabled {
+ if err := po.CheckEmpty(); err != nil {
+ f.Fatalf("poset not empty after function %s: %v", f.Name, err)
+ }
+ }
+ f.retPoset(po)
+ }
+}
+
+// prove removes redundant BlockIf branches that can be inferred
+// from previous dominating comparisons.
+//
+// By far, the most common redundant pairs are generated by bounds checking.
+// For example, for the code:
+//
+// a[i] = 4
+// foo(a[i])
+//
+// The compiler will generate the following code:
+//
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// a[i] = 4
+// if i >= len(a) {
+// panic("not in bounds")
+// }
+// foo(a[i])
+//
+// The second comparison i >= len(a) is clearly redundant because if the
+// else branch of the first comparison is executed, we already know that i < len(a).
+// The code for the second panic can be removed.
+//
+// prove works by finding contradictions and trimming branches whose
+// conditions are unsatisfiable given the branches leading up to them.
+// It tracks a "fact table" of branch conditions. For each branching
+// block, it asserts the branch conditions that uniquely dominate that
+// block, and then separately asserts the block's branch condition and
+// its negation. If either leads to a contradiction, it can trim that
+// successor.
+func prove(f *Func) {
+ ft := newFactsTable(f)
+ ft.checkpoint()
+
+ var lensVars map[*Block][]*Value
+
+ // Find length and capacity ops.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ // We don't care about dead values.
+ // (There can be some that are CSEd but not removed yet.)
+ continue
+ }
+ switch v.Op {
+ case OpStringLen:
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ case OpSliceLen:
+ if ft.lens == nil {
+ ft.lens = map[ID]*Value{}
+ }
+ // Set all len Values for the same slice as equal in the poset.
+ // The poset handles transitive relations, so Values related to
+ // any OpSliceLen for this slice will be correctly related to others.
+ if l, ok := ft.lens[v.Args[0].ID]; ok {
+ ft.update(b, v, l, signed, eq)
+ } else {
+ ft.lens[v.Args[0].ID] = v
+ }
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ case OpSliceCap:
+ if ft.caps == nil {
+ ft.caps = map[ID]*Value{}
+ }
+ // Same as case OpSliceLen above, but for slice cap.
+ if c, ok := ft.caps[v.Args[0].ID]; ok {
+ ft.update(b, v, c, signed, eq)
+ } else {
+ ft.caps[v.Args[0].ID] = v
+ }
+ ft.update(b, v, ft.zero, signed, gt|eq)
+ if v.Args[0].Op == OpSliceMake {
+ if lensVars == nil {
+ lensVars = make(map[*Block][]*Value)
+ }
+ lensVars[b] = append(lensVars[b], v)
+ }
+ }
+ }
+ }
+
+ // Find induction variables. Currently, findIndVars
+ // is limited to one induction variable per block.
+ var indVars map[*Block]indVar
+ for _, v := range findIndVar(f) {
+ if indVars == nil {
+ indVars = make(map[*Block]indVar)
+ }
+ indVars[v.entry] = v
+ }
+
+ // current node state
+ type walkState int
+ const (
+ descend walkState = iota
+ simplify
+ )
+ // work maintains the DFS stack.
+ type bp struct {
+ block *Block // current handled block
+ state walkState // what's to do
+ }
+ work := make([]bp, 0, 256)
+ work = append(work, bp{
+ block: f.Entry,
+ state: descend,
+ })
+
+ idom := f.Idom()
+ sdom := f.Sdom()
+
+ // DFS on the dominator tree.
+ //
+ // For efficiency, we consider only the dominator tree rather
+ // than the entire flow graph. On the way down, we consider
+ // incoming branches and accumulate conditions that uniquely
+ // dominate the current block. If we discover a contradiction,
+ // we can eliminate the entire block and all of its children.
+ // On the way back up, we consider outgoing branches that
+ // haven't already been considered. This way we consider each
+ // branch condition only once.
+ for len(work) > 0 {
+ node := work[len(work)-1]
+ work = work[:len(work)-1]
+ parent := idom[node.block.ID]
+ branch := getBranch(sdom, parent, node.block)
+
+ switch node.state {
+ case descend:
+ ft.checkpoint()
+
+			// Entering the block, add the block-dependent facts that we collected
+			// at the beginning: induction variables and lens/caps of slices.
+ if iv, ok := indVars[node.block]; ok {
+ addIndVarRestrictions(ft, parent, iv)
+ }
+ if lens, ok := lensVars[node.block]; ok {
+ for _, v := range lens {
+ switch v.Op {
+ case OpSliceLen:
+ ft.update(node.block, v, v.Args[0].Args[1], signed, eq)
+ case OpSliceCap:
+ ft.update(node.block, v, v.Args[0].Args[2], signed, eq)
+ }
+ }
+ }
+
+ if branch != unknown {
+ addBranchRestrictions(ft, parent, branch)
+ if ft.unsat {
+ // node.block is unreachable.
+ // Remove it and don't visit
+ // its children.
+ removeBranch(parent, branch)
+ ft.restore()
+ break
+ }
+ // Otherwise, we can now commit to
+ // taking this branch. We'll restore
+ // ft when we unwind.
+ }
+
+ // Add inductive facts for phis in this block.
+ addLocalInductiveFacts(ft, node.block)
+
+ work = append(work, bp{
+ block: node.block,
+ state: simplify,
+ })
+ for s := sdom.Child(node.block); s != nil; s = sdom.Sibling(s) {
+ work = append(work, bp{
+ block: s,
+ state: descend,
+ })
+ }
+
+ case simplify:
+ simplifyBlock(sdom, ft, node.block)
+ ft.restore()
+ }
+ }
+
+ ft.restore()
+
+ ft.cleanup(f)
+}
+
+// getBranch returns the branch (positive, negative, or unknown) through
+// which p reaches b. p is the immediate dominator of b.
+func getBranch(sdom SparseTree, p *Block, b *Block) branch {
+ if p == nil || p.Kind != BlockIf {
+ return unknown
+ }
+ // If p and p.Succs[0] are dominators it means that every path
+ // from entry to b passes through p and p.Succs[0]. We care that
+ // no path from entry to b passes through p.Succs[1]. If p.Succs[0]
+ // has one predecessor then (apart from the degenerate case),
+ // there is no path from entry that can reach b through p.Succs[1].
+ // TODO: how about p->yes->b->yes, i.e. a loop in yes.
+ if sdom.IsAncestorEq(p.Succs[0].b, b) && len(p.Succs[0].b.Preds) == 1 {
+ return positive
+ }
+ if sdom.IsAncestorEq(p.Succs[1].b, b) && len(p.Succs[1].b.Preds) == 1 {
+ return negative
+ }
+ return unknown
+}
+
+// addIndVarRestrictions updates the factsTable ft with the facts
+// learned from the induction variable iv, which drives the loop
+// starting in Block b.
+func addIndVarRestrictions(ft *factsTable, b *Block, iv indVar) {
+ d := signed
+ if ft.isNonNegative(iv.min) && ft.isNonNegative(iv.max) {
+ d |= unsigned
+ }
+
+ if iv.flags&indVarMinExc == 0 {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt|eq)
+ } else {
+ addRestrictions(b, ft, d, iv.min, iv.ind, lt)
+ }
+
+ if iv.flags&indVarMaxInc == 0 {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt)
+ } else {
+ addRestrictions(b, ft, d, iv.ind, iv.max, lt|eq)
+ }
+}
+
+// addBranchRestrictions updates the factsTable ft with the facts learned when
+// branching from Block b in direction br.
+func addBranchRestrictions(ft *factsTable, b *Block, br branch) {
+ c := b.Controls[0]
+ switch br {
+ case negative:
+ addRestrictions(b, ft, boolean, nil, c, eq)
+ case positive:
+ addRestrictions(b, ft, boolean, nil, c, lt|gt)
+ default:
+ panic("unknown branch")
+ }
+ if tr, has := domainRelationTable[c.Op]; has {
+ // When we branched from parent we learned a new set of
+ // restrictions. Update the factsTable accordingly.
+ d := tr.d
+ if d == signed && ft.isNonNegative(c.Args[0]) && ft.isNonNegative(c.Args[1]) {
+ d |= unsigned
+ }
+ switch c.Op {
+ case OpIsInBounds, OpIsSliceInBounds:
+ // 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ //
+ // On the positive branch, we learn:
+ // signed: 0 <= a0 < a1 (or 0 <= a0 <= a1)
+ // unsigned: a0 < a1 (or a0 <= a1)
+ //
+ // On the negative branch, we learn (0 > a0 ||
+ // a0 >= a1). In the unsigned domain, this is
+ // simply a0 >= a1 (which is the reverse of the
+ // positive branch, so nothing surprising).
+ // But in the signed domain, we can't express the ||
+ // condition, so check if a0 is non-negative instead,
+ // to be able to learn something.
+ switch br {
+ case negative:
+ d = unsigned
+ if ft.isNonNegative(c.Args[0]) {
+ d |= signed
+ }
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, signed, ft.zero, c.Args[0], lt|eq)
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ default:
+ switch br {
+ case negative:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r^(lt|gt|eq))
+ case positive:
+ addRestrictions(b, ft, d, c.Args[0], c.Args[1], tr.r)
+ }
+ }
+
+ }
+}
+
+// addRestrictions updates restrictions from the immediate
+// dominating block (p) using r.
+func addRestrictions(parent *Block, ft *factsTable, t domain, v, w *Value, r relation) {
+ if t == 0 {
+ // Trivial case: nothing to do.
+		// Should not happen, but just in case.
+ return
+ }
+ for i := domain(1); i <= t; i <<= 1 {
+ if t&i == 0 {
+ continue
+ }
+ ft.update(parent, v, w, i, r)
+ }
+}
+
+// addLocalInductiveFacts adds inductive facts when visiting b, where
+// b is a join point in a loop. In contrast with findIndVar, this
+// depends on facts established for b, which is why it happens when
+// visiting b. addLocalInductiveFacts specifically targets the pattern
+// created by OFORUNTIL, which isn't detected by findIndVar.
+//
+// TODO: It would be nice to combine this with findIndVar.
+func addLocalInductiveFacts(ft *factsTable, b *Block) {
+ // This looks for a specific pattern of induction:
+ //
+ // 1. i1 = OpPhi(min, i2) in b
+ // 2. i2 = i1 + 1
+ // 3. i2 < max at exit from b.Preds[1]
+ // 4. min < max
+ //
+ // If all of these conditions are true, then i1 < max and i1 >= min.
+
+	// Ensure b is a loop header node: it must have exactly two predecessors.
+ if len(b.Preds) != 2 {
+ return
+ }
+
+ for _, i1 := range b.Values {
+ if i1.Op != OpPhi {
+ continue
+ }
+
+ // Check for conditions 1 and 2. This is easy to do
+ // and will throw out most phis.
+ min, i2 := i1.Args[0], i1.Args[1]
+ if i1q, delta := isConstDelta(i2); i1q != i1 || delta != 1 {
+ continue
+ }
+
+ // Try to prove condition 3. We can't just query the
+ // fact table for this because we don't know what the
+ // facts of b.Preds[1] are (in general, b.Preds[1] is
+ // a loop-back edge, so we haven't even been there
+ // yet). As a conservative approximation, we look for
+ // this condition in the predecessor chain until we
+ // hit a join point.
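+
+		// uniquePred returns the sole predecessor of a block, or nil once we
+		// reach a join point.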
+ uniquePred := func(b *Block) *Block {
+ if len(b.Preds) == 1 {
+ return b.Preds[0].b
+ }
+ return nil
+ }
+ pred, child := b.Preds[1].b, b
+ for ; pred != nil; pred, child = uniquePred(pred), pred {
+ if pred.Kind != BlockIf {
+ continue
+ }
+ control := pred.Controls[0]
+
+ br := unknown
+ if pred.Succs[0].b == child {
+ br = positive
+ }
+ if pred.Succs[1].b == child {
+ if br != unknown {
+ continue
+ }
+ br = negative
+ }
+ if br == unknown {
+ continue
+ }
+
+ tr, has := domainRelationTable[control.Op]
+ if !has {
+ continue
+ }
+ r := tr.r
+ if br == negative {
+ // Negative branch taken to reach b.
+ // Complement the relations.
+ r = (lt | eq | gt) ^ r
+ }
+
+ // Check for i2 < max or max > i2.
+ var max *Value
+ if r == lt && control.Args[0] == i2 {
+ max = control.Args[1]
+ } else if r == gt && control.Args[1] == i2 {
+ max = control.Args[0]
+ } else {
+ continue
+ }
+
+ // Check condition 4 now that we have a
+ // candidate max. For this we can query the
+ // fact table. We "prove" min < max by showing
+ // that min >= max is unsat. (This may simply
+ // compare two constants; that's fine.)
+ ft.checkpoint()
+ ft.update(b, min, max, tr.d, gt|eq)
+ proved := ft.unsat
+ ft.restore()
+
+ if proved {
+ // We know that min <= i1 < max.
+ if b.Func.pass.debug > 0 {
+ printIndVar(b, i1, min, max, 1, 0)
+ }
+ ft.update(b, min, i1, tr.d, lt|eq)
+ ft.update(b, i1, max, tr.d, lt)
+ }
+ }
+ }
+}
+
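+// ctzNonZeroOp maps each count-trailing-zeros op to the variant that may
+// assume its argument is non-zero.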
+var ctzNonZeroOp = map[Op]Op{OpCtz8: OpCtz8NonZero, OpCtz16: OpCtz16NonZero, OpCtz32: OpCtz32NonZero, OpCtz64: OpCtz64NonZero}
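+
+// mostNegativeDividend maps each division and modulo op to the most negative
+// value of its operand type; dividing that value by -1 is the overflow case
+// that requires fix-up code.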
+var mostNegativeDividend = map[Op]int64{
+ OpDiv16: -1 << 15,
+ OpMod16: -1 << 15,
+ OpDiv32: -1 << 31,
+ OpMod32: -1 << 31,
+ OpDiv64: -1 << 63,
+ OpMod64: -1 << 63}
+
+// simplifyBlock simplifies some constant values in b and evaluates
+// branches to non-uniquely dominated successors of b.
+func simplifyBlock(sdom SparseTree, ft *factsTable, b *Block) {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpSlicemask:
+ // Replace OpSlicemask operations in b with constants where possible.
+ x, delta := isConstDelta(v.Args[0])
+ if x == nil {
+ continue
+ }
+ // slicemask(x + y)
+ // if x is larger than -y (y is negative), then slicemask is -1.
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ continue
+ }
+ if lim.umin > uint64(-delta) {
+ if v.Args[0].Op == OpAdd64 {
+ v.reset(OpConst64)
+ } else {
+ v.reset(OpConst32)
+ }
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved slicemask not needed")
+ }
+ v.AuxInt = -1
+ }
+ case OpCtz8, OpCtz16, OpCtz32, OpCtz64:
+ // On some architectures, notably amd64, we can generate much better
+ // code for CtzNN if we know that the argument is non-zero.
+ // Capture that information here for use in arch-specific optimizations.
+ x := v.Args[0]
+ lim, ok := ft.limits[x.ID]
+ if !ok {
+ continue
+ }
+ if lim.umin > 0 || lim.min > 0 || lim.max < 0 {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v non-zero", v.Op)
+ }
+ v.Op = ctzNonZeroOp[v.Op]
+ }
+ case OpRsh8x8, OpRsh8x16, OpRsh8x32, OpRsh8x64,
+ OpRsh16x8, OpRsh16x16, OpRsh16x32, OpRsh16x64,
+ OpRsh32x8, OpRsh32x16, OpRsh32x32, OpRsh32x64,
+ OpRsh64x8, OpRsh64x16, OpRsh64x32, OpRsh64x64:
+ // Check whether, for a >> b, we know that a is non-negative and b is
+ // at least the number of bits in a minus one, so all of a's bits
+ // except the sign bit are shifted out. If so, a >> b is zero.
+ bits := 8 * v.Type.Size()
+ if v.Args[1].isGenericIntConst() && v.Args[1].AuxInt >= bits-1 && ft.isNonNegative(v.Args[0]) {
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v shifts to zero", v.Op)
+ }
+ switch bits {
+ case 64:
+ v.reset(OpConst64)
+ case 32:
+ v.reset(OpConst32)
+ case 16:
+ v.reset(OpConst16)
+ case 8:
+ v.reset(OpConst8)
+ default:
+ panic("unexpected integer size")
+ }
+ v.AuxInt = 0
+ continue // Be sure not to fallthrough - this is no longer OpRsh.
+ }
+ // If the Rsh hasn't been replaced with 0, still check if it is bounded.
+ fallthrough
+ case OpLsh8x8, OpLsh8x16, OpLsh8x32, OpLsh8x64,
+ OpLsh16x8, OpLsh16x16, OpLsh16x32, OpLsh16x64,
+ OpLsh32x8, OpLsh32x16, OpLsh32x32, OpLsh32x64,
+ OpLsh64x8, OpLsh64x16, OpLsh64x32, OpLsh64x64,
+ OpRsh8Ux8, OpRsh8Ux16, OpRsh8Ux32, OpRsh8Ux64,
+ OpRsh16Ux8, OpRsh16Ux16, OpRsh16Ux32, OpRsh16Ux64,
+ OpRsh32Ux8, OpRsh32Ux16, OpRsh32Ux32, OpRsh32Ux64,
+ OpRsh64Ux8, OpRsh64Ux16, OpRsh64Ux32, OpRsh64Ux64:
+ // Check whether, for a shift a << b or a >> b, we know that b
+ // is strictly less than the number of bits in a (i.e. the shift is bounded).
+ by := v.Args[1]
+ lim, ok := ft.limits[by.ID]
+ if !ok {
+ continue
+ }
+ bits := 8 * v.Args[0].Type.Size()
+ if lim.umax < uint64(bits) || (lim.max < bits && ft.isNonNegative(by)) {
+ v.AuxInt = 1 // see shiftIsBounded
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v bounded", v.Op)
+ }
+ }
+ case OpDiv16, OpDiv32, OpDiv64, OpMod16, OpMod32, OpMod64:
+ // On amd64 and 386 fix-up code can be avoided if we know
+ // the divisor is not -1 or the dividend > MinIntNN.
+ // Don't modify AuxInt on other architectures,
+ // as that can interfere with CSE.
+ // TODO: add other architectures?
+ if b.Func.Config.arch != "386" && b.Func.Config.arch != "amd64" {
+ break
+ }
+ divr := v.Args[1]
+ divrLim, divrLimok := ft.limits[divr.ID]
+ divd := v.Args[0]
+ divdLim, divdLimok := ft.limits[divd.ID]
+ if (divrLimok && (divrLim.max < -1 || divrLim.min > -1)) ||
+ (divdLimok && divdLim.min > mostNegativeDividend[v.Op]) {
+ // See DivisionNeedsFixUp in rewrite.go.
+ // v.AuxInt = 1 means we have proved both that the divisor is not -1
+ // and that the dividend is not the most negative integer,
+ // so we do not need to add fix-up code.
+ v.AuxInt = 1
+ if b.Func.pass.debug > 0 {
+ b.Func.Warnl(v.Pos, "Proved %v does not need fix-up", v.Op)
+ }
+ }
+ }
+ }
+
+ if b.Kind != BlockIf {
+ return
+ }
+
+ // Consider outgoing edges from this block.
+ parent := b
+ for i, branch := range [...]branch{positive, negative} {
+ child := parent.Succs[i].b
+ if getBranch(sdom, parent, child) != unknown {
+ // For edges to uniquely dominated blocks, we
+ // already did this when we visited the child.
+ continue
+ }
+ // For edges to other blocks, this can trim a branch
+ // even if we couldn't get rid of the child itself.
+ ft.checkpoint()
+ addBranchRestrictions(ft, parent, branch)
+ unsat := ft.unsat
+ ft.restore()
+ if unsat {
+ // This branch is impossible, so remove it
+ // from the block.
+ removeBranch(parent, branch)
+ // No point in considering the other branch.
+ // (It *is* possible for both to be
+ // unsatisfiable since the fact table is
+ // incomplete. We could turn this into a
+ // BlockExit, but it doesn't seem worth it.)
+ break
+ }
+ }
+}
+
+func removeBranch(b *Block, branch branch) {
+ c := b.Controls[0]
+ if b.Func.pass.debug > 0 {
+ verb := "Proved"
+ if branch == positive {
+ verb = "Disproved"
+ }
+ if b.Func.pass.debug > 1 {
+ b.Func.Warnl(b.Pos, "%s %s (%s)", verb, c.Op, c)
+ } else {
+ b.Func.Warnl(b.Pos, "%s %s", verb, c.Op)
+ }
+ }
+ if c != nil && c.Pos.IsStmt() == src.PosIsStmt && c.Pos.SameFileAndLine(b.Pos) {
+ // attempt to preserve statement marker.
+ b.Pos = b.Pos.WithIsStmt()
+ }
+ b.Kind = BlockFirst
+ b.ResetControls()
+ if branch == positive {
+ b.swapSuccessors()
+ }
+}
+
+// isNonNegative reports whether v is known to be greater than or equal to zero.
+func isNonNegative(v *Value) bool {
+ if !v.Type.IsInteger() {
+ v.Fatalf("isNonNegative bad type: %v", v.Type)
+ }
+ // TODO: return true if !v.Type.IsSigned()
+ // SSA isn't type-safe enough to do that now (issue 37753).
+ // The checks below depend only on the pattern of bits.
+
+ switch v.Op {
+ case OpConst64:
+ return v.AuxInt >= 0
+
+ case OpConst32:
+ return int32(v.AuxInt) >= 0
+
+ case OpConst16:
+ return int16(v.AuxInt) >= 0
+
+ case OpConst8:
+ return int8(v.AuxInt) >= 0
+
+ case OpStringLen, OpSliceLen, OpSliceCap,
+ OpZeroExt8to64, OpZeroExt16to64, OpZeroExt32to64,
+ OpZeroExt8to32, OpZeroExt16to32, OpZeroExt8to16,
+ OpCtz64, OpCtz32, OpCtz16, OpCtz8:
+ return true
+
+ case OpRsh64Ux64, OpRsh32Ux64:
+ by := v.Args[1]
+ return by.Op == OpConst64 && by.AuxInt > 0
+
+ case OpRsh64x64, OpRsh32x64, OpRsh8x64, OpRsh16x64, OpRsh32x32, OpRsh64x32,
+ OpSignExt32to64, OpSignExt16to64, OpSignExt8to64, OpSignExt16to32, OpSignExt8to32:
+ return isNonNegative(v.Args[0])
+
+ case OpAnd64, OpAnd32, OpAnd16, OpAnd8:
+ return isNonNegative(v.Args[0]) || isNonNegative(v.Args[1])
+
+ case OpMod64, OpMod32, OpMod16, OpMod8,
+ OpDiv64, OpDiv32, OpDiv16, OpDiv8,
+ OpOr64, OpOr32, OpOr16, OpOr8,
+ OpXor64, OpXor32, OpXor16, OpXor8:
+ return isNonNegative(v.Args[0]) && isNonNegative(v.Args[1])
+
+ // We could handle OpPhi here, but the improvements from doing
+ // so are very minor, and it is neither simple nor cheap.
+ }
+ return false
+}
+
+// isConstDelta returns non-nil if v is equivalent to w+delta (signed).
+func isConstDelta(v *Value) (w *Value, delta int64) {
+ cop := OpConst64
+ switch v.Op {
+ case OpAdd32, OpSub32:
+ cop = OpConst32
+ }
+ switch v.Op {
+ case OpAdd64, OpAdd32:
+ if v.Args[0].Op == cop {
+ return v.Args[1], v.Args[0].AuxInt
+ }
+ if v.Args[1].Op == cop {
+ return v.Args[0], v.Args[1].AuxInt
+ }
+ case OpSub64, OpSub32:
+ if v.Args[1].Op == cop {
+ aux := v.Args[1].AuxInt
+ if aux != -aux { // Overflow; too bad
+ return v.Args[0], -aux
+ }
+ }
+ }
+ return nil, 0
+}
+
+// isCleanExt reports whether v is the result of a value-preserving
+// sign or zero extension.
+func isCleanExt(v *Value) bool {
+ switch v.Op {
+ case OpSignExt8to16, OpSignExt8to32, OpSignExt8to64,
+ OpSignExt16to32, OpSignExt16to64, OpSignExt32to64:
+ // signed -> signed is the only value-preserving sign extension
+ return v.Args[0].Type.IsSigned() && v.Type.IsSigned()
+
+ case OpZeroExt8to16, OpZeroExt8to32, OpZeroExt8to64,
+ OpZeroExt16to32, OpZeroExt16to64, OpZeroExt32to64:
+ // unsigned -> signed/unsigned are value-preserving zero extensions
+ return !v.Args[0].Type.IsSigned()
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc.go b/src/cmd/compile/internal/ssa/regalloc.go
new file mode 100644
index 0000000..64792d0
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc.go
@@ -0,0 +1,2829 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Register allocation.
+//
+// We use a version of a linear scan register allocator. We treat the
+// whole function as a single long basic block and run through
+// it using a greedy register allocator. Then all merge edges
+// (those targeting a block with len(Preds)>1) are processed to
+// shuffle data into the place that the target of the edge expects.
+//
+// The greedy allocator moves values into registers just before they
+// are used, spills registers only when necessary, and spills the
+// value whose next use is farthest in the future.
+//
+// The register allocator requires that a block is not scheduled until
+// at least one of its predecessors has been scheduled. The most recent
+// such predecessor provides the starting register state for a block.
+//
+// It also requires that there are no critical edges (critical =
+// comes from a block with >1 successor and goes to a block with >1
+// predecessor). This makes it easy to add fixup code on merge edges -
+// the source of a merge edge has only one successor, so we can add
+// fixup code to the end of that block.
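+//
+// For example (editorial illustration, not taken from the code below): if the
+// start state of a merge block expects value v in AX, but a non-primary
+// predecessor ends with v in BX (or only in a spill slot), the edge
+// processing step appends a BX->AX copy (or a LoadReg) to that predecessor.
+// That is always legal because, with no critical edges, the predecessor has
+// exactly one successor.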
+
+// Spilling
+//
+// During the normal course of the allocator, we might throw a still-live
+// value out of all registers. When that value is subsequently used, we must
+// load it from a slot on the stack. We must also issue an instruction to
+// initialize that stack location with a copy of v.
+//
+// pre-regalloc:
+// (1) v = Op ...
+// (2) x = Op ...
+// (3) ... = Op v ...
+//
+// post-regalloc:
+// (1) v = Op ... : AX // computes v, store result in AX
+// s = StoreReg v // spill v to a stack slot
+// (2) x = Op ... : AX // some other op uses AX
+// c = LoadReg s : CX // restore v from stack slot
+// (3) ... = Op c ... // use the restored value
+//
+// Allocation occurs normally until we reach (3) and we realize we have
+// a use of v and it isn't in any register. At that point, we allocate
+// a spill (a StoreReg) for v. We can't determine the correct place for
+// the spill at this point, so we allocate the spill as blockless initially.
+// The restore is then generated to load v back into a register so it can
+// be used. Subsequent uses of v will use the restored value c instead.
+//
+// What remains is the question of where to schedule the spill.
+// During allocation, we keep track of the dominator of all restores of v.
+// The spill of v must dominate that block. The spill must also be issued at
+// a point where v is still in a register.
+//
+// To find the right place, start at b, the block which dominates all restores.
+// - If b is v.Block, then issue the spill right after v.
+// It is known to be in a register at that point, and dominates any restores.
+// - Otherwise, if v is in a register at the start of b,
+// put the spill of v at the start of b.
+// - Otherwise, set b = immediate dominator of b, and repeat.
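+//
+// As a rough sketch of that walk (editorial illustration only; idom,
+// inRegisterAtStart, and the placeSpill* helpers are placeholders, not the
+// allocator's actual API):
+//
+//	b := dominatorOfAllRestores(v)
+//	for {
+//		if b == v.Block {
+//			placeSpillAfter(v) // v is in a register right after it is computed
+//			break
+//		}
+//		if inRegisterAtStart(v, b) {
+//			placeSpillAtStart(v, b) // spill while v is still in a register
+//			break
+//		}
+//		b = idom(b) // otherwise retry at the immediate dominator
+//	}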
+//
+// Phi values are special, as always. We define two kinds of phis, those
+// where the merge happens in a register (a "register" phi) and those where
+// the merge happens in a stack location (a "stack" phi).
+//
+// A register phi must have the phi and all of its inputs allocated to the
+// same register. Register phis are spilled similarly to regular ops.
+//
+// A stack phi must have the phi and all of its inputs allocated to the same
+// stack location. Stack phis start out life already spilled - each phi
+// input must be a store (using StoreReg) at the end of the corresponding
+// predecessor block.
+//	b1: y = ... : AX        b2: z = ... : BX
+//	    y2 = StoreReg y         z2 = StoreReg z
+//	    goto b3                 goto b3
+//
+//	b3: x = phi(y2, z2)
+// The stack allocator knows that StoreReg args of stack-allocated phis
+// must be allocated to the same stack slot as the phi that uses them.
+// x is now a spilled value and a restore must appear before its first use.
+
+// TODO
+
+// Use an affinity graph to mark two values which should use the
+// same register. This affinity graph will be used to prefer certain
+// registers for allocation. This affinity helps eliminate moves that
+// are required for phi implementations and helps generate allocations
+// for 2-register architectures.
+
+// Note: regalloc generates a not-quite-SSA output. If we have:
+//
+//	b1: x = ... : AX
+//	    x2 = StoreReg x
+//	    ... AX gets reused for something else ...
+//	    if ... goto b3 else b4
+//
+//	b3: x3 = LoadReg x2 : BX        b4: x4 = LoadReg x2 : CX
+//	    ... use x3 ...                  ... use x4 ...
+//
+//	b2: ... use x3 ...
+//
+// If b3 is the primary predecessor of b2, then we use x3 in b2 and
+// add a x4:CX->BX copy at the end of b4.
+// But the definition of x3 doesn't dominate b2. We should really
+// insert an extra phi at the start of b2 (x5=phi(x3,x4):BX) to keep
+// SSA form. For now, we ignore this problem as remaining in strict
+// SSA form isn't needed after regalloc. We'll just leave the use
+// of x3 not dominated by the definition of x3, and the CX->BX copy
+// will have no use (so don't run deadcode after regalloc!).
+// TODO: maybe we should introduce these extra phis?
+
+package ssa
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+ "fmt"
+ "internal/buildcfg"
+ "math/bits"
+ "unsafe"
+)
+
+const (
+ moveSpills = iota
+ logSpills
+ regDebug
+ stackDebug
+)
+
+// distance is a measure of how far into the future values are used.
+// distance is measured in units of instructions.
+const (
+ likelyDistance = 1
+ normalDistance = 10
+ unlikelyDistance = 100
+)
+
+// regalloc performs register allocation on f. It sets f.RegAlloc
+// to the resulting allocation.
+func regalloc(f *Func) {
+ var s regAllocState
+ s.init(f)
+ s.regalloc(f)
+}
+
+type register uint8
+
+const noRegister register = 255
+
+// For bulk initializing
+var noRegisters [32]register = [32]register{
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+ noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister, noRegister,
+}
+
+// A regMask encodes a set of machine registers.
+// TODO: regMask -> regSet?
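+// For example (editorial illustration): a mask containing registers 0 and 3
+// is regMask(1)<<0 | regMask(1)<<3; String() renders it as "r0 r3".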
+type regMask uint64
+
+func (m regMask) String() string {
+ s := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if s != "" {
+ s += " "
+ }
+ s += fmt.Sprintf("r%d", r)
+ }
+ return s
+}
+
+func (s *regAllocState) RegMaskString(m regMask) string {
+ str := ""
+ for r := register(0); m != 0; r++ {
+ if m>>r&1 == 0 {
+ continue
+ }
+ m &^= regMask(1) << r
+ if str != "" {
+ str += " "
+ }
+ str += s.registers[r].String()
+ }
+ return str
+}
+
+// countRegs returns the number of set bits in the register mask.
+func countRegs(r regMask) int {
+ return bits.OnesCount64(uint64(r))
+}
+
+// pickReg picks an arbitrary register from the register mask.
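+// For example (editorial illustration): pickReg(0b10100) returns register 2,
+// the lowest set bit of the mask.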
+func pickReg(r regMask) register {
+ if r == 0 {
+ panic("can't pick a register from an empty set")
+ }
+ // pick the lowest one
+ return register(bits.TrailingZeros64(uint64(r)))
+}
+
+type use struct {
+ dist int32 // distance from start of the block to a use of a value
+ pos src.XPos // source position of the use
+ next *use // linked list of uses of a value in nondecreasing dist order
+}
+
+// A valState records the register allocation state for a (pre-regalloc) value.
+type valState struct {
+ regs regMask // the set of registers holding a Value (usually just one)
+ uses *use // list of uses in this block
+ spill *Value // spilled copy of the Value (if any)
+ restoreMin int32 // minimum of all restores' blocks' sdom.entry
+ restoreMax int32 // maximum of all restores' blocks' sdom.exit
+ needReg bool // cached value of !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags()
+ rematerializeable bool // cached value of v.rematerializeable()
+}
+
+type regState struct {
+ v *Value // Original (preregalloc) Value stored in this register.
+ c *Value // A Value equal to v which is currently in a register. Might be v or a copy of it.
+ // If a register is unused, v==c==nil
+}
+
+type regAllocState struct {
+ f *Func
+
+ sdom SparseTree
+ registers []Register
+ numRegs register
+ SPReg register
+ SBReg register
+ GReg register
+ allocatable regMask
+
+ // live values at the end of each block. live[b.ID] is a list of value IDs
+ // which are live at the end of b, together with a count of how many instructions
+ // forward to the next use.
+ live [][]liveInfo
+ // desired register assignments at the end of each block.
+ // Note that this is a static map computed before allocation occurs. Dynamic
+ // register desires (from partially completed allocations) will trump
+ // this information.
+ desired []desiredState
+
+ // current state of each (preregalloc) Value
+ values []valState
+
+ // ID of SP, SB values
+ sp, sb ID
+
+ // For each Value, map from its value ID back to the
+ // preregalloc Value it was derived from.
+ orig []*Value
+
+ // current state of each register
+ regs []regState
+
+ // registers that contain values which can't be kicked out
+ nospill regMask
+
+ // mask of registers currently in use
+ used regMask
+
+ // mask of registers used in the current instruction
+ tmpused regMask
+
+ // current block we're working on
+ curBlock *Block
+
+ // cache of use records
+ freeUseRecords *use
+
+ // endRegs[blockid] is the register state at the end of each block.
+ // encoded as a set of endReg records.
+ endRegs [][]endReg
+
+ // startRegs[blockid] is the register state at the start of merge blocks.
+ // saved state does not include the state of phi ops in the block.
+ startRegs [][]startReg
+
+ // spillLive[blockid] is the set of live spills at the end of each block
+ spillLive [][]ID
+
+ // a set of copies we generated to move things around, and
+ // whether it is used in shuffle. Unused copies will be deleted.
+ copies map[*Value]bool
+
+ loopnest *loopnest
+
+ // choose a good order in which to visit blocks for allocation purposes.
+ visitOrder []*Block
+
+ // blockOrder[b.ID] corresponds to the index of block b in visitOrder.
+ blockOrder []int32
+
+ // whether to insert instructions that clobber dead registers at call sites
+ doClobber bool
+}
+
+type endReg struct {
+ r register
+ v *Value // pre-regalloc value held in this register (TODO: can we use ID here?)
+ c *Value // cached version of the value
+}
+
+type startReg struct {
+ r register
+ v *Value // pre-regalloc value needed in this register
+ c *Value // cached version of the value
+ pos src.XPos // source position of use of this register
+}
+
+// freeReg frees up register r. Any current user of r is kicked out.
+func (s *regAllocState) freeReg(r register) {
+ v := s.regs[r].v
+ if v == nil {
+ s.f.Fatalf("tried to free an already free register %d\n", r)
+ }
+
+ // Mark r as unused.
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("freeReg %s (dump %s/%s)\n", &s.registers[r], v, s.regs[r].c)
+ }
+ s.regs[r] = regState{}
+ s.values[v.ID].regs &^= regMask(1) << r
+ s.used &^= regMask(1) << r
+}
+
+// freeRegs frees up all registers listed in m.
+func (s *regAllocState) freeRegs(m regMask) {
+ for m&s.used != 0 {
+ s.freeReg(pickReg(m & s.used))
+ }
+}
+
+// clobberRegs inserts instructions that clobber registers listed in m.
+func (s *regAllocState) clobberRegs(m regMask) {
+ m &= s.allocatable & s.f.Config.gpRegMask // only integer registers can contain pointers, so only clobber those
+ for m != 0 {
+ r := pickReg(m)
+ m &^= 1 << r
+ x := s.curBlock.NewValue0(src.NoXPos, OpClobberReg, types.TypeVoid)
+ s.f.setHome(x, &s.registers[r])
+ }
+}
+
+// setOrig records that c's original value is the same as
+// v's original value.
+func (s *regAllocState) setOrig(c *Value, v *Value) {
+ for int(c.ID) >= len(s.orig) {
+ s.orig = append(s.orig, nil)
+ }
+ if s.orig[c.ID] != nil {
+ s.f.Fatalf("orig value set twice %s %s", c, v)
+ }
+ s.orig[c.ID] = s.orig[v.ID]
+}
+
+// assignReg assigns register r to hold c, a copy of v.
+// r must be unused.
+func (s *regAllocState) assignReg(r register, v *Value, c *Value) {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("assignReg %s %s/%s\n", &s.registers[r], v, c)
+ }
+ if s.regs[r].v != nil {
+ s.f.Fatalf("tried to assign register %d to %s/%s but it is already used by %s", r, v, c, s.regs[r].v)
+ }
+
+ // Update state.
+ s.regs[r] = regState{v, c}
+ s.values[v.ID].regs |= regMask(1) << r
+ s.used |= regMask(1) << r
+ s.f.setHome(c, &s.registers[r])
+}
+
+// allocReg chooses a register from the set of registers in mask.
+// If there is no unused register, a Value will be kicked out of
+// a register to make room.
+func (s *regAllocState) allocReg(mask regMask, v *Value) register {
+ if v.OnWasmStack {
+ return noRegister
+ }
+
+ mask &= s.allocatable
+ mask &^= s.nospill
+ if mask == 0 {
+ s.f.Fatalf("no register available for %s", v.LongString())
+ }
+
+ // Pick an unused register if one is available.
+ if mask&^s.used != 0 {
+ return pickReg(mask &^ s.used)
+ }
+
+ // Pick a value to spill. Spill the value with the
+ // farthest-in-the-future use.
+ // TODO: Prefer registers with already spilled Values?
+ // TODO: Modify preference using affinity graph.
+ // TODO: if a single value is in multiple registers, spill one of them
+ // before spilling a value in just a single register.
+
+ // Find a register to spill. We spill the register containing the value
+ // whose next use is as far in the future as possible.
+ // https://en.wikipedia.org/wiki/Page_replacement_algorithm#The_theoretically_optimal_page_replacement_algorithm
+ var r register
+ maxuse := int32(-1)
+ for t := register(0); t < s.numRegs; t++ {
+ if mask>>t&1 == 0 {
+ continue
+ }
+ v := s.regs[t].v
+ if n := s.values[v.ID].uses.dist; n > maxuse {
+ // v's next use is farther in the future than any value
+ // we've seen so far. A new best spill candidate.
+ r = t
+ maxuse = n
+ }
+ }
+ if maxuse == -1 {
+ s.f.Fatalf("couldn't find register to spill")
+ }
+
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ // TODO(neelance): In theory this should never happen, because all wasm registers are equal.
+ // So if there is still a free register, the allocation should have picked that one in the first place instead of
+ // trying to kick some other value out. In practice, this case does happen and it breaks the stack optimization.
+ s.freeReg(r)
+ return r
+ }
+
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy and record it. It will be deleted if never used.
+ v2 := s.regs[r].v
+ m := s.compatRegs(v2.Type) &^ s.used &^ s.tmpused &^ (regMask(1) << r)
+ if m != 0 && !s.values[v2.ID].rematerializeable && countRegs(s.values[v2.ID].regs) == 1 {
+ r2 := pickReg(m)
+ c := s.curBlock.NewValue1(v2.Pos, OpCopy, v2.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", v2, c, &s.registers[r2])
+ }
+ s.setOrig(c, v2)
+ s.assignReg(r2, v2, c)
+ }
+ s.freeReg(r)
+ return r
+}
+
+// makeSpill returns a Value which represents the spilled value of v.
+// b is the block in which the spill is used.
+func (s *regAllocState) makeSpill(v *Value, b *Block) *Value {
+ vi := &s.values[v.ID]
+ if vi.spill != nil {
+ // Final block not known - keep track of subtree where restores reside.
+ vi.restoreMin = min32(vi.restoreMin, s.sdom[b.ID].entry)
+ vi.restoreMax = max32(vi.restoreMax, s.sdom[b.ID].exit)
+ return vi.spill
+ }
+ // Make a spill for v. We don't know where we want
+ // to put it yet, so we leave it blockless for now.
+ spill := s.f.newValueNoBlock(OpStoreReg, v.Type, v.Pos)
+ // We also don't know what the spill's arg will be.
+ // Leave it argless for now.
+ s.setOrig(spill, v)
+ vi.spill = spill
+ vi.restoreMin = s.sdom[b.ID].entry
+ vi.restoreMax = s.sdom[b.ID].exit
+ return spill
+}
+
+// allocValToReg allocates v to a register selected from regMask and
+// returns the register copy of v. Any previous user is kicked out and spilled
+// (if necessary). Load code is added at the current pc. If nospill is set the
+// allocated register is marked nospill so the assignment cannot be
+// undone until the caller allows it by clearing nospill. Returns a
+// *Value which is either v or a copy of v allocated to the chosen register.
+func (s *regAllocState) allocValToReg(v *Value, mask regMask, nospill bool, pos src.XPos) *Value {
+ if s.f.Config.ctxt.Arch.Arch == sys.ArchWasm && v.rematerializeable() {
+ c := v.copyIntoWithXPos(s.curBlock, pos)
+ c.OnWasmStack = true
+ s.setOrig(c, v)
+ return c
+ }
+ if v.OnWasmStack {
+ return v
+ }
+
+ vi := &s.values[v.ID]
+ pos = pos.WithNotStmt()
+ // Check if v is already in a requested register.
+ if mask&vi.regs != 0 {
+ r := pickReg(mask & vi.regs)
+ if s.regs[r].v != v || s.regs[r].c == nil {
+ panic("bad register state")
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ return s.regs[r].c
+ }
+
+ var r register
+ // If nospill is set, the value is used immediately, so it can live on the WebAssembly stack.
+ onWasmStack := nospill && s.f.Config.ctxt.Arch.Arch == sys.ArchWasm
+ if !onWasmStack {
+ // Allocate a register.
+ r = s.allocReg(mask, v)
+ }
+
+ // Allocate v to the new register.
+ var c *Value
+ if vi.regs != 0 {
+ // Copy from a register that v is already in.
+ r2 := pickReg(vi.regs)
+ if s.regs[r2].v != v {
+ panic("bad register state")
+ }
+ c = s.curBlock.NewValue1(pos, OpCopy, v.Type, s.regs[r2].c)
+ } else if v.rematerializeable() {
+ // Rematerialize instead of loading from the spill location.
+ c = v.copyIntoWithXPos(s.curBlock, pos)
+ } else {
+ // Load v from its spill location.
+ spill := s.makeSpill(v, s.curBlock)
+ if s.f.pass.debug > logSpills {
+ s.f.Warnl(vi.spill.Pos, "load spill for %v from %v", v, spill)
+ }
+ c = s.curBlock.NewValue1(pos, OpLoadReg, v.Type, spill)
+ }
+
+ s.setOrig(c, v)
+
+ if onWasmStack {
+ c.OnWasmStack = true
+ return c
+ }
+
+ s.assignReg(r, v, c)
+ if c.Op == OpLoadReg && s.isGReg(r) {
+ s.f.Fatalf("allocValToReg.OpLoadReg targeting g: " + c.LongString())
+ }
+ if nospill {
+ s.nospill |= regMask(1) << r
+ }
+ return c
+}
+
+// isLeaf reports whether f is a leaf function, that is, performs no (non-tail) calls.
+func isLeaf(f *Func) bool {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.IsCall() && !v.Op.IsTailCall() {
+ // tail call is not counted as it does not save the return PC or need a frame
+ return false
+ }
+ }
+ }
+ return true
+}
+
+func (s *regAllocState) init(f *Func) {
+ s.f = f
+ s.f.RegAlloc = s.f.Cache.locs[:0]
+ s.registers = f.Config.registers
+ if nr := len(s.registers); nr == 0 || nr > int(noRegister) || nr > int(unsafe.Sizeof(regMask(0))*8) {
+ s.f.Fatalf("bad number of registers: %d", nr)
+ } else {
+ s.numRegs = register(nr)
+ }
+ // Locate SP, SB, and g registers.
+ s.SPReg = noRegister
+ s.SBReg = noRegister
+ s.GReg = noRegister
+ for r := register(0); r < s.numRegs; r++ {
+ switch s.registers[r].String() {
+ case "SP":
+ s.SPReg = r
+ case "SB":
+ s.SBReg = r
+ case "g":
+ s.GReg = r
+ }
+ }
+ // Make sure we found all required registers.
+ switch noRegister {
+ case s.SPReg:
+ s.f.Fatalf("no SP register found")
+ case s.SBReg:
+ s.f.Fatalf("no SB register found")
+ case s.GReg:
+ if f.Config.hasGReg {
+ s.f.Fatalf("no g register found")
+ }
+ }
+
+ // Figure out which registers we're allowed to use.
+ s.allocatable = s.f.Config.gpRegMask | s.f.Config.fpRegMask | s.f.Config.specialRegMask
+ s.allocatable &^= 1 << s.SPReg
+ s.allocatable &^= 1 << s.SBReg
+ if s.f.Config.hasGReg {
+ s.allocatable &^= 1 << s.GReg
+ }
+ if buildcfg.FramePointerEnabled && s.f.Config.FPReg >= 0 {
+ s.allocatable &^= 1 << uint(s.f.Config.FPReg)
+ }
+ if s.f.Config.LinkReg != -1 {
+ if isLeaf(f) {
+ // Leaf functions don't save/restore the link register.
+ s.allocatable &^= 1 << uint(s.f.Config.LinkReg)
+ }
+ }
+ if s.f.Config.ctxt.Flag_dynlink {
+ switch s.f.Config.arch {
+ case "386":
+ // nothing to do.
+ // Note that for Flag_shared (position independent code)
+ // we do need to be careful, but that carefulness is hidden
+ // in the rewrite rules so we always have a free register
+ // available for global load/stores. See gen/386.rules (search for Flag_shared).
+ case "amd64":
+ s.allocatable &^= 1 << 15 // R15
+ case "arm":
+ s.allocatable &^= 1 << 9 // R9
+ case "arm64":
+ // nothing to do
+ case "ppc64le": // R2 already reserved.
+ // nothing to do
+ case "riscv64": // X3 (aka GP) and X4 (aka TP) already reserved.
+ // nothing to do
+ case "s390x":
+ s.allocatable &^= 1 << 11 // R11
+ default:
+ s.f.fe.Fatalf(src.NoXPos, "arch %s not implemented", s.f.Config.arch)
+ }
+ }
+
+ // Linear scan register allocation can be influenced by the order in which blocks appear.
+ // Decouple the register allocation order from the generated block order.
+ // This also creates an opportunity for experiments to find a better order.
+ s.visitOrder = layoutRegallocOrder(f)
+
+ // Compute block order. This array allows us to distinguish forward edges
+ // from backward edges and compute how far they go.
+ s.blockOrder = make([]int32, f.NumBlocks())
+ for i, b := range s.visitOrder {
+ s.blockOrder[b.ID] = int32(i)
+ }
+
+ s.regs = make([]regState, s.numRegs)
+ nv := f.NumValues()
+ if cap(s.f.Cache.regallocValues) >= nv {
+ s.f.Cache.regallocValues = s.f.Cache.regallocValues[:nv]
+ } else {
+ s.f.Cache.regallocValues = make([]valState, nv)
+ }
+ s.values = s.f.Cache.regallocValues
+ s.orig = make([]*Value, nv)
+ s.copies = make(map[*Value]bool)
+ for _, b := range s.visitOrder {
+ for _, v := range b.Values {
+ if !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && !v.Type.IsTuple() {
+ s.values[v.ID].needReg = true
+ s.values[v.ID].rematerializeable = v.rematerializeable()
+ s.orig[v.ID] = v
+ }
+ // Note: needReg is false for values returning Tuple types.
+ // Instead, we mark the corresponding Selects as needReg.
+ }
+ }
+ s.computeLive()
+
+ s.endRegs = make([][]endReg, f.NumBlocks())
+ s.startRegs = make([][]startReg, f.NumBlocks())
+ s.spillLive = make([][]ID, f.NumBlocks())
+ s.sdom = f.Sdom()
+
+ // wasm: Mark instructions that can be optimized to have their values only on the WebAssembly stack.
+ if f.Config.ctxt.Arch.Arch == sys.ArchWasm {
+ canLiveOnStack := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(canLiveOnStack)
+ for _, b := range f.Blocks {
+ // New block. Clear candidate set.
+ canLiveOnStack.clear()
+ for _, c := range b.ControlValues() {
+ if c.Uses == 1 && !opcodeTable[c.Op].generic {
+ canLiveOnStack.add(c.ID)
+ }
+ }
+ // Walking backwards.
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if canLiveOnStack.contains(v.ID) {
+ v.OnWasmStack = true
+ } else {
+ // Value cannot live on the stack. Values are not allowed to be reordered, so clear the candidate set.
+ canLiveOnStack.clear()
+ }
+ for _, arg := range v.Args {
+ // Value can live on the stack if:
+ // - it is only used once
+ // - it is used in the same basic block
+ // - it is not a "mem" value
+ // - it is a WebAssembly op
+ if arg.Uses == 1 && arg.Block == v.Block && !arg.Type.IsMemory() && !opcodeTable[arg.Op].generic {
+ canLiveOnStack.add(arg.ID)
+ }
+ }
+ }
+ }
+ }
+
+ // The clobberdeadreg experiment inserts code to clobber dead registers
+ // at call sites.
+ // Ignore huge functions to avoid doing too much work.
+ if base.Flag.ClobberDeadReg && len(s.f.Blocks) <= 10000 {
+ // TODO: honor GOCLOBBERDEADHASH, or maybe GOSSAHASH.
+ s.doClobber = true
+ }
+}
+
+// Adds a use record for id at distance dist from the start of the block.
+// All calls to addUse must happen with nonincreasing dist.
+func (s *regAllocState) addUse(id ID, dist int32, pos src.XPos) {
+ r := s.freeUseRecords
+ if r != nil {
+ s.freeUseRecords = r.next
+ } else {
+ r = &use{}
+ }
+ r.dist = dist
+ r.pos = pos
+ r.next = s.values[id].uses
+ s.values[id].uses = r
+ if r.next != nil && dist > r.next.dist {
+ s.f.Fatalf("uses added in wrong order")
+ }
+}
+
+// advanceUses advances the uses of v's args from the state before v to the state after v.
+// Any values which have no more uses are deallocated from registers.
+func (s *regAllocState) advanceUses(v *Value) {
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ ai := &s.values[a.ID]
+ r := ai.uses
+ ai.uses = r.next
+ if r.next == nil {
+ // Value is dead, free all registers that hold it.
+ s.freeRegs(ai.regs)
+ }
+ r.next = s.freeUseRecords
+ s.freeUseRecords = r
+ }
+}
+
+// liveAfterCurrentInstruction reports whether v is live after
+// the current instruction is completed. v must be used by the
+// current instruction.
+func (s *regAllocState) liveAfterCurrentInstruction(v *Value) bool {
+ u := s.values[v.ID].uses
+ if u == nil {
+ panic(fmt.Errorf("u is nil, v = %s, s.values[v.ID] = %v", v.LongString(), s.values[v.ID]))
+ }
+ d := u.dist
+ for u != nil && u.dist == d {
+ u = u.next
+ }
+ return u != nil && u.dist > d
+}
+
+// Sets the state of the registers to that encoded in regs.
+func (s *regAllocState) setState(regs []endReg) {
+ s.freeRegs(s.used)
+ for _, x := range regs {
+ s.assignReg(x.r, x.v, x.c)
+ }
+}
+
+// compatRegs returns the set of registers which can store a type t.
+func (s *regAllocState) compatRegs(t *types.Type) regMask {
+ var m regMask
+ if t.IsTuple() || t.IsFlags() {
+ return 0
+ }
+ if t.IsFloat() || t == types.TypeInt128 {
+ if t.Kind() == types.TFLOAT32 && s.f.Config.fp32RegMask != 0 {
+ m = s.f.Config.fp32RegMask
+ } else if t.Kind() == types.TFLOAT64 && s.f.Config.fp64RegMask != 0 {
+ m = s.f.Config.fp64RegMask
+ } else {
+ m = s.f.Config.fpRegMask
+ }
+ } else {
+ m = s.f.Config.gpRegMask
+ }
+ return m & s.allocatable
+}
+
+// regspec returns the regInfo for operation op.
+func (s *regAllocState) regspec(v *Value) regInfo {
+ op := v.Op
+ if op == OpConvert {
+ // OpConvert is a generic op, so it doesn't have a
+ // register set in the static table. It can use any
+ // allocatable integer register.
+ m := s.allocatable & s.f.Config.gpRegMask
+ return regInfo{inputs: []inputInfo{{regs: m}}, outputs: []outputInfo{{regs: m}}}
+ }
+ if op == OpArgIntReg {
+ reg := v.Block.Func.Config.intParamRegs[v.AuxInt8()]
+ return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
+ }
+ if op == OpArgFloatReg {
+ reg := v.Block.Func.Config.floatParamRegs[v.AuxInt8()]
+ return regInfo{outputs: []outputInfo{{regs: 1 << uint(reg)}}}
+ }
+ if op.IsCall() {
+ if ac, ok := v.Aux.(*AuxCall); ok && ac.reg != nil {
+ return *ac.Reg(&opcodeTable[op].reg, s.f.Config)
+ }
+ }
+ if op == OpMakeResult && s.f.OwnAux.reg != nil {
+ return *s.f.OwnAux.ResultReg(s.f.Config)
+ }
+ return opcodeTable[op].reg
+}
+
+func (s *regAllocState) isGReg(r register) bool {
+ return s.f.Config.hasGReg && s.GReg == r
+}
+
+func (s *regAllocState) regalloc(f *Func) {
+ regValLiveSet := f.newSparseSet(f.NumValues()) // set of values that may be live in register
+ defer f.retSparseSet(regValLiveSet)
+ var oldSched []*Value
+ var phis []*Value
+ var phiRegs []register
+ var args []*Value
+
+ // Data structure used for computing desired registers.
+ var desired desiredState
+
+ // Desired registers for inputs & outputs for each instruction in the block.
+ type dentry struct {
+ out [4]register // desired output registers
+ in [3][4]register // desired input registers (for inputs 0,1, and 2)
+ }
+ var dinfo []dentry
+
+ if f.Entry != f.Blocks[0] {
+ f.Fatalf("entry block must be first")
+ }
+
+ for _, b := range s.visitOrder {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("Begin processing block %v\n", b)
+ }
+ s.curBlock = b
+
+ // Initialize regValLiveSet and uses fields for this block.
+ // Walk backwards through the block doing liveness analysis.
+ regValLiveSet.clear()
+ for _, e := range s.live[b.ID] {
+ s.addUse(e.ID, int32(len(b.Values))+e.dist, e.pos) // pseudo-uses from beyond end of block
+ regValLiveSet.add(e.ID)
+ }
+ for _, v := range b.ControlValues() {
+ if s.values[v.ID].needReg {
+ s.addUse(v.ID, int32(len(b.Values)), b.Pos) // pseudo-use by control values
+ regValLiveSet.add(v.ID)
+ }
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ regValLiveSet.remove(v.ID)
+ if v.Op == OpPhi {
+ // Remove v from the live set, but don't add
+ // any inputs. This is the state the len(b.Preds)>1
+ // case below desires; it wants to process phis specially.
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ // Function call clobbers all the registers but SP and SB.
+ regValLiveSet.clear()
+ if s.sp != 0 && s.values[s.sp].uses != nil {
+ regValLiveSet.add(s.sp)
+ }
+ if s.sb != 0 && s.values[s.sb].uses != nil {
+ regValLiveSet.add(s.sb)
+ }
+ }
+ for _, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ continue
+ }
+ s.addUse(a.ID, int32(i), v.Pos)
+ regValLiveSet.add(a.ID)
+ }
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("use distances for %s\n", b)
+ for i := range s.values {
+ vi := &s.values[i]
+ u := vi.uses
+ if u == nil {
+ continue
+ }
+ fmt.Printf(" v%d:", i)
+ for u != nil {
+ fmt.Printf(" %d", u.dist)
+ u = u.next
+ }
+ fmt.Println()
+ }
+ }
+
+ // Make a copy of the block schedule so we can generate a new one in place.
+ // We make a separate copy for phis and regular values.
+ nphi := 0
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ nphi++
+ }
+ phis = append(phis[:0], b.Values[:nphi]...)
+ oldSched = append(oldSched[:0], b.Values[nphi:]...)
+ b.Values = b.Values[:0]
+
+ // Initialize start state of block.
+ if b == f.Entry {
+ // Regalloc state is empty to start.
+ if nphi > 0 {
+ f.Fatalf("phis in entry block")
+ }
+ } else if len(b.Preds) == 1 {
+ // Start regalloc state with the end state of the previous block.
+ s.setState(s.endRegs[b.Preds[0].b.ID])
+ if nphi > 0 {
+ f.Fatalf("phis in single-predecessor block")
+ }
+ // Drop any values which are no longer live.
+ // This may happen because at the end of p, a value may be
+ // live but only used by some other successor of p.
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+ } else {
+ // This is the complicated case. We have more than one predecessor,
+ // which means we may have Phi ops.
+
+ // Start with the final register state of the predecessor with the fewest live spills.
+ // This is based on the following points:
+ // 1. Fewer live spills indicate lower register pressure on that path,
+ //    so the values of this block are more likely to end up in registers.
+ // 2. It avoids predecessors that contain function calls, because a call
+ //    usually generates many spills and loses the previous allocation state.
+ // TODO: Improve this part. At least the size of endRegs of the predecessor also has
+ // an impact on the code size and compiler speed. But it is not easy to find a simple
+ // and efficient method that combines multiple factors.
+ idx := -1
+ for i, p := range b.Preds {
+ // If the predecessor has not been visited yet, skip it because its end state
+ // (endRegs and spillLive) has not been computed yet.
+ pb := p.b
+ if s.blockOrder[pb.ID] >= s.blockOrder[b.ID] {
+ continue
+ }
+ if idx == -1 {
+ idx = i
+ continue
+ }
+ pSel := b.Preds[idx].b
+ if len(s.spillLive[pb.ID]) < len(s.spillLive[pSel.ID]) {
+ idx = i
+ } else if len(s.spillLive[pb.ID]) == len(s.spillLive[pSel.ID]) {
+ // Use a bit of branch-likelihood information. After the critical-edge
+ // removal pass, pb and pSel must be plain blocks, so check the edge
+ // from pb's predecessor to pb instead of the edge pb->b.
+ // TODO: improve the prediction of the likely predecessor. The following
+ // method is only suitable for the simplest cases. For complex cases,
+ // the prediction may be inaccurate, but this does not affect the
+ // correctness of the program.
+ // According to the layout algorithm, the predecessor with the
+ // smaller blockOrder is the likely (true) branch, and testing shows
+ // that choosing the predecessor with the smaller blockOrder is
+ // better than making no choice at all.
+ if pb.likelyBranch() && !pSel.likelyBranch() || s.blockOrder[pb.ID] < s.blockOrder[pSel.ID] {
+ idx = i
+ }
+ }
+ }
+ if idx < 0 {
+ f.Fatalf("bad visitOrder, no predecessor of %s has been visited before it", b)
+ }
+ p := b.Preds[idx].b
+ s.setState(s.endRegs[p.ID])
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("starting merge block %s with end state of %s:\n", b, p)
+ for _, x := range s.endRegs[p.ID] {
+ fmt.Printf(" %s: orig:%s cache:%s\n", &s.registers[x.r], x.v, x.c)
+ }
+ }
+
+ // Decide on registers for phi ops. Use the registers determined
+ // by the primary predecessor if we can.
+ // TODO: pick best of (already processed) predecessors?
+ // Majority vote? Deepest nesting level?
+ phiRegs = phiRegs[:0]
+ var phiUsed regMask
+
+ for _, v := range phis {
+ if !s.values[v.ID].needReg {
+ phiRegs = append(phiRegs, noRegister)
+ continue
+ }
+ a := v.Args[idx]
+ // Some instructions target not-allocatable registers.
+ // They're not suitable for further (phi-function) allocation.
+ m := s.values[a.ID].regs &^ phiUsed & s.allocatable
+ if m != 0 {
+ r := pickReg(m)
+ phiUsed |= regMask(1) << r
+ phiRegs = append(phiRegs, r)
+ } else {
+ phiRegs = append(phiRegs, noRegister)
+ }
+ }
+
+ // Second pass - deallocate all in-register phi inputs.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ a := v.Args[idx]
+ r := phiRegs[i]
+ if r == noRegister {
+ continue
+ }
+ if regValLiveSet.contains(a.ID) {
+ // Input value is still live (it is used by something other than Phi).
+ // Try to move it around before kicking out, if there is a free register.
+ // We generate a Copy in the predecessor block and record it. It will be
+ // deleted later if never used.
+ //
+ // Pick a free register. At this point some registers used in the predecessor
+ // block may have been deallocated. Those are the ones used for Phis. Exclude
+ // them (and they are not going to be helpful anyway).
+ m := s.compatRegs(a.Type) &^ s.used &^ phiUsed
+ if m != 0 && !s.values[a.ID].rematerializeable && countRegs(s.values[a.ID].regs) == 1 {
+ r2 := pickReg(m)
+ c := p.NewValue1(a.Pos, OpCopy, a.Type, s.regs[r].c)
+ s.copies[c] = false
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("copy %s to %s : %s\n", a, c, &s.registers[r2])
+ }
+ s.setOrig(c, a)
+ s.assignReg(r2, a, c)
+ s.endRegs[p.ID] = append(s.endRegs[p.ID], endReg{r2, a, c})
+ }
+ }
+ s.freeReg(r)
+ }
+
+ // Copy phi ops into new schedule.
+ b.Values = append(b.Values, phis...)
+
+ // Third pass - pick registers for phis whose input
+ // was not in a register in the primary predecessor.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if phiRegs[i] != noRegister {
+ continue
+ }
+ m := s.compatRegs(v.Type) &^ phiUsed &^ s.used
+ // If one of the other inputs of v is in a register, and the register is available,
+ // select this register, which can save some unnecessary copies.
+ for i, pe := range b.Preds {
+ if i == idx {
+ continue
+ }
+ ri := noRegister
+ for _, er := range s.endRegs[pe.b.ID] {
+ if er.v == s.orig[v.Args[i].ID] {
+ ri = er.r
+ break
+ }
+ }
+ if ri != noRegister && m>>ri&1 != 0 {
+ m = regMask(1) << ri
+ break
+ }
+ }
+ if m != 0 {
+ r := pickReg(m)
+ phiRegs[i] = r
+ phiUsed |= regMask(1) << r
+ }
+ }
+
+ // Set registers for phis. Add phi spill code.
+ for i, v := range phis {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ r := phiRegs[i]
+ if r == noRegister {
+ // stack-based phi
+ // Spills will be inserted in all the predecessors below.
+ s.values[v.ID].spill = v // v starts life spilled
+ continue
+ }
+ // register-based phi
+ s.assignReg(r, v, v)
+ }
+
+ // Deallocate any values which are no longer live. Phis are excluded.
+ for r := register(0); r < s.numRegs; r++ {
+ if phiUsed>>r&1 != 0 {
+ continue
+ }
+ v := s.regs[r].v
+ if v != nil && !regValLiveSet.contains(v.ID) {
+ s.freeReg(r)
+ }
+ }
+
+ // Save the starting state for use by merge edges.
+ // We append to a stack allocated variable that we'll
+ // later copy into s.startRegs in one fell swoop, to save
+ // on allocations.
+ regList := make([]startReg, 0, 32)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if phiUsed>>r&1 != 0 {
+ // Skip registers that phis used, we'll handle those
+ // specially during merge edge processing.
+ continue
+ }
+ regList = append(regList, startReg{r, v, s.regs[r].c, s.values[v.ID].uses.pos})
+ }
+ s.startRegs[b.ID] = make([]startReg, len(regList))
+ copy(s.startRegs[b.ID], regList)
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("after phis\n")
+ for _, x := range s.startRegs[b.ID] {
+ fmt.Printf(" %s: v%d\n", &s.registers[x.r], x.v.ID)
+ }
+ }
+ }
+
+ // Allocate space to record the desired registers for each value.
+ if l := len(oldSched); cap(dinfo) < l {
+ dinfo = make([]dentry, l)
+ } else {
+ dinfo = dinfo[:l]
+ for i := range dinfo {
+ dinfo[i] = dentry{}
+ }
+ }
+
+ // Load static desired register info at the end of the block.
+ desired.copy(&s.desired[b.ID])
+
+ // Check actual assigned registers at the start of the next block(s).
+ // Dynamically assigned registers will trump the static
+ // desired registers computed during liveness analysis.
+ // Note that we do this phase after startRegs is set above, so that
+ // we get the right behavior for a block which branches to itself.
+ for _, e := range b.Succs {
+ succ := e.b
+ // TODO: prioritize likely successor?
+ for _, x := range s.startRegs[succ.ID] {
+ desired.add(x.v.ID, x.r)
+ }
+ // Process phi ops in succ.
+ pidx := e.i
+ for _, v := range succ.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ rp, ok := s.f.getHome(v.ID).(*Register)
+ if !ok {
+ // If v is not assigned a register, pick a register assigned to one of v's inputs.
+ // Hopefully v will get assigned that register later.
+ // If the inputs have allocated register information, add it to desired,
+ // which may reduce spill or copy operations when the register is available.
+ for _, a := range v.Args {
+ rp, ok = s.f.getHome(a.ID).(*Register)
+ if ok {
+ break
+ }
+ }
+ if !ok {
+ continue
+ }
+ }
+ desired.add(v.Args[pidx].ID, register(rp.num))
+ }
+ }
+ // Walk values backwards computing desired register info.
+ // See computeLive for more comments.
+ for i := len(oldSched) - 1; i >= 0; i-- {
+ v := oldSched[i]
+ prefs := desired.remove(v.ID)
+ regspec := s.regspec(v)
+ desired.clobber(regspec.clobbers)
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ if opcodeTable[v.Op].resultInArg0 {
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ // Save desired registers for this value.
+ dinfo[i].out = prefs
+ for j, a := range v.Args {
+ if j >= len(dinfo[i].in) {
+ break
+ }
+ dinfo[i].in[j] = desired.get(a.ID)
+ }
+ }
+
+ // Process all the non-phi values.
+ for idx, v := range oldSched {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing %s\n", v.LongString())
+ }
+ regspec := s.regspec(v)
+ if v.Op == OpPhi {
+ f.Fatalf("phi %s not at start of block", v)
+ }
+ if v.Op == OpSP {
+ s.assignReg(s.SPReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sp = v.ID
+ continue
+ }
+ if v.Op == OpSB {
+ s.assignReg(s.SBReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ s.sb = v.ID
+ continue
+ }
+ if v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN {
+ if s.values[v.ID].needReg {
+ if v.Op == OpSelectN {
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocResults)[int(v.AuxInt)].(*Register).num), v, v)
+ } else {
+ var i = 0
+ if v.Op == OpSelect1 {
+ i = 1
+ }
+ s.assignReg(register(s.f.getHome(v.Args[0].ID).(LocPair)[i].(*Register).num), v, v)
+ }
+ }
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
+ if v.Op == OpGetG && s.f.Config.hasGReg {
+ // use hardware g register
+ if s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // kick out the old value
+ }
+ s.assignReg(s.GReg, v, v)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ goto issueSpill
+ }
+ if v.Op == OpArg {
+ // Args are "pre-spilled" values. We don't allocate
+ // any register here. We just set up the spill pointer to
+ // point at itself and any later user will restore it to use it.
+ s.values[v.ID].spill = v
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+ if v.Op == OpKeepAlive {
+ // Make sure the argument to v is still live here.
+ s.advanceUses(v)
+ a := v.Args[0]
+ vi := &s.values[a.ID]
+ if vi.regs == 0 && !vi.rematerializeable {
+ // Use the spill location.
+ // This forces later liveness analysis to make the
+ // value live at this point.
+ v.SetArg(0, s.makeSpill(a, b))
+ } else if _, ok := a.Aux.(*ir.Name); ok && vi.rematerializeable {
+ // Rematerializeable value with a gc.Node. This is the address of
+ // a stack object (e.g. an LEAQ). Keep the object live.
+ // Change it to VarLive, which is what plive expects for locals.
+ v.Op = OpVarLive
+ v.SetArgs1(v.Args[1])
+ v.Aux = a.Aux
+ } else {
+ // In-register and rematerializeable values are already live.
+ // These are typically rematerializeable constants like nil,
+ // or values of a variable that were modified since the last call.
+ v.Op = OpCopy
+ v.SetArgs1(v.Args[1])
+ }
+ b.Values = append(b.Values, v)
+ continue
+ }
+ if len(regspec.inputs) == 0 && len(regspec.outputs) == 0 {
+ // No register allocation required (or none specified yet)
+ if s.doClobber && v.Op.IsCall() {
+ s.clobberRegs(regspec.clobbers)
+ }
+ s.freeRegs(regspec.clobbers)
+ b.Values = append(b.Values, v)
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.values[v.ID].rematerializeable {
+ // Value is rematerializeable, don't issue it here.
+ // It will get issued just before each use (see
+ // allocValToReg).
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ s.advanceUses(v)
+ continue
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("value %s\n", v.LongString())
+ fmt.Printf(" out:")
+ for _, r := range dinfo[idx].out {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ for i := 0; i < len(v.Args) && i < 3; i++ {
+ fmt.Printf(" in%d:", i)
+ for _, r := range dinfo[idx].in[i] {
+ if r != noRegister {
+ fmt.Printf(" %s", &s.registers[r])
+ }
+ }
+ fmt.Println()
+ }
+ }
+
+ // Move arguments to registers.
+ // First, if an arg must be in a specific register and it is already
+ // in place, keep it.
+ args = append(args[:0], make([]*Value, len(v.Args))...)
+ for i, a := range v.Args {
+ if !s.values[a.ID].needReg {
+ args[i] = a
+ }
+ }
+ for _, i := range regspec.inputs {
+ mask := i.regs
+ if countRegs(mask) == 1 && mask&s.values[v.Args[i.idx].ID].regs != 0 {
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ }
+ }
+ // Then, if an arg must be in a specific register and that
+ // register is free, allocate that one. Otherwise when processing
+ // another input we may kick a value into the free register, which
+ // then will be kicked out again.
+ // This is a common case for passing-in-register arguments for
+ // function calls.
+ for {
+ freed := false
+ for _, i := range regspec.inputs {
+ if args[i.idx] != nil {
+ continue // already allocated
+ }
+ mask := i.regs
+ if countRegs(mask) == 1 && mask&^s.used != 0 {
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ // If the input is in other registers that will be clobbered by v,
+ // or the input is dead, free the registers. This may make room
+ // for other inputs.
+ oldregs := s.values[v.Args[i.idx].ID].regs
+ if oldregs&^regspec.clobbers == 0 || !s.liveAfterCurrentInstruction(v.Args[i.idx]) {
+ s.freeRegs(oldregs &^ mask &^ s.nospill)
+ freed = true
+ }
+ }
+ }
+ if !freed {
+ break
+ }
+ }
+ // Last, allocate remaining ones, in an ordering defined
+ // by the register specification (most constrained first).
+ for _, i := range regspec.inputs {
+ if args[i.idx] != nil {
+ continue // already allocated
+ }
+ mask := i.regs
+ if mask&s.values[v.Args[i.idx].ID].regs == 0 {
+ // Need a new register for the input.
+ mask &= s.allocatable
+ mask &^= s.nospill
+ // Use the desired register if available.
+ if i.idx < 3 {
+ for _, r := range dinfo[idx].in[i.idx] {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid != 0 {
+ mask &^= desired.avoid
+ }
+ }
+ args[i.idx] = s.allocValToReg(v.Args[i.idx], mask, true, v.Pos)
+ }
+
+ // If the output clobbers the input register, make sure we have
+ // at least two copies of the input register so we don't
+ // have to reload the value from the spill location.
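+ // (A typical resultInArg0 case, for illustration: two-operand x86
+ // instructions such as ADDQ, where the destination register is also
+ // the first source and is overwritten by the result.)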
+ if opcodeTable[v.Op].resultInArg0 {
+ var m regMask
+ if !s.liveAfterCurrentInstruction(v.Args[0]) {
+ // arg0 is dead. We can clobber its register.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && !s.liveAfterCurrentInstruction(v.Args[1]) {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if s.values[v.Args[0].ID].rematerializeable {
+ // We can rematerialize the input, don't worry about clobbering it.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && s.values[v.Args[1].ID].rematerializeable {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ if countRegs(s.values[v.Args[0].ID].regs) >= 2 {
+ // we have at least 2 copies of arg0. We can afford to clobber one.
+ goto ok
+ }
+ if opcodeTable[v.Op].commutative && countRegs(s.values[v.Args[1].ID].regs) >= 2 {
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+
+ // We can't overwrite arg0 (or arg1, if commutative). So we
+ // need to make a copy of an input so we have a register we can modify.
+
+ // Possible new registers to copy into.
+ m = s.compatRegs(v.Args[0].Type) &^ s.used
+ if m == 0 {
+ // No free registers. In this case we'll just clobber
+ // an input and future uses of that input must use a restore.
+ // TODO(khr): We should really do this like allocReg does it,
+ // spilling the value with the most distant next use.
+ goto ok
+ }
+
+ // Try to move an input to the desired output, if allowed.
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (m&regspec.outputs[0].regs)>>r&1 != 0 {
+ m = regMask(1) << r
+ args[0] = s.allocValToReg(v.Args[0], m, true, v.Pos)
+ // Note: we update args[0] so the instruction will
+ // use the register copy we just made.
+ goto ok
+ }
+ }
+ // Try to copy input to its desired location & use its old
+ // location as the result register.
+ for _, r := range dinfo[idx].in[0] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+ // Note: no update to args[0] so the instruction will
+ // use the original copy.
+ goto ok
+ }
+ }
+ if opcodeTable[v.Op].commutative {
+ for _, r := range dinfo[idx].in[1] {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ c := s.allocValToReg(v.Args[1], m, true, v.Pos)
+ s.copies[c] = false
+ args[0], args[1] = args[1], args[0]
+ goto ok
+ }
+ }
+ }
+ // Avoid future fixed uses if we can.
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ // Save input 0 to a new register so we can clobber it.
+ c := s.allocValToReg(v.Args[0], m, true, v.Pos)
+ s.copies[c] = false
+ }
+
+ ok:
+ // Now that all args are in regs, we're ready to issue the value itself.
+ // Before we pick a register for the output value, allow input registers
+ // to be deallocated. We do this here so that the output can use the
+ // same register as a dying input.
+ if !opcodeTable[v.Op].resultNotInArgs {
+ s.tmpused = s.nospill
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+
+ // Dump any registers which will be clobbered
+ if s.doClobber && v.Op.IsCall() {
+ // clobber registers that are marked as clobber in regmask, but
+ // don't clobber inputs.
+ s.clobberRegs(regspec.clobbers &^ s.tmpused &^ s.nospill)
+ }
+ s.freeRegs(regspec.clobbers)
+ s.tmpused |= regspec.clobbers
+
+ // Pick registers for outputs.
+ {
+ outRegs := noRegisters // TODO if this is costly, hoist and clear incrementally below.
+ maxOutIdx := -1
+ var used regMask
+ for _, out := range regspec.outputs {
+ mask := out.regs & s.allocatable &^ used
+ if mask == 0 {
+ continue
+ }
+ if opcodeTable[v.Op].resultInArg0 && out.idx == 0 {
+ if !opcodeTable[v.Op].commutative {
+ // Output must use the same register as input 0.
+ r := register(s.f.getHome(args[0].ID).(*Register).num)
+ if mask>>r&1 == 0 {
+ s.f.Fatalf("resultInArg0 value's input %v cannot be an output of %s", s.f.getHome(args[0].ID).(*Register), v.LongString())
+ }
+ mask = regMask(1) << r
+ } else {
+ // Output must use the same register as input 0 or 1.
+ r0 := register(s.f.getHome(args[0].ID).(*Register).num)
+ r1 := register(s.f.getHome(args[1].ID).(*Register).num)
+ // Check r0 and r1 for desired output register.
+ found := false
+ for _, r := range dinfo[idx].out {
+ if (r == r0 || r == r1) && (mask&^s.used)>>r&1 != 0 {
+ mask = regMask(1) << r
+ found = true
+ if r == r1 {
+ args[0], args[1] = args[1], args[0]
+ }
+ break
+ }
+ }
+ if !found {
+ // Neither is desired; pick r0.
+ mask = regMask(1) << r0
+ }
+ }
+ }
+ for _, r := range dinfo[idx].out {
+ if r != noRegister && (mask&^s.used)>>r&1 != 0 {
+ // Desired register is allowed and unused.
+ mask = regMask(1) << r
+ break
+ }
+ }
+ // Avoid registers we're saving for other values.
+ if mask&^desired.avoid&^s.nospill != 0 {
+ mask &^= desired.avoid
+ }
+ r := s.allocReg(mask, v)
+ if out.idx > maxOutIdx {
+ maxOutIdx = out.idx
+ }
+ outRegs[out.idx] = r
+ used |= regMask(1) << r
+ s.tmpused |= regMask(1) << r
+ }
+ // Record register choices
+ if v.Type.IsTuple() {
+ var outLocs LocPair
+ if r := outRegs[0]; r != noRegister {
+ outLocs[0] = &s.registers[r]
+ }
+ if r := outRegs[1]; r != noRegister {
+ outLocs[1] = &s.registers[r]
+ }
+ s.f.setHome(v, outLocs)
+ // Note that subsequent SelectX instructions will do the assignReg calls.
+ } else if v.Type.IsResults() {
+ // preallocate outLocs to the right size, which is maxOutIdx+1
+ outLocs := make(LocResults, maxOutIdx+1, maxOutIdx+1)
+ for i := 0; i <= maxOutIdx; i++ {
+ if r := outRegs[i]; r != noRegister {
+ outLocs[i] = &s.registers[r]
+ }
+ }
+ s.f.setHome(v, outLocs)
+ } else {
+ if r := outRegs[0]; r != noRegister {
+ s.assignReg(r, v, v)
+ }
+ }
+ }
+
+ // deallocate dead args, if we have not done so
+ if opcodeTable[v.Op].resultNotInArgs {
+ s.nospill = 0
+ s.advanceUses(v) // frees any registers holding args that are no longer live
+ }
+ s.tmpused = 0
+
+ // Issue the Value itself.
+ for i, a := range args {
+ v.SetArg(i, a) // use register version of arguments
+ }
+ b.Values = append(b.Values, v)
+
+ issueSpill:
+ }
+
+ // Copy the control values - we need this so we can reduce the
+ // uses property of these values later.
+ controls := append(make([]*Value, 0, 2), b.ControlValues()...)
+
+ // Load control values into registers.
+ for i, v := range b.ControlValues() {
+ if !s.values[v.ID].needReg {
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf(" processing control %s\n", v.LongString())
+ }
+ // We assume that a control input can be passed in any
+ // type-compatible register. If this turns out not to be true,
+ // we'll need to introduce a regspec for a block's control value.
+ b.ReplaceControl(i, s.allocValToReg(v, s.compatRegs(v.Type), false, b.Pos))
+ }
+
+ // Reduce the uses of the control values once registers have been loaded.
+ // This loop is equivalent to the advanceUses method.
+ for _, v := range controls {
+ vi := &s.values[v.ID]
+ if !vi.needReg {
+ continue
+ }
+ // Remove this use from the uses list.
+ u := vi.uses
+ vi.uses = u.next
+ if u.next == nil {
+ s.freeRegs(vi.regs) // value is dead
+ }
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+
+ // If we are approaching a merge point and we are the primary
+ // predecessor of it, find live values that we use soon after
+ // the merge point and promote them to registers now.
+ if len(b.Succs) == 1 {
+ if s.f.Config.hasGReg && s.regs[s.GReg].v != nil {
+ s.freeReg(s.GReg) // Spill value in G register before any merge.
+ }
+ // For this to be worthwhile, the loop must have no calls in it.
+ top := b.Succs[0].b
+ loop := s.loopnest.b2l[top.ID]
+ if loop == nil || loop.header != top || loop.containsUnavoidableCall {
+ goto badloop
+ }
+
+ // TODO: sort by distance, pick the closest ones?
+ for _, live := range s.live[b.ID] {
+ if live.dist >= unlikelyDistance {
+ // Don't preload anything live after the loop.
+ continue
+ }
+ vid := live.ID
+ vi := &s.values[vid]
+ if vi.regs != 0 {
+ continue
+ }
+ if vi.rematerializeable {
+ continue
+ }
+ v := s.orig[vid]
+ m := s.compatRegs(v.Type) &^ s.used
+ // Use the desired register if available.
+ outerloop:
+ for _, e := range desired.entries {
+ if e.ID != v.ID {
+ continue
+ }
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 != 0 {
+ m = regMask(1) << r
+ break outerloop
+ }
+ }
+ }
+ if m&^desired.avoid != 0 {
+ m &^= desired.avoid
+ }
+ if m != 0 {
+ s.allocValToReg(v, m, false, b.Pos)
+ }
+ }
+ }
+ badloop:
+ ;
+
+ // Save end-of-block register state.
+ // First count how many, this cuts allocations in half.
+ k := 0
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ k++
+ }
+ regList := make([]endReg, 0, k)
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ regList = append(regList, endReg{r, v, s.regs[r].c})
+ }
+ s.endRegs[b.ID] = regList
+
+ if checkEnabled {
+ regValLiveSet.clear()
+ for _, x := range s.live[b.ID] {
+ regValLiveSet.add(x.ID)
+ }
+ for r := register(0); r < s.numRegs; r++ {
+ v := s.regs[r].v
+ if v == nil {
+ continue
+ }
+ if !regValLiveSet.contains(v.ID) {
+ s.f.Fatalf("val %s is in reg but not live at end of %s", v, b)
+ }
+ }
+ }
+
+ // If a value is live at the end of the block and
+ // isn't in a register, generate a use for the spill location.
+ // We need to remember this information so that
+ // the liveness analysis in stackalloc is correct.
+ for _, e := range s.live[b.ID] {
+ vi := &s.values[e.ID]
+ if vi.regs != 0 {
+ // in a register, we'll use that source for the merge.
+ continue
+ }
+ if vi.rematerializeable {
+ // we'll rematerialize during the merge.
+ continue
+ }
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("live-at-end spill for %s at %s\n", s.orig[e.ID], b)
+ }
+ spill := s.makeSpill(s.orig[e.ID], b)
+ s.spillLive[b.ID] = append(s.spillLive[b.ID], spill.ID)
+ }
+
+ // Clear any final uses.
+ // All that is left should be the pseudo-uses added for values which
+ // are live at the end of b.
+ for _, e := range s.live[b.ID] {
+ u := s.values[e.ID].uses
+ if u == nil {
+ f.Fatalf("live at end, no uses v%d", e.ID)
+ }
+ if u.next != nil {
+ f.Fatalf("live at end, too many uses v%d", e.ID)
+ }
+ s.values[e.ID].uses = nil
+ u.next = s.freeUseRecords
+ s.freeUseRecords = u
+ }
+ }
+
+ // Decide where the spills we generated will go.
+ s.placeSpills()
+
+ // Anything that didn't get a register gets a stack location here.
+ // (StoreReg, stack-based phis, inputs, ...)
+ stacklive := stackalloc(s.f, s.spillLive)
+
+ // Fix up all merge edges.
+ s.shuffle(stacklive)
+
+ // Erase any copies we never used.
+ // Also, an unused copy might be the only use of another copy,
+ // so continue erasing until we reach a fixed point.
+ for {
+ progress := false
+ for c, used := range s.copies {
+ if !used && c.Uses == 0 {
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("delete copied value %s\n", c.LongString())
+ }
+ c.resetArgs()
+ f.freeValue(c)
+ delete(s.copies, c)
+ progress = true
+ }
+ }
+ if !progress {
+ break
+ }
+ }
+
+ for _, b := range s.visitOrder {
+ i := 0
+ for _, v := range b.Values {
+ if v.Op == OpInvalid {
+ continue
+ }
+ b.Values[i] = v
+ i++
+ }
+ b.Values = b.Values[:i]
+ }
+}
+
+func (s *regAllocState) placeSpills() {
+ mustBeFirst := func(op Op) bool {
+ return op.isLoweredGetClosurePtr() || op == OpPhi || op == OpArgIntReg || op == OpArgFloatReg
+ }
+
+ // Start maps block IDs to the list of spills
+ // that go at the start of the block (but after any phis).
+ start := map[ID][]*Value{}
+ // After maps value IDs to the list of spills
+ // that go immediately after that value ID.
+ after := map[ID][]*Value{}
+
+ for i := range s.values {
+ vi := s.values[i]
+ spill := vi.spill
+ if spill == nil {
+ continue
+ }
+ if spill.Block != nil {
+ // Some spills are already fully set up,
+ // like OpArgs and stack-based phis.
+ continue
+ }
+ v := s.orig[i]
+
+ // Walk down the dominator tree looking for a good place to
+ // put the spill of v. At the start "best" is the best place
+ // we have found so far.
+ // TODO: find a way to make this O(1) without arbitrary cutoffs.
+ if v == nil {
+ panic(fmt.Errorf("nil v, s.orig[%d], vi = %v, spill = %s", i, vi, spill.LongString()))
+ }
+ best := v.Block
+ bestArg := v
+ var bestDepth int16
+ if l := s.loopnest.b2l[best.ID]; l != nil {
+ bestDepth = l.depth
+ }
+ b := best
+ const maxSpillSearch = 100
+ for i := 0; i < maxSpillSearch; i++ {
+ // Find the child of b in the dominator tree which
+ // dominates all restores.
+ p := b
+ b = nil
+ for c := s.sdom.Child(p); c != nil && i < maxSpillSearch; c, i = s.sdom.Sibling(c), i+1 {
+ if s.sdom[c.ID].entry <= vi.restoreMin && s.sdom[c.ID].exit >= vi.restoreMax {
+ // c also dominates all restores. Walk down into c.
+ b = c
+ break
+ }
+ }
+ if b == nil {
+ // Ran out of blocks which dominate all restores.
+ break
+ }
+
+ var depth int16
+ if l := s.loopnest.b2l[b.ID]; l != nil {
+ depth = l.depth
+ }
+ if depth > bestDepth {
+ // Don't push the spill into a deeper loop.
+ continue
+ }
+
+ // If v is in a register at the start of b, we can
+ // place the spill here (after the phis).
+ if len(b.Preds) == 1 {
+ for _, e := range s.endRegs[b.Preds[0].b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ } else {
+ for _, e := range s.startRegs[b.ID] {
+ if e.v == v {
+ // Found a better spot for the spill.
+ best = b
+ bestArg = e.c
+ bestDepth = depth
+ break
+ }
+ }
+ }
+ }
+
+ // Put the spill in the best block we found.
+ spill.Block = best
+ spill.AddArg(bestArg)
+ if best == v.Block && !mustBeFirst(v.Op) {
+ // Place immediately after v.
+ after[v.ID] = append(after[v.ID], spill)
+ } else {
+ // Place at the start of best block.
+ start[best.ID] = append(start[best.ID], spill)
+ }
+ }
+
+ // Insert spill instructions into the block schedules.
+ var oldSched []*Value
+ for _, b := range s.visitOrder {
+ nfirst := 0
+ for _, v := range b.Values {
+ if !mustBeFirst(v.Op) {
+ break
+ }
+ nfirst++
+ }
+ oldSched = append(oldSched[:0], b.Values[nfirst:]...)
+ b.Values = b.Values[:nfirst]
+ b.Values = append(b.Values, start[b.ID]...)
+ for _, v := range oldSched {
+ b.Values = append(b.Values, v)
+ b.Values = append(b.Values, after[v.ID]...)
+ }
+ }
+}
+
+// shuffle fixes up all the merge edges (those going into blocks of indegree > 1).
+func (s *regAllocState) shuffle(stacklive [][]ID) {
+ var e edgeState
+ e.s = s
+ e.cache = map[ID][]*Value{}
+ e.contents = map[Location]contentRecord{}
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+
+ for _, b := range s.visitOrder {
+ if len(b.Preds) <= 1 {
+ continue
+ }
+ e.b = b
+ for i, edge := range b.Preds {
+ p := edge.b
+ e.p = p
+ e.setup(i, s.endRegs[p.ID], s.startRegs[b.ID], stacklive[p.ID])
+ e.process()
+ }
+ }
+
+ if s.f.pass.debug > regDebug {
+ fmt.Printf("post shuffle %s\n", s.f.Name)
+ fmt.Println(s.f.String())
+ }
+}
+
+type edgeState struct {
+ s *regAllocState
+ p, b *Block // edge goes from p->b.
+
+ // for each pre-regalloc value, a list of equivalent cached values
+ cache map[ID][]*Value
+ cachedVals []ID // (superset of) keys of the above map, for deterministic iteration
+
+ // map from location to the value it contains
+ contents map[Location]contentRecord
+
+ // desired destination locations
+ destinations []dstRecord
+ extra []dstRecord
+
+ usedRegs regMask // registers currently holding something
+ uniqueRegs regMask // registers holding the only copy of a value
+ finalRegs regMask // registers holding final target
+ rematerializeableRegs regMask // registers that hold rematerializeable values
+}
+
+type contentRecord struct {
+ vid ID // pre-regalloc value
+ c *Value // cached value
+ final bool // this is a satisfied destination
+ pos src.XPos // source position of use of the value
+}
+
+type dstRecord struct {
+ loc Location // register or stack slot
+ vid ID // pre-regalloc value it should contain
+ splice **Value // place to store reference to the generating instruction
+ pos src.XPos // source position of use of this location
+}
+
+// setup initializes the edge state for shuffling.
+func (e *edgeState) setup(idx int, srcReg []endReg, dstReg []startReg, stacklive []ID) {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("edge %s->%s\n", e.p, e.b)
+ }
+
+ // Clear state.
+ for _, vid := range e.cachedVals {
+ delete(e.cache, vid)
+ }
+ e.cachedVals = e.cachedVals[:0]
+ for k := range e.contents {
+ delete(e.contents, k)
+ }
+ e.usedRegs = 0
+ e.uniqueRegs = 0
+ e.finalRegs = 0
+ e.rematerializeableRegs = 0
+
+ // Live registers can be sources.
+ for _, x := range srcReg {
+ e.set(&e.s.registers[x.r], x.v.ID, x.c, false, src.NoXPos) // don't care about the position of the source
+ }
+ // So can all of the spill locations.
+ for _, spillID := range stacklive {
+ v := e.s.orig[spillID]
+ spill := e.s.values[v.ID].spill
+ if !e.s.sdom.IsAncestorEq(spill.Block, e.p) {
+ // Spills were placed that only dominate the uses found
+ // during the first regalloc pass. The edge fixup code
+ // can't use a spill location if the spill doesn't dominate
+ // the edge.
+ // We are guaranteed that if the spill doesn't dominate this edge,
+ // then the value is available in a register (because we called
+ // makeSpill for every value not in a register at the start
+ // of an edge).
+ continue
+ }
+ e.set(e.s.f.getHome(spillID), v.ID, spill, false, src.NoXPos) // don't care about the position of the source
+ }
+
+ // Figure out all the destinations we need.
+ dsts := e.destinations[:0]
+ for _, x := range dstReg {
+ dsts = append(dsts, dstRecord{&e.s.registers[x.r], x.v.ID, nil, x.pos})
+ }
+ // Phis need their args to end up in a specific location.
+ for _, v := range e.b.Values {
+ if v.Op != OpPhi {
+ break
+ }
+ loc := e.s.f.getHome(v.ID)
+ if loc == nil {
+ continue
+ }
+ dsts = append(dsts, dstRecord{loc, v.Args[idx].ID, &v.Args[idx], v.Pos})
+ }
+ e.destinations = dsts
+
+ if e.s.f.pass.debug > regDebug {
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("src %s: v%d cache=%s\n", e.s.f.getHome(c.ID), vid, c)
+ }
+ }
+ for _, d := range e.destinations {
+ fmt.Printf("dst %s: v%d\n", d.loc, d.vid)
+ }
+ }
+}
+
+// process generates code to move all the values to the right destination locations.
+func (e *edgeState) process() {
+ dsts := e.destinations
+
+ // Process the destinations until they are all satisfied.
+ for len(dsts) > 0 {
+ i := 0
+ for _, d := range dsts {
+ if !e.processDest(d.loc, d.vid, d.splice, d.pos) {
+ // Failed - save for next iteration.
+ dsts[i] = d
+ i++
+ }
+ }
+ if i < len(dsts) {
+ // Made some progress. Go around again.
+ dsts = dsts[:i]
+
+ // Append any extra destinations we generated.
+ dsts = append(dsts, e.extra...)
+ e.extra = e.extra[:0]
+ continue
+ }
+
+ // We made no progress. That means that any
+ // remaining unsatisfied moves are in simple cycles.
+ // For example, A -> B -> C -> D -> A.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C
+
+ // To break the cycle, we pick an unused register, say R,
+ // and put a copy of B there.
+ // A ----> B
+ // ^ |
+ // | |
+ // | v
+ // D <---- C <---- R=copyofB
+ // When we resume the outer loop, the A->B move can now proceed,
+ // and eventually the whole cycle completes.
+
+ // Copy any cycle location to a temp register. This duplicates
+ // one of the cycle entries, allowing the just duplicated value
+ // to be overwritten and the cycle to proceed.
+ d := dsts[0]
+ loc := d.loc
+ vid := e.contents[loc].vid
+ c := e.contents[loc].c
+ r := e.findRegFor(c.Type)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("breaking cycle with v%d in %s:%s\n", vid, loc, c)
+ }
+ e.erase(r)
+ pos := d.pos.WithNotStmt()
+ if _, isReg := loc.(*Register); isReg {
+ c = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ c = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ }
+ e.set(r, vid, c, false, pos)
+ if c.Op == OpLoadReg && e.s.isGReg(register(r.(*Register).num)) {
+ e.s.f.Fatalf("process.OpLoadReg targeting g: " + c.LongString())
+ }
+ }
+}
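+
+// parallelMoveSketch is a minimal, self-contained illustration of the strategy
+// process uses above: satisfy whatever destinations are currently free, and when
+// only cycles remain (A -> B -> C -> A), evacuate one occupant to a scratch
+// location so the cycle can unwind. Values and locations are reduced to plain
+// ints, scratch is assumed to be free and not itself a destination, and the
+// allocator never calls this.
+func parallelMoveSketch(loc map[int]int, want [][2]int, scratch int) {
+ occupant := map[int]int{} // location -> value currently held there
+ for v, l := range loc {
+ occupant[l] = v
+ }
+ for len(want) > 0 {
+ rest := want[:0]
+ progress := false
+ for _, w := range want {
+ v, dst := w[0], w[1]
+ if loc[v] == dst {
+ progress = true // already in place
+ continue
+ }
+ if _, busy := occupant[dst]; busy {
+ rest = append(rest, w) // destination occupied, retry later
+ continue
+ }
+ delete(occupant, loc[v]) // move v into the free destination
+ loc[v], occupant[dst] = dst, v
+ progress = true
+ }
+ want = rest
+ if !progress && len(want) > 0 {
+ // Only cycles remain: move the occupant of one destination to the
+ // scratch location, freeing that destination for the next round.
+ dst := want[0][1]
+ v := occupant[dst]
+ delete(occupant, dst)
+ loc[v], occupant[scratch] = scratch, v
+ }
+ }
+}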
+
+// processDest generates code to put value vid into location loc. Returns true
+// if progress was made.
+func (e *edgeState) processDest(loc Location, vid ID, splice **Value, pos src.XPos) bool {
+ pos = pos.WithNotStmt()
+ occupant := e.contents[loc]
+ if occupant.vid == vid {
+ // Value is already in the correct place.
+ e.contents[loc] = contentRecord{vid, occupant.c, true, pos}
+ if splice != nil {
+ (*splice).Uses--
+ *splice = occupant.c
+ occupant.c.Uses++
+ }
+ // Note: if splice==nil then c will appear dead. This is
+ // non-SSA formed code, so be careful after this pass not to run
+ // deadcode elimination.
+ if _, ok := e.s.copies[occupant.c]; ok {
+ // The copy at occupant.c was used to avoid spill.
+ e.s.copies[occupant.c] = true
+ }
+ return true
+ }
+
+ // Check if we're allowed to clobber the destination location.
+ if len(e.cache[occupant.vid]) == 1 && !e.s.values[occupant.vid].rematerializeable {
+ // We can't overwrite the last copy
+ // of a value that needs to survive.
+ return false
+ }
+
+ // Copy from a source of v, register preferred.
+ v := e.s.orig[vid]
+ var c *Value
+ var src Location
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("moving v%d to %s\n", vid, loc)
+ fmt.Printf("sources of v%d:", vid)
+ }
+ for _, w := range e.cache[vid] {
+ h := e.s.f.getHome(w.ID)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" %s:%s", h, w)
+ }
+ _, isreg := h.(*Register)
+ if src == nil || isreg {
+ c = w
+ src = h
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ if src != nil {
+ fmt.Printf(" [use %s]\n", src)
+ } else {
+ fmt.Printf(" [no source]\n")
+ }
+ }
+ _, dstReg := loc.(*Register)
+
+ // Pre-clobber destination. This avoids the
+ // following situation:
+ // - v is currently held in R0 and stacktmp0.
+ // - We want to copy stacktmp1 to stacktmp0.
+ // - We choose R0 as the temporary register.
+ // During the copy, both R0 and stacktmp0 are
+ // clobbered, losing both copies of v. Oops!
+ // Erasing the destination early means R0 will not
+ // be chosen as the temp register, as it will then
+ // be the last copy of v.
+ e.erase(loc)
+ var x *Value
+ if c == nil || e.s.values[vid].rematerializeable {
+ if !e.s.values[vid].rematerializeable {
+ e.s.f.Fatalf("can't find source for %s->%s: %s\n", e.p, e.b, v.LongString())
+ }
+ if dstReg {
+ x = v.copyInto(e.p)
+ } else {
+ // Rematerialize into stack slot. Need a free
+ // register to accomplish this.
+ r := e.findRegFor(v.Type)
+ e.erase(r)
+ x = v.copyIntoWithXPos(e.p, pos)
+ e.set(r, vid, x, false, pos)
+ // Make sure we spill with the size of the slot, not the
+ // size of x (which might be wider due to our dropping
+ // of narrowing conversions).
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, x)
+ }
+ } else {
+ // Emit move from src to dst.
+ _, srcReg := src.(*Register)
+ if srcReg {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpCopy, c.Type, c)
+ } else {
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, c)
+ }
+ } else {
+ if dstReg {
+ x = e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ } else {
+ // mem->mem. Use temp register.
+ r := e.findRegFor(c.Type)
+ e.erase(r)
+ t := e.p.NewValue1(pos, OpLoadReg, c.Type, c)
+ e.set(r, vid, t, false, pos)
+ x = e.p.NewValue1(pos, OpStoreReg, loc.(LocalSlot).Type, t)
+ }
+ }
+ }
+ e.set(loc, vid, x, true, pos)
+ if x.Op == OpLoadReg && e.s.isGReg(register(loc.(*Register).num)) {
+ e.s.f.Fatalf("processDest.OpLoadReg targeting g: " + x.LongString())
+ }
+ if splice != nil {
+ (*splice).Uses--
+ *splice = x
+ x.Uses++
+ }
+ return true
+}
+
+// set changes the contents of location loc to hold the given value and its cached representative.
+func (e *edgeState) set(loc Location, vid ID, c *Value, final bool, pos src.XPos) {
+ e.s.f.setHome(c, loc)
+ e.contents[loc] = contentRecord{vid, c, final, pos}
+ a := e.cache[vid]
+ if len(a) == 0 {
+ e.cachedVals = append(e.cachedVals, vid)
+ }
+ a = append(a, c)
+ e.cache[vid] = a
+ if r, ok := loc.(*Register); ok {
+ if e.usedRegs&(regMask(1)<<uint(r.num)) != 0 {
+ e.s.f.Fatalf("%v is already set (v%d/%v)", r, vid, c)
+ }
+ e.usedRegs |= regMask(1) << uint(r.num)
+ if final {
+ e.finalRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ if len(a) == 2 {
+ if t, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs &^= regMask(1) << uint(t.num)
+ }
+ }
+ if e.s.values[vid].rematerializeable {
+ e.rematerializeableRegs |= regMask(1) << uint(r.num)
+ }
+ }
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("%s\n", c.LongString())
+ fmt.Printf("v%d now available in %s:%s\n", vid, loc, c)
+ }
+}
+
+// erase removes any user of loc.
+func (e *edgeState) erase(loc Location) {
+ cr := e.contents[loc]
+ if cr.c == nil {
+ return
+ }
+ vid := cr.vid
+
+ if cr.final {
+ // Add a destination to move this value back into place.
+ // Make sure it gets added to the tail of the destination queue
+ // so we make progress on other moves first.
+ e.extra = append(e.extra, dstRecord{loc, cr.vid, nil, cr.pos})
+ }
+
+ // Remove c from the list of cached values.
+ a := e.cache[vid]
+ for i, c := range a {
+ if e.s.f.getHome(c.ID) == loc {
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf("v%d no longer available in %s:%s\n", vid, loc, c)
+ }
+ a[i], a = a[len(a)-1], a[:len(a)-1]
+ break
+ }
+ }
+ e.cache[vid] = a
+
+ // Update register masks.
+ if r, ok := loc.(*Register); ok {
+ e.usedRegs &^= regMask(1) << uint(r.num)
+ if cr.final {
+ e.finalRegs &^= regMask(1) << uint(r.num)
+ }
+ e.rematerializeableRegs &^= regMask(1) << uint(r.num)
+ }
+ if len(a) == 1 {
+ if r, ok := e.s.f.getHome(a[0].ID).(*Register); ok {
+ e.uniqueRegs |= regMask(1) << uint(r.num)
+ }
+ }
+}
+
+// findRegFor finds a register we can use to make a temp copy of type typ.
+func (e *edgeState) findRegFor(typ *types.Type) Location {
+ // Which registers are possibilities.
+ types := &e.s.f.Config.Types
+ m := e.s.compatRegs(typ)
+
+ // Pick a register. In priority order:
+ // 1) an unused register
+ // 2) a non-unique register not holding a final value
+ // 3) a non-unique register
+ // 4) a register holding a rematerializeable value
+ x := m &^ e.usedRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs &^ e.finalRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m &^ e.uniqueRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+ x = m & e.rematerializeableRegs
+ if x != 0 {
+ return &e.s.registers[pickReg(x)]
+ }
+
+ // No register is available.
+ // Pick a register to spill.
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ if r, ok := e.s.f.getHome(c.ID).(*Register); ok && m>>uint(r.num)&1 != 0 {
+ if !c.rematerializeable() {
+ x := e.p.NewValue1(c.Pos, OpStoreReg, c.Type, c)
+ // Allocate a temp location to spill a register to.
+ // The type of the slot is immaterial - it will not be live across
+ // any safepoint. Just use a type big enough to hold any register.
+ t := LocalSlot{N: e.s.f.fe.Auto(c.Pos, types.Int64), Type: types.Int64}
+ // TODO: reuse these slots. They'll need to be erased first.
+ e.set(t, vid, x, false, c.Pos)
+ if e.s.f.pass.debug > regDebug {
+ fmt.Printf(" SPILL %s->%s %s\n", r, t, x.LongString())
+ }
+ }
+ // r will now be overwritten by the caller. At some point
+ // later, the newly saved value will be moved back to its
+ // final destination in processDest.
+ return r
+ }
+ }
+ }
+
+ fmt.Printf("m:%d unique:%d final:%d rematerializable:%d\n", m, e.uniqueRegs, e.finalRegs, e.rematerializeableRegs)
+ for _, vid := range e.cachedVals {
+ a := e.cache[vid]
+ for _, c := range a {
+ fmt.Printf("v%d: %s %s\n", vid, c, e.s.f.getHome(c.ID))
+ }
+ }
+ e.s.f.Fatalf("can't find empty register on edge %s->%s", e.p, e.b)
+ return nil
+}
+
+// rematerializeable reports whether the register allocator should recompute
+// a value instead of spilling/restoring it.
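+// Typical rematerializeable values are constants and SB-relative address
+// computations, which are cheaper to re-emit at each use than to spill and reload.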
+func (v *Value) rematerializeable() bool {
+ if !opcodeTable[v.Op].rematerializeable {
+ return false
+ }
+ for _, a := range v.Args {
+ // SP and SB (generated by OpSP and OpSB) are always available.
+ if a.Op != OpSP && a.Op != OpSB {
+ return false
+ }
+ }
+ return true
+}
+
+type liveInfo struct {
+ ID ID // ID of value
+ dist int32 // # of instructions before next use
+ pos src.XPos // source position of next use
+}
+
+// computeLive computes a map from block ID to a list of value IDs live at the end
+// of that block. Together with the value ID is a count of how many instructions
+// to the next use of that value. The resulting map is stored in s.live.
+// computeLive also computes the desired register information at the end of each block.
+// This desired register information is stored in s.desired.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *regAllocState) computeLive() {
+ f := s.f
+ s.live = make([][]liveInfo, f.NumBlocks())
+ s.desired = make([]desiredState, f.NumBlocks())
+ var phis []*Value
+
+ live := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(live)
+ t := f.newSparseMap(f.NumValues())
+ defer f.retSparseMap(t)
+
+ // Keep track of which value we want in each register.
+ var desired desiredState
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ // TODO: Do a better job yet. Here's one possibility:
+ // Calculate the dominator tree and locate all strongly connected components.
+ // If a value is live in one block of an SCC, it is live in all.
+ // Walk the dominator tree from end to beginning, just once, treating SCC
+ // components as single blocks, duplicating the calculated liveness information
+ // out to all of them.
+ po := f.postorder()
+ s.loopnest = f.loopnest()
+ s.loopnest.calculateDepths()
+ for {
+ changed := false
+
+ for _, b := range po {
+ // Start with known live values at the end of the block.
+ // Add len(b.Values) to adjust from end-of-block distance
+ // to beginning-of-block distance.
+ live.clear()
+ for _, e := range s.live[b.ID] {
+ live.set(e.ID, e.dist+int32(len(b.Values)), e.pos)
+ }
+
+ // Mark control values as live
+ for _, c := range b.ControlValues() {
+ if s.values[c.ID].needReg {
+ live.set(c.ID, int32(len(b.Values)), b.Pos)
+ }
+ }
+
+ // Propagate backwards to the start of the block
+ // Assumes Values have been scheduled.
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // save phi ops for later
+ phis = append(phis, v)
+ continue
+ }
+ if opcodeTable[v.Op].call {
+ c := live.contents()
+ for i := range c {
+ c[i].val += unlikelyDistance
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needReg {
+ live.set(a.ID, int32(i), v.Pos)
+ }
+ }
+ }
+ // Propagate desired registers backwards.
+ desired.copy(&s.desired[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ prefs := desired.remove(v.ID)
+ if v.Op == OpPhi {
+ // TODO: if v is a phi, save desired register for phi inputs.
+ // For now, we just drop it and don't propagate
+ // desired registers back through phi nodes.
+ continue
+ }
+ regspec := s.regspec(v)
+ // Cancel desired registers if they get clobbered.
+ desired.clobber(regspec.clobbers)
+ // Update desired registers if there are any fixed register inputs.
+ for _, j := range regspec.inputs {
+ if countRegs(j.regs) != 1 {
+ continue
+ }
+ desired.clobber(j.regs)
+ desired.add(v.Args[j.idx].ID, pickReg(j.regs))
+ }
+ // Set desired register of input 0 if this is a 2-operand instruction.
+ if opcodeTable[v.Op].resultInArg0 {
+ if opcodeTable[v.Op].commutative {
+ desired.addList(v.Args[1].ID, prefs)
+ }
+ desired.addList(v.Args[0].ID, prefs)
+ }
+ }
+
+ // For each predecessor of b, expand its list of live-at-end values.
+ // invariant: live contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ // Compute additional distance for the edge.
+ // Note: delta must be at least 1 to distinguish the control
+ // value use from the first user in a successor block.
+ delta := int32(normalDistance)
+ if len(p.Succs) == 2 {
+ if p.Succs[0].b == b && p.Likely == BranchLikely ||
+ p.Succs[1].b == b && p.Likely == BranchUnlikely {
+ delta = likelyDistance
+ }
+ if p.Succs[0].b == b && p.Likely == BranchUnlikely ||
+ p.Succs[1].b == b && p.Likely == BranchLikely {
+ delta = unlikelyDistance
+ }
+ }
+
+ // Update any desired registers at the end of p.
+ s.desired[p.ID].merge(&desired)
+
+ // Start t off with the previously known live values at the end of p.
+ t.clear()
+ for _, e := range s.live[p.ID] {
+ t.set(e.ID, e.dist, e.pos)
+ }
+ update := false
+
+ // Add new live values from scanning this block.
+ for _, e := range live.contents() {
+ d := e.val + delta
+ if !t.contains(e.key) || d < t.get(e.key) {
+ update = true
+ t.set(e.key, d, e.aux)
+ }
+ }
+ // Also add the correct arg from the saved phi values.
+ // All phis are at distance delta (we consider them
+ // simultaneously happening at the start of the block).
+ for _, v := range phis {
+ id := v.Args[i].ID
+ if s.values[id].needReg && (!t.contains(id) || delta < t.get(id)) {
+ update = true
+ t.set(id, delta, v.Pos)
+ }
+ }
+
+ if !update {
+ continue
+ }
+ // The live set has changed, update it.
+ l := s.live[p.ID][:0]
+ if cap(l) < t.size() {
+ l = make([]liveInfo, 0, t.size())
+ }
+ for _, e := range t.contents() {
+ l = append(l, liveInfo{e.key, e.val, e.aux})
+ }
+ s.live[p.ID] = l
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > regDebug {
+ fmt.Println("live values at end of each block")
+ for _, b := range f.Blocks {
+ fmt.Printf(" %s:", b)
+ for _, x := range s.live[b.ID] {
+ fmt.Printf(" v%d(%d)", x.ID, x.dist)
+ for _, e := range s.desired[b.ID].entries {
+ if e.ID != x.ID {
+ continue
+ }
+ fmt.Printf("[")
+ first := true
+ for _, r := range e.regs {
+ if r == noRegister {
+ continue
+ }
+ if !first {
+ fmt.Printf(",")
+ }
+ fmt.Print(&s.registers[r])
+ first = false
+ }
+ fmt.Printf("]")
+ }
+ }
+ if avoid := s.desired[b.ID].avoid; avoid != 0 {
+ fmt.Printf(" avoid=%v", s.RegMaskString(avoid))
+ }
+ fmt.Println()
+ }
+ }
+}
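+
+// livenessSketch is a minimal, self-contained illustration (never called by the
+// allocator) of the backward dataflow fixed point that computeLive runs: liveness
+// facts for a block flow into the live-out sets of its predecessors until nothing
+// changes. Blocks and values are plain ints here; use[b] lists the values read in
+// block b before being defined there, def[b] the values defined in b. Distances
+// and desired registers are omitted.
+func livenessSketch(succs [][]int, use, def [][]int, nvals int) [][]bool {
+ liveOut := make([][]bool, len(succs))
+ for i := range liveOut {
+ liveOut[i] = make([]bool, nvals)
+ }
+ for changed := true; changed; {
+ changed = false
+ for b := len(succs) - 1; b >= 0; b-- {
+ // live-in(b) = use(b) + (live-out(b) - def(b))
+ livein := make([]bool, nvals)
+ copy(livein, liveOut[b])
+ for _, v := range def[b] {
+ livein[v] = false
+ }
+ for _, v := range use[b] {
+ livein[v] = true
+ }
+ // Propagate live-in(b) into the live-out set of every predecessor of b.
+ for p := range succs {
+ for _, s := range succs[p] {
+ if s != b {
+ continue
+ }
+ for v := 0; v < nvals; v++ {
+ if livein[v] && !liveOut[p][v] {
+ liveOut[p][v] = true
+ changed = true
+ }
+ }
+ }
+ }
+ }
+ }
+ return liveOut
+}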
+
+// A desiredState represents desired register assignments.
+type desiredState struct {
+ // Desired assignments will be small, so we just use a list
+ // of valueID+registers entries.
+ entries []desiredStateEntry
+ // Registers that other values want to be in. This value will
+ // contain at least the union of the regs fields of entries, but
+ // may contain additional entries for values that were once in
+ // this data structure but are no longer.
+ avoid regMask
+}
+type desiredStateEntry struct {
+ // (pre-regalloc) value
+ ID ID
+ // Registers it would like to be in, in priority order.
+ // Unused slots are filled with noRegister.
+ regs [4]register
+}
+
+func (d *desiredState) clear() {
+ d.entries = d.entries[:0]
+ d.avoid = 0
+}
+
+// get returns a list of desired registers for value vid.
+func (d *desiredState) get(vid ID) [4]register {
+ for _, e := range d.entries {
+ if e.ID == vid {
+ return e.regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// add records that we'd like value vid to be in register r.
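+// The entry's regs list is kept in priority order: for example, add(v, R1)
+// followed by add(v, R2) leaves the list as [R2, R1, -, -], and a later
+// add(v, R1) moves R1 back to the front, giving [R1, R2, -, -].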
+func (d *desiredState) add(vid ID, r register) {
+ d.avoid |= regMask(1) << r
+ for i := range d.entries {
+ e := &d.entries[i]
+ if e.ID != vid {
+ continue
+ }
+ if e.regs[0] == r {
+ // Already known and highest priority
+ return
+ }
+ for j := 1; j < len(e.regs); j++ {
+ if e.regs[j] == r {
+ // Move from lower priority to top priority
+ copy(e.regs[1:], e.regs[:j])
+ e.regs[0] = r
+ return
+ }
+ }
+ copy(e.regs[1:], e.regs[:])
+ e.regs[0] = r
+ return
+ }
+ d.entries = append(d.entries, desiredStateEntry{vid, [4]register{r, noRegister, noRegister, noRegister}})
+}
+
+func (d *desiredState) addList(vid ID, regs [4]register) {
+ // regs is in priority order, so iterate in reverse order.
+ for i := len(regs) - 1; i >= 0; i-- {
+ r := regs[i]
+ if r != noRegister {
+ d.add(vid, r)
+ }
+ }
+}
+
+// clobber erases any desired registers in the set m.
+func (d *desiredState) clobber(m regMask) {
+ for i := 0; i < len(d.entries); {
+ e := &d.entries[i]
+ j := 0
+ for _, r := range e.regs {
+ if r != noRegister && m>>r&1 == 0 {
+ e.regs[j] = r
+ j++
+ }
+ }
+ if j == 0 {
+ // No more desired registers for this value.
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ continue
+ }
+ for ; j < len(e.regs); j++ {
+ e.regs[j] = noRegister
+ }
+ i++
+ }
+ d.avoid &^= m
+}
+
+// copy copies a desired state from another desiredState x.
+func (d *desiredState) copy(x *desiredState) {
+ d.entries = append(d.entries[:0], x.entries...)
+ d.avoid = x.avoid
+}
+
+// remove removes the desired registers for vid and returns them.
+func (d *desiredState) remove(vid ID) [4]register {
+ for i := range d.entries {
+ if d.entries[i].ID == vid {
+ regs := d.entries[i].regs
+ d.entries[i] = d.entries[len(d.entries)-1]
+ d.entries = d.entries[:len(d.entries)-1]
+ return regs
+ }
+ }
+ return [4]register{noRegister, noRegister, noRegister, noRegister}
+}
+
+// merge merges another desired state x into d.
+func (d *desiredState) merge(x *desiredState) {
+ d.avoid |= x.avoid
+ // There should only be a few desired registers, so
+ // linear insert is ok.
+ for _, e := range x.entries {
+ d.addList(e.ID, e.regs)
+ }
+}
+
+func min32(x, y int32) int32 {
+ if x < y {
+ return x
+ }
+ return y
+}
+func max32(x, y int32) int32 {
+ if x > y {
+ return x
+ }
+ return y
+}
diff --git a/src/cmd/compile/internal/ssa/regalloc_test.go b/src/cmd/compile/internal/ssa/regalloc_test.go
new file mode 100644
index 0000000..d990cac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/regalloc_test.go
@@ -0,0 +1,230 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "testing"
+)
+
+func TestLiveControlOps(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpAMD64MOVLconst, c.config.Types.Int8, 1, nil),
+ Valu("y", OpAMD64MOVLconst, c.config.Types.Int8, 2, nil),
+ Valu("a", OpAMD64TESTB, types.TypeFlags, 0, nil, "x", "y"),
+ Valu("b", OpAMD64TESTB, types.TypeFlags, 0, nil, "y", "x"),
+ Eq("a", "if", "exit"),
+ ),
+ Bloc("if",
+ Eq("b", "plain", "exit"),
+ ),
+ Bloc("plain",
+ Goto("exit"),
+ ),
+ Bloc("exit",
+ Exit("mem"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+}
+
+// Test to make sure G register is never reloaded from spill (spill of G is okay)
+// See #25504
+func TestNoGetgLoadReg(t *testing.T) {
+ /*
+ Original:
+ func fff3(i int) *g {
+ gee := getg()
+ if i == 0 {
+ fff()
+ }
+ return gee // here
+ }
+ */
+ c := testConfigARM64(t)
+ f := c.Fun("b1",
+ Bloc("b1",
+ Valu("v1", OpInitMem, types.TypeMem, 0, nil),
+ Valu("v6", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("v8", OpGetG, c.config.Types.Int64.PtrTo(), 0, nil, "v1"),
+ Valu("v11", OpARM64CMPconst, types.TypeFlags, 0, nil, "v6"),
+ Eq("v11", "b2", "b4"),
+ ),
+ Bloc("b4",
+ Goto("b3"),
+ ),
+ Bloc("b3",
+ Valu("v14", OpPhi, types.TypeMem, 0, nil, "v1", "v12"),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("v16", OpARM64MOVDstore, types.TypeMem, 0, nil, "v8", "sb", "v14"),
+ Exit("v16"),
+ ),
+ Bloc("b2",
+ Valu("v12", OpARM64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "v1"),
+ Goto("b3"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Double-check that we never restore to the G register. Regalloc should catch it, but check again anyway.
+ r := f.f.RegAlloc
+ for _, b := range f.blocks {
+ for _, v := range b.Values {
+ if v.Op == OpLoadReg && r[v.ID].String() == "g" {
+ t.Errorf("Saw OpLoadReg targeting g register: %s", v.LongString())
+ }
+ }
+ }
+}
+
+// Test to make sure we don't push spills into loops.
+// See issue #19595.
+func TestSpillWithLoop(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("cond", OpArg, c.config.Types.Bool, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Bool)),
+ Valu("ld", OpAMD64MOVQload, c.config.Types.Int64, 0, nil, "ptr", "mem"), // this value needs a spill
+ Goto("loop"),
+ ),
+ Bloc("loop",
+ Valu("memphi", OpPhi, types.TypeMem, 0, nil, "mem", "call"),
+ Valu("call", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "memphi"),
+ Valu("test", OpAMD64CMPBconst, types.TypeFlags, 0, nil, "cond"),
+ Eq("test", "next", "exit"),
+ ),
+ Bloc("next",
+ Goto("loop"),
+ ),
+ Bloc("exit",
+ Valu("store", OpAMD64MOVQstore, types.TypeMem, 0, nil, "ptr", "ld", "call"),
+ Exit("store"),
+ ),
+ )
+ regalloc(f.f)
+ checkFunc(f.f)
+ for _, v := range f.blocks["loop"].Values {
+ if v.Op == OpStoreReg {
+ t.Errorf("spill inside loop %s", v.LongString())
+ }
+ }
+}
+
+func TestSpillMove1(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store before call, y is available in a register
+ Valu("mem2", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem"),
+ Valu("mem3", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // Spill should be moved to exit2.
+ if numSpills(f.blocks["loop1"]) != 0 {
+ t.Errorf("spill present from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 1 {
+ t.Errorf("spill missing in exit2")
+ }
+
+}
+
+func TestSpillMove2(t *testing.T) {
+ c := testConfig(t)
+ f := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("x", OpArg, c.config.Types.Int64, 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64)),
+ Valu("p", OpArg, c.config.Types.Int64.PtrTo(), 0, c.Frontend().Auto(src.NoXPos, c.config.Types.Int64.PtrTo())),
+ Valu("a", OpAMD64TESTQ, types.TypeFlags, 0, nil, "x", "x"),
+ Goto("loop1"),
+ ),
+ Bloc("loop1",
+ Valu("y", OpAMD64MULQ, c.config.Types.Int64, 0, nil, "x", "x"),
+ Eq("a", "loop2", "exit1"),
+ ),
+ Bloc("loop2",
+ Eq("a", "loop1", "exit2"),
+ ),
+ Bloc("exit1",
+ // store after call, y must be loaded from a spill location
+ Valu("mem2", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem3", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem2"),
+ Exit("mem3"),
+ ),
+ Bloc("exit2",
+ // store after call, y must be loaded from a spill location
+ Valu("mem4", OpAMD64CALLstatic, types.TypeMem, 0, AuxCallLSym("_"), "mem"),
+ Valu("mem5", OpAMD64MOVQstore, types.TypeMem, 0, nil, "p", "y", "mem4"),
+ Exit("mem5"),
+ ),
+ )
+ flagalloc(f.f)
+ regalloc(f.f)
+ checkFunc(f.f)
+ // There should be a spill in loop1, and nowhere else.
+ // TODO: resurrect moving spills out of loops? We could put spills at the start of both exit1 and exit2.
+ if numSpills(f.blocks["loop1"]) != 1 {
+ t.Errorf("spill missing from loop1")
+ }
+ if numSpills(f.blocks["loop2"]) != 0 {
+ t.Errorf("spill present in loop2")
+ }
+ if numSpills(f.blocks["exit1"]) != 0 {
+ t.Errorf("spill present in exit1")
+ }
+ if numSpills(f.blocks["exit2"]) != 0 {
+ t.Errorf("spill present in exit2")
+ }
+
+}
+
+func numSpills(b *Block) int {
+ n := 0
+ for _, v := range b.Values {
+ if v.Op == OpStoreReg {
+ n++
+ }
+ }
+ return n
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
new file mode 100644
index 0000000..70cd4c5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite.go
@@ -0,0 +1,1963 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/s390x"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "encoding/binary"
+ "fmt"
+ "io"
+ "math"
+ "math/bits"
+ "os"
+ "path/filepath"
+)
+
+type deadValueChoice bool
+
+const (
+ leaveDeadValues deadValueChoice = false
+ removeDeadValues = true
+)
+
+// applyRewrite repeatedly applies the block rewriter rb and the value rewriter rv
+// to f until no further rewrites fire. The deadcode argument indicates whether the
+// rewriter should also try to remove any values that become dead.
+func applyRewrite(f *Func, rb blockRewriter, rv valueRewriter, deadcode deadValueChoice) {
+ // repeat rewrites until we find no more rewrites
+ pendingLines := f.cachedLineStarts // Holds statement boundaries that need to be moved to a new value/block
+ pendingLines.clear()
+ debug := f.pass.debug
+ if debug > 1 {
+ fmt.Printf("%s: rewriting for %s\n", f.pass.name, f.Name)
+ }
+ var iters int
+ var states map[string]bool
+ for {
+ change := false
+ deadChange := false
+ for _, b := range f.Blocks {
+ var b0 *Block
+ if debug > 1 {
+ b0 = new(Block)
+ *b0 = *b
+ b0.Succs = append([]Edge{}, b.Succs...) // make a new copy, not aliasing
+ }
+ for i, c := range b.ControlValues() {
+ for c.Op == OpCopy {
+ c = c.Args[0]
+ b.ReplaceControl(i, c)
+ }
+ }
+ if rb(b) {
+ change = true
+ if debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", b0.LongString(), b.LongString())
+ }
+ }
+ for j, v := range b.Values {
+ var v0 *Value
+ if debug > 1 {
+ v0 = new(Value)
+ *v0 = *v
+ v0.Args = append([]*Value{}, v.Args...) // make a new copy, not aliasing
+ }
+ if v.Uses == 0 && v.removeable() {
+ if v.Op != OpInvalid && deadcode == removeDeadValues {
+ // Reset any values that are now unused, so that we decrement
+ // the use count of all of its arguments.
+ // Not quite a deadcode pass, because it does not handle cycles.
+ // But it should help Uses==1 rules to fire.
+ v.reset(OpInvalid)
+ deadChange = true
+ }
+ // No point rewriting values which aren't used.
+ continue
+ }
+
+ vchange := phielimValue(v)
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // Eliminate copy inputs.
+ // If any copy input becomes unused, mark it
+ // as invalid and discard its argument. Repeat
+ // recursively on the discarded argument.
+ // This phase helps remove phantom "dead copy" uses
+ // of a value so that a x.Uses==1 rule condition
+ // fires reliably.
+ for i, a := range v.Args {
+ if a.Op != OpCopy {
+ continue
+ }
+ aa := copySource(a)
+ v.SetArg(i, aa)
+ // If a, a copy, has a line boundary indicator, attempt to find a new value
+ // to hold it. The first candidate is the value that will replace a (aa),
+ // if it shares the same block and line and is eligible.
+ // The second option is v, which has a as an input. Because aa is earlier in
+ // the data flow, it is the better choice.
+ if a.Pos.IsStmt() == src.PosIsStmt {
+ if aa.Block == a.Block && aa.Pos.Line() == a.Pos.Line() && aa.Pos.IsStmt() != src.PosNotStmt {
+ aa.Pos = aa.Pos.WithIsStmt()
+ } else if v.Block == a.Block && v.Pos.Line() == a.Pos.Line() && v.Pos.IsStmt() != src.PosNotStmt {
+ v.Pos = v.Pos.WithIsStmt()
+ } else {
+ // Record the lost line and look for a new home after all rewrites are complete.
+ // TODO: it's possible (in FOR loops, in particular) for statement boundaries for the same
+ // line to appear in more than one block, but only one block is stored, so if both end
+ // up here, then one will be lost.
+ pendingLines.set(a.Pos, int32(a.Block.ID))
+ }
+ a.Pos = a.Pos.WithNotStmt()
+ }
+ vchange = true
+ for a.Uses == 0 {
+ b := a.Args[0]
+ a.reset(OpInvalid)
+ a = b
+ }
+ }
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+
+ // apply rewrite function
+ if rv(v) {
+ vchange = true
+ // If value changed to a poor choice for a statement boundary, move the boundary
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ if k := nextGoodStatementIndex(v, j, b); k != j {
+ v.Pos = v.Pos.WithNotStmt()
+ b.Values[k].Pos = b.Values[k].Pos.WithIsStmt()
+ }
+ }
+ }
+
+ change = change || vchange
+ if vchange && debug > 1 {
+ fmt.Printf("rewriting %s -> %s\n", v0.LongString(), v.LongString())
+ }
+ }
+ }
+ if !change && !deadChange {
+ break
+ }
+ iters++
+ if (iters > 1000 || debug >= 2) && change {
+ // We've done a suspiciously large number of rewrites (or we're in debug mode).
+ // As of Sep 2021, 90% of rewrites complete in 4 iterations or fewer
+ // and the maximum value encountered during make.bash is 12.
+ // Start checking for cycles. (This is too expensive to do routinely.)
+ // Note: we avoid this path for deadChange-only iterations, to fix #51639.
+ if states == nil {
+ states = make(map[string]bool)
+ }
+ h := f.rewriteHash()
+ if _, ok := states[h]; ok {
+ // We've found a cycle.
+ // To diagnose it, set debug to 2 and start again,
+ // so that we'll print all rules applied until we complete another cycle.
+ // If debug is already >= 2, we've already done that, so it's time to crash.
+ if debug < 2 {
+ debug = 2
+ states = make(map[string]bool)
+ } else {
+ f.Fatalf("rewrite cycle detected")
+ }
+ }
+ states[h] = true
+ }
+ }
+ // remove clobbered values
+ for _, b := range f.Blocks {
+ j := 0
+ for i, v := range b.Values {
+ vl := v.Pos
+ if v.Op == OpInvalid {
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ pendingLines.set(vl, int32(b.ID))
+ }
+ f.freeValue(v)
+ continue
+ }
+ if v.Pos.IsStmt() != src.PosNotStmt && !notStmtBoundary(v.Op) && pendingLines.get(vl) == int32(b.ID) {
+ pendingLines.remove(vl)
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ if i != j {
+ b.Values[j] = v
+ }
+ j++
+ }
+ if pendingLines.get(b.Pos) == int32(b.ID) {
+ b.Pos = b.Pos.WithIsStmt()
+ pendingLines.remove(b.Pos)
+ }
+ b.truncateValues(j)
+ }
+}
+
+// Common functions called from rewriting rules
+
+func is64BitFloat(t *types.Type) bool {
+ return t.Size() == 8 && t.IsFloat()
+}
+
+func is32BitFloat(t *types.Type) bool {
+ return t.Size() == 4 && t.IsFloat()
+}
+
+func is64BitInt(t *types.Type) bool {
+ return t.Size() == 8 && t.IsInteger()
+}
+
+func is32BitInt(t *types.Type) bool {
+ return t.Size() == 4 && t.IsInteger()
+}
+
+func is16BitInt(t *types.Type) bool {
+ return t.Size() == 2 && t.IsInteger()
+}
+
+func is8BitInt(t *types.Type) bool {
+ return t.Size() == 1 && t.IsInteger()
+}
+
+func isPtr(t *types.Type) bool {
+ return t.IsPtrShaped()
+}
+
+func isSigned(t *types.Type) bool {
+ return t.IsSigned()
+}
+
+// mergeSym merges two symbolic offsets. There is no real merging of
+// offsets, we just pick the non-nil one.
+func mergeSym(x, y Sym) Sym {
+ if x == nil {
+ return y
+ }
+ if y == nil {
+ return x
+ }
+ panic(fmt.Sprintf("mergeSym with two non-nil syms %v %v", x, y))
+}
+
+func canMergeSym(x, y Sym) bool {
+ return x == nil || y == nil
+}
+
+// canMergeLoadClobber reports whether the load can be merged into target without
+// invalidating the schedule.
+// It also checks that the other non-load argument x is something we
+// are ok with clobbering.
+func canMergeLoadClobber(target, load, x *Value) bool {
+ // The register containing x is going to get clobbered.
+ // Don't merge if we still need the value of x.
+ // We don't have liveness information here, but we can
+ // approximate x dying with:
+ // 1) target is x's only use.
+ // 2) target is not in a deeper loop than x.
+ if x.Uses != 1 {
+ return false
+ }
+ loopnest := x.Block.Func.loopnest()
+ loopnest.calculateDepths()
+ if loopnest.depth(target.Block.ID) > loopnest.depth(x.Block.ID) {
+ return false
+ }
+ return canMergeLoad(target, load)
+}
+
+// canMergeLoad reports whether the load can be merged into target without
+// invalidating the schedule.
+func canMergeLoad(target, load *Value) bool {
+ if target.Block.ID != load.Block.ID {
+ // If the load is in a different block do not merge it.
+ return false
+ }
+
+ // We can't merge the load into the target if the load
+ // has more than one use.
+ if load.Uses != 1 {
+ return false
+ }
+
+ mem := load.MemoryArg()
+
+ // We need the load's memory arg to still be alive at target. That
+ // can't be the case if one of target's args depends on a memory
+ // state that is a successor of load's memory arg.
+ //
+ // For example, it would be invalid to merge load into target in
+ // the following situation because newmem has killed oldmem
+ // before target is reached:
+ // load = read ... oldmem
+ // newmem = write ... oldmem
+ // arg0 = read ... newmem
+ // target = add arg0 load
+ //
+ // If the argument comes from a different block then we can exclude
+ // it immediately because it must dominate load (which is in the
+ // same block as target).
+ var args []*Value
+ for _, a := range target.Args {
+ if a != load && a.Block.ID == target.Block.ID {
+ args = append(args, a)
+ }
+ }
+
+ // memPreds contains memory states known to be predecessors of load's
+ // memory state. It is lazily initialized.
+ var memPreds map[*Value]bool
+ for i := 0; len(args) > 0; i++ {
+ const limit = 100
+ if i >= limit {
+ // Give up if we have done a lot of iterations.
+ return false
+ }
+ v := args[len(args)-1]
+ args = args[:len(args)-1]
+ if target.Block.ID != v.Block.ID {
+ // Since target and load are in the same block
+ // we can stop searching when we leave the block.
+ continue
+ }
+ if v.Op == OpPhi {
+ // A Phi implies we have reached the top of the block.
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ continue
+ }
+ if v.Type.IsTuple() && v.Type.FieldType(1).IsMemory() {
+ // We could handle this situation however it is likely
+ // to be very rare.
+ return false
+ }
+ if v.Op.SymEffect()&SymAddr != 0 {
+ // This case prevents an operation that calculates the
+ // address of a local variable from being forced to schedule
+ // before its corresponding VarDef.
+ // See issue 28445.
+ // v1 = LOAD ...
+ // v2 = VARDEF
+ // v3 = LEAQ
+ // v4 = CMPQ v1 v3
+ // We don't want to combine the CMPQ with the load, because
+ // that would force the CMPQ to schedule before the VARDEF, which
+ // in turn requires the LEAQ to schedule before the VARDEF.
+ return false
+ }
+ if v.Type.IsMemory() {
+ if memPreds == nil {
+ // Initialise a map containing memory states
+ // known to be predecessors of load's memory
+ // state.
+ memPreds = make(map[*Value]bool)
+ m := mem
+ const limit = 50
+ for i := 0; i < limit; i++ {
+ if m.Op == OpPhi {
+ // The memory phi, if it exists, is always
+ // the first logical store in the block.
+ break
+ }
+ if m.Block.ID != target.Block.ID {
+ break
+ }
+ if !m.Type.IsMemory() {
+ break
+ }
+ memPreds[m] = true
+ if len(m.Args) == 0 {
+ break
+ }
+ m = m.MemoryArg()
+ }
+ }
+
+ // We can merge if v is a predecessor of mem.
+ //
+ // For example, we can merge load into target in the
+ // following scenario:
+ // x = read ... v
+ // mem = write ... v
+ // load = read ... mem
+ // target = add x load
+ if memPreds[v] {
+ continue
+ }
+ return false
+ }
+ if len(v.Args) > 0 && v.Args[len(v.Args)-1] == mem {
+ // If v takes mem as an input then we know mem
+ // is valid at this point.
+ continue
+ }
+ for _, a := range v.Args {
+ if target.Block.ID == a.Block.ID {
+ args = append(args, a)
+ }
+ }
+ }
+
+ return true
+}
+
+// isSameCall reports whether sym is the same as the given named symbol
+func isSameCall(sym interface{}, name string) bool {
+ fn := sym.(*AuxCall).Fn
+ return fn != nil && fn.String() == name
+}
+
+// canLoadUnaligned reports whether the architecture supports unaligned load operations.
+func canLoadUnaligned(c *Config) bool {
+ return c.ctxt.Arch.Alignment == 1
+}
+
+// nlzX returns the number of leading zeros.
+func nlz64(x int64) int { return bits.LeadingZeros64(uint64(x)) }
+func nlz32(x int32) int { return bits.LeadingZeros32(uint32(x)) }
+func nlz16(x int16) int { return bits.LeadingZeros16(uint16(x)) }
+func nlz8(x int8) int { return bits.LeadingZeros8(uint8(x)) }
+
+// ntzX returns the number of trailing zeros.
+func ntz64(x int64) int { return bits.TrailingZeros64(uint64(x)) }
+func ntz32(x int32) int { return bits.TrailingZeros32(uint32(x)) }
+func ntz16(x int16) int { return bits.TrailingZeros16(uint16(x)) }
+func ntz8(x int8) int { return bits.TrailingZeros8(uint8(x)) }
+
+func oneBit(x int64) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit8(x int8) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit16(x int16) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit32(x int32) bool { return x&(x-1) == 0 && x != 0 }
+func oneBit64(x int64) bool { return x&(x-1) == 0 && x != 0 }
+
+// nto returns the number of trailing ones.
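+// For example, nto(0b0111) == 3 and nto(0) == 0.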
+func nto(x int64) int64 {
+ return int64(ntz64(^x))
+}
+
+// logX returns the base-2 logarithm of n.
+// n must be a positive power of 2 (isPowerOfTwoX returns true).
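+// For example, log8(8) == 3 and log64(1) == 0.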
+func log8(n int8) int64 {
+ return int64(bits.Len8(uint8(n))) - 1
+}
+func log16(n int16) int64 {
+ return int64(bits.Len16(uint16(n))) - 1
+}
+func log32(n int32) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+func log64(n int64) int64 {
+ return int64(bits.Len64(uint64(n))) - 1
+}
+
+// log2uint32 returns the base-2 logarithm of uint32(n), with log2(0) = -1.
+// Rounds down.
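+// For example, log2uint32(5) == 2 and log2uint32(0) == -1.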
+func log2uint32(n int64) int64 {
+ return int64(bits.Len32(uint32(n))) - 1
+}
+
+// isPowerOfTwo functions report whether n is a power of 2.
+func isPowerOfTwo8(n int8) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo16(n int16) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo32(n int32) bool {
+ return n > 0 && n&(n-1) == 0
+}
+func isPowerOfTwo64(n int64) bool {
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint64PowerOfTwo reports whether uint64(n) is a power of 2.
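+// For example, isUint64PowerOfTwo(-1<<63) is true (the bit pattern is 1<<63),
+// even though isPowerOfTwo64(-1<<63) is false because the signed value is negative.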
+func isUint64PowerOfTwo(in int64) bool {
+ n := uint64(in)
+ return n > 0 && n&(n-1) == 0
+}
+
+// isUint32PowerOfTwo reports whether uint32(n) is a power of 2.
+func isUint32PowerOfTwo(in int64) bool {
+ n := uint64(uint32(in))
+ return n > 0 && n&(n-1) == 0
+}
+
+// is32Bit reports whether n can be represented as a signed 32 bit integer.
+func is32Bit(n int64) bool {
+ return n == int64(int32(n))
+}
+
+// is16Bit reports whether n can be represented as a signed 16 bit integer.
+func is16Bit(n int64) bool {
+ return n == int64(int16(n))
+}
+
+// is8Bit reports whether n can be represented as a signed 8 bit integer.
+func is8Bit(n int64) bool {
+ return n == int64(int8(n))
+}
+
+// isU8Bit reports whether n can be represented as an unsigned 8 bit integer.
+func isU8Bit(n int64) bool {
+ return n == int64(uint8(n))
+}
+
+// isU12Bit reports whether n can be represented as an unsigned 12 bit integer.
+func isU12Bit(n int64) bool {
+ return 0 <= n && n < (1<<12)
+}
+
+// isU16Bit reports whether n can be represented as an unsigned 16 bit integer.
+func isU16Bit(n int64) bool {
+ return n == int64(uint16(n))
+}
+
+// isU32Bit reports whether n can be represented as an unsigned 32 bit integer.
+func isU32Bit(n int64) bool {
+ return n == int64(uint32(n))
+}
+
+// is20Bit reports whether n can be represented as a signed 20 bit integer.
+func is20Bit(n int64) bool {
+ return -(1<<19) <= n && n < (1<<19)
+}
+
+// b2i translates a boolean value to 0 or 1 for assigning to auxInt.
+func b2i(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// b2i32 translates a boolean value to 0 or 1.
+func b2i32(b bool) int32 {
+ if b {
+ return 1
+ }
+ return 0
+}
+
+// shiftIsBounded reports whether (left/right) shift Value v is known to be bounded.
+// A shift is bounded if it is shifting by less than the width of the shifted value.
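+// The bounded flag lives in v.AuxInt; it is set by an earlier pass (prove) when
+// that pass can show the shift amount is smaller than the operand width.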
+func shiftIsBounded(v *Value) bool {
+ return v.AuxInt != 0
+}
+
+// canonLessThan reports whether x is "ordered" less than y, for purposes of normalizing
+// generated code as much as possible.
+func canonLessThan(x, y *Value) bool {
+ if x.Op != y.Op {
+ return x.Op < y.Op
+ }
+ if !x.Pos.SameFileAndLine(y.Pos) {
+ return x.Pos.Before(y.Pos)
+ }
+ return x.ID < y.ID
+}
+
+// truncate64Fto32F converts a float64 value to a float32 preserving the bit pattern
+// of the mantissa. It will panic if the truncation results in lost information.
+func truncate64Fto32F(f float64) float32 {
+ if !isExactFloat32(f) {
+ panic("truncate64Fto32F: truncation is not exact")
+ }
+ if !math.IsNaN(f) {
+ return float32(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := math.Float64bits(f)
+ m := b & ((1 << 52) - 1) // mantissa (a.k.a. significand)
+ // | sign | exponent | mantissa |
+ r := uint32(((b >> 32) & (1 << 31)) | 0x7f800000 | (m >> (52 - 23)))
+ return math.Float32frombits(r)
+}
+
+// extend32Fto64F converts a float32 value to a float64 value preserving the bit
+// pattern of the mantissa.
+func extend32Fto64F(f float32) float64 {
+ if !math.IsNaN(float64(f)) {
+ return float64(f)
+ }
+ // NaN bit patterns aren't necessarily preserved across conversion
+ // instructions so we need to do the conversion manually.
+ b := uint64(math.Float32bits(f))
+ // | sign | exponent | mantissa |
+ r := ((b << 32) & (1 << 63)) | (0x7ff << 52) | ((b & 0x7fffff) << (52 - 23))
+ return math.Float64frombits(r)
+}
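+
+// As a concrete example of the NaN handling above: the float32 NaN with bits
+// 0x7fc00001 extends to the float64 with bits 0x7ff8000020000000 (the sign and
+// the 23 mantissa bits land in the top of the 52-bit mantissa), and truncating
+// that value back yields the original 0x7fc00001 bit pattern.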
+
+// DivisionNeedsFixUp reports whether the division needs fix-up code.
+func DivisionNeedsFixUp(v *Value) bool {
+ return v.AuxInt == 0
+}
+
+// auxFrom64F encodes a float64 value so it can be stored in an AuxInt.
+func auxFrom64F(f float64) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(f))
+}
+
+// auxFrom32F encodes a float32 value so it can be stored in an AuxInt.
+func auxFrom32F(f float32) int64 {
+ if f != f {
+ panic("can't encode a NaN in AuxInt field")
+ }
+ return int64(math.Float64bits(extend32Fto64F(f)))
+}
+
+// auxTo32F decodes a float32 from the AuxInt value provided.
+func auxTo32F(i int64) float32 {
+ return truncate64Fto32F(math.Float64frombits(uint64(i)))
+}
+
+// auxTo64F decodes a float64 from the AuxInt value provided.
+func auxTo64F(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+
+func auxIntToBool(i int64) bool {
+ if i == 0 {
+ return false
+ }
+ return true
+}
+func auxIntToInt8(i int64) int8 {
+ return int8(i)
+}
+func auxIntToInt16(i int64) int16 {
+ return int16(i)
+}
+func auxIntToInt32(i int64) int32 {
+ return int32(i)
+}
+func auxIntToInt64(i int64) int64 {
+ return i
+}
+func auxIntToUint8(i int64) uint8 {
+ return uint8(i)
+}
+func auxIntToFloat32(i int64) float32 {
+ return float32(math.Float64frombits(uint64(i)))
+}
+func auxIntToFloat64(i int64) float64 {
+ return math.Float64frombits(uint64(i))
+}
+func auxIntToValAndOff(i int64) ValAndOff {
+ return ValAndOff(i)
+}
+func auxIntToArm64BitField(i int64) arm64BitField {
+ return arm64BitField(i)
+}
+func auxIntToInt128(x int64) int128 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func auxIntToFlagConstant(x int64) flagConstant {
+ return flagConstant(x)
+}
+
+func auxIntToOp(cc int64) Op {
+ return Op(cc)
+}
+
+func boolToAuxInt(b bool) int64 {
+ if b {
+ return 1
+ }
+ return 0
+}
+func int8ToAuxInt(i int8) int64 {
+ return int64(i)
+}
+func int16ToAuxInt(i int16) int64 {
+ return int64(i)
+}
+func int32ToAuxInt(i int32) int64 {
+ return int64(i)
+}
+func int64ToAuxInt(i int64) int64 {
+ return int64(i)
+}
+func uint8ToAuxInt(i uint8) int64 {
+ return int64(int8(i))
+}
+func float32ToAuxInt(f float32) int64 {
+ return int64(math.Float64bits(float64(f)))
+}
+func float64ToAuxInt(f float64) int64 {
+ return int64(math.Float64bits(f))
+}
+func valAndOffToAuxInt(v ValAndOff) int64 {
+ return int64(v)
+}
+func arm64BitFieldToAuxInt(v arm64BitField) int64 {
+ return int64(v)
+}
+func int128ToAuxInt(x int128) int64 {
+ if x != 0 {
+ panic("nonzero int128 not allowed")
+ }
+ return 0
+}
+func flagConstantToAuxInt(x flagConstant) int64 {
+ return int64(x)
+}
+
+func opToAuxInt(o Op) int64 {
+ return int64(o)
+}
+
+// Aux is an interface to hold miscellaneous data in Blocks and Values.
+type Aux interface {
+ CanBeAnSSAAux()
+}
+
+// stringAux wraps string values for use in Aux.
+type stringAux string
+
+func (stringAux) CanBeAnSSAAux() {}
+
+func auxToString(i Aux) string {
+ return string(i.(stringAux))
+}
+func auxToSym(i Aux) Sym {
+ // TODO: kind of a hack - allows nil interface through
+ s, _ := i.(Sym)
+ return s
+}
+func auxToType(i Aux) *types.Type {
+ return i.(*types.Type)
+}
+func auxToCall(i Aux) *AuxCall {
+ return i.(*AuxCall)
+}
+func auxToS390xCCMask(i Aux) s390x.CCMask {
+ return i.(s390x.CCMask)
+}
+func auxToS390xRotateParams(i Aux) s390x.RotateParams {
+ return i.(s390x.RotateParams)
+}
+
+func StringToAux(s string) Aux {
+ return stringAux(s)
+}
+func symToAux(s Sym) Aux {
+ return s
+}
+func callToAux(s *AuxCall) Aux {
+ return s
+}
+func typeToAux(t *types.Type) Aux {
+ return t
+}
+func s390xCCMaskToAux(c s390x.CCMask) Aux {
+ return c
+}
+func s390xRotateParamsToAux(r s390x.RotateParams) Aux {
+ return r
+}
+
+// uaddOvf reports whether unsigned a+b would overflow.
+func uaddOvf(a, b int64) bool {
+ return uint64(a)+uint64(b) < uint64(a)
+}
+
+// loadLSymOffset simulates reading a word at an offset into a
+// read-only symbol's runtime memory. If it would read a pointer to
+// another symbol, that symbol is returned. Otherwise, it returns nil.
+func loadLSymOffset(lsym *obj.LSym, offset int64) *obj.LSym {
+ if lsym.Type != objabi.SRODATA {
+ return nil
+ }
+
+ for _, r := range lsym.R {
+ if int64(r.Off) == offset && r.Type&^objabi.R_WEAK == objabi.R_ADDR && r.Add == 0 {
+ return r.Sym
+ }
+ }
+
+ return nil
+}
+
+// devirtLESym de-virtualizes an InterLECall.
+// 'sym' is the symbol for the itab.
+func devirtLESym(v *Value, aux Aux, sym Sym, offset int64) *obj.LSym {
+ n, ok := sym.(*obj.LSym)
+ if !ok {
+ return nil
+ }
+
+ lsym := loadLSymOffset(n, offset)
+ if f := v.Block.Func; f.pass.debug > 0 {
+ if lsym != nil {
+ f.Warnl(v.Pos, "de-virtualizing call")
+ } else {
+ f.Warnl(v.Pos, "couldn't de-virtualize call")
+ }
+ }
+ return lsym
+}
+
+func devirtLECall(v *Value, sym *obj.LSym) *Value {
+ v.Op = OpStaticLECall
+ auxcall := v.Aux.(*AuxCall)
+ auxcall.Fn = sym
+ // Remove first arg
+ v.Args[0].Uses--
+ copy(v.Args[0:], v.Args[1:])
+ v.Args[len(v.Args)-1] = nil // aid GC
+ v.Args = v.Args[:len(v.Args)-1]
+ return v
+}
+
+// isSamePtr reports whether p1 and p2 point to the same address.
+func isSamePtr(p1, p2 *Value) bool {
+ if p1 == p2 {
+ return true
+ }
+ if p1.Op != p2.Op {
+ return false
+ }
+ switch p1.Op {
+ case OpOffPtr:
+ return p1.AuxInt == p2.AuxInt && isSamePtr(p1.Args[0], p2.Args[0])
+ case OpAddr, OpLocalAddr:
+ // OpAddr's 0th arg is either OpSP or OpSB, which means that it is uniquely identified by its Op.
+ // Checking for value equality only works after [z]cse has run.
+ return p1.Aux == p2.Aux && p1.Args[0].Op == p2.Args[0].Op
+ case OpAddPtr:
+ return p1.Args[1] == p2.Args[1] && isSamePtr(p1.Args[0], p2.Args[0])
+ }
+ return false
+}
+
+func isStackPtr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP || v.Op == OpLocalAddr
+}
+
+// disjoint reports whether the memory region specified by [p1:p1+n1)
+// does not overlap with [p2:p2+n2).
+// A return value of false does not imply the regions overlap.
+func disjoint(p1 *Value, n1 int64, p2 *Value, n2 int64) bool {
+ if n1 == 0 || n2 == 0 {
+ return true
+ }
+ if p1 == p2 {
+ return false
+ }
+ baseAndOffset := func(ptr *Value) (base *Value, offset int64) {
+ base, offset = ptr, 0
+ for base.Op == OpOffPtr {
+ offset += base.AuxInt
+ base = base.Args[0]
+ }
+ return base, offset
+ }
+ p1, off1 := baseAndOffset(p1)
+ p2, off2 := baseAndOffset(p2)
+ if isSamePtr(p1, p2) {
+ return !overlap(off1, n1, off2, n2)
+ }
+ // p1 and p2 are not the same, so if they are both OpAddrs then
+ // they point to different variables.
+ // If one pointer is on the stack and the other is an argument
+ // then they can't overlap.
+ switch p1.Op {
+ case OpAddr, OpLocalAddr:
+ if p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpSP {
+ return true
+ }
+ return (p2.Op == OpArg || p2.Op == OpArgIntReg) && p1.Args[0].Op == OpSP
+ case OpArg, OpArgIntReg:
+ if p2.Op == OpSP || p2.Op == OpLocalAddr {
+ return true
+ }
+ case OpSP:
+ return p2.Op == OpAddr || p2.Op == OpLocalAddr || p2.Op == OpArg || p2.Op == OpArgIntReg || p2.Op == OpSP
+ }
+ return false
+}
+
+// moveSize returns the number of bytes an aligned MOV instruction moves
+func moveSize(align int64, c *Config) int64 {
+ switch {
+ case align%8 == 0 && c.PtrSize == 8:
+ return 8
+ case align%4 == 0:
+ return 4
+ case align%2 == 0:
+ return 2
+ }
+ return 1
+}
+
+// mergePoint finds a block among a's blocks which dominates b and is itself
+// dominated by all of a's blocks. Returns nil if it can't find one.
+// Might return nil even if one does exist.
+func mergePoint(b *Block, a ...*Value) *Block {
+ // Walk backward from b looking for one of the a's blocks.
+
+ // Max distance
+ d := 100
+
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ goto found
+ }
+ }
+ if len(b.Preds) > 1 {
+ // Don't know which way to go back. Abort.
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+ }
+ return nil // too far away
+found:
+ // At this point, b is the block containing the first of a's values that we
+ // found by walking backward. If we return anything, it will be this block (saved as r).
+ r := b
+
+ // Keep going, counting the other a's that we find. They must all dominate r.
+ na := 0
+ for d > 0 {
+ for _, x := range a {
+ if b == x.Block {
+ na++
+ }
+ }
+ if na == len(a) {
+ // Found all of a in a backwards walk. We can return r.
+ return r
+ }
+ if len(b.Preds) > 1 {
+ return nil
+ }
+ b = b.Preds[0].b
+ d--
+
+ }
+ return nil // too far away
+}
+
+// clobber invalidates values. Returns true.
+// clobber is used by rewrite rules to:
+// A) make sure the values are really dead and never used again.
+// B) decrement use counts of the values' args.
+func clobber(vv ...*Value) bool {
+ for _, v := range vv {
+ v.reset(OpInvalid)
+ // Note: leave v.Block intact. The Block field is used after clobber.
+ }
+ return true
+}
+
+// clobberIfDead resets v when use count is 1. Returns true.
+// clobberIfDead is used by rewrite rules to decrement
+// use counts of v's args when v is dead and never used.
+func clobberIfDead(v *Value) bool {
+ if v.Uses == 1 {
+ v.reset(OpInvalid)
+ }
+ // Note: leave v.Block intact. The Block field is used after clobberIfDead.
+ return true
+}
+
+// noteRule is an easy way to track if a rule is matched when writing
+// new ones. Make the rule of interest also conditional on
+// noteRule("note to self: rule of interest matched")
+// and that message will print when the rule matches.
+func noteRule(s string) bool {
+ fmt.Println(s)
+ return true
+}
+
+// countRule increments Func.ruleMatches[key].
+// If Func.ruleMatches is non-nil at the end
+// of compilation, it will be printed to stdout.
+// This is intended to make it easier to find which functions
+// contain lots of rule matches when developing new rules.
+func countRule(v *Value, key string) bool {
+ f := v.Block.Func
+ if f.ruleMatches == nil {
+ f.ruleMatches = make(map[string]int)
+ }
+ f.ruleMatches[key]++
+ return true
+}
+
+// warnRule generates compiler debug output with string s when
+// v is not in autogenerated code, cond is true and the rule has fired.
+func warnRule(cond bool, v *Value, s string) bool {
+ if pos := v.Pos; pos.Line() > 1 && cond {
+ v.Block.Func.Warnl(pos, s)
+ }
+ return true
+}
+
+// for a pseudo-op like (LessThan x), extract x
+func flagArg(v *Value) *Value {
+ if len(v.Args) != 1 || !v.Args[0].Type.IsFlags() {
+ return nil
+ }
+ return v.Args[0]
+}
+
+// arm64Negate finds the complement to an ARM64 condition code,
+// for example !Equal -> NotEqual or !LessThan -> GreaterEqual
+//
+// For floating point, it's more subtle because NaN is unordered. We do
+// !LessThanF -> NotLessThanF; the latter takes care of NaNs.
+func arm64Negate(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterEqual
+ case OpARM64LessThanU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterThan:
+ return OpARM64LessEqual
+ case OpARM64GreaterThanU:
+ return OpARM64LessEqualU
+ case OpARM64LessEqual:
+ return OpARM64GreaterThan
+ case OpARM64LessEqualU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterEqual:
+ return OpARM64LessThan
+ case OpARM64GreaterEqualU:
+ return OpARM64LessThanU
+ case OpARM64Equal:
+ return OpARM64NotEqual
+ case OpARM64NotEqual:
+ return OpARM64Equal
+ case OpARM64LessThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64NotLessEqualF
+ case OpARM64NotLessEqualF:
+ return OpARM64LessEqualF
+ case OpARM64GreaterThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64GreaterEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// arm64Invert evaluates (InvertFlags op), which
+// is the same as altering the condition codes such
+// that the same result would be produced if the arguments
+// to the flag-generating instruction were reversed, e.g.
+// (InvertFlags (CMP x y)) -> (CMP y x)
+func arm64Invert(op Op) Op {
+ switch op {
+ case OpARM64LessThan:
+ return OpARM64GreaterThan
+ case OpARM64LessThanU:
+ return OpARM64GreaterThanU
+ case OpARM64GreaterThan:
+ return OpARM64LessThan
+ case OpARM64GreaterThanU:
+ return OpARM64LessThanU
+ case OpARM64LessEqual:
+ return OpARM64GreaterEqual
+ case OpARM64LessEqualU:
+ return OpARM64GreaterEqualU
+ case OpARM64GreaterEqual:
+ return OpARM64LessEqual
+ case OpARM64GreaterEqualU:
+ return OpARM64LessEqualU
+ case OpARM64Equal, OpARM64NotEqual:
+ return op
+ case OpARM64LessThanF:
+ return OpARM64GreaterThanF
+ case OpARM64GreaterThanF:
+ return OpARM64LessThanF
+ case OpARM64LessEqualF:
+ return OpARM64GreaterEqualF
+ case OpARM64GreaterEqualF:
+ return OpARM64LessEqualF
+ case OpARM64NotLessThanF:
+ return OpARM64NotGreaterThanF
+ case OpARM64NotGreaterThanF:
+ return OpARM64NotLessThanF
+ case OpARM64NotLessEqualF:
+ return OpARM64NotGreaterEqualF
+ case OpARM64NotGreaterEqualF:
+ return OpARM64NotLessEqualF
+ default:
+ panic("unreachable")
+ }
+}
+
+// evaluate an ARM64 op against a flags value
+// that is potentially constant; return 1 for true,
+// -1 for false, and 0 for not constant.
+func ccARM64Eval(op Op, flags *Value) int {
+ fop := flags.Op
+ if fop == OpARM64InvertFlags {
+ return -ccARM64Eval(op, flags.Args[0])
+ }
+ if fop != OpARM64FlagConstant {
+ return 0
+ }
+ fc := flagConstant(flags.AuxInt)
+ b2i := func(b bool) int {
+ if b {
+ return 1
+ }
+ return -1
+ }
+ switch op {
+ case OpARM64Equal:
+ return b2i(fc.eq())
+ case OpARM64NotEqual:
+ return b2i(fc.ne())
+ case OpARM64LessThan:
+ return b2i(fc.lt())
+ case OpARM64LessThanU:
+ return b2i(fc.ult())
+ case OpARM64GreaterThan:
+ return b2i(fc.gt())
+ case OpARM64GreaterThanU:
+ return b2i(fc.ugt())
+ case OpARM64LessEqual:
+ return b2i(fc.le())
+ case OpARM64LessEqualU:
+ return b2i(fc.ule())
+ case OpARM64GreaterEqual:
+ return b2i(fc.ge())
+ case OpARM64GreaterEqualU:
+ return b2i(fc.uge())
+ }
+ return 0
+}
+
+// logRule logs the use of the rule s. This will only be enabled if
+// rewrite rules were generated with the -log option, see gen/rulegen.go.
+func logRule(s string) {
+ if ruleFile == nil {
+ // Open a log file to write log to. We open in append
+ // mode because all.bash runs the compiler lots of times,
+ // and we want the concatenation of all of those logs.
+ // This means, of course, that users need to rm the old log
+ // to get fresh data.
+ // TODO: all.bash runs compilers in parallel. Need to synchronize logging somehow?
+ w, err := os.OpenFile(filepath.Join(os.Getenv("GOROOT"), "src", "rulelog"),
+ os.O_CREATE|os.O_WRONLY|os.O_APPEND, 0666)
+ if err != nil {
+ panic(err)
+ }
+ ruleFile = w
+ }
+ _, err := fmt.Fprintln(ruleFile, s)
+ if err != nil {
+ panic(err)
+ }
+}
+
+var ruleFile io.Writer
+
+func min(x, y int64) int64 {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func isConstZero(v *Value) bool {
+ switch v.Op {
+ case OpConstNil:
+ return true
+ case OpConst64, OpConst32, OpConst16, OpConst8, OpConstBool, OpConst32F, OpConst64F:
+ return v.AuxInt == 0
+ }
+ return false
+}
+
+// reciprocalExact64 reports whether 1/c is exactly representable.
+func reciprocalExact64(c float64) bool {
+ b := math.Float64bits(c)
+ man := b & (1<<52 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 52 & (1<<11 - 1)
+ // exponent bias is 0x3ff. So taking the reciprocal of a number
+ // changes the exponent to 0x7fe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0x7ff:
+ return false // ±inf
+ case 0x7fe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
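+
+// For example, reciprocalExact64(4) reports true (1/4 is exact), while
+// reciprocalExact64(3) reports false because 3 has a nonzero mantissa.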
+
+// reciprocalExact32 reports whether 1/c is exactly representable.
+func reciprocalExact32(c float32) bool {
+ b := math.Float32bits(c)
+ man := b & (1<<23 - 1)
+ if man != 0 {
+ return false // not a power of 2, denormal, or NaN
+ }
+ exp := b >> 23 & (1<<8 - 1)
+ // exponent bias is 0x7f. So taking the reciprocal of a number
+ // changes the exponent to 0xfe-exp.
+ switch exp {
+ case 0:
+ return false // ±0
+ case 0xff:
+ return false // ±inf
+ case 0xfe:
+ return false // exponent is not representable
+ default:
+ return true
+ }
+}
+
+// isARMImmRot reports whether an immediate can be directly encoded into an ARM instruction.
+func isARMImmRot(v uint32) bool {
+ for i := 0; i < 16; i++ {
+ if v&^0xff == 0 {
+ return true
+ }
+ v = v<<2 | v>>30
+ }
+
+ return false
+}
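+
+// For example, isARMImmRot(0xff000000) reports true (the 8-bit value 0xff
+// rotated into the top byte), while isARMImmRot(0x101) reports false: its two
+// set bits are 8 positions apart, so no rotation fits them into 8 bits.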
+
+// overlap reports whether the ranges given by the offset and
+// size pairs overlap.
+func overlap(offset1, size1, offset2, size2 int64) bool {
+ if offset1 >= offset2 && offset2+size2 > offset1 {
+ return true
+ }
+ if offset2 >= offset1 && offset1+size1 > offset2 {
+ return true
+ }
+ return false
+}
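+
+// The ranges are half-open, so for example overlap(0, 8, 4, 8) reports true
+// while overlap(0, 4, 4, 4) reports false (the ranges are merely adjacent).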
+
+func areAdjacentOffsets(off1, off2, size int64) bool {
+ return off1+size == off2 || off1 == off2+size
+}
+
+// zeroUpper32Bits reports whether the value zeroes out the upper 32 bits of a 64-bit register.
+// depth limits the recursion depth. In AMD64.rules, 3 is used as the limit
+// because it catches the same number of cases as 4.
+func zeroUpper32Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVLconst, OpAMD64MOVLload, OpAMD64MOVLQZX, OpAMD64MOVLloadidx1,
+ OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVBload, OpAMD64MOVBloadidx1,
+ OpAMD64MOVLloadidx4, OpAMD64ADDLload, OpAMD64SUBLload, OpAMD64ANDLload,
+ OpAMD64ORLload, OpAMD64XORLload, OpAMD64CVTTSD2SL,
+ OpAMD64ADDL, OpAMD64ADDLconst, OpAMD64SUBL, OpAMD64SUBLconst,
+ OpAMD64ANDL, OpAMD64ANDLconst, OpAMD64ORL, OpAMD64ORLconst,
+ OpAMD64XORL, OpAMD64XORLconst, OpAMD64NEGL, OpAMD64NOTL,
+ OpAMD64SHRL, OpAMD64SHRLconst, OpAMD64SARL, OpAMD64SARLconst,
+ OpAMD64SHLL, OpAMD64SHLLconst:
+ return true
+ case OpArg:
+ return x.Type.Size() == 4
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper32Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper48Bits is similar to zeroUpper32Bits, but for upper 48 bits
+func zeroUpper48Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVWQZX, OpAMD64MOVWload, OpAMD64MOVWloadidx1, OpAMD64MOVWloadidx2:
+ return true
+ case OpArg:
+ return x.Type.Size() == 2
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper48Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// zeroUpper56Bits is similar to zeroUpper32Bits, but for upper 56 bits
+func zeroUpper56Bits(x *Value, depth int) bool {
+ switch x.Op {
+ case OpAMD64MOVBQZX, OpAMD64MOVBload, OpAMD64MOVBloadidx1:
+ return true
+ case OpArg:
+ return x.Type.Size() == 1
+ case OpPhi, OpSelect0, OpSelect1:
+ // Phis can use each other as arguments; instead of tracking visited values,
+ // just limit the recursion depth.
+ if depth <= 0 {
+ return false
+ }
+ for i := range x.Args {
+ if !zeroUpper56Bits(x.Args[i], depth-1) {
+ return false
+ }
+ }
+ return true
+
+ }
+ return false
+}
+
+// isInlinableMemmove reports whether the given arch performs a Move of the given size
+// faster than memmove. It will only return true if replacing the memmove with a Move is
+// safe, either because Move will do all of its loads before any of its stores, or
+// because the arguments are known to be disjoint.
+// This is used as a check for replacing memmove with Move ops.
+func isInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ // It is always safe to convert memmove into Move when its arguments are disjoint.
+ // Move ops may or may not be faster for large sizes depending on how the platform
+ // lowers them, so we only perform this optimization on platforms that we know to
+ // have fast Move ops.
+ switch c.arch {
+ case "amd64":
+ return sz <= 16 || (sz < 1024 && disjoint(dst, sz, src, sz))
+ case "386", "arm64":
+ return sz <= 8
+ case "s390x", "ppc64", "ppc64le":
+ return sz <= 8 || disjoint(dst, sz, src, sz)
+ case "arm", "mips", "mips64", "mipsle", "mips64le":
+ return sz <= 4
+ }
+ return false
+}
+func IsInlinableMemmove(dst, src *Value, sz int64, c *Config) bool {
+ return isInlinableMemmove(dst, src, sz, c)
+}
+
+// logLargeCopy logs the occurrence of a large copy.
+// The best place to do this is in the rewrite rules where the size of the move is easy to find.
+// "Large" is arbitrarily chosen to be 128 bytes; this may change.
+func logLargeCopy(v *Value, s int64) bool {
+ if s < 128 {
+ return true
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "copy", "lower", v.Block.Func.Name, fmt.Sprintf("%d bytes", s))
+ }
+ return true
+}
+func LogLargeCopy(funcName string, pos src.XPos, s int64) {
+ if s < 128 {
+ return
+ }
+ if logopt.Enabled() {
+ logopt.LogOpt(pos, "copy", "lower", funcName, fmt.Sprintf("%d bytes", s))
+ }
+}
+
+// hasSmallRotate reports whether the architecture has rotate instructions
+// for sizes < 32-bit. This is used to decide whether to promote some rotations.
+func hasSmallRotate(c *Config) bool {
+ switch c.arch {
+ case "amd64", "386":
+ return true
+ default:
+ return false
+ }
+}
+
+func newPPC64ShiftAuxInt(sh, mb, me, sz int64) int32 {
+ if sh < 0 || sh >= sz {
+ panic("PPC64 shift arg sh out of range")
+ }
+ if mb < 0 || mb >= sz {
+ panic("PPC64 shift arg mb out of range")
+ }
+ if me < 0 || me >= sz {
+ panic("PPC64 shift arg me out of range")
+ }
+ return int32(sh<<16 | mb<<8 | me)
+}
+
+func GetPPC64Shiftsh(auxint int64) int64 {
+ return int64(int8(auxint >> 16))
+}
+
+func GetPPC64Shiftmb(auxint int64) int64 {
+ return int64(int8(auxint >> 8))
+}
+
+func GetPPC64Shiftme(auxint int64) int64 {
+ return int64(int8(auxint))
+}
+
+// isPPC64WordRotateMask reports whether this value can be encoded as a mask
+// for an rlwinm-like operation. Masks can also extend from the msb and wrap
+// around to the lsb. That is, the valid masks are 32-bit strings of the form
+// 0..01..10..0, 1..10..01..1, or 1...1.
+func isPPC64WordRotateMask(v64 int64) bool {
+ // Isolate rightmost 1 (if none 0) and add.
+ v := uint32(v64)
+ vp := (v & -v) + v
+ // Likewise, for the wrapping case.
+ vn := ^v
+ vpn := (vn & -vn) + vn
+ return (v&vp == 0 || vn&vpn == 0) && v != 0
+}
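+
+// For example, 0x000000F0 (0..01..10..0) and 0xC0000001 (a mask wrapping from
+// bit 31 to bit 0) are accepted, while 0x0000000A is rejected because its set
+// bits are not contiguous (even allowing wrap-around).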
+
+// encodePPC64RotateMask compresses the mask and shift into a single value of
+// the form me | mb<<8 | rotate<<16 | nbits<<24, where me and mb can be used
+// to regenerate the input mask.
+func encodePPC64RotateMask(rotate, mask, nbits int64) int64 {
+ var mb, me, mbn, men int
+
+ // Determine boundaries and then decode them
+ if mask == 0 || ^mask == 0 || rotate >= nbits {
+ panic("Invalid PPC64 rotate mask")
+ } else if nbits == 32 {
+ mb = bits.LeadingZeros32(uint32(mask))
+ me = 32 - bits.TrailingZeros32(uint32(mask))
+ mbn = bits.LeadingZeros32(^uint32(mask))
+ men = 32 - bits.TrailingZeros32(^uint32(mask))
+ } else {
+ mb = bits.LeadingZeros64(uint64(mask))
+ me = 64 - bits.TrailingZeros64(uint64(mask))
+ mbn = bits.LeadingZeros64(^uint64(mask))
+ men = 64 - bits.TrailingZeros64(^uint64(mask))
+ }
+ // Check for a wrapping mask (e.g. bits at 0 and 63).
+ if mb == 0 && me == int(nbits) {
+ // swap the inverted values
+ mb, me = men, mbn
+ }
+
+ return int64(me) | int64(mb<<8) | int64(rotate<<16) | int64(nbits<<24)
+}
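+
+// For example, encodePPC64RotateMask(0, 0xF0, 32) computes mb=24 (leading
+// zeros of the mask) and me=28 (32 minus its trailing zeros), producing the
+// auxInt 32<<24 | 0<<16 | 24<<8 | 28.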
+
+// The inverse operation of encodePPC64RotateMask. The values returned as
+// mb and me satisfy the POWER ISA definition of MASK(x,y) where MASK(mb,me) = mask.
+func DecodePPC64RotateMask(sauxint int64) (rotate, mb, me int64, mask uint64) {
+ auxint := uint64(sauxint)
+ rotate = int64((auxint >> 16) & 0xFF)
+ mb = int64((auxint >> 8) & 0xFF)
+ me = int64((auxint >> 0) & 0xFF)
+ nbits := int64((auxint >> 24) & 0xFF)
+ mask = ((1 << uint(nbits-mb)) - 1) ^ ((1 << uint(nbits-me)) - 1)
+ if mb > me {
+ mask = ^mask
+ }
+ if nbits == 32 {
+ mask = uint64(uint32(mask))
+ }
+
+ // Fixup ME to match ISA definition. The second argument to MASK(..,me)
+ // is inclusive.
+ me = (me - 1) & (nbits - 1)
+ return
+}
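+
+// Decoding the value from the example above, DecodePPC64RotateMask returns
+// rotate=0, mb=24, me=27 (me is adjusted to the inclusive ISA form) and
+// mask=0xF0, reconstructing the original mask.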
+
+// This verifies that the mask is a set of
+// consecutive bits including the least
+// significant bit.
+func isPPC64ValidShiftMask(v int64) bool {
+ if (v != 0) && ((v+1)&v) == 0 {
+ return true
+ }
+ return false
+}
+
+func getPPC64ShiftMaskLength(v int64) int64 {
+ return int64(bits.Len64(uint64(v)))
+}
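+
+// For example, isPPC64ValidShiftMask(0xFF) reports true and
+// getPPC64ShiftMaskLength(0xFF) returns 8, while 0xF0 is not a valid shift
+// mask here because it does not include the least significant bit.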
+
+// Decompose a shift right into an equivalent rotate/mask,
+// and return mask & m.
+func mergePPC64RShiftMask(m, s, nbits int64) int64 {
+ smask := uint64((1<<uint(nbits))-1) >> uint(s)
+ return m & int64(smask)
+}
+
+// Combine (ANDconst [m] (SRWconst [s])) into (RLWINM [y]) or return 0
+func mergePPC64AndSrwi(m, s int64) int64 {
+ mask := mergePPC64RShiftMask(m, s, 32)
+ if !isPPC64WordRotateMask(mask) {
+ return 0
+ }
+ return encodePPC64RotateMask((32-s)&31, mask, 32)
+}
+
+// Test if a shift right feeding into a CLRLSLDI can be merged into RLWINM.
+// Return the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiSrw(sld, srw int64) int64 {
+ mask_1 := uint64(0xFFFFFFFF >> uint(srw))
+ // For CLRLSLDI, it's more convenient to think of it as masking the leftmost bits and then rotating left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // Rewrite mask to apply after the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(sld))
+
+ r_1 := 32 - srw
+ r_2 := GetPPC64Shiftsh(sld)
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ if uint64(uint32(mask_3)) != mask_3 || mask_3 == 0 {
+ return 0
+ }
+ return encodePPC64RotateMask(int64(r_3), int64(mask_3), 32)
+}
+
+// Test if a RLWINM feeding into a CLRLSLDI can be merged into RLWINM. Return
+// the encoded RLWINM constant, or 0 if they cannot be merged.
+func mergePPC64ClrlsldiRlwinm(sld int32, rlw int64) int64 {
+ r_1, _, _, mask_1 := DecodePPC64RotateMask(rlw)
+ // For CLRLSLDI, it's more convenient to think of it as masking the leftmost bits and then rotating left.
+ mask_2 := uint64(0xFFFFFFFFFFFFFFFF) >> uint(GetPPC64Shiftmb(int64(sld)))
+
+ // combine the masks, and adjust for the final left shift.
+ mask_3 := (mask_1 & mask_2) << uint(GetPPC64Shiftsh(int64(sld)))
+ r_2 := GetPPC64Shiftsh(int64(sld))
+ r_3 := (r_1 + r_2) & 31 // This can wrap.
+
+ // Verify the result is still a valid bitmask of <= 32 bits.
+ if !isPPC64WordRotateMask(int64(mask_3)) || uint64(uint32(mask_3)) != mask_3 {
+ return 0
+ }
+ return encodePPC64RotateMask(r_3, int64(mask_3), 32)
+}
+
+// Compute the encoded RLWINM constant from combining (SLDconst [sld] (SRWconst [srw] x)),
+// or return 0 if they cannot be combined.
+func mergePPC64SldiSrw(sld, srw int64) int64 {
+ if sld > srw || srw >= 32 {
+ return 0
+ }
+ mask_r := uint32(0xFFFFFFFF) >> uint(srw)
+ mask_l := uint32(0xFFFFFFFF) >> uint(sld)
+ mask := (mask_r & mask_l) << uint(sld)
+ return encodePPC64RotateMask((32-srw+sld)&31, int64(mask), 32)
+}
+
+// Convenience function to rotate a 32 bit constant value by another constant.
+func rotateLeft32(v, rotate int64) int64 {
+ return int64(bits.RotateLeft32(uint32(v), int(rotate)))
+}
+
+func rotateRight64(v, rotate int64) int64 {
+ return int64(bits.RotateLeft64(uint64(v), int(-rotate)))
+}
+
+// encodes the lsb and width for arm(64) bitfield ops into the expected auxInt format.
+func armBFAuxInt(lsb, width int64) arm64BitField {
+ if lsb < 0 || lsb > 63 {
+ panic("ARM(64) bit field lsb constant out of range")
+ }
+ if width < 1 || lsb+width > 64 {
+ panic("ARM(64) bit field width constant out of range")
+ }
+ return arm64BitField(width | lsb<<8)
+}
+
+// returns the lsb part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFlsb() int64 {
+ return int64(uint64(bfc) >> 8)
+}
+
+// returns the width part of the auxInt field of arm64 bitfield ops.
+func (bfc arm64BitField) getARM64BFwidth() int64 {
+ return int64(bfc) & 0xff
+}
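+
+// For example, armBFAuxInt(8, 16) encodes to 0x810; getARM64BFlsb recovers 8
+// and getARM64BFwidth recovers 16.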
+
+// checks if mask >> rshift applied at lsb is a valid arm64 bitfield op mask.
+func isARM64BFMask(lsb, mask, rshift int64) bool {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ return shiftedMask != 0 && isPowerOfTwo64(shiftedMask+1) && nto(shiftedMask)+lsb < 64
+}
+
+// returns the bitfield width of mask >> rshift for arm64 bitfield ops
+func arm64BFWidth(mask, rshift int64) int64 {
+ shiftedMask := int64(uint64(mask) >> uint64(rshift))
+ if shiftedMask == 0 {
+ panic("ARM64 BF mask is zero")
+ }
+ return nto(shiftedMask)
+}
+
+// sizeof returns the size of t in bytes.
+// It will panic if t is not a *types.Type.
+func sizeof(t interface{}) int64 {
+ return t.(*types.Type).Size()
+}
+
+// registerizable reports whether t is a primitive type that fits in
+// a register. It assumes float64 values will always fit into registers
+// even if that isn't strictly true.
+func registerizable(b *Block, typ *types.Type) bool {
+ if typ.IsPtrShaped() || typ.IsFloat() {
+ return true
+ }
+ if typ.IsInteger() {
+ return typ.Size() <= b.Func.Config.RegSize
+ }
+ return false
+}
+
+// needRaceCleanup reports whether this call to racefuncenter/exit isn't needed.
+func needRaceCleanup(sym *AuxCall, v *Value) bool {
+ f := v.Block.Func
+ if !f.Config.Race {
+ return false
+ }
+ if !isSameCall(sym, "runtime.racefuncenter") && !isSameCall(sym, "runtime.racefuncexit") {
+ return false
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStaticCall, OpStaticLECall:
+ // While checking for racefuncenter we will also encounter racefuncexit (and vice versa); allow them.
+ // Also allow calls to panic*.
+ s := v.Aux.(*AuxCall).Fn.String()
+ switch s {
+ case "runtime.racefuncenter", "runtime.racefuncexit",
+ "runtime.panicdivide", "runtime.panicwrap",
+ "runtime.panicshift":
+ continue
+ }
+ // If we encountered any call, we need to keep racefunc*,
+ // for accurate stacktraces.
+ return false
+ case OpPanicBounds, OpPanicExtend:
+ // Note: these are panic generators that are ok (like the static calls above).
+ case OpClosureCall, OpInterCall, OpClosureLECall, OpInterLECall:
+ // We must keep the race functions if there are any other call types.
+ return false
+ }
+ }
+ }
+ if isSameCall(sym, "runtime.racefuncenter") {
+ // TODO REGISTER ABI this needs to be cleaned up.
+ // If we're removing racefuncenter, remove its argument as well.
+ if v.Args[0].Op != OpStore {
+ if v.Op == OpStaticLECall {
+ // there is no store, yet.
+ return true
+ }
+ return false
+ }
+ mem := v.Args[0].Args[2]
+ v.Args[0].reset(OpCopy)
+ v.Args[0].AddArg(mem)
+ }
+ return true
+}
+
+// symIsRO reports whether sym is a read-only global.
+func symIsRO(sym interface{}) bool {
+ lsym := sym.(*obj.LSym)
+ return lsym.Type == objabi.SRODATA && len(lsym.R) == 0
+}
+
+// symIsROZero reports whether sym is a read-only global whose data contains all zeros.
+func symIsROZero(sym Sym) bool {
+ lsym := sym.(*obj.LSym)
+ if lsym.Type != objabi.SRODATA || len(lsym.R) != 0 {
+ return false
+ }
+ for _, b := range lsym.P {
+ if b != 0 {
+ return false
+ }
+ }
+ return true
+}
+
+// read8 reads one byte from the read-only global sym at offset off.
+func read8(sym interface{}, off int64) uint8 {
+ lsym := sym.(*obj.LSym)
+ if off >= int64(len(lsym.P)) || off < 0 {
+ // Invalid index into the global sym.
+ // This can happen in dead code, so we don't want to panic.
+ // Just return any value, it will eventually get ignored.
+ // See issue 29215.
+ return 0
+ }
+ return lsym.P[off]
+}
+
+// read16 reads two bytes from the read-only global sym at offset off.
+func read16(sym interface{}, off int64, byteorder binary.ByteOrder) uint16 {
+ lsym := sym.(*obj.LSym)
+ // lsym.P is written lazily.
+ // Bytes requested after the end of lsym.P are 0.
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 2)
+ copy(buf, src)
+ return byteorder.Uint16(buf)
+}
+
+// read32 reads four bytes from the read-only global sym at offset off.
+func read32(sym interface{}, off int64, byteorder binary.ByteOrder) uint32 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 4)
+ copy(buf, src)
+ return byteorder.Uint32(buf)
+}
+
+// read64 reads eight bytes from the read-only global sym at offset off.
+func read64(sym interface{}, off int64, byteorder binary.ByteOrder) uint64 {
+ lsym := sym.(*obj.LSym)
+ var src []byte
+ if 0 <= off && off < int64(len(lsym.P)) {
+ src = lsym.P[off:]
+ }
+ buf := make([]byte, 8)
+ copy(buf, src)
+ return byteorder.Uint64(buf)
+}
+
+// sequentialAddresses reports whether it can prove that x + n == y.
+func sequentialAddresses(x, y *Value, n int64) bool {
+ if x.Op == Op386ADDL && y.Op == Op386LEAL1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == Op386LEAL1 && y.Op == Op386LEAL1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64ADDQ && y.Op == OpAMD64LEAQ1 && y.AuxInt == n && y.Aux == nil &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ if x.Op == OpAMD64LEAQ1 && y.Op == OpAMD64LEAQ1 && y.AuxInt == x.AuxInt+n && x.Aux == y.Aux &&
+ (x.Args[0] == y.Args[0] && x.Args[1] == y.Args[1] ||
+ x.Args[0] == y.Args[1] && x.Args[1] == y.Args[0]) {
+ return true
+ }
+ return false
+}
+
+// flagConstant represents the result of a compile-time comparison.
+// The sense of these flags does not necessarily represent the hardware's notion
+// of a flags register - these are just a compile-time construct.
+// We happen to match the semantics to those of arm/arm64.
+// Note that these semantics differ from x86: the carry flag has the opposite
+// sense on a subtraction!
+// On amd64, C=1 represents a borrow, e.g. SBB on amd64 does x - y - C.
+// On arm64, C=0 represents a borrow, e.g. SBC on arm64 does x - y - ^C.
+// (because it does x + ^y + C).
+// See https://en.wikipedia.org/wiki/Carry_flag#Vs._borrow_flag
+type flagConstant uint8
+
+// N reports whether the result of an operation is negative (high bit set).
+func (fc flagConstant) N() bool {
+ return fc&1 != 0
+}
+
+// Z reports whether the result of an operation is 0.
+func (fc flagConstant) Z() bool {
+ return fc&2 != 0
+}
+
+// C reports whether an unsigned add overflowed (carry), or an
+// unsigned subtract did not underflow (borrow).
+func (fc flagConstant) C() bool {
+ return fc&4 != 0
+}
+
+// V reports whether a signed operation overflowed or underflowed.
+func (fc flagConstant) V() bool {
+ return fc&8 != 0
+}
+
+func (fc flagConstant) eq() bool {
+ return fc.Z()
+}
+func (fc flagConstant) ne() bool {
+ return !fc.Z()
+}
+func (fc flagConstant) lt() bool {
+ return fc.N() != fc.V()
+}
+func (fc flagConstant) le() bool {
+ return fc.Z() || fc.lt()
+}
+func (fc flagConstant) gt() bool {
+ return !fc.Z() && fc.ge()
+}
+func (fc flagConstant) ge() bool {
+ return fc.N() == fc.V()
+}
+func (fc flagConstant) ult() bool {
+ return !fc.C()
+}
+func (fc flagConstant) ule() bool {
+ return fc.Z() || fc.ult()
+}
+func (fc flagConstant) ugt() bool {
+ return !fc.Z() && fc.uge()
+}
+func (fc flagConstant) uge() bool {
+ return fc.C()
+}
+
+func (fc flagConstant) ltNoov() bool {
+ return fc.lt() && !fc.V()
+}
+func (fc flagConstant) leNoov() bool {
+ return fc.le() && !fc.V()
+}
+func (fc flagConstant) gtNoov() bool {
+ return fc.gt() && !fc.V()
+}
+func (fc flagConstant) geNoov() bool {
+ return fc.ge() && !fc.V()
+}
+
+func (fc flagConstant) String() string {
+ return fmt.Sprintf("N=%v,Z=%v,C=%v,V=%v", fc.N(), fc.Z(), fc.C(), fc.V())
+}
+
+type flagConstantBuilder struct {
+ N bool
+ Z bool
+ C bool
+ V bool
+}
+
+func (fcs flagConstantBuilder) encode() flagConstant {
+ var fc flagConstant
+ if fcs.N {
+ fc |= 1
+ }
+ if fcs.Z {
+ fc |= 2
+ }
+ if fcs.C {
+ fc |= 4
+ }
+ if fcs.V {
+ fc |= 8
+ }
+ return fc
+}
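+
+// For example, flagConstantBuilder{Z: true, C: true}.encode() yields
+// flagConstant(6), for which eq() and uge() are true and ult() is false.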
+
+// Note: addFlags(x,y) != subFlags(x,-y) in some situations:
+// - the results of the C flag are different
+// - the results of the V flag when y==minint are different
+
+// addFlags64 returns the flags that would be set from computing x+y.
+func addFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint64(x+y) < uint64(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags64 returns the flags that would be set from computing x-y.
+func subFlags64(x, y int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint64(y) <= uint64(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
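+
+// Following the arm model, subFlags64(5, 3) sets C (no borrow), so uge() is
+// true and ult() is false; subFlags64(3, 5) clears C (borrow), so ult() is true.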
+
+// addFlags32 returns the flags that would be set from computing x+y.
+func addFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x+y == 0
+ fcb.N = x+y < 0
+ fcb.C = uint32(x+y) < uint32(x)
+ fcb.V = x >= 0 && y >= 0 && x+y < 0 || x < 0 && y < 0 && x+y >= 0
+ return fcb.encode()
+}
+
+// subFlags32 returns the flags that would be set from computing x-y.
+func subFlags32(x, y int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x-y == 0
+ fcb.N = x-y < 0
+ fcb.C = uint32(y) <= uint32(x) // This code follows the arm carry flag model.
+ fcb.V = x >= 0 && y < 0 && x-y < 0 || x < 0 && y >= 0 && x-y >= 0
+ return fcb.encode()
+}
+
+// logicFlags64 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags64(x int64) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
+
+// logicFlags32 returns flags set to the sign/zeroness of x.
+// C and V are set to false.
+func logicFlags32(x int32) flagConstant {
+ var fcb flagConstantBuilder
+ fcb.Z = x == 0
+ fcb.N = x < 0
+ return fcb.encode()
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386.go b/src/cmd/compile/internal/ssa/rewrite386.go
new file mode 100644
index 0000000..34f3786
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386.go
@@ -0,0 +1,12557 @@
+// Code generated from gen/386.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValue386(v *Value) bool {
+ switch v.Op {
+ case Op386ADCL:
+ return rewriteValue386_Op386ADCL(v)
+ case Op386ADDL:
+ return rewriteValue386_Op386ADDL(v)
+ case Op386ADDLcarry:
+ return rewriteValue386_Op386ADDLcarry(v)
+ case Op386ADDLconst:
+ return rewriteValue386_Op386ADDLconst(v)
+ case Op386ADDLconstmodify:
+ return rewriteValue386_Op386ADDLconstmodify(v)
+ case Op386ADDLload:
+ return rewriteValue386_Op386ADDLload(v)
+ case Op386ADDLmodify:
+ return rewriteValue386_Op386ADDLmodify(v)
+ case Op386ADDSD:
+ return rewriteValue386_Op386ADDSD(v)
+ case Op386ADDSDload:
+ return rewriteValue386_Op386ADDSDload(v)
+ case Op386ADDSS:
+ return rewriteValue386_Op386ADDSS(v)
+ case Op386ADDSSload:
+ return rewriteValue386_Op386ADDSSload(v)
+ case Op386ANDL:
+ return rewriteValue386_Op386ANDL(v)
+ case Op386ANDLconst:
+ return rewriteValue386_Op386ANDLconst(v)
+ case Op386ANDLconstmodify:
+ return rewriteValue386_Op386ANDLconstmodify(v)
+ case Op386ANDLload:
+ return rewriteValue386_Op386ANDLload(v)
+ case Op386ANDLmodify:
+ return rewriteValue386_Op386ANDLmodify(v)
+ case Op386CMPB:
+ return rewriteValue386_Op386CMPB(v)
+ case Op386CMPBconst:
+ return rewriteValue386_Op386CMPBconst(v)
+ case Op386CMPBload:
+ return rewriteValue386_Op386CMPBload(v)
+ case Op386CMPL:
+ return rewriteValue386_Op386CMPL(v)
+ case Op386CMPLconst:
+ return rewriteValue386_Op386CMPLconst(v)
+ case Op386CMPLload:
+ return rewriteValue386_Op386CMPLload(v)
+ case Op386CMPW:
+ return rewriteValue386_Op386CMPW(v)
+ case Op386CMPWconst:
+ return rewriteValue386_Op386CMPWconst(v)
+ case Op386CMPWload:
+ return rewriteValue386_Op386CMPWload(v)
+ case Op386DIVSD:
+ return rewriteValue386_Op386DIVSD(v)
+ case Op386DIVSDload:
+ return rewriteValue386_Op386DIVSDload(v)
+ case Op386DIVSS:
+ return rewriteValue386_Op386DIVSS(v)
+ case Op386DIVSSload:
+ return rewriteValue386_Op386DIVSSload(v)
+ case Op386LEAL:
+ return rewriteValue386_Op386LEAL(v)
+ case Op386LEAL1:
+ return rewriteValue386_Op386LEAL1(v)
+ case Op386LEAL2:
+ return rewriteValue386_Op386LEAL2(v)
+ case Op386LEAL4:
+ return rewriteValue386_Op386LEAL4(v)
+ case Op386LEAL8:
+ return rewriteValue386_Op386LEAL8(v)
+ case Op386MOVBLSX:
+ return rewriteValue386_Op386MOVBLSX(v)
+ case Op386MOVBLSXload:
+ return rewriteValue386_Op386MOVBLSXload(v)
+ case Op386MOVBLZX:
+ return rewriteValue386_Op386MOVBLZX(v)
+ case Op386MOVBload:
+ return rewriteValue386_Op386MOVBload(v)
+ case Op386MOVBstore:
+ return rewriteValue386_Op386MOVBstore(v)
+ case Op386MOVBstoreconst:
+ return rewriteValue386_Op386MOVBstoreconst(v)
+ case Op386MOVLload:
+ return rewriteValue386_Op386MOVLload(v)
+ case Op386MOVLstore:
+ return rewriteValue386_Op386MOVLstore(v)
+ case Op386MOVLstoreconst:
+ return rewriteValue386_Op386MOVLstoreconst(v)
+ case Op386MOVSDconst:
+ return rewriteValue386_Op386MOVSDconst(v)
+ case Op386MOVSDload:
+ return rewriteValue386_Op386MOVSDload(v)
+ case Op386MOVSDstore:
+ return rewriteValue386_Op386MOVSDstore(v)
+ case Op386MOVSSconst:
+ return rewriteValue386_Op386MOVSSconst(v)
+ case Op386MOVSSload:
+ return rewriteValue386_Op386MOVSSload(v)
+ case Op386MOVSSstore:
+ return rewriteValue386_Op386MOVSSstore(v)
+ case Op386MOVWLSX:
+ return rewriteValue386_Op386MOVWLSX(v)
+ case Op386MOVWLSXload:
+ return rewriteValue386_Op386MOVWLSXload(v)
+ case Op386MOVWLZX:
+ return rewriteValue386_Op386MOVWLZX(v)
+ case Op386MOVWload:
+ return rewriteValue386_Op386MOVWload(v)
+ case Op386MOVWstore:
+ return rewriteValue386_Op386MOVWstore(v)
+ case Op386MOVWstoreconst:
+ return rewriteValue386_Op386MOVWstoreconst(v)
+ case Op386MULL:
+ return rewriteValue386_Op386MULL(v)
+ case Op386MULLconst:
+ return rewriteValue386_Op386MULLconst(v)
+ case Op386MULLload:
+ return rewriteValue386_Op386MULLload(v)
+ case Op386MULSD:
+ return rewriteValue386_Op386MULSD(v)
+ case Op386MULSDload:
+ return rewriteValue386_Op386MULSDload(v)
+ case Op386MULSS:
+ return rewriteValue386_Op386MULSS(v)
+ case Op386MULSSload:
+ return rewriteValue386_Op386MULSSload(v)
+ case Op386NEGL:
+ return rewriteValue386_Op386NEGL(v)
+ case Op386NOTL:
+ return rewriteValue386_Op386NOTL(v)
+ case Op386ORL:
+ return rewriteValue386_Op386ORL(v)
+ case Op386ORLconst:
+ return rewriteValue386_Op386ORLconst(v)
+ case Op386ORLconstmodify:
+ return rewriteValue386_Op386ORLconstmodify(v)
+ case Op386ORLload:
+ return rewriteValue386_Op386ORLload(v)
+ case Op386ORLmodify:
+ return rewriteValue386_Op386ORLmodify(v)
+ case Op386ROLBconst:
+ return rewriteValue386_Op386ROLBconst(v)
+ case Op386ROLLconst:
+ return rewriteValue386_Op386ROLLconst(v)
+ case Op386ROLWconst:
+ return rewriteValue386_Op386ROLWconst(v)
+ case Op386SARB:
+ return rewriteValue386_Op386SARB(v)
+ case Op386SARBconst:
+ return rewriteValue386_Op386SARBconst(v)
+ case Op386SARL:
+ return rewriteValue386_Op386SARL(v)
+ case Op386SARLconst:
+ return rewriteValue386_Op386SARLconst(v)
+ case Op386SARW:
+ return rewriteValue386_Op386SARW(v)
+ case Op386SARWconst:
+ return rewriteValue386_Op386SARWconst(v)
+ case Op386SBBL:
+ return rewriteValue386_Op386SBBL(v)
+ case Op386SBBLcarrymask:
+ return rewriteValue386_Op386SBBLcarrymask(v)
+ case Op386SETA:
+ return rewriteValue386_Op386SETA(v)
+ case Op386SETAE:
+ return rewriteValue386_Op386SETAE(v)
+ case Op386SETB:
+ return rewriteValue386_Op386SETB(v)
+ case Op386SETBE:
+ return rewriteValue386_Op386SETBE(v)
+ case Op386SETEQ:
+ return rewriteValue386_Op386SETEQ(v)
+ case Op386SETG:
+ return rewriteValue386_Op386SETG(v)
+ case Op386SETGE:
+ return rewriteValue386_Op386SETGE(v)
+ case Op386SETL:
+ return rewriteValue386_Op386SETL(v)
+ case Op386SETLE:
+ return rewriteValue386_Op386SETLE(v)
+ case Op386SETNE:
+ return rewriteValue386_Op386SETNE(v)
+ case Op386SHLL:
+ return rewriteValue386_Op386SHLL(v)
+ case Op386SHLLconst:
+ return rewriteValue386_Op386SHLLconst(v)
+ case Op386SHRB:
+ return rewriteValue386_Op386SHRB(v)
+ case Op386SHRBconst:
+ return rewriteValue386_Op386SHRBconst(v)
+ case Op386SHRL:
+ return rewriteValue386_Op386SHRL(v)
+ case Op386SHRLconst:
+ return rewriteValue386_Op386SHRLconst(v)
+ case Op386SHRW:
+ return rewriteValue386_Op386SHRW(v)
+ case Op386SHRWconst:
+ return rewriteValue386_Op386SHRWconst(v)
+ case Op386SUBL:
+ return rewriteValue386_Op386SUBL(v)
+ case Op386SUBLcarry:
+ return rewriteValue386_Op386SUBLcarry(v)
+ case Op386SUBLconst:
+ return rewriteValue386_Op386SUBLconst(v)
+ case Op386SUBLload:
+ return rewriteValue386_Op386SUBLload(v)
+ case Op386SUBLmodify:
+ return rewriteValue386_Op386SUBLmodify(v)
+ case Op386SUBSD:
+ return rewriteValue386_Op386SUBSD(v)
+ case Op386SUBSDload:
+ return rewriteValue386_Op386SUBSDload(v)
+ case Op386SUBSS:
+ return rewriteValue386_Op386SUBSS(v)
+ case Op386SUBSSload:
+ return rewriteValue386_Op386SUBSSload(v)
+ case Op386XORL:
+ return rewriteValue386_Op386XORL(v)
+ case Op386XORLconst:
+ return rewriteValue386_Op386XORLconst(v)
+ case Op386XORLconstmodify:
+ return rewriteValue386_Op386XORLconstmodify(v)
+ case Op386XORLload:
+ return rewriteValue386_Op386XORLload(v)
+ case Op386XORLmodify:
+ return rewriteValue386_Op386XORLmodify(v)
+ case OpAdd16:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32:
+ v.Op = Op386ADDL
+ return true
+ case OpAdd32F:
+ v.Op = Op386ADDSS
+ return true
+ case OpAdd32carry:
+ v.Op = Op386ADDLcarry
+ return true
+ case OpAdd32withcarry:
+ v.Op = Op386ADCL
+ return true
+ case OpAdd64F:
+ v.Op = Op386ADDSD
+ return true
+ case OpAdd8:
+ v.Op = Op386ADDL
+ return true
+ case OpAddPtr:
+ v.Op = Op386ADDL
+ return true
+ case OpAddr:
+ return rewriteValue386_OpAddr(v)
+ case OpAnd16:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd32:
+ v.Op = Op386ANDL
+ return true
+ case OpAnd8:
+ v.Op = Op386ANDL
+ return true
+ case OpAndB:
+ v.Op = Op386ANDL
+ return true
+ case OpAvg32u:
+ v.Op = Op386AVGLU
+ return true
+ case OpBswap32:
+ v.Op = Op386BSWAPL
+ return true
+ case OpClosureCall:
+ v.Op = Op386CALLclosure
+ return true
+ case OpCom16:
+ v.Op = Op386NOTL
+ return true
+ case OpCom32:
+ v.Op = Op386NOTL
+ return true
+ case OpCom8:
+ v.Op = Op386NOTL
+ return true
+ case OpConst16:
+ return rewriteValue386_OpConst16(v)
+ case OpConst32:
+ v.Op = Op386MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = Op386MOVSSconst
+ return true
+ case OpConst64F:
+ v.Op = Op386MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValue386_OpConst8(v)
+ case OpConstBool:
+ return rewriteValue386_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValue386_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValue386_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = Op386BSFL
+ return true
+ case OpCvt32Fto32:
+ v.Op = Op386CVTTSS2SL
+ return true
+ case OpCvt32Fto64F:
+ v.Op = Op386CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = Op386CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = Op386CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = Op386CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = Op386CVTSD2SS
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ v.Op = Op386DIVW
+ return true
+ case OpDiv16u:
+ v.Op = Op386DIVWU
+ return true
+ case OpDiv32:
+ v.Op = Op386DIVL
+ return true
+ case OpDiv32F:
+ v.Op = Op386DIVSS
+ return true
+ case OpDiv32u:
+ v.Op = Op386DIVLU
+ return true
+ case OpDiv64F:
+ v.Op = Op386DIVSD
+ return true
+ case OpDiv8:
+ return rewriteValue386_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValue386_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValue386_OpEq16(v)
+ case OpEq32:
+ return rewriteValue386_OpEq32(v)
+ case OpEq32F:
+ return rewriteValue386_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValue386_OpEq64F(v)
+ case OpEq8:
+ return rewriteValue386_OpEq8(v)
+ case OpEqB:
+ return rewriteValue386_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValue386_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = Op386LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = Op386LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = Op386LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = Op386LoweredGetG
+ return true
+ case OpHmul32:
+ v.Op = Op386HMULL
+ return true
+ case OpHmul32u:
+ v.Op = Op386HMULLU
+ return true
+ case OpInterCall:
+ v.Op = Op386CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValue386_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValue386_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValue386_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValue386_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValue386_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValue386_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValue386_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValue386_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValue386_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValue386_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValue386_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValue386_OpLess16(v)
+ case OpLess16U:
+ return rewriteValue386_OpLess16U(v)
+ case OpLess32:
+ return rewriteValue386_OpLess32(v)
+ case OpLess32F:
+ return rewriteValue386_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValue386_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValue386_OpLess64F(v)
+ case OpLess8:
+ return rewriteValue386_OpLess8(v)
+ case OpLess8U:
+ return rewriteValue386_OpLess8U(v)
+ case OpLoad:
+ return rewriteValue386_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValue386_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValue386_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValue386_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValue386_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValue386_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValue386_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValue386_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValue386_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValue386_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValue386_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValue386_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValue386_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValue386_OpLsh8x8(v)
+ case OpMod16:
+ v.Op = Op386MODW
+ return true
+ case OpMod16u:
+ v.Op = Op386MODWU
+ return true
+ case OpMod32:
+ v.Op = Op386MODL
+ return true
+ case OpMod32u:
+ v.Op = Op386MODLU
+ return true
+ case OpMod8:
+ return rewriteValue386_OpMod8(v)
+ case OpMod8u:
+ return rewriteValue386_OpMod8u(v)
+ case OpMove:
+ return rewriteValue386_OpMove(v)
+ case OpMul16:
+ v.Op = Op386MULL
+ return true
+ case OpMul32:
+ v.Op = Op386MULL
+ return true
+ case OpMul32F:
+ v.Op = Op386MULSS
+ return true
+ case OpMul32uhilo:
+ v.Op = Op386MULLQU
+ return true
+ case OpMul64F:
+ v.Op = Op386MULSD
+ return true
+ case OpMul8:
+ v.Op = Op386MULL
+ return true
+ case OpNeg16:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32:
+ v.Op = Op386NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValue386_OpNeg32F(v)
+ case OpNeg64F:
+ return rewriteValue386_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = Op386NEGL
+ return true
+ case OpNeq16:
+ return rewriteValue386_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValue386_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValue386_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValue386_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValue386_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValue386_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValue386_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = Op386LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValue386_OpNot(v)
+ case OpOffPtr:
+ return rewriteValue386_OpOffPtr(v)
+ case OpOr16:
+ v.Op = Op386ORL
+ return true
+ case OpOr32:
+ v.Op = Op386ORL
+ return true
+ case OpOr8:
+ v.Op = Op386ORL
+ return true
+ case OpOrB:
+ v.Op = Op386ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValue386_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValue386_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValue386_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValue386_OpRotateLeft32(v)
+ case OpRotateLeft8:
+ return rewriteValue386_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValue386_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValue386_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValue386_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValue386_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValue386_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValue386_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValue386_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValue386_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValue386_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValue386_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValue386_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValue386_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValue386_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValue386_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValue386_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValue386_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValue386_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValue386_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValue386_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValue386_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValue386_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValue386_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValue386_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValue386_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValue386_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValue386_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = Op386MOVWLSX
+ return true
+ case OpSignExt8to16:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignExt8to32:
+ v.Op = Op386MOVBLSX
+ return true
+ case OpSignmask:
+ return rewriteValue386_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValue386_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = Op386SQRTSD
+ return true
+ case OpSqrt32:
+ v.Op = Op386SQRTSS
+ return true
+ case OpStaticCall:
+ v.Op = Op386CALLstatic
+ return true
+ case OpStore:
+ return rewriteValue386_OpStore(v)
+ case OpSub16:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32:
+ v.Op = Op386SUBL
+ return true
+ case OpSub32F:
+ v.Op = Op386SUBSS
+ return true
+ case OpSub32carry:
+ v.Op = Op386SUBLcarry
+ return true
+ case OpSub32withcarry:
+ v.Op = Op386SBBL
+ return true
+ case OpSub64F:
+ v.Op = Op386SUBSD
+ return true
+ case OpSub8:
+ v.Op = Op386SUBL
+ return true
+ case OpSubPtr:
+ v.Op = Op386SUBL
+ return true
+ case OpTailCall:
+ v.Op = Op386CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = Op386LoweredWB
+ return true
+ case OpXor16:
+ v.Op = Op386XORL
+ return true
+ case OpXor32:
+ v.Op = Op386XORL
+ return true
+ case OpXor8:
+ v.Op = Op386XORL
+ return true
+ case OpZero:
+ return rewriteValue386_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = Op386MOVWLZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = Op386MOVBLZX
+ return true
+ case OpZeromask:
+ return rewriteValue386_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValue386_Op386ADCL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCL x (MOVLconst [c]) f)
+ // result: (ADCLconst [c] x f)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386ADCLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst [c]))
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(Op386LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLcarry x (MOVLconst [c]))
+ // result: (ADDLconstcarry [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ADDLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] x:(SP))
+ // result: (LEAL [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ADDSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ANDLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386SHRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(Op386InvertFlags)
+ v0 := b.NewValue0(l.Pos, Op386CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(Op386FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(Op386FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(Op386FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst l:(ANDL x y) [0])
+ // cond: l.Uses==1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst l:(ANDLconst [c] x) [0])
+ // cond: l.Uses==1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ l := v_0
+ if l.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v.reset(Op386TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(Op386TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, Op386CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (DIVSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL [off1] {sym1} (LEAL [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL [off1] {sym1} (LEAL8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386LEAL {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(Op386LEAL1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [off1] {sym1} x (LEAL1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAL1 [0] {nil} x y)
+ // result: (ADDL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || auxToSym(v.Aux) != nil {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(Op386ADDL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2))
+ // result: (LEAL4 [off1+2*off2] {sym} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 2*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386SHLLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [off1] {sym} x (LEAL1 [off2] {nil} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2))
+ // result: (LEAL8 [off1+4*off2] {sym} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386LEAL1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ if auxToSym(v_1.Aux) != nil {
+ break
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1) + 4*int64(off2))) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [off1] {sym1} (LEAL [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAL8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBLSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBLZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVBLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off] {sym} ptr (MOVBLSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBLZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVBLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRWconst || auxIntToInt16(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst || auxIntToInt16(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore {s} [i+1] p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst || auxIntToInt32(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRWconst || auxIntToInt16(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRWconst || auxIntToInt16(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore {s} [i] p1 (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != Op386SHRLconst || auxIntToInt32(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p1 x:(MOVBstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p0 x:(MOVBstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p0 mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ADDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386ORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLconst [c] l:(MOVLload [off] {sym} ptr mem)) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLconstmodify [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != Op386XORLconst {
+ break
+ }
+ c := auxIntToInt32(y.AuxInt)
+ l := y.Args[0]
+ if l.Op != Op386MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVLstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSDconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSDconst2 (MOVSDconst1 [c]))
+ for {
+ c := auxIntToFloat64(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSDconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst1, typ.UInt32)
+ v0.AuxInt = float64ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSDstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSconst(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVSSconst [c])
+ // cond: config.ctxt.Flag_shared
+ // result: (MOVSSconst2 (MOVSSconst1 [c]))
+ for {
+ c := auxIntToFloat32(v.AuxInt)
+ if !(config.ctxt.Flag_shared) {
+ break
+ }
+ v.reset(Op386MOVSSconst2)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst1, typ.UInt32)
+ v0.AuxInt = float32ToAuxInt(c)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVSSstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWLSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWLSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWLSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWLSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWLSXload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWLSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWLSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWLZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWLZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != Op386MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, Op386MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWLZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != Op386ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWLZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(Op386MOVWLZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDLconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (MOVWLSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWLZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVWLZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDLconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(c,off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst || auxIntToInt32(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != Op386SHRLconst {
+ break
+ }
+ j := auxIntToInt32(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != Op386MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != Op386SHRLconst || auxIntToInt32(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstoreconst [sc] {s} (ADDLconst [off] ptr) mem)
+ // cond: sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sc.canAdd32(off)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAL [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && sc.canAdd32(off) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && ValAndOff(a).Off() + 2 == ValAndOff(c).Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && ValAndOff(a).Off()+2 == ValAndOff(c).Off() && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p1 x:(MOVWstoreconst [a] {s} p0 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p0 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p0 x:(MOVWstoreconst [c] {s} p1 mem))
+ // cond: x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p0 mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ x := v_1
+ if x.Op != Op386MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ p1 := x.Args[0]
+ if !(x.Uses == 1 && a.Off() == c.Off() && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(Op386NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int32(log32(c+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c+1) && c >= 15) {
+ break
+ }
+ v.reset(Op386SUBL)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int32(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(Op386LEAL1)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int32(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(Op386LEAL2)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int32(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(Op386LEAL4)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int32(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(Op386LEAL8)
+ v0 := b.NewValue0(v.Pos, Op386SHLLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int32(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int32(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int32(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, Op386LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MULSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( ORL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i0] {s} p mem) s0:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != Op386MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s0 := v_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) s0:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, s0)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != Op386MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := v_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, Op386MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL o0:(ORL x0:(MOVWload [i0] {s} p mem) s0:(SHLLconst [16] x1:(MOVBload [i2] {s} p mem))) s1:(SHLLconst [24] x2:(MOVBload [i3] {s} p mem)))
+ // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != Op386ORL {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ x0 := o0_0
+ if x0.Op != Op386MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s0 := o0_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := v_1
+ if s1.Op != Op386SHLLconst || auxIntToInt32(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL o0:(ORL x0:(MOVWload [i] {s} p0 mem) s0:(SHLLconst [16] x1:(MOVBload [i] {s} p1 mem))) s1:(SHLLconst [24] x2:(MOVBload [i] {s} p2 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != Op386ORL {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ x0 := o0_0
+ if x0.Op != Op386MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ s0 := o0_1
+ if s0.Op != Op386SHLLconst || auxIntToInt32(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != Op386MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ s1 := v_1
+ if s1.Op != Op386SHLLconst || auxIntToInt32(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != Op386MOVBload || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p2 := x2.Args[0]
+ if mem != x2.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && o0.Uses == 1 && sequentialAddresses(p0, p1, 2) && sequentialAddresses(p1, p2, 1) && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, Op386MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (ORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+ // result: (ROLBconst [(c+d)& 7] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386ROLBconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386ROLLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386ROLWconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt((c + d) & 15)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c&31),7))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c&31), 7)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ANDLconst [31] y))
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int16(min(int64(c&31),15))] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(min(int64(c&31), 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBL x (MOVLconst [c]) f)
+ // result: (SBBLconst [c] x f)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ f := v_2
+ v.reset(Op386SBBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, f)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != Op386InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(Op386SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != Op386FlagEQ {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagLT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_ULT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != Op386FlagGT_UGT {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [31] y))
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [31] y))
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != Op386ANDLconst || auxIntToInt32(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(Op386SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int16(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(Op386NEGL)
+ v0 := b.NewValue0(v.Pos, Op386SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLcarry x (MOVLconst [c]))
+ // result: (SUBLconstcarry [c] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386SUBLconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_Op386SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSDload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SUBSSload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL (SHLLconst [c] x) (SHRLconst [d] x))
+ // cond: d == 32-c
+ // result: (ROLLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRLconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: c < 16 && d == int16(16-c) && t.Size() == 2
+ // result: (ROLWconst x [int16(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRWconst {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 16 && d == int16(16-c) && t.Size() == 2) {
+ continue
+ }
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: c < 8 && d == int8(8-c) && t.Size() == 1
+ // result: (ROLBconst x [int8(c)])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != Op386SHLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != Op386SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(c < 8 && d == int8(8-c) && t.Size() == 1) {
+ continue
+ }
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != Op386MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != Op386MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLconstmodify [valoff1] {sym} (ADDLconst [off2] base) mem)
+ // cond: valoff1.canAdd32(off2)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAL [off2] {sym2} base) mem)
+ // cond: valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLconstmodify [valoff1.addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(valoff1.canAdd32(off2) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(valoff1.addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLload [off1] {sym} val (ADDLconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAL [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_Op386XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (XORLmodify [off1] {sym} (ADDLconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != Op386ADDLconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAL [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != Op386LEAL {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(Op386XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAL {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValue386_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVLconst [0])
+ for {
+ v.reset(Op386MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValue386_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (ORLconst <typ.UInt32> [0x10000] x))
+ for {
+ x := v_0
+ v.reset(Op386BSFL)
+ v0 := b.NewValue0(v.Pos, Op386ORLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0x10000)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETEQ)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTL p p))
+ for {
+ p := v_0
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386TESTL, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPL idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETLE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETBE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETGF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETL)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETB)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(Op386MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(Op386MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(Op386MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LEAL {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(Op386LEAL)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValue386_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHLL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to16 x) (SignExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (ZeroExt8to16 x) (ZeroExt8to16 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386MODWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValue386_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVLstore [4] dst (MOVLload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(Op386MOVLstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s%4 != 0
+ // result: (Move [s-s%4] (ADDLconst <dst.Type> dst [int32(s%4)]) (ADDLconst <src.Type> src [int32(s%4)]) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s%4 != 0) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, dst.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, Op386ADDLconst, src.Type)
+ v1.AuxInt = int32ToAuxInt(int32(s % 4))
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, Op386MOVLstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [10*(128-s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(10 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSL dst src (MOVLconst [int32(s/4)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 4*128 || config.noDuffDevice) && s%4 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(Op386REPMOVSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(Op386PXOR)
+ v0 := b.NewValue0(v.Pos, Op386MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNEF)
+ v0 := b.NewValue0(v.Pos, Op386UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(Op386SETNE)
+ v0 := b.NewValue0(v.Pos, Op386CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr)
+ // result: (ADDLconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(Op386ADDLconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValue386_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(Op386LoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft16 x (MOVLconst [c]))
+ // result: (ROLWconst [int16(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLWconst)
+ v.AuxInt = int16ToAuxInt(int16(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (MOVLconst [c]))
+ // result: (ROLLconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft8 x (MOVLconst [c]))
+ // result: (ROLBconst [int8(c&7)] x)
+ for {
+ x := v_0
+ if v_1.Op != Op386MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(Op386ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SHRWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SHRWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRW)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SARWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SARWconst x [15])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(Op386SARWconst)
+ v.AuxInt = int16ToAuxInt(15)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SHRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SHRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRL)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SARLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SHRBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386ANDL)
+ v0 := b.NewValue0(v.Pos, Op386SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB <t> x y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SHRB)
+ v.Type = t
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SARBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SARBconst x [7])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(Op386SARBconst)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, Op386ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, Op386NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, Op386SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, Op386CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 <t> x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(Op386SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(Op386SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, Op386MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SARLconst x [31])
+ for {
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValue386_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARLconst (NEGL <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386SARLconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, Op386NEGL, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(Op386MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(Op386MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(Op386MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(Op386MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, Op386MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%4 != 0 && s > 4
+ // result: (Zero [s-s%4] (ADDLconst destptr [int32(s%4)]) (MOVLstoreconst [0] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%4 != 0 && s > 4) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%4)
+ v0 := b.NewValue0(v.Pos, Op386ADDLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s % 4))
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [12] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,12)] destptr (MOVLstoreconst [makeValAndOff(0,8)] destptr (MOVLstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(Op386MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 12))
+ v0 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v1 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v2 := b.NewValue0(v.Pos, Op386MOVLstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [1*(128-s/4)] destptr (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 16 && s <= 4*128 && s%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(Op386DUFFZERO)
+ v.AuxInt = int64ToAuxInt(1 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0
+ // result: (REPSTOSL destptr (MOVLconst [int32(s/4)]) (MOVLconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 4*128 || (config.noDuffDevice && s > 16)) && s%4 == 0) {
+ break
+ }
+ v.reset(Op386REPSTOSL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(s / 4))
+ v1 := b.NewValue0(v.Pos, Op386MOVLconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValue386_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zeromask <t> x)
+ // result: (XORLconst [-1] (SBBLcarrymask <t> (CMPLconst x [1])))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(Op386XORLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v0 := b.NewValue0(v.Pos, Op386SBBLcarrymask, t)
+ v1 := b.NewValue0(v.Pos, Op386CMPLconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlock386(b *Block) bool {
+ switch b.Kind {
+ case Block386EQ:
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, Op386TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(Block386NE, v0)
+ return true
+ }
+ case Block386LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386OS, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == Op386TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != Op386SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != Op386SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(Block386NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGE:
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case Block386ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case Block386ULT:
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == Op386InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(Block386UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == Op386FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == Op386FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite386splitload.go b/src/cmd/compile/internal/ssa/rewrite386splitload.go
new file mode 100644
index 0000000..670e7f4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite386splitload.go
@@ -0,0 +1,160 @@
+// Code generated from gen/386splitload.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValue386splitload(v *Value) bool {
+ switch v.Op {
+ case Op386CMPBconstload:
+ return rewriteValue386splitload_Op386CMPBconstload(v)
+ case Op386CMPBload:
+ return rewriteValue386splitload_Op386CMPBload(v)
+ case Op386CMPLconstload:
+ return rewriteValue386splitload_Op386CMPLconstload(v)
+ case Op386CMPLload:
+ return rewriteValue386splitload_Op386CMPLload(v)
+ case Op386CMPWconstload:
+ return rewriteValue386splitload_Op386CMPWconstload(v)
+ case Op386CMPWload:
+ return rewriteValue386splitload_Op386CMPWload(v)
+ }
+ return false
+}
+func rewriteValue386splitload_Op386CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPB)
+ v0 := b.NewValue0(v.Pos, Op386MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPL)
+ v0 := b.NewValue0(v.Pos, Op386MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(Op386CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValue386splitload_Op386CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(Op386CMPW)
+ v0 := b.NewValue0(v.Pos, Op386MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlock386splitload(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64.go b/src/cmd/compile/internal/ssa/rewriteAMD64.go
new file mode 100644
index 0000000..0c789d6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64.go
@@ -0,0 +1,35214 @@
+// Code generated from gen/AMD64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/internal/obj"
+import "cmd/compile/internal/types"
+
+func rewriteValueAMD64(v *Value) bool {
+ switch v.Op {
+ case OpAMD64ADCQ:
+ return rewriteValueAMD64_OpAMD64ADCQ(v)
+ case OpAMD64ADCQconst:
+ return rewriteValueAMD64_OpAMD64ADCQconst(v)
+ case OpAMD64ADDL:
+ return rewriteValueAMD64_OpAMD64ADDL(v)
+ case OpAMD64ADDLconst:
+ return rewriteValueAMD64_OpAMD64ADDLconst(v)
+ case OpAMD64ADDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDLconstmodify(v)
+ case OpAMD64ADDLload:
+ return rewriteValueAMD64_OpAMD64ADDLload(v)
+ case OpAMD64ADDLmodify:
+ return rewriteValueAMD64_OpAMD64ADDLmodify(v)
+ case OpAMD64ADDQ:
+ return rewriteValueAMD64_OpAMD64ADDQ(v)
+ case OpAMD64ADDQcarry:
+ return rewriteValueAMD64_OpAMD64ADDQcarry(v)
+ case OpAMD64ADDQconst:
+ return rewriteValueAMD64_OpAMD64ADDQconst(v)
+ case OpAMD64ADDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ADDQconstmodify(v)
+ case OpAMD64ADDQload:
+ return rewriteValueAMD64_OpAMD64ADDQload(v)
+ case OpAMD64ADDQmodify:
+ return rewriteValueAMD64_OpAMD64ADDQmodify(v)
+ case OpAMD64ADDSD:
+ return rewriteValueAMD64_OpAMD64ADDSD(v)
+ case OpAMD64ADDSDload:
+ return rewriteValueAMD64_OpAMD64ADDSDload(v)
+ case OpAMD64ADDSS:
+ return rewriteValueAMD64_OpAMD64ADDSS(v)
+ case OpAMD64ADDSSload:
+ return rewriteValueAMD64_OpAMD64ADDSSload(v)
+ case OpAMD64ANDL:
+ return rewriteValueAMD64_OpAMD64ANDL(v)
+ case OpAMD64ANDLconst:
+ return rewriteValueAMD64_OpAMD64ANDLconst(v)
+ case OpAMD64ANDLconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDLconstmodify(v)
+ case OpAMD64ANDLload:
+ return rewriteValueAMD64_OpAMD64ANDLload(v)
+ case OpAMD64ANDLmodify:
+ return rewriteValueAMD64_OpAMD64ANDLmodify(v)
+ case OpAMD64ANDNL:
+ return rewriteValueAMD64_OpAMD64ANDNL(v)
+ case OpAMD64ANDNQ:
+ return rewriteValueAMD64_OpAMD64ANDNQ(v)
+ case OpAMD64ANDQ:
+ return rewriteValueAMD64_OpAMD64ANDQ(v)
+ case OpAMD64ANDQconst:
+ return rewriteValueAMD64_OpAMD64ANDQconst(v)
+ case OpAMD64ANDQconstmodify:
+ return rewriteValueAMD64_OpAMD64ANDQconstmodify(v)
+ case OpAMD64ANDQload:
+ return rewriteValueAMD64_OpAMD64ANDQload(v)
+ case OpAMD64ANDQmodify:
+ return rewriteValueAMD64_OpAMD64ANDQmodify(v)
+ case OpAMD64BSFQ:
+ return rewriteValueAMD64_OpAMD64BSFQ(v)
+ case OpAMD64BSWAPL:
+ return rewriteValueAMD64_OpAMD64BSWAPL(v)
+ case OpAMD64BSWAPQ:
+ return rewriteValueAMD64_OpAMD64BSWAPQ(v)
+ case OpAMD64BTCLconst:
+ return rewriteValueAMD64_OpAMD64BTCLconst(v)
+ case OpAMD64BTCQconst:
+ return rewriteValueAMD64_OpAMD64BTCQconst(v)
+ case OpAMD64BTLconst:
+ return rewriteValueAMD64_OpAMD64BTLconst(v)
+ case OpAMD64BTQconst:
+ return rewriteValueAMD64_OpAMD64BTQconst(v)
+ case OpAMD64BTRLconst:
+ return rewriteValueAMD64_OpAMD64BTRLconst(v)
+ case OpAMD64BTRQconst:
+ return rewriteValueAMD64_OpAMD64BTRQconst(v)
+ case OpAMD64BTSLconst:
+ return rewriteValueAMD64_OpAMD64BTSLconst(v)
+ case OpAMD64BTSQconst:
+ return rewriteValueAMD64_OpAMD64BTSQconst(v)
+ case OpAMD64CMOVLCC:
+ return rewriteValueAMD64_OpAMD64CMOVLCC(v)
+ case OpAMD64CMOVLCS:
+ return rewriteValueAMD64_OpAMD64CMOVLCS(v)
+ case OpAMD64CMOVLEQ:
+ return rewriteValueAMD64_OpAMD64CMOVLEQ(v)
+ case OpAMD64CMOVLGE:
+ return rewriteValueAMD64_OpAMD64CMOVLGE(v)
+ case OpAMD64CMOVLGT:
+ return rewriteValueAMD64_OpAMD64CMOVLGT(v)
+ case OpAMD64CMOVLHI:
+ return rewriteValueAMD64_OpAMD64CMOVLHI(v)
+ case OpAMD64CMOVLLE:
+ return rewriteValueAMD64_OpAMD64CMOVLLE(v)
+ case OpAMD64CMOVLLS:
+ return rewriteValueAMD64_OpAMD64CMOVLLS(v)
+ case OpAMD64CMOVLLT:
+ return rewriteValueAMD64_OpAMD64CMOVLLT(v)
+ case OpAMD64CMOVLNE:
+ return rewriteValueAMD64_OpAMD64CMOVLNE(v)
+ case OpAMD64CMOVQCC:
+ return rewriteValueAMD64_OpAMD64CMOVQCC(v)
+ case OpAMD64CMOVQCS:
+ return rewriteValueAMD64_OpAMD64CMOVQCS(v)
+ case OpAMD64CMOVQEQ:
+ return rewriteValueAMD64_OpAMD64CMOVQEQ(v)
+ case OpAMD64CMOVQGE:
+ return rewriteValueAMD64_OpAMD64CMOVQGE(v)
+ case OpAMD64CMOVQGT:
+ return rewriteValueAMD64_OpAMD64CMOVQGT(v)
+ case OpAMD64CMOVQHI:
+ return rewriteValueAMD64_OpAMD64CMOVQHI(v)
+ case OpAMD64CMOVQLE:
+ return rewriteValueAMD64_OpAMD64CMOVQLE(v)
+ case OpAMD64CMOVQLS:
+ return rewriteValueAMD64_OpAMD64CMOVQLS(v)
+ case OpAMD64CMOVQLT:
+ return rewriteValueAMD64_OpAMD64CMOVQLT(v)
+ case OpAMD64CMOVQNE:
+ return rewriteValueAMD64_OpAMD64CMOVQNE(v)
+ case OpAMD64CMOVWCC:
+ return rewriteValueAMD64_OpAMD64CMOVWCC(v)
+ case OpAMD64CMOVWCS:
+ return rewriteValueAMD64_OpAMD64CMOVWCS(v)
+ case OpAMD64CMOVWEQ:
+ return rewriteValueAMD64_OpAMD64CMOVWEQ(v)
+ case OpAMD64CMOVWGE:
+ return rewriteValueAMD64_OpAMD64CMOVWGE(v)
+ case OpAMD64CMOVWGT:
+ return rewriteValueAMD64_OpAMD64CMOVWGT(v)
+ case OpAMD64CMOVWHI:
+ return rewriteValueAMD64_OpAMD64CMOVWHI(v)
+ case OpAMD64CMOVWLE:
+ return rewriteValueAMD64_OpAMD64CMOVWLE(v)
+ case OpAMD64CMOVWLS:
+ return rewriteValueAMD64_OpAMD64CMOVWLS(v)
+ case OpAMD64CMOVWLT:
+ return rewriteValueAMD64_OpAMD64CMOVWLT(v)
+ case OpAMD64CMOVWNE:
+ return rewriteValueAMD64_OpAMD64CMOVWNE(v)
+ case OpAMD64CMPB:
+ return rewriteValueAMD64_OpAMD64CMPB(v)
+ case OpAMD64CMPBconst:
+ return rewriteValueAMD64_OpAMD64CMPBconst(v)
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64_OpAMD64CMPBload(v)
+ case OpAMD64CMPL:
+ return rewriteValueAMD64_OpAMD64CMPL(v)
+ case OpAMD64CMPLconst:
+ return rewriteValueAMD64_OpAMD64CMPLconst(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64_OpAMD64CMPLload(v)
+ case OpAMD64CMPQ:
+ return rewriteValueAMD64_OpAMD64CMPQ(v)
+ case OpAMD64CMPQconst:
+ return rewriteValueAMD64_OpAMD64CMPQconst(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64_OpAMD64CMPQload(v)
+ case OpAMD64CMPW:
+ return rewriteValueAMD64_OpAMD64CMPW(v)
+ case OpAMD64CMPWconst:
+ return rewriteValueAMD64_OpAMD64CMPWconst(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64_OpAMD64CMPWload(v)
+ case OpAMD64CMPXCHGLlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGLlock(v)
+ case OpAMD64CMPXCHGQlock:
+ return rewriteValueAMD64_OpAMD64CMPXCHGQlock(v)
+ case OpAMD64DIVSD:
+ return rewriteValueAMD64_OpAMD64DIVSD(v)
+ case OpAMD64DIVSDload:
+ return rewriteValueAMD64_OpAMD64DIVSDload(v)
+ case OpAMD64DIVSS:
+ return rewriteValueAMD64_OpAMD64DIVSS(v)
+ case OpAMD64DIVSSload:
+ return rewriteValueAMD64_OpAMD64DIVSSload(v)
+ case OpAMD64HMULL:
+ return rewriteValueAMD64_OpAMD64HMULL(v)
+ case OpAMD64HMULLU:
+ return rewriteValueAMD64_OpAMD64HMULLU(v)
+ case OpAMD64HMULQ:
+ return rewriteValueAMD64_OpAMD64HMULQ(v)
+ case OpAMD64HMULQU:
+ return rewriteValueAMD64_OpAMD64HMULQU(v)
+ case OpAMD64LEAL:
+ return rewriteValueAMD64_OpAMD64LEAL(v)
+ case OpAMD64LEAL1:
+ return rewriteValueAMD64_OpAMD64LEAL1(v)
+ case OpAMD64LEAL2:
+ return rewriteValueAMD64_OpAMD64LEAL2(v)
+ case OpAMD64LEAL4:
+ return rewriteValueAMD64_OpAMD64LEAL4(v)
+ case OpAMD64LEAL8:
+ return rewriteValueAMD64_OpAMD64LEAL8(v)
+ case OpAMD64LEAQ:
+ return rewriteValueAMD64_OpAMD64LEAQ(v)
+ case OpAMD64LEAQ1:
+ return rewriteValueAMD64_OpAMD64LEAQ1(v)
+ case OpAMD64LEAQ2:
+ return rewriteValueAMD64_OpAMD64LEAQ2(v)
+ case OpAMD64LEAQ4:
+ return rewriteValueAMD64_OpAMD64LEAQ4(v)
+ case OpAMD64LEAQ8:
+ return rewriteValueAMD64_OpAMD64LEAQ8(v)
+ case OpAMD64MOVBELstore:
+ return rewriteValueAMD64_OpAMD64MOVBELstore(v)
+ case OpAMD64MOVBEQstore:
+ return rewriteValueAMD64_OpAMD64MOVBEQstore(v)
+ case OpAMD64MOVBQSX:
+ return rewriteValueAMD64_OpAMD64MOVBQSX(v)
+ case OpAMD64MOVBQSXload:
+ return rewriteValueAMD64_OpAMD64MOVBQSXload(v)
+ case OpAMD64MOVBQZX:
+ return rewriteValueAMD64_OpAMD64MOVBQZX(v)
+ case OpAMD64MOVBatomicload:
+ return rewriteValueAMD64_OpAMD64MOVBatomicload(v)
+ case OpAMD64MOVBload:
+ return rewriteValueAMD64_OpAMD64MOVBload(v)
+ case OpAMD64MOVBstore:
+ return rewriteValueAMD64_OpAMD64MOVBstore(v)
+ case OpAMD64MOVBstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVBstoreconst(v)
+ case OpAMD64MOVLQSX:
+ return rewriteValueAMD64_OpAMD64MOVLQSX(v)
+ case OpAMD64MOVLQSXload:
+ return rewriteValueAMD64_OpAMD64MOVLQSXload(v)
+ case OpAMD64MOVLQZX:
+ return rewriteValueAMD64_OpAMD64MOVLQZX(v)
+ case OpAMD64MOVLatomicload:
+ return rewriteValueAMD64_OpAMD64MOVLatomicload(v)
+ case OpAMD64MOVLf2i:
+ return rewriteValueAMD64_OpAMD64MOVLf2i(v)
+ case OpAMD64MOVLi2f:
+ return rewriteValueAMD64_OpAMD64MOVLi2f(v)
+ case OpAMD64MOVLload:
+ return rewriteValueAMD64_OpAMD64MOVLload(v)
+ case OpAMD64MOVLstore:
+ return rewriteValueAMD64_OpAMD64MOVLstore(v)
+ case OpAMD64MOVLstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVLstoreconst(v)
+ case OpAMD64MOVOload:
+ return rewriteValueAMD64_OpAMD64MOVOload(v)
+ case OpAMD64MOVOstore:
+ return rewriteValueAMD64_OpAMD64MOVOstore(v)
+ case OpAMD64MOVOstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVOstoreconst(v)
+ case OpAMD64MOVQatomicload:
+ return rewriteValueAMD64_OpAMD64MOVQatomicload(v)
+ case OpAMD64MOVQf2i:
+ return rewriteValueAMD64_OpAMD64MOVQf2i(v)
+ case OpAMD64MOVQi2f:
+ return rewriteValueAMD64_OpAMD64MOVQi2f(v)
+ case OpAMD64MOVQload:
+ return rewriteValueAMD64_OpAMD64MOVQload(v)
+ case OpAMD64MOVQstore:
+ return rewriteValueAMD64_OpAMD64MOVQstore(v)
+ case OpAMD64MOVQstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVQstoreconst(v)
+ case OpAMD64MOVSDload:
+ return rewriteValueAMD64_OpAMD64MOVSDload(v)
+ case OpAMD64MOVSDstore:
+ return rewriteValueAMD64_OpAMD64MOVSDstore(v)
+ case OpAMD64MOVSSload:
+ return rewriteValueAMD64_OpAMD64MOVSSload(v)
+ case OpAMD64MOVSSstore:
+ return rewriteValueAMD64_OpAMD64MOVSSstore(v)
+ case OpAMD64MOVWQSX:
+ return rewriteValueAMD64_OpAMD64MOVWQSX(v)
+ case OpAMD64MOVWQSXload:
+ return rewriteValueAMD64_OpAMD64MOVWQSXload(v)
+ case OpAMD64MOVWQZX:
+ return rewriteValueAMD64_OpAMD64MOVWQZX(v)
+ case OpAMD64MOVWload:
+ return rewriteValueAMD64_OpAMD64MOVWload(v)
+ case OpAMD64MOVWstore:
+ return rewriteValueAMD64_OpAMD64MOVWstore(v)
+ case OpAMD64MOVWstoreconst:
+ return rewriteValueAMD64_OpAMD64MOVWstoreconst(v)
+ case OpAMD64MULL:
+ return rewriteValueAMD64_OpAMD64MULL(v)
+ case OpAMD64MULLconst:
+ return rewriteValueAMD64_OpAMD64MULLconst(v)
+ case OpAMD64MULQ:
+ return rewriteValueAMD64_OpAMD64MULQ(v)
+ case OpAMD64MULQconst:
+ return rewriteValueAMD64_OpAMD64MULQconst(v)
+ case OpAMD64MULSD:
+ return rewriteValueAMD64_OpAMD64MULSD(v)
+ case OpAMD64MULSDload:
+ return rewriteValueAMD64_OpAMD64MULSDload(v)
+ case OpAMD64MULSS:
+ return rewriteValueAMD64_OpAMD64MULSS(v)
+ case OpAMD64MULSSload:
+ return rewriteValueAMD64_OpAMD64MULSSload(v)
+ case OpAMD64NEGL:
+ return rewriteValueAMD64_OpAMD64NEGL(v)
+ case OpAMD64NEGQ:
+ return rewriteValueAMD64_OpAMD64NEGQ(v)
+ case OpAMD64NOTL:
+ return rewriteValueAMD64_OpAMD64NOTL(v)
+ case OpAMD64NOTQ:
+ return rewriteValueAMD64_OpAMD64NOTQ(v)
+ case OpAMD64ORL:
+ return rewriteValueAMD64_OpAMD64ORL(v)
+ case OpAMD64ORLconst:
+ return rewriteValueAMD64_OpAMD64ORLconst(v)
+ case OpAMD64ORLconstmodify:
+ return rewriteValueAMD64_OpAMD64ORLconstmodify(v)
+ case OpAMD64ORLload:
+ return rewriteValueAMD64_OpAMD64ORLload(v)
+ case OpAMD64ORLmodify:
+ return rewriteValueAMD64_OpAMD64ORLmodify(v)
+ case OpAMD64ORQ:
+ return rewriteValueAMD64_OpAMD64ORQ(v)
+ case OpAMD64ORQconst:
+ return rewriteValueAMD64_OpAMD64ORQconst(v)
+ case OpAMD64ORQconstmodify:
+ return rewriteValueAMD64_OpAMD64ORQconstmodify(v)
+ case OpAMD64ORQload:
+ return rewriteValueAMD64_OpAMD64ORQload(v)
+ case OpAMD64ORQmodify:
+ return rewriteValueAMD64_OpAMD64ORQmodify(v)
+ case OpAMD64ROLB:
+ return rewriteValueAMD64_OpAMD64ROLB(v)
+ case OpAMD64ROLBconst:
+ return rewriteValueAMD64_OpAMD64ROLBconst(v)
+ case OpAMD64ROLL:
+ return rewriteValueAMD64_OpAMD64ROLL(v)
+ case OpAMD64ROLLconst:
+ return rewriteValueAMD64_OpAMD64ROLLconst(v)
+ case OpAMD64ROLQ:
+ return rewriteValueAMD64_OpAMD64ROLQ(v)
+ case OpAMD64ROLQconst:
+ return rewriteValueAMD64_OpAMD64ROLQconst(v)
+ case OpAMD64ROLW:
+ return rewriteValueAMD64_OpAMD64ROLW(v)
+ case OpAMD64ROLWconst:
+ return rewriteValueAMD64_OpAMD64ROLWconst(v)
+ case OpAMD64RORB:
+ return rewriteValueAMD64_OpAMD64RORB(v)
+ case OpAMD64RORL:
+ return rewriteValueAMD64_OpAMD64RORL(v)
+ case OpAMD64RORQ:
+ return rewriteValueAMD64_OpAMD64RORQ(v)
+ case OpAMD64RORW:
+ return rewriteValueAMD64_OpAMD64RORW(v)
+ case OpAMD64SARB:
+ return rewriteValueAMD64_OpAMD64SARB(v)
+ case OpAMD64SARBconst:
+ return rewriteValueAMD64_OpAMD64SARBconst(v)
+ case OpAMD64SARL:
+ return rewriteValueAMD64_OpAMD64SARL(v)
+ case OpAMD64SARLconst:
+ return rewriteValueAMD64_OpAMD64SARLconst(v)
+ case OpAMD64SARQ:
+ return rewriteValueAMD64_OpAMD64SARQ(v)
+ case OpAMD64SARQconst:
+ return rewriteValueAMD64_OpAMD64SARQconst(v)
+ case OpAMD64SARW:
+ return rewriteValueAMD64_OpAMD64SARW(v)
+ case OpAMD64SARWconst:
+ return rewriteValueAMD64_OpAMD64SARWconst(v)
+ case OpAMD64SBBLcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBLcarrymask(v)
+ case OpAMD64SBBQ:
+ return rewriteValueAMD64_OpAMD64SBBQ(v)
+ case OpAMD64SBBQcarrymask:
+ return rewriteValueAMD64_OpAMD64SBBQcarrymask(v)
+ case OpAMD64SBBQconst:
+ return rewriteValueAMD64_OpAMD64SBBQconst(v)
+ case OpAMD64SETA:
+ return rewriteValueAMD64_OpAMD64SETA(v)
+ case OpAMD64SETAE:
+ return rewriteValueAMD64_OpAMD64SETAE(v)
+ case OpAMD64SETAEstore:
+ return rewriteValueAMD64_OpAMD64SETAEstore(v)
+ case OpAMD64SETAstore:
+ return rewriteValueAMD64_OpAMD64SETAstore(v)
+ case OpAMD64SETB:
+ return rewriteValueAMD64_OpAMD64SETB(v)
+ case OpAMD64SETBE:
+ return rewriteValueAMD64_OpAMD64SETBE(v)
+ case OpAMD64SETBEstore:
+ return rewriteValueAMD64_OpAMD64SETBEstore(v)
+ case OpAMD64SETBstore:
+ return rewriteValueAMD64_OpAMD64SETBstore(v)
+ case OpAMD64SETEQ:
+ return rewriteValueAMD64_OpAMD64SETEQ(v)
+ case OpAMD64SETEQstore:
+ return rewriteValueAMD64_OpAMD64SETEQstore(v)
+ case OpAMD64SETG:
+ return rewriteValueAMD64_OpAMD64SETG(v)
+ case OpAMD64SETGE:
+ return rewriteValueAMD64_OpAMD64SETGE(v)
+ case OpAMD64SETGEstore:
+ return rewriteValueAMD64_OpAMD64SETGEstore(v)
+ case OpAMD64SETGstore:
+ return rewriteValueAMD64_OpAMD64SETGstore(v)
+ case OpAMD64SETL:
+ return rewriteValueAMD64_OpAMD64SETL(v)
+ case OpAMD64SETLE:
+ return rewriteValueAMD64_OpAMD64SETLE(v)
+ case OpAMD64SETLEstore:
+ return rewriteValueAMD64_OpAMD64SETLEstore(v)
+ case OpAMD64SETLstore:
+ return rewriteValueAMD64_OpAMD64SETLstore(v)
+ case OpAMD64SETNE:
+ return rewriteValueAMD64_OpAMD64SETNE(v)
+ case OpAMD64SETNEstore:
+ return rewriteValueAMD64_OpAMD64SETNEstore(v)
+ case OpAMD64SHLL:
+ return rewriteValueAMD64_OpAMD64SHLL(v)
+ case OpAMD64SHLLconst:
+ return rewriteValueAMD64_OpAMD64SHLLconst(v)
+ case OpAMD64SHLQ:
+ return rewriteValueAMD64_OpAMD64SHLQ(v)
+ case OpAMD64SHLQconst:
+ return rewriteValueAMD64_OpAMD64SHLQconst(v)
+ case OpAMD64SHRB:
+ return rewriteValueAMD64_OpAMD64SHRB(v)
+ case OpAMD64SHRBconst:
+ return rewriteValueAMD64_OpAMD64SHRBconst(v)
+ case OpAMD64SHRL:
+ return rewriteValueAMD64_OpAMD64SHRL(v)
+ case OpAMD64SHRLconst:
+ return rewriteValueAMD64_OpAMD64SHRLconst(v)
+ case OpAMD64SHRQ:
+ return rewriteValueAMD64_OpAMD64SHRQ(v)
+ case OpAMD64SHRQconst:
+ return rewriteValueAMD64_OpAMD64SHRQconst(v)
+ case OpAMD64SHRW:
+ return rewriteValueAMD64_OpAMD64SHRW(v)
+ case OpAMD64SHRWconst:
+ return rewriteValueAMD64_OpAMD64SHRWconst(v)
+ case OpAMD64SUBL:
+ return rewriteValueAMD64_OpAMD64SUBL(v)
+ case OpAMD64SUBLconst:
+ return rewriteValueAMD64_OpAMD64SUBLconst(v)
+ case OpAMD64SUBLload:
+ return rewriteValueAMD64_OpAMD64SUBLload(v)
+ case OpAMD64SUBLmodify:
+ return rewriteValueAMD64_OpAMD64SUBLmodify(v)
+ case OpAMD64SUBQ:
+ return rewriteValueAMD64_OpAMD64SUBQ(v)
+ case OpAMD64SUBQborrow:
+ return rewriteValueAMD64_OpAMD64SUBQborrow(v)
+ case OpAMD64SUBQconst:
+ return rewriteValueAMD64_OpAMD64SUBQconst(v)
+ case OpAMD64SUBQload:
+ return rewriteValueAMD64_OpAMD64SUBQload(v)
+ case OpAMD64SUBQmodify:
+ return rewriteValueAMD64_OpAMD64SUBQmodify(v)
+ case OpAMD64SUBSD:
+ return rewriteValueAMD64_OpAMD64SUBSD(v)
+ case OpAMD64SUBSDload:
+ return rewriteValueAMD64_OpAMD64SUBSDload(v)
+ case OpAMD64SUBSS:
+ return rewriteValueAMD64_OpAMD64SUBSS(v)
+ case OpAMD64SUBSSload:
+ return rewriteValueAMD64_OpAMD64SUBSSload(v)
+ case OpAMD64TESTB:
+ return rewriteValueAMD64_OpAMD64TESTB(v)
+ case OpAMD64TESTBconst:
+ return rewriteValueAMD64_OpAMD64TESTBconst(v)
+ case OpAMD64TESTL:
+ return rewriteValueAMD64_OpAMD64TESTL(v)
+ case OpAMD64TESTLconst:
+ return rewriteValueAMD64_OpAMD64TESTLconst(v)
+ case OpAMD64TESTQ:
+ return rewriteValueAMD64_OpAMD64TESTQ(v)
+ case OpAMD64TESTQconst:
+ return rewriteValueAMD64_OpAMD64TESTQconst(v)
+ case OpAMD64TESTW:
+ return rewriteValueAMD64_OpAMD64TESTW(v)
+ case OpAMD64TESTWconst:
+ return rewriteValueAMD64_OpAMD64TESTWconst(v)
+ case OpAMD64XADDLlock:
+ return rewriteValueAMD64_OpAMD64XADDLlock(v)
+ case OpAMD64XADDQlock:
+ return rewriteValueAMD64_OpAMD64XADDQlock(v)
+ case OpAMD64XCHGL:
+ return rewriteValueAMD64_OpAMD64XCHGL(v)
+ case OpAMD64XCHGQ:
+ return rewriteValueAMD64_OpAMD64XCHGQ(v)
+ case OpAMD64XORL:
+ return rewriteValueAMD64_OpAMD64XORL(v)
+ case OpAMD64XORLconst:
+ return rewriteValueAMD64_OpAMD64XORLconst(v)
+ case OpAMD64XORLconstmodify:
+ return rewriteValueAMD64_OpAMD64XORLconstmodify(v)
+ case OpAMD64XORLload:
+ return rewriteValueAMD64_OpAMD64XORLload(v)
+ case OpAMD64XORLmodify:
+ return rewriteValueAMD64_OpAMD64XORLmodify(v)
+ case OpAMD64XORQ:
+ return rewriteValueAMD64_OpAMD64XORQ(v)
+ case OpAMD64XORQconst:
+ return rewriteValueAMD64_OpAMD64XORQconst(v)
+ case OpAMD64XORQconstmodify:
+ return rewriteValueAMD64_OpAMD64XORQconstmodify(v)
+ case OpAMD64XORQload:
+ return rewriteValueAMD64_OpAMD64XORQload(v)
+ case OpAMD64XORQmodify:
+ return rewriteValueAMD64_OpAMD64XORQmodify(v)
+ case OpAdd16:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAdd32F:
+ v.Op = OpAMD64ADDSS
+ return true
+ case OpAdd64:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAdd64F:
+ v.Op = OpAMD64ADDSD
+ return true
+ case OpAdd8:
+ v.Op = OpAMD64ADDL
+ return true
+ case OpAddPtr:
+ v.Op = OpAMD64ADDQ
+ return true
+ case OpAddr:
+ return rewriteValueAMD64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd32:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAnd64:
+ v.Op = OpAMD64ANDQ
+ return true
+ case OpAnd8:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAndB:
+ v.Op = OpAMD64ANDL
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueAMD64_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueAMD64_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ return rewriteValueAMD64_OpAtomicAnd32(v)
+ case OpAtomicAnd8:
+ return rewriteValueAMD64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueAMD64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueAMD64_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueAMD64_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueAMD64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueAMD64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueAMD64_OpAtomicLoad8(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueAMD64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ return rewriteValueAMD64_OpAtomicOr32(v)
+ case OpAtomicOr8:
+ return rewriteValueAMD64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueAMD64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueAMD64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueAMD64_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueAMD64_OpAtomicStorePtrNoWB(v)
+ case OpAvg64u:
+ v.Op = OpAMD64AVGQU
+ return true
+ case OpBitLen16:
+ return rewriteValueAMD64_OpBitLen16(v)
+ case OpBitLen32:
+ return rewriteValueAMD64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueAMD64_OpBitLen64(v)
+ case OpBitLen8:
+ return rewriteValueAMD64_OpBitLen8(v)
+ case OpBswap32:
+ v.Op = OpAMD64BSWAPL
+ return true
+ case OpBswap64:
+ v.Op = OpAMD64BSWAPQ
+ return true
+ case OpCeil:
+ return rewriteValueAMD64_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpAMD64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom32:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCom64:
+ v.Op = OpAMD64NOTQ
+ return true
+ case OpCom8:
+ v.Op = OpAMD64NOTL
+ return true
+ case OpCondSelect:
+ return rewriteValueAMD64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueAMD64_OpConst16(v)
+ case OpConst32:
+ v.Op = OpAMD64MOVLconst
+ return true
+ case OpConst32F:
+ v.Op = OpAMD64MOVSSconst
+ return true
+ case OpConst64:
+ v.Op = OpAMD64MOVQconst
+ return true
+ case OpConst64F:
+ v.Op = OpAMD64MOVSDconst
+ return true
+ case OpConst8:
+ return rewriteValueAMD64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueAMD64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueAMD64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueAMD64_OpCtz16(v)
+ case OpCtz16NonZero:
+ return rewriteValueAMD64_OpCtz16NonZero(v)
+ case OpCtz32:
+ return rewriteValueAMD64_OpCtz32(v)
+ case OpCtz32NonZero:
+ return rewriteValueAMD64_OpCtz32NonZero(v)
+ case OpCtz64:
+ return rewriteValueAMD64_OpCtz64(v)
+ case OpCtz64NonZero:
+ return rewriteValueAMD64_OpCtz64NonZero(v)
+ case OpCtz8:
+ return rewriteValueAMD64_OpCtz8(v)
+ case OpCtz8NonZero:
+ return rewriteValueAMD64_OpCtz8NonZero(v)
+ case OpCvt32Fto32:
+ v.Op = OpAMD64CVTTSS2SL
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpAMD64CVTTSS2SQ
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpAMD64CVTSS2SD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpAMD64CVTSL2SS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpAMD64CVTSL2SD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpAMD64CVTTSD2SL
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpAMD64CVTSD2SS
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpAMD64CVTTSD2SQ
+ return true
+ case OpCvt64to32F:
+ v.Op = OpAMD64CVTSQ2SS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpAMD64CVTSQ2SD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv128u:
+ v.Op = OpAMD64DIVQU2
+ return true
+ case OpDiv16:
+ return rewriteValueAMD64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueAMD64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueAMD64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpAMD64DIVSS
+ return true
+ case OpDiv32u:
+ return rewriteValueAMD64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueAMD64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpAMD64DIVSD
+ return true
+ case OpDiv64u:
+ return rewriteValueAMD64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueAMD64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueAMD64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueAMD64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueAMD64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueAMD64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueAMD64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueAMD64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueAMD64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueAMD64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueAMD64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueAMD64_OpFMA(v)
+ case OpFloor:
+ return rewriteValueAMD64_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpAMD64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpAMD64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpAMD64LoweredGetClosurePtr
+ return true
+ case OpGetG:
+ return rewriteValueAMD64_OpGetG(v)
+ case OpHasCPUFeature:
+ return rewriteValueAMD64_OpHasCPUFeature(v)
+ case OpHmul32:
+ v.Op = OpAMD64HMULL
+ return true
+ case OpHmul32u:
+ v.Op = OpAMD64HMULLU
+ return true
+ case OpHmul64:
+ v.Op = OpAMD64HMULQ
+ return true
+ case OpHmul64u:
+ v.Op = OpAMD64HMULQU
+ return true
+ case OpInterCall:
+ v.Op = OpAMD64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueAMD64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueAMD64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueAMD64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueAMD64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueAMD64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueAMD64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueAMD64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueAMD64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueAMD64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueAMD64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueAMD64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueAMD64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueAMD64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueAMD64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueAMD64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueAMD64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueAMD64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueAMD64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueAMD64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueAMD64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueAMD64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueAMD64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueAMD64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueAMD64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueAMD64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueAMD64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueAMD64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueAMD64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueAMD64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueAMD64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueAMD64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueAMD64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueAMD64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueAMD64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueAMD64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueAMD64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueAMD64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueAMD64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueAMD64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueAMD64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueAMD64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueAMD64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueAMD64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueAMD64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueAMD64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueAMD64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueAMD64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueAMD64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueAMD64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueAMD64_OpMove(v)
+ case OpMul16:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32:
+ v.Op = OpAMD64MULL
+ return true
+ case OpMul32F:
+ v.Op = OpAMD64MULSS
+ return true
+ case OpMul64:
+ v.Op = OpAMD64MULQ
+ return true
+ case OpMul64F:
+ v.Op = OpAMD64MULSD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpAMD64MULQU2
+ return true
+ case OpMul8:
+ v.Op = OpAMD64MULL
+ return true
+ case OpNeg16:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeg32F:
+ return rewriteValueAMD64_OpNeg32F(v)
+ case OpNeg64:
+ v.Op = OpAMD64NEGQ
+ return true
+ case OpNeg64F:
+ return rewriteValueAMD64_OpNeg64F(v)
+ case OpNeg8:
+ v.Op = OpAMD64NEGL
+ return true
+ case OpNeq16:
+ return rewriteValueAMD64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueAMD64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueAMD64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueAMD64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueAMD64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueAMD64_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueAMD64_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueAMD64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpAMD64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueAMD64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueAMD64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr32:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOr64:
+ v.Op = OpAMD64ORQ
+ return true
+ case OpOr8:
+ v.Op = OpAMD64ORL
+ return true
+ case OpOrB:
+ v.Op = OpAMD64ORL
+ return true
+ case OpPanicBounds:
+ return rewriteValueAMD64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueAMD64_OpPopCount16(v)
+ case OpPopCount32:
+ v.Op = OpAMD64POPCNTL
+ return true
+ case OpPopCount64:
+ v.Op = OpAMD64POPCNTQ
+ return true
+ case OpPopCount8:
+ return rewriteValueAMD64_OpPopCount8(v)
+ case OpPrefetchCache:
+ v.Op = OpAMD64PrefetchT0
+ return true
+ case OpPrefetchCacheStreamed:
+ v.Op = OpAMD64PrefetchNTA
+ return true
+ case OpRotateLeft16:
+ v.Op = OpAMD64ROLW
+ return true
+ case OpRotateLeft32:
+ v.Op = OpAMD64ROLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpAMD64ROLQ
+ return true
+ case OpRotateLeft8:
+ v.Op = OpAMD64ROLB
+ return true
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ return rewriteValueAMD64_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueAMD64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueAMD64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueAMD64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueAMD64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueAMD64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueAMD64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueAMD64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueAMD64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueAMD64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueAMD64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueAMD64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueAMD64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueAMD64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueAMD64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueAMD64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueAMD64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueAMD64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueAMD64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueAMD64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueAMD64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueAMD64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueAMD64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueAMD64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueAMD64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueAMD64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueAMD64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueAMD64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueAMD64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueAMD64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueAMD64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueAMD64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueAMD64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueAMD64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueAMD64_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValueAMD64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt16to64:
+ v.Op = OpAMD64MOVWQSX
+ return true
+ case OpSignExt32to64:
+ v.Op = OpAMD64MOVLQSX
+ return true
+ case OpSignExt8to16:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to32:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSignExt8to64:
+ v.Op = OpAMD64MOVBQSX
+ return true
+ case OpSlicemask:
+ return rewriteValueAMD64_OpSlicemask(v)
+ case OpSpectreIndex:
+ return rewriteValueAMD64_OpSpectreIndex(v)
+ case OpSpectreSliceIndex:
+ return rewriteValueAMD64_OpSpectreSliceIndex(v)
+ case OpSqrt:
+ v.Op = OpAMD64SQRTSD
+ return true
+ case OpSqrt32:
+ v.Op = OpAMD64SQRTSS
+ return true
+ case OpStaticCall:
+ v.Op = OpAMD64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueAMD64_OpStore(v)
+ case OpSub16:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSub32F:
+ v.Op = OpAMD64SUBSS
+ return true
+ case OpSub64:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpSub64F:
+ v.Op = OpAMD64SUBSD
+ return true
+ case OpSub8:
+ v.Op = OpAMD64SUBL
+ return true
+ case OpSubPtr:
+ v.Op = OpAMD64SUBQ
+ return true
+ case OpTailCall:
+ v.Op = OpAMD64CALLtail
+ return true
+ case OpTrunc:
+ return rewriteValueAMD64_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpAMD64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor32:
+ v.Op = OpAMD64XORL
+ return true
+ case OpXor64:
+ v.Op = OpAMD64XORQ
+ return true
+ case OpXor8:
+ v.Op = OpAMD64XORL
+ return true
+ case OpZero:
+ return rewriteValueAMD64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpAMD64MOVWQZX
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpAMD64MOVLQZX
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpAMD64MOVBQZX
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQ x (MOVQconst [c]) carry)
+ // cond: is32Bit(c)
+ // result: (ADCQconst x [int32(c)] carry)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ carry := v_2
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADCQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, carry)
+ return true
+ }
+ break
+ }
+ // match: (ADCQ x y (FlagEQ))
+ // result: (ADDQcarry x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQcarry)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADCQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCQconst x [c] (FlagEQ))
+ // result: (ADDQconstcarry x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDL x (MOVLconst [c]))
+ // result: (ADDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [3] y))
+ // result: (LEAL8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [2] y))
+ // result: (LEAL4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (SHLLconst [1] y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL y y))
+ // result: (LEAL2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (ADDL x y))
+ // result: (LEAL2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDL (ADDLconst [c] x) y)
+ // result: (LEAL1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (LEAL [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x (NEGL y))
+ // result: (SUBL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDLconst [c] (ADDL x y))
+ // result: (LEAL1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (SHLLconst [1] x))
+ // result: (LEAL1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] (LEAL8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAL8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDLconst [c] (ADDLconst [d] x))
+ // result: (ADDLconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDLconst [off] x:(SP))
+ // result: (LEAL [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ADDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (MOVLconst [c]))
+ // result: (ADDQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [3] y))
+ // result: (LEAQ8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [2] y))
+ // result: (LEAQ4 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (SHLQconst [1] y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ y y))
+ // result: (LEAQ2 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (ADDQ x y))
+ // result: (LEAQ2 y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ADDQ (ADDQconst [c] x) y)
+ // result: (LEAQ1 [c] x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (LEAQ [c] {s} y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x (NEGQ y))
+ // result: (SUBQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQcarry(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQcarry x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDQconstcarry x [int32(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQconstcarry)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDQconst [c] (ADDQ x y))
+ // result: (LEAQ1 [c] x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (SHLQconst [1] x))
+ // result: (LEAQ1 [c] x x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ [d] {s} x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ1 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ2 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ4 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [c] (LEAQ8 [d] {s} x y))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDQconst [c] (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDQconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDQconst [off] x:(SP))
+ // result: (LEAQ [off] x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ADDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ADDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ADDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ADDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (ADDSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ADDSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ADDSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ADDSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ADDSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ADDSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ADDSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (ADDSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ADDSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDL (NOTL (SHLL (MOVLconst [1]) y)) x)
+ // result: (BTRL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTL {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRLconst [int8(log32(^c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (MOVLconst [c]))
+ // result: (ANDLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (NOTL y))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (ANDNL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NOTL {
+ continue
+ }
+ y := v_1.Args[0]
+ if !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64ANDNL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (NEGL x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSIL x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSIL)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDL x (ADDLconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSRL x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSRL)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRLconst [int8(log32(^c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] (BTRLconst [d] x))
+ // result: (ANDLconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDLconst [c] _)
+ // cond: c==0
+ // result: (MOVLconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDLconst [c] x)
+ // cond: c==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (ANDL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDNL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDNL x (SHLL (MOVLconst [1]) y))
+ // result: (BTRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLL {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDNQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDNQ x (SHLQ (MOVQconst [1]) y))
+ // result: (BTRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQ {
+ break
+ }
+ y := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQ (NOTQ (SHLQ (MOVQconst [1]) y)) x)
+ // result: (BTRQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64NOTQ {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(^c) && uint64(^c) >= 128
+ // result: (BTRQconst [int8(log64(^c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(^c) && uint64(^c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(^c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ANDQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (NOTQ y))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (ANDNQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NOTQ {
+ continue
+ }
+ y := v_1.Args[0]
+ if !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64ANDNQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (NEGQ x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSIQ x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSIQ)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDQ x (ADDQconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSRQ x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSRQ)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128
+ // result: (BTRQconst [int8(log32(^c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(^c)) && uint64(^c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(^c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [c] (ANDQconst [d] x))
+ // result: (ANDQconst [c & d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [c] (BTRQconst [d] x))
+ // cond: is32Bit(int64(c) &^ (1<<uint32(d)))
+ // result: (ANDQconst [c &^ (1<<uint32(d))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) &^ (1 << uint32(d)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(c &^ (1 << uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [ 0xFF] x)
+ // result: (MOVBQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0xFFFF] x)
+ // result: (MOVWQZX x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xFFFF {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDQconst [0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDQconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ANDQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ANDQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ANDQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ANDQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (ANDQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ANDQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ANDQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ANDQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ANDQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSFQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BSFQ (ORQconst <t> [1<<8] (MOVBQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<8] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (BSFQ (ORQconst <t> [1<<16] (MOVWQZX x)))
+ // result: (BSFQ (ORQconst <t> [1<<16] x))
+ for {
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 1<<16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpAMD64BSFQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQconst, t)
+ v0.AuxInt = int32ToAuxInt(1 << 16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSWAPL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BSWAPL (BSWAPL p))
+ // result: p
+ for {
+ if v_0.Op != OpAMD64BSWAPL {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ // match: (BSWAPL x:(MOVLload [i] {s} p mem))
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBELload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBELload)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (BSWAPL (MOVBELload [i] {s} p m))
+ // result: (MOVLload [i] {s} p m)
+ for {
+ if v_0.Op != OpAMD64MOVBELload {
+ break
+ }
+ i := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ m := v_0.Args[1]
+ p := v_0.Args[0]
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg2(p, m)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BSWAPQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BSWAPQ (BSWAPQ p))
+ // result: p
+ for {
+ if v_0.Op != OpAMD64BSWAPQ {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ // match: (BSWAPQ x:(MOVQload [i] {s} p mem))
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBEQload [i] {s} p mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ i := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBEQload)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (BSWAPQ (MOVBEQload [i] {s} p m))
+ // result: (MOVQload [i] {s} p m)
+ for {
+ if v_0.Op != OpAMD64MOVBEQload {
+ break
+ }
+ i := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ m := v_0.Args[1]
+ p := v_0.Args[0]
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg2(p, m)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTCLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (BTCLconst [d] x))
+ // result: (XORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d ^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTCQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTCQconst [c] (XORQconst [d] x))
+ // cond: is32Bit(int64(d) ^ 1<<uint32(c))
+ // result: (XORQconst [d ^ 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) ^ 1<<uint32(c))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(d ^ 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (BTCQconst [d] x))
+ // cond: is32Bit(1<<uint32(c) ^ 1<<uint32(d))
+ // result: (XORQconst [1<<uint32(c) ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(1<<uint32(c) ^ 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTCQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d ^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTLconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (BTLconst [c] (SHRLconst [d] x))
+ // cond: (c+d)<32
+ // result: (BTLconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 32) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [c] (SHLLconst [d] x))
+ // cond: c>d
+ // result: (BTLconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTLconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTLconst [0] s:(SHRL x y))
+ // result: (BTL y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTQconst [c] (SHRQconst [d] x))
+ // cond: (c+d)<64
+ // result: (BTQconst [c+d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !((c + d) < 64) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [c] (SHLQconst [d] x))
+ // cond: c>d
+ // result: (BTQconst [c-d] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64SHLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > d) {
+ break
+ }
+ v.reset(OpAMD64BTQconst)
+ v.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTQconst [0] s:(SHRQ x y))
+ // result: (BTQ y x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ s := v_0
+ if s.Op != OpAMD64SHRQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ v.reset(OpAMD64BTQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTRLconst [c] (BTSLconst [c] x))
+ // result: (BTRLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTCLconst [c] x))
+ // result: (BTRLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (ANDLconst [d] x))
+ // result: (ANDLconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (BTRLconst [d] x))
+ // result: (ANDLconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d&^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTRQconst [c] (BTSQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTCQconst [c] x))
+ // result: (BTRQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (ANDQconst [d] x))
+ // cond: is32Bit(int64(d) &^ (1<<uint32(c)))
+ // result: (ANDQconst [d &^ (1<<uint32(c))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) &^ (1 << uint32(c)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(d &^ (1 << uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (BTRQconst [d] x))
+ // cond: is32Bit(^(1<<uint32(c) | 1<<uint32(d)))
+ // result: (ANDQconst [^(1<<uint32(c) | 1<<uint32(d))] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(^(1<<uint32(c) | 1<<uint32(d)))) {
+ break
+ }
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(^(1<<uint32(c) | 1<<uint32(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTRQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d&^(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d &^ (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTSLconst [c] (BTRLconst [c] x))
+ // result: (BTSLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTCLconst [c] x))
+ // result: (BTSLconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (ORLconst [d] x))
+ // result: (ORLconst [d | 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (BTSLconst [d] x))
+ // result: (ORLconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [d|(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(d | (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64BTSQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BTSQconst [c] (BTRQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTRQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTCQconst [c] x))
+ // result: (BTSQconst [c] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst || auxIntToInt8(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (ORQconst [d] x))
+ // cond: is32Bit(int64(d) | 1<<uint32(c))
+ // result: (ORQconst [d | 1<<uint32(c)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(d) | 1<<uint32(c))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(d | 1<<uint32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (BTSQconst [d] x))
+ // cond: is32Bit(1<<uint32(c) | 1<<uint32(d))
+ // result: (ORQconst [1<<uint32(c) | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(1<<uint32(c) | 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(1<<uint32(c) | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BTSQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d|(1<<uint32(c))])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d | (1 << uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCC x y (InvertFlags cond))
+ // result: (CMOVLLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLCS x y (InvertFlags cond))
+ // result: (CMOVLHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLEQ x y (InvertFlags cond))
+ // result: (CMOVLEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGE x y (InvertFlags cond))
+ // result: (CMOVLLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLGT x y (InvertFlags cond))
+ // result: (CMOVLLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLHI x y (InvertFlags cond))
+ // result: (CMOVLCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLE x y (InvertFlags cond))
+ // result: (CMOVLGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLS x y (InvertFlags cond))
+ // result: (CMOVLCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLLT x y (InvertFlags cond))
+ // result: (CMOVLGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVLNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVLNE x y (InvertFlags cond))
+ // result: (CMOVLNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVLNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVLNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCC x y (InvertFlags cond))
+ // result: (CMOVQLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQCS x y (InvertFlags cond))
+ // result: (CMOVQHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQEQ x y (InvertFlags cond))
+ // result: (CMOVQEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQEQ x _ (Select1 (BSFQ (ORQconst [c] _))))
+ // cond: c != 0
+ // result: x
+ for {
+ x := v_0
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpAMD64BSFQ {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGE x y (InvertFlags cond))
+ // result: (CMOVQLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQGT x y (InvertFlags cond))
+ // result: (CMOVQLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQHI x y (InvertFlags cond))
+ // result: (CMOVQCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLE x y (InvertFlags cond))
+ // result: (CMOVQGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLS x y (InvertFlags cond))
+ // result: (CMOVQCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQLT x y (InvertFlags cond))
+ // result: (CMOVQGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVQNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVQNE x y (InvertFlags cond))
+ // result: (CMOVQNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVQNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVQNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCC x y (InvertFlags cond))
+ // result: (CMOVWLS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCC _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWCS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWCS x y (InvertFlags cond))
+ // result: (CMOVWHI x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWCS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWEQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWEQ x y (InvertFlags cond))
+ // result: (CMOVWEQ x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWEQ _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWEQ y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGE x y (InvertFlags cond))
+ // result: (CMOVWLE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGE y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWGT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWGT x y (InvertFlags cond))
+ // result: (CMOVWLT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWGT y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWHI(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHI x y (InvertFlags cond))
+ // result: (CMOVWCS x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI y _ (FlagLT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWHI _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLE x y (InvertFlags cond))
+ // result: (CMOVWGE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLS x y (InvertFlags cond))
+ // result: (CMOVWCC x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagEQ))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLS y _ (FlagLT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWLT(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLT x y (InvertFlags cond))
+ // result: (CMOVWGT x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_UGT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT y _ (FlagGT_ULT))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLT _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMOVWNE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWNE x y (InvertFlags cond))
+ // result: (CMOVWNE x y cond)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64InvertFlags {
+ break
+ }
+ cond := v_2.Args[0]
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(x, y, cond)
+ return true
+ }
+ // match: (CMOVWNE y _ (FlagEQ))
+ // result: y
+ for {
+ y := v_0
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagGT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_ULT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWNE _ x (FlagLT_UGT))
+ // result: x
+ for {
+ x := v_1
+ if v_2.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPB x (MOVLconst [c]))
+ // result: (CMPBconst x [int8(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPB (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPBconst x [int8(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPB y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPB l:(MOVBload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPBload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPB x l:(MOVBload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPBload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)<uint8(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)<y && uint8(x)>uint8(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) < y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)<uint8(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) < uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPBconst (MOVLconst [x]) [y])
+ // cond: int8(x)>y && uint8(x)>uint8(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int8(x) > y && uint8(x) > uint8(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPBconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int8(m) && int8(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int8(m) && int8(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPBconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTB x y)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPBconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPBconst x [0])
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconst l:(MOVBload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPBconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPBconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPBload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPBload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPBload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPBload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPBconstload {sym} [makeValAndOff(int32(int8(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPBconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPL x (MOVLconst [c]))
+ // result: (CMPLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPL (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPLconst x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPL y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPL l:(MOVLload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPLload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPL x l:(MOVLload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPLload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)<uint32(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x<y && uint32(x)>uint32(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x < y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)<uint32(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPLconst (MOVLconst [x]) [y])
+ // cond: x>y && uint32(x)>uint32(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > y && uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPLconst (SHRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRLconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint64(32-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPLconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTL x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPLconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTLconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPLconst x [0])
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconst l:(MOVLload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPLconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPLconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPLload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPLload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPLload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPLload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPLconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPLconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPQconst x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPQ y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)<uint64(y)
+ // result: (FlagLT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x<y && uint64(x)>uint64(y)
+ // result: (FlagLT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x < y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)<uint64(y)
+ // result: (FlagGT_ULT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQ (MOVQconst [x]) (MOVQconst [y]))
+ // cond: x>y && uint64(x)>uint64(y)
+ // result: (FlagGT_UGT)
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x > y && uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQ l:(MOVQload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPQload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPQ x l:(MOVQload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPQload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPQconst (NEGQ (ADDQconst [-16] (ANDQconst [15] _))) [32])
+ // result: (FlagLT_ULT)
+ for {
+ if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 15 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (NEGQ (ADDQconst [ -8] (ANDQconst [7] _))) [32])
+ // result: (FlagLT_ULT)
+ for {
+ if auxIntToInt32(v.AuxInt) != 32 || v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_0_0.AuxInt) != -8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_0_0.AuxInt) != 7 {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x<int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)<uint64(int64(y))
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) < uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVQconst [x]) [y])
+ // cond: x>int64(y) && uint64(x)>uint64(int64(y))
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y) && uint64(x) > uint64(int64(y))) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPQconst (MOVBQZX _) [c])
+ // cond: 0xFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVBQZX || !(0xFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (MOVWQZX _) [c])
+ // cond: 0xFFFF < c
+ // result: (FlagLT_ULT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVWQZX || !(0xFFFF < c) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (SHRQconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SHRQconst {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 64 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDQconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQ x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQ {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPQconst a:(ANDQconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTQconst [c] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPQconst x [0])
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconst l:(MOVQload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(c,off)] ptr mem)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(c, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPQconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPQconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPQload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPQload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPQload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPQload {sym} [off] ptr (MOVQconst [c]) mem)
+ // cond: validVal(c)
+ // result: (CMPQconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validVal(c)) {
+ break
+ }
+ v.reset(OpAMD64CMPQconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVLconst [c]))
+ // result: (CMPWconst x [int16(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVLconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int16(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int16ToAuxInt(int16(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW l:(MOVWload {sym} [off] ptr mem) x)
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (CMPWload {sym} [off] ptr x mem)
+ for {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ x := v_1
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (CMPW x l:(MOVWload {sym} [off] ptr mem))
+ // cond: canMergeLoad(v, l) && clobber(l)
+ // result: (InvertFlags (CMPWload {sym} [off] ptr x mem))
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoad(v, l) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64InvertFlags)
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWload, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, x, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) == y) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)<uint16(y)
+ // result: (FlagLT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)<y && uint16(x)>uint16(y)
+ // result: (FlagLT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) < y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)<uint16(y)
+ // result: (FlagGT_ULT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) < uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_ULT)
+ return true
+ }
+ // match: (CMPWconst (MOVLconst [x]) [y])
+ // cond: int16(x)>y && uint16(x)>uint16(y)
+ // result: (FlagGT_UGT)
+ for {
+ y := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(int16(x) > y && uint16(x) > uint16(y)) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (CMPWconst (ANDLconst _ [m]) [n])
+ // cond: 0 <= int16(m) && int16(m) < n
+ // result: (FlagLT_ULT)
+ for {
+ n := auxIntToInt16(v.AuxInt)
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= int16(m) && int16(m) < n) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_ULT)
+ return true
+ }
+ // match: (CMPWconst a:(ANDL x y) [0])
+ // cond: a.Uses == 1
+ // result: (TESTW x y)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDL {
+ break
+ }
+ y := a.Args[1]
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWconst a:(ANDLconst [c] x) [0])
+ // cond: a.Uses == 1
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ a := v_0
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ x := a.Args[0]
+ if !(a.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst x [0])
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconst l:(MOVWload {sym} [off] ptr mem) [c])
+ // cond: l.Uses == 1 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWconstload [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (CMPWconstload [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (CMPWconstload [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPWload [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPWload [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (CMPWload [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64CMPWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (CMPWload {sym} [off] ptr (MOVLconst [c]) mem)
+ // result: (CMPWconstload {sym} [makeValAndOff(int32(int16(c)),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64CMPWconstload)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGLlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGLlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGLlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64CMPXCHGQlock(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPXCHGQlock [off1] {sym} (ADDQconst [off2] ptr) old new_ mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (CMPXCHGQlock [off1+off2] {sym} ptr old new_ mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (DIVSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64DIVSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (DIVSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (DIVSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (DIVSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64DIVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULL x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULL y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULL)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULLU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULLU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULLU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULLU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQ x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQ y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64HMULQU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (HMULQU x y)
+ // cond: !x.rematerializeable() && y.rematerializeable()
+ // result: (HMULQU y x)
+ for {
+ x := v_0
+ y := v_1
+ if !(!x.rematerializeable() && y.rematerializeable()) {
+ break
+ }
+ v.reset(OpAMD64HMULQU)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAL [c] {s} (ADDLconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAL [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAL)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAL [c] {s} (ADDL x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAL1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL1 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDLconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAL1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAL1 [c] {s} x (SHLLconst [3] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL2 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAL2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL2 [c] {s} x (SHLLconst [2] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL4 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAL4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL4 [c] {s} x (SHLLconst [1] y))
+ // result: (LEAL8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLLconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAL8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAL8 [c] {s} (ADDLconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAL8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAL8 [c] {s} x (ADDLconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAL8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEAQ [c] {s} (ADDQconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (LEAQ [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [c] {s} (ADDQ x y))
+ // cond: x.Op != OpSB && y.Op != OpSB
+ // result: (LEAQ1 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if !(x.Op != OpSB && y.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ [off2] {sym2} x))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ [off1+off2] {mergeSym(sym1,sym2)} x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ2 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ2 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ4 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ4 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ [off1] {sym1} (LEAQ8 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ8 {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ1(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ1 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ1 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64ADDQconst {
+ continue
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ2 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [c] {s} x (SHLQconst [3] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 3 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ1 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64LEAQ {
+ continue
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ1)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (LEAQ1 [off1] {sym1} x (LEAQ1 [off2] {sym2} x y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1, sym2)} y x)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ continue
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ continue
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(y, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (LEAQ1 [0] x y)
+ // cond: v.Aux == nil
+ // result: (ADDQ x y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if !(v.Aux == nil) {
+ break
+ }
+ v.reset(OpAMD64ADDQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ2(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ2 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ2 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB
+ // result: (LEAQ2 [c+2*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+2*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(c + 2*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ4 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [c] {s} x (SHLQconst [2] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 2 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ2 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil
+ // result: (LEAQ4 [off1+2*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+2*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + 2*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ2 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*2)
+ // result: (LEAQ [off+int32(scale)*2] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*2)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*2)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ4(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ4 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ4 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB
+ // result: (LEAQ4 [c+4*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+4*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(c + 4*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [c] {s} x (SHLQconst [1] y))
+ // result: (LEAQ8 [c] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64SHLQconst || auxIntToInt8(v_1.AuxInt) != 1 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ4 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off1] {sym1} x (LEAQ1 [off2] {sym2} y y))
+ // cond: is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil
+ // result: (LEAQ8 [off1+4*off2] {sym1} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64LEAQ1 {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[1]
+ if y != v_1.Args[0] || !(is32Bit(int64(off1)+4*int64(off2)) && sym2 == nil) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + 4*off2)
+ v.Aux = symToAux(sym1)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ4 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*4)
+ // result: (LEAQ [off+int32(scale)*4] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*4)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*4)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64LEAQ8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LEAQ8 [c] {s} (ADDQconst [d] x) y)
+ // cond: is32Bit(int64(c)+int64(d)) && x.Op != OpSB
+ // result: (LEAQ8 [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(c)+int64(d)) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [c] {s} x (ADDQconst [d] y))
+ // cond: is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB
+ // result: (LEAQ8 [c+8*d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(c)+8*int64(d)) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(c + 8*d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off1] {sym1} (LEAQ [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (LEAQ8 [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVQconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ scale := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEAQ8 [off] {sym} x (MOVLconst [scale]))
+ // cond: is32Bit(int64(off)+int64(scale)*8)
+ // result: (LEAQ [off+int32(scale)*8] {sym} x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ scale := auxIntToInt32(v_1.AuxInt)
+ if !(is32Bit(int64(off) + int64(scale)*8)) {
+ break
+ }
+ v.reset(OpAMD64LEAQ)
+ v.AuxInt = int32ToAuxInt(off + int32(scale)*8)
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBELstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBELstore [i] {s} p (BSWAPL x) m)
+ // result: (MOVLstore [i] {s} p x m)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64BSWAPL {
+ break
+ }
+ x := v_1.Args[0]
+ m := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, x, m)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBEQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBEQstore [i] {s} p (BSWAPQ x) m)
+ // result: (MOVQstore [i] {s} p x m)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64BSWAPQ {
+ break
+ }
+ x := v_1.Args[0]
+ m := v_2
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, x, m)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQSX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQSX (ANDLconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDLconst [c & 0x7f] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBQSXload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBQZX x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVBload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBQZX x)
+ // cond: zeroUpper56Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper56Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstore [off] {sym} ptr y:(SETL x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETL {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETLE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETLE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETG x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETG {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETGE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETGE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETEQ x) mem)
+ // cond: y.Uses == 1
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETEQ {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETNE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETNE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETB x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETB {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETBE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETBE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETA x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETA {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr y:(SETAE x) mem)
+ // cond: y.Uses == 1
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SETAE {
+ break
+ }
+ x := y.Args[0]
+ mem := v_2
+ if !(y.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQSX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBQZX x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x0:(MOVBstore [i-1] {s} p (SHRWconst [8] w) mem))
+ // cond: x0.Uses == 1 && clobber(x0)
+ // result: (MOVWstore [i-1] {s} p (ROLWconst <w.Type> [8] w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 w x0:(MOVBstore [i] {s} p0 (SHRWconst [8] w) mem))
+ // cond: x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)
+ // result: (MOVWstore [i] {s} p0 (ROLWconst <w.Type> [8] w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRWconst || auxIntToInt8(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(x0.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x0)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, w.Type)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x2:(MOVBstore [i-1] {s} p (SHRLconst [8] w) x1:(MOVBstore [i-2] {s} p (SHRLconst [16] w) x0:(MOVBstore [i-3] {s} p (SHRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVLstore [i-3] {s} p (BSWAPL <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x2 := v_2
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-1 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-3 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p3 w x2:(MOVBstore [i] {s} p2 (SHRLconst [8] w) x1:(MOVBstore [i] {s} p1 (SHRLconst [16] w) x0:(MOVBstore [i] {s} p0 (SHRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)
+ // result: (MOVLstore [i] {s} p0 (BSWAPL <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p3 := v_0
+ w := v_1
+ x2 := v_2
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ p2 := x2.Args[0]
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRLconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRLconst || auxIntToInt8(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRLconst || auxIntToInt8(x0_1.AuxInt) != 24 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x6:(MOVBstore [i-1] {s} p (SHRQconst [8] w) x5:(MOVBstore [i-2] {s} p (SHRQconst [16] w) x4:(MOVBstore [i-3] {s} p (SHRQconst [24] w) x3:(MOVBstore [i-4] {s} p (SHRQconst [32] w) x2:(MOVBstore [i-5] {s} p (SHRQconst [40] w) x1:(MOVBstore [i-6] {s} p (SHRQconst [48] w) x0:(MOVBstore [i-7] {s} p (SHRQconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVQstore [i-7] {s} p (BSWAPQ <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x6 := v_2
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i-1 || auxToSym(x6.Aux) != s {
+ break
+ }
+ _ = x6.Args[2]
+ if p != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
+ break
+ }
+ x5 := x6.Args[2]
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i-2 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if p != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
+ break
+ }
+ x4 := x5.Args[2]
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i-3 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
+ break
+ }
+ x3 := x4.Args[2]
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x2 := x3.Args[2]
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i-5 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i-6 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i-7 || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p7 w x6:(MOVBstore [i] {s} p6 (SHRQconst [8] w) x5:(MOVBstore [i] {s} p5 (SHRQconst [16] w) x4:(MOVBstore [i] {s} p4 (SHRQconst [24] w) x3:(MOVBstore [i] {s} p3 (SHRQconst [32] w) x2:(MOVBstore [i] {s} p2 (SHRQconst [40] w) x1:(MOVBstore [i] {s} p1 (SHRQconst [48] w) x0:(MOVBstore [i] {s} p0 (SHRQconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVQstore [i] {s} p0 (BSWAPQ <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p7 := v_0
+ w := v_1
+ x6 := v_2
+ if x6.Op != OpAMD64MOVBstore || auxIntToInt32(x6.AuxInt) != i || auxToSym(x6.Aux) != s {
+ break
+ }
+ _ = x6.Args[2]
+ p6 := x6.Args[0]
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpAMD64SHRQconst || auxIntToInt8(x6_1.AuxInt) != 8 || w != x6_1.Args[0] {
+ break
+ }
+ x5 := x6.Args[2]
+ if x5.Op != OpAMD64MOVBstore || auxIntToInt32(x5.AuxInt) != i || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ p5 := x5.Args[0]
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpAMD64SHRQconst || auxIntToInt8(x5_1.AuxInt) != 16 || w != x5_1.Args[0] {
+ break
+ }
+ x4 := x5.Args[2]
+ if x4.Op != OpAMD64MOVBstore || auxIntToInt32(x4.AuxInt) != i || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ p4 := x4.Args[0]
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpAMD64SHRQconst || auxIntToInt8(x4_1.AuxInt) != 24 || w != x4_1.Args[0] {
+ break
+ }
+ x3 := x4.Args[2]
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != i || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ p3 := x3.Args[0]
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpAMD64SHRQconst || auxIntToInt8(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x2 := x3.Args[2]
+ if x2.Op != OpAMD64MOVBstore || auxIntToInt32(x2.AuxInt) != i || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ p2 := x2.Args[0]
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 40 || w != x2_1.Args[0] {
+ break
+ }
+ x1 := x2.Args[2]
+ if x1.Op != OpAMD64MOVBstore || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 48 || w != x1_1.Args[0] {
+ break
+ }
+ x0 := x1.Args[2]
+ if x0.Op != OpAMD64MOVBstore || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ p0 := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpAMD64SHRQconst || auxIntToInt8(x0_1.AuxInt) != 56 || w != x0_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && sequentialAddresses(p0, p1, 1) && sequentialAddresses(p1, p2, 1) && sequentialAddresses(p2, p3, 1) && sequentialAddresses(p3, p4, 1) && sequentialAddresses(p4, p5, 1) && sequentialAddresses(p5, p6, 1) && sequentialAddresses(p6, p7, 1) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(p0, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRQconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i+1] {s} p (SHRQconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i+1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRLconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SHRQconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SHRQconst [j-8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRWconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRWconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRQconst [8] w) x:(MOVBstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRWconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRWconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRLconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRLconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p0 w x:(MOVBstore [i] {s} p1 (SHRQconst [8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p1 := x.Args[0]
+ x_1 := x.Args[1]
+ if x_1.Op != OpAMD64SHRQconst || auxIntToInt8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRLconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p1 (SHRQconst [j] w) x:(MOVBstore [i] {s} p0 w0:(SHRQconst [j-8] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)
+ // result: (MOVWstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVBstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 1) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [7] {s} p1 (SHRQconst [56] w) x1:(MOVWstore [5] {s} p1 (SHRQconst [40] w) x2:(MOVLstore [1] {s} p1 (SHRQconst [8] w) x3:(MOVBstore [0] {s} p1 w mem))))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
+ // result: (MOVQstore {s} p1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 56 {
+ break
+ }
+ w := v_1.Args[0]
+ x1 := v_2
+ if x1.Op != OpAMD64MOVWstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p1 != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpAMD64SHRQconst || auxIntToInt8(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpAMD64MOVLstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p1 != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpAMD64SHRQconst || auxIntToInt8(x2_1.AuxInt) != 8 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpAMD64MOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
+ break
+ }
+ mem := x3.Args[2]
+ if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.Aux = symToAux(s)
+ v.AddArg3(p1, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p x1:(MOVBload [j] {s2} p2 mem) mem2:(MOVBstore [i-1] {s} p x2:(MOVBload [j-1] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVWstore [i-1] {s} p (MOVWload [j-1] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVBload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVBstore || auxIntToInt32(mem2.AuxInt) != i-1 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVBload || auxIntToInt32(x2.AuxInt) != j-1 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(j - 1)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
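+// rewriteValueAMD64_OpAMD64MOVBstoreconst folds ADDQconst offsets and LEAQ
+// symbols into the ValAndOff aux of constant byte stores, and merges two
+// adjacent single-use constant byte stores into one MOVWstoreconst.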
+func rewriteValueAMD64_OpAMD64MOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVBstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [a] {s} p x:(MOVBstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVWstoreconst [makeValAndOff(a.Val()&0xff | c.Val()<<8, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVBstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xff|c.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
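+// rewriteValueAMD64_OpAMD64MOVLQSX rewrites 32-to-64-bit sign extensions: a
+// single-use load feeding the extension becomes a MOVLQSXload, an ANDLconst
+// whose mask leaves the sign bit clear keeps its (masked) ANDLconst, and
+// nested sign extensions collapse to the narrowest one.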
+func rewriteValueAMD64_OpAMD64MOVLQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQSX (ANDLconst [c] x))
+ // cond: uint32(c) & 0x80000000 == 0
+ // result: (ANDLconst [c & 0x7fffffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(uint32(c)&0x80000000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fffffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVLQSX x))
+ // result: (MOVLQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
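+// rewriteValueAMD64_OpAMD64MOVLQSXload forwards a sign-extending load from a
+// location just written by a MOVLstore to the same address (becoming MOVLQSX
+// of the stored value), and folds LEAQ offsets and symbols into the load's
+// aux fields.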
+func rewriteValueAMD64_OpAMD64MOVLQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLQSXload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
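+// rewriteValueAMD64_OpAMD64MOVLQZX rewrites 32-to-64-bit zero extensions: a
+// single-use load becomes a plain MOVLload (32-bit loads already zero the
+// upper half), the extension is dropped when the operand is known to have
+// zero upper bits, and ANDLconst operands or nested zero extensions are
+// collapsed.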
+func rewriteValueAMD64_OpAMD64MOVLQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVLload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVLload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLQZX x)
+ // cond: zeroUpper32Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper32Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVLQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVLQZX x))
+ // result: (MOVLQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
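+// rewriteValueAMD64_OpAMD64MOVLatomicload folds ADDQconst offsets and LEAQ
+// symbols into the aux fields of 32-bit atomic loads.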
+func rewriteValueAMD64_OpAMD64MOVLatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVLatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
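+// rewriteValueAMD64_OpAMD64MOVLf2i replaces a float-to-int bit move of an
+// argument with a retyped Arg in the entry block when the sizes match.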
+func rewriteValueAMD64_OpAMD64MOVLf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
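+// rewriteValueAMD64_OpAMD64MOVLi2f replaces an int-to-float bit move of an
+// argument with a retyped Arg in the entry block when the sizes match.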
+func rewriteValueAMD64_OpAMD64MOVLi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVLi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
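+// rewriteValueAMD64_OpAMD64MOVLload forwards a load from a just-written
+// location (MOVLstore becomes MOVLQZX of the stored value, MOVSSstore becomes
+// MOVLf2i), folds ADDQconst offsets and LEAQ symbols into the aux fields, and
+// turns a load from a read-only symbol into a constant.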
+func rewriteValueAMD64_OpAMD64MOVLload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVLload [off] {sym} ptr (MOVLstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVLQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVLload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVLload [off] {sym} ptr (MOVSSstore [off] {sym} ptr val _))
+ // result: (MOVLf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVLload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
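+// rewriteValueAMD64_OpAMD64MOVLstore rewrites 32-bit stores: it drops
+// redundant sign/zero extensions of the stored value, folds address
+// arithmetic and constants into the aux fields, combines adjacent 32-bit
+// stores (and load/store pairs) into 64-bit ones, and fuses single-use loads
+// combined with ALU ops at the same address into read-modify-write
+// instructions such as ADDLmodify and ANDLconstmodify.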
+func rewriteValueAMD64_OpAMD64MOVLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVLstore [off] {sym} ptr (MOVLQSX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLQZX x) mem)
+ // result: (MOVLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVLstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVLstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p (SHRQconst [32] w) x:(MOVLstore [i-4] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVQstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p (SHRQconst [j] w) x:(MOVLstore [i-4] {s} p w0:(SHRQconst [j-32] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVQstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p1 (SHRQconst [32] w) x:(MOVLstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
+ // result: (MOVQstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p1 (SHRQconst [j] w) x:(MOVLstore [i] {s} p0 w0:(SHRQconst [j-32] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)
+ // result: (MOVQstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVLstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 4) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p x1:(MOVLload [j] {s2} p2 mem) mem2:(MOVLstore [i-4] {s} p x2:(MOVLload [j-4] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVQstore [i-4] {s} p (MOVQload [j-4] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVLload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVLstore || auxIntToInt32(mem2.AuxInt) != i-4 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVLload || auxIntToInt32(x2.AuxInt) != j-4 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(j - 4)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORLload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORLload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ADDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(SUBL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBL {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ANDL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(ORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore {sym} [off] ptr y:(XORL l:(MOVLload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORLmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORL {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ADDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ADDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ANDLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ANDLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(ORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr a:(XORLconst [c] l:(MOVLload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (XORLconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORLconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVLload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstore [off] {sym} ptr (MOVLf2i val) mem)
+ // result: (MOVSSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVLstore [i] {s} p x:(BSWAPL w) mem)
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBELstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPL {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBELstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVLstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVLstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVLstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [c] {s} p x:(MOVLstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVLstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(a.Off())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVLstoreconst [a] {s} p x:(MOVLstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVQstore [a.Off()] {s} p (MOVQconst [a.Val64()&0xffffffff | c.Val64()<<32]) mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVLstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(a.Off())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(a.Val64()&0xffffffff | c.Val64()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVOload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVOload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVOstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVOstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVOstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVOstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVOstore [dstOff] {dstSym} ptr (MOVOload [srcOff] {srcSym} (SB) _) mem)
+ // cond: symIsRO(srcSym)
+ // result: (MOVQstore [dstOff+8] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder))]) (MOVQstore [dstOff] {dstSym} ptr (MOVQconst [int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder))]) mem))
+ for {
+ dstOff := auxIntToInt32(v.AuxInt)
+ dstSym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVOload {
+ break
+ }
+ srcOff := auxIntToInt32(v_1.AuxInt)
+ srcSym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsRO(srcSym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(dstOff + 8)
+ v.Aux = symToAux(dstSym)
+ v0 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff)+8, config.ctxt.Arch.ByteOrder)))
+ v1 := b.NewValue0(v_1.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(dstOff)
+ v1.Aux = symToAux(dstSym)
+ v2 := b.NewValue0(v_1.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(srcSym, int64(srcOff), config.ctxt.Arch.ByteOrder)))
+ v1.AddArg3(ptr, v2, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVOstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVOstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVOstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVOstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQatomicload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVQatomicload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQatomicload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQatomicload [off1] {sym1} (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQatomicload [off1+off2] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQatomicload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQf2i(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQf2i <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQi2f(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVQi2f <t> (Arg <u> [off] {sym}))
+ // cond: t.Size() == u.Size()
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ u := v_0.Type
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ if !(t.Size() == u.Size()) {
+ break
+ }
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQload [off] {sym} ptr (MOVQstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVQload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVQload [off] {sym} ptr (MOVSDstore [off] {sym} ptr val _))
+ // result: (MOVQf2i val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVSDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQf2i)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVQload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVQconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVQstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVQstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // cond: validVal(c)
+ // result: (MOVQstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(validVal(c)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQload x [off] {sym} ptr mem) mem)
+ // cond: y.Uses==1 && clobber(y)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQload || auxIntToInt32(y.AuxInt) != off || auxToSym(y.Aux) != sym {
+ break
+ }
+ mem := y.Args[2]
+ x := y.Args[0]
+ if ptr != y.Args[1] || mem != v_2 || !(y.Uses == 1 && clobber(y)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ADDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ADDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ADDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ADDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(SUBQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (SUBQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64SUBQ {
+ break
+ }
+ x := y.Args[1]
+ l := y.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] || mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ANDQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ANDQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ANDQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ANDQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(ORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (ORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64ORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore {sym} [off] ptr y:(XORQ l:(MOVQload [off] {sym} ptr mem) x) mem)
+ // cond: y.Uses==1 && l.Uses==1 && clobber(y, l)
+ // result: (XORQmodify [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ y := v_1
+ if y.Op != OpAMD64XORQ {
+ break
+ }
+ _ = y.Args[1]
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ l := y_0
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ continue
+ }
+ mem := l.Args[1]
+ if ptr != l.Args[0] {
+ continue
+ }
+ x := y_1
+ if mem != v_2 || !(y.Uses == 1 && l.Uses == 1 && clobber(y, l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ADDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ADDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ANDQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ANDQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ANDQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(ORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (ORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64ORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr a:(XORQconst [c] l:(MOVQload [off] {sym} ptr2 mem)) mem)
+ // cond: isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)
+ // result: (XORQconstmodify {sym} [makeValAndOff(int32(c),off)] ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ a := v_1
+ if a.Op != OpAMD64XORQconst {
+ break
+ }
+ c := auxIntToInt32(a.AuxInt)
+ l := a.Args[0]
+ if l.Op != OpAMD64MOVQload || auxIntToInt32(l.AuxInt) != off || auxToSym(l.Aux) != sym {
+ break
+ }
+ mem := l.Args[1]
+ ptr2 := l.Args[0]
+ if mem != v_2 || !(isSamePtr(ptr, ptr2) && a.Uses == 1 && l.Uses == 1 && clobber(l, a)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstore [off] {sym} ptr (MOVQf2i val) mem)
+ // result: (MOVSDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQf2i {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVQstore [i] {s} p x:(BSWAPQ w) mem)
+ // cond: x.Uses == 1 && buildcfg.GOAMD64 >= 3
+ // result: (MOVBEQstore [i] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64BSWAPQ {
+ break
+ }
+ w := x.Args[0]
+ mem := v_2
+ if !(x.Uses == 1 && buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64MOVBEQstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVQstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVQstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [c] {s} p x:(MOVQstoreconst [a] {s} p mem))
+ // cond: config.useSSE && x.Uses == 1 && a.Off() + 8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)
+ // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVQstoreconst [a] {s} p x:(MOVQstoreconst [c] {s} p mem))
+ // cond: config.useSSE && x.Uses == 1 && a.Off() + 8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)
+ // result: (MOVOstoreconst [makeValAndOff(0,a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(config.useSSE && x.Uses == 1 && a.Off()+8 == c.Off() && a.Val() == 0 && c.Val() == 0 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSDload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSDload [off] {sym} ptr (MOVQstore [off] {sym} ptr val _))
+ // result: (MOVQi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVQi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSDstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSDstore [off] {sym} ptr (MOVQi2f val) mem)
+ // result: (MOVQstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVSSload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVSSload [off] {sym} ptr (MOVLstore [off] {sym} ptr val _))
+ // result: (MOVLi2f val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MOVLi2f)
+ v.AddArg(val)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVSSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVSSstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVSSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVSSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVSSstore [off] {sym} ptr (MOVLi2f val) mem)
+ // result: (MOVLstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLi2f {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQSX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWQSXload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWQSXload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQSX (ANDLconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDLconst [c & 0x7fff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVWQSX x))
+ // result: (MOVWQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSX (MOVBQSX x))
+ // result: (MOVBQSX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQSX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQSX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQSXload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWQSXload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQSX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQSXload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWQSXload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQSXload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWQZX(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWQZX x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVLload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x:(MOVQload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <v.Type> [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpAMD64MOVWload, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWQZX x)
+ // cond: zeroUpper48Bits(x,3)
+ // result: x
+ for {
+ x := v_0
+ if !(zeroUpper48Bits(x, 3)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWQZX (ANDLconst [c] x))
+ // result: (ANDLconst [c & 0xffff] x)
+ for {
+ if v_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVWQZX x))
+ // result: (MOVWQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWQZX (MOVBQZX x))
+ // result: (MOVBQZX x)
+ for {
+ if v_0.Op != OpAMD64MOVBQZX {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64MOVBQZX)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVWQZX x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWQZX)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVLconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWstore [off] {sym} ptr (MOVWQSX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQSX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWQZX x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVWQZX {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDQconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVLconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVQconst [c]) mem)
+ // result: (MOVWstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRQconst [16] w) x:(MOVWstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRLconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SHRQconst [j] w) x:(MOVWstore [i-2] {s} p w0:(SHRQconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVLstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRQconst [16] w) x:(MOVWstore [i] {s} p0 w mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst || auxIntToInt8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRLconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRLconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRLconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p1 (SHRQconst [j] w) x:(MOVWstore [i] {s} p0 w0:(SHRQconst [j-16] w) mem))
+ // cond: x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)
+ // result: (MOVLstore [i] {s} p0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpAMD64SHRQconst {
+ break
+ }
+ j := auxIntToInt8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpAMD64MOVWstore || auxIntToInt32(x.AuxInt) != i || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ p0 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpAMD64SHRQconst || auxIntToInt8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && sequentialAddresses(p0, p1, 2) && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p x1:(MOVWload [j] {s2} p2 mem) mem2:(MOVWstore [i-2] {s} p x2:(MOVWload [j-2] {s2} p2 mem) mem))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)
+ // result: (MOVLstore [i-2] {s} p (MOVLload [j-2] {s2} p2 mem) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x1 := v_1
+ if x1.Op != OpAMD64MOVWload {
+ break
+ }
+ j := auxIntToInt32(x1.AuxInt)
+ s2 := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p2 := x1.Args[0]
+ mem2 := v_2
+ if mem2.Op != OpAMD64MOVWstore || auxIntToInt32(mem2.AuxInt) != i-2 || auxToSym(mem2.Aux) != s {
+ break
+ }
+ _ = mem2.Args[2]
+ if p != mem2.Args[0] {
+ break
+ }
+ x2 := mem2.Args[1]
+ if x2.Op != OpAMD64MOVWload || auxIntToInt32(x2.AuxInt) != j-2 || auxToSym(x2.Aux) != s2 {
+ break
+ }
+ _ = x2.Args[1]
+ if p2 != x2.Args[0] || mem != x2.Args[1] || mem != mem2.Args[2] || !(x1.Uses == 1 && x2.Uses == 1 && mem2.Uses == 1 && clobber(x1, x2, mem2)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(j - 2)
+ v0.Aux = symToAux(s2)
+ v0.AddArg2(p2, mem)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreconst [sc] {s} (ADDQconst [off] ptr) mem)
+ // cond: ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (LEAQ [off] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)
+ // result: (MOVWstoreconst [ValAndOff(sc).addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && ValAndOff(sc).canAdd32(off)) {
+ break
+ }
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(sc).addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [a] {s} p x:(MOVWstoreconst [c] {s} p mem))
+ // cond: x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVLstoreconst [makeValAndOff(a.Val()&0xffff | c.Val()<<16, a.Off())] {s} p mem)
+ for {
+ a := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpAMD64MOVWstoreconst {
+ break
+ }
+ c := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(a.Val()&0xffff|c.Val()<<16, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULL x (MOVLconst [c]))
+ // result: (MULLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLconst [c] (MULLconst [d] x))
+ // result: (MULLconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64MULLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [-9] x)
+ // result: (NEGL (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-5] x)
+ // result: (NEGL (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-3] x)
+ // result: (NEGL (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [-1] x)
+ // result: (NEGL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULLconst [ 0] _)
+ // result: (MOVLconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (MULLconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULLconst [ 3] x)
+ // result: (LEAL2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 5] x)
+ // result: (LEAL4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [ 7] x)
+ // result: (LEAL2 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [ 9] x)
+ // result: (LEAL8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULLconst [11] x)
+ // result: (LEAL2 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [13] x)
+ // result: (LEAL4 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [19] x)
+ // result: (LEAL2 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [21] x)
+ // result: (LEAL4 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [25] x)
+ // result: (LEAL8 x (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [27] x)
+ // result: (LEAL8 (LEAL2 <v.Type> x x) (LEAL2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [37] x)
+ // result: (LEAL4 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [41] x)
+ // result: (LEAL8 x (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [45] x)
+ // result: (LEAL8 (LEAL4 <v.Type> x x) (LEAL4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [73] x)
+ // result: (LEAL8 x (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLconst [81] x)
+ // result: (LEAL8 (LEAL8 <v.Type> x x) (LEAL8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBL (SHLLconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAL1 (SHLLconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAL1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAL2 (SHLLconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAL2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAL4 (SHLLconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAL4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAL8 (SHLLconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAL8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLLconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLLconst [int8(log32(c/3))] (LEAL2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLLconst [int8(log32(c/5))] (LEAL4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLLconst [int8(log32(c/9))] (LEAL8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULQconst [c] (MULQconst [d] x))
+ // cond: is32Bit(int64(c)*int64(d))
+ // result: (MULQconst [c * d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MULQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) * int64(d))) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [-9] x)
+ // result: (NEGQ (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-5] x)
+ // result: (NEGQ (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-3] x)
+ // result: (NEGQ (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != -3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [-1] x)
+ // result: (NEGQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64NEGQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MULQconst [ 0] _)
+ // result: (MOVQconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MULQconst [ 1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (MULQconst [ 3] x)
+ // result: (LEAQ2 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 5] x)
+ // result: (LEAQ4 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [ 7] x)
+ // result: (LEAQ2 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [ 9] x)
+ // result: (LEAQ8 x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 9 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (MULQconst [11] x)
+ // result: (LEAQ2 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 11 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [13] x)
+ // result: (LEAQ4 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 13 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [19] x)
+ // result: (LEAQ2 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 19 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [21] x)
+ // result: (LEAQ4 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 21 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [25] x)
+ // result: (LEAQ8 x (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 25 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [27] x)
+ // result: (LEAQ8 (LEAQ2 <v.Type> x x) (LEAQ2 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 27 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [37] x)
+ // result: (LEAQ4 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 37 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [41] x)
+ // result: (LEAQ8 x (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 41 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [45] x)
+ // result: (LEAQ8 (LEAQ4 <v.Type> x x) (LEAQ4 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 45 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [73] x)
+ // result: (LEAQ8 x (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 73 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULQconst [81] x)
+ // result: (LEAQ8 (LEAQ8 <v.Type> x x) (LEAQ8 <v.Type> x x))
+ for {
+ if auxIntToInt32(v.AuxInt) != 81 {
+ break
+ }
+ x := v_0
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo64(int64(c)+1) && c >= 15
+ // result: (SUBQ (SHLQconst <v.Type> [int8(log64(int64(c)+1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo64(int64(c)+1) && c >= 15) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log64(int64(c) + 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-1) && c >= 17
+ // result: (LEAQ1 (SHLQconst <v.Type> [int8(log32(c-1))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-1) && c >= 17) {
+ break
+ }
+ v.reset(OpAMD64LEAQ1)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 1)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-2) && c >= 34
+ // result: (LEAQ2 (SHLQconst <v.Type> [int8(log32(c-2))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-2) && c >= 34) {
+ break
+ }
+ v.reset(OpAMD64LEAQ2)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 2)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-4) && c >= 68
+ // result: (LEAQ4 (SHLQconst <v.Type> [int8(log32(c-4))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-4) && c >= 68) {
+ break
+ }
+ v.reset(OpAMD64LEAQ4)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 4)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: isPowerOfTwo32(c-8) && c >= 136
+ // result: (LEAQ8 (SHLQconst <v.Type> [int8(log32(c-8))] x) x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c-8) && c >= 136) {
+ break
+ }
+ v.reset(OpAMD64LEAQ8)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQconst, v.Type)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c - 8)))
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SHLQconst [int8(log32(c/3))] (LEAQ2 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ2, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SHLQconst [int8(log32(c/5))] (LEAQ4 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ4, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] x)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SHLQconst [int8(log32(c/9))] (LEAQ8 <v.Type> x x))
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAQ8, v.Type)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MULQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ // match: (MULQconst [c] (NEGQ x))
+ // cond: c != -(1<<31)
+ // result: (MULQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64MULQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSDload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (MULSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (MULSSload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64MULSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MULSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (MULSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MULSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64MULSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (MULSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (MULSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64MULSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NEGL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGL (NEGL x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGL {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGL s:(SUBL x y))
+ // cond: s.Uses == 1
+ // result: (SUBL y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBL {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGL (MOVLconst [c]))
+ // result: (MOVLconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NEGQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGQ (NEGQ x))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEGQ s:(SUBQ x y))
+ // cond: s.Uses == 1
+ // result: (SUBQ y x)
+ for {
+ s := v_0
+ if s.Op != OpAMD64SUBQ {
+ break
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if !(s.Uses == 1) {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEGQ (MOVQconst [c]))
+ // result: (MOVQconst [-c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEGQ (ADDQconst [c] (NEGQ x)))
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NOTL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTL (MOVLconst [c]))
+ // result: (MOVLconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64NOTQ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTQ (MOVQconst [c]))
+ // result: (MOVQconst [^c])
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTSL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSLconst [int8(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL x (MOVLconst [c]))
+ // result: (ORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x y) (ANDL (SHRL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (ROLL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGQ y)) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRL x y) (ANDL (SHLL x (NEGL y)) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [31]) [-32])) [32]))))
+ // result: (RORL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -32 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 31 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDQconst y [15])) (ANDL (SHRW x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDLconst y [15])) (ANDL (SHRW x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [15]) [-16])) [16]))))
+ // cond: v.Type.Size() == 2
+ // result: (ROLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 15 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 15 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDQconst y [15])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRW x (ANDLconst y [15])) (SHLL x (NEGL (ADDLconst (ANDLconst y [15]) [-16]))))
+ // cond: v.Type.Size() == 2
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 15 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -16 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 15 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDQconst y [ 7])) (ANDL (SHRB x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHLL x (ANDLconst y [ 7])) (ANDL (SHRB x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))) (SBBLcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8])) [ 8]))))
+ // cond: v.Type.Size() == 1
+ // result: (ROLB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64ANDL {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_0_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_0_1_0_0 := v_1_0_1_0.Args[0]
+ if v_1_0_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_0_1_0_0.AuxInt) != 7 || y != v_1_0_1_0_0.Args[0] || v_1_1.Op != OpAMD64SBBLcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 7 || y != v_1_1_0_0_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDQconst y [ 7])) (SHLL x (NEGQ (ADDQconst (ANDQconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDQconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL (SHRB x (ANDLconst y [ 7])) (SHLL x (NEGL (ADDLconst (ANDLconst y [ 7]) [ -8]))))
+ // cond: v.Type.Size() == 1
+ // result: (RORB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRB {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64ANDLconst || auxIntToInt32(v_0_1.AuxInt) != 7 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpAMD64SHLL {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0.AuxInt) != -8 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0.AuxInt) != 7 || y != v_1_1_0_0.Args[0] || !(v.Type.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORL x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORL x0:(MOVBload [i0] {s} p mem) sh:(SHLLconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVBload [i] {s} p0 mem) sh:(SHLLconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVWload [i0] {s} p mem) sh:(SHLLconst [16] x1:(MOVWload [i1] {s} p mem)))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL x0:(MOVWload [i] {s} p0 mem) sh:(SHLLconst [16] x1:(MOVWload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL x1:(MOVBload [i1] {s} p mem) sh:(SHLLconst [8] x0:(MOVBload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL x1:(MOVBload [i] {s} p1 mem) sh:(SHLLconst [8] x0:(MOVBload [i] {s} p0 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLLconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLLconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL s0:(SHLLconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORL s1:(SHLLconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
+ // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORL <v.Type> (SHLLconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORL {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLLconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLLconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSLconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTSLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (ORLconst [d] x))
+ // result: (ORLconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] (BTSLconst [d] x))
+ // result: (ORLconst [c | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTSLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORLconst)
+ v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORLconst [c] _)
+ // cond: c==-1
+ // result: (MOVLconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: ( ORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTSQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTSQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // result: (BTSQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x (MOVLconst [c]))
+ // result: (ORQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHLQ x y) (ANDQ (SHRQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (ROLQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGQ y)) (SBBQcarrymask (CMPQconst (NEGQ (ADDQconst (ANDQconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGQ || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGQ {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDQconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDQconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRQ x y) (ANDQ (SHLQ x (NEGL y)) (SBBQcarrymask (CMPLconst (NEGL (ADDLconst (ANDLconst y [63]) [-64])) [64]))))
+ // result: (RORQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64ANDQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpAMD64NEGL || y != v_1_0_1.Args[0] || v_1_1.Op != OpAMD64SBBQcarrymask {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAMD64NEGL {
+ continue
+ }
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpAMD64ADDLconst || auxIntToInt32(v_1_1_0_0_0.AuxInt) != -64 {
+ continue
+ }
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpAMD64ANDLconst || auxIntToInt32(v_1_1_0_0_0_0.AuxInt) != 63 || y != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ (SHRQ lo bits) (SHLQ hi (NEGQ bits)))
+ // result: (SHRDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHRQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHLQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHRDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (SHLQ lo bits) (SHRQ hi (NEGQ bits)))
+ // result: (SHLDQ lo hi bits)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ bits := v_0.Args[1]
+ lo := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQ {
+ continue
+ }
+ _ = v_1.Args[1]
+ hi := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpAMD64NEGQ || bits != v_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpAMD64SHLDQ)
+ v.AddArg3(lo, hi, bits)
+ return true
+ }
+ break
+ }
+ // match: (ORQ (MOVQconst [c]) (MOVQconst [d]))
+ // result: (MOVQconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQ x0:(MOVBload [i0] {s} p mem) sh:(SHLQconst [8] x1:(MOVBload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVBload [i] {s} p0 mem) sh:(SHLQconst [8] x1:(MOVBload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVWload [i0] {s} p mem) sh:(SHLQconst [16] x1:(MOVWload [i1] {s} p mem)))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVWload [i] {s} p0 mem) sh:(SHLQconst [16] x1:(MOVWload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVLload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVLload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVLload [i1] {s} p mem)))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVQload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVLload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVLload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVQload [i] {s} p0 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVLload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVWload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i1] {s} p mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i0] {s} p mem)) y))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s1:(SHLQconst [j1] x1:(MOVWload [i] {s} p1 mem)) or:(ORQ s0:(SHLQconst [j0] x0:(MOVWload [i] {s} p0 mem)) y))
+ // cond: j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j0] (MOVLload [i] {s} p0 mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p0, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ x1:(MOVBload [i1] {s} p mem) sh:(SHLQconst [8] x0:(MOVBload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x1:(MOVBload [i] {s} p1 mem) sh:(SHLQconst [8] x0:(MOVBload [i] {s} p0 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (ROLWconst <v.Type> [8] (MOVWload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpAMD64MOVBload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64ROLWconst, v.Type)
+ v.copyOf(v0)
+ v0.AuxInt = int8ToAuxInt(8)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVWload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem)) sh:(SHLQconst [16] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPL <v.Type> (MOVLload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 16 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPL, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVLload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(BSWAPL x1:(MOVLload [i1] {s} p mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i0] {s} p mem))))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVLload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ r1:(BSWAPL x1:(MOVLload [i] {s} p1 mem)) sh:(SHLQconst [32] r0:(BSWAPL x0:(MOVLload [i] {s} p0 mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (BSWAPQ <v.Type> (MOVQload [i] {s} p0 mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r1 := v_0
+ if r1.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVLload {
+ continue
+ }
+ i := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p1 := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ r0 := sh.Args[0]
+ if r0.Op != OpAMD64BSWAPL {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVLload || auxIntToInt32(x0.AuxInt) != i || auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ p0 := x0.Args[0]
+ if mem != x0.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p0, p1, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpAMD64BSWAPQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpAMD64MOVQload, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(i)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i0] {s} p mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] x0:(MOVBload [i] {s} p0 mem)) or:(ORQ s1:(SHLQconst [j1] x1:(MOVBload [i] {s} p1 mem)) y))
+ // cond: j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (ROLWconst <typ.UInt16> [8] (MOVWload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpAMD64MOVBload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpAMD64MOVBload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 1) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64ROLWconst, typ.UInt16)
+ v2.AuxInt = int8ToAuxInt(8)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVWload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i0] {s} p mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i1] {s} p mem))) y))
+ // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ s0:(SHLQconst [j0] r0:(ROLWconst [8] x0:(MOVWload [i] {s} p0 mem))) or:(ORQ s1:(SHLQconst [j1] r1:(ROLWconst [8] x1:(MOVWload [i] {s} p1 mem))) y))
+ // cond: j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORQ <v.Type> (SHLQconst <v.Type> [j1] (BSWAPL <typ.UInt32> (MOVLload [i] {s} p0 mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j0 := auxIntToInt8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpAMD64ROLWconst || auxIntToInt8(r0.AuxInt) != 8 {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpAMD64MOVWload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ or := v_1
+ if or.Op != OpAMD64ORQ {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpAMD64SHLQconst {
+ continue
+ }
+ j1 := auxIntToInt8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpAMD64ROLWconst || auxIntToInt8(r1.AuxInt) != 8 {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpAMD64MOVWload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && sequentialAddresses(p0, p1, 2) && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpAMD64ORQ, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpAMD64SHLQconst, v.Type)
+ v1.AuxInt = int8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpAMD64BSWAPL, typ.UInt32)
+ v3 := b.NewValue0(x1.Pos, OpAMD64MOVLload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p0, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (ORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVBELload [i0] {s} p mem) sh:(SHLQconst [32] x1:(MOVBELload [i1] {s} p mem)))
+ // cond: i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVBEQload [i1] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBELload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBELload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i0 == i1+4 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i1)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORQ x0:(MOVBELload [i] {s} p0 mem) sh:(SHLQconst [32] x1:(MOVBELload [i] {s} p1 mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVBEQload [i] {s} p1 mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpAMD64MOVBELload {
+ continue
+ }
+ i := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p0 := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpAMD64SHLQconst || auxIntToInt8(sh.AuxInt) != 32 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpAMD64MOVBELload || auxIntToInt32(x1.AuxInt) != i || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if mem != x1.Args[1] || !(x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && sequentialAddresses(p1, p0, 4) && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpAMD64MOVBEQload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p1, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTSQconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTSQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (ORQconst [d] x))
+ // result: (ORQconst [c | d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64ORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [c] (BTSQconst [d] x))
+ // cond: is32Bit(int64(c) | 1<<uint32(d))
+ // result: (ORQconst [c | 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTSQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) | 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64ORQconst)
+ v.AuxInt = int32ToAuxInt(c | 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORQconst [-1] _)
+ // result: (MOVQconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (ORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (ORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (ORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: ( ORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: ( ORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64ORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (ORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (ORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (ORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64ORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLB x (NEGQ y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (NEGL y))
+ // result: (RORB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLB x (MOVQconst [c]))
+ // result: (ROLBconst [int8(c&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLB x (MOVLconst [c]))
+ // result: (ROLBconst [int8(c&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLBconst [c] (ROLBconst [d] x))
+ // result: (ROLBconst [(c+d)& 7] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLBconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLL x (NEGQ y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (NEGL y))
+ // result: (RORL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLL x (MOVQconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLL x (MOVLconst [c]))
+ // result: (ROLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLLconst [c] (ROLLconst [d] x))
+ // result: (ROLLconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLQ x (NEGQ y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (NEGL y))
+ // result: (RORQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLQconst [c] (ROLQconst [d] x))
+ // result: (ROLQconst [(c+d)&63] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROLW x (NEGQ y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (NEGL y))
+ // result: (RORW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ROLW x (MOVQconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLW x (MOVLconst [c]))
+ // result: (ROLWconst [int8(c&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64ROLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROLWconst [c] (ROLWconst [d] x))
+ // result: (ROLWconst [(c+d)&15] x)
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64ROLWconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt((c + d) & 15)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ROLWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORB x (NEGQ y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (NEGL y))
+ // result: (ROLB x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORB x (MOVQconst [c]))
+ // result: (ROLBconst [int8((-c)&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORB x (MOVLconst [c]))
+ // result: (ROLBconst [int8((-c)&7) ] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 7))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORL x (NEGQ y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (NEGL y))
+ // result: (ROLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORL x (MOVQconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORL x (MOVLconst [c]))
+ // result: (ROLLconst [int8((-c)&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORQ x (NEGQ y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (NEGL y))
+ // result: (ROLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORQ x (MOVQconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORQ x (MOVLconst [c]))
+ // result: (ROLQconst [int8((-c)&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (NEGQ y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (NEGL y))
+ // result: (ROLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpAMD64ROLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RORW x (MOVQconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RORW x (MOVLconst [c]))
+ // result: (ROLWconst [int8((-c)&15)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(int8((-c) & 15))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARB x (MOVQconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARB x (MOVLconst [c]))
+ // result: (SARBconst [int8(min(int64(c)&31,7))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARBconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 7)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARBconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int8(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARL x (MOVQconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (MOVLconst [c]))
+ // result: (SARLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SARL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SARL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARLconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SARQ x (MOVQconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (MOVLconst [c]))
+ // result: (SARQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SARQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SARQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SARQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SARW x (MOVQconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SARW x (MOVLconst [c]))
+ // result: (SARWconst [int8(min(int64(c)&31,15))] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SARWconst)
+ v.AuxInt = int8ToAuxInt(int8(min(int64(c)&31, 15)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SARWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SARWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SARWconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(int16(d))>>uint64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBLcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBLcarrymask (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_ULT))
+ // result: (MOVLconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBLcarrymask (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQ x (MOVQconst [c]) borrow)
+ // cond: is32Bit(c)
+ // result: (SBBQconst x [int32(c)] borrow)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ borrow := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SBBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(x, borrow)
+ return true
+ }
+ // match: (SBBQ x y (FlagEQ))
+ // result: (SUBQborrow x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQborrow)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQcarrymask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SBBQcarrymask (FlagEQ))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagLT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_ULT))
+ // result: (MOVQconst [-1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (SBBQcarrymask (FlagGT_UGT))
+ // result: (MOVQconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SBBQconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBBQconst x [c] (FlagEQ))
+ // result: (SUBQconstborrow x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETA (InvertFlags x))
+ // result: (SETB x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETA (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETA (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETA (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETAE (TESTQ x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTL x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTW x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (TESTB x x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (SETAE (InvertFlags x))
+ // result: (SETBE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETAE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETAE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETAE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETAstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETAstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETAstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETAstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETAstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETB(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETB (TESTQ x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTL x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTW x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTW {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (TESTB x x))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpAMD64TESTB {
+ break
+ }
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (SETB (BTLconst [0] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTLconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (BTQconst [0] x))
+ // result: (ANDQconst [1] x)
+ for {
+ if v_0.Op != OpAMD64BTQconst || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (InvertFlags x))
+ // result: (SETA x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETB (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETB (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETB (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETBE (InvertFlags x))
+ // result: (SETAE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETBE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETBE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETBE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETBstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETAstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETAstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETBstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETBstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETEQ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETEQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETAE (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETAE (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAE (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAE (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAE (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETNE (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETNE (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETEQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETAE (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAE)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETEQ (InvertFlags x))
+ // result: (SETEQ x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETEQ (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETEQ (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETEQ (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
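+// rewriteValueAMD64_OpAMD64SETEQstore applies the same foldings as the SETEQ
+// rules above to the store form. The single-bit patterns roughly correspond
+// to source like
+//
+//	y&(1<<x) == 0
+//
+// (illustrative only): BTL/BTQ copy the selected bit into the carry flag, so
+// an equality test of the zero flag (SETEQ) becomes a carry-clear test
+// (SETAE), and SETNE becomes SETB. The SHLQconst/SHRQconst [63] and [31]
+// pairs isolate a single bit (the sign bit or bit 0) and fold into BTQconst
+// or BTLconst the same way.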
+func rewriteValueAMD64_OpAMD64SETEQstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETEQstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETAEstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETNEstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETAEstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETAEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETEQstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETEQstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETEQstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETEQstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETEQstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETG (InvertFlags x))
+ // result: (SETL x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETG (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETG (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETG (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETGE (InvertFlags x))
+ // result: (SETLE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETGE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagLT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagLT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETGE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETGE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
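+// rewriteValueAMD64_OpAMD64SETGEstore, like the other SET*store helpers, also
+// folds an ADDQconst or LEAQ base into the store's displacement: the is32Bit
+// guard keeps the combined offset representable as a signed 32-bit
+// displacement, and canMergeSym allows the merge only when at most one of the
+// two symbols is non-nil.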
+func rewriteValueAMD64_OpAMD64SETGEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETGstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETGstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETLstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETGstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETGstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETGstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETL (InvertFlags x))
+ // result: (SETG x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETL (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETL (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETL (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLE(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SETLE (InvertFlags x))
+ // result: (SETGE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETLE (FlagEQ))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETLE (FlagGT_ULT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETLE (FlagGT_UGT))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETLstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETLstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETGstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETGstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETLstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETLstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETLstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETLstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SETNE(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SETNE (TESTBconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTBconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTWconst [1] x))
+ // result: (ANDLconst [1] x)
+ for {
+ if v_0.Op != OpAMD64TESTWconst || auxIntToInt16(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64ANDLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (SETB (BTL x y))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (SETB (BTQ x y))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETB (BTLconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETB (BTQconst [int8(log32(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETB (BTQconst [int8(log64(c))] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (CMPLconst [1] s:(ANDLconst [1] _)))
+ // result: (SETEQ (CMPLconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPLconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (CMPQconst [1] s:(ANDQconst [1] _)))
+ // result: (SETEQ (CMPQconst [0] s))
+ for {
+ if v_0.Op != OpAMD64CMPQconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ s := v_0.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SETNE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [0] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTQconst [63] x))
+ for {
+ if v_0.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (SETB (BTLconst [31] x))
+ for {
+ if v_0.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (SETNE (InvertFlags x))
+ // result: (SETNE x)
+ for {
+ if v_0.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SETNE (FlagEQ))
+ // result: (MOVLconst [0])
+ for {
+ if v_0.Op != OpAMD64FlagEQ {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SETNE (FlagLT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagLT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_ULT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SETNE (FlagGT_UGT))
+ // result: (MOVLconst [1])
+ for {
+ if v_0.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
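+// Illustrative note: the SETNE rules above fold bit-test shapes into the BT
+// family. For example, Go source along the lines of
+//
+//	set := x&(1<<n) != 0 // with n already known to be in range
+//
+// reaches this pass roughly as (SETNE (TESTQ (SHLQ (MOVQconst [1]) n) x)) and
+// is rewritten to (SETB (BTQ n x)), a single bit test plus SETB. Known flag
+// values (FlagEQ, FlagLT_*, FlagGT_*) fold straight to MOVLconst 0 or 1.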
+func rewriteValueAMD64_OpAMD64SETNEstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SETNEstore [off] {sym} ptr (TESTL (SHLL (MOVLconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTL x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (SHLQ (MOVQconst [1]) x) y) mem)
+ // result: (SETBstore [off] {sym} ptr (BTQ x y) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_1_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_1_1
+ mem := v_2
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTLconst [c] x) mem)
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTLconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQconst [c] x) mem)
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log32(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ x := v_1.Args[0]
+ mem := v_2
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ (MOVQconst [c]) x) mem)
+ // cond: isUint64PowerOfTwo(c)
+ // result: (SETBstore [off] {sym} ptr (BTQconst [int8(log64(c))] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ mem := v_2
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPLconst [1] s:(ANDLconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPLconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPLconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDLconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (CMPQconst [1] s:(ANDQconst [1] _)) mem)
+ // result: (SETEQstore [off] {sym} ptr (CMPQconst [0] s) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64CMPQconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ s := v_1.Args[0]
+ if s.Op != OpAMD64ANDQconst || auxIntToInt32(s.AuxInt) != 1 {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64SETEQstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(s)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHLLconst [31] (SHRLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [0] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTQ z1:(SHRQconst [63] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTQconst [63] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTQ {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (TESTL z1:(SHRLconst [31] x) z2) mem)
+ // cond: z1==z2
+ // result: (SETBstore [off] {sym} ptr (BTLconst [31] x) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64TESTL {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z1 := v_1_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_1_1
+ mem := v_2
+ if !(z1 == z2) {
+ continue
+ }
+ v.reset(OpAMD64SETBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (SETNEstore [off] {sym} ptr (InvertFlags x) mem)
+ // result: (SETNEstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64InvertFlags {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SETNEstore [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SETNEstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SETNEstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagEQ) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [0]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagEQ {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagLT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagLT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_ULT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_ULT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (SETNEstore [off] {sym} ptr (FlagGT_UGT) mem)
+ // result: (MOVBstore [off] {sym} ptr (MOVLconst <typ.UInt8> [1]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpAMD64FlagGT_UGT {
+ break
+ }
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLconst, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
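+// Illustrative note: SETNEstore applies the same bit-test and flag-constant
+// folds as SETNE above, but with the boolean result fused into the byte store
+// at [off]{sym}; ADDQconst/LEAQ address arithmetic is likewise folded into the
+// store offset when the combined offset still fits in 32 bits.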
+func rewriteValueAMD64_OpAMD64SHLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLL x (MOVQconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (MOVLconst [c]))
+ // result: (SHLLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHLL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
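+// Illustrative note: the variable-count SHLL rules rely on the hardware
+// masking the 32-bit shift count to its low 5 bits (count&31). A constant
+// count folds into SHLLconst, and an ADD or AND applied to the count is
+// dropped when it cannot change those low 5 bits, so for example
+//
+//	x << (s & 31)
+//
+// needs no explicit AND instruction. SHRL below follows the same pattern,
+// and SHLQ/SHRQ do likewise using the low 6 bits (count&63) for 64-bit shifts.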
+func rewriteValueAMD64_OpAMD64SHLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLLconst [1] (SHRLconst [1] x))
+ // result: (BTRLconst [0] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLLconst [d] (MOVLconst [c]))
+ // result: (MOVLconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ return false
+}
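+// Illustrative note: (SHLLconst [1] (SHRLconst [1] x)) clears only bit 0 of x,
+// so it becomes BTRLconst [0]; the mirrored (SHRLconst [1] (SHLLconst [1] x))
+// rule further below clears the top bit and becomes BTRLconst [31]. The
+// SHLQconst/SHRQconst rules do the same for bits 0 and 63.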
+func rewriteValueAMD64_OpAMD64SHLQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHLQ x (MOVQconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (MOVLconst [c]))
+ // result: (SHLQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHLQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHLQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHLQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHLQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHLQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHLQconst [1] (SHRQconst [1] x))
+ // result: (BTRQconst [0] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHRQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHLQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SHLQconst [d] (MOVQconst [c]))
+ // result: (MOVQconst [c << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (SHLQconst [d] (MOVLconst [c]))
+ // result: (MOVQconst [int64(c) << uint64(d)])
+ for {
+ d := auxIntToInt8(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) << uint64(d))
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRB x (MOVQconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB x (MOVLconst [c]))
+ // cond: c&31 < 8
+ // result: (SHRBconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 8) {
+ break
+ }
+ v.reset(OpAMD64SHRBconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRB _ (MOVQconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRB _ (MOVLconst [c]))
+ // cond: c&31 >= 8
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 8) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
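+// Illustrative note: SHRB and SHRW (below) shift 8- and 16-bit values, but the
+// hardware still masks the count only to 5 bits, so a constant count whose
+// masked value reaches the operand width cannot be expressed as SHRBconst or
+// SHRWconst; those cases shift out every bit and fold directly to
+// (MOVLconst [0]).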
+func rewriteValueAMD64_OpAMD64SHRBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRBconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRL x (MOVQconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (MOVLconst [c]))
+ // result: (SHRLconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRLconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRL x (ADDQconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDQconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ADDLconst [c] y))
+ // cond: c & 31 == 0
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 31 == 0
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRL x (ANDLconst [c] y))
+ // cond: c & 31 == 31
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRL x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 31 == 31
+ // result: (SHRL x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&31 == 31) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRLconst [1] (SHLLconst [1] x))
+ // result: (BTRLconst [31] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLLconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRLconst)
+ v.AuxInt = int8ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRLconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SHRQ x (MOVQconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (MOVLconst [c]))
+ // result: (SHRQconst [int8(c&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SHRQconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQ x (ADDQconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ADDQconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDQconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGQ <t> (ANDQconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGQ <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGQ {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDQconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ADDLconst [c] y))
+ // cond: c & 63 == 0
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ADDLconst [c] y)))
+ // cond: c & 63 == 0
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ADDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 0) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SHRQ x (ANDLconst [c] y))
+ // cond: c & 63 == 63
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SHRQ x (NEGL <t> (ANDLconst [c] y)))
+ // cond: c & 63 == 63
+ // result: (SHRQ x (NEGL <t> y))
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64NEGL {
+ break
+ }
+ t := v_1.Type
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAMD64ANDLconst {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ y := v_1_0.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGL, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRQconst [1] (SHLQconst [1] x))
+ // result: (BTRQconst [63] x)
+ for {
+ if auxIntToInt8(v.AuxInt) != 1 || v_0.Op != OpAMD64SHLQconst || auxIntToInt8(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64BTRQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRQconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SHRW x (MOVQconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW x (MOVLconst [c]))
+ // cond: c&31 < 16
+ // result: (SHRWconst [int8(c&31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 < 16) {
+ break
+ }
+ v.reset(OpAMD64SHRWconst)
+ v.AuxInt = int8ToAuxInt(int8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SHRW _ (MOVQconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SHRW _ (MOVLconst [c]))
+ // cond: c&31 >= 16
+ // result: (MOVLconst [0])
+ for {
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c&31 >= 16) {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SHRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SHRWconst x [0])
+ // result: x
+ for {
+ if auxIntToInt8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBL x (MOVLconst [c]))
+ // result: (SUBLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64SUBLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBL (MOVLconst [c]) x)
+ // result: (NEGL (SUBLconst <v.Type> x [c]))
+ for {
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64NEGL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBLconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUBL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBLload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBLconst [c] x)
+ // result: (ADDLconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpAMD64ADDLconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAMD64SUBLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (SUBL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEGQ (SUBQconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64NEGQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SUBQconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBQload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQborrow(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQborrow x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBQconstborrow x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpAMD64SUBQconstborrow)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBQconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDQconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBQconst (MOVQconst [d]) [c])
+ // result: (MOVQconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBQconst (SUBQconst x [d]) [c])
+ // cond: is32Bit(int64(-c)-int64(d))
+ // result: (ADDQconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64SUBQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(-c) - int64(d))) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
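+// Illustrative note: subtract-by-constant is canonicalized to ADDQconst with
+// the negated constant. The c != -(1<<31) guard matters because negating
+// math.MinInt32 overflows int32: ADDQconst would then add -2^31 instead of
+// +2^31 in 64-bit arithmetic. The 32-bit SUBLconst above needs no such guard,
+// since x + (-2^31) and x - (-2^31) agree modulo 2^32.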
+func rewriteValueAMD64_OpAMD64SUBQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (SUBQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
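+// Illustrative note: the final SUBQload rule above handles a load that reads
+// back a value just stored by MOVSDstore at the same [off]{sym} ptr. Rather
+// than round-tripping through memory, the stored FP register is moved directly
+// to a GP register with MOVQf2i and a plain SUBQ is used. The SUBSDload and
+// SUBSSload rules below handle the reverse direction with MOVQi2f and MOVLi2f.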
+func rewriteValueAMD64_OpAMD64SUBQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (SUBQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSD x l:(MOVSDload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSDload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSDload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSDload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSDload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSDload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSDload x [off] {sym} ptr (MOVQstore [off] {sym} ptr y _))
+ // result: (SUBSD x (MOVQi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVQstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSD)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQi2f, typ.Float64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBSS x l:(MOVSSload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (SUBSSload x [off] {sym} ptr mem)
+ for {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVSSload {
+ break
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64SUBSSload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SUBSSload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (SUBSSload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (SUBSSload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64SUBSSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (SUBSSload x [off] {sym} ptr (MOVLstore [off] {sym} ptr y _))
+ // result: (SUBSS x (MOVLi2f y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVLstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64SUBSS)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLi2f, typ.Float32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTB (MOVLconst [c]) x)
+ // result: (TESTBconst [int8(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTBconst)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTB l:(MOVBload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPBconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVBload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPBconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTBconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTB x x)
+ for {
+ if auxIntToInt8(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTL (MOVLconst [c]) x)
+ // result: (TESTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTL l:(MOVLload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPLconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPLconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (TESTL a:(ANDLload [off] {sym} x ptr mem) a)
+ // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
+ // result: (TESTL (MOVLload <a.Type> [off] {sym} ptr mem) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if a.Op != OpAMD64ANDLload {
+ continue
+ }
+ off := auxIntToInt32(a.AuxInt)
+ sym := auxToSym(a.Aux)
+ mem := a.Args[2]
+ x := a.Args[0]
+ ptr := a.Args[1]
+ if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
+ continue
+ }
+ v.reset(OpAMD64TESTL)
+ v0 := b.NewValue0(a.Pos, OpAMD64MOVLload, a.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTLconst [c] (MOVLconst [c]))
+ // cond: c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0.AuxInt) != c || !(c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTLconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTL x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTQ (MOVQconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (TESTQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64TESTQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTQ l:(MOVQload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPQconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPQconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (TESTQ a:(ANDQload [off] {sym} x ptr mem) a)
+ // cond: a.Uses == 2 && a.Block == v.Block && clobber(a)
+ // result: (TESTQ (MOVQload <a.Type> [off] {sym} ptr mem) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if a.Op != OpAMD64ANDQload {
+ continue
+ }
+ off := auxIntToInt32(a.AuxInt)
+ sym := auxToSym(a.Aux)
+ mem := a.Args[2]
+ x := a.Args[0]
+ ptr := a.Args[1]
+ if a != v_1 || !(a.Uses == 2 && a.Block == v.Block && clobber(a)) {
+ continue
+ }
+ v.reset(OpAMD64TESTQ)
+ v0 := b.NewValue0(a.Pos, OpAMD64MOVQload, a.Type)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c == 0
+ // result: (FlagEQ)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c == 0) {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c < 0
+ // result: (FlagLT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c < 0) {
+ break
+ }
+ v.reset(OpAMD64FlagLT_UGT)
+ return true
+ }
+ // match: (TESTQconst [c] (MOVQconst [d]))
+ // cond: int64(c) == d && c > 0
+ // result: (FlagGT_UGT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(int64(c) == d && c > 0) {
+ break
+ }
+ v.reset(OpAMD64FlagGT_UGT)
+ return true
+ }
+ // match: (TESTQconst [-1] x)
+ // cond: x.Op != OpAMD64MOVQconst
+ // result: (TESTQ x x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVQconst) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TESTW (MOVLconst [c]) x)
+ // result: (TESTWconst [int16(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpAMD64TESTWconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TESTW l:(MOVWload {sym} [off] ptr mem) l2)
+ // cond: l == l2 && l.Uses == 2 && clobber(l)
+ // result: @l.Block (CMPWconstload {sym} [makeValAndOff(0, off)] ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpAMD64MOVWload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ l2 := v_1
+ if !(l == l2 && l.Uses == 2 && clobber(l)) {
+ continue
+ }
+ b = l.Block
+ v0 := b.NewValue0(l.Pos, OpAMD64CMPWconstload, types.TypeFlags)
+ v.copyOf(v0)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, off))
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64TESTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TESTWconst [-1] x)
+ // cond: x.Op != OpAMD64MOVLconst
+ // result: (TESTW x x)
+ for {
+ if auxIntToInt16(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ if !(x.Op != OpAMD64MOVLconst) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ v.AddArg2(x, x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDLlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDLlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDLlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDLlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XADDQlock(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XADDQlock [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XADDQlock [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XADDQlock)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGL [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGL [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGL [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGL [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGL)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XCHGQ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XCHGQ [off1] {sym} val (ADDQconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XCHGQ [off1+off2] {sym} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ // match: (XCHGQ [off1] {sym1} val (LEAQ [off2] {sym2} ptr) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB
+ // result: (XCHGQ [off1+off2] {mergeSym(sym1,sym2)} val ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpAMD64XCHGQ)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORL (SHLL (MOVLconst [1]) y) x)
+ // result: (BTCL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLL {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORL (MOVLconst [c]) x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCLconst [int8(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x (MOVLconst [c]))
+ // result: (XORLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL (SHLLconst x [c]) (SHRLconst x [d]))
+ // cond: d==32-c
+ // result: (ROLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRLconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLLconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRWconst x [d]))
+ // cond: d==16-c && c < 16 && t.Size() == 2
+ // result: (ROLWconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRWconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 16-c && c < 16 && t.Size() == 2) {
+ continue
+ }
+ v.reset(OpAMD64ROLWconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL <t> (SHLLconst x [c]) (SHRBconst x [d]))
+ // cond: d==8-c && c < 8 && t.Size() == 1
+ // result: (ROLBconst x [c])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLLconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRBconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 8-c && c < 8 && t.Size() == 1) {
+ continue
+ }
+ v.reset(OpAMD64ROLBconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORL x x)
+ // result: (MOVLconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (XORL x l:(MOVLload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORLload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVLload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORL x (ADDLconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSMSKL x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDLconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSMSKL)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORLconst [c] x)
+ // cond: isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCLconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint32PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTCLconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETNE x))
+ // result: (SETEQ x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETNE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETEQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETEQ x))
+ // result: (SETNE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETEQ {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETNE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETL x))
+ // result: (SETGE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETL {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETGE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETGE x))
+ // result: (SETL x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETGE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETLE x))
+ // result: (SETG x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETLE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETG)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETG x))
+ // result: (SETLE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETLE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETB x))
+ // result: (SETAE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETB {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETAE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETAE x))
+ // result: (SETB x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETAE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETB)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETBE x))
+ // result: (SETA x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETBE {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETA)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [1] (SETA x))
+ // result: (SETBE x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_0.Op != OpAMD64SETA {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETBE)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (XORLconst [d] x))
+ // result: (XORLconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] (BTCLconst [d] x))
+ // result: (XORLconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTCLconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORLconst [c] x)
+ // cond: c==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORLconst [c] (MOVLconst [d]))
+ // result: (MOVLconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORLconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORLconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORLconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORLload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORLload x [off] {sym} ptr (MOVSSstore [off] {sym} ptr y _))
+ // result: (XORL x (MOVLf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSSstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORL)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVLf2i, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORLmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORLmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORLmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORLmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORLmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORLmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQ (SHLQ (MOVQconst [1]) y) x)
+ // result: (BTCQ x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ y := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpAMD64BTCQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XORQ (MOVQconst [c]) x)
+ // cond: isUint64PowerOfTwo(c) && uint64(c) >= 128
+ // result: (BTCQconst [int8(log64(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isUint64PowerOfTwo(c) && uint64(c) >= 128) {
+ continue
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x (MOVQconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORQconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ (SHLQconst x [c]) (SHRQconst x [d]))
+ // cond: d==64-c
+ // result: (ROLQconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAMD64SHLQconst {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpAMD64SHRQconst {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpAMD64ROLQconst)
+ v.AuxInt = int8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x x)
+ // result: (MOVQconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORQ x l:(MOVQload [off] {sym} ptr mem))
+ // cond: canMergeLoadClobber(v, l, x) && clobber(l)
+ // result: (XORQload x [off] {sym} ptr mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ l := v_1
+ if l.Op != OpAMD64MOVQload {
+ continue
+ }
+ off := auxIntToInt32(l.AuxInt)
+ sym := auxToSym(l.Aux)
+ mem := l.Args[1]
+ ptr := l.Args[0]
+ if !(canMergeLoadClobber(v, l, x) && clobber(l)) {
+ continue
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORQ x (ADDQconst [-1] x))
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (BLSMSKQ x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAMD64ADDQconst || auxIntToInt32(v_1.AuxInt) != -1 || x != v_1.Args[0] || !(buildcfg.GOAMD64 >= 3) {
+ continue
+ }
+ v.reset(OpAMD64BLSMSKQ)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORQconst [c] x)
+ // cond: isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128
+ // result: (BTCQconst [int8(log32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isUint64PowerOfTwo(int64(c)) && uint64(c) >= 128) {
+ break
+ }
+ v.reset(OpAMD64BTCQconst)
+ v.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (XORQconst [d] x))
+ // result: (XORQconst [c ^ d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64XORQconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [c] (BTCQconst [d] x))
+ // cond: is32Bit(int64(c) ^ 1<<uint32(d))
+ // result: (XORQconst [c ^ 1<<uint32(d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64BTCQconst {
+ break
+ }
+ d := auxIntToInt8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) ^ 1<<uint32(d))) {
+ break
+ }
+ v.reset(OpAMD64XORQconst)
+ v.AuxInt = int32ToAuxInt(c ^ 1<<uint32(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORQconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORQconst [c] (MOVQconst [d]))
+ // result: (MOVQconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpAMD64MOVQconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQconstmodify(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQconstmodify [valoff1] {sym} (ADDQconst [off2] base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {sym} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (XORQconstmodify [valoff1] {sym1} (LEAQ [off2] {sym2} base) mem)
+ // cond: ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)
+ // result: (XORQconstmodify [ValAndOff(valoff1).addOffset32(off2)] {mergeSym(sym1,sym2)} base mem)
+ for {
+ valoff1 := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(ValAndOff(valoff1).canAdd32(off2) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQconstmodify)
+ v.AuxInt = valAndOffToAuxInt(ValAndOff(valoff1).addOffset32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORQload [off1] {sym} val (ADDQconst [off2] base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQload [off1+off2] {sym} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload [off1] {sym1} val (LEAQ [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQload [off1+off2] {mergeSym(sym1,sym2)} val base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ val := v_0
+ if v_1.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ base := v_1.Args[0]
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(val, base, mem)
+ return true
+ }
+ // match: (XORQload x [off] {sym} ptr (MOVSDstore [off] {sym} ptr y _))
+ // result: (XORQ x (MOVQf2i y))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr := v_1
+ if v_2.Op != OpAMD64MOVSDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ if ptr != v_2.Args[0] {
+ break
+ }
+ v.reset(OpAMD64XORQ)
+ v0 := b.NewValue0(v_2.Pos, OpAMD64MOVQf2i, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAMD64XORQmodify(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORQmodify [off1] {sym} (ADDQconst [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2))
+ // result: (XORQmodify [off1+off2] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64ADDQconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (XORQmodify [off1] {sym1} (LEAQ [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (XORQmodify [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpAMD64LEAQ {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpAMD64XORQmodify)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LEAQ {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (XADDLlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDLlock, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (XADDQlock val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64AddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpAMD64XADDQlock, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (ANDLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (ANDBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ANDBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (CMPXCHGLlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGLlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (CMPXCHGQlock ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPXCHGQlock)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (XCHGL val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGL)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (XCHGQ val ptr mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64XCHGQ)
+ v.AddArg3(val, ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVLatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVQatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr32 ptr val mem)
+ // result: (ORLlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORLlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicOr8 ptr val mem)
+ // result: (ORBlock ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpAMD64ORBlock)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore32 ptr val mem)
+ // result: (Select1 (XCHGL <types.NewTuple(typ.UInt32,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGL, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore64 ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.UInt64,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStore8 ptr val mem)
+ // result: (Select1 (XCHGB <types.NewTuple(typ.UInt8,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGB, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (Select1 (XCHGQ <types.NewTuple(typ.BytePtr,types.TypeMem)> val ptr mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64XCHGQ, types.NewTuple(typ.BytePtr, types.TypeMem))
+ v0.AddArg3(val, ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen16 x)
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVWQZX <typ.UInt32> x) (MOVWQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (Select0 (BSRQ (LEAQ1 <typ.UInt64> [1] (MOVLQZX <typ.UInt64> x) (MOVLQZX <typ.UInt64> x))))
+ for {
+ x := v_0
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64LEAQ1, typ.UInt64)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v2.AddArg(x)
+ v1.AddArg2(v2, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 <t> x)
+ // result: (ADDQconst [1] (CMOVQEQ <t> (Select0 <t> (BSRQ x)) (MOVQconst <t> [-1]) (Select1 <types.TypeFlags> (BSRQ x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMOVQEQ, t)
+ v1 := b.NewValue0(v.Pos, OpSelect0, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64BSRQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v3.AuxInt = int64ToAuxInt(-1)
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4.AddArg(v2)
+ v0.AddArg3(v1, v3, v4)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpBitLen8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen8 x)
+ // result: (BSRL (LEAL1 <typ.UInt32> [1] (MOVBQZX <typ.UInt32> x) (MOVBQZX <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSRL)
+ v0 := b.NewValue0(v.Pos, OpAMD64LEAL1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (ROUNDSD [2] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(2)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVQGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is32BitInt(t)
+ // result: (CMOVLGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQ cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQ y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQ {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQ)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETL cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETL {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETG cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGT y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETG {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGT)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETLE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETLE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGE y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGE)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETA cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWHI y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETA {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWHI)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETB cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETB {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETAE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWCC y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETAE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWCC)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETBE cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWLS y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETBE {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWLS)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETEQF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWEQF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETEQF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWEQF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETNEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWNEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETNEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGTF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGTF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y (SETGEF cond))
+ // cond: is16BitInt(t)
+ // result: (CMOVWGEF y x cond)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if v_2.Op != OpAMD64SETGEF {
+ break
+ }
+ cond := v_2.Args[0]
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWGEF)
+ v.AddArg3(y, x, cond)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 1
+ // result: (CondSelect <t> x y (MOVBQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 1) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 2
+ // result: (CondSelect <t> x y (MOVWQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 2) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 4
+ // result: (CondSelect <t> x y (MOVLQZX <typ.UInt64> check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 4) {
+ break
+ }
+ v.reset(OpCondSelect)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLQZX, typ.UInt64)
+ v0.AddArg(check)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))
+ // result: (CMOVQNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && (is64BitInt(t) || isPtr(t))) {
+ break
+ }
+ v.reset(OpAMD64CMOVQNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)
+ // result: (CMOVLNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVLNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ // match: (CondSelect <t> x y check)
+ // cond: !check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)
+ // result: (CMOVWNE y x (CMPQconst [0] check))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ check := v_2
+ if !(!check.Type.IsFlags() && check.Type.Size() == 8 && is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64CMOVWNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(check)
+ v.AddArg3(y, x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (MOVLconst [int32(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (MOVLconst [b2i32(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpAMD64MOVLconst)
+ v.AuxInt = int32ToAuxInt(b2i32(c))
+ return true
+ }
+}
+func rewriteValueAMD64_OpConstNil(v *Value) bool {
+ // match: (ConstNil )
+ // result: (MOVQconst [0])
+ for {
+ v.reset(OpAMD64MOVQconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (BSFL (BTSLconst <typ.UInt32> [16] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
+ v0.AuxInt = int8ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz16NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz16NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz16NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (Select0 (BSFQ (BTSQconst <typ.UInt64> [32] x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpAMD64BTSQconst, typ.UInt64)
+ v1.AuxInt = int8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz32NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz32NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz32NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTQ x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz64 <t> x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (CMOVQEQ (Select0 <t> (BSFQ x)) (MOVQconst <t> [64]) (Select1 <types.TypeFlags> (BSFQ x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64CMOVQEQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v1 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQconst, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz64NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTQ x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTQ)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz64NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (Select0 (BSFQ x))
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64BSFQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (BSFL (BTSLconst <typ.UInt32> [ 8] x))
+ for {
+ x := v_0
+ v.reset(OpAMD64BSFL)
+ v0 := b.NewValue0(v.Pos, OpAMD64BTSLconst, typ.UInt32)
+ v0.AuxInt = int8ToAuxInt(8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpCtz8NonZero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ctz8NonZero x)
+ // cond: buildcfg.GOAMD64 >= 3
+ // result: (TZCNTL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 >= 3) {
+ break
+ }
+ v.reset(OpAMD64TZCNTL)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Ctz8NonZero x)
+ // cond: buildcfg.GOAMD64 < 3
+ // result: (BSFL x)
+ for {
+ x := v_0
+ if !(buildcfg.GOAMD64 < 3) {
+ break
+ }
+ v.reset(OpAMD64BSFL)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [a] x y)
+ // result: (Select0 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select0 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [a] x y)
+ // result: (Select0 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 [a] x y)
+ // result: (Select0 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select0 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select0 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select0 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq16 x y)
+ // result: (SETEQ (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (SETEQ (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (SETEQF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (SETEQF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq8 x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqB x y)
+ // result: (SETEQ (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (SETEQ (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETEQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (VFMADD231SD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpAMD64VFMADD231SD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueAMD64_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (ROUNDSD [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueAMD64_OpGetG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GetG mem)
+ // cond: v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal
+ // result: (LoweredGetG mem)
+ for {
+ mem := v_0
+ if !(v.Block.Func.OwnAux.Fn.ABI() != obj.ABIInternal) {
+ break
+ }
+ v.reset(OpAMD64LoweredGetG)
+ v.AddArg(mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpHasCPUFeature(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (HasCPUFeature {s})
+ // result: (SETNE (CMPLconst [0] (LoweredHasCPUFeature {s})))
+ for {
+ s := auxToSym(v.Aux)
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64LoweredHasCPUFeature, typ.UInt64)
+ v1.Aux = symToAux(s)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (SETB (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil p)
+ // result: (SETNE (TESTQ p p))
+ for {
+ p := v_0
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64TESTQ, types.TypeFlags)
+ v0.AddArg2(p, p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (SETBE (CMPQ idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16 x y)
+ // result: (SETLE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq16U x y)
+ // result: (SETBE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (SETLE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (SETGEF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (SETBE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (SETLE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (SETGEF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (SETBE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8 x y)
+ // result: (SETLE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETLE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq8U x y)
+ // result: (SETBE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETBE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 x y)
+ // result: (SETL (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16U x y)
+ // result: (SETB (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (SETL (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (SETGF (UCOMISS y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (SETB (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (SETL (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (SETGF (UCOMISD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETGF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (SETB (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 x y)
+ // result: (SETL (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETL)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8U x y)
+ // result: (SETB (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETB)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVQload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVQload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t)
+ // result: (MOVLload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVLload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || is8BitInt(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || is8BitInt(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVSSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVSDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LEAQ {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpAMD64LEAQ)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueAMD64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHLQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHLL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHLL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHLL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [a] x y)
+ // result: (Select1 (DIVW [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select1 (DIVWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [a] x y)
+ // result: (Select1 (DIVL [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVL, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 (DIVLU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVLU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 [a] x y)
+ // result: (Select1 (DIVQ [a] x y))
+ for {
+ a := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQ, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AuxInt = boolToAuxInt(a)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select1 (DIVQU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVQU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select1 (DIVW (SignExt8to16 x) (SignExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVW, types.NewTuple(typ.Int16, typ.Int16))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to16, typ.Int16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select1 (DIVWU (ZeroExt8to16 x) (ZeroExt8to16 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpAMD64DIVWU, types.NewTuple(typ.UInt16, typ.UInt16))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to16, typ.UInt16)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVLstore dst (MOVLload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVQstore dst (MOVQload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVQstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: config.useSSE
+ // result: (MOVOstore dst (MOVOload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstore)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // cond: !config.useSSE
+ // result: (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] dst src mem)
+ // result: (Move [16] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [48] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [16]) (OffPtr <src.Type> src [16]) (Move [16] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(16)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [64] dst src mem)
+ // cond: config.useSSE
+ // result: (Move [32] (OffPtr <dst.Type> dst [32]) (OffPtr <src.Type> src [32]) (Move [32] dst src mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(32)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(32)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(32)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVLstore [3] dst (MOVLload [3] src mem) (MOVLstore dst (MOVLload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVLstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [9] dst src mem)
+ // result: (MOVBstore [8] dst (MOVBload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [10] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVLstore [8] dst (MOVLload [8] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpAMD64MOVLstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s == 11 || s >= 13 && s <= 15
+ // result: (MOVQstore [int32(s-8)] dst (MOVQload [int32(s-8)] src mem) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s == 11 || s >= 13 && s <= 15) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 <= 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore dst (MOVQload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVOstore dst (MOVOload src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstore, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVOload, types.TypeInt128)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (MOVQstore [8] dst (MOVQload [8] src mem) (MOVQstore dst (MOVQload src mem) mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg2(src, mem)
+ v4 := b.NewValue0(v.Pos, OpAMD64MOVQstore, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
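+	// Note: moves whose size is a multiple of 16 and between 64 and 16*64 bytes use
+	// Duff's device (DUFFCOPY); larger copies, or configurations with Duff's device
+	// disabled, fall back to REP MOVSQ with the quadword count in a register.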
+ // match: (Move [s] dst src mem)
+ // cond: s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 64 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: (s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)
+ // result: (REPMOVSQ dst src (MOVQconst [s/8]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !((s > 16*64 || config.noDuffDevice) && s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpAMD64REPMOVSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
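+// Note: amd64 SSE has no floating-point negate instruction, so Neg32F/Neg64F below
+// are lowered to a PXOR with a -0.0 constant, which flips only the sign bit.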
+func rewriteValueAMD64_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32F x)
+ // result: (PXOR x (MOVSSconst <typ.Float32> [float32(math.Copysign(0, -1))]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSSconst, typ.Float32)
+ v0.AuxInt = float32ToAuxInt(float32(math.Copysign(0, -1)))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64F x)
+ // result: (PXOR x (MOVSDconst <typ.Float64> [math.Copysign(0, -1)]))
+ for {
+ x := v_0
+ v.reset(OpAMD64PXOR)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVSDconst, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(math.Copysign(0, -1))
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq16 x y)
+ // result: (SETNE (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (SETNE (CMPL x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (SETNEF (UCOMISS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (SETNEF (UCOMISD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNEF)
+ v0 := b.NewValue0(v.Pos, OpAMD64UCOMISD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq8 x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqB x y)
+ // result: (SETNE (CMPB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPB, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (SETNE (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64SETNE)
+ v0 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORLconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64XORLconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
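+// Note: OffPtr becomes ADDQconst when the offset fits in a signed 32-bit immediate;
+// otherwise the offset is materialized with MOVQconst and added with ADDQ.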
+func rewriteValueAMD64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDQconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpAMD64ADDQconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDQ (MOVQconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueAMD64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpAMD64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
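+// Note: PopCount16/PopCount8 zero-extend the operand to 32 bits and use POPCNTL,
+// so a single 32-bit population count covers the narrower widths.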
+func rewriteValueAMD64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTL (MOVWQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTL (MOVBQZX <typ.UInt32> x))
+ for {
+ x := v_0
+ v.reset(OpAMD64POPCNTL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBQZX, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueAMD64_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (ROUNDSD [0] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
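+// Note: the Rsh*Ux* / Rsh*x* rules below implement Go's shift semantics when the
+// count may be >= the operand width. Unsigned shifts AND the raw shift result with
+// an SBBcarrymask that is all ones when the count is in range (the CMP*const sets
+// the carry flag) and zero otherwise, so oversized shifts yield 0. Signed shifts
+// instead OR the count with NOT(mask), forcing an out-of-range count to all ones so
+// the SAR fills the result with sign bits. When shiftIsBounded(v) proves the count
+// is in range, the plain shift instruction is emitted.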
+func rewriteValueAMD64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPWconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPLconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPQconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRW <t> x y) (SBBLcarrymask <t> (CMPBconst y [16])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(16)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARW <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [16])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(16)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPWconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPLconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPQconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRL <t> x y) (SBBLcarrymask <t> (CMPBconst y [32])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARL <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [32])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(32)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARL x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARL)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPWconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPLconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPQconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDQ (SHRQ <t> x y) (SBBQcarrymask <t> (CMPBconst y [64])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRQ, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARQ <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [64])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARQ x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARQ)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPWconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v2.AuxInt = int16ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPLconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPQconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (ANDL (SHRB <t> x y) (SBBLcarrymask <t> (CMPBconst y [8])))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64ANDL)
+ v0 := b.NewValue0(v.Pos, OpAMD64SHRB, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, t)
+ v2 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v2.AuxInt = int8ToAuxInt(8)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SHRB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SHRB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPWconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPWconst, types.TypeFlags)
+ v3.AuxInt = int16ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPLconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPLconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORQ <y.Type> y (NOTQ <y.Type> (SBBQcarrymask <y.Type> (CMPQconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORQ, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTQ, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPQconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x y)
+ // cond: !shiftIsBounded(v)
+ // result: (SARB <t> x (ORL <y.Type> y (NOTL <y.Type> (SBBLcarrymask <y.Type> (CMPBconst y [8])))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ if !(!shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpAMD64ORL, y.Type)
+ v1 := b.NewValue0(v.Pos, OpAMD64NOTL, y.Type)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBLcarrymask, y.Type)
+ v3 := b.NewValue0(v.Pos, OpAMD64CMPBconst, types.TypeFlags)
+ v3.AuxInt = int8ToAuxInt(8)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SARB x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpAMD64SARB)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
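+// Note: Select0/Select1 project results out of tuple-producing generic ops.
+// Mul64uover/Mul32uover become unsigned multiplies (MULQU/MULLU) whose flags record
+// overflow; Add64carry/Sub64borrow thread the carry/borrow through the flags via
+// NEGLflags and ADCQ/SBBQ; AddTupleFirst32/64 add into the first tuple element.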
+func rewriteValueAMD64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select0 <typ.UInt64> (MULQU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Mul32uover x y))
+ // result: (Select0 <typ.UInt32> (MULLU x y))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDL val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDL)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADDQ val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpAMD64ADDQ)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULQU x y)))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULQU, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Mul32uover x y))
+ // result: (SETO (Select1 <types.TypeFlags> (MULLU x y)))
+ for {
+ if v_0.Op != OpMul32uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpAMD64SETO)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpAMD64MULLU, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Add64carry x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (ADCQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64ADCQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEGQ <typ.UInt64> (SBBQcarrymask <typ.UInt64> (Select1 <types.TypeFlags> (SBBQ x y (Select1 <types.TypeFlags> (NEGLflags c))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpAMD64NEGQ)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAMD64SBBQcarrymask, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpAMD64SBBQ, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAMD64NEGLflags, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v4.AddArg(c)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (NEGLflags (MOVQconst [0])))
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpAMD64FlagEQ)
+ return true
+ }
+ // match: (Select1 (NEGLflags (NEGQ (SBBQcarrymask x))))
+ // result: x
+ for {
+ if v_0.Op != OpAMD64NEGLflags {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64NEGQ {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64SBBQcarrymask {
+ break
+ }
+ x := v_0_0_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpAMD64AddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ return false
+}
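+// Note: the SelectN rules recognize fully-expanded calls to runtime.memmove with a
+// constant, inlinable size whose argument stores have no other uses, and replace the
+// call with a generic Move of that size.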
+func rewriteValueAMD64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVQstoreconst _ [sc] s2:(MOVQstore _ src s3:(MOVQstore _ dst mem)))))
+ // cond: sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)
+ // result: (Move [sc.Val64()] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpAMD64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpAMD64MOVQstoreconst {
+ break
+ }
+ sc := auxIntToValAndOff(s1.AuxInt)
+ _ = s1.Args[1]
+ s2 := s1.Args[1]
+ if s2.Op != OpAMD64MOVQstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpAMD64MOVQstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sc.Val64() >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sc.Val64(), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sc.Val64())
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVQconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpAMD64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpAMD64MOVQconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
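+// Note: Slicemask turns a nonnegative length x into an all-ones mask when x > 0 and
+// zero when x == 0: NEGQ sets the sign bit exactly when x > 0, and an arithmetic
+// shift right by 63 smears that bit across the whole word.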
+func rewriteValueAMD64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SARQconst (NEGQ <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpAMD64SARQconst)
+ v.AuxInt = int8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpAMD64NEGQ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
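+// Note: SpectreIndex/SpectreSliceIndex clamp an out-of-range index to 0 with a
+// conditional move keyed off CMPQ x y, so speculative execution cannot use an
+// out-of-bounds index (emitted under the compiler's -spectre=index mode).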
+func rewriteValueAMD64_OpSpectreIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreIndex <t> x y)
+ // result: (CMOVQCC x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQCC)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueAMD64_OpSpectreSliceIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SpectreSliceIndex <t> x y)
+ // result: (CMOVQHI x (MOVQconst [0]) (CMPQ x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAMD64CMOVQHI)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpAMD64CMPQ, types.TypeFlags)
+ v1.AddArg2(x, y)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
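+// Note: generic Store is dispatched purely on the store size (and whether the value
+// is a float) to the matching MOVSD/MOVSS/MOVQ/MOVL/MOVW/MOVBstore instruction.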
+func rewriteValueAMD64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVSDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpAMD64MOVSDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVSSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpAMD64MOVSSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (MOVQstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpAMD64MOVQstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVLstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpAMD64MOVLstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpAMD64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpAMD64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (ROUNDSD [3] x)
+ for {
+ x := v_0
+ v.reset(OpAMD64ROUNDSD)
+ v.AuxInt = int8ToAuxInt(3)
+ v.AddArg(x)
+ return true
+ }
+}
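+// Note: Zero mirrors the Move lowering: small sizes use (possibly overlapping)
+// store-constant instructions, sizes that are not a multiple of the store width
+// peel off the remainder with an OffPtr and recurse, and multiples of 16 use MOVO
+// (SSE) store-constants when config.useSSE, with MOVQstoreconst chains otherwise.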
+func rewriteValueAMD64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVWstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,4)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVLstoreconst [makeValAndOff(0,3)] destptr (MOVLstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpAMD64MOVLstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8 && !config.useSSE
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8 && !config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: !config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,24)] destptr (MOVQstoreconst [makeValAndOff(0,16)] destptr (MOVQstoreconst [makeValAndOff(0,8)] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(!config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 24))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 8))
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 8 && s < 16 && config.useSSE
+ // result: (MOVQstoreconst [makeValAndOff(0,int32(s-8))] destptr (MOVQstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 8 && s < 16 && config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVQstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, int32(s-8)))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 > 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE
+ // result: (Zero [s-s%16] (OffPtr <destptr.Type> destptr [s%16]) (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s > 16 && s%16 <= 8 && config.useSSE) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [48] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [64] destptr mem)
+ // cond: config.useSSE
+ // result: (MOVOstoreconst [makeValAndOff(0,48)] destptr (MOVOstoreconst [makeValAndOff(0,32)] destptr (MOVOstoreconst [makeValAndOff(0,16)] destptr (MOVOstoreconst [makeValAndOff(0,0)] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ if !(config.useSSE) {
+ break
+ }
+ v.reset(OpAMD64MOVOstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 48))
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 32))
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 16))
+ v2 := b.NewValue0(v.Pos, OpAMD64MOVOstoreconst, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 0))
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [s] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 64 && s <= 1024 && s%16 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpAMD64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: (s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0
+ // result: (REPSTOSQ destptr (MOVQconst [s/8]) (MOVQconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !((s > 1024 || (config.noDuffDevice && s > 64 || !config.useSSE && s > 32)) && s%8 == 0) {
+ break
+ }
+ v.reset(OpAMD64REPSTOSQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s / 8)
+ v1 := b.NewValue0(v.Pos, OpAMD64MOVQconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(destptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockAMD64(b *Block) bool {
+ switch b.Kind {
+ case BlockAMD64EQ:
+ // match: (EQ (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (UGE (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (UGE (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (UGE (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (UGE (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ // match: (EQ (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (UGE (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (UGE (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64UGE, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64GE:
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64GT:
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockIf:
+ // match: (If (SETL cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETL {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (If (SETLE cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETLE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (If (SETG cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETG {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (If (SETGE cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (If (SETEQ cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQ {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (If (SETNE cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (If (SETB cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETB {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (If (SETBE cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETBE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (If (SETA cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETA {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETAE cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETAE {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETO cmp) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETO {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (If (SETGF cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (If (SETGEF cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETGEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (If (SETEQF cmp) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETEQF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (If (SETNEF cmp) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64SETNEF {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (TESTB cond cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpAMD64TESTB, types.TypeFlags)
+ v0.AddArg2(cond, cond)
+ b.resetWithControl(BlockAMD64NE, v0)
+ return true
+ }
+ case BlockAMD64LE:
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64LT:
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64NE:
+ // match: (NE (TESTB (SETL cmp) (SETL cmp)) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETL {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETL || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETLE cmp) (SETLE cmp)) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETLE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETLE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64LE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETG cmp) (SETG cmp)) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETG {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETG || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGE cmp) (SETGE cmp)) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64GE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQ cmp) (SETEQ cmp)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQ {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQ || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQ, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNE cmp) (SETNE cmp)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETB cmp) (SETB cmp)) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETB {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETB || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETBE cmp) (SETBE cmp)) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETBE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETBE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETA cmp) (SETA cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETA {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETA || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETAE cmp) (SETAE cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETAE {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETAE || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETO cmp) (SETO cmp)) yes no)
+ // result: (OS cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETO {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETO || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64OS, cmp)
+ return true
+ }
+ // match: (NE (TESTL (SHLL (MOVLconst [1]) x) y))
+ // result: (ULT (BTL x y))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLL {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVLconst || auxIntToInt32(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTL, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ (SHLQ (MOVQconst [1]) x) y))
+ // result: (ULT (BTQ x y))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64SHLQ {
+ continue
+ }
+ x := v_0_0.Args[1]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpAMD64MOVQconst || auxIntToInt64(v_0_0_0.AuxInt) != 1 {
+ continue
+ }
+ y := v_0_1
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTLconst [c] x))
+ // cond: isUint32PowerOfTwo(int64(c))
+ // result: (ULT (BTLconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTLconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint32PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQconst [c] x))
+ // cond: isUint64PowerOfTwo(int64(c))
+ // result: (ULT (BTQconst [int8(log32(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isUint64PowerOfTwo(int64(c))) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log32(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ // match: (NE (TESTQ (MOVQconst [c]) x))
+ // cond: isUint64PowerOfTwo(c)
+ // result: (ULT (BTQconst [int8(log64(c))] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpAMD64MOVQconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isUint64PowerOfTwo(c)) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(int8(log64(c)))
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHLQconst [63] (SHRQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHLLconst [31] (SHRQconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHLLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHRQconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] (SHLQconst [63] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLQconst || auxIntToInt8(z1_0.AuxInt) != 63 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] (SHLLconst [31] x)) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [0] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ z1_0 := z1.Args[0]
+ if z1_0.Op != OpAMD64SHLLconst || auxIntToInt8(z1_0.AuxInt) != 31 {
+ continue
+ }
+ x := z1_0.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(0)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTQ z1:(SHRQconst [63] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTQconst [63] x))
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRQconst || auxIntToInt8(z1.AuxInt) != 63 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTQconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(63)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTL z1:(SHRLconst [31] x) z2))
+ // cond: z1==z2
+ // result: (ULT (BTLconst [31] x))
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z1 := v_0_0
+ if z1.Op != OpAMD64SHRLconst || auxIntToInt8(z1.AuxInt) != 31 {
+ continue
+ }
+ x := z1.Args[0]
+ z2 := v_0_1
+ if !(z1 == z2) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpAMD64BTLconst, types.TypeFlags)
+ v0.AuxInt = int8ToAuxInt(31)
+ v0.AddArg(x)
+ b.resetWithControl(BlockAMD64ULT, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (TESTB (SETGF cmp) (SETGF cmp)) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETGEF cmp) (SETGEF cmp)) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETGEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETGEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETEQF cmp) (SETEQF cmp)) yes no)
+ // result: (EQF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETEQF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETEQF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64EQF, cmp)
+ return true
+ }
+ // match: (NE (TESTB (SETNEF cmp) (SETNEF cmp)) yes no)
+ // result: (NEF cmp yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAMD64SETNEF {
+ break
+ }
+ cmp := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpAMD64SETNEF || cmp != v_0_1.Args[0] {
+ break
+ }
+ b.resetWithControl(BlockAMD64NEF, cmp)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64NE, cmp)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64UGE:
+ // match: (UGE (TESTQ x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTL x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTW x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (TESTB x x) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULE, cmp)
+ return true
+ }
+ // match: (UGE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64UGT:
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64ULT, cmp)
+ return true
+ }
+ // match: (UGT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagLT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagGT_ULT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (FlagGT_UGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockAMD64ULE:
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGE, cmp)
+ return true
+ }
+ // match: (ULE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockAMD64ULT:
+ // match: (ULT (TESTQ x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTQ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTL x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTL {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTW x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTW {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (TESTB x x) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64TESTB {
+ v_0 := b.Controls[0]
+ x := v_0.Args[1]
+ if x != v_0.Args[0] {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpAMD64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockAMD64UGT, cmp)
+ return true
+ }
+ // match: (ULT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagLT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagLT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagLT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagLT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (FlagGT_ULT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpAMD64FlagGT_ULT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagGT_UGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpAMD64FlagGT_UGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
new file mode 100644
index 0000000..ae50aaa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteAMD64splitload.go
@@ -0,0 +1,851 @@
+// Code generated from gen/AMD64splitload.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+func rewriteValueAMD64splitload(v *Value) bool {
+ switch v.Op {
+ case OpAMD64CMPBconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstload(v)
+ case OpAMD64CMPBconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v)
+ case OpAMD64CMPBload:
+ return rewriteValueAMD64splitload_OpAMD64CMPBload(v)
+ case OpAMD64CMPBloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v)
+ case OpAMD64CMPLconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstload(v)
+ case OpAMD64CMPLconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v)
+ case OpAMD64CMPLconstloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v)
+ case OpAMD64CMPLload:
+ return rewriteValueAMD64splitload_OpAMD64CMPLload(v)
+ case OpAMD64CMPLloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v)
+ case OpAMD64CMPLloadidx4:
+ return rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v)
+ case OpAMD64CMPQconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstload(v)
+ case OpAMD64CMPQconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v)
+ case OpAMD64CMPQconstloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v)
+ case OpAMD64CMPQload:
+ return rewriteValueAMD64splitload_OpAMD64CMPQload(v)
+ case OpAMD64CMPQloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v)
+ case OpAMD64CMPQloadidx8:
+ return rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v)
+ case OpAMD64CMPWconstload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstload(v)
+ case OpAMD64CMPWconstloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v)
+ case OpAMD64CMPWconstloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v)
+ case OpAMD64CMPWload:
+ return rewriteValueAMD64splitload_OpAMD64CMPWload(v)
+ case OpAMD64CMPWloadidx1:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v)
+ case OpAMD64CMPWloadidx2:
+ return rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v)
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBload {sym} [vo.Off()] ptr mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTB x:(MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTB)
+ x := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPBconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPBconst (MOVBloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val8()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPBconst)
+ v.AuxInt = int8ToAuxInt(vo.Val8())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBload {sym} [off] ptr x mem)
+ // result: (CMPB (MOVBload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPBloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPBloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPB (MOVBloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPB)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVBloadidx1, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLconstloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTL x:(MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTL)
+ x := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPLconstloadidx4 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPLconst (MOVLloadidx4 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPLconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLload {sym} [off] ptr x mem)
+ // result: (CMPL (MOVLload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx1, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPLloadidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPLloadidx4 {sym} [off] ptr idx x mem)
+ // result: (CMPL (MOVLloadidx4 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPL)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVLloadidx4, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQload {sym} [vo.Off()] ptr mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQconstloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTQ x:(MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTQ)
+ x := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPQconstloadidx8 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPQconst (MOVQloadidx8 {sym} [vo.Off()] ptr idx mem) [vo.Val()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPQconst)
+ v.AuxInt = int32ToAuxInt(vo.Val())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQload {sym} [off] ptr x mem)
+ // result: (CMPQ (MOVQload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx1, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPQloadidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPQloadidx8 {sym} [off] ptr idx x mem)
+ // result: (CMPQ (MOVQloadidx8 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPQ)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVQloadidx8, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWload {sym} [vo.Off()] ptr mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg2(ptr, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstload {sym} [vo] ptr mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWload {sym} [vo.Off()] ptr mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx1(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx1 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx1 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWconstloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() == 0
+ // result: (TESTW x:(MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) x)
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() == 0) {
+ break
+ }
+ v.reset(OpAMD64TESTW)
+ x := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ x.AuxInt = int32ToAuxInt(vo.Off())
+ x.Aux = symToAux(sym)
+ x.AddArg3(ptr, idx, mem)
+ v.AddArg2(x, x)
+ return true
+ }
+ // match: (CMPWconstloadidx2 {sym} [vo] ptr idx mem)
+ // cond: vo.Val() != 0
+ // result: (CMPWconst (MOVWloadidx2 {sym} [vo.Off()] ptr idx mem) [vo.Val16()])
+ for {
+ vo := auxIntToValAndOff(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ mem := v_2
+ if !(vo.Val() != 0) {
+ break
+ }
+ v.reset(OpAMD64CMPWconst)
+ v.AuxInt = int16ToAuxInt(vo.Val16())
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(vo.Off())
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWload {sym} [off] ptr x mem)
+ // result: (CMPW (MOVWload {sym} [off] ptr mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ x := v_1
+ mem := v_2
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx1(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx1 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx1 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx1, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueAMD64splitload_OpAMD64CMPWloadidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CMPWloadidx2 {sym} [off] ptr idx x mem)
+ // result: (CMPW (MOVWloadidx2 {sym} [off] ptr idx mem) x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ idx := v_1
+ x := v_2
+ mem := v_3
+ v.reset(OpAMD64CMPW)
+ v0 := b.NewValue0(v.Pos, OpAMD64MOVWloadidx2, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg3(ptr, idx, mem)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteBlockAMD64splitload(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM.go b/src/cmd/compile/internal/ssa/rewriteARM.go
new file mode 100644
index 0000000..1b50bf9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM.go
@@ -0,0 +1,21903 @@
+// Code generated from gen/ARM.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "internal/buildcfg"
+import "cmd/compile/internal/types"
+
+func rewriteValueARM(v *Value) bool {
+ switch v.Op {
+ case OpARMADC:
+ return rewriteValueARM_OpARMADC(v)
+ case OpARMADCconst:
+ return rewriteValueARM_OpARMADCconst(v)
+ case OpARMADCshiftLL:
+ return rewriteValueARM_OpARMADCshiftLL(v)
+ case OpARMADCshiftLLreg:
+ return rewriteValueARM_OpARMADCshiftLLreg(v)
+ case OpARMADCshiftRA:
+ return rewriteValueARM_OpARMADCshiftRA(v)
+ case OpARMADCshiftRAreg:
+ return rewriteValueARM_OpARMADCshiftRAreg(v)
+ case OpARMADCshiftRL:
+ return rewriteValueARM_OpARMADCshiftRL(v)
+ case OpARMADCshiftRLreg:
+ return rewriteValueARM_OpARMADCshiftRLreg(v)
+ case OpARMADD:
+ return rewriteValueARM_OpARMADD(v)
+ case OpARMADDD:
+ return rewriteValueARM_OpARMADDD(v)
+ case OpARMADDF:
+ return rewriteValueARM_OpARMADDF(v)
+ case OpARMADDS:
+ return rewriteValueARM_OpARMADDS(v)
+ case OpARMADDSshiftLL:
+ return rewriteValueARM_OpARMADDSshiftLL(v)
+ case OpARMADDSshiftLLreg:
+ return rewriteValueARM_OpARMADDSshiftLLreg(v)
+ case OpARMADDSshiftRA:
+ return rewriteValueARM_OpARMADDSshiftRA(v)
+ case OpARMADDSshiftRAreg:
+ return rewriteValueARM_OpARMADDSshiftRAreg(v)
+ case OpARMADDSshiftRL:
+ return rewriteValueARM_OpARMADDSshiftRL(v)
+ case OpARMADDSshiftRLreg:
+ return rewriteValueARM_OpARMADDSshiftRLreg(v)
+ case OpARMADDconst:
+ return rewriteValueARM_OpARMADDconst(v)
+ case OpARMADDshiftLL:
+ return rewriteValueARM_OpARMADDshiftLL(v)
+ case OpARMADDshiftLLreg:
+ return rewriteValueARM_OpARMADDshiftLLreg(v)
+ case OpARMADDshiftRA:
+ return rewriteValueARM_OpARMADDshiftRA(v)
+ case OpARMADDshiftRAreg:
+ return rewriteValueARM_OpARMADDshiftRAreg(v)
+ case OpARMADDshiftRL:
+ return rewriteValueARM_OpARMADDshiftRL(v)
+ case OpARMADDshiftRLreg:
+ return rewriteValueARM_OpARMADDshiftRLreg(v)
+ case OpARMAND:
+ return rewriteValueARM_OpARMAND(v)
+ case OpARMANDconst:
+ return rewriteValueARM_OpARMANDconst(v)
+ case OpARMANDshiftLL:
+ return rewriteValueARM_OpARMANDshiftLL(v)
+ case OpARMANDshiftLLreg:
+ return rewriteValueARM_OpARMANDshiftLLreg(v)
+ case OpARMANDshiftRA:
+ return rewriteValueARM_OpARMANDshiftRA(v)
+ case OpARMANDshiftRAreg:
+ return rewriteValueARM_OpARMANDshiftRAreg(v)
+ case OpARMANDshiftRL:
+ return rewriteValueARM_OpARMANDshiftRL(v)
+ case OpARMANDshiftRLreg:
+ return rewriteValueARM_OpARMANDshiftRLreg(v)
+ case OpARMBFX:
+ return rewriteValueARM_OpARMBFX(v)
+ case OpARMBFXU:
+ return rewriteValueARM_OpARMBFXU(v)
+ case OpARMBIC:
+ return rewriteValueARM_OpARMBIC(v)
+ case OpARMBICconst:
+ return rewriteValueARM_OpARMBICconst(v)
+ case OpARMBICshiftLL:
+ return rewriteValueARM_OpARMBICshiftLL(v)
+ case OpARMBICshiftLLreg:
+ return rewriteValueARM_OpARMBICshiftLLreg(v)
+ case OpARMBICshiftRA:
+ return rewriteValueARM_OpARMBICshiftRA(v)
+ case OpARMBICshiftRAreg:
+ return rewriteValueARM_OpARMBICshiftRAreg(v)
+ case OpARMBICshiftRL:
+ return rewriteValueARM_OpARMBICshiftRL(v)
+ case OpARMBICshiftRLreg:
+ return rewriteValueARM_OpARMBICshiftRLreg(v)
+ case OpARMCMN:
+ return rewriteValueARM_OpARMCMN(v)
+ case OpARMCMNconst:
+ return rewriteValueARM_OpARMCMNconst(v)
+ case OpARMCMNshiftLL:
+ return rewriteValueARM_OpARMCMNshiftLL(v)
+ case OpARMCMNshiftLLreg:
+ return rewriteValueARM_OpARMCMNshiftLLreg(v)
+ case OpARMCMNshiftRA:
+ return rewriteValueARM_OpARMCMNshiftRA(v)
+ case OpARMCMNshiftRAreg:
+ return rewriteValueARM_OpARMCMNshiftRAreg(v)
+ case OpARMCMNshiftRL:
+ return rewriteValueARM_OpARMCMNshiftRL(v)
+ case OpARMCMNshiftRLreg:
+ return rewriteValueARM_OpARMCMNshiftRLreg(v)
+ case OpARMCMOVWHSconst:
+ return rewriteValueARM_OpARMCMOVWHSconst(v)
+ case OpARMCMOVWLSconst:
+ return rewriteValueARM_OpARMCMOVWLSconst(v)
+ case OpARMCMP:
+ return rewriteValueARM_OpARMCMP(v)
+ case OpARMCMPD:
+ return rewriteValueARM_OpARMCMPD(v)
+ case OpARMCMPF:
+ return rewriteValueARM_OpARMCMPF(v)
+ case OpARMCMPconst:
+ return rewriteValueARM_OpARMCMPconst(v)
+ case OpARMCMPshiftLL:
+ return rewriteValueARM_OpARMCMPshiftLL(v)
+ case OpARMCMPshiftLLreg:
+ return rewriteValueARM_OpARMCMPshiftLLreg(v)
+ case OpARMCMPshiftRA:
+ return rewriteValueARM_OpARMCMPshiftRA(v)
+ case OpARMCMPshiftRAreg:
+ return rewriteValueARM_OpARMCMPshiftRAreg(v)
+ case OpARMCMPshiftRL:
+ return rewriteValueARM_OpARMCMPshiftRL(v)
+ case OpARMCMPshiftRLreg:
+ return rewriteValueARM_OpARMCMPshiftRLreg(v)
+ case OpARMEqual:
+ return rewriteValueARM_OpARMEqual(v)
+ case OpARMGreaterEqual:
+ return rewriteValueARM_OpARMGreaterEqual(v)
+ case OpARMGreaterEqualU:
+ return rewriteValueARM_OpARMGreaterEqualU(v)
+ case OpARMGreaterThan:
+ return rewriteValueARM_OpARMGreaterThan(v)
+ case OpARMGreaterThanU:
+ return rewriteValueARM_OpARMGreaterThanU(v)
+ case OpARMLessEqual:
+ return rewriteValueARM_OpARMLessEqual(v)
+ case OpARMLessEqualU:
+ return rewriteValueARM_OpARMLessEqualU(v)
+ case OpARMLessThan:
+ return rewriteValueARM_OpARMLessThan(v)
+ case OpARMLessThanU:
+ return rewriteValueARM_OpARMLessThanU(v)
+ case OpARMMOVBUload:
+ return rewriteValueARM_OpARMMOVBUload(v)
+ case OpARMMOVBUloadidx:
+ return rewriteValueARM_OpARMMOVBUloadidx(v)
+ case OpARMMOVBUreg:
+ return rewriteValueARM_OpARMMOVBUreg(v)
+ case OpARMMOVBload:
+ return rewriteValueARM_OpARMMOVBload(v)
+ case OpARMMOVBloadidx:
+ return rewriteValueARM_OpARMMOVBloadidx(v)
+ case OpARMMOVBreg:
+ return rewriteValueARM_OpARMMOVBreg(v)
+ case OpARMMOVBstore:
+ return rewriteValueARM_OpARMMOVBstore(v)
+ case OpARMMOVBstoreidx:
+ return rewriteValueARM_OpARMMOVBstoreidx(v)
+ case OpARMMOVDload:
+ return rewriteValueARM_OpARMMOVDload(v)
+ case OpARMMOVDstore:
+ return rewriteValueARM_OpARMMOVDstore(v)
+ case OpARMMOVFload:
+ return rewriteValueARM_OpARMMOVFload(v)
+ case OpARMMOVFstore:
+ return rewriteValueARM_OpARMMOVFstore(v)
+ case OpARMMOVHUload:
+ return rewriteValueARM_OpARMMOVHUload(v)
+ case OpARMMOVHUloadidx:
+ return rewriteValueARM_OpARMMOVHUloadidx(v)
+ case OpARMMOVHUreg:
+ return rewriteValueARM_OpARMMOVHUreg(v)
+ case OpARMMOVHload:
+ return rewriteValueARM_OpARMMOVHload(v)
+ case OpARMMOVHloadidx:
+ return rewriteValueARM_OpARMMOVHloadidx(v)
+ case OpARMMOVHreg:
+ return rewriteValueARM_OpARMMOVHreg(v)
+ case OpARMMOVHstore:
+ return rewriteValueARM_OpARMMOVHstore(v)
+ case OpARMMOVHstoreidx:
+ return rewriteValueARM_OpARMMOVHstoreidx(v)
+ case OpARMMOVWload:
+ return rewriteValueARM_OpARMMOVWload(v)
+ case OpARMMOVWloadidx:
+ return rewriteValueARM_OpARMMOVWloadidx(v)
+ case OpARMMOVWloadshiftLL:
+ return rewriteValueARM_OpARMMOVWloadshiftLL(v)
+ case OpARMMOVWloadshiftRA:
+ return rewriteValueARM_OpARMMOVWloadshiftRA(v)
+ case OpARMMOVWloadshiftRL:
+ return rewriteValueARM_OpARMMOVWloadshiftRL(v)
+ case OpARMMOVWnop:
+ return rewriteValueARM_OpARMMOVWnop(v)
+ case OpARMMOVWreg:
+ return rewriteValueARM_OpARMMOVWreg(v)
+ case OpARMMOVWstore:
+ return rewriteValueARM_OpARMMOVWstore(v)
+ case OpARMMOVWstoreidx:
+ return rewriteValueARM_OpARMMOVWstoreidx(v)
+ case OpARMMOVWstoreshiftLL:
+ return rewriteValueARM_OpARMMOVWstoreshiftLL(v)
+ case OpARMMOVWstoreshiftRA:
+ return rewriteValueARM_OpARMMOVWstoreshiftRA(v)
+ case OpARMMOVWstoreshiftRL:
+ return rewriteValueARM_OpARMMOVWstoreshiftRL(v)
+ case OpARMMUL:
+ return rewriteValueARM_OpARMMUL(v)
+ case OpARMMULA:
+ return rewriteValueARM_OpARMMULA(v)
+ case OpARMMULD:
+ return rewriteValueARM_OpARMMULD(v)
+ case OpARMMULF:
+ return rewriteValueARM_OpARMMULF(v)
+ case OpARMMULS:
+ return rewriteValueARM_OpARMMULS(v)
+ case OpARMMVN:
+ return rewriteValueARM_OpARMMVN(v)
+ case OpARMMVNshiftLL:
+ return rewriteValueARM_OpARMMVNshiftLL(v)
+ case OpARMMVNshiftLLreg:
+ return rewriteValueARM_OpARMMVNshiftLLreg(v)
+ case OpARMMVNshiftRA:
+ return rewriteValueARM_OpARMMVNshiftRA(v)
+ case OpARMMVNshiftRAreg:
+ return rewriteValueARM_OpARMMVNshiftRAreg(v)
+ case OpARMMVNshiftRL:
+ return rewriteValueARM_OpARMMVNshiftRL(v)
+ case OpARMMVNshiftRLreg:
+ return rewriteValueARM_OpARMMVNshiftRLreg(v)
+ case OpARMNEGD:
+ return rewriteValueARM_OpARMNEGD(v)
+ case OpARMNEGF:
+ return rewriteValueARM_OpARMNEGF(v)
+ case OpARMNMULD:
+ return rewriteValueARM_OpARMNMULD(v)
+ case OpARMNMULF:
+ return rewriteValueARM_OpARMNMULF(v)
+ case OpARMNotEqual:
+ return rewriteValueARM_OpARMNotEqual(v)
+ case OpARMOR:
+ return rewriteValueARM_OpARMOR(v)
+ case OpARMORconst:
+ return rewriteValueARM_OpARMORconst(v)
+ case OpARMORshiftLL:
+ return rewriteValueARM_OpARMORshiftLL(v)
+ case OpARMORshiftLLreg:
+ return rewriteValueARM_OpARMORshiftLLreg(v)
+ case OpARMORshiftRA:
+ return rewriteValueARM_OpARMORshiftRA(v)
+ case OpARMORshiftRAreg:
+ return rewriteValueARM_OpARMORshiftRAreg(v)
+ case OpARMORshiftRL:
+ return rewriteValueARM_OpARMORshiftRL(v)
+ case OpARMORshiftRLreg:
+ return rewriteValueARM_OpARMORshiftRLreg(v)
+ case OpARMRSB:
+ return rewriteValueARM_OpARMRSB(v)
+ case OpARMRSBSshiftLL:
+ return rewriteValueARM_OpARMRSBSshiftLL(v)
+ case OpARMRSBSshiftLLreg:
+ return rewriteValueARM_OpARMRSBSshiftLLreg(v)
+ case OpARMRSBSshiftRA:
+ return rewriteValueARM_OpARMRSBSshiftRA(v)
+ case OpARMRSBSshiftRAreg:
+ return rewriteValueARM_OpARMRSBSshiftRAreg(v)
+ case OpARMRSBSshiftRL:
+ return rewriteValueARM_OpARMRSBSshiftRL(v)
+ case OpARMRSBSshiftRLreg:
+ return rewriteValueARM_OpARMRSBSshiftRLreg(v)
+ case OpARMRSBconst:
+ return rewriteValueARM_OpARMRSBconst(v)
+ case OpARMRSBshiftLL:
+ return rewriteValueARM_OpARMRSBshiftLL(v)
+ case OpARMRSBshiftLLreg:
+ return rewriteValueARM_OpARMRSBshiftLLreg(v)
+ case OpARMRSBshiftRA:
+ return rewriteValueARM_OpARMRSBshiftRA(v)
+ case OpARMRSBshiftRAreg:
+ return rewriteValueARM_OpARMRSBshiftRAreg(v)
+ case OpARMRSBshiftRL:
+ return rewriteValueARM_OpARMRSBshiftRL(v)
+ case OpARMRSBshiftRLreg:
+ return rewriteValueARM_OpARMRSBshiftRLreg(v)
+ case OpARMRSCconst:
+ return rewriteValueARM_OpARMRSCconst(v)
+ case OpARMRSCshiftLL:
+ return rewriteValueARM_OpARMRSCshiftLL(v)
+ case OpARMRSCshiftLLreg:
+ return rewriteValueARM_OpARMRSCshiftLLreg(v)
+ case OpARMRSCshiftRA:
+ return rewriteValueARM_OpARMRSCshiftRA(v)
+ case OpARMRSCshiftRAreg:
+ return rewriteValueARM_OpARMRSCshiftRAreg(v)
+ case OpARMRSCshiftRL:
+ return rewriteValueARM_OpARMRSCshiftRL(v)
+ case OpARMRSCshiftRLreg:
+ return rewriteValueARM_OpARMRSCshiftRLreg(v)
+ case OpARMSBC:
+ return rewriteValueARM_OpARMSBC(v)
+ case OpARMSBCconst:
+ return rewriteValueARM_OpARMSBCconst(v)
+ case OpARMSBCshiftLL:
+ return rewriteValueARM_OpARMSBCshiftLL(v)
+ case OpARMSBCshiftLLreg:
+ return rewriteValueARM_OpARMSBCshiftLLreg(v)
+ case OpARMSBCshiftRA:
+ return rewriteValueARM_OpARMSBCshiftRA(v)
+ case OpARMSBCshiftRAreg:
+ return rewriteValueARM_OpARMSBCshiftRAreg(v)
+ case OpARMSBCshiftRL:
+ return rewriteValueARM_OpARMSBCshiftRL(v)
+ case OpARMSBCshiftRLreg:
+ return rewriteValueARM_OpARMSBCshiftRLreg(v)
+ case OpARMSLL:
+ return rewriteValueARM_OpARMSLL(v)
+ case OpARMSLLconst:
+ return rewriteValueARM_OpARMSLLconst(v)
+ case OpARMSRA:
+ return rewriteValueARM_OpARMSRA(v)
+ case OpARMSRAcond:
+ return rewriteValueARM_OpARMSRAcond(v)
+ case OpARMSRAconst:
+ return rewriteValueARM_OpARMSRAconst(v)
+ case OpARMSRL:
+ return rewriteValueARM_OpARMSRL(v)
+ case OpARMSRLconst:
+ return rewriteValueARM_OpARMSRLconst(v)
+ case OpARMSRR:
+ return rewriteValueARM_OpARMSRR(v)
+ case OpARMSUB:
+ return rewriteValueARM_OpARMSUB(v)
+ case OpARMSUBD:
+ return rewriteValueARM_OpARMSUBD(v)
+ case OpARMSUBF:
+ return rewriteValueARM_OpARMSUBF(v)
+ case OpARMSUBS:
+ return rewriteValueARM_OpARMSUBS(v)
+ case OpARMSUBSshiftLL:
+ return rewriteValueARM_OpARMSUBSshiftLL(v)
+ case OpARMSUBSshiftLLreg:
+ return rewriteValueARM_OpARMSUBSshiftLLreg(v)
+ case OpARMSUBSshiftRA:
+ return rewriteValueARM_OpARMSUBSshiftRA(v)
+ case OpARMSUBSshiftRAreg:
+ return rewriteValueARM_OpARMSUBSshiftRAreg(v)
+ case OpARMSUBSshiftRL:
+ return rewriteValueARM_OpARMSUBSshiftRL(v)
+ case OpARMSUBSshiftRLreg:
+ return rewriteValueARM_OpARMSUBSshiftRLreg(v)
+ case OpARMSUBconst:
+ return rewriteValueARM_OpARMSUBconst(v)
+ case OpARMSUBshiftLL:
+ return rewriteValueARM_OpARMSUBshiftLL(v)
+ case OpARMSUBshiftLLreg:
+ return rewriteValueARM_OpARMSUBshiftLLreg(v)
+ case OpARMSUBshiftRA:
+ return rewriteValueARM_OpARMSUBshiftRA(v)
+ case OpARMSUBshiftRAreg:
+ return rewriteValueARM_OpARMSUBshiftRAreg(v)
+ case OpARMSUBshiftRL:
+ return rewriteValueARM_OpARMSUBshiftRL(v)
+ case OpARMSUBshiftRLreg:
+ return rewriteValueARM_OpARMSUBshiftRLreg(v)
+ case OpARMTEQ:
+ return rewriteValueARM_OpARMTEQ(v)
+ case OpARMTEQconst:
+ return rewriteValueARM_OpARMTEQconst(v)
+ case OpARMTEQshiftLL:
+ return rewriteValueARM_OpARMTEQshiftLL(v)
+ case OpARMTEQshiftLLreg:
+ return rewriteValueARM_OpARMTEQshiftLLreg(v)
+ case OpARMTEQshiftRA:
+ return rewriteValueARM_OpARMTEQshiftRA(v)
+ case OpARMTEQshiftRAreg:
+ return rewriteValueARM_OpARMTEQshiftRAreg(v)
+ case OpARMTEQshiftRL:
+ return rewriteValueARM_OpARMTEQshiftRL(v)
+ case OpARMTEQshiftRLreg:
+ return rewriteValueARM_OpARMTEQshiftRLreg(v)
+ case OpARMTST:
+ return rewriteValueARM_OpARMTST(v)
+ case OpARMTSTconst:
+ return rewriteValueARM_OpARMTSTconst(v)
+ case OpARMTSTshiftLL:
+ return rewriteValueARM_OpARMTSTshiftLL(v)
+ case OpARMTSTshiftLLreg:
+ return rewriteValueARM_OpARMTSTshiftLLreg(v)
+ case OpARMTSTshiftRA:
+ return rewriteValueARM_OpARMTSTshiftRA(v)
+ case OpARMTSTshiftRAreg:
+ return rewriteValueARM_OpARMTSTshiftRAreg(v)
+ case OpARMTSTshiftRL:
+ return rewriteValueARM_OpARMTSTshiftRL(v)
+ case OpARMTSTshiftRLreg:
+ return rewriteValueARM_OpARMTSTshiftRLreg(v)
+ case OpARMXOR:
+ return rewriteValueARM_OpARMXOR(v)
+ case OpARMXORconst:
+ return rewriteValueARM_OpARMXORconst(v)
+ case OpARMXORshiftLL:
+ return rewriteValueARM_OpARMXORshiftLL(v)
+ case OpARMXORshiftLLreg:
+ return rewriteValueARM_OpARMXORshiftLLreg(v)
+ case OpARMXORshiftRA:
+ return rewriteValueARM_OpARMXORshiftRA(v)
+ case OpARMXORshiftRAreg:
+ return rewriteValueARM_OpARMXORshiftRAreg(v)
+ case OpARMXORshiftRL:
+ return rewriteValueARM_OpARMXORshiftRL(v)
+ case OpARMXORshiftRLreg:
+ return rewriteValueARM_OpARMXORshiftRLreg(v)
+ case OpARMXORshiftRR:
+ return rewriteValueARM_OpARMXORshiftRR(v)
+ case OpAbs:
+ v.Op = OpARMABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32:
+ v.Op = OpARMADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARMADDF
+ return true
+ case OpAdd32carry:
+ v.Op = OpARMADDS
+ return true
+ case OpAdd32withcarry:
+ v.Op = OpARMADC
+ return true
+ case OpAdd64F:
+ v.Op = OpARMADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARMADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARMADD
+ return true
+ case OpAddr:
+ return rewriteValueARM_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARMAND
+ return true
+ case OpAnd32:
+ v.Op = OpARMAND
+ return true
+ case OpAnd8:
+ v.Op = OpARMAND
+ return true
+ case OpAndB:
+ v.Op = OpARMAND
+ return true
+ case OpAvg32u:
+ return rewriteValueARM_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueARM_OpBitLen32(v)
+ case OpBswap32:
+ return rewriteValueARM_OpBswap32(v)
+ case OpClosureCall:
+ v.Op = OpARMCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARMMVN
+ return true
+ case OpCom32:
+ v.Op = OpARMMVN
+ return true
+ case OpCom8:
+ v.Op = OpARMMVN
+ return true
+ case OpConst16:
+ return rewriteValueARM_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM_OpConst32F(v)
+ case OpConst64F:
+ return rewriteValueARM_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz8:
+ return rewriteValueARM_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARMMOVFW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARMMOVFWU
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARMMOVFD
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARMMOVWUF
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARMMOVWUD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARMMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARMMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARMMOVDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARMMOVDF
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARMMOVDWU
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARMDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueARM_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpARMDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueARM_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueARM_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM_OpFMA(v)
+ case OpGetCallerPC:
+ v.Op = OpARMLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARMLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARMLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpARMHMUL
+ return true
+ case OpHmul32u:
+ v.Op = OpARMHMULU
+ return true
+ case OpInterCall:
+ v.Op = OpARMCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueARM_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueARM_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueARM_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueARM_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueARM_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueARM_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueARM_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM_OpMove(v)
+ case OpMul16:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32:
+ v.Op = OpARMMUL
+ return true
+ case OpMul32F:
+ v.Op = OpARMMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpARMMULLU
+ return true
+ case OpMul64F:
+ v.Op = OpARMMULD
+ return true
+ case OpMul8:
+ v.Op = OpARMMUL
+ return true
+ case OpNeg16:
+ return rewriteValueARM_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueARM_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpARMNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpARMNEGD
+ return true
+ case OpNeg8:
+ return rewriteValueARM_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueARM_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueARM_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARMXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARMLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARMOR
+ return true
+ case OpOr32:
+ v.Op = OpARMOR
+ return true
+ case OpOr8:
+ v.Op = OpARMOR
+ return true
+ case OpOrB:
+ v.Op = OpARMOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueARM_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueARM_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM_OpRotateLeft32(v)
+ case OpRotateLeft8:
+ return rewriteValueARM_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpARMMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARMMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueARM_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueARM_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARMSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpARMSQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpARMCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM_OpStore(v)
+ case OpSub16:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32:
+ v.Op = OpARMSUB
+ return true
+ case OpSub32F:
+ v.Op = OpARMSUBF
+ return true
+ case OpSub32carry:
+ v.Op = OpARMSUBS
+ return true
+ case OpSub32withcarry:
+ v.Op = OpARMSBC
+ return true
+ case OpSub64F:
+ v.Op = OpARMSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARMSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARMSUB
+ return true
+ case OpTailCall:
+ v.Op = OpARMCALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARMLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARMXOR
+ return true
+ case OpXor32:
+ v.Op = OpARMXOR
+ return true
+ case OpXor8:
+ v.Op = OpARMXOR
+ return true
+ case OpZero:
+ return rewriteValueARM_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARMMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARMMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueARM_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValueARM_OpARMADC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADC (MOVWconst [c]) x flags)
+ // result: (ADCconst [c] x flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLLconst [c] y) flags)
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRLconst [c] y) flags)
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRAconst [c] y) flags)
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SLL y z) flags)
+ // result: (ADCshiftLLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRL y z) flags)
+ // result: (ADCshiftRLreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ // match: (ADC x (SRA y z) flags)
+ // result: (ADCshiftRAreg x y z flags)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMADCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADCconst [c] (ADDconst [d] x) flags)
+ // result: (ADCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (ADCconst [c] (SUBconst [d] x) flags)
+ // result: (ADCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (ADCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (ADCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (ADCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMADCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (ADCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (ADCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADD x (MOVWconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLLconst [c] y))
+ // result: (ADDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRLconst [c] y))
+ // result: (ADDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRAconst [c] y))
+ // result: (ADDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SLL y z))
+ // result: (ADDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRL y z))
+ // result: (ADDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (SRA y z))
+ // result: (ADDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (RSBconst [0] y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMRSBconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> (RSBconst [c] x) (RSBconst [d] y))
+ // result: (RSBconst [c+d] (ADD <t> x y))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMRSBconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARMRSBconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v0 := b.NewValue0(v.Pos, OpARMADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (MUL x y) a)
+ // result: (MULA x y a)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMUL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARMMULA)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDD a (MULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULAD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDD a (NMULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULSD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDF a (MULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULAF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDF a (NMULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULSF a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDS x (MOVWconst [c]))
+ // result: (ADDSconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLLconst [c] y))
+ // result: (ADDSshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRLconst [c] y))
+ // result: (ADDSshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRAconst [c] y))
+ // result: (ADDSshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SLL y z))
+ // result: (ADDSshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRL y z))
+ // result: (ADDSshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADDS x (SRA y z))
+ // result: (ADDSshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMADDSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDSshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x)
+ // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (SUBconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (RSBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVWconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVWconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVWconst [c]) [d])
+ // result: (ADDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [ c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMADDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ADDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ADDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMADDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLLconst [c] y))
+ // result: (ANDshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRLconst [c] y))
+ // result: (ANDshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRAconst [c] y))
+ // result: (ANDshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SLL y z))
+ // result: (ANDshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRL y z))
+ // result: (ANDshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x (SRA y z))
+ // result: (ANDshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMANDshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARMBIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftLL y [c]))
+ // result: (BICshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftLL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRL y [c]))
+ // result: (BICshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRL {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MVNshiftRA y [c]))
+ // result: (BICshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMVNshiftRA {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] x)
+ // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (BICconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVWconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRAreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVWconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVWconst [c]) [d])
+ // result: (ANDconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMANDshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRLreg (MOVWconst [c]) x y)
+ // result: (ANDconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ANDshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMANDshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFX [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBFXU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BFXU [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)<<(32-uint32(c&0xff)-uint32(c>>8))>>(32-uint32(c>>8)))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) << (32 - uint32(c&0xff) - uint32(c>>8)) >> (32 - uint32(c>>8))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVWconst [c]))
+ // result: (BICconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x (SLLconst [c] y))
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRLconst [c] y))
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SRAconst [c] y))
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (BIC x (SLL y z))
+ // result: (BICshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRL y z))
+ // result: (BICshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x (SRA y z))
+ // result: (BICshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMBICshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (BICconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (BICconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(^uint32(c))) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] x)
+ // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && ^uint32(c)<=0xffff
+ // result: (ANDconst [int32(^uint32(c))] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && ^uint32(c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(int32(^uint32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d&^c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d &^ c)
+ return true
+ }
+ // match: (BICconst [c] (BICconst [d] x))
+ // result: (BICconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMBICconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVWconst [c]) [d])
+ // result: (BICconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVWconst [c]) [d])
+ // result: (BICconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVWconst [c]) [d])
+ // result: (BICconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMBICconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMBICshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (BICshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMBICshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVWconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLLconst [c] y))
+ // result: (CMNshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRLconst [c] y))
+ // result: (CMNshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRAconst [c] y))
+ // result: (CMNshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SLL y z))
+ // result: (CMNshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRL y z))
+ // result: (CMNshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (CMN x (SRA y z))
+ // result: (CMNshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMNshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [addFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVWconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRAreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVWconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVWconst [c]) [d])
+ // result: (CMNconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMNshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRLreg (MOVWconst [c]) x y)
+ // result: (CMNconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMCMNconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMNshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWHSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWHSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.uge()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWHSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ult()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWHSconst x (InvertFlags flags) [c])
+ // result: (CMOVWLSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWLSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMOVWLSconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVWLSconst _ (FlagConstant [fc]) [c])
+ // cond: fc.ule()
+ // result: (MOVWconst [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (CMOVWLSconst x (FlagConstant [fc]) [c])
+ // cond: fc.ugt()
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_1.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CMOVWLSconst x (InvertFlags flags) [c])
+ // result: (CMOVWHSconst x flags [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMInvertFlags {
+ break
+ }
+ flags := v_1.Args[0]
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVWconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVWconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLLconst [c] y))
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SLLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftLL x y [c]))
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRLconst [c] y))
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRLconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRL x y [c]))
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRAconst [c] y))
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMP (SRAconst [c] y) x)
+ // result: (InvertFlags (CMPshiftRA x y [c]))
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SLL y z))
+ // result: (CMPshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SLL y z) x)
+ // result: (InvertFlags (CMPshiftLLreg x y z))
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRL y z))
+ // result: (CMPshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRL y z) x)
+ // result: (InvertFlags (CMPshiftRLreg x y z))
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x (SRA y z))
+ // result: (CMPshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMCMPshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (CMP (SRA y z) x)
+ // result: (InvertFlags (CMPshiftRAreg x y z))
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPD x (MOVDconst [0]))
+ // result: (CMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMPF x (MOVFconst [0]))
+ // result: (CMPF0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVFconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARMCMPF0)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [subFlags32(x,y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)
+ // result: (FlagConstant [subFlags32(0, 1)])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 32 && (1<<uint32(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SLL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVWconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRAreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRA <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVWconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v1.AuxInt = int32ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVWconst [c]) [d])
+ // result: (CMPconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMCMPshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRLreg (MOVWconst [c]) x y)
+ // result: (InvertFlags (CMPconst [c] (SRL <x.Type> x y)))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (CMPshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMCMPshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.eq())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.uge())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.gt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMGreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ugt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMLessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.le())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ule())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.lt())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMLessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ult())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMGreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVBload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr idx (MOVBstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVBstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVBload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVBload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVBstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVBstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVDload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVDstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVFload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVFstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHUload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHUload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHUload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVHload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr idx (MOVHstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVHstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARMMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVHload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVHload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHload {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVBUreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpARMMOVHreg {
+ break
+ }
+ v.reset(OpARMMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVHstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVHstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (SUBconst [off2] ptr) mem)
+ // result: (MOVWload [off1-off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADD ptr idx) mem)
+ // cond: sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftLL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRL ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} (ADDshiftRA ptr idx [c]) mem)
+ // cond: sym == nil
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVWconst [int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr idx (MOVWstoreidx ptr2 idx x _))
+ // cond: isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreidx {
+ break
+ }
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadidx ptr (MOVWconst [c]) mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVWconst [c]) ptr mem)
+ // result: (MOVWload [c] ptr mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst idx [c]) mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftLL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRLconst idx [c]) mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRLconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRL ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SRAconst idx [c]) mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SRAconst idx [c]) ptr mem)
+ // result: (MOVWloadshiftRA ptr idx [c] mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARMMOVWloadshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftLL ptr idx [c] (MOVWstoreshiftLL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftLL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftLL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)<<uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRA ptr idx [c] (MOVWstoreshiftRA ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRA {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRA ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [c>>uint64(d)] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWloadshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadshiftRL ptr idx [c] (MOVWstoreshiftRL ptr2 idx [d] x _))
+ // cond: c==d && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARMMOVWstoreshiftRL {
+ break
+ }
+ d := auxIntToInt32(v_2.AuxInt)
+ x := v_2.Args[2]
+ ptr2 := v_2.Args[0]
+ if idx != v_2.Args[1] || !(c == d && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWloadshiftRL ptr (MOVWconst [c]) [d] mem)
+ // result: (MOVWload [int32(uint32(c)>>uint64(d))] ptr mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ v.reset(OpARMMOVWload)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWnop (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARMMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (SUBconst [off2] ptr) val mem)
+ // result: (MOVWstore [off1-off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 - off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADD ptr idx) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftLL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRL ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} (ADDshiftRA ptr idx [c]) val mem)
+ // cond: sym == nil
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil) {
+ break
+ }
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVWconst [c]) val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVWconst [c]) ptr val mem)
+ // result: (MOVWstore [c] ptr val mem)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftLL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRLconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRLconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRL ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SRAconst idx [c]) val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SRAconst idx [c]) ptr val mem)
+ // result: (MOVWstoreshiftRA ptr idx [c] val mem)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstoreshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftLL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftLL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)<<uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) << uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRA(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRA ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [c>>uint64(d)] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMOVWstoreshiftRL(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreshiftRL ptr (MOVWconst [c]) [d] val mem)
+ // result: (MOVWstore [int32(uint32(c)>>uint64(d))] ptr val mem)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ ptr := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ v.reset(OpARMMOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL x (MOVWconst [c]))
+ // cond: int32(c) == -1
+ // result: (RSBconst [0] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (SLLconst [int32(log32(c))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADDshiftLL x x [int32(log32(c-1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARMADDshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSBshiftLL x x [int32(log32(c+1))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (SLLconst [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (SLLconst [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (SLLconst [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVWconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (SLLconst [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ continue
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [1]) a)
+ // result: (ADD x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (SUB a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMSUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULA (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULA (MOVWconst [1]) x a)
+ // result: (ADD x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMADD)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (ADD (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (ADD (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (ADD (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULA (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (ADDconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULD (NEGD x) y)
+ // cond: buildcfg.GOARM >= 6
+ // result: (NMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULF (NEGF x) y)
+ // cond: buildcfg.GOARM >= 6
+ // result: (NMULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ if !(buildcfg.GOARM >= 6) {
+ continue
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMMULS(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS _ (MOVWconst [0]) a)
+ // result: a
+ for {
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [1]) a)
+ // result: (RSB x a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS x (MOVWconst [c]) a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c == -1
+ // result: (ADD a x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c == -1) {
+ break
+ }
+ v.reset(OpARMADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MULS (MOVWconst [0]) _ a)
+ // result: a
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ a := v_2
+ v.copyOf(a)
+ return true
+ }
+ // match: (MULS (MOVWconst [1]) x a)
+ // result: (RSB x a)
+ for {
+ if v_0.Op != OpARMMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ a := v_2
+ v.reset(OpARMRSB)
+ v.AddArg2(x, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c))] x) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v0.AddArg(x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c-1) && c >= 3
+ // result: (RSB (ADDshiftLL <x.Type> x x [int32(log32(c-1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c - 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: isPowerOfTwo32(c+1) && c >= 7
+ // result: (RSB (RSBshiftLL <x.Type> x x [int32(log32(c+1))]) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(isPowerOfTwo32(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c + 1)))
+ v0.AddArg2(x, x)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%3 == 0 && isPowerOfTwo32(c/3)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/3))] (ADDshiftLL <x.Type> x x [1])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%3 == 0 && isPowerOfTwo32(c/3)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 3)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%5 == 0 && isPowerOfTwo32(c/5)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/5))] (ADDshiftLL <x.Type> x x [2])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%5 == 0 && isPowerOfTwo32(c/5)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 5)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%7 == 0 && isPowerOfTwo32(c/7)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/7))] (RSBshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%7 == 0 && isPowerOfTwo32(c/7)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 7)))
+ v1 := b.NewValue0(v.Pos, OpARMRSBshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) x a)
+ // cond: c%9 == 0 && isPowerOfTwo32(c/9)
+ // result: (RSB (SLLconst <x.Type> [int32(log32(c/9))] (ADDshiftLL <x.Type> x x [3])) a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ a := v_2
+ if !(c%9 == 0 && isPowerOfTwo32(c/9)) {
+ break
+ }
+ v.reset(OpARMRSB)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(int32(log32(c / 9)))
+ v1 := b.NewValue0(v.Pos, OpARMADDshiftLL, x.Type)
+ v1.AuxInt = int32ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, a)
+ return true
+ }
+ // match: (MULS (MOVWconst [c]) (MOVWconst [d]) a)
+ // result: (SUBconst [c*d] a)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ a := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (MOVWconst [c]))
+ // result: (MOVWconst [^c])
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN (SLLconst [c] x))
+ // result: (MVNshiftLL x [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRLconst [c] x))
+ // result: (MVNshiftRL x [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SRAconst [c] x))
+ // result: (MVNshiftRA x [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MVN (SLL x y))
+ // result: (MVNshiftLLreg x y)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftLLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRL x y))
+ // result: (MVNshiftRLreg x y)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRLreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (SRA x y))
+ // result: (MVNshiftRAreg x y)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARMMVNshiftRAreg)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^(c<<uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftLLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftLLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftLL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVWconst [c]) [d])
+ // result: (MOVWconst [^(int32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(int32(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRAreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRAreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRA x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVWconst [c]) [d])
+ // result: (MOVWconst [^int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(^int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMMVNshiftRLreg(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MVNshiftRLreg x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (MVNshiftRL x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMMVNshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGD (MULD x y))
+ // cond: buildcfg.GOARM >= 6
+ // result: (NMULD x y)
+ for {
+ if v_0.Op != OpARMMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNEGF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGF (MULF x y))
+ // cond: buildcfg.GOARM >= 6
+ // result: (NMULF x y)
+ for {
+ if v_0.Op != OpARMMULF {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMNMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULD (NEGD x) y)
+ // result: (MULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMNMULF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NMULF (NEGF x) y)
+ // result: (MULF x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARMNEGF {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARMMULF)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMNotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVWconst [b2i32(fc.ne())])
+ for {
+ if v_0.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARMInvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARMNotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLLconst [c] y))
+ // result: (ORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRLconst [c] y))
+ // result: (ORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRAconst [c] y))
+ // result: (ORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SLL y z))
+ // result: (ORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRL y z))
+ // result: (ORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x (SRA y z))
+ // result: (ORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVWconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVWconst [c]) [d])
+ // result: (ORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSLLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftLLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVWconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRAconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRAreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVWconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVWconst [c]) [d])
+ // result: (ORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [ c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ y := v_0
+ if y.Op != OpARMSRLconst || auxIntToInt32(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRLreg (MOVWconst [c]) x y)
+ // result: (ORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (ORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSB (MOVWconst [c]) x)
+ // result: (SUBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (MOVWconst [c]))
+ // result: (RSBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSB x (SLLconst [c] y))
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SLLconst [c] y) x)
+ // result: (SUBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRLconst [c] y))
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRLconst [c] y) x)
+ // result: (SUBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SRAconst [c] y))
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB (SRAconst [c] y) x)
+ // result: (SUBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (RSB x (SLL y z))
+ // result: (RSBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SLL y z) x)
+ // result: (SUBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRL y z))
+ // result: (RSBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRL y z) x)
+ // result: (SUBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x (SRA y z))
+ // result: (RSBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB (SRA y z) x)
+ // result: (SUBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (RSB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (RSB (MUL x y) a)
+ // cond: buildcfg.GOARM == 7
+ // result: (MULS x y a)
+ for {
+ if v_0.Op != OpARMMUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ if !(buildcfg.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RSBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c-d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (RSBconst [c] (RSBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (ADDconst [d] x))
+ // result: (RSBconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBconst [c] (SUBconst [d] x))
+ // result: (RSBconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftLLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRA (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRA x (MOVWconst [c]) [d])
+ // result: (RSBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRAreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRL (MOVWconst [c]) x [d])
+ // result: (SUBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRL x (MOVWconst [c]) [d])
+ // result: (RSBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RSBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSBshiftRLreg (MOVWconst [c]) x y)
+ // result: (SUBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (RSBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (RSBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RSCconst [c] (ADDconst [d] x) flags)
+ // result: (RSCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (RSCconst [c] (SUBconst [d] x) flags)
+ // result: (RSCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (SBCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (RSCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMRSCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RSCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (SBCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (RSCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
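+// rewriteValueARM_OpARMSBC folds the operands of a subtract-with-carry: a
+// constant operand becomes SBCconst (or RSCconst when the constant is on the
+// left), and a shifted operand folds into one of the SBCshift*/RSCshift*
+// forms so the shift is done by the same instruction.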
+func rewriteValueARM_OpARMSBC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBC (MOVWconst [c]) x flags)
+ // result: (RSCconst [c] x flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBC x (MOVWconst [c]) flags)
+ // result: (SBCconst [c] x flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBC x (SLLconst [c] y) flags)
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SLLconst [c] y) x flags)
+ // result: (RSCshiftLL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRLconst [c] y) flags)
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRLconst [c] y) x flags)
+ // result: (RSCshiftRL x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SRAconst [c] y) flags)
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC (SRAconst [c] y) x flags)
+ // result: (RSCshiftRA x y [c] flags)
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ // match: (SBC x (SLL y z) flags)
+ // result: (SBCshiftLLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SLL y z) x flags)
+ // result: (RSCshiftLLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftLLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRL y z) flags)
+ // result: (SBCshiftRLreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRL y z) x flags)
+ // result: (RSCshiftRLreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRLreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC x (SRA y z) flags)
+ // result: (SBCshiftRAreg x y z flags)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ flags := v_2
+ v.reset(OpARMSBCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ // match: (SBC (SRA y z) x flags)
+ // result: (RSCshiftRAreg x y z flags)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCshiftRAreg)
+ v.AddArg4(x, y, z, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SBCconst [c] (ADDconst [d] x) flags)
+ // result: (SBCconst [c-d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ // match: (SBCconst [c] (SUBconst [d] x) flags)
+ // result: (SBCconst [c+d] x flags)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ flags := v_1
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SLLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c<<uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftLLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftLLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SLL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftLLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftLL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRA (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRAconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRA x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [c>>uint64(d)] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRAreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRAreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRA <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRAreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRA x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRL (MOVWconst [c]) x [d] flags)
+ // result: (RSCconst [c] (SRLconst <x.Type> x [d]) flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ flags := v_2
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRL x (MOVWconst [c]) [d] flags)
+ // result: (SBCconst x [int32(uint32(c)>>uint64(d))] flags)
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ flags := v_2
+ v.reset(OpARMSBCconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg2(x, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSBCshiftRLreg(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SBCshiftRLreg (MOVWconst [c]) x y flags)
+ // result: (RSCconst [c] (SRL <x.Type> x y) flags)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ flags := v_3
+ v.reset(OpARMRSCconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, flags)
+ return true
+ }
+ // match: (SBCshiftRLreg x y (MOVWconst [c]) flags)
+ // cond: 0 <= c && c < 32
+ // result: (SBCshiftRL x y [c] flags)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ flags := v_3
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSBCshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, flags)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SLLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRAconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
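+// rewriteValueARM_OpARMSRAcond resolves the flag-conditional arithmetic shift
+// once its flags argument is a known FlagConstant: an unsigned greater-or-equal
+// result pins the shift amount at 31, otherwise the plain SRA is used.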
+func rewriteValueARM_OpARMSRAcond(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAcond x _ (FlagConstant [fc]))
+ // cond: fc.uge()
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAcond x y (FlagConstant [fc]))
+ // cond: fc.ult()
+ // result: (SRA x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMFlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_2.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ v.reset(OpARMSRA)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst (SLLconst x [c]) [d])
+ // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFX [(d-c)|(32-d)<<8] x)
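+	// The BFX auxint packs the extracted field's start bit (d-c) in its low
+	// byte and the field width (32-d) in the byte above it.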
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFX)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SRLconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint64(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst (SLLconst x [c]) [d])
+ // cond: buildcfg.GOARM==7 && uint64(d)>=uint64(c) && uint64(d)<=31
+ // result: (BFXU [(d-c)|(32-d)<<8] x)
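+	// As for BFX above, the low byte of the auxint holds the start bit (d-c)
+	// and the next byte holds the unsigned field width (32-d).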
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(buildcfg.GOARM == 7 && uint64(d) >= uint64(c) && uint64(d) <= 31) {
+ break
+ }
+ v.reset(OpARMBFXU)
+ v.AuxInt = int32ToAuxInt((d - c) | (32-d)<<8)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRR x (MOVWconst [c]))
+ // result: (SRRconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
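+// rewriteValueARM_OpARMSUB strength-reduces SUB: a constant operand folds into
+// SUBconst (or RSBconst when the constant is the minuend), shifted operands
+// fold into the SUBshift*/RSBshift* forms, x-x becomes 0, and on GOARM 7 a
+// subtracted multiply becomes the fused MULS.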
+func rewriteValueARM_OpARMSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB (MOVWconst [c]) x)
+ // result: (RSBconst [c] x)
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (SLLconst [c] y))
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SLLconst [c] y) x)
+ // result: (RSBshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRLconst [c] y))
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRLconst [c] y) x)
+ // result: (RSBshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SRAconst [c] y))
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB (SRAconst [c] y) x)
+ // result: (RSBshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUB x (SLL y z))
+ // result: (SUBshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SLL y z) x)
+ // result: (RSBshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRL y z))
+ // result: (SUBshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRL y z) x)
+ // result: (RSBshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x (SRA y z))
+ // result: (SUBshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB (SRA y z) x)
+ // result: (RSBshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB a (MUL x y))
+ // cond: buildcfg.GOARM == 7
+ // result: (MULS x y a)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMUL {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(buildcfg.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMMULS)
+ v.AddArg3(x, y, a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBD a (MULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULSD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULSD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBD a (NMULD x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULAD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULAD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBF(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBF a (MULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULSF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULSF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUBF a (NMULF x y))
+ // cond: a.Uses == 1 && buildcfg.GOARM >= 6
+ // result: (MULAF a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARMNMULF {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(a.Uses == 1 && buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMMULAF)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBS x (MOVWconst [c]))
+ // result: (SUBSconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBS x (SLLconst [c] y))
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SLLconst [c] y) x)
+ // result: (RSBSshiftLL x y [c])
+ for {
+ if v_0.Op != OpARMSLLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRLconst [c] y))
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRLconst [c] y) x)
+ // result: (RSBSshiftRL x y [c])
+ for {
+ if v_0.Op != OpARMSRLconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SRAconst [c] y))
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS (SRAconst [c] y) x)
+ // result: (RSBSshiftRA x y [c])
+ for {
+ if v_0.Op != OpARMSRAconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBS x (SLL y z))
+ // result: (SUBSshiftLLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SLL y z) x)
+ // result: (RSBSshiftLLreg x y z)
+ for {
+ if v_0.Op != OpARMSLL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRL y z))
+ // result: (SUBSshiftRLreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRL y z) x)
+ // result: (RSBSshiftRLreg x y z)
+ for {
+ if v_0.Op != OpARMSRL {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS x (SRA y z))
+ // result: (SUBSshiftRAreg x y z)
+ for {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMSUBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (SUBS (SRA y z) x)
+ // result: (RSBSshiftRAreg x y z)
+ for {
+ if v_0.Op != OpARMSRA {
+ break
+ }
+ z := v_0.Args[1]
+ y := v_0.Args[0]
+ x := v_1
+ v.reset(OpARMRSBSshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBSconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBSconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBSconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBSshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBSshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBSconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBSconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBSshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBSshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBSshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
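+// rewriteValueARM_OpARMSUBconst simplifies SUB-immediate: it folds the offset
+// into MOVWaddr, drops a zero immediate, does constant folding, merges with an
+// adjacent ADDconst/SUBconst/RSBconst, and, when c is not encodable but -c is
+// (as a rotated immediate, or as a 16-bit immediate on GOARM 7), rewrites
+// SUB #c as ADD #-c.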
+func rewriteValueARM_OpARMSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off2-off1] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off2 - off1)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: !isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(!isARMImmRot(uint32(c)) && isARMImmRot(uint32(-c))) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: buildcfg.GOARM==7 && !isARMImmRot(uint32(c)) && uint32(c)>0xffff && uint32(-c)<=0xffff
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(buildcfg.GOARM == 7 && !isARMImmRot(uint32(c)) && uint32(c) > 0xffff && uint32(-c) <= 0xffff) {
+ break
+ }
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (RSBconst [d] x))
+ // result: (RSBconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMRSBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftLLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRA (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRA x (MOVWconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRAreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRL (MOVWconst [c]) x [d])
+ // result: (RSBconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRL x (MOVWconst [c]) [d])
+ // result: (SUBconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMSUBshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBshiftRLreg (MOVWconst [c]) x y)
+ // result: (RSBconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (SUBshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMSUBshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
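+// rewriteValueARM_OpARMTEQ folds TEQ's operands. TEQ is commutative, so each
+// generated loop tries both argument orders (the _i0 swap); a constant operand
+// becomes TEQconst and shifted operands become the TEQshift* forms.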
+func rewriteValueARM_OpARMTEQ(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TEQ x (MOVWconst [c]))
+ // result: (TEQconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLLconst [c] y))
+ // result: (TEQshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRLconst [c] y))
+ // result: (TEQshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRAconst [c] y))
+ // result: (TEQshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SLL y z))
+ // result: (TEQshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRL y z))
+ // result: (TEQshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TEQ x (SRA y z))
+ // result: (TEQshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTEQshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TEQconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x^y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x ^ y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftLLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRA (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRA x (MOVWconst [c]) [d])
+ // result: (TEQconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRAreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRL (MOVWconst [c]) x [d])
+ // result: (TEQconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRL x (MOVWconst [c]) [d])
+ // result: (TEQconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTEQshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TEQshiftRLreg (MOVWconst [c]) x y)
+ // result: (TEQconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTEQconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TEQshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TEQshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTEQshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVWconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLLconst [c] y))
+ // result: (TSTshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRLconst [c] y))
+ // result: (TSTshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRAconst [c] y))
+ // result: (TSTshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SLL y z))
+ // result: (TSTshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRL y z))
+ // result: (TSTshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (TST x (SRA y z))
+ // result: (TSTshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMTSTshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVWconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(x&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMFlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVWconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRAreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVWconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVWconst [c]) [d])
+ // result: (TSTconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMTSTshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRLreg (MOVWconst [c]) x y)
+ // result: (TSTconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMTSTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (TSTshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMTSTshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
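+// XOR folds a constant operand into XORconst and absorbs shifted operands
+// into the XORshift* forms; x^x collapses to the constant 0.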
+func rewriteValueARM_OpARMXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLLconst [c] y))
+ // result: (XORshiftLL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRLconst [c] y))
+ // result: (XORshiftRL x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRLconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRAconst [c] y))
+ // result: (XORshiftRA x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRAconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRRconst [c] y))
+ // result: (XORshiftRR x y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRRconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRR)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SLL y z))
+ // result: (XORshiftLLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSLL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftLLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRL y z))
+ // result: (XORshiftRLreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRL {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRLreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (SRA y z))
+ // result: (XORshiftRAreg x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARMSRA {
+ continue
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARMXORshiftRAreg)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVWconst [c]) [d])
+ // result: (XORconst x [c<<uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ v.AddArg(x)
+ return true
+ }
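+ // The next rule recognizes a rotate: x>>(32-c) ^ x<<c is x rotated
+ // right by 32-c bits, which ARM expresses as a single SRRconst.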
+ // match: (XORshiftLL [c] (SRLconst x [32-c]) x)
+ // result: (SRRconst [32-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
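+ // The two rules below recognize a 16-bit byte swap (exchange of the two
+ // low bytes) and lower it to REV16; the second form needs GOARM>=6,
+ // where REV16 is available.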
+ // match: (XORshiftLL <typ.UInt16> [8] (BFXU <typ.UInt16> [int32(armBFAuxInt(8, 8))] x) x)
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMBFXU || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != int32(armBFAuxInt(8, 8)) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (SRLconst <typ.UInt16> [24] (SLLconst [16] x)) x)
+ // cond: buildcfg.GOARM>=6
+ // result: (REV16 x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt32(v.AuxInt) != 8 || v_0.Op != OpARMSRLconst || v_0.Type != typ.UInt16 || auxIntToInt32(v_0.AuxInt) != 24 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMSLLconst || auxIntToInt32(v_0_0.AuxInt) != 16 {
+ break
+ }
+ x := v_0_0.Args[0]
+ if x != v_1 || !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftLLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftLLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SLL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftLL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftLL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRAconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVWconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRAconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRAreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRAreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRA <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRA, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRAreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRA x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRA)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL [c] (SLLconst x [32-c]) x)
+ // result: (SRRconst [c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSLLconst || auxIntToInt32(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMSRRconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMSRLconst || auxIntToInt32(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRLreg(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRLreg (MOVWconst [c]) x y)
+ // result: (XORconst [c] (SRL <x.Type> x y))
+ for {
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRLreg x y (MOVWconst [c]))
+ // cond: 0 <= c && c < 32
+ // result: (XORshiftRL x y [c])
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(0 <= c && c < 32) {
+ break
+ }
+ v.reset(OpARMXORshiftRL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpARMXORshiftRR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRR (MOVWconst [c]) x [d])
+ // result: (XORconst [c] (SRRconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARMSRRconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRR x (MOVWconst [c]) [d])
+ // result: (XORconst x [int32(uint32(c)>>uint64(d)|uint32(c)<<uint64(32-d))])
+ for {
+ d := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c)>>uint64(d) | uint32(c)<<uint64(32-d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueARM_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARMADD)
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARMSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueARM_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (BitLen32 <t> x)
+ // result: (RSBconst [32] (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
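+// Bswap32 has no single-instruction form before ARMv6: on GOARM==5 it is
+// open-coded with shifts, rotates, a BIC mask and XORs; on GOARM>=6 it
+// lowers to a single REV.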
+func rewriteValueARM_OpBswap32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Bswap32 <t> x)
+ // cond: buildcfg.GOARM==5
+ // result: (XOR <t> (SRLconst <t> (BICconst <t> (XOR <t> x (SRRconst <t> [16] x)) [0xff0000]) [8]) (SRRconst <t> x [8]))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM == 5) {
+ break
+ }
+ v.reset(OpARMXOR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpARMBICconst, t)
+ v1.AuxInt = int32ToAuxInt(0xff0000)
+ v2 := b.NewValue0(v.Pos, OpARMXOR, t)
+ v3 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v3.AuxInt = int32ToAuxInt(16)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpARMSRRconst, t)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg(x)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Bswap32 x)
+ // cond: buildcfg.GOARM>=6
+ // result: (REV x)
+ for {
+ x := v_0
+ if !(buildcfg.GOARM >= 6) {
+ break
+ }
+ v.reset(OpARMREV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARMMOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARMMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueARM_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVWconst [b2i32(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(t))
+ return true
+ }
+}
+func rewriteValueARM_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
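+// The Ctz* rewrites compute count-trailing-zeros. Without RBIT (GOARM<=6),
+// x&-x isolates the lowest set bit, subtracting 1 turns it into a mask of
+// the trailing zeros, and 32-CLZ of that mask is the count; ORing in 0x100
+// (Ctz8) or 0x10000 (Ctz16) first makes a zero input yield 8 or 16.
+// With RBIT (GOARM==7), Ctz is simply CLZ of the bit-reversed value.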
+func rewriteValueARM_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // cond: buildcfg.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x10000] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x10000)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz16 <t> x)
+ // cond: buildcfg.GOARM==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // cond: buildcfg.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <t> (AND <t> x (RSBconst <t> [0] x)) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, t)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, t)
+ v3 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg(x)
+ v2.AddArg2(x, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 <t> x)
+ // cond: buildcfg.GOARM==7
+ // result: (CLZ <t> (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // cond: buildcfg.GOARM<=6
+ // result: (RSBconst [32] (CLZ <t> (SUBconst <typ.UInt32> (AND <typ.UInt32> (ORconst <typ.UInt32> [0x100] x) (RSBconst <typ.UInt32> [0] (ORconst <typ.UInt32> [0x100] x))) [1])))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM <= 6) {
+ break
+ }
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARMCLZ, t)
+ v1 := b.NewValue0(v.Pos, OpARMSUBconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMAND, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0x100)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpARMRSBconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(0)
+ v4.AddArg(v3)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz8 <t> x)
+ // cond: buildcfg.GOARM==7
+ // result: (CLZ <t> (RBIT <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ if !(buildcfg.GOARM == 7) {
+ break
+ }
+ v.reset(OpARMCLZ)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARMRBIT, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARMORconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Div32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Div32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
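+// Div32 lowers signed division to the runtime's unsigned divide helper
+// (CALLudiv) applied to the operands' absolute values, computed as
+// (x^Signmask(x))-Signmask(x), with the quotient's sign restored from
+// Signmask(x^y).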
+func rewriteValueARM_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select0 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR x <typ.UInt32> (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR y <typ.UInt32> (Signmask y)) (Signmask y)))) (Signmask (XOR <typ.UInt32> x y))) (Signmask (XOR <typ.UInt32> x y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v9 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v10 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v10.AddArg2(x, y)
+ v9.AddArg(v10)
+ v0.AddArg2(v1, v9)
+ v.AddArg2(v0, v9)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select0 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Div32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Div32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpDiv32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMULAD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARMFMULAD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueARM_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (GreaterEqual (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (GreaterEqual (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMP (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThanU (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (GreaterThan (CMPF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (GreaterThan (CMPD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMGreaterThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMP (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThan)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThanU (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMLessThanU)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
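+// Load selects the ARM load of the appropriate width, signedness and
+// register class (integer vs. floating point) for the loaded type.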
+func rewriteValueARM_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARMMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARMMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARMMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARMMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
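+// The variable-count left shifts guard SLL with CMOVWHSconst, which forces
+// the result to 0 once the (zero-extended) count reaches 256; ARM register
+// shifts only use the low byte of the count, so 8-bit counts need no guard
+// and use SLL directly.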
+func rewriteValueARM_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 x y)
+ // result: (CMOVWHSconst (SLL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSLL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // result: (SLL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSLL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
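+// Mod32 reuses the CALLudiv lowering but takes the remainder (Select1);
+// the remainder carries the sign of the dividend, hence the Signmask(x)
+// fixup on the result.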
+func rewriteValueARM_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (SUB (XOR <typ.UInt32> (Select1 <typ.UInt32> (CALLudiv (SUB <typ.UInt32> (XOR <typ.UInt32> x (Signmask x)) (Signmask x)) (SUB <typ.UInt32> (XOR <typ.UInt32> y (Signmask y)) (Signmask y)))) (Signmask x)) (Signmask x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSUB)
+ v0 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v3 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v5.AddArg(x)
+ v4.AddArg2(x, v5)
+ v3.AddArg2(v4, v5)
+ v6 := b.NewValue0(v.Pos, OpARMSUB, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpARMXOR, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v8.AddArg(y)
+ v7.AddArg2(y, v8)
+ v6.AddArg2(v7, v8)
+ v2.AddArg2(v3, v6)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v5)
+ v.AddArg2(v0, v5)
+ return true
+ }
+}
+func rewriteValueARM_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select1 <typ.UInt32> (CALLudiv x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARMCALLudiv, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
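+// Move expands small fixed-size copies into loads and stores sized by the
+// type's alignment, uses the Duff's-device copy (DUFFCOPY) for 4-byte-aligned
+// copies of 8..512 bytes, and otherwise falls back to the generic
+// LoweredMove loop.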
+func rewriteValueARM_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARMMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARMMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (128 - s/4)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMDUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/4))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: ((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)
+ // result: (LoweredMove [t.Alignment()] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARMLoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg16 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg8 x)
+ // result: (RSBconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpARMRSBconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMP (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (CMPF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (CMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMP (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMNotEqual)
+ v0 := b.NewValue0(v.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpARMXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpARMMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARMADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARMLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (SRR x (RSBconst [0] <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRR)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, y.Type)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt16to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // result: (SRL (ZeroExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRAcond (SignExt16to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRAcond (SignExt16to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> x y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // result: (SRL x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRAcond x (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(v0)
+ v.AddArg3(x, v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // result: (SRAcond x y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(256)
+ v0.AddArg(y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) (ZeroExt16to32 y)) (CMPconst [256] (ZeroExt16to32 y)) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(256)
+ v3.AddArg(v2)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // result: (CMOVWHSconst (SRL <x.Type> (ZeroExt8to32 x) y) (CMPconst [256] y) [0])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMCMOVWHSconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARMSRL, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(y)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // result: (SRL (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRL)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRAcond (SignExt8to32 x) (ZeroExt16to32 y) (CMPconst [256] (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(256)
+ v2.AddArg(v1)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRAcond (SignExt8to32 x) y (CMPconst [256] y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRAcond)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARMCMPconst, types.TypeFlags)
+ v1.AuxInt = int32ToAuxInt(256)
+ v1.AddArg(y)
+ v.AddArg3(v0, y, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARMSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (CALLudiv x (MOVWconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (SRLconst [int32(log32(c))] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(log32(c)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (CALLudiv _ (MOVWconst [1])))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst || auxIntToInt32(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Select1 (CALLudiv x (MOVWconst [c])))
+ // cond: isPowerOfTwo32(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpARMANDconst)
+ v.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (CALLudiv (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARMCALLudiv {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARMMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueARM_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (RSBconst <t> [0] x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBconst, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARMMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARMMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpARMMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpARMMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARMMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpARMMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (128 - s/4)] ptr (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%4 == 0 && s > 4 && s <= 512 && t.Alignment()%4 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARMDUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (128 - s/4))
+ v0 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) (MOVWconst [0]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 512 || config.noDuffDevice) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpARMLoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpARMADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpARMMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg4(ptr, v0, v1, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (SRAconst (RSBshiftRL <typ.Int32> x x [1]) [31])
+ for {
+ x := v_0
+ v.reset(OpARMSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpARMRSBshiftRL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockARM(b *Block) bool {
+ switch b.Kind {
+ case BlockARMEQ:
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cmp)
+ return true
+ }
+ // match: (EQ (CMP x (RSBconst [0] y)))
+ // result: (EQ (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMN x (RSBconst [0] y)))
+ // result: (EQ (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (EQ (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMEQ, v0)
+ return true
+ }
+ case BlockARMGE:
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cmp)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGEnoov, v0)
+ return true
+ }
+ case BlockARMGEnoov:
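+		// GEnoov behaves like GE but does not take the overflow (V) flag into account.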
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLEnoov, cmp)
+ return true
+ }
+ case BlockARMGT:
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cmp)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (GTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMGTnoov, v0)
+ return true
+ }
+ case BlockARMGTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMLTnoov, cmp)
+ return true
+ }
+ case BlockIf:
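+		// Lower a generic If block to the ARM conditional block matching its flag-producing
+		// control value; a plain boolean control is handled by the final rule below,
+		// which compares it against zero.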
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMNotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMLessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMLessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMGreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMGreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPconst [0] cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMLE:
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGE, cmp)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LEnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLEnoov, v0)
+ return true
+ }
+ case BlockARMLEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGEnoov, cmp)
+ return true
+ }
+ case BlockARMLT:
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGT, cmp)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (LTnoov (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMLTnoov, v0)
+ return true
+ }
+ case BlockARMLTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMGTnoov, cmp)
+ return true
+ }
+ case BlockARMNE:
+ // match: (NE (CMPconst [0] (Equal cc)) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMEQ, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (NotEqual cc)) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMNotEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMNE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThan cc)) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessThanU cc)) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqual cc)) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMLE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (LessEqualU cc)) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMLessEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMULE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThan cc)) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThan {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterThanU cc)) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterThanU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqual cc)) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqual {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMGE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (GreaterEqualU cc)) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARMGreaterEqualU {
+ break
+ }
+ cc := v_0_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cc)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMNE, cmp)
+ return true
+ }
+ // match: (NE (CMP x (RSBconst [0] y)))
+ // result: (NE (CMN x y))
+ for b.Controls[0].Op == OpARMCMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ break
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMN x (RSBconst [0] y)))
+ // result: (NE (CMP x y))
+ for b.Controls[0].Op == OpARMCMN {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpARMRSBconst || auxIntToInt32(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(SUB x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUB {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(MULS x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULS {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(SUBshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMPshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMSUBshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMPshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADD x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADD {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(MULA x y a)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMMULA {
+ break
+ }
+ a := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARMMUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ADDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (CMNshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMADDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMCMNshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(AND x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMAND {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(ANDconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(ANDshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TSTshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMANDshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTSTshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XOR x y)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQ x y) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXOR {
+ break
+ }
+ _ = l.Args[1]
+ l_0 := l.Args[0]
+ l_1 := l.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, l_0, l_1 = _i0+1, l_1, l_0 {
+ x := l_0
+ y := l_1
+ if !(l.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQ, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] l:(XORconst [c] x)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQconst [c] x) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORconst {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRL x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRL x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRL {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRL, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRA x y [c])) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRA x y [c]) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRA {
+ break
+ }
+ c := auxIntToInt32(l.AuxInt)
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRA, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftLLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftLLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftLLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftLLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRLreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRLreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRLreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRLreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] l:(XORshiftRAreg x y z)) yes no)
+ // cond: l.Uses==1
+ // result: (NE (TEQshiftRAreg x y z) yes no)
+ for b.Controls[0].Op == OpARMCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ l := v_0.Args[0]
+ if l.Op != OpARMXORshiftRAreg {
+ break
+ }
+ z := l.Args[2]
+ x := l.Args[0]
+ y := l.Args[1]
+ if !(l.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARMTEQshiftRAreg, types.TypeFlags)
+ v0.AddArg3(x, y, z)
+ b.resetWithControl(BlockARMNE, v0)
+ return true
+ }
+ case BlockARMUGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULE, cmp)
+ return true
+ }
+ case BlockARMUGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMULT, cmp)
+ return true
+ }
+ case BlockARMULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGE, cmp)
+ return true
+ }
+ case BlockARMULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARMFlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARMInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARMUGT, cmp)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteARM64.go b/src/cmd/compile/internal/ssa/rewriteARM64.go
new file mode 100644
index 0000000..ad1052f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteARM64.go
@@ -0,0 +1,30569 @@
+// Code generated from gen/ARM64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueARM64(v *Value) bool {
+ switch v.Op {
+ case OpARM64ADCSflags:
+ return rewriteValueARM64_OpARM64ADCSflags(v)
+ case OpARM64ADD:
+ return rewriteValueARM64_OpARM64ADD(v)
+ case OpARM64ADDconst:
+ return rewriteValueARM64_OpARM64ADDconst(v)
+ case OpARM64ADDshiftLL:
+ return rewriteValueARM64_OpARM64ADDshiftLL(v)
+ case OpARM64ADDshiftRA:
+ return rewriteValueARM64_OpARM64ADDshiftRA(v)
+ case OpARM64ADDshiftRL:
+ return rewriteValueARM64_OpARM64ADDshiftRL(v)
+ case OpARM64AND:
+ return rewriteValueARM64_OpARM64AND(v)
+ case OpARM64ANDconst:
+ return rewriteValueARM64_OpARM64ANDconst(v)
+ case OpARM64ANDshiftLL:
+ return rewriteValueARM64_OpARM64ANDshiftLL(v)
+ case OpARM64ANDshiftRA:
+ return rewriteValueARM64_OpARM64ANDshiftRA(v)
+ case OpARM64ANDshiftRL:
+ return rewriteValueARM64_OpARM64ANDshiftRL(v)
+ case OpARM64ANDshiftRO:
+ return rewriteValueARM64_OpARM64ANDshiftRO(v)
+ case OpARM64BIC:
+ return rewriteValueARM64_OpARM64BIC(v)
+ case OpARM64BICshiftLL:
+ return rewriteValueARM64_OpARM64BICshiftLL(v)
+ case OpARM64BICshiftRA:
+ return rewriteValueARM64_OpARM64BICshiftRA(v)
+ case OpARM64BICshiftRL:
+ return rewriteValueARM64_OpARM64BICshiftRL(v)
+ case OpARM64BICshiftRO:
+ return rewriteValueARM64_OpARM64BICshiftRO(v)
+ case OpARM64CMN:
+ return rewriteValueARM64_OpARM64CMN(v)
+ case OpARM64CMNW:
+ return rewriteValueARM64_OpARM64CMNW(v)
+ case OpARM64CMNWconst:
+ return rewriteValueARM64_OpARM64CMNWconst(v)
+ case OpARM64CMNconst:
+ return rewriteValueARM64_OpARM64CMNconst(v)
+ case OpARM64CMNshiftLL:
+ return rewriteValueARM64_OpARM64CMNshiftLL(v)
+ case OpARM64CMNshiftRA:
+ return rewriteValueARM64_OpARM64CMNshiftRA(v)
+ case OpARM64CMNshiftRL:
+ return rewriteValueARM64_OpARM64CMNshiftRL(v)
+ case OpARM64CMP:
+ return rewriteValueARM64_OpARM64CMP(v)
+ case OpARM64CMPW:
+ return rewriteValueARM64_OpARM64CMPW(v)
+ case OpARM64CMPWconst:
+ return rewriteValueARM64_OpARM64CMPWconst(v)
+ case OpARM64CMPconst:
+ return rewriteValueARM64_OpARM64CMPconst(v)
+ case OpARM64CMPshiftLL:
+ return rewriteValueARM64_OpARM64CMPshiftLL(v)
+ case OpARM64CMPshiftRA:
+ return rewriteValueARM64_OpARM64CMPshiftRA(v)
+ case OpARM64CMPshiftRL:
+ return rewriteValueARM64_OpARM64CMPshiftRL(v)
+ case OpARM64CSEL:
+ return rewriteValueARM64_OpARM64CSEL(v)
+ case OpARM64CSEL0:
+ return rewriteValueARM64_OpARM64CSEL0(v)
+ case OpARM64CSETM:
+ return rewriteValueARM64_OpARM64CSETM(v)
+ case OpARM64CSINC:
+ return rewriteValueARM64_OpARM64CSINC(v)
+ case OpARM64CSINV:
+ return rewriteValueARM64_OpARM64CSINV(v)
+ case OpARM64CSNEG:
+ return rewriteValueARM64_OpARM64CSNEG(v)
+ case OpARM64DIV:
+ return rewriteValueARM64_OpARM64DIV(v)
+ case OpARM64DIVW:
+ return rewriteValueARM64_OpARM64DIVW(v)
+ case OpARM64EON:
+ return rewriteValueARM64_OpARM64EON(v)
+ case OpARM64EONshiftLL:
+ return rewriteValueARM64_OpARM64EONshiftLL(v)
+ case OpARM64EONshiftRA:
+ return rewriteValueARM64_OpARM64EONshiftRA(v)
+ case OpARM64EONshiftRL:
+ return rewriteValueARM64_OpARM64EONshiftRL(v)
+ case OpARM64EONshiftRO:
+ return rewriteValueARM64_OpARM64EONshiftRO(v)
+ case OpARM64Equal:
+ return rewriteValueARM64_OpARM64Equal(v)
+ case OpARM64FADDD:
+ return rewriteValueARM64_OpARM64FADDD(v)
+ case OpARM64FADDS:
+ return rewriteValueARM64_OpARM64FADDS(v)
+ case OpARM64FCMPD:
+ return rewriteValueARM64_OpARM64FCMPD(v)
+ case OpARM64FCMPS:
+ return rewriteValueARM64_OpARM64FCMPS(v)
+ case OpARM64FMOVDfpgp:
+ return rewriteValueARM64_OpARM64FMOVDfpgp(v)
+ case OpARM64FMOVDgpfp:
+ return rewriteValueARM64_OpARM64FMOVDgpfp(v)
+ case OpARM64FMOVDload:
+ return rewriteValueARM64_OpARM64FMOVDload(v)
+ case OpARM64FMOVDloadidx:
+ return rewriteValueARM64_OpARM64FMOVDloadidx(v)
+ case OpARM64FMOVDloadidx8:
+ return rewriteValueARM64_OpARM64FMOVDloadidx8(v)
+ case OpARM64FMOVDstore:
+ return rewriteValueARM64_OpARM64FMOVDstore(v)
+ case OpARM64FMOVDstoreidx:
+ return rewriteValueARM64_OpARM64FMOVDstoreidx(v)
+ case OpARM64FMOVDstoreidx8:
+ return rewriteValueARM64_OpARM64FMOVDstoreidx8(v)
+ case OpARM64FMOVSload:
+ return rewriteValueARM64_OpARM64FMOVSload(v)
+ case OpARM64FMOVSloadidx:
+ return rewriteValueARM64_OpARM64FMOVSloadidx(v)
+ case OpARM64FMOVSloadidx4:
+ return rewriteValueARM64_OpARM64FMOVSloadidx4(v)
+ case OpARM64FMOVSstore:
+ return rewriteValueARM64_OpARM64FMOVSstore(v)
+ case OpARM64FMOVSstoreidx:
+ return rewriteValueARM64_OpARM64FMOVSstoreidx(v)
+ case OpARM64FMOVSstoreidx4:
+ return rewriteValueARM64_OpARM64FMOVSstoreidx4(v)
+ case OpARM64FMULD:
+ return rewriteValueARM64_OpARM64FMULD(v)
+ case OpARM64FMULS:
+ return rewriteValueARM64_OpARM64FMULS(v)
+ case OpARM64FNEGD:
+ return rewriteValueARM64_OpARM64FNEGD(v)
+ case OpARM64FNEGS:
+ return rewriteValueARM64_OpARM64FNEGS(v)
+ case OpARM64FNMULD:
+ return rewriteValueARM64_OpARM64FNMULD(v)
+ case OpARM64FNMULS:
+ return rewriteValueARM64_OpARM64FNMULS(v)
+ case OpARM64FSUBD:
+ return rewriteValueARM64_OpARM64FSUBD(v)
+ case OpARM64FSUBS:
+ return rewriteValueARM64_OpARM64FSUBS(v)
+ case OpARM64GreaterEqual:
+ return rewriteValueARM64_OpARM64GreaterEqual(v)
+ case OpARM64GreaterEqualF:
+ return rewriteValueARM64_OpARM64GreaterEqualF(v)
+ case OpARM64GreaterEqualU:
+ return rewriteValueARM64_OpARM64GreaterEqualU(v)
+ case OpARM64GreaterThan:
+ return rewriteValueARM64_OpARM64GreaterThan(v)
+ case OpARM64GreaterThanF:
+ return rewriteValueARM64_OpARM64GreaterThanF(v)
+ case OpARM64GreaterThanU:
+ return rewriteValueARM64_OpARM64GreaterThanU(v)
+ case OpARM64LessEqual:
+ return rewriteValueARM64_OpARM64LessEqual(v)
+ case OpARM64LessEqualF:
+ return rewriteValueARM64_OpARM64LessEqualF(v)
+ case OpARM64LessEqualU:
+ return rewriteValueARM64_OpARM64LessEqualU(v)
+ case OpARM64LessThan:
+ return rewriteValueARM64_OpARM64LessThan(v)
+ case OpARM64LessThanF:
+ return rewriteValueARM64_OpARM64LessThanF(v)
+ case OpARM64LessThanU:
+ return rewriteValueARM64_OpARM64LessThanU(v)
+ case OpARM64MADD:
+ return rewriteValueARM64_OpARM64MADD(v)
+ case OpARM64MADDW:
+ return rewriteValueARM64_OpARM64MADDW(v)
+ case OpARM64MNEG:
+ return rewriteValueARM64_OpARM64MNEG(v)
+ case OpARM64MNEGW:
+ return rewriteValueARM64_OpARM64MNEGW(v)
+ case OpARM64MOD:
+ return rewriteValueARM64_OpARM64MOD(v)
+ case OpARM64MODW:
+ return rewriteValueARM64_OpARM64MODW(v)
+ case OpARM64MOVBUload:
+ return rewriteValueARM64_OpARM64MOVBUload(v)
+ case OpARM64MOVBUloadidx:
+ return rewriteValueARM64_OpARM64MOVBUloadidx(v)
+ case OpARM64MOVBUreg:
+ return rewriteValueARM64_OpARM64MOVBUreg(v)
+ case OpARM64MOVBload:
+ return rewriteValueARM64_OpARM64MOVBload(v)
+ case OpARM64MOVBloadidx:
+ return rewriteValueARM64_OpARM64MOVBloadidx(v)
+ case OpARM64MOVBreg:
+ return rewriteValueARM64_OpARM64MOVBreg(v)
+ case OpARM64MOVBstore:
+ return rewriteValueARM64_OpARM64MOVBstore(v)
+ case OpARM64MOVBstoreidx:
+ return rewriteValueARM64_OpARM64MOVBstoreidx(v)
+ case OpARM64MOVBstorezero:
+ return rewriteValueARM64_OpARM64MOVBstorezero(v)
+ case OpARM64MOVBstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVBstorezeroidx(v)
+ case OpARM64MOVDload:
+ return rewriteValueARM64_OpARM64MOVDload(v)
+ case OpARM64MOVDloadidx:
+ return rewriteValueARM64_OpARM64MOVDloadidx(v)
+ case OpARM64MOVDloadidx8:
+ return rewriteValueARM64_OpARM64MOVDloadidx8(v)
+ case OpARM64MOVDnop:
+ return rewriteValueARM64_OpARM64MOVDnop(v)
+ case OpARM64MOVDreg:
+ return rewriteValueARM64_OpARM64MOVDreg(v)
+ case OpARM64MOVDstore:
+ return rewriteValueARM64_OpARM64MOVDstore(v)
+ case OpARM64MOVDstoreidx:
+ return rewriteValueARM64_OpARM64MOVDstoreidx(v)
+ case OpARM64MOVDstoreidx8:
+ return rewriteValueARM64_OpARM64MOVDstoreidx8(v)
+ case OpARM64MOVDstorezero:
+ return rewriteValueARM64_OpARM64MOVDstorezero(v)
+ case OpARM64MOVDstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx(v)
+ case OpARM64MOVDstorezeroidx8:
+ return rewriteValueARM64_OpARM64MOVDstorezeroidx8(v)
+ case OpARM64MOVHUload:
+ return rewriteValueARM64_OpARM64MOVHUload(v)
+ case OpARM64MOVHUloadidx:
+ return rewriteValueARM64_OpARM64MOVHUloadidx(v)
+ case OpARM64MOVHUloadidx2:
+ return rewriteValueARM64_OpARM64MOVHUloadidx2(v)
+ case OpARM64MOVHUreg:
+ return rewriteValueARM64_OpARM64MOVHUreg(v)
+ case OpARM64MOVHload:
+ return rewriteValueARM64_OpARM64MOVHload(v)
+ case OpARM64MOVHloadidx:
+ return rewriteValueARM64_OpARM64MOVHloadidx(v)
+ case OpARM64MOVHloadidx2:
+ return rewriteValueARM64_OpARM64MOVHloadidx2(v)
+ case OpARM64MOVHreg:
+ return rewriteValueARM64_OpARM64MOVHreg(v)
+ case OpARM64MOVHstore:
+ return rewriteValueARM64_OpARM64MOVHstore(v)
+ case OpARM64MOVHstoreidx:
+ return rewriteValueARM64_OpARM64MOVHstoreidx(v)
+ case OpARM64MOVHstoreidx2:
+ return rewriteValueARM64_OpARM64MOVHstoreidx2(v)
+ case OpARM64MOVHstorezero:
+ return rewriteValueARM64_OpARM64MOVHstorezero(v)
+ case OpARM64MOVHstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx(v)
+ case OpARM64MOVHstorezeroidx2:
+ return rewriteValueARM64_OpARM64MOVHstorezeroidx2(v)
+ case OpARM64MOVQstorezero:
+ return rewriteValueARM64_OpARM64MOVQstorezero(v)
+ case OpARM64MOVWUload:
+ return rewriteValueARM64_OpARM64MOVWUload(v)
+ case OpARM64MOVWUloadidx:
+ return rewriteValueARM64_OpARM64MOVWUloadidx(v)
+ case OpARM64MOVWUloadidx4:
+ return rewriteValueARM64_OpARM64MOVWUloadidx4(v)
+ case OpARM64MOVWUreg:
+ return rewriteValueARM64_OpARM64MOVWUreg(v)
+ case OpARM64MOVWload:
+ return rewriteValueARM64_OpARM64MOVWload(v)
+ case OpARM64MOVWloadidx:
+ return rewriteValueARM64_OpARM64MOVWloadidx(v)
+ case OpARM64MOVWloadidx4:
+ return rewriteValueARM64_OpARM64MOVWloadidx4(v)
+ case OpARM64MOVWreg:
+ return rewriteValueARM64_OpARM64MOVWreg(v)
+ case OpARM64MOVWstore:
+ return rewriteValueARM64_OpARM64MOVWstore(v)
+ case OpARM64MOVWstoreidx:
+ return rewriteValueARM64_OpARM64MOVWstoreidx(v)
+ case OpARM64MOVWstoreidx4:
+ return rewriteValueARM64_OpARM64MOVWstoreidx4(v)
+ case OpARM64MOVWstorezero:
+ return rewriteValueARM64_OpARM64MOVWstorezero(v)
+ case OpARM64MOVWstorezeroidx:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx(v)
+ case OpARM64MOVWstorezeroidx4:
+ return rewriteValueARM64_OpARM64MOVWstorezeroidx4(v)
+ case OpARM64MSUB:
+ return rewriteValueARM64_OpARM64MSUB(v)
+ case OpARM64MSUBW:
+ return rewriteValueARM64_OpARM64MSUBW(v)
+ case OpARM64MUL:
+ return rewriteValueARM64_OpARM64MUL(v)
+ case OpARM64MULW:
+ return rewriteValueARM64_OpARM64MULW(v)
+ case OpARM64MVN:
+ return rewriteValueARM64_OpARM64MVN(v)
+ case OpARM64MVNshiftLL:
+ return rewriteValueARM64_OpARM64MVNshiftLL(v)
+ case OpARM64MVNshiftRA:
+ return rewriteValueARM64_OpARM64MVNshiftRA(v)
+ case OpARM64MVNshiftRL:
+ return rewriteValueARM64_OpARM64MVNshiftRL(v)
+ case OpARM64MVNshiftRO:
+ return rewriteValueARM64_OpARM64MVNshiftRO(v)
+ case OpARM64NEG:
+ return rewriteValueARM64_OpARM64NEG(v)
+ case OpARM64NEGshiftLL:
+ return rewriteValueARM64_OpARM64NEGshiftLL(v)
+ case OpARM64NEGshiftRA:
+ return rewriteValueARM64_OpARM64NEGshiftRA(v)
+ case OpARM64NEGshiftRL:
+ return rewriteValueARM64_OpARM64NEGshiftRL(v)
+ case OpARM64NotEqual:
+ return rewriteValueARM64_OpARM64NotEqual(v)
+ case OpARM64OR:
+ return rewriteValueARM64_OpARM64OR(v)
+ case OpARM64ORN:
+ return rewriteValueARM64_OpARM64ORN(v)
+ case OpARM64ORNshiftLL:
+ return rewriteValueARM64_OpARM64ORNshiftLL(v)
+ case OpARM64ORNshiftRA:
+ return rewriteValueARM64_OpARM64ORNshiftRA(v)
+ case OpARM64ORNshiftRL:
+ return rewriteValueARM64_OpARM64ORNshiftRL(v)
+ case OpARM64ORNshiftRO:
+ return rewriteValueARM64_OpARM64ORNshiftRO(v)
+ case OpARM64ORconst:
+ return rewriteValueARM64_OpARM64ORconst(v)
+ case OpARM64ORshiftLL:
+ return rewriteValueARM64_OpARM64ORshiftLL(v)
+ case OpARM64ORshiftRA:
+ return rewriteValueARM64_OpARM64ORshiftRA(v)
+ case OpARM64ORshiftRL:
+ return rewriteValueARM64_OpARM64ORshiftRL(v)
+ case OpARM64ORshiftRO:
+ return rewriteValueARM64_OpARM64ORshiftRO(v)
+ case OpARM64REV:
+ return rewriteValueARM64_OpARM64REV(v)
+ case OpARM64REVW:
+ return rewriteValueARM64_OpARM64REVW(v)
+ case OpARM64ROR:
+ return rewriteValueARM64_OpARM64ROR(v)
+ case OpARM64RORW:
+ return rewriteValueARM64_OpARM64RORW(v)
+ case OpARM64RORWconst:
+ return rewriteValueARM64_OpARM64RORWconst(v)
+ case OpARM64RORconst:
+ return rewriteValueARM64_OpARM64RORconst(v)
+ case OpARM64SBCSflags:
+ return rewriteValueARM64_OpARM64SBCSflags(v)
+ case OpARM64SLL:
+ return rewriteValueARM64_OpARM64SLL(v)
+ case OpARM64SLLconst:
+ return rewriteValueARM64_OpARM64SLLconst(v)
+ case OpARM64SRA:
+ return rewriteValueARM64_OpARM64SRA(v)
+ case OpARM64SRAconst:
+ return rewriteValueARM64_OpARM64SRAconst(v)
+ case OpARM64SRL:
+ return rewriteValueARM64_OpARM64SRL(v)
+ case OpARM64SRLconst:
+ return rewriteValueARM64_OpARM64SRLconst(v)
+ case OpARM64STP:
+ return rewriteValueARM64_OpARM64STP(v)
+ case OpARM64SUB:
+ return rewriteValueARM64_OpARM64SUB(v)
+ case OpARM64SUBconst:
+ return rewriteValueARM64_OpARM64SUBconst(v)
+ case OpARM64SUBshiftLL:
+ return rewriteValueARM64_OpARM64SUBshiftLL(v)
+ case OpARM64SUBshiftRA:
+ return rewriteValueARM64_OpARM64SUBshiftRA(v)
+ case OpARM64SUBshiftRL:
+ return rewriteValueARM64_OpARM64SUBshiftRL(v)
+ case OpARM64TST:
+ return rewriteValueARM64_OpARM64TST(v)
+ case OpARM64TSTW:
+ return rewriteValueARM64_OpARM64TSTW(v)
+ case OpARM64TSTWconst:
+ return rewriteValueARM64_OpARM64TSTWconst(v)
+ case OpARM64TSTconst:
+ return rewriteValueARM64_OpARM64TSTconst(v)
+ case OpARM64TSTshiftLL:
+ return rewriteValueARM64_OpARM64TSTshiftLL(v)
+ case OpARM64TSTshiftRA:
+ return rewriteValueARM64_OpARM64TSTshiftRA(v)
+ case OpARM64TSTshiftRL:
+ return rewriteValueARM64_OpARM64TSTshiftRL(v)
+ case OpARM64TSTshiftRO:
+ return rewriteValueARM64_OpARM64TSTshiftRO(v)
+ case OpARM64UBFIZ:
+ return rewriteValueARM64_OpARM64UBFIZ(v)
+ case OpARM64UBFX:
+ return rewriteValueARM64_OpARM64UBFX(v)
+ case OpARM64UDIV:
+ return rewriteValueARM64_OpARM64UDIV(v)
+ case OpARM64UDIVW:
+ return rewriteValueARM64_OpARM64UDIVW(v)
+ case OpARM64UMOD:
+ return rewriteValueARM64_OpARM64UMOD(v)
+ case OpARM64UMODW:
+ return rewriteValueARM64_OpARM64UMODW(v)
+ case OpARM64XOR:
+ return rewriteValueARM64_OpARM64XOR(v)
+ case OpARM64XORconst:
+ return rewriteValueARM64_OpARM64XORconst(v)
+ case OpARM64XORshiftLL:
+ return rewriteValueARM64_OpARM64XORshiftLL(v)
+ case OpARM64XORshiftRA:
+ return rewriteValueARM64_OpARM64XORshiftRA(v)
+ case OpARM64XORshiftRL:
+ return rewriteValueARM64_OpARM64XORshiftRL(v)
+ case OpARM64XORshiftRO:
+ return rewriteValueARM64_OpARM64XORshiftRO(v)
+ case OpAbs:
+ v.Op = OpARM64FABSD
+ return true
+ case OpAdd16:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpARM64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpARM64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpARM64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpARM64ADD
+ return true
+ case OpAddr:
+ return rewriteValueARM64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd32:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd64:
+ v.Op = OpARM64AND
+ return true
+ case OpAnd8:
+ v.Op = OpARM64AND
+ return true
+ case OpAndB:
+ v.Op = OpARM64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpARM64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd32Variant:
+ v.Op = OpARM64LoweredAtomicAdd32Variant
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpARM64LoweredAtomicAdd64
+ return true
+ case OpAtomicAdd64Variant:
+ v.Op = OpARM64LoweredAtomicAdd64Variant
+ return true
+ case OpAtomicAnd32:
+ return rewriteValueARM64_OpAtomicAnd32(v)
+ case OpAtomicAnd32Variant:
+ return rewriteValueARM64_OpAtomicAnd32Variant(v)
+ case OpAtomicAnd8:
+ return rewriteValueARM64_OpAtomicAnd8(v)
+ case OpAtomicAnd8Variant:
+ return rewriteValueARM64_OpAtomicAnd8Variant(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpARM64LoweredAtomicCas32
+ return true
+ case OpAtomicCompareAndSwap32Variant:
+ v.Op = OpARM64LoweredAtomicCas32Variant
+ return true
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpARM64LoweredAtomicCas64
+ return true
+ case OpAtomicCompareAndSwap64Variant:
+ v.Op = OpARM64LoweredAtomicCas64Variant
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpARM64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange32Variant:
+ v.Op = OpARM64LoweredAtomicExchange32Variant
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpARM64LoweredAtomicExchange64
+ return true
+ case OpAtomicExchange64Variant:
+ v.Op = OpARM64LoweredAtomicExchange64Variant
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpARM64LDARW
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpARM64LDARB
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpARM64LDAR
+ return true
+ case OpAtomicOr32:
+ return rewriteValueARM64_OpAtomicOr32(v)
+ case OpAtomicOr32Variant:
+ return rewriteValueARM64_OpAtomicOr32Variant(v)
+ case OpAtomicOr8:
+ return rewriteValueARM64_OpAtomicOr8(v)
+ case OpAtomicOr8Variant:
+ return rewriteValueARM64_OpAtomicOr8Variant(v)
+ case OpAtomicStore32:
+ v.Op = OpARM64STLRW
+ return true
+ case OpAtomicStore64:
+ v.Op = OpARM64STLR
+ return true
+ case OpAtomicStore8:
+ v.Op = OpARM64STLRB
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpARM64STLR
+ return true
+ case OpAvg64u:
+ return rewriteValueARM64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValueARM64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValueARM64_OpBitLen64(v)
+ case OpBitRev16:
+ return rewriteValueARM64_OpBitRev16(v)
+ case OpBitRev32:
+ v.Op = OpARM64RBITW
+ return true
+ case OpBitRev64:
+ v.Op = OpARM64RBIT
+ return true
+ case OpBitRev8:
+ return rewriteValueARM64_OpBitRev8(v)
+ case OpBswap32:
+ v.Op = OpARM64REVW
+ return true
+ case OpBswap64:
+ v.Op = OpARM64REV
+ return true
+ case OpCeil:
+ v.Op = OpARM64FRINTPD
+ return true
+ case OpClosureCall:
+ v.Op = OpARM64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom32:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom64:
+ v.Op = OpARM64MVN
+ return true
+ case OpCom8:
+ v.Op = OpARM64MVN
+ return true
+ case OpCondSelect:
+ return rewriteValueARM64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValueARM64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueARM64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueARM64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueARM64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueARM64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueARM64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueARM64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueARM64_OpConstNil(v)
+ case OpCtz16:
+ return rewriteValueARM64_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz32:
+ return rewriteValueARM64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueARM64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValueARM64_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpARM64FCVTZSSW
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpARM64FCVTZUSW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpARM64FCVTZSS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpARM64FCVTSD
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpARM64FCVTZUS
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpARM64UCVTFWS
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpARM64UCVTFWD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpARM64SCVTFWS
+ return true
+ case OpCvt32to64F:
+ v.Op = OpARM64SCVTFWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpARM64FCVTZSDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpARM64FCVTDS
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpARM64FCVTZUDW
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpARM64FCVTZSD
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpARM64FCVTZUD
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpARM64UCVTFS
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpARM64UCVTFD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpARM64SCVTFS
+ return true
+ case OpCvt64to64F:
+ v.Op = OpARM64SCVTFD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueARM64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueARM64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueARM64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpARM64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpARM64UDIVW
+ return true
+ case OpDiv64:
+ return rewriteValueARM64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpARM64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpARM64UDIV
+ return true
+ case OpDiv8:
+ return rewriteValueARM64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueARM64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueARM64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueARM64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueARM64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueARM64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueARM64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueARM64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueARM64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueARM64_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueARM64_OpFMA(v)
+ case OpFloor:
+ v.Op = OpARM64FRINTMD
+ return true
+ case OpGetCallerPC:
+ v.Op = OpARM64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpARM64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpARM64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueARM64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueARM64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpARM64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpARM64UMULH
+ return true
+ case OpInterCall:
+ v.Op = OpARM64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueARM64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueARM64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueARM64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueARM64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueARM64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueARM64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueARM64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueARM64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueARM64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueARM64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueARM64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueARM64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueARM64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueARM64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueARM64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueARM64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueARM64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueARM64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueARM64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueARM64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueARM64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueARM64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueARM64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueARM64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueARM64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueARM64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueARM64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueARM64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueARM64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueARM64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueARM64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueARM64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueARM64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueARM64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueARM64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueARM64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueARM64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueARM64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueARM64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueARM64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueARM64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueARM64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueARM64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueARM64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpARM64UMODW
+ return true
+ case OpMod64:
+ return rewriteValueARM64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpARM64UMOD
+ return true
+ case OpMod8:
+ return rewriteValueARM64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueARM64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueARM64_OpMove(v)
+ case OpMul16:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32:
+ v.Op = OpARM64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpARM64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpARM64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpARM64FMULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpARM64LoweredMuluhilo
+ return true
+ case OpMul8:
+ v.Op = OpARM64MULW
+ return true
+ case OpNeg16:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpARM64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpARM64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpARM64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueARM64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueARM64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueARM64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueARM64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueARM64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueARM64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpARM64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueARM64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpARM64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueARM64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueARM64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpARM64OR
+ return true
+ case OpOr32:
+ v.Op = OpARM64OR
+ return true
+ case OpOr64:
+ v.Op = OpARM64OR
+ return true
+ case OpOr8:
+ v.Op = OpARM64OR
+ return true
+ case OpOrB:
+ v.Op = OpARM64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueARM64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueARM64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueARM64_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueARM64_OpPopCount64(v)
+ case OpPrefetchCache:
+ return rewriteValueARM64_OpPrefetchCache(v)
+ case OpPrefetchCacheStreamed:
+ return rewriteValueARM64_OpPrefetchCacheStreamed(v)
+ case OpPubBarrier:
+ return rewriteValueARM64_OpPubBarrier(v)
+ case OpRotateLeft16:
+ return rewriteValueARM64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueARM64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueARM64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueARM64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpARM64FRINTAD
+ return true
+ case OpRound32F:
+ v.Op = OpARM64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpARM64LoweredRound64F
+ return true
+ case OpRoundToEven:
+ v.Op = OpARM64FRINTND
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueARM64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueARM64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueARM64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueARM64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueARM64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueARM64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueARM64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueARM64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueARM64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueARM64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueARM64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueARM64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueARM64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueARM64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueARM64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueARM64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueARM64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueARM64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueARM64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueARM64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueARM64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueARM64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueARM64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueARM64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueARM64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueARM64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueARM64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueARM64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueARM64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueARM64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueARM64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueARM64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueARM64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueARM64_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValueARM64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpARM64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpARM64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpARM64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueARM64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpARM64FSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpARM64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpARM64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueARM64_OpStore(v)
+ case OpSub16:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpARM64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpARM64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpARM64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpARM64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpARM64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpARM64CALLtail
+ return true
+ case OpTrunc:
+ v.Op = OpARM64FRINTZD
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpARM64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor32:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor64:
+ v.Op = OpARM64XOR
+ return true
+ case OpXor8:
+ v.Op = OpARM64XOR
+ return true
+ case OpZero:
+ return rewriteValueARM64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpARM64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpARM64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpARM64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpARM64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (ADCzerocarry <typ.UInt64> c))))
+ // result: (ADCSflags x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64ADCzerocarry || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ c := v_2_0_0.Args[0]
+ v.reset(OpARM64ADCSflags)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ // match: (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] (MOVDconst [0]))))
+ // result: (ADDSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64ADDSconstflags || auxIntToInt64(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64ADDSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADD x (MOVDconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MULW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD a l:(MNEGW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64NEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ADDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVDaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVDaddr [int32(off1)+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADDshiftLL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ADDshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRA (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRA x (MOVDconst [c]) [d])
+ // result: (ADDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ADDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDshiftRL (MOVDconst [c]) x [d])
+ // result: (ADDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ADDshiftRL x (MOVDconst [c]) [d])
+ // result: (ADDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL [c] (SLLconst x [64-c]) x)
+ // result: (RORconst [ c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVDconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND x (MVN y))
+ // result: (BIC x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64BIC)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (AND x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ANDshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ANDshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWUreg x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHUreg x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBUreg x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SLLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [ac] (SRLconst [sc] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ ac := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftLL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRA (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRL (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ANDshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDshiftRO (MOVDconst [c]) x [d])
+ // result: (ANDconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ANDshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BIC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BIC x (MOVDconst [c]))
+ // result: (ANDconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (BIC x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (BIC x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (BIC x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (BICshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64BICshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftLL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRA x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRL x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64BICshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (BICshiftRO x (MOVDconst [c]) [d])
+ // result: (ANDconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (BICshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMN x (MOVDconst [c]))
+ // result: (CMNconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (CMN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMNshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64CMNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMNW x (MOVDconst [c]))
+ // result: (CMNWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags32(int32(x), y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMNconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [addFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(addFlags64(x, y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftLL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftLL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRA (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRA x (MOVDconst [c]) [d])
+ // result: (CMNconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMNshiftRL (MOVDconst [c]) x [d])
+ // result: (CMNconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMNshiftRL x (MOVDconst [c]) [d])
+ // result: (CMNconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMNconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // result: (CMPconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPconst [c] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SLLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftLL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftLL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRLconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRL x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRL, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (CMPshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64CMPshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (CMP x0:(SRAconst [c] y) x1)
+ // cond: clobberIfDead(x0)
+ // result: (InvertFlags (CMPshiftRA x1 y [c]))
+ for {
+ x0 := v_0
+ if x0.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x0.AuxInt)
+ y := x0.Args[0]
+ x1 := v_1
+ if !(clobberIfDead(x0)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPshiftRA, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x1, y)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst [int32(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst [int32(c)] x))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags32(int32(x),y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags32(int32(x), y))
+ return true
+ }
+ // match: (CMPWconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPWconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [subFlags64(x,y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(x, y))
+ return true
+ }
+ // match: (CMPconst (MOVBUreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVHUreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (MOVWUreg _) [c])
+ // cond: 0xffffffff < c
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg || !(0xffffffff < c) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (ANDconst _ [m]) [n])
+ // cond: 0 <= m && m < n
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < n) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ // match: (CMPconst (SRLconst _ [c]) [n])
+ // cond: 0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)
+ // result: (FlagConstant [subFlags64(0,1)])
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= n && 0 < c && c <= 63 && (1<<uint64(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(subFlags64(0, 1))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftLL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SLLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftLL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRA (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRAconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRA x (MOVDconst [c]) [d])
+ // result: (CMPconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CMPshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPshiftRL (MOVDconst [c]) x [d])
+ // result: (InvertFlags (CMPconst [c] (SRLconst <x.Type> x [d])))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v1 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v1.AuxInt = int64ToAuxInt(d)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPshiftRL x (MOVDconst [c]) [d])
+ // result: (CMPconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64CMPconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSEL [cc] (MOVDconst [-1]) (MOVDconst [0]) flag)
+ // result: (CSETM [cc] flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != -1 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg(flag)
+ return true
+ }
+ // match: (CSEL [cc] (MOVDconst [0]) (MOVDconst [-1]) flag)
+ // result: (CSETM [arm64Negate(cc)] flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg(flag)
+ return true
+ }
+ // match: (CSEL [cc] x (MOVDconst [0]) flag)
+ // result: (CSEL0 [cc] x flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg2(x, flag)
+ return true
+ }
+ // match: (CSEL [cc] (MOVDconst [0]) y flag)
+ // result: (CSEL0 [arm64Negate(cc)] y flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ flag := v_2
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg2(y, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (ADDconst [1] a) flag)
+ // result: (CSINC [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (ADDconst [1] a) x flag)
+ // result: (CSINC [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (MVN a) flag)
+ // result: (CSINV [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (MVN a) x flag)
+ // result: (CSINV [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64MVN {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x (NEG a) flag)
+ // result: (CSNEG [cc] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64NEG {
+ break
+ }
+ a := v_1.Args[0]
+ flag := v_2
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(cc)
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] (NEG a) x flag)
+ // result: (CSNEG [arm64Negate(cc)] x a flag)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ a := v_0.Args[0]
+ x := v_1
+ flag := v_2
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(arm64Negate(cc))
+ v.AddArg3(x, a, flag)
+ return true
+ }
+ // match: (CSEL [cc] x y (InvertFlags cmp))
+ // result: (CSEL [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSEL [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: y
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL [cc] x y (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL [arm64Negate(boolval.Op)] x y flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64CMPWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ boolval := v_2.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSEL0(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSEL0 [cc] x (InvertFlags cmp))
+ // result: (CSEL0 [arm64Invert(cc)] x cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_1.Args[0]
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg2(x, cmp)
+ return true
+ }
+ // match: (CSEL0 [cc] x flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSEL0 [cc] _ flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (MOVDconst [0])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_1
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64NotEqual && flagArg(boolval) != nil
+ // result: (CSEL0 [boolval.Op] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64NotEqual && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ // match: (CSEL0 [cc] x (CMPWconst [0] boolval))
+ // cond: cc == OpARM64Equal && flagArg(boolval) != nil
+ // result: (CSEL0 [arm64Negate(boolval.Op)] x flagArg(boolval))
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64CMPWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ boolval := v_1.Args[0]
+ if !(cc == OpARM64Equal && flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL0)
+ v.AuxInt = opToAuxInt(arm64Negate(boolval.Op))
+ v.AddArg2(x, flagArg(boolval))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSETM(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CSETM [cc] (InvertFlags cmp))
+ // result: (CSETM [arm64Invert(cc)] cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_0.Args[0]
+ v.reset(OpARM64CSETM)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg(cmp)
+ return true
+ }
+ // match: (CSETM [cc] flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: (MOVDconst [-1])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_0
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (CSETM [cc] flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (MOVDconst [0])
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ flag := v_0
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSINC(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSINC [cc] x y (InvertFlags cmp))
+ // result: (CSINC [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSINC)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSINC [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSINC [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (ADDconst [1] y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSINV(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSINV [cc] x y (InvertFlags cmp))
+ // result: (CSINV [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSINV)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSINV [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSINV [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (Not y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpNot)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64CSNEG(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CSNEG [cc] x y (InvertFlags cmp))
+ // result: (CSNEG [arm64Invert(cc)] x y cmp)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpARM64InvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpARM64CSNEG)
+ v.AuxInt = opToAuxInt(arm64Invert(cc))
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (CSNEG [cc] x _ flag)
+ // cond: ccARM64Eval(cc, flag) > 0
+ // result: x
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ x := v_0
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) > 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (CSNEG [cc] _ y flag)
+ // cond: ccARM64Eval(cc, flag) < 0
+ // result: (NEG y)
+ for {
+ cc := auxIntToOp(v.AuxInt)
+ y := v_1
+ flag := v_2
+ if !(ccARM64Eval(cc, flag) < 0) {
+ break
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c/d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64DIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (DIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(int32(c)/int32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) / int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EON(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EON x (MOVDconst [c]))
+ // result: (XORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (EON x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (EON x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (EON x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (EONshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64EONshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64EONshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EONshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (EONshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Equal (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.eq())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.eq()))
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64Equal)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDD a (FMULD x y))
+ // result: (FMADDD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDD a (FNMULD x y))
+ // result: (FMSUBD a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS a (FMULS x y))
+ // result: (FMADDS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ // match: (FADDS a (FNMULS x y))
+ // result: (FMSUBS a x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ continue
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPD x (FMOVDconst [0]))
+ // result: (FCMPD0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPD0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPD (FMOVDconst [0]) x)
+ // result: (InvertFlags (FCMPD0 x))
+ for {
+ if v_0.Op != OpARM64FMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0]))
+ // result: (FCMPS0 x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64FMOVSconst || auxIntToFloat64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64FCMPS0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0]) x)
+ // result: (InvertFlags (FCMPS0 x))
+ for {
+ if v_0.Op != OpARM64FMOVSconst || auxIntToFloat64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpARM64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS0, types.TypeFlags)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDfpgp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDfpgp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDgpfp(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FMOVDgpfp <t> (Arg [off] {sym}))
+ // result: @b.Func.Entry (Arg <t> [off] {sym})
+ for {
+ t := v.Type
+ if v_0.Op != OpArg {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ b = b.Func.Entry
+ v0 := b.NewValue0(v.Pos, OpArg, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDloadidx ptr (SLLconst [3] idx) mem)
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVDloadidx (SLLconst [3] idx) ptr mem)
+ // result: (FMOVDloadidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64FMOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDloadidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (FMOVDload ptr [int32(c)<<3] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVDstore [off] {sym} ptr (FMOVDgpfp val) mem)
+ // result: (MOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx ptr (SLLconst [3] idx) val mem)
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVDstoreidx (SLLconst [3] idx) ptr val mem)
+ // result: (FMOVDstoreidx8 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVDstoreidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstoreidx8 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<3)
+ // result: (FMOVDstore [int32(c)<<3] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSload [off] {sym} ptr (MOVWstore [off] {sym} ptr val _))
+ // result: (FMOVSgpfp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSgpfp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSloadidx ptr (SLLconst [2] idx) mem)
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (FMOVSloadidx (SLLconst [2] idx) ptr mem)
+ // result: (FMOVSloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64FMOVSloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (FMOVSload ptr [int32(c)<<2] mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (FMOVSstore [off] {sym} ptr (FMOVSgpfp val) mem)
+ // result: (MOVWstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSgpfp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (FMOVSstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx ptr (SLLconst [2] idx) val mem)
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (FMOVSstoreidx (SLLconst [2] idx) ptr val mem)
+ // result: (FMOVSstoreidx4 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64FMOVSstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMOVSstoreidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstoreidx4 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<2)
+ // result: (FMOVSstore [int32(c)<<2] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULD (FNEGD x) y)
+ // result: (FNMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMULS (FNEGS x) y)
+ // result: (FNMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGD(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGD (FMULD x y))
+ // result: (FNMULD x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGD (FNMULD x y))
+ // result: (FMULD x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (FMULS x y))
+ // result: (FNMULS x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FNMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (FNEGS (FNMULS x y))
+ // result: (FMULS x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULD (FNEGD x) y)
+ // result: (FMULD x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGD {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULD)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FNMULS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMULS (FNEGS x) y)
+ // result: (FMULS x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64FNEGS {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64FMULS)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBD a (FMULD x y))
+ // result: (FMSUBD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FMULD x y) a)
+ // result: (FNMSUBD a x y)
+ for {
+ if v_0.Op != OpARM64FMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMSUBD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD a (FNMULD x y))
+ // result: (FMADDD a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBD (FNMULD x y) a)
+ // result: (FNMADDD a x y)
+ for {
+ if v_0.Op != OpARM64FNMULD {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMADDD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS a (FMULS x y))
+ // result: (FMSUBS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FMULS x y) a)
+ // result: (FNMSUBS a x y)
+ for {
+ if v_0.Op != OpARM64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMSUBS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS a (FNMULS x y))
+ // result: (FMADDS a x y)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_1.Args[1]
+ x := v_1.Args[0]
+ v.reset(OpARM64FMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (FSUBS (FNMULS x y) a)
+ // result: (FNMADDS a x y)
+ for {
+ if v_0.Op != OpARM64FNMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ a := v_1
+ v.reset(OpARM64FNMADDS)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ge()))
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualF (InvertFlags x))
+ // result: (LessEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.uge())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.uge()))
+ return true
+ }
+ // match: (GreaterEqualU (InvertFlags x))
+ // result: (LessEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.gt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.gt()))
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanF (InvertFlags x))
+ // result: (LessThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64GreaterThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (GreaterThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ugt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ugt()))
+ return true
+ }
+ // match: (GreaterThanU (InvertFlags x))
+ // result: (LessThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64LessThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.le())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.le()))
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualF (InvertFlags x))
+ // result: (GreaterEqualF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessEqualU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessEqualU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ule())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ule()))
+ return true
+ }
+ // match: (LessEqualU (InvertFlags x))
+ // result: (GreaterEqualU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterEqualU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThan (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.lt())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.lt()))
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanF(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanF (InvertFlags x))
+ // result: (GreaterThanF x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64LessThanU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LessThanU (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ult())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ult()))
+ return true
+ }
+ // match: (LessThanU (InvertFlags x))
+ // result: (GreaterThanU x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64GreaterThanU)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADD a x (MOVDconst [-1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [-1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADD a (MOVDconst [1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADD (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MUL <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MUL, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADD a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (ADDconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MADDW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (ADDshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (ADD a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (SUB a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SUBshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (ADDshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MADDW (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MULW <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MULW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MADDW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (ADDconst [int64(int32(c)*int32(d))] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEG x (MOVDconst [-1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEG (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [-c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MNEGW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (NEG (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (NEG (ADDshiftLL <x.Type> (NEG <x.Type> x) x [log64(c+1)]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SLLconst <x.Type> [log64(c/3)] (SUBshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c/5)] (ADDshiftLL <x.Type> x x [2])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 5))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SLLconst <x.Type> [log64(c/7)] (SUBshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.Type = x.Type
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (NEG (SLLconst <x.Type> [log64(c/9)] (ADDshiftLL <x.Type> x x [3])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c / 9))
+ v1 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v1.AuxInt = int64ToAuxInt(3)
+ v1.AddArg2(x, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MNEGW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [-int64(int32(c)*int32(d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(int32(c) * int32(d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [c%d])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(int32(c)%int32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) % int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<8-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<8 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x:(Equal _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64Equal {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(NotEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64NotEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThan _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThan {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThanU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThanU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessThanF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessThanF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqualU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqualU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(LessEqualF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64LessEqualF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThan _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThan {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThanU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThanU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterThanF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterThanF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqual _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqual {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqualU _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqualU {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(GreaterEqualF _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64GreaterEqualF {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVBUreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (UBFIZ [armBFAuxInt(lc, 8-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (SRLconst [rc] x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 8
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVBload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBloadidx ptr idx (MOVBstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg (SLLconst [lc] x))
+ // cond: lc < 8
+ // result: (SBFIZ [armBFAuxInt(lc, 8-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 8-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 8
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 8) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 8)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 8) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(8, 24)] w) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(8, 24) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [8] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [8] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (UBFX [bfc] w) x:(MOVBstore [i-1] {s} ptr1 w0:(UBFX [bfc2] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64UBFX {
+ break
+ }
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (UBFX [bfc] w) x:(MOVBstoreidx ptr1 idx1 w0:(UBFX [bfc2] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32 - bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32 - bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb() - 8 && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64UBFX {
+ continue
+ }
+ bfc2 := auxIntToArm64BitField(w0.AuxInt)
+ if w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && bfc.getARM64BFwidth() == 32-bfc.getARM64BFlsb() && bfc2.getARM64BFwidth() == 32-bfc2.getARM64BFlsb() && bfc2.getARM64BFlsb() == bfc.getARM64BFlsb()-8 && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr0 (SRLconst [j] (MOVDreg w)) x:(MOVBstore [i-1] {s} ptr1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
+ break
+ }
+ w0_0 := w0.Args[0]
+ if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr0 idx0) (SRLconst [j] (MOVDreg w)) x:(MOVBstoreidx ptr1 idx1 w0:(SRLconst [j-8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-8 {
+ continue
+ }
+ w0_0 := w0.Args[0]
+ if w0_0.Op != OpARM64MOVDreg || w != w0_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) x3:(MOVBstore [i-4] {s} ptr (SRLconst [32] w) x4:(MOVBstore [i-5] {s} ptr (SRLconst [40] w) x5:(MOVBstore [i-6] {s} ptr (SRLconst [48] w) x6:(MOVBstore [i-7] {s} ptr (SRLconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDstore [i-7] {s} ptr (REV <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != i-4 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != i-5 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != i-6 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ break
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpARM64MOVBstore || auxIntToInt32(x6.AuxInt) != i-7 || auxToSym(x6.Aux) != s {
+ break
+ }
+ mem := x6.Args[2]
+ if ptr != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64SRLconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 7)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x6.Pos, OpARM64REV, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [7] {s} p w x0:(MOVBstore [6] {s} p (SRLconst [8] w) x1:(MOVBstore [5] {s} p (SRLconst [16] w) x2:(MOVBstore [4] {s} p (SRLconst [24] w) x3:(MOVBstore [3] {s} p (SRLconst [32] w) x4:(MOVBstore [2] {s} p (SRLconst [40] w) x5:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [48] w) x6:(MOVBstoreidx ptr0 idx0 (SRLconst [56] w) mem))))))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDstoreidx ptr0 idx0 (REV <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 6 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != 4 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpARM64MOVBstore || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64SRLconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpARM64MOVBstore || auxIntToInt32(x4.AuxInt) != 2 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64SRLconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpARM64MOVBstore || auxIntToInt32(x5.AuxInt) != 1 || auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ p1 := x5.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64SRLconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ continue
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x6.Args[3]
+ ptr0 := x6.Args[0]
+ idx0 := x6.Args[1]
+ x6_2 := x6.Args[2]
+ if x6_2.Op != OpARM64SRLconst || auxIntToInt64(x6_2.AuxInt) != 56 || w != x6_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(x5.Pos, OpARM64REV, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [i-2] {s} ptr (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstore [i-3] {s} ptr (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64UBFX || auxIntToArm64BitField(x2_1.AuxInt) != armBFAuxInt(24, 8) || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64UBFX || auxIntToArm64BitField(x0_1.AuxInt) != armBFAuxInt(8, 24) || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64UBFX || auxIntToArm64BitField(x1_1.AuxInt) != armBFAuxInt(16, 16) || w != x1_1.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] (MOVDreg w)) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] (MOVDreg w)) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
+ break
+ }
+ x0_1_0 := x0_1.Args[0]
+ if x0_1_0.Op != OpARM64MOVDreg || w != x0_1_0.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
+ break
+ }
+ x1_1_0 := x1_1.Args[0]
+ if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 {
+ break
+ }
+ x2_1_0 := x2_1.Args[0]
+ if x2_1_0.Op != OpARM64MOVDreg || w != x2_1_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] (MOVDreg w)) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] (MOVDreg w)) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] (MOVDreg w)) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 {
+ break
+ }
+ x0_1_0 := x0_1.Args[0]
+ if x0_1_0.Op != OpARM64MOVDreg || w != x0_1_0.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 {
+ continue
+ }
+ x1_1_0 := x1_1.Args[0]
+ if x1_1_0.Op != OpARM64MOVDreg || w != x1_1_0.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 {
+ continue
+ }
+ x2_2_0 := x2_2.Args[0]
+ if x2_2_0.Op != OpARM64MOVDreg || w != x2_2_0.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x0:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) x1:(MOVBstore [i-2] {s} ptr (SRLconst [16] w) x2:(MOVBstore [i-3] {s} ptr (SRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstore [i-3] {s} ptr (REVW <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != i-1 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != i-2 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstore || auxIntToInt32(x2.AuxInt) != i-3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64SRLconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 3)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [3] {s} p w x0:(MOVBstore [2] {s} p (SRLconst [8] w) x1:(MOVBstore [1] {s} p1:(ADD ptr1 idx1) (SRLconst [16] w) x2:(MOVBstoreidx ptr0 idx0 (SRLconst [24] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr0 idx0 (REVW <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 3 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpARM64MOVBstore || auxIntToInt32(x0.AuxInt) != 2 || auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64SRLconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpARM64MOVBstore || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64SRLconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ continue
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x2.Args[3]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64SRLconst || auxIntToInt64(x2_2.AuxInt) != 24 || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 8) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (SRLconst [8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64SRLconst || auxIntToInt64(x_1.AuxInt) != 8 {
+ break
+ }
+ x_1_0 := x_1.Args[0]
+ if x_1_0.Op != OpARM64MOVDreg || w != x_1_0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (SRLconst [8] (MOVDreg w)) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64SRLconst || auxIntToInt64(x_2.AuxInt) != 8 {
+ continue
+ }
+ x_2_0 := x_2.Args[0]
+ if x_2_0.Op != OpARM64MOVDreg || w != x_2_0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVBstore [i] {s} ptr w x:(MOVBstore [i-1] {s} ptr (UBFX [armBFAuxInt(8, 24)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} ptr (REV16W <w.Type> w) mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64UBFX || auxIntToArm64BitField(x_1.AuxInt) != armBFAuxInt(8, 24) || w != x_1.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [1] {s} (ADD ptr1 idx1) w x:(MOVBstoreidx ptr0 idx0 (UBFX [armBFAuxInt(8, 24)] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstoreidx ptr0 idx0 (REV16W <w.Type> w) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr1 := v_0_0
+ idx1 := v_0_1
+ w := v_1
+ x := v_2
+ if x.Op != OpARM64MOVBstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr0 := x.Args[0]
+ idx0 := x.Args[1]
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 24) || w != x_2.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr0, idx0, v0, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [1] idx) (SRLconst [8] w) x:(MOVBstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 8 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [3] idx) w x0:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr idx (REVW <w.Type> w) mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ w := v_2
+ x0 := v_3
+ if x0.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x0.Args[3]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 || idx != x0_1.Args[0] {
+ break
+ }
+ x0_2 := x0.Args[2]
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ break
+ }
+ x1 := x0.Args[3]
+ if x1.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x1.Args[3]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] {
+ break
+ }
+ x1_2 := x1.Args[2]
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ break
+ }
+ x2 := x1.Args[3]
+ if x2.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x2.Args[3]
+ if ptr != x2.Args[0] || idx != x2.Args[1] {
+ break
+ }
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx w x0:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 24)] w) x1:(MOVBstoreidx ptr (ADDconst [2] idx) (UBFX [armBFAuxInt(16, 16)] w) x2:(MOVBstoreidx ptr (ADDconst [3] idx) (UBFX [armBFAuxInt(24, 8)] w) mem))))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)
+ // result: (MOVWstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ w := v_2
+ x0 := v_3
+ if x0.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x0.Args[3]
+ if ptr != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 || idx != x0_1.Args[0] {
+ break
+ }
+ x0_2 := x0.Args[2]
+ if x0_2.Op != OpARM64UBFX || auxIntToArm64BitField(x0_2.AuxInt) != armBFAuxInt(8, 24) || w != x0_2.Args[0] {
+ break
+ }
+ x1 := x0.Args[3]
+ if x1.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ _ = x1.Args[3]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] {
+ break
+ }
+ x1_2 := x1.Args[2]
+ if x1_2.Op != OpARM64UBFX || auxIntToArm64BitField(x1_2.AuxInt) != armBFAuxInt(16, 16) || w != x1_2.Args[0] {
+ break
+ }
+ x2 := x1.Args[3]
+ if x2.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x2.Args[3]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] {
+ break
+ }
+ x2_2 := x2.Args[2]
+ if x2_2.Op != OpARM64UBFX || auxIntToArm64BitField(x2_2.AuxInt) != armBFAuxInt(24, 8) || w != x2_2.Args[0] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr (ADDconst [1] idx) w x:(MOVBstoreidx ptr idx (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx (REV16W <w.Type> w) mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ w := v_2
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, w.Type)
+ v0.AddArg(w)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx w x:(MOVBstoreidx ptr (ADDconst [1] idx) (UBFX [armBFAuxInt(8, 8)] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ w := v_2
+ x := v_3
+ if x.Op != OpARM64MOVBstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpARM64ADDconst || auxIntToInt64(x_1.AuxInt) != 1 || idx != x_1.Args[0] {
+ break
+ }
+ x_2 := x.Args[2]
+ if x_2.Op != OpARM64UBFX || auxIntToArm64BitField(x_2.AuxInt) != armBFAuxInt(8, 8) || w != x_2.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVBstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVBstorezero [i] {s} ptr0 x:(MOVBstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),1) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVHstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVBstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 1) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVBstorezero [1] {s} (ADD ptr0 idx0) x:(MOVBstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVHstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVBstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVBstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVBstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVBstorezeroidx ptr (ADDconst [1] idx) x:(MOVBstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVBstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr val _))
+ // result: (FMOVDfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVDfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVDload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDloadidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDloadidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDloadidx ptr idx (MOVDstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDloadidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDload [int32(c)<<3] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx8 ptr idx (MOVDstorezeroidx8 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDstorezeroidx8 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDnop (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDreg (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstore [off] {sym} ptr (FMOVDfpgp val) mem)
+ // result: (FMOVDstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVDfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} (ADDshiftLL [3] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr (SLLconst [3] idx) val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (SLLconst [3] idx) ptr val mem)
+ // result: (MOVDstoreidx8 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVDstoreidx8)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstoreidx8(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx8 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstore [int32(c)<<3] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 3)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx8 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off] {sym} (ADDshiftLL [3] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezero [i] {s} ptr0 x:(MOVDstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),8) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVQstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVDstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 8) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVDstorezero [8] {s} p0:(ADD ptr0 idx0) x:(MOVDstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVQstorezero [0] {s} p0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 8 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ if p0.Op != OpARM64ADD {
+ break
+ }
+ _ = p0.Args[1]
+ p0_0 := p0.Args[0]
+ p0_1 := p0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p0_0, p0_1 = _i0+1, p0_1, p0_0 {
+ ptr0 := p0_0
+ idx0 := p0_1
+ x := v_1
+ if x.Op != OpARM64MOVDstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVDstorezero [8] {s} p0:(ADDshiftLL [3] ptr0 idx0) x:(MOVDstorezeroidx8 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVQstorezero [0] {s} p0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 8 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p0 := v_0
+ if p0.Op != OpARM64ADDshiftLL || auxIntToInt64(p0.AuxInt) != 3 {
+ break
+ }
+ idx0 := p0.Args[1]
+ ptr0 := p0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVDstorezeroidx8 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(s)
+ v.AddArg2(p0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVDstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx ptr (SLLconst [3] idx) mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 3 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVDstorezeroidx (SLLconst [3] idx) ptr mem)
+ // result: (MOVDstorezeroidx8 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 3 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstorezeroidx8)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVDstorezeroidx8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezeroidx8 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<3)
+ // result: (MOVDstorezero [int32(c<<3)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 3)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 3))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHUloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHUloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHUloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHUload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<16-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<16 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVHUreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (UBFIZ [armBFAuxInt(lc, 16-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (SRLconst [rc] x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 16
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr (ADD idx idx) mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx (ADD idx idx) ptr mem)
+ // result: (MOVHloadidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHloadidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHloadidx ptr idx (MOVHstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHloadidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHload [int32(c)<<1] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx2 ptr idx (MOVHstorezeroidx2 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg (SLLconst [lc] x))
+ // cond: lc < 16
+ // result: (SBFIZ [armBFAuxInt(lc, 16-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 16-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 16
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 16) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} (ADDshiftLL [1] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (UBFX [armBFAuxInt(16, 16)] w) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64UBFX || auxIntToArm64BitField(v_1.AuxInt) != armBFAuxInt(16, 16) {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [16] (MOVDreg w)) x:(MOVHstore [i-2] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ continue
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [16] (MOVDreg w)) x:(MOVHstoreidx2 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64MOVDreg {
+ break
+ }
+ w := v_1_0.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVHstore [i-2] {s} ptr1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstore [i-2] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVHstore [2] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstore [2] {s} (ADDshiftLL [1] ptr0 idx0) (SRLconst [j] w) x:(MOVHstoreidx2 ptr1 idx1 w0:(SRLconst [j-16] w) mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstoreidx ptr1 (SLLconst <idx1.Type> [1] idx1) w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstoreidx2 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (SLLconst [1] idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (ADD idx idx) val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (SLLconst [1] idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (ADD idx idx) ptr val mem)
+ // result: (MOVHstoreidx2 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr (ADDconst [2] idx) (SRLconst [16] w) x:(MOVHstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 16 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVHstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstoreidx2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx2 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstore [int32(c)<<1] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVHUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx2 ptr idx (MOVWUreg x) mem)
+ // result: (MOVHstoreidx2 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVHstoreidx2)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off] {sym} (ADDshiftLL [1] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezero [i] {s} ptr0 x:(MOVHstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),2) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVWstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVHstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 2) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVHstorezero [2] {s} (ADD ptr0 idx0) x:(MOVHstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVWstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVHstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVHstorezero [2] {s} (ADDshiftLL [1] ptr0 idx0) x:(MOVHstorezeroidx2 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVWstorezeroidx ptr1 (SLLconst <idx1.Type> [1] idx1) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVHstorezeroidx2 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg(idx1)
+ v.AddArg3(ptr1, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVHstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (SLLconst [1] idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (ADD idx idx) mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADD {
+ break
+ }
+ idx := v_1.Args[1]
+ if idx != v_1.Args[0] {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (SLLconst [1] idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx (ADD idx idx) ptr mem)
+ // result: (MOVHstorezeroidx2 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ if idx != v_0.Args[0] {
+ break
+ }
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstorezeroidx2)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVHstorezeroidx ptr (ADDconst [2] idx) x:(MOVHstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVHstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVHstorezeroidx2(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezeroidx2 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<1)
+ // result: (MOVHstorezero [int32(c<<1)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 1)) {
+ break
+ }
+ v.reset(OpARM64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 1))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVQstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVQstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVQstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVQstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVQstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWUload [off] {sym} ptr (FMOVSstore [off] {sym} ptr val _))
+ // result: (FMOVSfpgp val)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ val := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpARM64FMOVSfpgp)
+ v.AddArg(val)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVWUload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVDconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWUload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWUloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWUloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWUloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWUload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (ANDconst [c] x))
+ // result: (ANDconst [c&(1<<32-1)] x)
+ for {
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & (1<<32 - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ if !(lc >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (MOVWUreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (UBFIZ [armBFAuxInt(lc, 32-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (SRLconst [rc] x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32)] x)
+ for {
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ rc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (UBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 32
+ // result: (UBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstorezero [off2] {sym2} ptr2 _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVDconst [0])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWstorezero {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is32Bit(c)
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWloadidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWloadidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWloadidx ptr idx (MOVWstorezeroidx ptr2 idx2 _))
+ // cond: (isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2))
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2) || isSamePtr(ptr, idx2) && isSamePtr(idx, ptr2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWloadidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWload [int32(c)<<2] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx4 ptr idx (MOVWstorezeroidx4 ptr2 idx2 _))
+ // cond: isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)
+ // result: (MOVDconst [0])
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ idx2 := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr, ptr2) && isSamePtr(idx, idx2)) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWload {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUloadidx2 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx4 _ _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWloadidx4 {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVBUreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVHreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpARM64MOVWreg {
+ break
+ }
+ v.reset(OpARM64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ // match: (MOVWreg (SLLconst [lc] x))
+ // cond: lc < 32
+ // result: (SBFIZ [armBFAuxInt(lc, 32-lc)] x)
+ for {
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, 32-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SBFX [bfc] x))
+ // cond: bfc.getARM64BFwidth() <= 32
+ // result: (SBFX [bfc] x)
+ for {
+ if v_0.Op != OpARM64SBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(bfc.getARM64BFwidth() <= 32) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstore [off] {sym} ptr (FMOVSfpgp val) mem)
+ // result: (FMOVSstore [off] {sym} ptr val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64FMOVSfpgp {
+ break
+ }
+ val := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADD ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} (ADDshiftLL [2] ptr idx) val mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} ptr0 (SRLconst [32] w) x:(MOVWstore [i-4] {s} ptr1 w mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstore [i-4] {s} ptr0 w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ if w != x.Args[1] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstoreidx ptr1 idx1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ continue
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr1, idx1, w, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [32] w) x:(MOVWstoreidx4 ptr1 idx1 w mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx4 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if w != x.Args[2] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} ptr0 (SRLconst [j] w) x:(MOVWstore [i-4] {s} ptr1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstore [i-4] {s} ptr0 w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ w0 := x.Args[1]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(ptr0, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [4] {s} (ADD ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstoreidx ptr1 idx1 w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ if v_1.Op != OpARM64SRLconst {
+ continue
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx {
+ continue
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr1, idx1, w0, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstore [4] {s} (ADDshiftLL [2] ptr0 idx0) (SRLconst [j] w) x:(MOVWstoreidx4 ptr1 idx1 w0:(SRLconst [j-32] w) mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstoreidx ptr1 (SLLconst <idx1.Type> [2] idx1) w0 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst {
+ break
+ }
+ j := auxIntToInt64(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstoreidx4 {
+ break
+ }
+ mem := x.Args[3]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ w0 := x.Args[2]
+ if w0.Op != OpARM64SRLconst || auxIntToInt64(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg4(ptr1, v0, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) idx val mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstore [int32(c)] idx val mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (SLLconst [2] idx) val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (SLLconst [2] idx) ptr val mem)
+ // result: (MOVWstoreidx4 ptr idx val mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr (ADDconst [4] idx) (SRLconst [32] w) x:(MOVWstoreidx ptr idx w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDstoreidx ptr idx w mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
+ break
+ }
+ idx := v_1.Args[0]
+ if v_2.Op != OpARM64SRLconst || auxIntToInt64(v_2.AuxInt) != 32 {
+ break
+ }
+ w := v_2.Args[0]
+ x := v_3
+ if x.Op != OpARM64MOVWstoreidx {
+ break
+ }
+ mem := x.Args[3]
+ if ptr != x.Args[0] || idx != x.Args[1] || w != x.Args[2] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstoreidx)
+ v.AddArg4(ptr, idx, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstoreidx4(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx4 ptr (MOVDconst [c]) val mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstore [int32(c)<<2] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c) << 2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVDconst [0]) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx4 ptr idx (MOVWUreg x) mem)
+ // result: (MOVWstoreidx4 ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpARM64MOVWstoreidx4)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADD ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off] {sym} (ADDshiftLL [2] ptr idx) mem)
+ // cond: off == 0 && sym == nil
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[1]
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(off == 0 && sym == nil) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezero [i] {s} ptr0 x:(MOVWstorezero [j] {s} ptr1 mem))
+ // cond: x.Uses == 1 && areAdjacentOffsets(int64(i),int64(j),4) && isSamePtr(ptr0, ptr1) && clobber(x)
+ // result: (MOVDstorezero [int32(min(int64(i),int64(j)))] {s} ptr0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ ptr0 := v_0
+ x := v_1
+ if x.Op != OpARM64MOVWstorezero {
+ break
+ }
+ j := auxIntToInt32(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ ptr1 := x.Args[0]
+ if !(x.Uses == 1 && areAdjacentOffsets(int64(i), int64(j), 4) && isSamePtr(ptr0, ptr1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(int32(min(int64(i), int64(j))))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr0, mem)
+ return true
+ }
+ // match: (MOVWstorezero [4] {s} (ADD ptr0 idx0) x:(MOVWstorezeroidx ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)
+ // result: (MOVDstorezeroidx ptr1 idx1 mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ ptr0 := v_0_0
+ idx0 := v_0_1
+ x := v_1
+ if x.Op != OpARM64MOVWstorezeroidx {
+ continue
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x)) {
+ continue
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr1, idx1, mem)
+ return true
+ }
+ break
+ }
+ // match: (MOVWstorezero [4] {s} (ADDshiftLL [2] ptr0 idx0) x:(MOVWstorezeroidx4 ptr1 idx1 mem))
+ // cond: x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)
+ // result: (MOVDstorezeroidx ptr1 (SLLconst <idx1.Type> [2] idx1) mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDshiftLL || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx0 := v_0.Args[1]
+ ptr0 := v_0.Args[0]
+ x := v_1
+ if x.Op != OpARM64MOVWstorezeroidx4 {
+ break
+ }
+ mem := x.Args[2]
+ ptr1 := x.Args[0]
+ idx1 := x.Args[1]
+ if !(x.Uses == 1 && s == nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, idx1.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg(idx1)
+ v.AddArg3(ptr1, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (MOVDconst [c]) idx mem)
+ // cond: is32Bit(c)
+ // result: (MOVWstorezero [int32(c)] idx mem)
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ idx := v_1
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx ptr (SLLconst [2] idx) mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64SLLconst || auxIntToInt64(v_1.AuxInt) != 2 {
+ break
+ }
+ idx := v_1.Args[0]
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx (SLLconst [2] idx) ptr mem)
+ // result: (MOVWstorezeroidx4 ptr idx mem)
+ for {
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 2 {
+ break
+ }
+ idx := v_0.Args[0]
+ ptr := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstorezeroidx4)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (MOVWstorezeroidx ptr (ADDconst [4] idx) x:(MOVWstorezeroidx ptr idx mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDstorezeroidx ptr idx mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64ADDconst || auxIntToInt64(v_1.AuxInt) != 4 {
+ break
+ }
+ idx := v_1.Args[0]
+ x := v_2
+ if x.Op != OpARM64MOVWstorezeroidx {
+ break
+ }
+ mem := x.Args[2]
+ if ptr != x.Args[0] || idx != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpARM64MOVDstorezeroidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MOVWstorezeroidx4(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezeroidx4 ptr (MOVDconst [c]) mem)
+ // cond: is32Bit(c<<2)
+ // result: (MOVWstorezero [int32(c<<2)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c << 2)) {
+ break
+ }
+ v.reset(OpARM64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(int32(c << 2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUB a x (MOVDconst [-1]))
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a _ (MOVDconst [0]))
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [1]))
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [-1]) x)
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [0]) _)
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [1]) x)
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ x := v_2
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && c>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && c>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUB (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MNEG <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MNEG, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUB a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (SUBconst [c*d] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MSUBW(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ x := v_1
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==-1
+ // result: (ADD a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) _)
+ // cond: int32(c)==0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: int32(c)==1
+ // result: (SUB a x)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(int32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c)
+ // result: (SUBshiftLL a x [log64(c)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(a, x)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c-1) && int32(c)>=3
+ // result: (SUB a (ADDshiftLL <x.Type> x x [log64(c-1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ break
+ }
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c - 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: isPowerOfTwo64(c+1) && int32(c)>=7
+ // result: (ADD a (SUBshiftLL <x.Type> x x [log64(c+1)]))
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ break
+ }
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [2]) [log64(c/3)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [2]) [log64(c/5)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (ADDshiftLL a (SUBshiftLL <x.Type> x x [3]) [log64(c/7)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64SUBshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) x)
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SUBshiftLL a (ADDshiftLL <x.Type> x x [3]) [log64(c/9)])
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ x := v_2
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg2(a, v0)
+ return true
+ }
+ // match: (MSUBW (MOVDconst [c]) x y)
+ // result: (ADDconst [c] (MNEGW <x.Type> x y))
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ y := v_2
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64MNEGW, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MSUBW a (MOVDconst [c]) (MOVDconst [d]))
+ // result: (SUBconst [int64(int32(c)*int32(d))] a)
+ for {
+ a := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if v_2.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_2.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ v.AddArg(a)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MUL (NEG x) y)
+ // result: (MNEG x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [-1]))
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL _ (MOVDconst [0]))
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && c >= 3
+ // result: (ADDshiftLL x x [log64(c-1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && c >= 3) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c - 1))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && c >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && c >= 7) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3)
+ // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5)
+ // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7)
+ // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9)
+ // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MULW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULW (NEG x) y)
+ // result: (MNEGW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64NEG {
+ continue
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==-1
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == -1) {
+ continue
+ }
+ v.reset(OpARM64NEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW _ (MOVDconst [c]))
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 0) {
+ continue
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: int32(c)==1
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(int32(c) == 1) {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLconst [log64(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c-1) && int32(c) >= 3
+ // result: (ADDshiftLL x x [log64(c-1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c-1) && int32(c) >= 3) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c - 1))
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c+1) && int32(c) >= 7
+ // result: (ADDshiftLL (NEG <x.Type> x) x [log64(c+1)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c+1) && int32(c) >= 7) {
+ continue
+ }
+ v.reset(OpARM64ADDshiftLL)
+ v.AuxInt = int64ToAuxInt(log64(c + 1))
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)
+ // result: (SLLconst [log64(c/3)] (ADDshiftLL <x.Type> x x [1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%3 == 0 && isPowerOfTwo64(c/3) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 3))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(1)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)
+ // result: (SLLconst [log64(c/5)] (ADDshiftLL <x.Type> x x [2]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%5 == 0 && isPowerOfTwo64(c/5) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 5))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)
+ // result: (SLLconst [log64(c/7)] (ADDshiftLL <x.Type> (NEG <x.Type> x) x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%7 == 0 && isPowerOfTwo64(c/7) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 7))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v1 := b.NewValue0(v.Pos, OpARM64NEG, x.Type)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW x (MOVDconst [c]))
+ // cond: c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)
+ // result: (SLLconst [log64(c/9)] (ADDshiftLL <x.Type> x x [3]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%9 == 0 && isPowerOfTwo64(c/9) && is32Bit(c)) {
+ continue
+ }
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(log64(c / 9))
+ v0 := b.NewValue0(v.Pos, OpARM64ADDshiftLL, x.Type)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(x, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MULW (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [int64(int32(c)*int32(d))])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c) * int32(d)))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVN(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVN (XOR x y))
+ // result: (EON x y)
+ for {
+ if v_0.Op != OpARM64XOR {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MVN (MOVDconst [c]))
+ // result: (MOVDconst [^c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (MVN x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (MVN x:(RORconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (MVNshiftRO [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64MVNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64MVNshiftRO(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MVNshiftRO (MOVDconst [c]) [d])
+ // result: (MOVDconst [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MUL x y))
+ // result: (MNEG x y)
+ for {
+ if v_0.Op != OpARM64MUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MNEG)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (MULW x y))
+ // result: (MNEGW x y)
+ for {
+ if v_0.Op != OpARM64MULW {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpARM64MNEGW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpARM64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG x:(SLLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftLL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRLconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRL [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ // match: (NEG x:(SRAconst [c] y))
+ // cond: clobberIfDead(x)
+ // result: (NEGshiftRA [c] y)
+ for {
+ x := v_0
+ if x.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(clobberIfDead(x)) {
+ break
+ }
+ v.reset(OpARM64NEGshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftLL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftLL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) << uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRA(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRA (MOVDconst [c]) [d])
+ // result: (MOVDconst [-(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-(c >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NEGshiftRL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGshiftRL (MOVDconst [c]) [d])
+ // result: (MOVDconst [-int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NotEqual (FlagConstant [fc]))
+ // result: (MOVDconst [b2i(fc.ne())])
+ for {
+ if v_0.Op != OpARM64FlagConstant {
+ break
+ }
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(fc.ne()))
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpARM64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OR x (MOVDconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR x (MVN y))
+ // result: (ORN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64ORN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64ORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFIZ [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^((1<<uint(bfc.getARM64BFwidth())-1) << uint(bfc.getARM64BFlsb()))
+ // result: (BFI [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFIZ {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^((1<<uint(bfc.getARM64BFwidth()) - 1) << uint(bfc.getARM64BFlsb()))) {
+ continue
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ // match: (OR (UBFX [bfc] x) (ANDconst [ac] y))
+ // cond: ac == ^(1<<uint(bfc.getARM64BFwidth())-1)
+ // result: (BFXIL [bfc] y x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64UBFX {
+ continue
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ continue
+ }
+ ac := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(ac == ^(1<<uint(bfc.getARM64BFwidth()) - 1)) {
+ continue
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i1] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [3] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 3 {
+ continue
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p1 := x2.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ ptr0 := x3.Args[0]
+ idx0 := x3.Args[1]
+ if mem != x3.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (MOVWUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 3 {
+ continue
+ }
+ idx := x0_1.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 1 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] || idx != x3.Args[1] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i1] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i7 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i6 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i4 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [7] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [6] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [4] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [3] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [2] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 7 {
+ continue
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 6 || auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 4 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 3 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 2 || auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 1 || auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ p1 := x6.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ ptr0 := x7.Args[0]
+ idx0 := x7.Args[1]
+ if mem != x7.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x6.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [7] idx) mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (MOVDloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 7 {
+ continue
+ }
+ idx := x0_1.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 6 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 4 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ continue
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 3 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ continue
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 2 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x6.Args[2]
+ if ptr != x6.Args[0] {
+ continue
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 1 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ if ptr != x7.Args[0] || idx != x7.Args[1] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVWUload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x3.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ continue
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(x3.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] s0:(SLLconst [24] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ s0 := o1.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 24 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o1.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o0.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := v_1
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3) != nil && clobber(x0, x1, x2, x3, y0, y1, y2, y3, o0, o1, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem))) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [i5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [i6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [i7] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ if auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x7.Pos, OpARM64MOVDload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x7.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem))) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [3] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [4] {s} p mem))) y5:(MOVDnop x5:(MOVBUload [5] {s} p mem))) y6:(MOVDnop x6:(MOVBUload [6] {s} p mem))) y7:(MOVDnop x7:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ continue
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ continue
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, p1_0, p1_1 = _i1+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 3 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 4 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUload || auxIntToInt32(x5.AuxInt) != 5 || auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUload || auxIntToInt32(x6.AuxInt) != 6 || auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUload || auxIntToInt32(x7.AuxInt) != 7 || auxToSym(x7.Aux) != s {
+ continue
+ }
+ _ = x7.Args[1]
+ if p != x7.Args[0] || mem != x7.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x7.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x7.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> o0:(ORshiftLL [8] o1:(ORshiftLL [16] o2:(ORshiftLL [24] o3:(ORshiftLL [32] o4:(ORshiftLL [40] o5:(ORshiftLL [48] s0:(SLLconst [56] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem))) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y5:(MOVDnop x5:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y6:(MOVDnop x6:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y7:(MOVDnop x7:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4,x5,x6,x7) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 8 {
+ continue
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 16 {
+ continue
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 24 {
+ continue
+ }
+ _ = o2.Args[1]
+ o3 := o2.Args[0]
+ if o3.Op != OpARM64ORshiftLL || auxIntToInt64(o3.AuxInt) != 32 {
+ continue
+ }
+ _ = o3.Args[1]
+ o4 := o3.Args[0]
+ if o4.Op != OpARM64ORshiftLL || auxIntToInt64(o4.AuxInt) != 40 {
+ continue
+ }
+ _ = o4.Args[1]
+ o5 := o4.Args[0]
+ if o5.Op != OpARM64ORshiftLL || auxIntToInt64(o5.AuxInt) != 48 {
+ continue
+ }
+ _ = o5.Args[1]
+ s0 := o5.Args[0]
+ if s0.Op != OpARM64SLLconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ y0 := s0.Args[0]
+ if y0.Op != OpARM64MOVDnop {
+ continue
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o5.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ continue
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ continue
+ }
+ y2 := o4.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ continue
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ continue
+ }
+ y3 := o3.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ continue
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 3 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ continue
+ }
+ y4 := o2.Args[1]
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ continue
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 4 || idx != x4_1.Args[0] || mem != x4.Args[2] {
+ continue
+ }
+ y5 := o1.Args[1]
+ if y5.Op != OpARM64MOVDnop {
+ continue
+ }
+ x5 := y5.Args[0]
+ if x5.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x5.Args[2]
+ if ptr != x5.Args[0] {
+ continue
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpARM64ADDconst || auxIntToInt64(x5_1.AuxInt) != 5 || idx != x5_1.Args[0] || mem != x5.Args[2] {
+ continue
+ }
+ y6 := o0.Args[1]
+ if y6.Op != OpARM64MOVDnop {
+ continue
+ }
+ x6 := y6.Args[0]
+ if x6.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x6.Args[2]
+ if ptr != x6.Args[0] {
+ continue
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpARM64ADDconst || auxIntToInt64(x6_1.AuxInt) != 6 || idx != x6_1.Args[0] || mem != x6.Args[2] {
+ continue
+ }
+ y7 := v_1
+ if y7.Op != OpARM64MOVDnop {
+ continue
+ }
+ x7 := y7.Args[0]
+ if x7.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x7.Args[2]
+ if ptr != x7.Args[0] {
+ continue
+ }
+ x7_1 := x7.Args[1]
+ if x7_1.Op != OpARM64ADDconst || auxIntToInt64(x7_1.AuxInt) != 7 || idx != x7_1.Args[0] || mem != x7.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && y5.Uses == 1 && y6.Uses == 1 && y7.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7) != nil && clobber(x0, x1, x2, x3, x4, x5, x6, x7, y0, y1, y2, y3, y4, y5, y6, y7, o0, o1, o2, o3, o4, o5, s0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(v.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [c]))
+ // result: (ORconst [^c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORN x x)
+ // result: (MOVDconst [-1])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORN x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (ORN x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (ORNshiftRO x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64ORNshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [^(c>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^(c >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [^int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORNshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORNshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [^rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(^rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORNshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [c1] (ANDconst [c2] x))
+ // cond: c2|c1 == ^0
+ // result: (ORconst [c1] x)
+ for {
+ c1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c2|c1 == ^0) {
+ break
+ }
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORshiftLL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftLL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL y:(SLLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SLLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: ( ORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: ( ORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (ORshiftLL [sc] (UBFX [bfc] x) (SRLconst [sc] y))
+ // cond: sc == bfc.getARM64BFwidth()
+ // result: (BFXIL [bfc] y x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SRLconst || auxIntToInt64(v_1.AuxInt) != sc {
+ break
+ }
+ y := v_1.Args[0]
+ if !(sc == bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(bfc)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i0] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x1.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr0 idx0 mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64MOVHUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr idx mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (MOVHUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i2] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i3] {s} p mem)))
+ // cond: i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [3] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 2 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 3 || idx != x2_1.Args[0] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] x0:(MOVHUloadidx2 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [2] {s} p1:(ADDshiftLL [1] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [3] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWUloadidx <t> ptr0 (SLLconst <idx0.Type> [1] idx0) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ x0 := o0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx2 {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 1 {
+ break
+ }
+ idx1 := p1.Args[1]
+ ptr1 := p1.Args[0]
+ if mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 3 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64MOVWUloadidx, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpARM64SLLconst, idx0.Type)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(idx0)
+ v0.AddArg3(ptr0, v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUload [i0] {s} p mem) y1:(MOVDnop x1:(MOVBUload [i4] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i7] {s} p mem)))
+ // cond: i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUload {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i4 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i5 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ break
+ }
+ i6 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ break
+ }
+ i7 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
+ v.copyOf(v0)
+ v0.Aux = symToAux(s)
+ v1 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
+ v1.AuxInt = int64ToAuxInt(int64(i0))
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 idx0 mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ continue
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ continue
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr0, idx0, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx4 ptr0 idx0 mem) y1:(MOVDnop x1:(MOVBUload [4] {s} p1:(ADDshiftLL [2] ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUload [5] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [6] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [7] {s} p mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr0 (SLLconst <idx0.Type> [2] idx0) mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx4 {
+ break
+ }
+ mem := x0.Args[2]
+ ptr0 := x0.Args[0]
+ idx0 := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x1.Aux)
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADDshiftLL || auxIntToInt64(p1.AuxInt) != 2 {
+ break
+ }
+ idx1 := p1.Args[1]
+ ptr1 := p1.Args[0]
+ if mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 5 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ p := x2.Args[0]
+ if mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 6 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload || auxIntToInt32(x4.AuxInt) != 7 || auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpARM64SLLconst, idx0.Type)
+ v1.AuxInt = int64ToAuxInt(2)
+ v1.AddArg(idx0)
+ v0.AddArg3(ptr0, v1, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] x0:(MOVWUloadidx ptr idx mem) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [4] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [5] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [6] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr (ADDconst [7] idx) mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDloadidx <t> ptr idx mem)
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ x0 := o2.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ idx := x0.Args[1]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 4 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 5 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 6 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpARM64ADDconst || auxIntToInt64(x4_1.AuxInt) != 7 || idx != x4_1.Args[0] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v.copyOf(v0)
+ v0.AddArg3(ptr, idx, mem)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [i1] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUload <t> [i0] {s} p mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpARM64MOVHUload, t)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUload || auxIntToInt32(x0.AuxInt) != 1 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p1 := x0.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ continue
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x1.Args[2]
+ ptr0 := x1.Args[0]
+ idx0 := x1.Args[1]
+ if mem != x1.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && clobber(x0, x1, y0, y1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpARM64MOVHUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [8] y0:(MOVDnop x0:(MOVBUloadidx ptr (ADDconst [1] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, y0, y1)
+ // result: @mergePoint(b,x0,x1) (REV16W <t> (MOVHUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ y0 := v_0
+ if y0.Op != OpARM64MOVDnop {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 1 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := v_1
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] || idx != x1.Args[1] || mem != x1.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, y0, y1)) {
+ break
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(v.Pos, OpARM64REV16W, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [i2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i1] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUload {
+ break
+ }
+ i2 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] || !(i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpARM64MOVWUload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x2.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUload [2] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUload || auxIntToInt32(x0.AuxInt) != 2 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 1 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ p1 := x1.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x1.Args[1] {
+ continue
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ continue
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x2.Args[2]
+ ptr0 := x2.Args[0]
+ idx0 := x2.Args[1]
+ if mem != x2.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x1.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [24] o0:(ORshiftLL [16] y0:(REV16W x0:(MOVHUloadidx ptr (ADDconst [2] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b,x0,x1,x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (REVW <t> (MOVWUloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 16 {
+ break
+ }
+ _ = o0.Args[1]
+ y0 := o0.Args[0]
+ if y0.Op != OpARM64REV16W {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVHUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 2 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := o0.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 1 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := v_1
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] || idx != x2.Args[1] || mem != x2.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && o0.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, y0, y1, y2, o0)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(v.Pos, OpARM64REVW, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWUloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [i4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [i3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [i2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [i1] {s} p mem))) y4:(MOVDnop x4:(MOVBUload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDload <t> {s} (OffPtr <p.Type> [int64(i0)] p) mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUload {
+ break
+ }
+ i4 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload {
+ break
+ }
+ i3 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload {
+ break
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload {
+ break
+ }
+ i1 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUload {
+ break
+ }
+ i0 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] || !(i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpARM64MOVDload, t)
+ v1.Aux = symToAux(s)
+ v2 := b.NewValue0(x4.Pos, OpOffPtr, p.Type)
+ v2.AuxInt = int64ToAuxInt(int64(i0))
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUload [4] {s} p mem)) y1:(MOVDnop x1:(MOVBUload [3] {s} p mem))) y2:(MOVDnop x2:(MOVBUload [2] {s} p mem))) y3:(MOVDnop x3:(MOVBUload [1] {s} p1:(ADD ptr1 idx1) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr0 idx0 mem)))
+ // cond: s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr0 idx0 mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUload || auxIntToInt32(x0.AuxInt) != 4 {
+ break
+ }
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUload || auxIntToInt32(x1.AuxInt) != 3 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUload || auxIntToInt32(x2.AuxInt) != 2 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUload || auxIntToInt32(x3.AuxInt) != 1 || auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[1]
+ p1 := x3.Args[0]
+ if p1.Op != OpARM64ADD {
+ break
+ }
+ _ = p1.Args[1]
+ p1_0 := p1.Args[0]
+ p1_1 := p1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, p1_0, p1_1 = _i0+1, p1_1, p1_0 {
+ ptr1 := p1_0
+ idx1 := p1_1
+ if mem != x3.Args[1] {
+ continue
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ continue
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ continue
+ }
+ _ = x4.Args[2]
+ ptr0 := x4.Args[0]
+ idx0 := x4.Args[1]
+ if mem != x4.Args[2] || !(s == nil && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && (isSamePtr(ptr0, ptr1) && isSamePtr(idx0, idx1) || isSamePtr(ptr0, idx1) && isSamePtr(idx0, ptr1)) && isSamePtr(p1, p) && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x3.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr0, idx0, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORshiftLL <t> [56] o0:(ORshiftLL [48] o1:(ORshiftLL [40] o2:(ORshiftLL [32] y0:(REVW x0:(MOVWUloadidx ptr (ADDconst [4] idx) mem)) y1:(MOVDnop x1:(MOVBUloadidx ptr (ADDconst [3] idx) mem))) y2:(MOVDnop x2:(MOVBUloadidx ptr (ADDconst [2] idx) mem))) y3:(MOVDnop x3:(MOVBUloadidx ptr (ADDconst [1] idx) mem))) y4:(MOVDnop x4:(MOVBUloadidx ptr idx mem)))
+ // cond: x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b,x0,x1,x2,x3,x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (REV <t> (MOVDloadidx <t> ptr idx mem))
+ for {
+ t := v.Type
+ if auxIntToInt64(v.AuxInt) != 56 {
+ break
+ }
+ o0 := v_0
+ if o0.Op != OpARM64ORshiftLL || auxIntToInt64(o0.AuxInt) != 48 {
+ break
+ }
+ _ = o0.Args[1]
+ o1 := o0.Args[0]
+ if o1.Op != OpARM64ORshiftLL || auxIntToInt64(o1.AuxInt) != 40 {
+ break
+ }
+ _ = o1.Args[1]
+ o2 := o1.Args[0]
+ if o2.Op != OpARM64ORshiftLL || auxIntToInt64(o2.AuxInt) != 32 {
+ break
+ }
+ _ = o2.Args[1]
+ y0 := o2.Args[0]
+ if y0.Op != OpARM64REVW {
+ break
+ }
+ x0 := y0.Args[0]
+ if x0.Op != OpARM64MOVWUloadidx {
+ break
+ }
+ mem := x0.Args[2]
+ ptr := x0.Args[0]
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpARM64ADDconst || auxIntToInt64(x0_1.AuxInt) != 4 {
+ break
+ }
+ idx := x0_1.Args[0]
+ y1 := o2.Args[1]
+ if y1.Op != OpARM64MOVDnop {
+ break
+ }
+ x1 := y1.Args[0]
+ if x1.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x1.Args[2]
+ if ptr != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpARM64ADDconst || auxIntToInt64(x1_1.AuxInt) != 3 || idx != x1_1.Args[0] || mem != x1.Args[2] {
+ break
+ }
+ y2 := o1.Args[1]
+ if y2.Op != OpARM64MOVDnop {
+ break
+ }
+ x2 := y2.Args[0]
+ if x2.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x2.Args[2]
+ if ptr != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpARM64ADDconst || auxIntToInt64(x2_1.AuxInt) != 2 || idx != x2_1.Args[0] || mem != x2.Args[2] {
+ break
+ }
+ y3 := o0.Args[1]
+ if y3.Op != OpARM64MOVDnop {
+ break
+ }
+ x3 := y3.Args[0]
+ if x3.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x3.Args[2]
+ if ptr != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpARM64ADDconst || auxIntToInt64(x3_1.AuxInt) != 1 || idx != x3_1.Args[0] || mem != x3.Args[2] {
+ break
+ }
+ y4 := v_1
+ if y4.Op != OpARM64MOVDnop {
+ break
+ }
+ x4 := y4.Args[0]
+ if x4.Op != OpARM64MOVBUloadidx {
+ break
+ }
+ _ = x4.Args[2]
+ if ptr != x4.Args[0] || idx != x4.Args[1] || mem != x4.Args[2] || !(x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && y0.Uses == 1 && y1.Uses == 1 && y2.Uses == 1 && y3.Uses == 1 && y4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, y0, y1, y2, y3, y4, o0, o1, o2)) {
+ break
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(v.Pos, OpARM64REV, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDloadidx, t)
+ v1.AddArg3(ptr, idx, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRA (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRA x (MOVDconst [c]) [d])
+ // result: (ORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRA y:(SRAconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRAconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRL (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRL x (MOVDconst [c]) [d])
+ // result: (ORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL y:(SRLconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64SRLconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: ( ORshiftRL [c] (SLLconst x [64-c]) x)
+ // result: (RORconst [ c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: ( ORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] x) (SLLconst [lc] y))
+ // cond: lc > rc && ac == ^((1<<uint(64-lc)-1) << uint64(lc-rc))
+ // result: (BFI [armBFAuxInt(lc-rc, 64-lc)] x y)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(lc > rc && ac == ^((1<<uint(64-lc)-1)<<uint64(lc-rc))) {
+ break
+ }
+ v.reset(OpARM64BFI)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ORshiftRL [rc] (ANDconst [ac] y) (SLLconst [lc] x))
+ // cond: lc < rc && ac == ^((1<<uint(64-rc)-1))
+ // result: (BFXIL [armBFAuxInt(rc-lc, 64-rc)] y x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(lc < rc && ac == ^(1<<uint(64-rc)-1)) {
+ break
+ }
+ v.reset(OpARM64BFXIL)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORshiftRO (MOVDconst [c]) x [d])
+ // result: (ORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (ORshiftRO x (MOVDconst [c]) [d])
+ // result: (ORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64ORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORshiftRO y:(RORconst x [c]) x [c])
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpARM64RORconst || auxIntToInt64(y.AuxInt) != c {
+ break
+ }
+ x := y.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64REV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (REV (REV p))
+ // result: p
+ for {
+ if v_0.Op != OpARM64REV {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64REVW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (REVW (REVW p))
+ // result: p
+ for {
+ if v_0.Op != OpARM64REVW {
+ break
+ }
+ p := v_0.Args[0]
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64ROR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROR x (MOVDconst [c]))
+ // result: (RORconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RORW x (MOVDconst [c]))
+ // result: (RORWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RORWconst [c] (RORWconst [d] x))
+ // result: (RORWconst [(c+d)&31] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORWconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt((c + d) & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64RORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RORconst [c] (RORconst [d] x))
+ // result: (RORconst [(c+d)&63] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt((c + d) & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SBCSflags(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> bo)))))
+ // result: (SBCSflags x y bo)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64NEG || v_2_0_0.Type != typ.UInt64 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpARM64NGCzerocarry || v_2_0_0_0.Type != typ.UInt64 {
+ break
+ }
+ bo := v_2_0_0_0.Args[0]
+ v.reset(OpARM64SBCSflags)
+ v.AddArg3(x, y, bo)
+ return true
+ }
+ // match: (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags (MOVDconst [0]))))
+ // result: (SUBSflags x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 || v_2.Type != types.TypeFlags {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpARM64NEGSflags {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpARM64SUBSflags)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVDconst [c]))
+ // result: (SLLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SLLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ // match: (SLLconst [c] (SRLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [^(1<<uint(c)-1)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(^(1<<uint(c) - 1))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVWreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVHreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVBreg x))
+ // result: (SBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVWUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(32, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(32, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVHUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(16, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(16, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [lc] (MOVBUreg x))
+ // result: (UBFIZ [armBFAuxInt(lc, min(8, 64-lc))] x)
+ for {
+ lc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc, min(8, 64-lc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, 0)
+ // result: (UBFIZ [armBFAuxInt(sc, arm64BFWidth(ac, 0))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, 0)) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, 0)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVDconst [c]))
+ // result: (SRAconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (SBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (SLLconst [lc] x))
+ // cond: lc <= rc
+ // result: (SBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc <= rc) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVWreg x))
+ // cond: rc < 32
+ // result: (SBFX [armBFAuxInt(rc, 32-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVHreg x))
+ // cond: rc < 16
+ // result: (SBFX [armBFAuxInt(rc, 16-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [rc] (MOVBreg x))
+ // cond: rc < 8
+ // result: (SBFX [armBFAuxInt(rc, 8-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (SBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64SBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAconst [sc] (SBFIZ [bfc] x))
+ // cond: sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (SBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc >= bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64SBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVDconst [c]))
+ // result: (SRLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ // match: (SRLconst [c] (SLLconst [c] x))
+ // cond: 0 < c && c < 64
+ // result: (ANDconst [1<<uint(64-c)-1] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if !(0 < c && c < 64) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(64-c) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc >= 32
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ if !(rc >= 32) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc >= 16
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ if !(rc >= 16) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc >= 8
+ // result: (MOVDconst [0])
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ if !(rc >= 8) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc > rc
+ // result: (UBFIZ [armBFAuxInt(lc-rc, 64-lc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc > rc) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(lc-rc, 64-lc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (SLLconst [lc] x))
+ // cond: lc < rc
+ // result: (UBFX [armBFAuxInt(rc-lc, 64-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ lc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(lc < rc) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc-lc, 64-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVWUreg x))
+ // cond: rc < 32
+ // result: (UBFX [armBFAuxInt(rc, 32-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVWUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 32) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 32-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVHUreg x))
+ // cond: rc < 16
+ // result: (UBFX [armBFAuxInt(rc, 16-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVHUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 16) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 16-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [rc] (MOVBUreg x))
+ // cond: rc < 8
+ // result: (UBFX [armBFAuxInt(rc, 8-rc)] x)
+ for {
+ rc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVBUreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(rc < 8) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(rc, 8-rc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (ANDconst [ac] x))
+ // cond: isARM64BFMask(sc, ac, sc)
+ // result: (UBFX [armBFAuxInt(sc, arm64BFWidth(ac, sc))] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ANDconst {
+ break
+ }
+ ac := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isARM64BFMask(sc, ac, sc)) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc, arm64BFWidth(ac, sc)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFX [bfc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRLconst [sc] (UBFIZ [bfc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFX [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ sc := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFIZ {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64STP(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (STP [off1] {sym} (ADDconst [off2] ptr) val1 val2 mem)
+ // cond: is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (STP [off1+int32(off2)] {sym} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(is32Bit(int64(off1)+off2) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off1] {sym1} (MOVDaddr [off2] {sym2} ptr) val1 val2 mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)
+ // result: (STP [off1+off2] {mergeSym(sym1,sym2)} ptr val1 val2 mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpARM64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val1 := v_1
+ val2 := v_2
+ mem := v_3
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (ptr.Op != OpSB || !config.ctxt.Flag_shared)) {
+ break
+ }
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg4(ptr, val1, val2, mem)
+ return true
+ }
+ // match: (STP [off] {sym} ptr (MOVDconst [0]) (MOVDconst [0]) mem)
+ // result: (MOVQstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 || v_2.Op != OpARM64MOVDconst || auxIntToInt64(v_2.AuxInt) != 0 {
+ break
+ }
+ mem := v_3
+ v.reset(OpARM64MOVQstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB a l:(MUL x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MSUB a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MUL {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUB)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEG x y))
+ // cond: l.Uses==1 && clobber(l)
+ // result: (MADD a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEG {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADD)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MULW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MSUBW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MULW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MSUBW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB a l:(MNEGW x y))
+ // cond: a.Type.Size() != 8 && l.Uses==1 && clobber(l)
+ // result: (MADDW a x y)
+ for {
+ a := v_0
+ l := v_1
+ if l.Op != OpARM64MNEGW {
+ break
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ if !(a.Type.Size() != 8 && l.Uses == 1 && clobber(l)) {
+ break
+ }
+ v.reset(OpARM64MADDW)
+ v.AddArg3(a, x, y)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB x (SUB y z))
+ // result: (SUB (ADD <v.Type> x z) y)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64SUB {
+ break
+ }
+ z := v_1.Args[1]
+ y := v_1.Args[0]
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, v.Type)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (SUB (SUB x y) z)
+ // result: (SUB x (ADD <y.Type> y z))
+ for {
+ if v_0.Op != OpARM64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64ADD, y.Type)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUB x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftLL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRL x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ // match: (SUB x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (SUBshiftRA x0 y [c])
+ for {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ break
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ break
+ }
+ v.reset(OpARM64SUBshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SUBconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftLL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRA x (MOVDconst [c]) [d])
+ // result: (SUBconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64SUBshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBshiftRL x (MOVDconst [c]) [d])
+ // result: (SUBconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64SUBconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TST(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TST x (MOVDconst [c]))
+ // result: (TSTconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (TST x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (TSTshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64TSTshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (TSTW x (MOVDconst [c]))
+ // result: (TSTWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTWconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags32(int32(x)&y)])
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags32(int32(x) & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (TSTconst (MOVDconst [x]) [y])
+ // result: (FlagConstant [logicFlags64(x&y)])
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64FlagConstant)
+ v.AuxInt = flagConstantToAuxInt(logicFlags64(x & y))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftLL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftLL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRA (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRA x (MOVDconst [c]) [d])
+ // result: (TSTconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRL (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRL x (MOVDconst [c]) [d])
+ // result: (TSTconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64TSTshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (TSTshiftRO (MOVDconst [c]) x [d])
+ // result: (TSTconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (TSTshiftRO x (MOVDconst [c]) [d])
+ // result: (TSTconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64TSTconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFIZ(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFIZ [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UBFX(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (UBFX [bfc] (SRLconst [sc] x))
+ // cond: sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc+bfc.getARM64BFwidth()+bfc.getARM64BFlsb() < 64) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()+sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc == bfc.getARM64BFlsb()
+ // result: (ANDconst [1<<uint(bfc.getARM64BFwidth())-1] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc == bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(1<<uint(bfc.getARM64BFwidth()) - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc < bfc.getARM64BFlsb()
+ // result: (UBFX [armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth())] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc < bfc.getARM64BFlsb()) {
+ break
+ }
+ v.reset(OpARM64UBFX)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(bfc.getARM64BFlsb()-sc, bfc.getARM64BFwidth()))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UBFX [bfc] (SLLconst [sc] x))
+ // cond: sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()
+ // result: (UBFIZ [armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc)] x)
+ for {
+ bfc := auxIntToArm64BitField(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst {
+ break
+ }
+ sc := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sc > bfc.getARM64BFlsb() && sc < bfc.getARM64BFlsb()+bfc.getARM64BFwidth()) {
+ break
+ }
+ v.reset(OpARM64UBFIZ)
+ v.AuxInt = arm64BitFieldToAuxInt(armBFAuxInt(sc-bfc.getARM64BFlsb(), bfc.getARM64BFlsb()+bfc.getARM64BFwidth()-sc))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (UDIV x (MOVDconst [1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (UDIV x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIV (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UDIVW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (UDIVW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (SRLconst [log64(c)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (UDIVW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMOD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMOD <typ.UInt64> x y)
+ // result: (MSUB <typ.UInt64> x y (UDIV <typ.UInt64> x y))
+ for {
+ if v.Type != typ.UInt64 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUB)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64UDIV, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMOD _ (MOVDconst [1]))
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMOD x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMOD (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64UMODW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (UMODW <typ.UInt32> x y)
+ // result: (MSUBW <typ.UInt32> x y (UDIVW <typ.UInt32> x y))
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MSUBW)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpARM64UDIVW, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ // match: (UMODW _ (MOVDconst [c]))
+ // cond: uint32(c)==1
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) == 1) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (UMODW x (MOVDconst [c]))
+ // cond: isPowerOfTwo64(c) && is32Bit(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c) && is32Bit(c)) {
+ break
+ }
+ v.reset(OpARM64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (UMODW (MOVDconst [c]) (MOVDconst [d]))
+ // cond: d != 0
+ // result: (MOVDconst [int64(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XOR x (MOVDconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR x (MVN y))
+ // result: (EON x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpARM64MVN {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpARM64EON)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SLLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftLL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SLLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftLL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRLconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRL x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRLconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRL)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(SRAconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRA x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64SRAconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRA)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR x0 x1:(RORconst [c] y))
+ // cond: clobberIfDead(x1)
+ // result: (XORshiftRO x0 y [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ x1 := v_1
+ if x1.Op != OpARM64RORconst {
+ continue
+ }
+ c := auxIntToInt64(x1.AuxInt)
+ y := x1.Args[0]
+ if !(clobberIfDead(x1)) {
+ continue
+ }
+ v.reset(OpARM64XORshiftRO)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x0, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLL x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SRL <typ.UInt64> x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SRL <typ.UInt64> x (ANDconst <t> [63] y)) (CSEL0 <typ.UInt64> [cc] (SLL x (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y))) (CMPconst [64] (SUB <t> (MOVDconst [64]) (ANDconst <t> [63] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (ROR x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt64 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 63 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 63 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64ROR)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLL x (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SRL <typ.UInt32> (MOVWUreg x) (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x (NEG <t> y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SRL || v_1_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpARM64MOVWUreg || x != v_1_0_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SRL <typ.UInt32> (MOVWUreg x) (ANDconst <t> [31] y)) (CSEL0 <typ.UInt32> [cc] (SLL x (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y))) (CMPconst [64] (SUB <t> (MOVDconst [32]) (ANDconst <t> [31] y)))))
+ // cond: cc == OpARM64LessThanU
+ // result: (RORW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpARM64SRL || v_0.Type != typ.UInt32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64MOVWUreg {
+ continue
+ }
+ x := v_0_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpARM64ANDconst {
+ continue
+ }
+ t := v_0_1.Type
+ if auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpARM64CSEL0 || v_1.Type != typ.UInt32 {
+ continue
+ }
+ cc := auxIntToOp(v_1.AuxInt)
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpARM64SLL {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ if x != v_1_0.Args[0] {
+ continue
+ }
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpARM64SUB || v_1_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_1.Args[1]
+ v_1_0_1_0 := v_1_0_1.Args[0]
+ if v_1_0_1_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_0_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_0_1_1 := v_1_0_1.Args[1]
+ if v_1_0_1_1.Op != OpARM64ANDconst || v_1_0_1_1.Type != t || auxIntToInt64(v_1_0_1_1.AuxInt) != 31 || y != v_1_0_1_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpARM64CMPconst || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpARM64SUB || v_1_1_0.Type != t {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpARM64MOVDconst || auxIntToInt64(v_1_1_0_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpARM64ANDconst || v_1_1_0_1.Type != t || auxIntToInt64(v_1_1_0_1.AuxInt) != 31 || y != v_1_1_0_1.Args[0] || !(cc == OpARM64LessThanU) {
+ continue
+ }
+ v.reset(OpARM64RORW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (MVN x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpARM64MVN)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORshiftLL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SLLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SLLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)<<uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) << uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL (SLLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [64-c]) x)
+ // result: (RORconst [64-c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <t> [c] (UBFX [bfc] x) x)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (RORWconst [32-c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ if x != v_1 || !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL <typ.UInt16> [8] (UBFX <typ.UInt16> [armBFAuxInt(8, 8)] x) x)
+ // result: (REV16W x)
+ for {
+ if v.Type != typ.UInt16 || auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || v_0.Type != typ.UInt16 || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 8) {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (UBFX [armBFAuxInt(8, 24)] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff
+ // result: (REV16W x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64UBFX || auxIntToArm64BitField(v_0.AuxInt) != armBFAuxInt(8, 24) {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint32(c1) == 0xff00ff00 && uint32(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16W)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff)
+ // result: (REV16 x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00ff00ff00 && uint64(c2) == 0x00ff00ff00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftLL [8] (SRLconst [8] (ANDconst [c1] x)) (ANDconst [c2] x))
+ // cond: (uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff)
+ // result: (REV16 (ANDconst <x.Type> [0xffffffff] x))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 || v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpARM64ANDconst {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ if v_1.Op != OpARM64ANDconst {
+ break
+ }
+ c2 := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(uint64(c1) == 0xff00ff00 && uint64(c2) == 0x00ff00ff) {
+ break
+ }
+ v.reset(OpARM64REV16)
+ v0 := b.NewValue0(v.Pos, OpARM64ANDconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftLL [c] (SRLconst x [64-c]) x2)
+ // result: (EXTRconst [64-c] x2 x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ x2 := v_1
+ v.reset(OpARM64EXTRconst)
+ v.AuxInt = int64ToAuxInt(64 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ // match: (XORshiftLL <t> [c] (UBFX [bfc] x) x2)
+ // cond: c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)
+ // result: (EXTRWconst [32-c] x2 x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64UBFX {
+ break
+ }
+ bfc := auxIntToArm64BitField(v_0.AuxInt)
+ x := v_0.Args[0]
+ x2 := v_1
+ if !(c < 32 && t.Size() == 4 && bfc == armBFAuxInt(32-c, c)) {
+ break
+ }
+ v.reset(OpARM64EXTRWconst)
+ v.AuxInt = int64ToAuxInt(32 - c)
+ v.AddArg2(x2, x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRA (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRAconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRAconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRA x (MOVDconst [c]) [d])
+ // result: (XORconst x [c>>uint64(d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRA (SRAconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRAconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRL (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (SRLconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRL x (MOVDconst [c]) [d])
+ // result: (XORconst x [int64(uint64(c)>>uint64(d))])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL (SRLconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SRLconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORshiftRL [c] (SLLconst x [64-c]) x)
+ // result: (RORconst [ c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 64-c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64RORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRL <t> [c] (SLLconst x [32-c]) (MOVWUreg x))
+ // cond: c < 32 && t.Size() == 4
+ // result: (RORWconst [c] x)
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64SLLconst || auxIntToInt64(v_0.AuxInt) != 32-c {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpARM64MOVWUreg || x != v_1.Args[0] || !(c < 32 && t.Size() == 4) {
+ break
+ }
+ v.reset(OpARM64RORWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpARM64XORshiftRO(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORshiftRO (MOVDconst [c]) x [d])
+ // result: (XORconst [c] (RORconst <x.Type> x [d]))
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpARM64RORconst, x.Type)
+ v0.AuxInt = int64ToAuxInt(d)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (XORshiftRO x (MOVDconst [c]) [d])
+ // result: (XORconst x [rotateRight64(c, d)])
+ for {
+ d := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpARM64XORconst)
+ v.AuxInt = int64ToAuxInt(rotateRight64(c, d))
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORshiftRO (RORconst x [c]) x [c])
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpARM64RORconst || auxIntToInt64(v_0.AuxInt) != c {
+ break
+ }
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
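+// The generic atomic AND/OR ops produce only a memory result, while the ARM64
+// LoweredAtomic* ops return a (value, memory) tuple, so each lowering below
+// wraps the tuple in Select1 to keep just the memory. The *Variant forms are
+// the LSE-atomics versions, selected elsewhere when the CPU supports them.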
+func rewriteValueARM64_OpAtomicAnd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicAnd8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicAnd8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicAnd8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr32Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr32Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr32Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr32Variant, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8 ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpAtomicOr8Variant(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8Variant ptr val mem)
+ // result: (Select1 (LoweredAtomicOr8Variant ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpARM64LoweredAtomicOr8Variant, types.NewTuple(typ.UInt8, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
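+// Avg64u is lowered with the identity floor((x+y)/2) == ((x-y)>>1) + y, which
+// avoids the overflowing 64-bit sum x+y. The identity needs x >= y, which holds
+// at the op's use in the unsigned magic-number division lowering (there
+// y = floor(x*m/2^64) <= x), apparently the only place Avg64u is generated.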
+func rewriteValueARM64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ADD)
+ v0 := b.NewValue0(v.Pos, OpARM64SRLconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
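+// BitLen(x) is the number of bits needed to represent x, i.e. the width minus
+// the count of leading zeros, so BitLen32/BitLen64 lower to 32-CLZW(x) and
+// 64-CLZ(x) respectively.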
+func rewriteValueARM64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUB (MOVDconst [32]) (CLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZW, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (CLZ <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SUB)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpARM64CLZ, typ.Int)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev16 x)
+ // result: (SRLconst [48] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpBitRev8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitRev8 x)
+ // result: (SRLconst [56] (RBIT <typ.UInt64> x))
+ for {
+ x := v_0
+ v.reset(OpARM64SRLconst)
+ v.AuxInt = int64ToAuxInt(56)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
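+// CondSelect lowers to CSEL. If the boolean operand is itself produced by a
+// flag-setting comparison (flagArg returns its flags value), CSEL consumes
+// those flags directly and the comparison op is recorded in AuxInt; otherwise
+// the materialized boolean is tested with TSTWconst [1] and the select is made
+// on the NotEqual condition.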
+func rewriteValueARM64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) != nil
+ // result: (CSEL [boolval.Op] x y flagArg(boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) != nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(boolval.Op)
+ v.AddArg3(x, y, flagArg(boolval))
+ return true
+ }
+ // match: (CondSelect x y boolval)
+ // cond: flagArg(boolval) == nil
+ // result: (CSEL [OpARM64NotEqual] x y (TSTWconst [1] boolval))
+ for {
+ x := v_0
+ y := v_1
+ boolval := v_2
+ if !(flagArg(boolval) == nil) {
+ break
+ }
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg(boolval)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (FMOVSconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpARM64FMOVSconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (FMOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpARM64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValueARM64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpARM64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
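+// Count-trailing-zeros is implemented as CLZ(RBIT(x)): reversing the bits turns
+// trailing zeros into leading zeros. For the 8- and 16-bit variants a guard bit
+// is ORed in just above the operand's width (0x100, 0x10000) so that a zero
+// input yields 8 or 16 instead of 32.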
+func rewriteValueARM64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x10000] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz32 <t> x)
+ // result: (CLZW (RBITW <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Ctz64 <t> x)
+ // result: (CLZ (RBIT <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZ)
+ v0 := b.NewValue0(v.Pos, OpARM64RBIT, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 <t> x)
+ // result: (CLZW <t> (RBITW <typ.UInt32> (ORconst <typ.UInt32> [0x100] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64CLZW)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64RBITW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpARM64ORconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (UDIVW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (UDIVW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UDIVW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
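+// Booleans are materialized as 0 or 1, so boolean equality is computed
+// branch-free as 1 XOR (x XOR y).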
+func rewriteValueARM64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVDconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpARM64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64Equal)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
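+// The generic FMA computes x*y + z, while ARM64's FMADDD computes
+// arg0 + arg1*arg2 (the addend comes first), hence the reordering to (z, x, y).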
+func rewriteValueARM64_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADDD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpARM64FMADDD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
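+// Hmul32/Hmul32u (the high 32 bits of a 32x32-bit product) are computed with a
+// full widening multiply (MULL/UMULL) followed by a shift right by 32. An
+// arithmetic shift is used in both cases; the difference only affects the upper
+// 32 bits of the register, which are not significant for the 32-bit result.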
+func rewriteValueARM64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAconst (MULL <typ.Int64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MULL, typ.Int64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRAconst (UMULL <typ.UInt64> x y) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64UMULL, typ.UInt64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
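+// Bounds checks use unsigned comparisons so that a negative index, viewed as a
+// huge unsigned value, also fails the check: IsInBounds requires idx < len,
+// IsSliceInBounds permits idx == len.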
+func rewriteValueARM64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThanU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqualU (CMP idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
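+// For the unsigned orderings, boundary cases are first folded to equality
+// tests: x <= 0 becomes x == 0 and 1 <= x becomes x != 0 (the Less*U rules
+// below do the symmetric 0 < x and x < 1 folds), presumably because the
+// compare-with-zero forms are simpler and combine better with later
+// flag-setting rewrites.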
+func rewriteValueARM64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x zero:(MOVDconst [0]))
+ // result: (Eq16 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq16U (MOVDconst [1]) x)
+ // result: (Neq16 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq16U x y)
+ // result: (LessEqualU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (LessEqualF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x zero:(MOVDconst [0]))
+ // result: (Eq32 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq32U (MOVDconst [1]) x)
+ // result: (Neq32 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq32U x y)
+ // result: (LessEqualU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (LessEqualF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x zero:(MOVDconst [0]))
+ // result: (Eq64 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq64U (MOVDconst [1]) x)
+ // result: (Neq64 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq64U x y)
+ // result: (LessEqualU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x zero:(MOVDconst [0]))
+ // result: (Eq8 x zero)
+ for {
+ x := v_0
+ zero := v_1
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, zero)
+ return true
+ }
+ // match: (Leq8U (MOVDconst [1]) x)
+ // result: (Neq8 (MOVDconst [0]) x)
+ for {
+ if v_0.Op != OpARM64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Leq8U x y)
+ // result: (LessEqualU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessEqualU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U zero:(MOVDconst [0]) x)
+ // result: (Neq16 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq16)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less16U x (MOVDconst [1]))
+ // result: (Eq16 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less16U x y)
+ // result: (LessThanU (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (LessThanF (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U zero:(MOVDconst [0]) x)
+ // result: (Neq32 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq32)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less32U x (MOVDconst [1]))
+ // result: (Eq32 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less32U x y)
+ // result: (LessThanU (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (LessThanF (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanF)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U zero:(MOVDconst [0]) x)
+ // result: (Neq64 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq64)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less64U x (MOVDconst [1]))
+ // result: (Eq64 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less64U x y)
+ // result: (LessThanU (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThan)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U zero:(MOVDconst [0]) x)
+ // result: (Neq8 zero x)
+ for {
+ zero := v_0
+ if zero.Op != OpARM64MOVDconst || auxIntToInt64(zero.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpNeq8)
+ v.AddArg2(zero, x)
+ return true
+ }
+ // match: (Less8U x (MOVDconst [1]))
+ // result: (Eq8 x (MOVDconst [0]))
+ for {
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Less8U x y)
+ // result: (LessThanU (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
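+// Load dispatches on the element type: booleans and unsigned integers use
+// zero-extending loads, signed integers use sign-extending loads, 64-bit
+// integers and pointers use MOVDload, and floats use the FMOV loads.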
+func rewriteValueARM64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpARM64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpARM64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpARM64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpARM64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
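+// Go defines x << y to be 0 once y reaches the operand's width, but the
+// hardware variable shift uses only the low six bits of the count. Each Lsh
+// lowering therefore computes the SLL and uses CSEL to substitute 0 whenever
+// the zero-extended count is >= 64. Comparing against 64 rather than the
+// narrow operand's width is enough: for counts between the width and 63 the
+// SLL has already pushed every significant bit of the narrow value out of the
+// low bits.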
+func rewriteValueARM64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SLL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (UMODW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y)
+ // result: (MODW x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MOD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MOD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64MODW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (UMODW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64UMODW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
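+// Move (a memmove of statically known size) expands as follows: small fixed
+// sizes become straight-line load/store pairs; a size that is not a multiple of
+// 8 splits the trailing remainder off with a recursive Move; and medium copies
+// whose size is 8 mod 16 store the final 8 bytes directly and then enter Duff's
+// device (DUFFCOPY), whose AuxInt appears to encode the byte offset into the
+// duffcopy routine so that only the needed 16-byte copy units are executed.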
+func rewriteValueARM64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBUload [6] src mem) (MOVHstore [4] dst (MOVHUload [4] src mem) (MOVWstore dst (MOVWUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVHUload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] dst src mem)
+ // result: (MOVWstore [8] dst (MOVWUload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVWUload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] dst src mem)
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%8 != 0 && s > 8
+ // result: (Move [s%8] (OffPtr <dst.Type> dst [s-s%8]) (OffPtr <src.Type> src [s-s%8]) (Move [s-s%8] dst src mem))
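+ // A move whose size is not a multiple of 8 is split in two: the inner
+ // Move copies the aligned s-s%8 prefix, and the outer Move copies the
+ // remaining s%8 tail at offset s-s%8 on top of that memory state.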
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 != 0 && s > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s % 8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s - s%8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s - s%8)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpMove, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(s - s%8)
+ v2.AddArg3(dst, src, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (MOVDstore [int32(s-8)] dst (MOVDload [int32(s-8)] src mem) (DUFFCOPY <types.TypeMem> [8*(64-(s-8)/16)] dst src mem))
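+ // For sizes that are 8 mod 16, the trailing 8 bytes are copied with a
+ // plain load/store and the remaining multiple-of-16 prefix goes through
+ // Duff's device. The AuxInt is the code offset to jump to inside
+ // runtime.duffcopy; the formula assumes each 16-byte copy unit occupies
+ // 8 bytes of code, so 8*(64-(s-8)/16) skips the units that are not needed.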
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 32 && s <= 16*64 && s%16 == 8 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(int32(s - 8))
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpARM64DUFFCOPY, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(8 * (64 - (s-8)/16))
+ v1.AddArg3(dst, src, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [8 * (64 - s/16)] dst src mem)
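+ // Fully 16-byte-aligned sizes jump straight into runtime.duffcopy;
+ // 8*(64-s/16) is the corresponding code offset under the same
+ // 8-bytes-of-code-per-16-byte-unit assumption as above.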
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 32 && s <= 16*64 && s%16 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(8 * (64 - s/16))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 24 && s%8 == 0 && logLargeCopy(v, s)
+ // result: (LoweredMove dst src (ADDconst <src.Type> src [s-8]) mem)
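+ // Fallback for large or Duff-unfriendly sizes: a lowered copy loop.
+ // The ADDconst argument is src+s-8, i.e. the address of the last 8-byte
+ // word to copy, which presumably serves as the loop's termination bound.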
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpARM64LoweredMove)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
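+ // ARM64 only compares 32- and 64-bit registers, so sub-word operands are
+ // zero-extended before the 32-bit compare; zero extension preserves
+ // (in)equality of the low 16 bits, so the result is unchanged.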
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPS, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64FCMPD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpARM64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Not x)
+ // result: (XOR (MOVDconst [1]) x)
+ for {
+ x := v_0
+ v.reset(OpARM64XOR)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueARM64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpARM64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpARM64ADDconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueARM64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpARM64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt16to64 x)))))
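+ // Population count is done on the SIMD side: the zero-extended value is
+ // moved into an FP/SIMD register, VCNT counts the set bits in each byte,
+ // VUADDLV sums those per-byte counts across the vector, and the total is
+ // moved back to a general-purpose register.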
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> (ZeroExt32to64 x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 <t> x)
+ // result: (FMOVDfpgp <t> (VUADDLV <typ.Float64> (VCNT <typ.Float64> (FMOVDgpfp <typ.Float64> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64FMOVDfpgp)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpARM64VUADDLV, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpARM64VCNT, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpARM64FMOVDgpfp, typ.Float64)
+ v2.AddArg(x)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpPrefetchCache(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCache addr mem)
+ // result: (PRFM [0] addr mem)
+ for {
+ addr := v_0
+ mem := v_1
+ v.reset(OpARM64PRFM)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(addr, mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpPrefetchCacheStreamed(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCacheStreamed addr mem)
+ // result: (PRFM [1] addr mem)
+ for {
+ addr := v_0
+ mem := v_1
+ v.reset(OpARM64PRFM)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(addr, mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpPubBarrier(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (PubBarrier mem)
+ // result: (DMB [0xe] mem)
+ for {
+ mem := v_0
+ v.reset(OpARM64DMB)
+ v.AuxInt = int64ToAuxInt(0xe)
+ v.AddArg(mem)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
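+ // There is no native 16-bit rotate, so a rotate by a constant is expanded
+ // into a left shift OR'd with the complementary unsigned right shift;
+ // masking with 15 keeps both shift amounts in the 0..15 range.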
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft32 x y)
+ // result: (RORW x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64RORW)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RotateLeft64 x y)
+ // result: (ROR x (NEG <y.Type> y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64ROR)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, y.Type)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpARM64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
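+ // Go defines an unsigned shift by >= the operand width as 0, while the
+ // hardware SRL uses the count modulo 64. The CSEL therefore selects the
+ // SRL result when the zero-extended count is below 64 (LessThanU on the
+ // CMPconst [64] flags) and the constant 0 otherwise; counts in 16..63
+ // already yield 0 naturally because x was zero-extended to 64 bits.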
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt16to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
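+ // Signed shifts clamp the count instead of selecting 0: an out-of-range
+ // count is replaced by 63 so the arithmetic shift still fills with the
+ // sign bit, matching Go's semantics for signed right shifts.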
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt32to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA (SignExt32to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> x (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // result: (SRA x (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v0.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt16to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt32to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) y) (Const64 <t> [0]) (CMPconst [64] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (CSEL [OpARM64LessThanU] (SRL <t> (ZeroExt8to64 x) (ZeroExt8to64 y)) (Const64 <t> [0]) (CMPconst [64] (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpARM64CSEL)
+ v.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v0 := b.NewValue0(v.Pos, OpARM64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst64, t)
+ v3.AuxInt = int64ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt16to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt16to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt32to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt32to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> y (Const64 <y.Type> [63]) (CMPconst [64] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // result: (SRA (SignExt8to64 x) (CSEL [OpARM64LessThanU] <y.Type> (ZeroExt8to64 y) (Const64 <y.Type> [63]) (CMPconst [64] (ZeroExt8to64 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpARM64SRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpARM64CSEL, y.Type)
+ v1.AuxInt = opToAuxInt(OpARM64LessThanU)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpConst64, y.Type)
+ v3.AuxInt = int64ToAuxInt(63)
+ v4 := b.NewValue0(v.Pos, OpARM64CMPconst, types.TypeFlags)
+ v4.AuxInt = int64ToAuxInt(64)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueARM64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c))))
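+ // The incoming carry c is a 0/1 value; adding -1 to it (ADDSconstflags)
+ // produces a carry-out exactly when c is non-zero, loading the hardware
+ // carry flag so that ADCSflags can compute x + y + carry directly.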
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y bo))
+ // result: (Select0 <typ.UInt64> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AddArg(bo)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (ADCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (ADCSflags x y (Select1 <types.TypeFlags> (ADDSconstflags [-1] c)))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64ADCzerocarry)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpARM64ADCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpARM64ADDSconstflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3.AuxInt = int64ToAuxInt(-1)
+ v3.AddArg(c)
+ v2.AddArg(v3)
+ v1.AddArg3(x, y, v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y bo))
+ // result: (NEG <typ.UInt64> (NGCzerocarry <typ.UInt64> (Select1 <types.TypeFlags> (SBCSflags x y (Select1 <types.TypeFlags> (NEGSflags bo))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ bo := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpARM64NEG)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpARM64NGCzerocarry, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpARM64SBCSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpARM64NEGSflags, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4.AddArg(bo)
+ v3.AddArg(v4)
+ v2.AddArg3(x, y, v3)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpARM64MOVDstore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpARM64MOVDstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpARM64MOVDstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpARM64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpARM64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [63])
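+ // For a non-negative length x, NEG x has its sign bit set exactly when
+ // x > 0, so an arithmetic shift right by 63 yields all ones for x > 0
+ // and 0 for x == 0, which is the mask Slicemask requires.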
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpARM64SRAconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpARM64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueARM64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpARM64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpARM64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpARM64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueARM64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] ptr mem)
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] ptr mem)
+ // result: (MOVBstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] ptr mem)
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] ptr mem)
+ // result: (MOVBstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [9] ptr mem)
+ // result: (MOVBstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 9 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [10] ptr mem)
+ // result: (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 10 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [11] ptr mem)
+ // result: (MOVBstore [10] ptr (MOVDconst [0]) (MOVHstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 11 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(10)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] ptr mem)
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [13] ptr mem)
+ // result: (MOVBstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 13 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [14] ptr mem)
+ // result: (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 14 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVHstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [15] ptr mem)
+ // result: (MOVBstore [14] ptr (MOVDconst [0]) (MOVHstore [12] ptr (MOVDconst [0]) (MOVWstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 15 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64MOVBstore)
+ v.AuxInt = int32ToAuxInt(14)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(12)
+ v2 := b.NewValue0(v.Pos, OpARM64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpARM64MOVDstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] ptr mem)
+ // result: (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)
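+ // From 16 bytes up, zeroing uses STP, which stores a pair of 64-bit
+ // registers (here the same zero constant twice) in one instruction.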
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, v0, v0, mem)
+ return true
+ }
+ // match: (Zero [32] ptr mem)
+ // result: (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg4(ptr, v0, v0, mem)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [48] ptr mem)
+ // result: (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 48 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg4(ptr, v0, v0, mem)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
+ // match: (Zero [64] ptr mem)
+ // result: (STP [48] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [32] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [16] ptr (MOVDconst [0]) (MOVDconst [0]) (STP [0] ptr (MOVDconst [0]) (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 64 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpARM64STP)
+ v.AuxInt = int32ToAuxInt(48)
+ v0 := b.NewValue0(v.Pos, OpARM64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(16)
+ v3 := b.NewValue0(v.Pos, OpARM64STP, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg4(ptr, v0, v0, mem)
+ v2.AddArg4(ptr, v0, v0, v3)
+ v1.AddArg4(ptr, v0, v0, v2)
+ v.AddArg4(ptr, v0, v0, v1)
+ return true
+ }
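+ // For sizes above 16 that are not multiples of 16, the remainder is peeled
+ // off with an 8- or 16-byte tail zero placed at the end of the region (it may
+ // overlap the already-zeroed bulk) while the 16-byte-aligned bulk is handled
+ // recursively. For example, Zero [24] becomes Zero [8] at offset 16 layered
+ // over Zero [16].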
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 <= 8 && s > 16
+ // result: (Zero [8] (OffPtr <ptr.Type> ptr [s-8]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 <= 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 != 0 && s%16 > 8 && s > 16
+ // result: (Zero [16] (OffPtr <ptr.Type> ptr [s-16]) (Zero [s-s%16] ptr mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 != 0 && s%16 > 8 && s > 16) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(s - s%16)
+ v1.AddArg2(ptr, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
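+ // Medium-sized zeroings that are multiples of 16 bytes (more than 64 and at
+ // most 16*64 = 1024 bytes) jump into the runtime's Duff's-device routine.
+ // Each 16-byte step of duffzero is a single 4-byte store-pair instruction, so
+ // the AuxInt 4*(64 - s/16) selects the entry offset that leaves exactly s/16
+ // stores to execute.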
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice
+ // result: (DUFFZERO [4 * (64 - s/16)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && s > 64 && s <= 16*64 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpARM64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(4 * (64 - s/16))
+ v.AddArg2(ptr, mem)
+ return true
+ }
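+ // Anything larger (or any multiple of 16 when Duff's device is disabled)
+ // falls back to LoweredZero, a pseudo-op that expands to an inline store
+ // loop; its second argument is the address of the last 16-byte unit to
+ // clear, hence ptr + s - 16.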
+ // match: (Zero [s] ptr mem)
+ // cond: s%16 == 0 && (s > 16*64 || config.noDuffDevice)
+ // result: (LoweredZero ptr (ADDconst <ptr.Type> [s-16] ptr) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s%16 == 0 && (s > 16*64 || config.noDuffDevice)) {
+ break
+ }
+ v.reset(OpARM64LoweredZero)
+ v0 := b.NewValue0(v.Pos, OpARM64ADDconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - 16)
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
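+// rewriteBlockARM64 applies the machine-generated block rewrite rules to b.
+// When a rule matches, the block's kind and control value are replaced and
+// the function reports true; if no rule applies it reports false so the
+// caller knows nothing changed.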
+func rewriteBlockARM64(b *Block) bool {
+ switch b.Kind {
+ case BlockARM64EQ:
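+ // Conditional branches on a comparison of an AND/ADD/MADD/MSUB result
+ // against zero are fused into flag-setting forms (TST/TSTconst, CMN/CMNconst,
+ // or a CMP against the product for MSUB) when the arithmetic value has a
+ // single use, so the and/add/msub result itself is never materialized.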
+ // match: (EQ (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (EQ (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
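+ // A direct comparison against zero becomes the compare-and-branch-on-zero
+ // block kinds Z/ZW, which assemble to CBZ/CBZW and need no flags at all.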
+ // match: (EQ (CMPconst [0] x) yes no)
+ // result: (Z x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64Z, x)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] x) yes no)
+ // result: (ZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64ZW, x)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (EQ (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64EQ, v0)
+ return true
+ }
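+ // A test against a single-bit constant becomes a test-bit-and-branch: oneBit
+ // checks that exactly one bit of c is set, and ntz64 (number of trailing
+ // zeros) yields its index, which TBZ/TBNZ branch on directly.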
+ // match: (EQ (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (EQ (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
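+ // When the flags are known at compile time (FlagConstant), the branch is
+ // resolved outright: the block collapses to First, with the successors
+ // swapped when the condition evaluates to false.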
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: fc.eq()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagConstant [fc]) yes no)
+ // cond: !fc.eq()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.eq()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cmp)
+ return true
+ }
+ case BlockARM64FGE:
+ // match: (FGE (InvertFlags cmp) yes no)
+ // result: (FLE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cmp)
+ return true
+ }
+ case BlockARM64FGT:
+ // match: (FGT (InvertFlags cmp) yes no)
+ // result: (FLT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cmp)
+ return true
+ }
+ case BlockARM64FLE:
+ // match: (FLE (InvertFlags cmp) yes no)
+ // result: (FGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cmp)
+ return true
+ }
+ case BlockARM64FLT:
+ // match: (FLT (InvertFlags cmp) yes no)
+ // result: (FGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cmp)
+ return true
+ }
+ case BlockARM64GE:
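+ // The signed GE/GT/LE/LT blocks mirror the EQ fusions above, but when the
+ // flags come from an addition (CMN/CMNconst or a fused multiply-add) the
+ // rules switch to the GEnoov/GTnoov/LEnoov/LTnoov kinds, which evaluate the
+ // signed comparison against zero without relying on the overflow flag that a
+ // true subtract-style compare would have produced.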
+ // match: (GE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GEnoov, v0)
+ return true
+ }
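+ // Testing the sign of a value ("x >= 0") only needs its top bit: the 32-bit
+ // compare branches on bit 31 and the 64-bit compare on bit 63, using TBZ
+ // here (and TBNZ in the mirrored LT rules further down).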
+ // match: (GE (CMPWconst [0] x) yes no)
+ // result: (TBZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (GE (CMPconst [0] x) yes no)
+ // result: (TBZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: fc.ge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagConstant [fc]) yes no)
+ // cond: !fc.ge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cmp)
+ return true
+ }
+ case BlockARM64GEnoov:
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.geNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.geNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.geNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GEnoov (InvertFlags cmp) yes no)
+ // result: (LEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LEnoov, cmp)
+ return true
+ }
+ case BlockARM64GT:
+ // match: (GT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (GTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (GTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64GTnoov, v0)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: fc.gt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (FlagConstant [fc]) yes no)
+ // cond: !fc.gt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cmp)
+ return true
+ }
+ case BlockARM64GTnoov:
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.gtNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.gtNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.gtNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GTnoov (InvertFlags cmp) yes no)
+ // result: (LTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64LTnoov, cmp)
+ return true
+ }
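+ // Generic If blocks whose control is a materialized ARM64 condition value
+ // (Equal, LessThan, GreaterEqualU, ...) are converted back into the
+ // corresponding flag-based conditional block. A plain boolean control falls
+ // through to the final catch-all rule, which branches on bit 0 via TBNZ [0].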
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (If (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (If (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (If (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (If (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (If (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (If (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (If (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (If (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (TBNZ [0] cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockARM64TBNZ, cond)
+ b.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ case BlockARM64LE:
+ // match: (LE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LEnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LEnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LEnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LEnoov, v0)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: fc.le()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagConstant [fc]) yes no)
+ // cond: !fc.le()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.le()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cmp)
+ return true
+ }
+ case BlockARM64LEnoov:
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: fc.leNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.leNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.leNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LEnoov (InvertFlags cmp) yes no)
+ // result: (GEnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GEnoov, cmp)
+ return true
+ }
+ case BlockARM64LT:
+ // match: (LT (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LT (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (LTnoov (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LTnoov (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (LTnoov (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64LTnoov, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] x) yes no)
+ // result: (TBNZ [31] x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(31)
+ return true
+ }
+ // match: (LT (CMPconst [0] x) yes no)
+ // result: (TBNZ [63] x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(63)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: fc.lt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagConstant [fc]) yes no)
+ // cond: !fc.lt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.lt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cmp)
+ return true
+ }
+ case BlockARM64LTnoov:
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: fc.ltNoov()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTnoov (FlagConstant [fc]) yes no)
+ // cond: !fc.ltNoov()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ltNoov()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LTnoov (InvertFlags cmp) yes no)
+ // result: (GTnoov cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64GTnoov, cmp)
+ return true
+ }
+ case BlockARM64NE:
+ // match: (NE (CMPWconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TST x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TST, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (TSTW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] x:(ANDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (TSTconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ANDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64TSTconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNconst [c] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x:(ADDconst [c] y)) yes no)
+ // cond: x.Uses == 1
+ // result: (NE (CMNWconst [int32(c)] y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpARM64ADDconst {
+ break
+ }
+ c := auxIntToInt64(x.AuxInt)
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPWconst [0] z:(ADD x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64ADD {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMP x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMN x y) yes no)
+ for b.Controls[0].Op == OpARM64CMP {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPW x z:(NEG y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (CMNW x y) yes no)
+ for b.Controls[0].Op == OpARM64CMPW {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_0.Args[1]
+ if z.Op != OpARM64NEG {
+ break
+ }
+ y := z.Args[0]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] x) yes no)
+ // result: (NZ x yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZ, x)
+ return true
+ }
+ // match: (NE (CMPWconst [0] x) yes no)
+ // result: (NZW x yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockARM64NZW, x)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MADD a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMN a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADD {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMN, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(MSUB a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMP a (MUL <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUB {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMP, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MUL, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MADDW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMNW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MADDW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMNW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] z:(MSUBW a x y)) yes no)
+ // cond: z.Uses==1
+ // result: (NE (CMPW a (MULW <x.Type> x y)) yes no)
+ for b.Controls[0].Op == OpARM64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpARM64MSUBW {
+ break
+ }
+ y := z.Args[2]
+ a := z.Args[0]
+ x := z.Args[1]
+ if !(z.Uses == 1) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpARM64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v_0.Pos, OpARM64MULW, x.Type)
+ v1.AddArg2(x, y)
+ v0.AddArg2(a, v1)
+ b.resetWithControl(BlockARM64NE, v0)
+ return true
+ }
+ // match: (NE (TSTconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NE (TSTWconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64TSTWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: fc.ne()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagConstant [fc]) yes no)
+ // cond: !fc.ne()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ne()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cmp)
+ return true
+ }
+ case BlockARM64NZ:
+ // match: (NZ (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (NZ (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (NZ (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (NZ (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (NZ (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (NZ (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (NZ (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (NZ (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (NZ (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (NZ (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ // match: (NZ (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBNZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (NZ (MOVDconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZ (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64NZW:
+ // match: (NZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBNZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBNZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ case BlockARM64TBNZ:
+ // match: (TBNZ [0] (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpARM64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64EQ, cc)
+ return true
+ }
+ // match: (TBNZ [0] (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpARM64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64NE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64LT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThanU cc) yes no)
+ // result: (ULT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64ULT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64LE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqualU cc) yes no)
+ // result: (ULE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64ULE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64GT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThanU cc) yes no)
+ // result: (UGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64UGT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64GE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqualU cc) yes no)
+ // result: (UGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualU {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64UGE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessThanF cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpARM64LessThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FLT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (LessEqualF cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpARM64LessEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FLE, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterThanF cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterThanF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FGT, cc)
+ return true
+ }
+ // match: (TBNZ [0] (GreaterEqualF cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpARM64GreaterEqualF {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ if auxIntToInt64(b.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockARM64FGE, cc)
+ return true
+ }
+ case BlockARM64UGE:
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: fc.uge()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGE (FlagConstant [fc]) yes no)
+ // cond: !fc.uge()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.uge()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGE (InvertFlags cmp) yes no)
+ // result: (ULE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULE, cmp)
+ return true
+ }
+ case BlockARM64UGT:
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: fc.ugt()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (UGT (FlagConstant [fc]) yes no)
+ // cond: !fc.ugt()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ugt()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (UGT (InvertFlags cmp) yes no)
+ // result: (ULT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64ULT, cmp)
+ return true
+ }
+ case BlockARM64ULE:
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: fc.ule()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULE (FlagConstant [fc]) yes no)
+ // cond: !fc.ule()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ule()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULE (InvertFlags cmp) yes no)
+ // result: (UGE cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGE, cmp)
+ return true
+ }
+ case BlockARM64ULT:
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: fc.ult()
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ULT (FlagConstant [fc]) yes no)
+ // cond: !fc.ult()
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64FlagConstant {
+ v_0 := b.Controls[0]
+ fc := auxIntToFlagConstant(v_0.AuxInt)
+ if !(!fc.ult()) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (ULT (InvertFlags cmp) yes no)
+ // result: (UGT cmp yes no)
+ for b.Controls[0].Op == OpARM64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockARM64UGT, cmp)
+ return true
+ }
+ case BlockARM64Z:
+ // match: (Z (ANDconst [c] x) yes no)
+ // cond: oneBit(c)
+ // result: (TBZ [int64(ntz64(c))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(c)) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ // match: (Z (MOVDconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (Z (MOVDconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockARM64ZW:
+ // match: (ZW (ANDconst [c] x) yes no)
+ // cond: oneBit(int64(uint32(c)))
+ // result: (TBZ [int64(ntz64(int64(uint32(c))))] x yes no)
+ for b.Controls[0].Op == OpARM64ANDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(oneBit(int64(uint32(c)))) {
+ break
+ }
+ b.resetWithControl(BlockARM64TBZ, x)
+ b.AuxInt = int64ToAuxInt(int64(ntz64(int64(uint32(c)))))
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) == 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (ZW (MOVDconst [c]) yes no)
+ // cond: int32(c) != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpARM64MOVDconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(int32(c) != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteCond_test.go b/src/cmd/compile/internal/ssa/rewriteCond_test.go
new file mode 100644
index 0000000..2c26fdf
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteCond_test.go
@@ -0,0 +1,597 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "math"
+ "math/rand"
+ "testing"
+)
+
+var (
+ x64 int64 = math.MaxInt64 - 2
+ x64b int64 = math.MaxInt64 - 2
+ x64c int64 = math.MaxInt64 - 2
+ y64 int64 = math.MinInt64 + 1
+ x32 int32 = math.MaxInt32 - 2
+ x32b int32 = math.MaxInt32 - 2
+ x32c int32 = math.MaxInt32 - 2
+ y32 int32 = math.MinInt32 + 1
+ one64 int64 = 1
+ one32 int32 = 1
+ v64 int64 = 11 // ensure it's not 2**n +/- 1
+ v64_n int64 = -11
+ v32 int32 = 11
+ v32_n int32 = -11
+ uv32 uint32 = 19
+ uz uint8 = 1 // for lowering to SLL/SRL/SRA
+)
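+
+// The operands above are package-level variables rather than constants,
+// presumably so the comparisons in the tests below cannot be constant-folded
+// and the rewrite rules under test are actually exercised.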
+
+var crTests = []struct {
+ name string
+ tf func(t *testing.T)
+}{
+ {"AddConst64", testAddConst64},
+ {"AddConst32", testAddConst32},
+ {"AddVar64", testAddVar64},
+ {"AddVar32", testAddVar32},
+ {"MAddVar64", testMAddVar64},
+ {"MAddVar32", testMAddVar32},
+ {"MSubVar64", testMSubVar64},
+ {"MSubVar32", testMSubVar32},
+ {"AddShift32", testAddShift32},
+ {"SubShift32", testSubShift32},
+}
+
+var crBenches = []struct {
+ name string
+ bf func(b *testing.B)
+}{
+ {"SoloJump", benchSoloJump},
+ {"CombJump", benchCombJump},
+}
+
+// Test int32/int64 add/sub/madd/msub operations with boundary values to
+// ensure that the optimization of 'comparing to zero' expressions in
+// if-statements yields the expected results.
+// 32 rewriting rules are covered. At least two scenarios for "Canonicalize
+// the order of arguments to comparisons", which helps with CSE, are covered.
+// The tedious if-else structures are necessary to ensure that all concerned
+// rules and machine code sequences are covered.
+// This is initially for arm64; see https://github.com/golang/go/issues/38740
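+//
+// Illustrative note (a sketch, not an authoritative description of codegen):
+// with the ADD/ADDconst-to-CMN folds shown above for the NE block, and their
+// assumed counterparts for the signed-comparison blocks, a condition such as
+// 'x64+11 < 0' is expected to compile to a single flag-setting CMN followed
+// by one conditional branch, rather than an ADD plus a separate CMP with zero.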
+func TestCondRewrite(t *testing.T) {
+ for _, test := range crTests {
+ t.Run(test.name, test.tf)
+ }
+}
+
+// Profile the aforementioned optimization from two angles:
+// SoloJump: the generated branching code has one 'jump', for '<' and '>='
+// CombJump: the generated branching code has two consecutive 'jumps', for '<=' and '>'
+// We expect 'CombJump' to be generally on par with the non-optimized code, and
+// 'SoloJump' to demonstrate some improvement.
+// This is initially for arm64; see https://github.com/golang/go/issues/38740
+func BenchmarkCondRewrite(b *testing.B) {
+ for _, bench := range crBenches {
+ b.Run(bench.name, bench.bf)
+ }
+}
+
+// var +/- const
+func testAddConst64(t *testing.T) {
+ if x64+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x64)
+ }
+
+ if x64+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x64)
+ }
+
+ if y64-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y64)
+ }
+
+ if y64-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y64)
+ }
+
+ if x64+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x64)
+ }
+
+ if x64+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x64)
+ }
+
+ if y64-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y64)
+ }
+
+ if y64-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y64)
+ }
+}
+
+// 32-bit var +/- const
+func testAddConst32(t *testing.T) {
+ if x32+11 < 0 {
+ } else {
+ t.Errorf("'%#x + 11 < 0' failed", x32)
+ }
+
+ if x32+13 <= 0 {
+ } else {
+ t.Errorf("'%#x + 13 <= 0' failed", x32)
+ }
+
+ if y32-11 > 0 {
+ } else {
+ t.Errorf("'%#x - 11 > 0' failed", y32)
+ }
+
+ if y32-13 >= 0 {
+ } else {
+ t.Errorf("'%#x - 13 >= 0' failed", y32)
+ }
+
+ if x32+19 > 0 {
+ t.Errorf("'%#x + 19 > 0' failed", x32)
+ }
+
+ if x32+23 >= 0 {
+ t.Errorf("'%#x + 23 >= 0' failed", x32)
+ }
+
+ if y32-19 < 0 {
+ t.Errorf("'%#x - 19 < 0' failed", y32)
+ }
+
+ if y32-23 <= 0 {
+ t.Errorf("'%#x - 23 <= 0' failed", y32)
+ }
+}
+
+// var + var
+func testAddVar64(t *testing.T) {
+ if x64+v64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x64, v64)
+ }
+
+ if x64+v64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x64, v64)
+ }
+
+ if x64+v64 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit var+var
+func testAddVar32(t *testing.T) {
+ if x32+v32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x < 0' failed", x32, v32)
+ }
+
+ if x32+v32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n > 0 {
+ } else {
+ t.Errorf("'%#x + %#x > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32 > 0 {
+ t.Errorf("'%#x + %#x > 0' failed", x32, v32)
+ }
+
+ if x32+v32 >= 0 {
+ t.Errorf("'%#x + %#x >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n < 0 {
+ t.Errorf("'%#x + %#x < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n <= 0 {
+ t.Errorf("'%#x + %#x <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-add
+func testMAddVar64(t *testing.T) {
+ if x64+v64*one64 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y64, v64_n)
+ }
+
+ if x64+v64*one64 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x64, v64)
+ }
+
+ if x64+v64*one64 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x64, v64)
+ }
+
+ if y64+v64_n*one64 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y64, v64_n)
+ }
+
+ if y64+v64_n*one64 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y64, v64_n)
+ }
+}
+
+// 32-bit multiply-add
+func testMAddVar32(t *testing.T) {
+ if x32+v32*one32 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 < 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 > 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 > 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", y32, v32_n)
+ }
+
+ if x32+v32*one32 > 0 {
+ t.Errorf("'%#x + %#x*1 > 0' failed", x32, v32)
+ }
+
+ if x32+v32*one32 >= 0 {
+ t.Errorf("'%#x + %#x*1 >= 0' failed", x32, v32)
+ }
+
+ if y32+v32_n*one32 < 0 {
+ t.Errorf("'%#x + %#x*1 < 0' failed", y32, v32_n)
+ }
+
+ if y32+v32_n*one32 <= 0 {
+ t.Errorf("'%#x + %#x*1 <= 0' failed", y32, v32_n)
+ }
+}
+
+// multiply-sub
+func testMSubVar64(t *testing.T) {
+ if x64-v64_n*one64 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y64, v64)
+ }
+
+ if x64-v64_n*one64 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x64, v64_n)
+ }
+
+ if x64-v64_n*one64 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, v64_n)
+ }
+
+ if y64-v64*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y64, v64)
+ }
+
+ if y64-v64*one64 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y64, v64)
+ }
+
+ if x64-x64b*one64 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x64, x64b)
+ }
+
+ if x64-x64b*one64 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x64, x64b)
+ }
+}
+
+// 32-bit multiply-sub
+func testMSubVar32(t *testing.T) {
+ if x32-v32_n*one32 < 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 <= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 > 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", y32, v32)
+ }
+
+ if x32-v32_n*one32 > 0 {
+ t.Errorf("'%#x - %#x*1 > 0' failed", x32, v32_n)
+ }
+
+ if x32-v32_n*one32 >= 0 {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, v32_n)
+ }
+
+ if y32-v32*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", y32, v32)
+ }
+
+ if y32-v32*one32 <= 0 {
+ t.Errorf("'%#x - %#x*1 <= 0' failed", y32, v32)
+ }
+
+ if x32-x32b*one32 < 0 {
+ t.Errorf("'%#x - %#x*1 < 0' failed", x32, x32b)
+ }
+
+ if x32-x32b*one32 >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x*1 >= 0' failed", x32, x32b)
+ }
+}
+
+// 32-bit ADDshift; one or two scenarios are picked for each condition
+func testAddShift32(t *testing.T) {
+ if x32+v32<<1 < 0 {
+ } else {
+ t.Errorf("'%#x + %#x<<%#x < 0' failed", x32, v32, 1)
+ }
+
+ if x32+v32>>1 <= 0 {
+ } else {
+ t.Errorf("'%#x + %#x>>%#x <= 0' failed", x32, v32, 1)
+ }
+
+ if x32+int32(uv32>>1) > 0 {
+ t.Errorf("'%#x + int32(%#x>>%#x) > 0' failed", x32, uv32, 1)
+ }
+
+ if x32+v32<<uz >= 0 {
+ t.Errorf("'%#x + %#x<<%#x >= 0' failed", x32, v32, uz)
+ }
+
+ if x32+v32>>uz > 0 {
+ t.Errorf("'%#x + %#x>>%#x > 0' failed", x32, v32, uz)
+ }
+
+ if x32+int32(uv32>>uz) < 0 {
+ } else {
+ t.Errorf("'%#x + int32(%#x>>%#x) < 0' failed", x32, uv32, uz)
+ }
+}
+
+// 32-bit SUBshift; one or two scenarios are picked for each condition
+func testSubShift32(t *testing.T) {
+ if y32-v32<<1 > 0 {
+ } else {
+ t.Errorf("'%#x - %#x<<%#x > 0' failed", y32, v32, 1)
+ }
+
+ if y32-v32>>1 < 0 {
+ t.Errorf("'%#x - %#x>>%#x < 0' failed", y32, v32, 1)
+ }
+
+ if y32-int32(uv32>>1) >= 0 {
+ } else {
+ t.Errorf("'%#x - int32(%#x>>%#x) >= 0' failed", y32, uv32, 1)
+ }
+
+ if y32-v32<<uz < 0 {
+ t.Errorf("'%#x - %#x<<%#x < 0' failed", y32, v32, uz)
+ }
+
+ if y32-v32>>uz >= 0 {
+ } else {
+ t.Errorf("'%#x - %#x>>%#x >= 0' failed", y32, v32, uz)
+ }
+
+ if y32-int32(uv32>>uz) <= 0 {
+ t.Errorf("'%#x - int32(%#x>>%#x) <= 0' failed", y32, uv32, uz)
+ }
+}
+
+var rnd = rand.New(rand.NewSource(0))
+var sink int64
+
+func benchSoloJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+	// 6 out of 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 >= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 < 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 >= 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
+
+func benchCombJump(b *testing.B) {
+ r1 := x64
+ r2 := x64b
+ r3 := x64c
+ r4 := y64
+ d := rnd.Int63n(10)
+
+	// 6 out of 10 conditions evaluate to true
+ for i := 0; i < b.N; i++ {
+ if r1+r2 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r3 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+r2*one64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r2+r3*one64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1-r2*v64 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r3-r4*v64 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+11 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r1+13 > 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-17 <= 0 {
+ d *= 2
+ d /= 2
+ }
+
+ if r4-19 > 0 {
+ d *= 2
+ d /= 2
+ }
+ }
+ sink = d
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS.go b/src/cmd/compile/internal/ssa/rewriteMIPS.go
new file mode 100644
index 0000000..811ea9d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS.go
@@ -0,0 +1,7558 @@
+// Code generated from gen/MIPS.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32:
+ v.Op = OpMIPSADD
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPSADDF
+ return true
+ case OpAdd32withcarry:
+ return rewriteValueMIPS_OpAdd32withcarry(v)
+ case OpAdd64F:
+ v.Op = OpMIPSADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPSADD
+ return true
+ case OpAddr:
+ return rewriteValueMIPS_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPSAND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPSAND
+ return true
+ case OpAndB:
+ v.Op = OpMIPSAND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPSLoweredAtomicAdd
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpMIPSLoweredAtomicAnd
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueMIPS_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ v.Op = OpMIPSLoweredAtomicCas
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPSLoweredAtomicExchange
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPSLoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPSLoweredAtomicLoad32
+ return true
+ case OpAtomicOr32:
+ v.Op = OpMIPSLoweredAtomicOr
+ return true
+ case OpAtomicOr8:
+ return rewriteValueMIPS_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPSLoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPSLoweredAtomicStore32
+ return true
+ case OpAvg32u:
+ return rewriteValueMIPS_OpAvg32u(v)
+ case OpBitLen32:
+ return rewriteValueMIPS_OpBitLen32(v)
+ case OpClosureCall:
+ v.Op = OpMIPSCALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS_OpCom32(v)
+ case OpCom8:
+ return rewriteValueMIPS_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpMIPSMOVFconst
+ return true
+ case OpConst64F:
+ v.Op = OpMIPSMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueMIPS_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueMIPS_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpMIPSTRUNCFW
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPSMOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPSMOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPSMOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPSTRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPSMOVDF
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPSDIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS_OpDiv32u(v)
+ case OpDiv64F:
+ v.Op = OpMIPSDIVD
+ return true
+ case OpDiv8:
+ return rewriteValueMIPS_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS_OpEq32F(v)
+ case OpEq64F:
+ return rewriteValueMIPS_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPSLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPSLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPSLoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS_OpHmul32u(v)
+ case OpInterCall:
+ v.Op = OpMIPSCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS_OpLeq32U(v)
+ case OpLeq64F:
+ return rewriteValueMIPS_OpLeq64F(v)
+ case OpLeq8:
+ return rewriteValueMIPS_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS_OpLess32U(v)
+ case OpLess64F:
+ return rewriteValueMIPS_OpLess64F(v)
+ case OpLess8:
+ return rewriteValueMIPS_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS_OpLsh32x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS_OpLsh8x8(v)
+ case OpMIPSADD:
+ return rewriteValueMIPS_OpMIPSADD(v)
+ case OpMIPSADDconst:
+ return rewriteValueMIPS_OpMIPSADDconst(v)
+ case OpMIPSAND:
+ return rewriteValueMIPS_OpMIPSAND(v)
+ case OpMIPSANDconst:
+ return rewriteValueMIPS_OpMIPSANDconst(v)
+ case OpMIPSCMOVZ:
+ return rewriteValueMIPS_OpMIPSCMOVZ(v)
+ case OpMIPSCMOVZzero:
+ return rewriteValueMIPS_OpMIPSCMOVZzero(v)
+ case OpMIPSLoweredAtomicAdd:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v)
+ case OpMIPSLoweredAtomicStore32:
+ return rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v)
+ case OpMIPSMOVBUload:
+ return rewriteValueMIPS_OpMIPSMOVBUload(v)
+ case OpMIPSMOVBUreg:
+ return rewriteValueMIPS_OpMIPSMOVBUreg(v)
+ case OpMIPSMOVBload:
+ return rewriteValueMIPS_OpMIPSMOVBload(v)
+ case OpMIPSMOVBreg:
+ return rewriteValueMIPS_OpMIPSMOVBreg(v)
+ case OpMIPSMOVBstore:
+ return rewriteValueMIPS_OpMIPSMOVBstore(v)
+ case OpMIPSMOVBstorezero:
+ return rewriteValueMIPS_OpMIPSMOVBstorezero(v)
+ case OpMIPSMOVDload:
+ return rewriteValueMIPS_OpMIPSMOVDload(v)
+ case OpMIPSMOVDstore:
+ return rewriteValueMIPS_OpMIPSMOVDstore(v)
+ case OpMIPSMOVFload:
+ return rewriteValueMIPS_OpMIPSMOVFload(v)
+ case OpMIPSMOVFstore:
+ return rewriteValueMIPS_OpMIPSMOVFstore(v)
+ case OpMIPSMOVHUload:
+ return rewriteValueMIPS_OpMIPSMOVHUload(v)
+ case OpMIPSMOVHUreg:
+ return rewriteValueMIPS_OpMIPSMOVHUreg(v)
+ case OpMIPSMOVHload:
+ return rewriteValueMIPS_OpMIPSMOVHload(v)
+ case OpMIPSMOVHreg:
+ return rewriteValueMIPS_OpMIPSMOVHreg(v)
+ case OpMIPSMOVHstore:
+ return rewriteValueMIPS_OpMIPSMOVHstore(v)
+ case OpMIPSMOVHstorezero:
+ return rewriteValueMIPS_OpMIPSMOVHstorezero(v)
+ case OpMIPSMOVWload:
+ return rewriteValueMIPS_OpMIPSMOVWload(v)
+ case OpMIPSMOVWnop:
+ return rewriteValueMIPS_OpMIPSMOVWnop(v)
+ case OpMIPSMOVWreg:
+ return rewriteValueMIPS_OpMIPSMOVWreg(v)
+ case OpMIPSMOVWstore:
+ return rewriteValueMIPS_OpMIPSMOVWstore(v)
+ case OpMIPSMOVWstorezero:
+ return rewriteValueMIPS_OpMIPSMOVWstorezero(v)
+ case OpMIPSMUL:
+ return rewriteValueMIPS_OpMIPSMUL(v)
+ case OpMIPSNEG:
+ return rewriteValueMIPS_OpMIPSNEG(v)
+ case OpMIPSNOR:
+ return rewriteValueMIPS_OpMIPSNOR(v)
+ case OpMIPSNORconst:
+ return rewriteValueMIPS_OpMIPSNORconst(v)
+ case OpMIPSOR:
+ return rewriteValueMIPS_OpMIPSOR(v)
+ case OpMIPSORconst:
+ return rewriteValueMIPS_OpMIPSORconst(v)
+ case OpMIPSSGT:
+ return rewriteValueMIPS_OpMIPSSGT(v)
+ case OpMIPSSGTU:
+ return rewriteValueMIPS_OpMIPSSGTU(v)
+ case OpMIPSSGTUconst:
+ return rewriteValueMIPS_OpMIPSSGTUconst(v)
+ case OpMIPSSGTUzero:
+ return rewriteValueMIPS_OpMIPSSGTUzero(v)
+ case OpMIPSSGTconst:
+ return rewriteValueMIPS_OpMIPSSGTconst(v)
+ case OpMIPSSGTzero:
+ return rewriteValueMIPS_OpMIPSSGTzero(v)
+ case OpMIPSSLL:
+ return rewriteValueMIPS_OpMIPSSLL(v)
+ case OpMIPSSLLconst:
+ return rewriteValueMIPS_OpMIPSSLLconst(v)
+ case OpMIPSSRA:
+ return rewriteValueMIPS_OpMIPSSRA(v)
+ case OpMIPSSRAconst:
+ return rewriteValueMIPS_OpMIPSSRAconst(v)
+ case OpMIPSSRL:
+ return rewriteValueMIPS_OpMIPSSRL(v)
+ case OpMIPSSRLconst:
+ return rewriteValueMIPS_OpMIPSSRLconst(v)
+ case OpMIPSSUB:
+ return rewriteValueMIPS_OpMIPSSUB(v)
+ case OpMIPSSUBconst:
+ return rewriteValueMIPS_OpMIPSSUBconst(v)
+ case OpMIPSXOR:
+ return rewriteValueMIPS_OpMIPSXOR(v)
+ case OpMIPSXORconst:
+ return rewriteValueMIPS_OpMIPSXORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS_OpMod32u(v)
+ case OpMod8:
+ return rewriteValueMIPS_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS_OpMove(v)
+ case OpMul16:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32:
+ v.Op = OpMIPSMUL
+ return true
+ case OpMul32F:
+ v.Op = OpMIPSMULF
+ return true
+ case OpMul32uhilo:
+ v.Op = OpMIPSMULTU
+ return true
+ case OpMul64F:
+ v.Op = OpMIPSMULD
+ return true
+ case OpMul8:
+ v.Op = OpMIPSMUL
+ return true
+ case OpNeg16:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPSNEGF
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPSNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPSNEG
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS_OpNeq32F(v)
+ case OpNeq64F:
+ return rewriteValueMIPS_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPSXOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPSLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr32:
+ v.Op = OpMIPSOR
+ return true
+ case OpOr8:
+ v.Op = OpMIPSOR
+ return true
+ case OpOrB:
+ v.Op = OpMIPSOR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS_OpPanicBounds(v)
+ case OpPanicExtend:
+ return rewriteValueMIPS_OpPanicExtend(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS_OpRsh32x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPSMOVHreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPSMOVBreg
+ return true
+ case OpSignmask:
+ return rewriteValueMIPS_OpSignmask(v)
+ case OpSlicemask:
+ return rewriteValueMIPS_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPSSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpMIPSSQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPSCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSub32F:
+ v.Op = OpMIPSSUBF
+ return true
+ case OpSub32withcarry:
+ return rewriteValueMIPS_OpSub32withcarry(v)
+ case OpSub64F:
+ v.Op = OpMIPSSUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPSSUB
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPSSUB
+ return true
+ case OpTailCall:
+ v.Op = OpMIPSCALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPSLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPSXOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPSXOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPSMOVHUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPSMOVBUreg
+ return true
+ case OpZeromask:
+ return rewriteValueMIPS_OpZeromask(v)
+ }
+ return false
+}
+func rewriteValueMIPS_OpAdd32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add32withcarry <t> x y c)
+ // result: (ADD c (ADD <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(c, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v7 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(0)
+ v8 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v9.AuxInt = int32ToAuxInt(0xff)
+ v8.AddArg2(v9, v5)
+ v7.AddArg(v8)
+ v2.AddArg2(v3, v7)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicAnd8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicAnd (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (OR <typ.UInt32> (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) (NORconst [0] <typ.UInt32> (SLL <typ.UInt32> (MOVWconst [0xff]) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAnd)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSOR, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v5 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v7 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(3)
+ v7.AddArg(ptr)
+ v6.AddArg(v7)
+ v5.AddArg(v6)
+ v3.AddArg2(v4, v5)
+ v8 := b.NewValue0(v.Pos, OpMIPSNORconst, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(0)
+ v9 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v10.AuxInt = int32ToAuxInt(0xff)
+ v9.AddArg2(v10, v5)
+ v8.AddArg(v9)
+ v2.AddArg2(v3, v8)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // cond: !config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(!config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v5.AddArg(ptr)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ // match: (AtomicOr8 ptr val mem)
+ // cond: config.BigEndian
+ // result: (LoweredAtomicOr (AND <typ.UInt32Ptr> (MOVWconst [^3]) ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLconst <typ.UInt32> [3] (ANDconst <typ.UInt32> [3] (XORconst <typ.UInt32> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(config.BigEndian) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicOr)
+ v0 := b.NewValue0(v.Pos, OpMIPSAND, typ.UInt32Ptr)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(^3)
+ v0.AddArg2(v1, ptr)
+ v2 := b.NewValue0(v.Pos, OpMIPSSLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(val)
+ v4 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(3)
+ v5 := b.NewValue0(v.Pos, OpMIPSANDconst, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpMIPSXORconst, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v4.AddArg(v5)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v2, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpAvg32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg32u <t> x y)
+ // result: (ADD (SRLconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSADD)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRLconst, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueMIPS_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> x))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NORconst [0] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVWconst [int32(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(val))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVWconst [b2i32(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(b2i32(t))
+ return true
+ }
+}
+func rewriteValueMIPS_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVWconst [0])
+ for {
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVWconst [32]) (CLZ <t> (SUBconst <t> [1] (AND <t> x (NEG <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpMIPSCLZ, t)
+ v2 := b.NewValue0(v.Pos, OpMIPSSUBconst, t)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSAND, t)
+ v4 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v4.AddArg(x)
+ v3.AddArg2(x, v4)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
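+// Note on the Eq* lowerings below: there is no direct equality compare, so
+// x == y is computed as SGTUconst [1] (XOR x y); the XOR is zero exactly when
+// the operands are equal, and the unsigned compare 1 >u 0 yields the boolean.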
+func rewriteValueMIPS_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTUconst [1] (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XORconst [1] (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTUconst [1] (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (Select0 (MULT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULT, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (Select0 (MULTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSMULTU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVWconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XORconst [1] (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
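+// Note on the Leq* lowerings below: less-or-equal is the boolean negation of
+// the corresponding greater-than, so XORconst [1] flips the 0/1 result of
+// SGT/SGTU (and the float variants reuse the >=/ > compares with swapped args).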
+func rewriteValueMIPS_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XORconst [1] (SGT (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XORconst [1] (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XORconst [1] (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XORconst [1] (SGT (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGT, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XORconst [1] (SGTU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to32 y) (SignExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to32 y) (ZeroExt16to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to32 y) (SignExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to32 y) (ZeroExt8to32 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) || isPtr(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVWaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPSMOVWaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
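+// Note on the Lsh*x* lowerings below: Go shifts must produce 0 once the shift
+// amount reaches 32, which the hardware SLL (amount taken mod 32) does not
+// guarantee. CMOVZ selects the SLL result while SGTUconst [32] y (y < 32) is
+// nonzero and the MOVWconst [0] otherwise; constant 64-bit shift amounts are
+// resolved directly in the *x64 rules.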
+func rewriteValueMIPS_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (CMOVZ (SLL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Lsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SLLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (CMOVZ (SLL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMIPSADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVWconst [c]))
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [off1] (MOVWaddr [off2] {sym} ptr))
+ // result: (MOVWaddr [off1+off2] {sym} ptr)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(c+d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBconst [d] x))
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AND x (MOVWconst [c]))
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND (SGTUconst [1] x) (SGTUconst [1] y))
+ // result: (SGTUconst [1] (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUconst || auxIntToInt32(v_1.AuxInt) != 1 {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVWconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZ(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZ _ f (MOVWconst [0]))
+ // result: f
+ for {
+ f := v_1
+ if v_2.Op != OpMIPSMOVWconst || auxIntToInt32(v_2.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(f)
+ return true
+ }
+ // match: (CMOVZ a _ (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_2.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_2.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ // match: (CMOVZ a (MOVWconst [0]) c)
+ // result: (CMOVZzero a c)
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ c := v_2
+ v.reset(OpMIPSCMOVZzero)
+ v.AddArg2(a, c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSCMOVZzero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CMOVZzero _ (MOVWconst [0]))
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (CMOVZzero a (MOVWconst [c]))
+ // cond: c!=0
+ // result: a
+ for {
+ a := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.copyOf(a)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicAdd(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd ptr (MOVWconst [c]) mem)
+ // cond: is16Bit(int64(c))
+ // result: (LoweredAtomicAddconst [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(int64(c))) {
+ break
+ }
+ v.reset(OpMIPSLoweredAtomicAddconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSLoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVWconst [0]) mem)
+ // result: (LoweredAtomicStorezero ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSLoweredAtomicStorezero)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} ptr (MOVBstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBreg (ANDconst [c] x))
+ // cond: c & 0x80 == 0
+ // result: (ANDconst [c&0x7f] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x80 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7f)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int8(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVBstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr (MOVDstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVDstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off] {sym} ptr (MOVFstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVFstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVFstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVFstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHUload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHUreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHUreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUreg (ANDconst [c] x))
+ // result: (ANDconst [c&0xffff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0xffff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(uint16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} ptr (MOVHstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHload {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVBUreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVWreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPSMOVHreg {
+ break
+ }
+ v.reset(OpMIPSMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpMIPSMOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpMIPSMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHreg (ANDconst [c] x))
+ // cond: c & 0x8000 == 0
+ // result: (ANDconst [c&0x7fff] x)
+ for {
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c&0x8000 == 0) {
+ break
+ }
+ v.reset(OpMIPSANDconst)
+ v.AuxInt = int32ToAuxInt(c & 0x7fff)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVWconst [c]))
+ // result: (MOVWconst [int32(int16(c))])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVHstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} ptr (MOVWstore [off2] {sym2} ptr2 x _))
+ // cond: sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWstore {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(sym == sym2 && off == off2 && isSamePtr(ptr, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWnop (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x)
+ // cond: x.Uses == 1
+ // result: (MOVWnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVWconst [c]))
+ // result: (MOVWconst [c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} x:(ADDconst [off2] ptr) val mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPSMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} x:(ADDconst [off2] ptr) mem)
+ // cond: (is16Bit(int64(off1+off2)) || x.Uses == 1)
+ // result: (MOVWstorezero [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if x.Op != OpMIPSADDconst {
+ break
+ }
+ off2 := auxIntToInt32(x.AuxInt)
+ ptr := x.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1+off2)) || x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVWaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPSMOVWaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSMUL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MUL (MOVWconst [0]) _ )
+ // result: (MOVWconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [1]) x )
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [-1]) x )
+ // result: (NEG x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) x )
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MUL (MOVWconst [c]) (MOVWconst [d]))
+ // result: (MOVWconst [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVWconst [c]))
+ // result: (MOVWconst [-c])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVWconst [c]))
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSNORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [^(c|d)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OR x (MOVWconst [c]))
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR (SGTUzero x) (SGTUzero y))
+ // result: (SGTUzero (OR <x.Type> x y))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMIPSSGTUzero {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpMIPSSGTUzero {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPSSGTUzero)
+ v0 := b.NewValue0(v.Pos, OpMIPSOR, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVWconst [-1])
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSORconst)
+ v.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVWconst [c]) x)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGT x (MOVWconst [0]))
+ // result: (SGTzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVWconst [c]) x)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ x := v_1
+ v.reset(OpMIPSSGTUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SGTU x (MOVWconst [0]))
+ // result: (SGTUzero x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPSSGTUzero)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) > uint32(d)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) > uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVWconst [d]))
+ // cond: uint32(c) <= uint32(d)
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(c) <= uint32(d)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint32(m) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLconst _ [d]))
+ // cond: uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTUzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d != 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUzero (MOVWconst [d]))
+ // cond: d == 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c > d
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWconst [d]))
+ // cond: c <= d
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVWconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSANDconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLconst _ [d]))
+ // cond: 0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)
+ // result: (MOVWconst [1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSRLconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(0 <= c && uint32(d) <= 31 && 0xffffffff>>uint32(d) < uint32(c)) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSGTzero(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d > 0
+ // result: (MOVWconst [1])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d > 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ return true
+ }
+ // match: (SGTzero (MOVWconst [d]))
+ // cond: d <= 0
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ if !(d <= 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVWconst [c]))
+ // result: (SLLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSLLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d<<uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d << uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVWconst [c]))
+ // result: (SRAconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRAconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d>>uint32(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d >> uint32(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVWconst [c]))
+ // result: (SRLconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSRLconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [int32(uint32(d)>>uint32(c))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(d) >> uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVWconst [c]))
+ // result: (SUBconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSSUBconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (SUB (MOVWconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPSNEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [d-c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBconst [c] (SUBconst [d] x))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst [c] (ADDconst [d] x))
+ // result: (ADDconst [-c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVWconst [c]))
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVWconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMIPSXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt32(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPSNORconst)
+ v.AuxInt = int32ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVWconst [d]))
+ // result: (MOVWconst [c^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpMIPSXORconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIV (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIV (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIV, types.NewTuple(typ.Int32, typ.Int32))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPSDIVU, types.NewTuple(typ.UInt32, typ.UInt32))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHUload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHUload [2] src mem) (MOVHstore dst (MOVHUload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHUload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBUload [3] src mem) (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBUload [2] src mem) (MOVBstore [1] dst (MOVBUload [1] src mem) (MOVBstore dst (MOVBUload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVBUload, typ.UInt8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] dst (MOVWload [12] src mem) (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(12)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(4)
+ v4 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(4)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPSMOVWload, typ.UInt32)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0)
+ // result: (LoweredMove [int32(t.Alignment())] dst src (ADDconst <src.Type> src [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && logLargeCopy(v, s) || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredMove)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, src.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSFPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to32 x) (ZeroExt8to32 y)) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVWconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPSXOR, typ.UInt32)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPSXORconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVWaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpMIPSMOVWaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPSADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpPanicExtend(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicExtendA [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicExtendB [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ // match: (PanicExtend [kind] hi lo y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicExtendC [kind] hi lo y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ hi := v_0
+ lo := v_1
+ y := v_2
+ mem := v_3
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPSLoweredPanicExtendC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg4(hi, lo, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVWconst [c]))
+ // result: (Or16 (Lsh16x32 <t> x (MOVWconst [c&15])) (Rsh16Ux32 <t> x (MOVWconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVWconst [c]))
+ // result: (Or32 (Lsh32x32 <t> x (MOVWconst [c&31])) (Rsh32Ux32 <t> x (MOVWconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVWconst [c]))
+ // result: (Or64 (Lsh64x32 <t> x (MOVWconst [c&63])) (Rsh64Ux32 <t> x (MOVWconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVWconst [c]))
+ // result: (Or8 (Lsh8x32 <t> x (MOVWconst [c&7])) (Rsh8Ux32 <t> x (MOVWconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x32, t)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux32, t)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRLconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt16to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) < 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [int32(c+16)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 16))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [c]))
+ // cond: uint32(c) >= 16
+ // result: (SRAconst (SLLconst <typ.UInt32> x [16]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 16) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // result: (SRA (SignExt16to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> x y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRLconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> x (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(31)
+ v2 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v2.AuxInt = int32ToAuxInt(32)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) < 32
+ // result: (SRAconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [c]))
+ // cond: uint32(c) >= 32
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 32) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // result: (SRA x ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+ v0 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(v1)
+ v0.AddArg3(v1, v2, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt16to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt16to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) y) (MOVWconst [0]) (SGTUconst [32] y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRLconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (MOVWconst [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (CMOVZ (SRL <t> (ZeroExt8to32 x) (ZeroExt8to32 y) ) (MOVWconst [0]) (SGTUconst [32] (ZeroExt8to32 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSSRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v.AddArg3(v0, v3, v4)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+	// result: (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt16to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt16to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+		v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+	// result: (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> y (MOVWconst [31]) (SGTUconst [32] y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+		v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(31)
+ v3 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v3.AuxInt = int32ToAuxInt(32)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) < 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [int32(c+24)])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(int32(c + 24))
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [c]))
+ // cond: uint32(c) >= 8
+ // result: (SRAconst (SLLconst <typ.UInt32> x [24]) [31])
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) >= 8) {
+ break
+ }
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSSLLconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+	// result: (SRA (SignExt8to32 x) ( CMOVZ <typ.UInt32> (ZeroExt8to32 y) (MOVWconst [31]) (SGTUconst [32] (ZeroExt8to32 y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPSSRA)
+		v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSCMOVZ, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(31)
+ v4 := b.NewValue0(v.Pos, OpMIPSSGTUconst, typ.Bool)
+ v4.AuxInt = int32ToAuxInt(32)
+ v4.AddArg(v2)
+ v1.AddArg3(v2, v3, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add32carry <t> x y))
+ // result: (ADD <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSADD)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (Sub32carry <t> x y))
+ // result: (SUB <t.FieldType(0)> x y)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSUB)
+ v.Type = t.FieldType(0)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Select0 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [1]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [-1]) x ))
+ // result: (CMOVZ (ADDconst <x.Type> [-1] x) (MOVWconst [0]) x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSCMOVZ)
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, x.Type)
+ v0.AuxInt = int32ToAuxInt(-1)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SRLconst [int32(32-log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSRLconst)
+ v.AuxInt = int32ToAuxInt(int32(32 - log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32((int64(uint32(c))*int64(uint32(d)))>>32)])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32((int64(uint32(c)) * int64(uint32(d))) >> 32))
+ return true
+ }
+ break
+ }
+ // match: (Select0 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c%d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)%uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add32carry <t> x y))
+ // result: (SGTU <typ.Bool> x (ADD <t.FieldType(0)> x y))
+ for {
+ if v_0.Op != OpAdd32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSADD, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Select1 (Sub32carry <t> x y))
+ // result: (SGTU <typ.Bool> (SUB <t.FieldType(0)> x y) x)
+ for {
+ if v_0.Op != OpSub32carry {
+ break
+ }
+ t := v_0.Type
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPSSGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t.FieldType(0))
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Select1 (MULTU (MOVWconst [0]) _ ))
+ // result: (MOVWconst [0])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [1]) x ))
+ // result: x
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_0_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [-1]) x ))
+ // result: (NEG <x.Type> x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpMIPSNEG)
+ v.Type = x.Type
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) x ))
+ // cond: isPowerOfTwo64(int64(uint32(c)))
+ // result: (SLLconst [int32(log2uint32(int64(c)))] x)
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPowerOfTwo64(int64(uint32(c)))) {
+ continue
+ }
+ v.reset(OpMIPSSLLconst)
+ v.AuxInt = int32ToAuxInt(int32(log2uint32(int64(c))))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULTU (MOVWconst [c]) (MOVWconst [d])))
+ // result: (MOVWconst [int32(uint32(c)*uint32(d))])
+ for {
+ if v_0.Op != OpMIPSMULTU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPSMOVWconst {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPSMOVWconst {
+ continue
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) * uint32(d)))
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIV (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [c/d])
+ for {
+ if v_0.Op != OpMIPSDIV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVU (MOVWconst [c]) (MOVWconst [d])))
+ // cond: d != 0
+ // result: (MOVWconst [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpMIPSDIVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPSMOVWconst {
+ break
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPSMOVWconst {
+ break
+ }
+ d := auxIntToInt32(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWconst)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSignmask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Signmask x)
+ // result: (SRAconst x [31])
+ for {
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAconst (NEG <t> x) [31])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPSSRAconst)
+ v.AuxInt = int32ToAuxInt(31)
+ v0 := b.NewValue0(v.Pos, OpMIPSNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPSMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPSMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpSub32withcarry(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32withcarry <t> x y c)
+ // result: (SUB (SUB <t> x y) c)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ c := v_2
+ v.reset(OpMIPSSUB)
+ v0 := b.NewValue0(v.Pos, OpMIPSSUB, t)
+ v0.AddArg2(x, y)
+ v.AddArg2(v0, c)
+ return true
+ }
+}
+func rewriteValueMIPS_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVWconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVWconst [0]) (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVWconst [0]) (MOVBstore [1] ptr (MOVWconst [0]) (MOVBstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPSMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVWconst [0]) (MOVHstore [2] ptr (MOVWconst [0]) (MOVHstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [12] ptr (MOVWconst [0]) (MOVWstore [8] ptr (MOVWconst [0]) (MOVWstore [4] ptr (MOVWconst [0]) (MOVWstore [0] ptr (MOVWconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPSMOVWstore)
+ v.AuxInt = int32ToAuxInt(12)
+ v0 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(4)
+ v3 := b.NewValue0(v.Pos, OpMIPSMOVWstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 16 || t.Alignment()%4 != 0)
+ // result: (LoweredZero [int32(t.Alignment())] ptr (ADDconst <ptr.Type> ptr [int32(s-moveSize(t.Alignment(), config))]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s > 16 || t.Alignment()%4 != 0) {
+ break
+ }
+ v.reset(OpMIPSLoweredZero)
+ v.AuxInt = int32ToAuxInt(int32(t.Alignment()))
+ v0 := b.NewValue0(v.Pos, OpMIPSADDconst, ptr.Type)
+ v0.AuxInt = int32ToAuxInt(int32(s - moveSize(t.Alignment(), config)))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS_OpZeromask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zeromask x)
+ // result: (NEG (SGTU x (MOVWconst [0])))
+ for {
+ x := v_0
+ v.reset(OpMIPSNEG)
+ v0 := b.NewValue0(v.Pos, OpMIPSSGTU, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpMIPSMOVWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockMIPS(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPSEQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSNE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (EQ (SGTUzero x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGEZ, x)
+ return true
+ }
+ // match: (EQ (SGTzero x) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLEZ, x)
+ return true
+ }
+ // match: (EQ (MOVWconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGEZ:
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSGTZ:
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPSNE, cond)
+ return true
+ }
+ case BlockMIPSLEZ:
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVWconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSLTZ:
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVWconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPSNE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPSFPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPSFPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGT {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUzero _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPSXORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPSSGTUzero {
+ break
+ }
+ b.resetWithControl(BlockMIPSEQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSEQ, x)
+ return true
+ }
+ // match: (NE (SGTUzero x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPSSGTUzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSNE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSLTZ, x)
+ return true
+ }
+ // match: (NE (SGTzero x) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPSSGTzero {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPSGTZ, x)
+ return true
+ }
+ // match: (NE (MOVWconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVWconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPSMOVWconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteMIPS64.go b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
new file mode 100644
index 0000000..6a0fd3a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteMIPS64.go
@@ -0,0 +1,8141 @@
+// Code generated from gen/MIPS64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValueMIPS64(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd32F:
+ v.Op = OpMIPS64ADDF
+ return true
+ case OpAdd64:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAdd64F:
+ v.Op = OpMIPS64ADDD
+ return true
+ case OpAdd8:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddPtr:
+ v.Op = OpMIPS64ADDV
+ return true
+ case OpAddr:
+ return rewriteValueMIPS64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd32:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd64:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAnd8:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAndB:
+ v.Op = OpMIPS64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpMIPS64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpMIPS64LoweredAtomicAdd64
+ return true
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueMIPS64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpMIPS64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpMIPS64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpMIPS64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpMIPS64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpMIPS64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpMIPS64LoweredAtomicLoad64
+ return true
+ case OpAtomicStore32:
+ v.Op = OpMIPS64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpMIPS64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpMIPS64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueMIPS64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpMIPS64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValueMIPS64_OpCom16(v)
+ case OpCom32:
+ return rewriteValueMIPS64_OpCom32(v)
+ case OpCom64:
+ return rewriteValueMIPS64_OpCom64(v)
+ case OpCom8:
+ return rewriteValueMIPS64_OpCom8(v)
+ case OpConst16:
+ return rewriteValueMIPS64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueMIPS64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueMIPS64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueMIPS64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueMIPS64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueMIPS64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueMIPS64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueMIPS64_OpConstNil(v)
+ case OpCvt32Fto32:
+ v.Op = OpMIPS64TRUNCFW
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpMIPS64TRUNCFV
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpMIPS64MOVFD
+ return true
+ case OpCvt32to32F:
+ v.Op = OpMIPS64MOVWF
+ return true
+ case OpCvt32to64F:
+ v.Op = OpMIPS64MOVWD
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpMIPS64TRUNCDW
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpMIPS64MOVDF
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpMIPS64TRUNCDV
+ return true
+ case OpCvt64to32F:
+ v.Op = OpMIPS64MOVVF
+ return true
+ case OpCvt64to64F:
+ v.Op = OpMIPS64MOVVD
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueMIPS64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueMIPS64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueMIPS64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpMIPS64DIVF
+ return true
+ case OpDiv32u:
+ return rewriteValueMIPS64_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueMIPS64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpMIPS64DIVD
+ return true
+ case OpDiv64u:
+ return rewriteValueMIPS64_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValueMIPS64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueMIPS64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueMIPS64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueMIPS64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueMIPS64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueMIPS64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueMIPS64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueMIPS64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueMIPS64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueMIPS64_OpEqPtr(v)
+ case OpGetCallerPC:
+ v.Op = OpMIPS64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpMIPS64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpMIPS64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueMIPS64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueMIPS64_OpHmul32u(v)
+ case OpHmul64:
+ return rewriteValueMIPS64_OpHmul64(v)
+ case OpHmul64u:
+ return rewriteValueMIPS64_OpHmul64u(v)
+ case OpInterCall:
+ v.Op = OpMIPS64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueMIPS64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueMIPS64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueMIPS64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueMIPS64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueMIPS64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueMIPS64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueMIPS64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueMIPS64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueMIPS64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueMIPS64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueMIPS64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueMIPS64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueMIPS64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueMIPS64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueMIPS64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueMIPS64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueMIPS64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueMIPS64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueMIPS64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueMIPS64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueMIPS64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueMIPS64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueMIPS64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueMIPS64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueMIPS64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueMIPS64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueMIPS64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueMIPS64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueMIPS64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueMIPS64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueMIPS64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueMIPS64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueMIPS64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueMIPS64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueMIPS64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueMIPS64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueMIPS64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueMIPS64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueMIPS64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueMIPS64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueMIPS64_OpLsh8x8(v)
+ case OpMIPS64ADDV:
+ return rewriteValueMIPS64_OpMIPS64ADDV(v)
+ case OpMIPS64ADDVconst:
+ return rewriteValueMIPS64_OpMIPS64ADDVconst(v)
+ case OpMIPS64AND:
+ return rewriteValueMIPS64_OpMIPS64AND(v)
+ case OpMIPS64ANDconst:
+ return rewriteValueMIPS64_OpMIPS64ANDconst(v)
+ case OpMIPS64LoweredAtomicAdd32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v)
+ case OpMIPS64LoweredAtomicAdd64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v)
+ case OpMIPS64LoweredAtomicStore32:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v)
+ case OpMIPS64LoweredAtomicStore64:
+ return rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v)
+ case OpMIPS64MOVBUload:
+ return rewriteValueMIPS64_OpMIPS64MOVBUload(v)
+ case OpMIPS64MOVBUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBUreg(v)
+ case OpMIPS64MOVBload:
+ return rewriteValueMIPS64_OpMIPS64MOVBload(v)
+ case OpMIPS64MOVBreg:
+ return rewriteValueMIPS64_OpMIPS64MOVBreg(v)
+ case OpMIPS64MOVBstore:
+ return rewriteValueMIPS64_OpMIPS64MOVBstore(v)
+ case OpMIPS64MOVBstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVBstorezero(v)
+ case OpMIPS64MOVDload:
+ return rewriteValueMIPS64_OpMIPS64MOVDload(v)
+ case OpMIPS64MOVDstore:
+ return rewriteValueMIPS64_OpMIPS64MOVDstore(v)
+ case OpMIPS64MOVFload:
+ return rewriteValueMIPS64_OpMIPS64MOVFload(v)
+ case OpMIPS64MOVFstore:
+ return rewriteValueMIPS64_OpMIPS64MOVFstore(v)
+ case OpMIPS64MOVHUload:
+ return rewriteValueMIPS64_OpMIPS64MOVHUload(v)
+ case OpMIPS64MOVHUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHUreg(v)
+ case OpMIPS64MOVHload:
+ return rewriteValueMIPS64_OpMIPS64MOVHload(v)
+ case OpMIPS64MOVHreg:
+ return rewriteValueMIPS64_OpMIPS64MOVHreg(v)
+ case OpMIPS64MOVHstore:
+ return rewriteValueMIPS64_OpMIPS64MOVHstore(v)
+ case OpMIPS64MOVHstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVHstorezero(v)
+ case OpMIPS64MOVVload:
+ return rewriteValueMIPS64_OpMIPS64MOVVload(v)
+ case OpMIPS64MOVVnop:
+ return rewriteValueMIPS64_OpMIPS64MOVVnop(v)
+ case OpMIPS64MOVVreg:
+ return rewriteValueMIPS64_OpMIPS64MOVVreg(v)
+ case OpMIPS64MOVVstore:
+ return rewriteValueMIPS64_OpMIPS64MOVVstore(v)
+ case OpMIPS64MOVVstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVVstorezero(v)
+ case OpMIPS64MOVWUload:
+ return rewriteValueMIPS64_OpMIPS64MOVWUload(v)
+ case OpMIPS64MOVWUreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWUreg(v)
+ case OpMIPS64MOVWload:
+ return rewriteValueMIPS64_OpMIPS64MOVWload(v)
+ case OpMIPS64MOVWreg:
+ return rewriteValueMIPS64_OpMIPS64MOVWreg(v)
+ case OpMIPS64MOVWstore:
+ return rewriteValueMIPS64_OpMIPS64MOVWstore(v)
+ case OpMIPS64MOVWstorezero:
+ return rewriteValueMIPS64_OpMIPS64MOVWstorezero(v)
+ case OpMIPS64NEGV:
+ return rewriteValueMIPS64_OpMIPS64NEGV(v)
+ case OpMIPS64NOR:
+ return rewriteValueMIPS64_OpMIPS64NOR(v)
+ case OpMIPS64NORconst:
+ return rewriteValueMIPS64_OpMIPS64NORconst(v)
+ case OpMIPS64OR:
+ return rewriteValueMIPS64_OpMIPS64OR(v)
+ case OpMIPS64ORconst:
+ return rewriteValueMIPS64_OpMIPS64ORconst(v)
+ case OpMIPS64SGT:
+ return rewriteValueMIPS64_OpMIPS64SGT(v)
+ case OpMIPS64SGTU:
+ return rewriteValueMIPS64_OpMIPS64SGTU(v)
+ case OpMIPS64SGTUconst:
+ return rewriteValueMIPS64_OpMIPS64SGTUconst(v)
+ case OpMIPS64SGTconst:
+ return rewriteValueMIPS64_OpMIPS64SGTconst(v)
+ case OpMIPS64SLLV:
+ return rewriteValueMIPS64_OpMIPS64SLLV(v)
+ case OpMIPS64SLLVconst:
+ return rewriteValueMIPS64_OpMIPS64SLLVconst(v)
+ case OpMIPS64SRAV:
+ return rewriteValueMIPS64_OpMIPS64SRAV(v)
+ case OpMIPS64SRAVconst:
+ return rewriteValueMIPS64_OpMIPS64SRAVconst(v)
+ case OpMIPS64SRLV:
+ return rewriteValueMIPS64_OpMIPS64SRLV(v)
+ case OpMIPS64SRLVconst:
+ return rewriteValueMIPS64_OpMIPS64SRLVconst(v)
+ case OpMIPS64SUBV:
+ return rewriteValueMIPS64_OpMIPS64SUBV(v)
+ case OpMIPS64SUBVconst:
+ return rewriteValueMIPS64_OpMIPS64SUBVconst(v)
+ case OpMIPS64XOR:
+ return rewriteValueMIPS64_OpMIPS64XOR(v)
+ case OpMIPS64XORconst:
+ return rewriteValueMIPS64_OpMIPS64XORconst(v)
+ case OpMod16:
+ return rewriteValueMIPS64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueMIPS64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueMIPS64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueMIPS64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueMIPS64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValueMIPS64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValueMIPS64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueMIPS64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueMIPS64_OpMove(v)
+ case OpMul16:
+ return rewriteValueMIPS64_OpMul16(v)
+ case OpMul32:
+ return rewriteValueMIPS64_OpMul32(v)
+ case OpMul32F:
+ v.Op = OpMIPS64MULF
+ return true
+ case OpMul64:
+ return rewriteValueMIPS64_OpMul64(v)
+ case OpMul64F:
+ v.Op = OpMIPS64MULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpMIPS64MULVU
+ return true
+ case OpMul8:
+ return rewriteValueMIPS64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg32F:
+ v.Op = OpMIPS64NEGF
+ return true
+ case OpNeg64:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeg64F:
+ v.Op = OpMIPS64NEGD
+ return true
+ case OpNeg8:
+ v.Op = OpMIPS64NEGV
+ return true
+ case OpNeq16:
+ return rewriteValueMIPS64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueMIPS64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueMIPS64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueMIPS64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueMIPS64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueMIPS64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueMIPS64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpMIPS64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueMIPS64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueMIPS64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr32:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr64:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOr8:
+ v.Op = OpMIPS64OR
+ return true
+ case OpOrB:
+ v.Op = OpMIPS64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueMIPS64_OpPanicBounds(v)
+ case OpRotateLeft16:
+ return rewriteValueMIPS64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueMIPS64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueMIPS64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueMIPS64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueMIPS64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueMIPS64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueMIPS64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueMIPS64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueMIPS64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueMIPS64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueMIPS64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueMIPS64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueMIPS64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueMIPS64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueMIPS64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueMIPS64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueMIPS64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueMIPS64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueMIPS64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueMIPS64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueMIPS64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueMIPS64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueMIPS64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueMIPS64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueMIPS64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueMIPS64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueMIPS64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueMIPS64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueMIPS64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueMIPS64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueMIPS64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueMIPS64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueMIPS64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueMIPS64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueMIPS64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueMIPS64_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValueMIPS64_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueMIPS64_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpMIPS64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpMIPS64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpMIPS64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueMIPS64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpMIPS64SQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpMIPS64SQRTF
+ return true
+ case OpStaticCall:
+ v.Op = OpMIPS64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueMIPS64_OpStore(v)
+ case OpSub16:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub32F:
+ v.Op = OpMIPS64SUBF
+ return true
+ case OpSub64:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSub64F:
+ v.Op = OpMIPS64SUBD
+ return true
+ case OpSub8:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpSubPtr:
+ v.Op = OpMIPS64SUBV
+ return true
+ case OpTailCall:
+ v.Op = OpMIPS64CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpMIPS64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor32:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor64:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpXor8:
+ v.Op = OpMIPS64XOR
+ return true
+ case OpZero:
+ return rewriteValueMIPS64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpMIPS64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpMIPS64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpMIPS64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpMIPS64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADDV (SRLVconst <t> (SUBV <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64ADDV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64SRLVconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SUBV, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (NOR (MOVVconst [0]) x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64NOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst32F(v *Value) bool {
+ // match: (Const32F [val])
+ // result: (MOVFconst [float64(val)])
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpMIPS64MOVFconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst64F(v *Value) bool {
+ // match: (Const64F [val])
+ // result: (MOVDconst [float64(val)])
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpMIPS64MOVDconst)
+ v.AuxInt = float64ToAuxInt(float64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVVconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVVconst [int64(b2i(t))])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(t)))
+ return true
+ }
+}
+func rewriteValueMIPS64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVVconst [0])
+ for {
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (Select1 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (Select1 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (Select1 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (Select1 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64 x y)
+ // result: (Select1 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div64u x y)
+ // result: (Select1 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (Select1 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (Select1 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (FPFlagTrue (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (FPFlagTrue (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SGTU (MOVVconst [1]) (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (XOR (MOVVconst [1]) (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SGTU (MOVVconst [1]) (XOR x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAVconst (Select1 <typ.Int64> (MULV (SignExt32to64 x) (SignExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLVconst (Select1 <typ.UInt64> (MULVU (ZeroExt32to64 x) (ZeroExt32to64 y))) [32])
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpSelect1, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64 x y)
+ // result: (Select0 (MULV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpHmul64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul64u x y)
+ // result: (Select0 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds idx len)
+ // result: (SGTU len idx)
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(len, idx)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil ptr)
+ // result: (SGTU ptr (MOVVconst [0]))
+ for {
+ ptr := v_0
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (XOR (MOVVconst [1]) (SGTU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(idx, len)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FPFlagTrue (CMPGEF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGEF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (XOR (MOVVconst [1]) (SGT x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FPFlagTrue (CMPGED y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGED, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v1.AddArg2(x, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (XOR (MOVVconst [1]) (SGT (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGT, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (XOR (MOVVconst [1]) (SGTU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64XOR)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SGT (SignExt16to64 y) (SignExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SGTU (ZeroExt16to64 y) (ZeroExt16to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SGT (SignExt32to64 y) (SignExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FPFlagTrue (CMPGTF y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTF, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SGTU (ZeroExt32to64 y) (ZeroExt32to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64 x y)
+ // result: (SGT y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FPFlagTrue (CMPGTD y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagTrue)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPGTD, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U x y)
+ // result: (SGTU y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SGT (SignExt8to64 y) (SignExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SGTU (ZeroExt8to64 y) (ZeroExt8to64 x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVVload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (MOVFload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpMIPS64MOVVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SLLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SLLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SLLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SLLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SLLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMIPS64ADDV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDVconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDV x (NEGV y))
+ // result: (SUBV x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64NEGV {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpMIPS64SUBV)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ADDVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDVconst [off1] (MOVVaddr [off2] {sym} ptr))
+ // cond: is32Bit(off1+int64(off2))
+ // result: (MOVVaddr [int32(off1)+int32(off2)] {sym} ptr)
+ for {
+ off1 := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ if !(is32Bit(off1 + int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off1) + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (ADDVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c+d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ // match: (ADDVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDVconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDVconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [0] _)
+ // result: (MOVVconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd32 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst32 [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicAdd64 ptr (MOVVconst [c]) mem)
+ // cond: is32Bit(c)
+ // result: (LoweredAtomicAddconst64 [c] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64LoweredAtomicAddconst64)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore32 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero32 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero32)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64LoweredAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LoweredAtomicStore64 ptr (MOVVconst [0]) mem)
+ // result: (LoweredAtomicStorezero64 ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64LoweredAtomicStorezero64)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read8(sym, int64(off)))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, int64(off))))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVFload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVFload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVFload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVFload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVFstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVFstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVFstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVFstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVFstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVVload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVnop (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVVreg x)
+ // cond: x.Uses == 1
+ // result: (MOVVnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVVnop)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVVreg (MOVVconst [c]))
+ // result: (MOVVconst [c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVVstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVVstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVVstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVVstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVVstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVVstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVVstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVVstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWUload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVWload [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off] {sym} (SB) _)
+ // cond: symIsRO(sym)
+ // result: (MOVVconst [int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpSB || !(symIsRO(sym)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, int64(off), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHUload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWload {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVBUreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVHreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVVreg x)
+ for {
+ x := v_0
+ if x.Op != OpMIPS64MOVWreg {
+ break
+ }
+ v.reset(OpMIPS64MOVVreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (MOVVconst [c]))
+ // result: (MOVVconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDVconst [off2] ptr) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVVconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpMIPS64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} (ADDVconst [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} (MOVVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+int32(off2)] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpMIPS64MOVVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NEGV(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGV (MOVVconst [c]))
+ // result: (MOVVconst [-c])
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (NORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64NORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [^(c|d)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVVconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ // match: (ORconst [c] (ORconst [d] x))
+ // cond: is32Bit(c|d)
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c | d)) {
+ break
+ }
+ v.reset(OpMIPS64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGT(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGT (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SGTU (MOVVconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SGTUconst [c] x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SGTUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)>uint64(d)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) > uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVVconst [d]))
+ // cond: uint64(c)<=uint64(d)
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(c) <= uint64(d)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVBUreg _))
+ // cond: 0xff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (MOVHUreg _))
+ // cond: 0xffff < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (ANDconst [m] _))
+ // cond: uint64(m) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(m) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTUconst [c] (SRLVconst _ [d]))
+ // cond: 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SGTconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c>d
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c > d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVVconst [d]))
+ // cond: c<=d
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(c <= d) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: 0x7f < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(0x7f < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBreg _))
+ // cond: c <= -0x80
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBreg || !(c <= -0x80) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: 0xff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVBUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVBUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: 0x7fff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(0x7fff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHreg _))
+ // cond: c <= -0x8000
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHreg || !(c <= -0x8000) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: 0xffff < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (MOVHUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVHUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (MOVWUreg _))
+ // cond: c < 0
+ // result: (MOVVconst [0])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVWUreg || !(c < 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SGTconst [c] (ANDconst [m] _))
+ // cond: 0 <= m && m < c
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= m && m < c) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (SGTconst [c] (SRLVconst _ [d]))
+ // cond: 0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)
+ // result: (MOVVconst [1])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SRLVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ if !(0 <= c && 0 < d && d <= 63 && 0xffffffffffffffff>>uint64(d) < uint64(c)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLLV x (MOVVconst [c]))
+ // result: (SLLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SLLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d<<uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d << uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAV x (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (SRAVconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAV x (MOVVconst [c]))
+ // result: (SRAVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRAVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d>>uint64(c)])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRLV _ (MOVVconst [c]))
+ // cond: uint64(c)>=64
+ // result: (MOVVconst [0])
+ for {
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRLV x (MOVVconst [c]))
+ // result: (SRLVconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SRLVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [int64(uint64(d)>>uint64(c))])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(d) >> uint64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBV(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBV x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBVconst [c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpMIPS64SUBVconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBV x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBV (MOVVconst [0]) x)
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64SUBVconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBVconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBVconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [d-c])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(d - c)
+ return true
+ }
+ // match: (SUBVconst [c] (SUBVconst [d] x))
+ // cond: is32Bit(-c-d)
+ // result: (ADDVconst [-c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64SUBVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c - d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBVconst [c] (ADDVconst [d] x))
+ // cond: is32Bit(-c+d)
+ // result: (ADDVconst [-c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64ADDVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c + d)) {
+ break
+ }
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(-c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVVconst [c]))
+ // cond: is32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVVconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMIPS64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [-1] x)
+ // result: (NORconst [0] x)
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.reset(OpMIPS64NORconst)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVVconst [d]))
+ // result: (MOVVconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ // match: (XORconst [c] (XORconst [d] x))
+ // cond: is32Bit(c^d)
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpMIPS64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c ^ d)) {
+ break
+ }
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Select0 (DIVV (SignExt16to64 x) (SignExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Select0 (DIVVU (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (Select0 (DIVV (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (Select0 (DIVVU (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+ // result: (Select0 (DIVV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // result: (Select0 (DIVVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Select0 (DIVV (SignExt8to64 x) (SignExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVV, types.NewTuple(typ.Int64, typ.Int64))
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Select0 (DIVVU (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpMIPS64DIVVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore dst (MOVVload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] dst (MOVVload [16] src mem) (MOVVstore [8] dst (MOVVload [8] src mem) (MOVVstore dst (MOVVload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s >= 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpMIPS64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] {t} dst src mem)
+ // cond: s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0
+ // result: (LoweredMove [t.Alignment()] dst src (ADDVconst <src.Type> src [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 24 && logLargeCopy(v, s) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (Select1 (MULVU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (SGTU (XOR (ZeroExt16to32 x) (ZeroExt16to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SGTU (XOR (ZeroExt32to64 x) (ZeroExt32to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (FPFlagFalse (CMPEQF x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQF, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (FPFlagFalse (CMPEQD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64FPFlagFalse)
+ v0 := b.NewValue0(v.Pos, OpMIPS64CMPEQD, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SGTU (XOR (ZeroExt8to64 x) (ZeroExt8to64 y)) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SGTU (XOR x y) (MOVVconst [0]))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SGTU)
+ v0 := b.NewValue0(v.Pos, OpMIPS64XOR, typ.UInt64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpMIPS64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADDVconst [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpMIPS64ADDVconst)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpMIPS64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVVconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVVconst [c&15])) (Rsh16Ux64 <t> x (MOVVconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVVconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVVconst [c&31])) (Rsh32Ux64 <t> x (MOVVconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVVconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVVconst [c&63])) (Rsh64Ux64 <t> x (MOVVconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVVconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVVconst [c&7])) (Rsh8Ux64 <t> x (MOVVconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt16to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt16to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRAV (SignExt16to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt32to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt32to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRAV (SignExt32to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> x (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> x (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> x y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v3.AddArg2(x, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> x (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4.AddArg2(x, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(y, v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRAV x (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt16to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt32to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) y)) (SRLV <t> (ZeroExt8to64 x) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v1.AddArg2(v2, y)
+ v0.AddArg(v1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(x)
+ v3.AddArg2(v4, y)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (NEGV <t> (SGTU (MOVVconst <typ.UInt64> [64]) (ZeroExt8to64 y))) (SRLV <t> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64AND)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v1 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v0.AddArg(v1)
+ v4 := b.NewValue0(v.Pos, OpMIPS64SRLV, t)
+ v5 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v5.AddArg(x)
+ v4.AddArg2(v5, v3)
+ v.AddArg2(v0, v4)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt16to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt16to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt32to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt32to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU y (MOVVconst <typ.UInt64> [63]))) y))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(y, v4)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRAV (SignExt8to64 x) (OR <t> (NEGV <t> (SGTU (ZeroExt8to64 y) (MOVVconst <typ.UInt64> [63]))) (ZeroExt8to64 y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpMIPS64SRAV)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpMIPS64OR, t)
+ v2 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v3 := b.NewValue0(v.Pos, OpMIPS64SGTU, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg2(v2, v4)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Mul64uover x y))
+ // result: (Select1 <typ.UInt64> (MULVU x y))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSelect1)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (DIVVU _ (MOVVconst [1])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Select0 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (ANDconst [c-1] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64ANDconst)
+ v.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select0 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c%d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Select0 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)%uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Mul64uover x y))
+ // result: (SGTU <typ.Bool> (Select0 <typ.UInt64> (MULVU x y)) (MOVVconst <typ.UInt64> [0]))
+ for {
+ if v_0.Op != OpMul64uover {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpMIPS64SGTU)
+ v.Type = typ.Bool
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MULVU, types.NewTuple(typ.UInt64, typ.UInt64))
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Select1 (MULVU x (MOVVconst [-1])))
+ // result: (NEGV x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpMIPS64NEGV)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU _ (MOVVconst [0])))
+ // result: (MOVVconst [0])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (MULVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SLLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpMIPS64SLLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [1])))
+ // result: x
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Select1 (DIVVU x (MOVVconst [c])))
+ // cond: isPowerOfTwo64(c)
+ // result: (SRLVconst [log64(c)] x)
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpMIPS64SRLVconst)
+ v.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (Select1 (MULVU (MOVVconst [c]) (MOVVconst [d])))
+ // result: (MOVVconst [c*d])
+ for {
+ if v_0.Op != OpMIPS64MULVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ continue
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Select1 (DIVV (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [c/d])
+ for {
+ if v_0.Op != OpMIPS64DIVV {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Select1 (DIVVU (MOVVconst [c]) (MOVVconst [d])))
+ // cond: d != 0
+ // result: (MOVVconst [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpMIPS64DIVVU {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpMIPS64MOVVconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRAVconst (NEGV <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpMIPS64SRAVconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpMIPS64NEGV, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueMIPS64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpMIPS64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVVstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (MOVFstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVFstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpMIPS64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueMIPS64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVVconst [0]) (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore ptr (MOVVconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVVconst [0]) (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(0)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVVconst [0]) (MOVBstore [1] ptr (MOVVconst [0]) (MOVBstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpMIPS64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVVconst [0]) (MOVHstore [2] ptr (MOVVconst [0]) (MOVHstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVVconst [0]) (MOVWstore [4] ptr (MOVVconst [0]) (MOVWstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVWstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVVstore [16] ptr (MOVVconst [0]) (MOVVstore [8] ptr (MOVVconst [0]) (MOVVstore [0] ptr (MOVVconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpMIPS64MOVVstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpMIPS64MOVVconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpMIPS64MOVVstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 24 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpMIPS64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: (s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0
+ // result: (LoweredZero [t.Alignment()] ptr (ADDVconst <ptr.Type> ptr [s-moveSize(t.Alignment(), config)]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !((s > 8*128 || config.noDuffDevice) || t.Alignment()%8 != 0) {
+ break
+ }
+ v.reset(OpMIPS64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpMIPS64ADDVconst, ptr.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(ptr)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockMIPS64(b *Block) bool {
+ switch b.Kind {
+ case BlockMIPS64EQ:
+ // match: (EQ (FPFlagTrue cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (EQ (FPFlagFalse cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, cmp)
+ return true
+ }
+ // match: (EQ (SGTUconst [1] x) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (EQ (SGTU x (MOVVconst [0])) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (EQ (SGTconst [0] x) yes no)
+ // result: (GEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64GEZ, x)
+ return true
+ }
+ // match: (EQ (SGT x (MOVVconst [0])) yes no)
+ // result: (LEZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64LEZ, x)
+ return true
+ }
+ // match: (EQ (MOVVconst [0]) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GEZ:
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GEZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64GTZ:
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GTZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (NE cond yes no)
+ for {
+ cond := b.Controls[0]
+ b.resetWithControl(BlockMIPS64NE, cond)
+ return true
+ }
+ case BlockMIPS64LEZ:
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c <= 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c <= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LEZ (MOVVconst [c]) yes no)
+ // cond: c > 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64LTZ:
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c < 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c < 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LTZ (MOVVconst [c]) yes no)
+ // cond: c >= 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c >= 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockMIPS64NE:
+ // match: (NE (FPFlagTrue cmp) yes no)
+ // result: (FPT cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagTrue {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPT, cmp)
+ return true
+ }
+ // match: (NE (FPFlagFalse cmp) yes no)
+ // result: (FPF cmp yes no)
+ for b.Controls[0].Op == OpMIPS64FPFlagFalse {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64FPF, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGT _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGT {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTU _ _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTU {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (XORconst [1] cmp:(SGTUconst _)) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpMIPS64XORconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ cmp := v_0.Args[0]
+ if cmp.Op != OpMIPS64SGTUconst {
+ break
+ }
+ b.resetWithControl(BlockMIPS64EQ, cmp)
+ return true
+ }
+ // match: (NE (SGTUconst [1] x) yes no)
+ // result: (EQ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64EQ, x)
+ return true
+ }
+ // match: (NE (SGTU x (MOVVconst [0])) yes no)
+ // result: (NE x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTU {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64NE, x)
+ return true
+ }
+ // match: (NE (SGTconst [0] x) yes no)
+ // result: (LTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGTconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_0.Args[0]
+ b.resetWithControl(BlockMIPS64LTZ, x)
+ return true
+ }
+ // match: (NE (SGT x (MOVVconst [0])) yes no)
+ // result: (GTZ x yes no)
+ for b.Controls[0].Op == OpMIPS64SGT {
+ v_0 := b.Controls[0]
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpMIPS64MOVVconst || auxIntToInt64(v_0_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockMIPS64GTZ, x)
+ return true
+ }
+ // match: (NE (MOVVconst [0]) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (MOVVconst [c]) yes no)
+ // cond: c != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpMIPS64MOVVconst {
+ v_0 := b.Controls[0]
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritePPC64.go b/src/cmd/compile/internal/ssa/rewritePPC64.go
new file mode 100644
index 0000000..c7bcc24
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritePPC64.go
@@ -0,0 +1,18471 @@
+// Code generated from gen/PPC64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValuePPC64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpPPC64FABS
+ return true
+ case OpAdd16:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpPPC64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpPPC64FADD
+ return true
+ case OpAdd64carry:
+ v.Op = OpPPC64LoweredAdd64Carry
+ return true
+ case OpAdd8:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpPPC64ADD
+ return true
+ case OpAddr:
+ return rewriteValuePPC64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd32:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd64:
+ v.Op = OpPPC64AND
+ return true
+ case OpAnd8:
+ v.Op = OpPPC64AND
+ return true
+ case OpAndB:
+ v.Op = OpPPC64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpPPC64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpPPC64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpPPC64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ v.Op = OpPPC64LoweredAtomicAnd8
+ return true
+ case OpAtomicCompareAndSwap32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValuePPC64_OpAtomicCompareAndSwap64(v)
+ case OpAtomicCompareAndSwapRel32:
+ return rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v)
+ case OpAtomicExchange32:
+ v.Op = OpPPC64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpPPC64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ return rewriteValuePPC64_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValuePPC64_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValuePPC64_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValuePPC64_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadAcq64:
+ return rewriteValuePPC64_OpAtomicLoadAcq64(v)
+ case OpAtomicLoadPtr:
+ return rewriteValuePPC64_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpPPC64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ v.Op = OpPPC64LoweredAtomicOr8
+ return true
+ case OpAtomicStore32:
+ return rewriteValuePPC64_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValuePPC64_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValuePPC64_OpAtomicStore8(v)
+ case OpAtomicStoreRel32:
+ return rewriteValuePPC64_OpAtomicStoreRel32(v)
+ case OpAtomicStoreRel64:
+ return rewriteValuePPC64_OpAtomicStoreRel64(v)
+ case OpAvg64u:
+ return rewriteValuePPC64_OpAvg64u(v)
+ case OpBitLen32:
+ return rewriteValuePPC64_OpBitLen32(v)
+ case OpBitLen64:
+ return rewriteValuePPC64_OpBitLen64(v)
+ case OpCeil:
+ v.Op = OpPPC64FCEIL
+ return true
+ case OpClosureCall:
+ v.Op = OpPPC64CALLclosure
+ return true
+ case OpCom16:
+ return rewriteValuePPC64_OpCom16(v)
+ case OpCom32:
+ return rewriteValuePPC64_OpCom32(v)
+ case OpCom64:
+ return rewriteValuePPC64_OpCom64(v)
+ case OpCom8:
+ return rewriteValuePPC64_OpCom8(v)
+ case OpCondSelect:
+ return rewriteValuePPC64_OpCondSelect(v)
+ case OpConst16:
+ return rewriteValuePPC64_OpConst16(v)
+ case OpConst32:
+ return rewriteValuePPC64_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpPPC64FMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValuePPC64_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpPPC64FMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValuePPC64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValuePPC64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValuePPC64_OpConstNil(v)
+ case OpCopysign:
+ return rewriteValuePPC64_OpCopysign(v)
+ case OpCtz16:
+ return rewriteValuePPC64_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuePPC64_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValuePPC64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCtz8:
+ return rewriteValuePPC64_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuePPC64_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuePPC64_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ v.Op = OpCopy
+ return true
+ case OpCvt32to32F:
+ return rewriteValuePPC64_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuePPC64_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuePPC64_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ v.Op = OpPPC64FRSP
+ return true
+ case OpCvt64Fto64:
+ return rewriteValuePPC64_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuePPC64_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuePPC64_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValuePPC64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuePPC64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuePPC64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpPPC64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpPPC64DIVWU
+ return true
+ case OpDiv64:
+ return rewriteValuePPC64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpPPC64FDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpPPC64DIVDU
+ return true
+ case OpDiv8:
+ return rewriteValuePPC64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuePPC64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuePPC64_OpEq16(v)
+ case OpEq32:
+ return rewriteValuePPC64_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuePPC64_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuePPC64_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuePPC64_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuePPC64_OpEq8(v)
+ case OpEqB:
+ return rewriteValuePPC64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValuePPC64_OpEqPtr(v)
+ case OpFMA:
+ v.Op = OpPPC64FMADD
+ return true
+ case OpFloor:
+ v.Op = OpPPC64FFLOOR
+ return true
+ case OpGetCallerPC:
+ v.Op = OpPPC64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpPPC64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpPPC64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ v.Op = OpPPC64MULHW
+ return true
+ case OpHmul32u:
+ v.Op = OpPPC64MULHWU
+ return true
+ case OpHmul64:
+ v.Op = OpPPC64MULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpPPC64MULHDU
+ return true
+ case OpInterCall:
+ v.Op = OpPPC64CALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValuePPC64_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuePPC64_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuePPC64_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuePPC64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuePPC64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuePPC64_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuePPC64_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuePPC64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuePPC64_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuePPC64_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuePPC64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuePPC64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuePPC64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuePPC64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuePPC64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuePPC64_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuePPC64_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuePPC64_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuePPC64_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuePPC64_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuePPC64_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuePPC64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuePPC64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuePPC64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValuePPC64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValuePPC64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuePPC64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuePPC64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuePPC64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuePPC64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuePPC64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuePPC64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuePPC64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuePPC64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuePPC64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuePPC64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuePPC64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuePPC64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuePPC64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuePPC64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuePPC64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuePPC64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuePPC64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuePPC64_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuePPC64_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuePPC64_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuePPC64_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuePPC64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuePPC64_OpMod8u(v)
+ case OpMove:
+ return rewriteValuePPC64_OpMove(v)
+ case OpMul16:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpMul32F:
+ v.Op = OpPPC64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpPPC64MULLD
+ return true
+ case OpMul64F:
+ v.Op = OpPPC64FMUL
+ return true
+ case OpMul64uhilo:
+ v.Op = OpPPC64LoweredMuluhilo
+ return true
+ case OpMul8:
+ v.Op = OpPPC64MULLW
+ return true
+ case OpNeg16:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg64:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpPPC64FNEG
+ return true
+ case OpNeg8:
+ v.Op = OpPPC64NEG
+ return true
+ case OpNeq16:
+ return rewriteValuePPC64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuePPC64_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuePPC64_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuePPC64_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuePPC64_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuePPC64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpPPC64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValuePPC64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpPPC64LoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValuePPC64_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuePPC64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr32:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr64:
+ v.Op = OpPPC64OR
+ return true
+ case OpOr8:
+ v.Op = OpPPC64OR
+ return true
+ case OpOrB:
+ v.Op = OpPPC64OR
+ return true
+ case OpPPC64ADD:
+ return rewriteValuePPC64_OpPPC64ADD(v)
+ case OpPPC64ADDconst:
+ return rewriteValuePPC64_OpPPC64ADDconst(v)
+ case OpPPC64AND:
+ return rewriteValuePPC64_OpPPC64AND(v)
+ case OpPPC64ANDN:
+ return rewriteValuePPC64_OpPPC64ANDN(v)
+ case OpPPC64ANDconst:
+ return rewriteValuePPC64_OpPPC64ANDconst(v)
+ case OpPPC64CLRLSLDI:
+ return rewriteValuePPC64_OpPPC64CLRLSLDI(v)
+ case OpPPC64CMP:
+ return rewriteValuePPC64_OpPPC64CMP(v)
+ case OpPPC64CMPU:
+ return rewriteValuePPC64_OpPPC64CMPU(v)
+ case OpPPC64CMPUconst:
+ return rewriteValuePPC64_OpPPC64CMPUconst(v)
+ case OpPPC64CMPW:
+ return rewriteValuePPC64_OpPPC64CMPW(v)
+ case OpPPC64CMPWU:
+ return rewriteValuePPC64_OpPPC64CMPWU(v)
+ case OpPPC64CMPWUconst:
+ return rewriteValuePPC64_OpPPC64CMPWUconst(v)
+ case OpPPC64CMPWconst:
+ return rewriteValuePPC64_OpPPC64CMPWconst(v)
+ case OpPPC64CMPconst:
+ return rewriteValuePPC64_OpPPC64CMPconst(v)
+ case OpPPC64Equal:
+ return rewriteValuePPC64_OpPPC64Equal(v)
+ case OpPPC64FABS:
+ return rewriteValuePPC64_OpPPC64FABS(v)
+ case OpPPC64FADD:
+ return rewriteValuePPC64_OpPPC64FADD(v)
+ case OpPPC64FADDS:
+ return rewriteValuePPC64_OpPPC64FADDS(v)
+ case OpPPC64FCEIL:
+ return rewriteValuePPC64_OpPPC64FCEIL(v)
+ case OpPPC64FFLOOR:
+ return rewriteValuePPC64_OpPPC64FFLOOR(v)
+ case OpPPC64FGreaterEqual:
+ return rewriteValuePPC64_OpPPC64FGreaterEqual(v)
+ case OpPPC64FGreaterThan:
+ return rewriteValuePPC64_OpPPC64FGreaterThan(v)
+ case OpPPC64FLessEqual:
+ return rewriteValuePPC64_OpPPC64FLessEqual(v)
+ case OpPPC64FLessThan:
+ return rewriteValuePPC64_OpPPC64FLessThan(v)
+ case OpPPC64FMOVDload:
+ return rewriteValuePPC64_OpPPC64FMOVDload(v)
+ case OpPPC64FMOVDstore:
+ return rewriteValuePPC64_OpPPC64FMOVDstore(v)
+ case OpPPC64FMOVSload:
+ return rewriteValuePPC64_OpPPC64FMOVSload(v)
+ case OpPPC64FMOVSstore:
+ return rewriteValuePPC64_OpPPC64FMOVSstore(v)
+ case OpPPC64FNEG:
+ return rewriteValuePPC64_OpPPC64FNEG(v)
+ case OpPPC64FSQRT:
+ return rewriteValuePPC64_OpPPC64FSQRT(v)
+ case OpPPC64FSUB:
+ return rewriteValuePPC64_OpPPC64FSUB(v)
+ case OpPPC64FSUBS:
+ return rewriteValuePPC64_OpPPC64FSUBS(v)
+ case OpPPC64FTRUNC:
+ return rewriteValuePPC64_OpPPC64FTRUNC(v)
+ case OpPPC64GreaterEqual:
+ return rewriteValuePPC64_OpPPC64GreaterEqual(v)
+ case OpPPC64GreaterThan:
+ return rewriteValuePPC64_OpPPC64GreaterThan(v)
+ case OpPPC64ISEL:
+ return rewriteValuePPC64_OpPPC64ISEL(v)
+ case OpPPC64ISELB:
+ return rewriteValuePPC64_OpPPC64ISELB(v)
+ case OpPPC64LessEqual:
+ return rewriteValuePPC64_OpPPC64LessEqual(v)
+ case OpPPC64LessThan:
+ return rewriteValuePPC64_OpPPC64LessThan(v)
+ case OpPPC64MFVSRD:
+ return rewriteValuePPC64_OpPPC64MFVSRD(v)
+ case OpPPC64MOVBZload:
+ return rewriteValuePPC64_OpPPC64MOVBZload(v)
+ case OpPPC64MOVBZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVBZloadidx(v)
+ case OpPPC64MOVBZreg:
+ return rewriteValuePPC64_OpPPC64MOVBZreg(v)
+ case OpPPC64MOVBreg:
+ return rewriteValuePPC64_OpPPC64MOVBreg(v)
+ case OpPPC64MOVBstore:
+ return rewriteValuePPC64_OpPPC64MOVBstore(v)
+ case OpPPC64MOVBstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVBstoreidx(v)
+ case OpPPC64MOVBstorezero:
+ return rewriteValuePPC64_OpPPC64MOVBstorezero(v)
+ case OpPPC64MOVDload:
+ return rewriteValuePPC64_OpPPC64MOVDload(v)
+ case OpPPC64MOVDloadidx:
+ return rewriteValuePPC64_OpPPC64MOVDloadidx(v)
+ case OpPPC64MOVDstore:
+ return rewriteValuePPC64_OpPPC64MOVDstore(v)
+ case OpPPC64MOVDstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVDstoreidx(v)
+ case OpPPC64MOVDstorezero:
+ return rewriteValuePPC64_OpPPC64MOVDstorezero(v)
+ case OpPPC64MOVHBRstore:
+ return rewriteValuePPC64_OpPPC64MOVHBRstore(v)
+ case OpPPC64MOVHZload:
+ return rewriteValuePPC64_OpPPC64MOVHZload(v)
+ case OpPPC64MOVHZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHZloadidx(v)
+ case OpPPC64MOVHZreg:
+ return rewriteValuePPC64_OpPPC64MOVHZreg(v)
+ case OpPPC64MOVHload:
+ return rewriteValuePPC64_OpPPC64MOVHload(v)
+ case OpPPC64MOVHloadidx:
+ return rewriteValuePPC64_OpPPC64MOVHloadidx(v)
+ case OpPPC64MOVHreg:
+ return rewriteValuePPC64_OpPPC64MOVHreg(v)
+ case OpPPC64MOVHstore:
+ return rewriteValuePPC64_OpPPC64MOVHstore(v)
+ case OpPPC64MOVHstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVHstoreidx(v)
+ case OpPPC64MOVHstorezero:
+ return rewriteValuePPC64_OpPPC64MOVHstorezero(v)
+ case OpPPC64MOVWBRstore:
+ return rewriteValuePPC64_OpPPC64MOVWBRstore(v)
+ case OpPPC64MOVWZload:
+ return rewriteValuePPC64_OpPPC64MOVWZload(v)
+ case OpPPC64MOVWZloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWZloadidx(v)
+ case OpPPC64MOVWZreg:
+ return rewriteValuePPC64_OpPPC64MOVWZreg(v)
+ case OpPPC64MOVWload:
+ return rewriteValuePPC64_OpPPC64MOVWload(v)
+ case OpPPC64MOVWloadidx:
+ return rewriteValuePPC64_OpPPC64MOVWloadidx(v)
+ case OpPPC64MOVWreg:
+ return rewriteValuePPC64_OpPPC64MOVWreg(v)
+ case OpPPC64MOVWstore:
+ return rewriteValuePPC64_OpPPC64MOVWstore(v)
+ case OpPPC64MOVWstoreidx:
+ return rewriteValuePPC64_OpPPC64MOVWstoreidx(v)
+ case OpPPC64MOVWstorezero:
+ return rewriteValuePPC64_OpPPC64MOVWstorezero(v)
+ case OpPPC64MTVSRD:
+ return rewriteValuePPC64_OpPPC64MTVSRD(v)
+ case OpPPC64MULLD:
+ return rewriteValuePPC64_OpPPC64MULLD(v)
+ case OpPPC64MULLW:
+ return rewriteValuePPC64_OpPPC64MULLW(v)
+ case OpPPC64NEG:
+ return rewriteValuePPC64_OpPPC64NEG(v)
+ case OpPPC64NOR:
+ return rewriteValuePPC64_OpPPC64NOR(v)
+ case OpPPC64NotEqual:
+ return rewriteValuePPC64_OpPPC64NotEqual(v)
+ case OpPPC64OR:
+ return rewriteValuePPC64_OpPPC64OR(v)
+ case OpPPC64ORN:
+ return rewriteValuePPC64_OpPPC64ORN(v)
+ case OpPPC64ORconst:
+ return rewriteValuePPC64_OpPPC64ORconst(v)
+ case OpPPC64ROTL:
+ return rewriteValuePPC64_OpPPC64ROTL(v)
+ case OpPPC64ROTLW:
+ return rewriteValuePPC64_OpPPC64ROTLW(v)
+ case OpPPC64ROTLWconst:
+ return rewriteValuePPC64_OpPPC64ROTLWconst(v)
+ case OpPPC64SLD:
+ return rewriteValuePPC64_OpPPC64SLD(v)
+ case OpPPC64SLDconst:
+ return rewriteValuePPC64_OpPPC64SLDconst(v)
+ case OpPPC64SLW:
+ return rewriteValuePPC64_OpPPC64SLW(v)
+ case OpPPC64SLWconst:
+ return rewriteValuePPC64_OpPPC64SLWconst(v)
+ case OpPPC64SRAD:
+ return rewriteValuePPC64_OpPPC64SRAD(v)
+ case OpPPC64SRAW:
+ return rewriteValuePPC64_OpPPC64SRAW(v)
+ case OpPPC64SRD:
+ return rewriteValuePPC64_OpPPC64SRD(v)
+ case OpPPC64SRW:
+ return rewriteValuePPC64_OpPPC64SRW(v)
+ case OpPPC64SRWconst:
+ return rewriteValuePPC64_OpPPC64SRWconst(v)
+ case OpPPC64SUB:
+ return rewriteValuePPC64_OpPPC64SUB(v)
+ case OpPPC64SUBFCconst:
+ return rewriteValuePPC64_OpPPC64SUBFCconst(v)
+ case OpPPC64XOR:
+ return rewriteValuePPC64_OpPPC64XOR(v)
+ case OpPPC64XORconst:
+ return rewriteValuePPC64_OpPPC64XORconst(v)
+ case OpPanicBounds:
+ return rewriteValuePPC64_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValuePPC64_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValuePPC64_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpPPC64POPCNTD
+ return true
+ case OpPopCount8:
+ return rewriteValuePPC64_OpPopCount8(v)
+ case OpPrefetchCache:
+ return rewriteValuePPC64_OpPrefetchCache(v)
+ case OpPrefetchCacheStreamed:
+ return rewriteValuePPC64_OpPrefetchCacheStreamed(v)
+ case OpRotateLeft16:
+ return rewriteValuePPC64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuePPC64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuePPC64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuePPC64_OpRotateLeft8(v)
+ case OpRound:
+ v.Op = OpPPC64FROUND
+ return true
+ case OpRound32F:
+ v.Op = OpPPC64LoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpPPC64LoweredRound64F
+ return true
+ case OpRsh16Ux16:
+ return rewriteValuePPC64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuePPC64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuePPC64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuePPC64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuePPC64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuePPC64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuePPC64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuePPC64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuePPC64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuePPC64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuePPC64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuePPC64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuePPC64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuePPC64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuePPC64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuePPC64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuePPC64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuePPC64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuePPC64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuePPC64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuePPC64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuePPC64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuePPC64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuePPC64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuePPC64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuePPC64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuePPC64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuePPC64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuePPC64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuePPC64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuePPC64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuePPC64_OpRsh8x8(v)
+ case OpSelectN:
+ return rewriteValuePPC64_OpSelectN(v)
+ case OpSignExt16to32:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpPPC64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpPPC64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpPPC64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValuePPC64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpPPC64FSQRT
+ return true
+ case OpSqrt32:
+ v.Op = OpPPC64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpPPC64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValuePPC64_OpStore(v)
+ case OpSub16:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpPPC64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpPPC64FSUB
+ return true
+ case OpSub8:
+ v.Op = OpPPC64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpPPC64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpPPC64CALLtail
+ return true
+ case OpTrunc:
+ v.Op = OpPPC64FTRUNC
+ return true
+ case OpTrunc16to8:
+ return rewriteValuePPC64_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuePPC64_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuePPC64_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuePPC64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuePPC64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuePPC64_OpTrunc64to8(v)
+ case OpWB:
+ v.Op = OpPPC64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor32:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor64:
+ v.Op = OpPPC64XOR
+ return true
+ case OpXor8:
+ v.Op = OpPPC64XOR
+ return true
+ case OpZero:
+ return rewriteValuePPC64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpPPC64MOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpPPC64MOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpPPC64MOVBZreg
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 [1] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicCompareAndSwapRel32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwapRel32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 [0] ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpPPC64LoweredAtomicCas32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (LoweredAtomicLoad32 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (LoweredAtomicLoad64 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (LoweredAtomicLoad8 [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (LoweredAtomicLoad32 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadAcq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq64 ptr mem)
+ // result: (LoweredAtomicLoad64 [0] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoad64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (LoweredAtomicLoadPtr [1] ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64LoweredAtomicLoadPtr)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore32 ptr val mem)
+ // result: (LoweredAtomicStore32 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore64 ptr val mem)
+ // result: (LoweredAtomicStore64 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStore8 ptr val mem)
+ // result: (LoweredAtomicStore8 [1] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore8)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (LoweredAtomicStore32 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore32)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAtomicStoreRel64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel64 ptr val mem)
+ // result: (LoweredAtomicStore64 [0] ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpPPC64LoweredAtomicStore64)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRDconst, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64SUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen32 x)
+ // result: (SUBFCconst [32] (CNTLZW <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZW, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUBFCconst [64] (CNTLZD <typ.Int> x))
+ for {
+ x := v_0
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(64)
+ v0 := b.NewValue0(v.Pos, OpPPC64CNTLZD, typ.Int)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 x)
+ // result: (NOR x x)
+ for {
+ x := v_0
+ v.reset(OpPPC64NOR)
+ v.AddArg2(x, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCondSelect(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (CondSelect x y bool)
+ // cond: flagArg(bool) == nil
+ // result: (ISEL [6] x y (Select1 <types.TypeFlags> (ANDCCconst [1] bool)))
+ for {
+ x := v_0
+ y := v_1
+ bool := v_2
+ if !(flagArg(bool) == nil) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDCCconst, types.NewTuple(typ.Int, types.TypeFlags))
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(bool)
+ v0.AddArg(v1)
+ v.AddArg3(x, y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValuePPC64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCopysign(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Copysign x y)
+ // result: (FCPSGN y x)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FCPSGN)
+ v.AddArg2(y, x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (POPCNTW (MOVHZreg (ANDN <typ.Int16> (ADDconst <typ.Int16> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int16)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int16)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // cond: buildcfg.GOPPC64<=8
+ // result: (POPCNTW (MOVWZreg (ANDN <typ.Int> (ADDconst <typ.Int> [-1] x) x)))
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz32 x)
+ // result: (CNTTZW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // cond: buildcfg.GOPPC64<=8
+ // result: (POPCNTD (ANDN <typ.Int64> (ADDconst <typ.Int64> [-1] x) x))
+ for {
+ x := v_0
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64POPCNTD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Ctz64 x)
+ // result: (CNTTZD x)
+ for {
+ x := v_0
+ v.reset(OpPPC64CNTTZD)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (POPCNTB (MOVBZreg (ANDN <typ.UInt8> (ADDconst <typ.UInt8> [-1] x) x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDN, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpPPC64ADDconst, typ.UInt8)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v2.AddArg(x)
+ v1.AddArg2(v2, x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (FCFIDS (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (FCFID (MTVSRD (SignExt32to64 x)))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto32 x)
+ // result: (MFVSRD (FCTIWZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIWZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64Fto64 x)
+ // result: (MFVSRD (FCTIDZ x))
+ for {
+ x := v_0
+ v.reset(OpPPC64MFVSRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCTIDZ, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to32F x)
+ // result: (FCFIDS (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFIDS)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt64to64F x)
+ // result: (FCFID (MTVSRD x))
+ for {
+ x := v_0
+ v.reset(OpPPC64FCFID)
+ v0 := b.NewValue0(v.Pos, OpPPC64MTVSRD, typ.Float64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 [false] x y)
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (DIVD x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64DIVWU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x y)
+ // result: (Equal (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32 x y)
+ // result: (Equal (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq32F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64F x y)
+ // result: (Equal (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (Equal (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 x y)
+ // result: (Equal (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (ANDconst [1] (EQV x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64EQV, typ.Int64)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (EqPtr x y)
+ // result: (Equal (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64Equal)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsInBounds idx len)
+ // result: (LessThan (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsNonNil ptr)
+ // result: (NotEqual (CMPconst [0] ptr))
+ for {
+ ptr := v_0
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(ptr)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (IsSliceInBounds idx len)
+ // result: (LessEqual (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(idx, len)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LessEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LessEqual (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32 x y)
+ // result: (LessEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq32U x y)
+ // result: (LessEqual (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64 x y)
+ // result: (LessEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64F x y)
+ // result: (FLessEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Leq64U x y)
+ // result: (LessEqual (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LessEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LessEqual (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LessThan (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LessThan (CMPWU (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 x y)
+ // result: (LessThan (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32U x y)
+ // result: (LessThan (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 x y)
+ // result: (LessThan (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64F x y)
+ // result: (FLessThan (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64FLessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64U x y)
+ // result: (LessThan (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LessThan (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LessThan (CMPWU (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64LessThan)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && isSigned(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !isSigned(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && isSigned(t)
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !isSigned(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: (MOVBreg (MOVBZload ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpPPC64MOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 x (AND y (MOVDconst [31])))
+ // result: (SLW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Lsh32x64 x (ANDconst <typ.Int32> [31] y))
+ // result: (SLW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int32 || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SLDconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SLDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 x (AND y (MOVDconst [63])))
+ // result: (SLD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Lsh64x64 x (ANDconst <typ.Int64> [63] y))
+ // result: (SLD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.Int64 || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 x y)
+ // result: (SLD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SLWconst x [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SLWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 x y)
+ // result: (SLW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SLW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (Mod32 (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (Mod32u (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODSW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32 x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVW x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVW, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODUW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod32u x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLW y (DIVWU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLW, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVWU, typ.Int32)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64 x y)
+	// cond: buildcfg.GOPPC64 >= 9
+ // result: (MODSD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODSD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64 x y)
+	// cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLD y (DIVD x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVD, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod64u x y)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (MODUD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64MODUD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Mod64u x y)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (SUB x (MULLD y (DIVDU x y)))
+ for {
+ x := v_0
+ y := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64SUB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpPPC64DIVDU, typ.Int64)
+ v1.AddArg2(x, y)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (Mod32 (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (Mod32u (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpMod32u)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)
+ // result: (LoweredMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && buildcfg.GOPPC64 <= 8 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadMoveShort [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s <= 64 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMoveShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)
+ // result: (LoweredQuadMove [s] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && buildcfg.GOPPC64 >= 9 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt16to32 x) (SignExt16to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 x y)
+ // result: (NotEqual (CMPW (ZeroExt16to32 x) (ZeroExt16to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32 x y)
+ // result: (NotEqual (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq32F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64F x y)
+ // result: (NotEqual (FCMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64FCMPU, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // cond: isSigned(x.Type) && isSigned(y.Type)
+ // result: (NotEqual (CMPW (SignExt8to32 x) (SignExt8to32 y)))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ y := v_1
+ if !(isSigned(x.Type) && isSigned(y.Type)) {
+ continue
+ }
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 x y)
+ // result: (NotEqual (CMPW (ZeroExt8to32 x) (ZeroExt8to32 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (NeqPtr x y)
+ // result: (NotEqual (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64NotEqual)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst <typ.Int64> [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpPPC64ADD)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ADD l:(MULLD x y) z)
+ // cond: buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)
+ // result: (MADDLD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ l := v_0
+ if l.Op != OpPPC64MULLD {
+ continue
+ }
+ y := l.Args[1]
+ x := l.Args[0]
+ z := v_1
+ if !(buildcfg.GOPPC64 >= 9 && l.Uses == 1 && clobber(l)) {
+ continue
+ }
+ v.reset(OpPPC64MADDLD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
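The shift-pair rules at the top of this function recognize the classic rotate idiom, and the SLD/SRD-with-masked-count rules after them recognize the variable-rotate expansion. A minimal Go-level sketch of source that yields the constant form (the function name is illustrative, and earlier SSA passes may canonicalize the exact shape before it reaches these rules):

	// The two shifted halves occupy disjoint bits, so the sum equals the OR,
	// and the backend can fold ADD(SLDconst[13] x, SRDconst[51] x) into a
	// single ROTLconst[13].
	func rotl13(x uint64) uint64 {
		return (x << 13) + (x >> (64 - 13))
	}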
+func rewriteValuePPC64_OpPPC64ADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {sym} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVDaddr [int32(c+int64(d))] {sym} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c + int64(d)))
+ v.Aux = symToAux(sym)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] x:(SP))
+ // cond: is32Bit(c)
+ // result: (MOVDaddr [int32(c)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c+d)
+ // result: (SUBFCconst [c+d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c + d)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND (MOVDconst [m]) (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLWconst {
+ continue
+ }
+ r := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64ROTLW {
+ continue
+ }
+ r := v_1.Args[1]
+ x := v_1.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [m]) (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ s := auxIntToInt64(v_1.AuxInt)
+ x := v_1.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (NOR y y))
+ // result: (ANDN x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64NOR {
+ continue
+ }
+ y := v_1.Args[1]
+ if y != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ANDN)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) y:(MOVWZreg _))
+ // cond: c&0xFFFFFFFF == 0xFFFFFFFF
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if y.Op != OpPPC64MOVWZreg || !(c&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [0xFFFFFFFF]) y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0xFFFFFFFF {
+ continue
+ }
+ y := v_1
+ if y.Op != OpPPC64MOVWreg {
+ continue
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) x:(MOVBZload _ _))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if x.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
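Several of the AND rules above fold a word rotate feeding a constant mask into a single RLWINM/RLWNM (rotate-left-word then AND-with-mask). A small sketch of Go code that can produce the pattern; it assumes 0xFFF0 is the kind of contiguous mask isPPC64WordRotateMask accepts, and the equivalent ANDconst fold below covers the case where the constant is first turned into an ANDconst:

	package sketch

	import "math/bits"

	// bits.RotateLeft32 lowers to a ROTLWconst on PPC64; masking the result
	// with a contiguous constant lets the rules above emit one rlwinm.
	func rotAndMask(x uint32) uint32 {
		return bits.RotateLeft32(x, 3) & 0xFFF0
	}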
+func rewriteValuePPC64_OpPPC64ANDN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c &^ d)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [m] (ROTLWconst [r] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,m,32)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, m, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [m] (ROTLW x r))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWNM [encodePPC64RotateMask(0,m,32)] x r)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ROTLW {
+ break
+ }
+ r := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWNM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(0, m, 32))
+ v.AddArg2(x, r)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64RShiftMask(m,s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m, s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [m] (SRWconst x [s]))
+ // cond: mergePPC64AndSrwi(m,s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m,s)] x)
+ for {
+ m := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m, s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVBZreg _))
+ // cond: c&0xFF == 0xFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg || !(c&0xFF == 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFF] y:(MOVBreg _))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFF {
+ break
+ }
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [c] y:(MOVHZreg _))
+ // cond: c&0xFFFF == 0xFFFF
+ // result: y
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg || !(c&0xFFFF == 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [0xFFFF] y:(MOVHreg _))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 0xFFFF {
+ break
+ }
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVBZreg x))
+ // result: (ANDconst [c&0xFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVHZreg x))
+ // result: (ANDconst [c&0xFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVWZreg x))
+ // result: (ANDconst [c&0xFFFFFFFF] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(c & 0xFFFFFFFF)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CLRLSLDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CLRLSLDI [c] (SRWconst [s] x))
+ // cond: mergePPC64ClrlsldiSrw(int64(c),s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiSrw(int64(c),s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ s := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64ClrlsldiSrw(int64(c), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiSrw(int64(c), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CLRLSLDI [c] i:(RLWINM [s] x))
+ // cond: mergePPC64ClrlsldiRlwinm(c,s) != 0
+ // result: (RLWINM [mergePPC64ClrlsldiRlwinm(c,s)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ i := v_0
+ if i.Op != OpPPC64RLWINM {
+ break
+ }
+ s := auxIntToInt64(i.AuxInt)
+ x := i.Args[0]
+ if !(mergePPC64ClrlsldiRlwinm(c, s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64ClrlsldiRlwinm(c, s))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPUconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPUconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPUconst y [c]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPUconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) y)
+ // cond: is16Bit(c)
+ // result: (InvertFlags (CMPWconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpPPC64CMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVDconst [c]))
+ // cond: isU16Bit(c)
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64CMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) y)
+ // cond: isU16Bit(c)
+ // result: (InvertFlags (CMPWUconst y [int32(c)]))
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(isU16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(y)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpPPC64InvertFlags)
+ v0 := b.NewValue0(v.Pos, OpPPC64CMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64CMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==y
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpPPC64FlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<y
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < y) {
+ break
+ }
+ v.reset(OpPPC64FlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>y
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > y) {
+ break
+ }
+ v.reset(OpPPC64FlagGT)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64Equal(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Equal (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (Equal (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Equal (InvertFlags x))
+ // result: (Equal x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64Equal)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Equal cmp)
+ // result: (ISELB [2] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
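The final Equal rule shows how a flags value is materialized as a Go bool when nothing branches on it directly: ISELB [2] selects the constant 1 if the EQ bit of the condition register is set and 0 otherwise. A sketch of source that needs such a materialized result:

	// The CMP sets the condition register; because the result is returned
	// rather than branched on, Equal lowers to ISELB [2] (MOVDconst [1]) cmp.
	func isEq(x, y int64) bool {
		return x == y
	}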
+func rewriteValuePPC64_OpPPC64FABS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FABS (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Abs(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Abs(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADD (FMUL x y) z)
+ // result: (FMADD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMUL {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FADDS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FADDS (FMULS x y) z)
+ // result: (FMADDS x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64FMULS {
+ continue
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ return false
+}
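FADD/FADDS of a same-precision product are fused into FMADD/FMADDS by the two rules above; Go permits fusing x*y + z when the intermediate product is not explicitly rounded, and math.FMA requests the single-rounding behavior portably. A brief sketch:

	package sketch

	import "math"

	// On PPC64 the plain expression may be fused into one fmadd by the rule
	// above; math.FMA guarantees fused semantics on every target.
	func mulAdd(x, y, z float64) float64 {
		return x*y + z
	}

	func mulAddExact(x, y, z float64) float64 {
		return math.FMA(x, y, z)
	}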
+func rewriteValuePPC64_OpPPC64FCEIL(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FCEIL (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Ceil(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Ceil(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FFLOOR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FFLOOR (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Floor(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Floor(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FGreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FGreaterEqual cmp)
+ // result: (ISEL [2] (MOVDconst [1]) (ISELB [1] (MOVDconst [1]) cmp) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(1)
+ v1.AddArg2(v0, cmp)
+ v.AddArg3(v0, v1, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FGreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FGreaterThan cmp)
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLessEqual cmp)
+ // result: (ISEL [2] (MOVDconst [1]) (ISELB [0] (MOVDconst [1]) cmp) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISELB, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg2(v0, cmp)
+ v.AddArg3(v0, v1, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FLessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (FLessThan cmp)
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64FMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr (MOVDstore [off] {sym} ptr x _))
+ // result: (MTVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MTVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
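The first FMOVDload rule replaces a float reload of a value just stored from an integer register with a direct GPR-to-FPR move (MTVSRD); FMOVDstore below carries the mirror rule. This is the store/reload shape that bit-reinterpretation code such as math.Float64frombits tends to produce, so the round trip through memory disappears. A sketch:

	package sketch

	import "math"

	// The compiler typically spills b to a stack slot and reloads it as a
	// float64; the MTVSRD rewrite turns that pair into one register move.
	func f64FromBits(b uint64) float64 {
		return math.Float64frombits(b)
	}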
+func rewriteValuePPC64_OpPPC64FMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off] {sym} ptr (MTVSRD x) mem)
+ // result: (MOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MTVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVDstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSload [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (FMOVSstore [off1+int32(off2)] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (FABS x))
+ // result: (FNABS x)
+ for {
+ if v_0.Op != OpPPC64FABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FNABS)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (FNABS x))
+ // result: (FABS x)
+ for {
+ if v_0.Op != OpPPC64FNABS {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64FABS)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSQRT(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FSQRT (FMOVDconst [x]))
+ // cond: x >= 0
+ // result: (FMOVDconst [math.Sqrt(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if !(x >= 0) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUB (FMUL x y) z)
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpPPC64FMUL {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FSUBS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FSUBS (FMULS x y) z)
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpPPC64FMULS {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ z := v_1
+ v.reset(OpPPC64FMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64FTRUNC(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FTRUNC (FMOVDconst [x]))
+ // result: (FMOVDconst [math.Trunc(x)])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Trunc(x))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64GreaterEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (GreaterEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterEqual (InvertFlags x))
+ // result: (LessEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterEqual cmp)
+ // result: (ISELB [4] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64GreaterThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (GreaterThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (GreaterThan (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (GreaterThan (InvertFlags x))
+ // result: (LessThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64LessThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (GreaterThan cmp)
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64ISEL(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPU (ANDconst [d] y) (MOVDconst [c])))
+ // cond: c >= d
+ // result: (ANDconst [d] y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPU {
+ break
+ }
+ _ = v_2.Args[1]
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] {
+ break
+ }
+ v_2_1 := v_2.Args[1]
+ if v_2_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_2_1.AuxInt)
+ if !(c >= d) {
+ break
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(d)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ISEL [0] (ANDconst [d] y) (MOVDconst [-1]) (CMPUconst [c] (ANDconst [d] y)))
+ // cond: c >= d
+ // result: (ANDconst [d] y)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ y := v_0.Args[0]
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 || v_2.Op != OpPPC64CMPUconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDconst || auxIntToInt64(v_2_0.AuxInt) != d || y != v_2_0.Args[0] || !(c >= d) {
+ break
+ }
+ v.reset(OpPPC64ANDconst)
+ v.AuxInt = int64ToAuxInt(d)
+ v.AddArg(y)
+ return true
+ }
+ // match: (ISEL [6] x y (Select1 (ANDCCconst [1] (ISELB [c] one cmp))))
+ // result: (ISEL [c] x y cmp)
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64ANDCCconst || auxIntToInt64(v_2_0.AuxInt) != 1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpPPC64ISELB {
+ break
+ }
+ c := auxIntToInt32(v_2_0_0.AuxInt)
+ cmp := v_2_0_0.Args[1]
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (ISEL [2] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [2] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [6] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] _ y (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [0] x _ (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] _ x (FlagLT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ x := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [5] y _ (FlagGT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 {
+ break
+ }
+ y := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagEQ))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [1] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagEQ))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] x _ (FlagGT))
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ x := v_0
+ if v_2.Op != OpPPC64FlagGT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ISEL [4] _ y (FlagLT))
+ // result: y
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 {
+ break
+ }
+ y := v_1
+ if v_2.Op != OpPPC64FlagLT {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 0
+ // result: (ISEL [n+1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n + 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 1
+ // result: (ISEL [n-1] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 1) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n - 1)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ // match: (ISEL [n] x y (InvertFlags bool))
+ // cond: n%4 == 2
+ // result: (ISEL [n] x y bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_2.Args[0]
+ if !(n%4 == 2) {
+ break
+ }
+ v.reset(OpPPC64ISEL)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg3(x, y, bool)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ISELB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ISELB [0] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [0] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [0] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [1] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 1 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [2] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 2 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagLT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [4] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 4 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [5] _ (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 5 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [6] _ (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if auxIntToInt32(v.AuxInt) != 6 || v_1.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 0
+ // result: (ISELB [n+1] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n + 1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 1
+ // result: (ISELB [n-1] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 1) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n - 1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ // match: (ISELB [n] (MOVDconst [1]) (InvertFlags bool))
+ // cond: n%4 == 2
+ // result: (ISELB [n] (MOVDconst [1]) bool)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpPPC64InvertFlags {
+ break
+ }
+ bool := v_1.Args[0]
+ if !(n%4 == 2) {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(n)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, bool)
+ return true
+ }
+ return false
+}
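When the condition operand of ISELB is a known flag constant, the rules above collapse the whole select to MOVDconst 0 or 1; when it arrives through InvertFlags, the selector is renumbered instead so the wrapper can be dropped. The renumbering mirrors what swapping compare operands does to the outcome: LT- and GT-style selectors trade places while EQ-style selectors stay put. A standalone sketch of that mapping (a hypothetical helper, written only to mirror the three n%4 cases handled above):

	// invertSelector returns the ISELB/ISEL selector to use after the compare
	// operands have been swapped (InvertFlags).
	func invertSelector(n int32) int32 {
		switch n % 4 {
		case 0:
			return n + 1 // an LT-style test becomes a GT-style test
		case 1:
			return n - 1 // a GT-style test becomes an LT-style test
		default:
			return n // EQ-style tests (and anything else) are left unchanged
		}
	}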
+func rewriteValuePPC64_OpPPC64LessEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LessEqual (FlagEQ))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessEqual (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessEqual (InvertFlags x))
+ // result: (GreaterEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessEqual cmp)
+ // result: (ISELB [5] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(5)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64LessThan(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (LessThan (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (LessThan (FlagGT))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (LessThan (InvertFlags x))
+ // result: (GreaterThan x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64GreaterThan)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LessThan cmp)
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64MFVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MFVSRD (FMOVDconst [c]))
+ // result: (MOVDconst [int64(math.Float64bits(c))])
+ for {
+ if v_0.Op != OpPPC64FMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(c)))
+ return true
+ }
+ // match: (MFVSRD x:(FMOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64FMOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64MOVDload, typ.Int64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVBZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRDconst [c] x))
+ // cond: c>=56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (SRWconst [c] x))
+ // cond: c>=24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBZreg (MOVBreg x))
+ // result: (MOVBZreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (OR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (XOR <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg (AND <t> x (MOVBZreg y)))
+ // result: (MOVBZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVBZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVBZreg z:(AND y (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Select0 (LoweredAtomicLoad8 _ _)))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpPPC64LoweredAtomicLoad8 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: is8BitInt(t) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0x7F
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0x7F) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) == 8
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) == 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c>56
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 56) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRDconst [c] x))
+ // cond: c==56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRADconst [c] x))
+ // cond: c>=56
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 56) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c>24
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 24) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRWconst [c] x))
+ // cond: c==24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg (SRAWconst [c] x))
+ // cond: c>=24
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 24) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVBreg (MOVBZreg x))
+ // result: (MOVBreg x)
+ for {
+ if v_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVBstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstore [off] {sym} ptr (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1_0.Args[0]
+ mem := v_2
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRWconst w [24]) x0:(MOVBstore [i0] {s} p (SRWconst w [16]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRDconst w [24]) x0:(MOVBstore [i0] {s} p (SRDconst w [16]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p (SRWconst <typ.UInt16> w [16]) mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 16 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SRWconst, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(16)
+ v0.AddArg(w)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRWconst w [8]) x0:(MOVBstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p (SRDconst w [8]) x0:(MOVBstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i3] {s} p w x0:(MOVBstore [i2] {s} p (SRWconst w [8]) x1:(MOVBstore [i1] {s} p (SRWconst w [16]) x2:(MOVBstore [i0] {s} p (SRWconst w [24]) mem))))
+ // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)
+ // result: (MOVWBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i3 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i2 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRWconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ mem := x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRWconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && clobber(x0, x1, x2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWBRstore)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i1] {s} p w x0:(MOVBstore [i0] {s} p (SRWconst w [8]) mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)
+ // result: (MOVHBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRWconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+1 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVHBRstore)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i7] {s} p (SRDconst w [56]) x0:(MOVBstore [i6] {s} p (SRDconst w [48]) x1:(MOVBstore [i5] {s} p (SRDconst w [40]) x2:(MOVBstore [i4] {s} p (SRDconst w [32]) x3:(MOVWstore [i0] {s} p w mem)))))
+ // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)
+ // result: (MOVDstore [i0] {s} p w mem)
+ for {
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 48 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 40 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 32 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpPPC64MOVWstore {
+ break
+ }
+ i0 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ mem := x3.Args[2]
+ if p != x3.Args[0] || w != x3.Args[1] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i7] {s} p w x0:(MOVBstore [i6] {s} p (SRDconst w [8]) x1:(MOVBstore [i5] {s} p (SRDconst w [16]) x2:(MOVBstore [i4] {s} p (SRDconst w [24]) x3:(MOVBstore [i3] {s} p (SRDconst w [32]) x4:(MOVBstore [i2] {s} p (SRDconst w [40]) x5:(MOVBstore [i1] {s} p (SRDconst w [48]) x6:(MOVBstore [i0] {s} p (SRDconst w [56]) mem))))))))
+ // cond: !config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)
+ // result: (MOVDBRstore (MOVDaddr <typ.Uintptr> [i0] {s} p) w mem)
+ for {
+ i7 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x0 := v_2
+ if x0.Op != OpPPC64MOVBstore {
+ break
+ }
+ i6 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ _ = x0.Args[2]
+ if p != x0.Args[0] {
+ break
+ }
+ x0_1 := x0.Args[1]
+ if x0_1.Op != OpPPC64SRDconst || auxIntToInt64(x0_1.AuxInt) != 8 || w != x0_1.Args[0] {
+ break
+ }
+ x1 := x0.Args[2]
+ if x1.Op != OpPPC64MOVBstore {
+ break
+ }
+ i5 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpPPC64SRDconst || auxIntToInt64(x1_1.AuxInt) != 16 || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpPPC64MOVBstore {
+ break
+ }
+ i4 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpPPC64SRDconst || auxIntToInt64(x2_1.AuxInt) != 24 || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpPPC64MOVBstore {
+ break
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ break
+ }
+ _ = x3.Args[2]
+ if p != x3.Args[0] {
+ break
+ }
+ x3_1 := x3.Args[1]
+ if x3_1.Op != OpPPC64SRDconst || auxIntToInt64(x3_1.AuxInt) != 32 || w != x3_1.Args[0] {
+ break
+ }
+ x4 := x3.Args[2]
+ if x4.Op != OpPPC64MOVBstore {
+ break
+ }
+ i2 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ break
+ }
+ _ = x4.Args[2]
+ if p != x4.Args[0] {
+ break
+ }
+ x4_1 := x4.Args[1]
+ if x4_1.Op != OpPPC64SRDconst || auxIntToInt64(x4_1.AuxInt) != 40 || w != x4_1.Args[0] {
+ break
+ }
+ x5 := x4.Args[2]
+ if x5.Op != OpPPC64MOVBstore {
+ break
+ }
+ i1 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ break
+ }
+ _ = x5.Args[2]
+ if p != x5.Args[0] {
+ break
+ }
+ x5_1 := x5.Args[1]
+ if x5_1.Op != OpPPC64SRDconst || auxIntToInt64(x5_1.AuxInt) != 48 || w != x5_1.Args[0] {
+ break
+ }
+ x6 := x5.Args[2]
+ if x6.Op != OpPPC64MOVBstore {
+ break
+ }
+ i0 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ break
+ }
+ mem := x6.Args[2]
+ if p != x6.Args[0] {
+ break
+ }
+ x6_1 := x6.Args[1]
+ if x6_1.Op != OpPPC64SRDconst || auxIntToInt64(x6_1.AuxInt) != 56 || w != x6_1.Args[0] || !(!config.BigEndian && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && clobber(x0, x1, x2, x3, x4, x5, x6)) {
+ break
+ }
+ v.reset(OpPPC64MOVDBRstore)
+ v0 := b.NewValue0(x6.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg(p)
+ v.AddArg3(v0, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVBstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVBZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVBstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVBstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVHZreg x) [c]) mem)
+ // cond: c <= 8
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ // match: (MOVBstoreidx ptr idx (SRWconst (MOVWZreg x) [c]) mem)
+ // cond: c <= 24
+ // result: (MOVBstoreidx ptr idx (SRWconst <typ.UInt32> x [c]) mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_2.AuxInt)
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2_0.Args[0]
+ mem := v_3
+ if !(c <= 24) {
+ break
+ }
+ v.reset(OpPPC64MOVBstoreidx)
+ v0 := b.NewValue0(v.Pos, OpPPC64SRWconst, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ v.AddArg4(ptr, idx, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr (FMOVDstore [off] {sym} ptr x _))
+ // result: (MFVSRD x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64FMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ if ptr != v_1.Args[0] {
+ break
+ }
+ v.reset(OpPPC64MFVSRD)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off] {sym} ptr (MFVSRD x) mem)
+ // result: (FMOVDstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MFVSRD {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64FMOVDstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVDstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVDstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVDstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHBRstore {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHBRstore {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVHBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRDconst [c] x))
+ // cond: c>=48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (SRWconst [c] x))
+ // cond: c>=16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHZreg y:(MOVHreg x))
+ // result: (MOVHZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (OR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (XOR <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg (AND <t> x (MOVHZreg y)))
+ // result: (MOVHZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVHZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVHZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg z:(ANDconst [c] (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVHZreg z:(AND y (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVHZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVHload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0x7FFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0x7FFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c>48
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 48) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRDconst [c] x))
+ // cond: c==48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRADconst [c] x))
+ // cond: c>=48
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 48) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c>16
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRAWconst [c] x))
+ // cond: c>=16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg (SRWconst [c] x))
+ // cond: c==16
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVHreg y:(MOVHZreg x))
+ // result: (MOVHreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVHstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [i1] {s} p (SRWconst w [16]) x0:(MOVHstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
+ // result: (MOVWstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRWconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVHstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i1] {s} p (SRDconst w [16]) x0:(MOVHstore [i0] {s} p w mem))
+ // cond: !config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)
+ // result: (MOVWstore [i0] {s} p w mem)
+ for {
+ i1 := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpPPC64SRDconst || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x0 := v_2
+ if x0.Op != OpPPC64MOVHstore {
+ break
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ break
+ }
+ mem := x0.Args[2]
+ if p != x0.Args[0] || w != x0.Args[1] || !(!config.BigEndian && x0.Uses == 1 && i1 == i0+2 && clobber(x0)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(i0)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVHstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVHZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVHstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVHstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVHstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWBRstore {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWBRstore {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWBRstore {sym} ptr x mem)
+ for {
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWBRstore)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWZload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWZload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWZloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWZloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c)
+ // result: (MOVWZload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWZload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWZreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0xFFFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0xFFFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVBZreg x)))
+ // result: (SRWconst [c] (MOVBZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVHZreg x)))
+ // result: (SRWconst [c] (MOVHZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] (MOVWZreg x)))
+ // result: (SRWconst [c] (MOVWZreg x))
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWZreg (SRWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (SRDconst [c] x))
+ // cond: c>=32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVBZreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBZreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVHBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWBRload _ _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWBRload {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWZreg y:(MOVWreg x))
+ // result: (MOVWZreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg (OR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (OR <t> x y))
+ for {
+ if v_0.Op != OpPPC64OR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64OR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (XOR <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (XOR <t> x y))
+ for {
+ if v_0.Op != OpPPC64XOR {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64XOR, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg (AND <t> x (MOVWZreg y)))
+ // result: (MOVWZreg (AND <t> x y))
+ for {
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpPPC64MOVWZreg {
+ continue
+ }
+ y := v_0_1.Args[0]
+ v.reset(OpPPC64MOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpPPC64AND, t)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVBZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVHZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(ANDconst [c] (MOVWZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ z_0 := z.Args[0]
+ if z_0.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(z)
+ return true
+ }
+ // match: (MOVWZreg z:(AND y (MOVWZload ptr x)))
+ // result: z
+ for {
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_1.Op != OpPPC64MOVWZload {
+ continue
+ }
+ v.copyOf(z)
+ return true
+ }
+ break
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVBZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWZloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(Select0 (LoweredAtomicLoad32 _ _)))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpSelect0 {
+ break
+ }
+ x_0 := x.Args[0]
+ if x_0.Op != OpPPC64LoweredAtomicLoad32 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && !isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWload [0] {sym} p:(ADD ptr idx) mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWloadidx ptr idx mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ mem := v_1
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWloadidx)
+ v.AddArg3(ptr, idx, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWloadidx(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWloadidx ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWloadidx (MOVDconst [c]) ptr mem)
+ // cond: is16Bit(c) && c%4 == 0
+ // result: (MOVWload [int32(c)] ptr mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ mem := v_2
+ if !(is16Bit(c) && c%4 == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVWload)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWreg y:(ANDconst [c] _))
+ // cond: uint64(c) <= 0xFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(y.AuxInt)
+ if !(uint64(c) <= 0xFFFF) {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(AND (MOVDconst [c]) _))
+ // cond: uint64(c) <= 0x7FFFFFFF
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64AND {
+ break
+ }
+ y_0 := y.Args[0]
+ y_1 := y.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, y_0, y_1 = _i0+1, y_1, y_0 {
+ if y_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(y_0.AuxInt)
+ if !(uint64(c) <= 0x7FFFFFFF) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVBreg x)))
+ // result: (SRAWconst [c] (MOVBreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVBreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVHreg x)))
+ // result: (SRAWconst [c] (MOVHreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVHreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] (MOVWreg x)))
+ // result: (SRAWconst [c] (MOVWreg x))
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (MOVWreg (SRAWconst [c] x))
+ // cond: sizeof(x.Type) <= 32
+ // result: (SRAWconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRAWconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(sizeof(x.Type) <= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c>32
+ // result: (SRDconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c > 32) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRADconst [c] x))
+ // cond: c>=32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRADconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg (SRDconst [c] x))
+ // cond: c==32
+ // result: (SRADconst [c] x)
+ for {
+ if v_0.Op != OpPPC64SRDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(c == 32) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVHreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVHreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVBreg _))
+ // result: y
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVBreg {
+ break
+ }
+ v.copyOf(y)
+ return true
+ }
+ // match: (MOVWreg y:(MOVWZreg x))
+ // result: (MOVWreg x)
+ for {
+ y := v_0
+ if y.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := y.Args[0]
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVHloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWload {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWloadidx _ _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVWloadidx {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: (is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !((is8BitInt(t) || is16BitInt(t) || is32BitInt(t)) && isSigned(t)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] x) val mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} x val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} p:(MOVDaddr [off2] {sym2} ptr) val mem)
+ // cond: canMergeSym(sym1,sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(canMergeSym(sym1, sym2) && is16Bit(int64(off1+off2)) && (ptr.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [0] {sym} p:(ADD ptr idx) val mem)
+ // cond: sym == nil && p.Uses == 1
+ // result: (MOVWstoreidx ptr idx val mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ sym := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64ADD {
+ break
+ }
+ idx := p.Args[1]
+ ptr := p.Args[0]
+ val := v_1
+ mem := v_2
+ if !(sym == nil && p.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstoreidx(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstoreidx ptr (MOVDconst [c]) val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ ptr := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx (MOVDconst [c]) ptr val mem)
+ // cond: is16Bit(c)
+ // result: (MOVWstore [int32(c)] ptr val mem)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ ptr := v_1
+ val := v_2
+ mem := v_3
+ if !(is16Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ // match: (MOVWstoreidx ptr idx (MOVWZreg x) mem)
+ // result: (MOVWstoreidx ptr idx x mem)
+ for {
+ ptr := v_0
+ idx := v_1
+ if v_2.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := v_2.Args[0]
+ mem := v_3
+ v.reset(OpPPC64MOVWstoreidx)
+ v.AddArg4(ptr, idx, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym} (ADDconst [off2] x) mem)
+ // cond: is16Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ mem := v_1
+ if !(is16Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(x, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym1} p:(MOVDaddr [off2] {sym2} x) mem)
+ // cond: canMergeSym(sym1,sym2) && (x.Op != OpSB || p.Uses == 1)
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} x mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ p := v_0
+ if p.Op != OpPPC64MOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(p.AuxInt)
+ sym2 := auxToSym(p.Aux)
+ x := p.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && (x.Op != OpSB || p.Uses == 1)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MTVSRD(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MTVSRD (MOVDconst [c]))
+ // cond: !math.IsNaN(math.Float64frombits(uint64(c)))
+ // result: (FMOVDconst [math.Float64frombits(uint64(c))])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Float64frombits(uint64(c)))) {
+ break
+ }
+ v.reset(OpPPC64FMOVDconst)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ return true
+ }
+ // match: (MTVSRD x:(MOVDload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (FMOVDload [off] {sym} ptr mem)
+ for {
+ x := v_0
+ if x.Op != OpPPC64MOVDload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpPPC64FMOVDload, typ.Float64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64MULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64MULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (ADDconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (SUBFCconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64ADDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NEG (SUBFCconst [c] x))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NEG (SUB x y))
+ // result: (SUB y x)
+ for {
+ if v_0.Op != OpPPC64SUB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpPPC64SUB)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (NEG (NEG x))
+ // result: x
+ for {
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [^(c|d)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(^(c | d))
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64NotEqual(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NotEqual (FlagEQ))
+ // result: (MOVDconst [0])
+ for {
+ if v_0.Op != OpPPC64FlagEQ {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (NotEqual (FlagLT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagLT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (FlagGT))
+ // result: (MOVDconst [1])
+ for {
+ if v_0.Op != OpPPC64FlagGT {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (NotEqual (InvertFlags x))
+ // result: (NotEqual x)
+ for {
+ if v_0.Op != OpPPC64InvertFlags {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64NotEqual)
+ v.AddArg(x)
+ return true
+ }
+ // match: (NotEqual cmp)
+ // result: (ISELB [6] (MOVDconst [1]) cmp)
+ for {
+ cmp := v_0
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPPC64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: ( OR (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: ( OR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i0] {s} p mem) o1:(SLWconst x1:(MOVBZload [i1] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i0] {s} p mem) o1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i1] {s} p mem) o1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [8]))
+ // cond: !config.BigEndian && i1 == i0+1 && x0.Uses ==1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLWconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i1] {s} p mem) o1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [8]))
+	// cond: !config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)
+ // result: @mergePoint(b,x0,x1) (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o1 := v_1
+ if o1.Op != OpPPC64SLDconst || auxIntToInt64(o1.AuxInt) != 8 {
+ continue
+ }
+ x1 := o1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && o1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, o1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s0:(SLWconst x0:(MOVBZload [i1] {s} p mem) [n1]) s1:(SLWconst x1:(MOVBZload [i0] {s} p mem) [n2]))
+ // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)
+ // result: @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLWconst {
+ continue
+ }
+ n1 := auxIntToInt64(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s1 := v_1
+ if s1.Op != OpPPC64SLWconst {
+ continue
+ }
+ n2 := auxIntToInt64(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(n1)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s0:(SLDconst x0:(MOVBZload [i1] {s} p mem) [n1]) s1:(SLDconst x1:(MOVBZload [i0] {s} p mem) [n2]))
+ // cond: !config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)
+ // result: @mergePoint(b,x0,x1) (SLDconst <t> (MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [n1])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLDconst {
+ continue
+ }
+ n1 := auxIntToInt64(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ s1 := v_1
+ if s1.Op != OpPPC64SLDconst {
+ continue
+ }
+ n2 := auxIntToInt64(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(!config.BigEndian && i1 == i0+1 && n1%8 == 0 && n2 == n1+8 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, s0, s1)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(n1)
+ v1 := b.NewValue0(x1.Pos, OpPPC64MOVHBRload, t)
+ v2 := b.NewValue0(x1.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLWconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
+	// cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLDconst x2:(MOVBZload [i3] {s} p mem) [24]) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [16]) x0:(MOVHZload [i0] {s} p mem)))
+	// cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWZload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLWconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s1:(SLDconst x2:(MOVBZload [i0] {s} p mem) [24]) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i1] {s} p mem) [16]) x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem)))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 24 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 16 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ x0 := o0_1
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i3] {s} p mem) o0:(OR <t> s0:(SLWconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLWconst x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := o0_1
+ if s1.Op != OpPPC64SLWconst || auxIntToInt64(s1.AuxInt) != 16 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVHBRload || x2.Type != t {
+ continue
+ }
+ _ = x2.Args[1]
+ x2_0 := x2.Args[0]
+ if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> x0:(MOVBZload [i3] {s} p mem) o0:(OR <t> s0:(SLDconst x1:(MOVBZload [i2] {s} p mem) [8]) s1:(SLDconst x2:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [16])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)
+ // result: @mergePoint(b,x0,x1,x2) (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s0 := o0_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 8 {
+ continue
+ }
+ x1 := s0.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s1 := o0_1
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 16 {
+ continue
+ }
+ x2 := s1.Args[0]
+ if x2.Op != OpPPC64MOVHBRload || x2.Type != t {
+ continue
+ }
+ _ = x2.Args[1]
+ x2_0 := x2.Args[0]
+ if x2_0.Op != OpPPC64MOVDaddr || x2_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x2_0.AuxInt)
+ if auxToSym(x2_0.Aux) != s || p != x2_0.Args[0] || mem != x2.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x2.Pos, OpPPC64MOVWBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x2.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s2:(SLDconst x2:(MOVBZload [i3] {s} p mem) [32]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i2] {s} p mem) [40]) s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [48])))
+ // cond: !config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s2 := v_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 32 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 40 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s0 := o0_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 48 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i2 == i0+2 && i3 == i0+3 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s2:(SLDconst x2:(MOVBZload [i0] {s} p mem) [56]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) s0:(SLDconst x0:(MOVHBRload <t> (MOVDaddr <typ.Uintptr> [i2] {s} p) mem) [32])))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)
+ // result: @mergePoint(b,x0,x1,x2) (SLDconst <t> (MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s2 := v_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 56 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x2.AuxInt)
+ s := auxToSym(x2.Aux)
+ mem := x2.Args[1]
+ p := x2.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ s0 := o0_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVHBRload || x0.Type != t {
+ continue
+ }
+ _ = x0.Args[1]
+ x0_0 := x0.Args[0]
+ if x0_0.Op != OpPPC64MOVDaddr || x0_0.Type != typ.Uintptr {
+ continue
+ }
+ i2 := auxIntToInt32(x0_0.AuxInt)
+ if auxToSym(x0_0.Aux) != s || p != x0_0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && o0.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && mergePoint(b, x0, x1, x2) != nil && clobber(x0, x1, x2, s0, s1, s2, o0)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2)
+ v0 := b.NewValue0(x0.Pos, OpPPC64SLDconst, t)
+ v.copyOf(v0)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(x0.Pos, OpPPC64MOVWBRload, t)
+ v2 := b.NewValue0(x0.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg(p)
+ v1.AddArg2(v2, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR <t> s6:(SLDconst x7:(MOVBZload [i7] {s} p mem) [56]) o5:(OR <t> s5:(SLDconst x6:(MOVBZload [i6] {s} p mem) [48]) o4:(OR <t> s4:(SLDconst x5:(MOVBZload [i5] {s} p mem) [40]) o3:(OR <t> s3:(SLDconst x4:(MOVBZload [i4] {s} p mem) [32]) x0:(MOVWZload {s} [i0] p mem)))))
+	// cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)
+ // result: @mergePoint(b,x0,x4,x5,x6,x7) (MOVDload <t> {s} [i0] p mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s6 := v_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 56 {
+ continue
+ }
+ x7 := s6.Args[0]
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s5 := o5_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 48 {
+ continue
+ }
+ x6 := s5.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s4 := o4_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 40 {
+ continue
+ }
+ x5 := s4.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s3 := o3_0
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
+ continue
+ }
+ x4 := s3.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ x0 := o3_1
+ if x0.Op != OpPPC64MOVWZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x0.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s3.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x0, x4, x5, x6, x7) != nil && clobber(x0, x4, x5, x6, x7, s3, s4, s5, s6, o3, o4, o5)) {
+ continue
+ }
+ b = mergePoint(b, x0, x4, x5, x6, x7)
+ v0 := b.NewValue0(x0.Pos, OpPPC64MOVDload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (OR <t> s0:(SLDconst x0:(MOVBZload [i0] {s} p mem) [56]) o0:(OR <t> s1:(SLDconst x1:(MOVBZload [i1] {s} p mem) [48]) o1:(OR <t> s2:(SLDconst x2:(MOVBZload [i2] {s} p mem) [40]) o2:(OR <t> s3:(SLDconst x3:(MOVBZload [i3] {s} p mem) [32]) x4:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i4] p) mem)))))
+ // cond: !config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)
+ // result: @mergePoint(b,x0,x1,x2,x3,x4) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 56 {
+ continue
+ }
+ x0 := s0.Args[0]
+ if x0.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ o0 := v_1
+ if o0.Op != OpPPC64OR || o0.Type != t {
+ continue
+ }
+ _ = o0.Args[1]
+ o0_0 := o0.Args[0]
+ o0_1 := o0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o0_0, o0_1 = _i1+1, o0_1, o0_0 {
+ s1 := o0_0
+ if s1.Op != OpPPC64SLDconst || auxIntToInt64(s1.AuxInt) != 48 {
+ continue
+ }
+ x1 := s1.Args[0]
+ if x1.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ o1 := o0_1
+ if o1.Op != OpPPC64OR || o1.Type != t {
+ continue
+ }
+ _ = o1.Args[1]
+ o1_0 := o1.Args[0]
+ o1_1 := o1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o1_0, o1_1 = _i2+1, o1_1, o1_0 {
+ s2 := o1_0
+ if s2.Op != OpPPC64SLDconst || auxIntToInt64(s2.AuxInt) != 40 {
+ continue
+ }
+ x2 := s2.Args[0]
+ if x2.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i2 := auxIntToInt32(x2.AuxInt)
+ if auxToSym(x2.Aux) != s {
+ continue
+ }
+ _ = x2.Args[1]
+ if p != x2.Args[0] || mem != x2.Args[1] {
+ continue
+ }
+ o2 := o1_1
+ if o2.Op != OpPPC64OR || o2.Type != t {
+ continue
+ }
+ _ = o2.Args[1]
+ o2_0 := o2.Args[0]
+ o2_1 := o2.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o2_0, o2_1 = _i3+1, o2_1, o2_0 {
+ s3 := o2_0
+ if s3.Op != OpPPC64SLDconst || auxIntToInt64(s3.AuxInt) != 32 {
+ continue
+ }
+ x3 := s3.Args[0]
+ if x3.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i3 := auxIntToInt32(x3.AuxInt)
+ if auxToSym(x3.Aux) != s {
+ continue
+ }
+ _ = x3.Args[1]
+ if p != x3.Args[0] || mem != x3.Args[1] {
+ continue
+ }
+ x4 := o2_1
+ if x4.Op != OpPPC64MOVWBRload || x4.Type != t {
+ continue
+ }
+ _ = x4.Args[1]
+ x4_0 := x4.Args[0]
+ if x4_0.Op != OpPPC64MOVDaddr || x4_0.Type != typ.Uintptr {
+ continue
+ }
+ i4 := auxIntToInt32(x4_0.AuxInt)
+ if p != x4_0.Args[0] || mem != x4.Args[1] || !(!config.BigEndian && i1 == i0+1 && i2 == i0+2 && i3 == i0+3 && i4 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && x4.Uses == 1 && o0.Uses == 1 && o1.Uses == 1 && o2.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && mergePoint(b, x0, x1, x2, x3, x4) != nil && clobber(x0, x1, x2, x3, x4, o0, o1, o2, s0, s1, s2, s3)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, x2, x3, x4)
+ v0 := b.NewValue0(x4.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x4.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (OR <t> x7:(MOVBZload [i7] {s} p mem) o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLWconst x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x7 := v_0
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s6 := o5_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
+ continue
+ }
+ x6 := s6.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s5 := o4_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
+ continue
+ }
+ x5 := s5.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s4 := o3_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
+ continue
+ }
+ x4 := s4.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ s0 := o3_1
+ if s0.Op != OpPPC64SLWconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x3 := s0.Args[0]
+ if x3.Op != OpPPC64MOVWBRload || x3.Type != t {
+ continue
+ }
+ _ = x3.Args[1]
+ x3_0 := x3.Args[0]
+ if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ continue
+ }
+ b = mergePoint(b, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ // match: (OR <t> x7:(MOVBZload [i7] {s} p mem) o5:(OR <t> s6:(SLDconst x6:(MOVBZload [i6] {s} p mem) [8]) o4:(OR <t> s5:(SLDconst x5:(MOVBZload [i5] {s} p mem) [16]) o3:(OR <t> s4:(SLDconst x4:(MOVBZload [i4] {s} p mem) [24]) s0:(SLDconst x3:(MOVWBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem) [32])))))
+ // cond: !config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)
+ // result: @mergePoint(b,x3,x4,x5,x6,x7) (MOVDBRload <t> (MOVDaddr <typ.Uintptr> [i0] {s} p) mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x7 := v_0
+ if x7.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i7 := auxIntToInt32(x7.AuxInt)
+ s := auxToSym(x7.Aux)
+ mem := x7.Args[1]
+ p := x7.Args[0]
+ o5 := v_1
+ if o5.Op != OpPPC64OR || o5.Type != t {
+ continue
+ }
+ _ = o5.Args[1]
+ o5_0 := o5.Args[0]
+ o5_1 := o5.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, o5_0, o5_1 = _i1+1, o5_1, o5_0 {
+ s6 := o5_0
+ if s6.Op != OpPPC64SLDconst || auxIntToInt64(s6.AuxInt) != 8 {
+ continue
+ }
+ x6 := s6.Args[0]
+ if x6.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i6 := auxIntToInt32(x6.AuxInt)
+ if auxToSym(x6.Aux) != s {
+ continue
+ }
+ _ = x6.Args[1]
+ if p != x6.Args[0] || mem != x6.Args[1] {
+ continue
+ }
+ o4 := o5_1
+ if o4.Op != OpPPC64OR || o4.Type != t {
+ continue
+ }
+ _ = o4.Args[1]
+ o4_0 := o4.Args[0]
+ o4_1 := o4.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, o4_0, o4_1 = _i2+1, o4_1, o4_0 {
+ s5 := o4_0
+ if s5.Op != OpPPC64SLDconst || auxIntToInt64(s5.AuxInt) != 16 {
+ continue
+ }
+ x5 := s5.Args[0]
+ if x5.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i5 := auxIntToInt32(x5.AuxInt)
+ if auxToSym(x5.Aux) != s {
+ continue
+ }
+ _ = x5.Args[1]
+ if p != x5.Args[0] || mem != x5.Args[1] {
+ continue
+ }
+ o3 := o4_1
+ if o3.Op != OpPPC64OR || o3.Type != t {
+ continue
+ }
+ _ = o3.Args[1]
+ o3_0 := o3.Args[0]
+ o3_1 := o3.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, o3_0, o3_1 = _i3+1, o3_1, o3_0 {
+ s4 := o3_0
+ if s4.Op != OpPPC64SLDconst || auxIntToInt64(s4.AuxInt) != 24 {
+ continue
+ }
+ x4 := s4.Args[0]
+ if x4.Op != OpPPC64MOVBZload {
+ continue
+ }
+ i4 := auxIntToInt32(x4.AuxInt)
+ if auxToSym(x4.Aux) != s {
+ continue
+ }
+ _ = x4.Args[1]
+ if p != x4.Args[0] || mem != x4.Args[1] {
+ continue
+ }
+ s0 := o3_1
+ if s0.Op != OpPPC64SLDconst || auxIntToInt64(s0.AuxInt) != 32 {
+ continue
+ }
+ x3 := s0.Args[0]
+ if x3.Op != OpPPC64MOVWBRload || x3.Type != t {
+ continue
+ }
+ _ = x3.Args[1]
+ x3_0 := x3.Args[0]
+ if x3_0.Op != OpPPC64MOVDaddr || x3_0.Type != typ.Uintptr {
+ continue
+ }
+ i0 := auxIntToInt32(x3_0.AuxInt)
+ if auxToSym(x3_0.Aux) != s || p != x3_0.Args[0] || mem != x3.Args[1] || !(!config.BigEndian && i4 == i0+4 && i5 == i0+5 && i6 == i0+6 && i7 == i0+7 && x3.Uses == 1 && x4.Uses == 1 && x5.Uses == 1 && x6.Uses == 1 && x7.Uses == 1 && o3.Uses == 1 && o4.Uses == 1 && o5.Uses == 1 && s0.Uses == 1 && s4.Uses == 1 && s5.Uses == 1 && s6.Uses == 1 && mergePoint(b, x3, x4, x5, x6, x7) != nil && clobber(x3, x4, x5, x6, x7, o3, o4, o5, s0, s4, s5, s6)) {
+ continue
+ }
+ b = mergePoint(b, x3, x4, x5, x6, x7)
+ v0 := b.NewValue0(x3.Pos, OpPPC64MOVDBRload, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x3.Pos, OpPPC64MOVDaddr, typ.Uintptr)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg(p)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
+ return false
+}
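+// The OR rules above implement load combining on little-endian targets:
+// adjacent zero-extended byte loads joined by shifts and ORs are folded into
+// a single wider load (MOVHZload/MOVWZload/MOVDload), or into a
+// byte-reversed load (MOVHBRload/MOVWBRload/MOVDBRload) when the bytes are
+// assembled in the opposite order. As an illustrative sketch (a hypothetical
+// source pattern, not part of this file; the exact SSA reaching these rules
+// may differ):
+//
+//	func le16(b []byte) uint16 {
+//		return uint16(b[0]) | uint16(b[1])<<8 // expected to become one MOVHZload
+//	}
+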
+func rewriteValuePPC64_OpPPC64ORN(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORN x (MOVDconst [-1]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1.AuxInt) != -1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORN (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|^d])
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c | ^d)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [c] (ORconst [d] x))
+ // result: (ORconst [c|d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64ORconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTL x (MOVDconst [c]))
+ // result: (ROTLconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ROTLW x (MOVDconst [c]))
+ // result: (ROTLWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64ROTLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ROTLWconst [r] (AND (MOVDconst [m]) x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(isPPC64WordRotateMask(m)) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ROTLWconst [r] (ANDconst [m] x))
+ // cond: isPPC64WordRotateMask(m)
+ // result: (RLWINM [encodePPC64RotateMask(r,rotateLeft32(m,r),32)] x)
+ for {
+ r := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(isPPC64WordRotateMask(m)) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(encodePPC64RotateMask(r, rotateLeft32(m, r), 32))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst [l] (SRWconst [r] x))
+ // cond: mergePPC64SldiSrw(l,r) != 0
+ // result: (RLWINM [mergePPC64SldiSrw(l,r)] x)
+ for {
+ l := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SRWconst {
+ break
+ }
+ r := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64SldiSrw(l, r) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64SldiSrw(l, r))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVBZreg x))
+ // cond: c < 8 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,56,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 8 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 56, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVHZreg x))
+ // cond: c < 16 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,48,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 16 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 48, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(MOVWZreg x))
+ // cond: c < 32 && z.Uses == 1
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,32,63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && z.Uses == 1) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32, 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(ANDconst [d] x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst [c] z:(AND (MOVDconst [d]) x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(64-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLDI [newPPC64ShiftAuxInt(c,64-getPPC64ShiftMaskLength(d),63,64)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (64-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLDI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 64-getPPC64ShiftMaskLength(d), 63, 64))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLDconst [c] z:(MOVWreg x))
+ // cond: c < 32 && buildcfg.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
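+// Most of the SLDconst rules above fold a zero-extension (MOVBZreg, MOVHZreg,
+// MOVWZreg, or an AND with a contiguous low-bit mask) followed by a constant
+// left shift into a single CLRLSLDI (clear left immediate then shift left),
+// provided the shifted mask still fits in 64 bits; a sign-extending MOVWreg
+// instead becomes EXTSWSLconst on POWER9 and later. As an illustrative sketch
+// (a hypothetical source expression, not part of this file):
+//
+//	uint64(uint8(x)) << 3 // expected to become one CLRLSLDI
+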
+func rewriteValuePPC64_OpPPC64SLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLW x (MOVDconst [c]))
+ // result: (SLWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SLWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst [c] z:(MOVBZreg x))
+ // cond: z.Uses == 1 && c < 8
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,24,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVBZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 8) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 24, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(MOVHZreg x))
+ // cond: z.Uses == 1 && c < 16
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,16,31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVHZreg {
+ break
+ }
+ x := z.Args[0]
+ if !(z.Uses == 1 && c < 16) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 16, 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(ANDconst [d] x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64ANDconst {
+ break
+ }
+ d := auxIntToInt64(z.AuxInt)
+ x := z.Args[0]
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ break
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLWconst [c] z:(AND (MOVDconst [d]) x))
+ // cond: z.Uses == 1 && isPPC64ValidShiftMask(d) && c<=(32-getPPC64ShiftMaskLength(d))
+ // result: (CLRLSLWI [newPPC64ShiftAuxInt(c,32-getPPC64ShiftMaskLength(d),31,32)] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ if z_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(z_0.AuxInt)
+ x := z_1
+ if !(z.Uses == 1 && isPPC64ValidShiftMask(d) && c <= (32-getPPC64ShiftMaskLength(d))) {
+ continue
+ }
+ v.reset(OpPPC64CLRLSLWI)
+ v.AuxInt = int32ToAuxInt(newPPC64ShiftAuxInt(c, 32-getPPC64ShiftMaskLength(d), 31, 32))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (SLWconst [c] z:(MOVWreg x))
+ // cond: c < 32 && buildcfg.GOPPC64 >= 9
+ // result: (EXTSWSLconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ z := v_0
+ if z.Op != OpPPC64MOVWreg {
+ break
+ }
+ x := z.Args[0]
+ if !(c < 32 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64EXTSWSLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRAW x (MOVDconst [c]))
+ // result: (SRAWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst [c&63 | (c>>6&1*63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c&63 | (c >> 6 & 1 * 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRW x (MOVDconst [c]))
+ // result: (SRWconst [c&31 | (c>>5&1*31)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c&31 | (c >> 5 & 1 * 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRWconst (ANDconst [m] x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64ANDconst {
+ break
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ break
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64RShiftMask(m>>uint(s),s,32) == 0
+ // result: (MOVDconst [0])
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ if !(mergePPC64RShiftMask(m>>uint(s), s, 32) == 0) {
+ continue
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (SRWconst (AND (MOVDconst [m]) x) [s])
+ // cond: mergePPC64AndSrwi(m>>uint(s),s) != 0
+ // result: (RLWINM [mergePPC64AndSrwi(m>>uint(s),s)] x)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64AND {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ m := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(mergePPC64AndSrwi(m>>uint(s), s) != 0) {
+ continue
+ }
+ v.reset(OpPPC64RLWINM)
+ v.AuxInt = int64ToAuxInt(mergePPC64AndSrwi(m>>uint(s), s))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
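+// The SRWconst rules above merge a 32-bit AND mask with the following
+// constant right shift: if shifting leaves no mask bits the result is a
+// constant 0, and if the shifted mask is a contiguous run of bits the pair
+// collapses into a single RLWINM (rotate left word immediate then AND with
+// mask). As an illustrative sketch (a hypothetical source expression, not
+// part of this file):
+//
+//	(uint32(x) & 0xFF0) >> 4 // expected to become one RLWINM
+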
+func rewriteValuePPC64_OpPPC64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(-c)
+ // result: (ADDconst [-c] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-c)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (SUBFCconst [c] x)
+ for {
+ if v_0.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpPPC64SUBFCconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64SUBFCconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBFCconst [c] (NEG x))
+ // result: (ADDconst [c] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64NEG {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [c] (SUBFCconst [d] x))
+ // cond: is32Bit(c-d)
+ // result: (ADDconst [c-d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64SUBFCconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(c - d)) {
+ break
+ }
+ v.reset(OpPPC64ADDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBFCconst [0] x)
+ // result: (NEG x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.reset(OpPPC64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPPC64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XOR (SLDconst x [c]) (SRDconst x [d]))
+ // cond: d == 64-c
+ // result: (ROTLconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 64-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLWconst x [c]) (SRWconst x [d]))
+ // cond: d == 32-c
+ // result: (ROTLWconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLWconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpPPC64SRWconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if x != v_1.Args[0] || !(d == 32-c) {
+ continue
+ }
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 63 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLD x (ANDconst <typ.Int64> [63] y)) (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y))))
+ // result: (ROTL x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLD {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int64 || auxIntToInt64(v_0_1.AuxInt) != 63 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRD {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 64 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 63 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUBFCconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64ANDconst || v_1_1_0.Type != typ.UInt || auxIntToInt64(v_1_1_0.AuxInt) != 31 || y != v_1_1_0.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLW x (ANDconst <typ.Int32> [31] y)) (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y))))
+ // result: (ROTLW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64SLW {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpPPC64ANDconst || v_0_1.Type != typ.Int32 || auxIntToInt64(v_0_1.AuxInt) != 31 {
+ continue
+ }
+ y := v_0_1.Args[0]
+ if v_1.Op != OpPPC64SRW {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64SUB || v_1_1.Type != typ.UInt {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_0.AuxInt) != 32 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpPPC64ANDconst || v_1_1_1.Type != typ.UInt || auxIntToInt64(v_1_1_1.AuxInt) != 31 || y != v_1_1_1.Args[0] {
+ continue
+ }
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
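
The block of XOR rules above folds a rotate that was spelled as a pair of shifts with complementary, masked counts into a single ROTL or ROTLW. A minimal sketch of the Go-level idiom this targets follows; rotl64 is an illustrative name, and whether a particular build actually matches it depends on how the shift lowering typed the masked counts, so this is a sketch rather than a guarantee.

package main

import "fmt"

// rotl64 writes a rotate-by-variable as shifts with masked counts. XOR-ing the
// two halves is safe because their set bits never overlap, and a count of
// 64 - (y&63) == 64 shifts the value out entirely, which Go defines as 0.
func rotl64(x uint64, y uint) uint64 {
	return x<<(y&63) ^ x>>(64-y&63)
}

func main() {
	fmt.Printf("%#016x\n", rotl64(0x0123456789abcdef, 8))
}
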
+func rewriteValuePPC64_OpPPC64XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (XORconst [c] (XORconst [d] x))
+ // result: (XORconst [c^d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpPPC64XORconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpPPC64XORconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [1] (ISELB [6] (MOVDconst [1]) cmp))
+ // result: (ISELB [2] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 6 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (XORconst [1] (ISELB [5] (MOVDconst [1]) cmp))
+ // result: (ISELB [1] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 5 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ // match: (XORconst [1] (ISELB [4] (MOVDconst [1]) cmp))
+ // result: (ISELB [0] (MOVDconst [1]) cmp)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpPPC64ISELB || auxIntToInt32(v_0.AuxInt) != 4 {
+ break
+ }
+ cmp := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpPPC64ISELB)
+ v.AuxInt = int32ToAuxInt(0)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, cmp)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpPPC64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
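
PanicBounds is the generic op for the failing arm of an index or slice bounds check; boundsABI(kind) selects one of the three lowered helpers depending on which values that failure kind has to report. For illustration, a tiny program whose bounds check can fail; the function name is mine, and which helper is chosen is an internal detail.

package main

import "fmt"

// get indexes a slice; the compiler guards the access with a bounds check whose
// failure branch is represented by a PanicBounds op and lowered to one of the
// LoweredPanicBounds* calls above.
func get(s []int, i int) int {
	return s[i]
}

func main() {
	defer func() {
		if r := recover(); r != nil {
			fmt.Println("recovered:", r)
		}
	}()
	fmt.Println(get([]int{1, 2, 3}, 5)) // index out of range: panics
}
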
+func rewriteValuePPC64_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (POPCNTW (MOVHZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (POPCNTW (MOVWZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNTB (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpPPC64POPCNTB)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
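
The three PopCount lowerings zero-extend the operand before counting because POPCNTW and POPCNTB count bits per 32-bit word and per byte of the register, so only the intended bits may be set. A short usage sketch: math/bits.OnesCount* is the usual way these ops arise, though that intrinsic mapping is defined elsewhere in the compiler, not in this file.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	fmt.Println(bits.OnesCount16(0xF0F0))  // 8
	fmt.Println(bits.OnesCount8(0b1011))   // 3
	fmt.Println(bits.OnesCount32(1 << 31)) // 1
}
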
+func rewriteValuePPC64_OpPrefetchCache(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCache ptr mem)
+ // result: (DCBT ptr mem [0])
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64DCBT)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpPrefetchCacheStreamed(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PrefetchCacheStreamed ptr mem)
+ // result: (DCBT ptr mem [8])
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpPPC64DCBT)
+ v.AuxInt = int64ToAuxInt(8)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (MOVDconst [c]))
+ // result: (ROTLWconst [c&31] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RotateLeft32 x y)
+ // result: (ROTLW x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ROTLW)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft64 x (MOVDconst [c]))
+ // result: (ROTLconst [c&63] x)
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpPPC64ROTLconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RotateLeft64 x y)
+ // result: (ROTL x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64ROTL)
+ v.AddArg2(x, y)
+ return true
+ }
+}
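
RotateLeft32 and RotateLeft64 map directly onto ROTLW/ROTL (or their const forms), while the 8- and 16-bit variants in the neighbouring functions are expanded back into a shift pair because there is no sub-word rotate instruction. A small sketch of the library calls expected to reach these ops; the intrinsic mapping itself is defined elsewhere in the compiler.

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	fmt.Printf("%#x\n", bits.RotateLeft64(1, 63)) // expected to become a single ROTLconst
	fmt.Printf("%#x\n", bits.RotateLeft32(1, 31)) // expected to become a single ROTLWconst
}
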
+func rewriteValuePPC64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
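
The shift lowerings from here on follow one pattern: when shiftIsBounded has proven the count is in range, the operand is simply zero- or sign-extended and shifted; otherwise the count is clamped with ISEL/CMPU, substituting -1 for oversized counts so the machine shift produces the result Go defines. A minimal runnable reminder of the semantics being preserved:

package main

import "fmt"

func main() {
	var u uint16 = 0x8000
	var s int16 = -0x8000
	n := uint(20) // larger than the 16-bit operand width
	fmt.Println(u >> n) // 0: unsigned shifts past the width give zero
	fmt.Println(s >> n) // -1: signed shifts keep filling with the sign bit
}
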
+func rewriteValuePPC64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SRWconst (ZeroExt16to32 x) [c&15])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 15)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRWconst (ZeroExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 x y)
+ // result: (SRW (ZeroExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 16
+ // result: (SRAWconst (SignExt16to32 x) [c&15])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 15)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 16
+ // result: (SRAWconst (SignExt16to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 16
+ // result: (SRAWconst (SignExt16to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 16) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // result: (SRAW (SignExt16to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [16]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(16)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SRWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 x (AND y (MOVDconst [31])))
+ // result: (SRW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x (ANDconst <typ.UInt> [31] y))
+ // result: (SRW x (ANDconst <typ.UInt> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ // result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ // result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32Ux64 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
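
Several of the Rsh32Ux64 rules above keep a count that is already of the form y&31 (or 32 minus that) instead of wrapping it in the ISEL clamp: a masked count stays within what the machine shift handles correctly, and leaving the SUB/SUBFCconst shape intact is presumably what lets the rotate rules earlier in this file still match. A sketch of source that benefits; shr32 is an illustrative name.

package main

import "fmt"

// shr32 masks the count itself, so the lowering can use a plain SRW without
// the ISEL-based clamp the fully general case needs.
func shr32(x uint32, y uint) uint32 {
	return x >> (y & 31)
}

func main() {
	fmt.Println(shr32(1<<31, 35)) // same as shifting by 3
}
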
+func rewriteValuePPC64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 x y)
+ // result: (SRW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 32
+ // result: (SRAWconst x [c&31])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 32
+ // result: (SRAWconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 32
+ // result: (SRAWconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 32) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 x (AND y (MOVDconst [31])))
+ // result: (SRAW x (ANDconst <typ.Int32> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int32)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x (ANDconst <typ.UInt> [31] y))
+ // result: (SRAW x (ANDconst <typ.UInt> [31] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(31)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ // result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 31 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ // result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 31 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x (SUB <typ.UInt> (MOVDconst [32]) (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRAW x (SUB <typ.UInt> (MOVDconst [32]) (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 32 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(31)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x (SUBFCconst <typ.UInt> [32] (AND <typ.UInt> y (MOVDconst [31]))))
+ // result: (SRAW x (SUBFCconst <typ.UInt> [32] (ANDconst <typ.UInt> [31] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(32)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(31)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh32x64 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // result: (SRAW x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [32]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SRDconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRDconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 x (AND y (MOVDconst [63])))
+ // result: (SRD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x (ANDconst <typ.UInt> [63] y))
+ // result: (SRD x (ANDconst <typ.UInt> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ // result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ // result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 x y)
+ // result: (SRD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 64
+ // result: (SRADconst x [c&63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 64
+ // result: (SRADconst x [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 64
+ // result: (SRADconst x [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x (AND y (MOVDconst [63])))
+ // result: (SRAD x (ANDconst <typ.Int64> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64AND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x (ANDconst <typ.UInt> [63] y))
+ // result: (SRAD x (ANDconst <typ.UInt> [63] y))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64ANDconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(63)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ // result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64ANDconst || v_1_1.Type != typ.UInt || auxIntToInt64(v_1_1.AuxInt) != 63 {
+ break
+ }
+ y := v_1_1.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ // result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64ANDconst || v_1_0.Type != typ.UInt || auxIntToInt64(v_1_0.AuxInt) != 63 {
+ break
+ }
+ y := v_1_0.Args[0]
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (SUB <typ.UInt> (MOVDconst [64]) (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRAD x (SUB <typ.UInt> (MOVDconst [64]) (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUB || v_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0.AuxInt) != 64 {
+ break
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpPPC64AND || v_1_1.Type != typ.UInt {
+ break
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ v_1_1_1 := v_1_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_1_0, v_1_1_1 = _i0+1, v_1_1_1, v_1_1_0 {
+ y := v_1_1_0
+ if v_1_1_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_1_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUB, typ.UInt)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(64)
+ v2 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v2.AuxInt = int64ToAuxInt(63)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x (SUBFCconst <typ.UInt> [64] (AND <typ.UInt> y (MOVDconst [63]))))
+ // result: (SRAD x (SUBFCconst <typ.UInt> [64] (ANDconst <typ.UInt> [63] y)))
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64SUBFCconst || v_1.Type != typ.UInt || auxIntToInt64(v_1.AuxInt) != 64 {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpPPC64AND || v_1_0.Type != typ.UInt {
+ break
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ v_1_0_1 := v_1_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0_0, v_1_0_1 = _i0+1, v_1_0_1, v_1_0_0 {
+ y := v_1_0_0
+ if v_1_0_1.Op != OpPPC64MOVDconst || auxIntToInt64(v_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64SUBFCconst, typ.UInt)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpPPC64ANDconst, typ.UInt)
+ v1.AuxInt = int64ToAuxInt(63)
+ v1.AddArg(y)
+ v0.AddArg(v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (Rsh64x64 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // result: (SRAD x (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [64]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAD)
+ v0 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(v3, v4)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SRWconst (ZeroExt8to32 x) [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 _ (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRWconst (ZeroExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBZreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 x y)
+ // result: (SRW (ZeroExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt16to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x (MOVDconst [c]))
+ // cond: uint32(c) < 8
+ // result: (SRAWconst (SignExt8to32 x) [c&7])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint32(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c & 7)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) >= 8
+ // result: (SRAWconst (SignExt8to32 x) [63])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x (MOVDconst [c]))
+ // cond: uint64(c) < 8
+ // result: (SRAWconst (SignExt8to32 x) [c])
+ for {
+ x := v_0
+ if v_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 8) {
+ break
+ }
+ v.reset(OpPPC64SRAWconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU y (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v4.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(y, v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // result: (SRAW (SignExt8to32 x) (ISEL [0] y (MOVDconst [-1]) (CMPU (ZeroExt8to64 y) (MOVDconst [8]))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpPPC64SRAW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpPPC64ISEL, typ.Int32)
+ v1.AuxInt = int32ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpPPC64CMPU, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v5 := b.NewValue0(v.Pos, OpPPC64MOVDconst, typ.Int64)
+ v5.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(v4, v5)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuePPC64_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] call:(CALLstatic {sym} s1:(MOVDstore _ (MOVDconst [sz]) s2:(MOVDstore _ src s3:(MOVDstore {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpPPC64CALLstatic || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpPPC64MOVDstore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpPPC64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpPPC64MOVDstore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpPPC64MOVDstore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(CALLstatic {sym} dst src (MOVDconst [sz]) mem))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)
+ // result: (Move [sz] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpPPC64CALLstatic || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpPPC64MOVDconst {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && call.Uses == 1 && isInlinableMemmove(dst, src, sz, config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(sz)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpPPC64SRADconst)
+ v.AuxInt = int64ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpPPC64NEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuePPC64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is32BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64FMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitInt(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitInt(val.Type)) {
+ break
+ }
+ v.reset(OpPPC64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpPPC64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpPPC64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValuePPC64_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc16to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVHreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (MOVHZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVWreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (MOVWZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 <t> x)
+ // cond: isSigned(t)
+ // result: (MOVBreg x)
+ for {
+ t := v.Type
+ x := v_0
+ if !(isSigned(t)) {
+ break
+ }
+ v.reset(OpPPC64MOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (MOVBZreg x)
+ for {
+ x := v_0
+ v.reset(OpPPC64MOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuePPC64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstorezero [2] destptr (MOVHstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstorezero [4] destptr (MOVWstorezero destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVBstorezero [6] destptr (MOVHstorezero [4] destptr (MOVWstorezero destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVHstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(4)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVWstorezero, types.TypeMem)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [8] {t} destptr mem)
+ // result: (MOVDstorezero destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [12] {t} destptr mem)
+ // result: (MOVWstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [16] {t} destptr mem)
+ // result: (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [24] {t} destptr mem)
+ // result: (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(8)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(0)
+ v1.AddArg2(destptr, mem)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [32] {t} destptr mem)
+ // result: (MOVDstorezero [24] destptr (MOVDstorezero [16] destptr (MOVDstorezero [8] destptr (MOVDstorezero [0] destptr mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpPPC64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v0.AuxInt = int32ToAuxInt(16)
+ v1 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpPPC64MOVDstorezero, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg2(destptr, mem)
+ v1.AddArg2(destptr, v2)
+ v0.AddArg2(destptr, v1)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 <= 8 && s < 64
+ // result: (LoweredZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 <= 8 && s < 64) {
+ break
+ }
+ v.reset(OpPPC64LoweredZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 <= 8
+ // result: (LoweredZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 <= 8) {
+ break
+ }
+ v.reset(OpPPC64LoweredZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: s < 128 && buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadZeroShort [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(s < 128 && buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZeroShort)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] ptr mem)
+ // cond: buildcfg.GOPPC64 >= 9
+ // result: (LoweredQuadZero [s] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ mem := v_1
+ if !(buildcfg.GOPPC64 >= 9) {
+ break
+ }
+ v.reset(OpPPC64LoweredQuadZero)
+ v.AuxInt = int64ToAuxInt(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockPPC64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockPPC64EQ:
+ // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (EQ (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (EQ (InvertFlags cmp) yes no)
+ // result: (EQ cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cmp)
+ return true
+ }
+ // match: (EQ (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (EQ (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ // match: (EQ (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ // match: (EQ (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (EQ (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64EQ, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GE:
+ // match: (GE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GE (InvertFlags cmp) yes no)
+ // result: (LE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cmp)
+ return true
+ }
+ // match: (GE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (GE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (GE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ // match: (GE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ // match: (GE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64GT:
+ // match: (GT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagLT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (GT (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (GT (InvertFlags cmp) yes no)
+ // result: (LT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cmp)
+ return true
+ }
+ // match: (GT (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (GT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (GT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ // match: (GT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ // match: (GT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (GT (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64GT, v0)
+ return true
+ }
+ break
+ }
+ case BlockIf:
+ // match: (If (Equal cc) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64Equal {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (If (NotEqual cc) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64NotEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (If (LessThan cc) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64LessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (If (LessEqual cc) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64LessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (If (GreaterThan cc) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (If (GreaterEqual cc) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64GreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (If (FLessThan cc) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (If (FLessEqual cc) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64FLessEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (If (FGreaterThan cc) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterThan {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (If (FGreaterEqual cc) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64FGreaterEqual {
+ v_0 := b.Controls[0]
+ cc := v_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (If cond yes no)
+ // result: (NE (CMPWconst [0] (ANDconst <typ.UInt32> [1] cond)) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpPPC64CMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(0)
+ v1 := b.NewValue0(cond.Pos, OpPPC64ANDconst, typ.UInt32)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(cond)
+ v0.AddArg(v1)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ case BlockPPC64LE:
+ // match: (LE (FlagEQ) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LE (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LE (InvertFlags cmp) yes no)
+ // result: (GE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cmp)
+ return true
+ }
+ // match: (LE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (LE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (LE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ // match: (LE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ // match: (LE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LE, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64LT:
+ // match: (LT (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (LT (FlagGT) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (LT (InvertFlags cmp) yes no)
+ // result: (GT cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cmp)
+ return true
+ }
+ // match: (LT (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (LT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (LT (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ // match: (LT (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ // match: (LT (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (LT (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64LT, v0)
+ return true
+ }
+ break
+ }
+ case BlockPPC64NE:
+ // match: (NE (CMPWconst [0] (ANDconst [1] (Equal cc))) yes no)
+ // result: (EQ cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64Equal {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64EQ, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (NotEqual cc))) yes no)
+ // result: (NE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64NotEqual {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (LessThan cc))) yes no)
+ // result: (LT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64LessThan {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (LessEqual cc))) yes no)
+ // result: (LE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64LessEqual {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64LE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (GreaterThan cc))) yes no)
+ // result: (GT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64GreaterThan {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (GreaterEqual cc))) yes no)
+ // result: (GE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64GreaterEqual {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64GE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (FLessThan cc))) yes no)
+ // result: (FLT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64FLessThan {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (FLessEqual cc))) yes no)
+ // result: (FLE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64FLessEqual {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FLE, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (FGreaterThan cc))) yes no)
+ // result: (FGT cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64FGreaterThan {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGT, cc)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [1] (FGreaterEqual cc))) yes no)
+ // result: (FGE cc yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst || auxIntToInt64(v_0_0.AuxInt) != 1 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpPPC64FGreaterEqual {
+ break
+ }
+ cc := v_0_0_0.Args[0]
+ b.resetWithControl(BlockPPC64FGE, cc)
+ return true
+ }
+ // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (FlagEQ) yes no)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpPPC64FlagEQ {
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (NE (FlagLT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagLT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (FlagGT) yes no)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpPPC64FlagGT {
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (NE (InvertFlags cmp) yes no)
+ // result: (NE cmp yes no)
+ for b.Controls[0].Op == OpPPC64InvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ b.resetWithControl(BlockPPC64NE, cmp)
+ return true
+ }
+ // match: (NE (CMPconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPWconst [0] (ANDconst [c] x)) yes no)
+ // result: (NE (ANDCCconst [c] x) yes no)
+ for b.Controls[0].Op == OpPPC64CMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpPPC64ANDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_0.Args[0]
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCCconst, types.TypeFlags)
+ v0.AuxInt = int64ToAuxInt(c)
+ v0.AddArg(x)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ // match: (NE (CMPconst [0] z:(AND x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (ANDCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64AND {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ANDCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(OR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (ORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64OR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64ORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ // match: (NE (CMPconst [0] z:(XOR x y)) yes no)
+ // cond: z.Uses == 1
+ // result: (NE (XORCC x y) yes no)
+ for b.Controls[0].Op == OpPPC64CMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ z := v_0.Args[0]
+ if z.Op != OpPPC64XOR {
+ break
+ }
+ _ = z.Args[1]
+ z_0 := z.Args[0]
+ z_1 := z.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, z_0, z_1 = _i0+1, z_1, z_0 {
+ x := z_0
+ y := z_1
+ if !(z.Uses == 1) {
+ continue
+ }
+ v0 := b.NewValue0(v_0.Pos, OpPPC64XORCC, types.TypeFlags)
+ v0.AddArg2(x, y)
+ b.resetWithControl(BlockPPC64NE, v0)
+ return true
+ }
+ break
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
new file mode 100644
index 0000000..b277979
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
@@ -0,0 +1,6764 @@
+// Code generated from gen/RISCV64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueRISCV64(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpRISCV64FABSD
+ return true
+ case OpAdd16:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd32F:
+ v.Op = OpRISCV64FADDS
+ return true
+ case OpAdd64:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAdd64F:
+ v.Op = OpRISCV64FADDD
+ return true
+ case OpAdd8:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddPtr:
+ v.Op = OpRISCV64ADD
+ return true
+ case OpAddr:
+ return rewriteValueRISCV64_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd32:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd64:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAnd8:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAndB:
+ v.Op = OpRISCV64AND
+ return true
+ case OpAtomicAdd32:
+ v.Op = OpRISCV64LoweredAtomicAdd32
+ return true
+ case OpAtomicAdd64:
+ v.Op = OpRISCV64LoweredAtomicAdd64
+ return true
+ case OpAtomicAnd32:
+ v.Op = OpRISCV64LoweredAtomicAnd32
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueRISCV64_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueRISCV64_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ v.Op = OpRISCV64LoweredAtomicCas64
+ return true
+ case OpAtomicExchange32:
+ v.Op = OpRISCV64LoweredAtomicExchange32
+ return true
+ case OpAtomicExchange64:
+ v.Op = OpRISCV64LoweredAtomicExchange64
+ return true
+ case OpAtomicLoad32:
+ v.Op = OpRISCV64LoweredAtomicLoad32
+ return true
+ case OpAtomicLoad64:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicLoad8:
+ v.Op = OpRISCV64LoweredAtomicLoad8
+ return true
+ case OpAtomicLoadPtr:
+ v.Op = OpRISCV64LoweredAtomicLoad64
+ return true
+ case OpAtomicOr32:
+ v.Op = OpRISCV64LoweredAtomicOr32
+ return true
+ case OpAtomicOr8:
+ return rewriteValueRISCV64_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ v.Op = OpRISCV64LoweredAtomicStore32
+ return true
+ case OpAtomicStore64:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAtomicStore8:
+ v.Op = OpRISCV64LoweredAtomicStore8
+ return true
+ case OpAtomicStorePtrNoWB:
+ v.Op = OpRISCV64LoweredAtomicStore64
+ return true
+ case OpAvg64u:
+ return rewriteValueRISCV64_OpAvg64u(v)
+ case OpClosureCall:
+ v.Op = OpRISCV64CALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom32:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom64:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpCom8:
+ v.Op = OpRISCV64NOT
+ return true
+ case OpConst16:
+ return rewriteValueRISCV64_OpConst16(v)
+ case OpConst32:
+ return rewriteValueRISCV64_OpConst32(v)
+ case OpConst32F:
+ return rewriteValueRISCV64_OpConst32F(v)
+ case OpConst64:
+ return rewriteValueRISCV64_OpConst64(v)
+ case OpConst64F:
+ return rewriteValueRISCV64_OpConst64F(v)
+ case OpConst8:
+ return rewriteValueRISCV64_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueRISCV64_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueRISCV64_OpConstNil(v)
+ case OpConvert:
+ v.Op = OpRISCV64MOVconvert
+ return true
+ case OpCopysign:
+ v.Op = OpRISCV64FSGNJD
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpRISCV64FCVTWS
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpRISCV64FCVTLS
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpRISCV64FCVTDS
+ return true
+ case OpCvt32to32F:
+ v.Op = OpRISCV64FCVTSW
+ return true
+ case OpCvt32to64F:
+ v.Op = OpRISCV64FCVTDW
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpRISCV64FCVTWD
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpRISCV64FCVTSD
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpRISCV64FCVTLD
+ return true
+ case OpCvt64to32F:
+ v.Op = OpRISCV64FCVTSL
+ return true
+ case OpCvt64to64F:
+ v.Op = OpRISCV64FCVTDL
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueRISCV64_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueRISCV64_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueRISCV64_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpRISCV64FDIVS
+ return true
+ case OpDiv32u:
+ v.Op = OpRISCV64DIVUW
+ return true
+ case OpDiv64:
+ return rewriteValueRISCV64_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpRISCV64FDIVD
+ return true
+ case OpDiv64u:
+ v.Op = OpRISCV64DIVU
+ return true
+ case OpDiv8:
+ return rewriteValueRISCV64_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueRISCV64_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueRISCV64_OpEq16(v)
+ case OpEq32:
+ return rewriteValueRISCV64_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpRISCV64FEQS
+ return true
+ case OpEq64:
+ return rewriteValueRISCV64_OpEq64(v)
+ case OpEq64F:
+ v.Op = OpRISCV64FEQD
+ return true
+ case OpEq8:
+ return rewriteValueRISCV64_OpEq8(v)
+ case OpEqB:
+ return rewriteValueRISCV64_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueRISCV64_OpEqPtr(v)
+ case OpFMA:
+ v.Op = OpRISCV64FMADDD
+ return true
+ case OpGetCallerPC:
+ v.Op = OpRISCV64LoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpRISCV64LoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpRISCV64LoweredGetClosurePtr
+ return true
+ case OpHmul32:
+ return rewriteValueRISCV64_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueRISCV64_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpRISCV64MULH
+ return true
+ case OpHmul64u:
+ v.Op = OpRISCV64MULHU
+ return true
+ case OpInterCall:
+ v.Op = OpRISCV64CALLinter
+ return true
+ case OpIsInBounds:
+ v.Op = OpLess64U
+ return true
+ case OpIsNonNil:
+ v.Op = OpRISCV64SNEZ
+ return true
+ case OpIsSliceInBounds:
+ v.Op = OpLeq64U
+ return true
+ case OpLeq16:
+ return rewriteValueRISCV64_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueRISCV64_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueRISCV64_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpRISCV64FLES
+ return true
+ case OpLeq32U:
+ return rewriteValueRISCV64_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueRISCV64_OpLeq64(v)
+ case OpLeq64F:
+ v.Op = OpRISCV64FLED
+ return true
+ case OpLeq64U:
+ return rewriteValueRISCV64_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueRISCV64_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueRISCV64_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueRISCV64_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueRISCV64_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueRISCV64_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpRISCV64FLTS
+ return true
+ case OpLess32U:
+ return rewriteValueRISCV64_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpRISCV64SLT
+ return true
+ case OpLess64F:
+ v.Op = OpRISCV64FLTD
+ return true
+ case OpLess64U:
+ v.Op = OpRISCV64SLTU
+ return true
+ case OpLess8:
+ return rewriteValueRISCV64_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueRISCV64_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueRISCV64_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueRISCV64_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueRISCV64_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueRISCV64_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueRISCV64_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueRISCV64_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueRISCV64_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueRISCV64_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueRISCV64_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueRISCV64_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueRISCV64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueRISCV64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueRISCV64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueRISCV64_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueRISCV64_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueRISCV64_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueRISCV64_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueRISCV64_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueRISCV64_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueRISCV64_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueRISCV64_OpMod32(v)
+ case OpMod32u:
+ v.Op = OpRISCV64REMUW
+ return true
+ case OpMod64:
+ return rewriteValueRISCV64_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpRISCV64REMU
+ return true
+ case OpMod8:
+ return rewriteValueRISCV64_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueRISCV64_OpMod8u(v)
+ case OpMove:
+ return rewriteValueRISCV64_OpMove(v)
+ case OpMul16:
+ return rewriteValueRISCV64_OpMul16(v)
+ case OpMul32:
+ v.Op = OpRISCV64MULW
+ return true
+ case OpMul32F:
+ v.Op = OpRISCV64FMULS
+ return true
+ case OpMul64:
+ v.Op = OpRISCV64MUL
+ return true
+ case OpMul64F:
+ v.Op = OpRISCV64FMULD
+ return true
+ case OpMul64uhilo:
+ v.Op = OpRISCV64LoweredMuluhilo
+ return true
+ case OpMul64uover:
+ v.Op = OpRISCV64LoweredMuluover
+ return true
+ case OpMul8:
+ return rewriteValueRISCV64_OpMul8(v)
+ case OpNeg16:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg32F:
+ v.Op = OpRISCV64FNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeg64F:
+ v.Op = OpRISCV64FNEGD
+ return true
+ case OpNeg8:
+ v.Op = OpRISCV64NEG
+ return true
+ case OpNeq16:
+ return rewriteValueRISCV64_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueRISCV64_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpRISCV64FNES
+ return true
+ case OpNeq64:
+ return rewriteValueRISCV64_OpNeq64(v)
+ case OpNeq64F:
+ v.Op = OpRISCV64FNED
+ return true
+ case OpNeq8:
+ return rewriteValueRISCV64_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpNeqPtr:
+ return rewriteValueRISCV64_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpRISCV64LoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpRISCV64SEQZ
+ return true
+ case OpOffPtr:
+ return rewriteValueRISCV64_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr32:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr64:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOr8:
+ v.Op = OpRISCV64OR
+ return true
+ case OpOrB:
+ v.Op = OpRISCV64OR
+ return true
+ case OpPanicBounds:
+ return rewriteValueRISCV64_OpPanicBounds(v)
+ case OpRISCV64ADD:
+ return rewriteValueRISCV64_OpRISCV64ADD(v)
+ case OpRISCV64ADDI:
+ return rewriteValueRISCV64_OpRISCV64ADDI(v)
+ case OpRISCV64AND:
+ return rewriteValueRISCV64_OpRISCV64AND(v)
+ case OpRISCV64ANDI:
+ return rewriteValueRISCV64_OpRISCV64ANDI(v)
+ case OpRISCV64FMADDD:
+ return rewriteValueRISCV64_OpRISCV64FMADDD(v)
+ case OpRISCV64FMSUBD:
+ return rewriteValueRISCV64_OpRISCV64FMSUBD(v)
+ case OpRISCV64FNMADDD:
+ return rewriteValueRISCV64_OpRISCV64FNMADDD(v)
+ case OpRISCV64FNMSUBD:
+ return rewriteValueRISCV64_OpRISCV64FNMSUBD(v)
+ case OpRISCV64MOVBUload:
+ return rewriteValueRISCV64_OpRISCV64MOVBUload(v)
+ case OpRISCV64MOVBUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBUreg(v)
+ case OpRISCV64MOVBload:
+ return rewriteValueRISCV64_OpRISCV64MOVBload(v)
+ case OpRISCV64MOVBreg:
+ return rewriteValueRISCV64_OpRISCV64MOVBreg(v)
+ case OpRISCV64MOVBstore:
+ return rewriteValueRISCV64_OpRISCV64MOVBstore(v)
+ case OpRISCV64MOVBstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVBstorezero(v)
+ case OpRISCV64MOVDload:
+ return rewriteValueRISCV64_OpRISCV64MOVDload(v)
+ case OpRISCV64MOVDnop:
+ return rewriteValueRISCV64_OpRISCV64MOVDnop(v)
+ case OpRISCV64MOVDreg:
+ return rewriteValueRISCV64_OpRISCV64MOVDreg(v)
+ case OpRISCV64MOVDstore:
+ return rewriteValueRISCV64_OpRISCV64MOVDstore(v)
+ case OpRISCV64MOVDstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVDstorezero(v)
+ case OpRISCV64MOVHUload:
+ return rewriteValueRISCV64_OpRISCV64MOVHUload(v)
+ case OpRISCV64MOVHUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHUreg(v)
+ case OpRISCV64MOVHload:
+ return rewriteValueRISCV64_OpRISCV64MOVHload(v)
+ case OpRISCV64MOVHreg:
+ return rewriteValueRISCV64_OpRISCV64MOVHreg(v)
+ case OpRISCV64MOVHstore:
+ return rewriteValueRISCV64_OpRISCV64MOVHstore(v)
+ case OpRISCV64MOVHstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVHstorezero(v)
+ case OpRISCV64MOVWUload:
+ return rewriteValueRISCV64_OpRISCV64MOVWUload(v)
+ case OpRISCV64MOVWUreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWUreg(v)
+ case OpRISCV64MOVWload:
+ return rewriteValueRISCV64_OpRISCV64MOVWload(v)
+ case OpRISCV64MOVWreg:
+ return rewriteValueRISCV64_OpRISCV64MOVWreg(v)
+ case OpRISCV64MOVWstore:
+ return rewriteValueRISCV64_OpRISCV64MOVWstore(v)
+ case OpRISCV64MOVWstorezero:
+ return rewriteValueRISCV64_OpRISCV64MOVWstorezero(v)
+ case OpRISCV64NEG:
+ return rewriteValueRISCV64_OpRISCV64NEG(v)
+ case OpRISCV64NEGW:
+ return rewriteValueRISCV64_OpRISCV64NEGW(v)
+ case OpRISCV64OR:
+ return rewriteValueRISCV64_OpRISCV64OR(v)
+ case OpRISCV64ORI:
+ return rewriteValueRISCV64_OpRISCV64ORI(v)
+ case OpRISCV64SLL:
+ return rewriteValueRISCV64_OpRISCV64SLL(v)
+ case OpRISCV64SLLI:
+ return rewriteValueRISCV64_OpRISCV64SLLI(v)
+ case OpRISCV64SLTI:
+ return rewriteValueRISCV64_OpRISCV64SLTI(v)
+ case OpRISCV64SLTIU:
+ return rewriteValueRISCV64_OpRISCV64SLTIU(v)
+ case OpRISCV64SRA:
+ return rewriteValueRISCV64_OpRISCV64SRA(v)
+ case OpRISCV64SRAI:
+ return rewriteValueRISCV64_OpRISCV64SRAI(v)
+ case OpRISCV64SRL:
+ return rewriteValueRISCV64_OpRISCV64SRL(v)
+ case OpRISCV64SRLI:
+ return rewriteValueRISCV64_OpRISCV64SRLI(v)
+ case OpRISCV64SUB:
+ return rewriteValueRISCV64_OpRISCV64SUB(v)
+ case OpRISCV64SUBW:
+ return rewriteValueRISCV64_OpRISCV64SUBW(v)
+ case OpRISCV64XOR:
+ return rewriteValueRISCV64_OpRISCV64XOR(v)
+ case OpRotateLeft16:
+ return rewriteValueRISCV64_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValueRISCV64_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValueRISCV64_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValueRISCV64_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueRISCV64_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueRISCV64_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueRISCV64_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueRISCV64_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueRISCV64_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueRISCV64_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueRISCV64_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueRISCV64_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueRISCV64_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueRISCV64_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueRISCV64_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueRISCV64_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueRISCV64_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueRISCV64_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueRISCV64_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueRISCV64_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueRISCV64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueRISCV64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueRISCV64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueRISCV64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueRISCV64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueRISCV64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueRISCV64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueRISCV64_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueRISCV64_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueRISCV64_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueRISCV64_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueRISCV64_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueRISCV64_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueRISCV64_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueRISCV64_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueRISCV64_OpRsh8x8(v)
+ case OpSignExt16to32:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpRISCV64MOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpRISCV64MOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpRISCV64MOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueRISCV64_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpRISCV64FSQRTD
+ return true
+ case OpSqrt32:
+ v.Op = OpRISCV64FSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpRISCV64CALLstatic
+ return true
+ case OpStore:
+ return rewriteValueRISCV64_OpStore(v)
+ case OpSub16:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub32F:
+ v.Op = OpRISCV64FSUBS
+ return true
+ case OpSub64:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSub64F:
+ v.Op = OpRISCV64FSUBD
+ return true
+ case OpSub8:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpSubPtr:
+ v.Op = OpRISCV64SUB
+ return true
+ case OpTailCall:
+ v.Op = OpRISCV64CALLtail
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpRISCV64LoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor32:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor64:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpXor8:
+ v.Op = OpRISCV64XOR
+ return true
+ case OpZero:
+ return rewriteValueRISCV64_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpRISCV64MOVHUreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpRISCV64MOVWUreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpRISCV64MOVBUreg
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVaddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (LoweredAtomicAnd32 (ANDI <typ.Uintptr> [^3] ptr) (NOT <typ.UInt32> (SLL <typ.UInt32> (XORI <typ.UInt32> [0xff] (ZeroExt8to32 val)) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr)))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredAtomicAnd32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+ v0.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpRISCV64NOT, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpRISCV64XORI, typ.UInt32)
+ v3.AuxInt = int64ToAuxInt(0xff)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v4.AddArg(val)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(3)
+ v6 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(3)
+ v6.AddArg(ptr)
+ v5.AddArg(v6)
+ v2.AddArg2(v3, v5)
+ v1.AddArg(v2)
+ v.AddArg3(v0, v1, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicCompareAndSwap32 ptr old new mem)
+ // result: (LoweredAtomicCas32 ptr (SignExt32to64 old) new mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new := v_2
+ mem := v_3
+ v.reset(OpRISCV64LoweredAtomicCas32)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(old)
+ v.AddArg4(ptr, v0, new, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (LoweredAtomicOr32 (ANDI <typ.Uintptr> [^3] ptr) (SLL <typ.UInt32> (ZeroExt8to32 val) (SLLI <typ.UInt64> [3] (ANDI <typ.UInt64> [3] ptr))) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpRISCV64LoweredAtomicOr32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.Uintptr)
+ v0.AuxInt = int64ToAuxInt(^3)
+ v0.AddArg(ptr)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SLL, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v2.AddArg(val)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLLI, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3)
+ v4 := b.NewValue0(v.Pos, OpRISCV64ANDI, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(3)
+ v4.AddArg(ptr)
+ v3.AddArg(v4)
+ v1.AddArg2(v2, v3)
+ v.AddArg3(v0, v1, mem)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (ADD <t> (SRLI <t> [1] x) (SRLI <t> [1] y)) (ANDI <t> [1] (AND <t> x y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v1.AuxInt = int64ToAuxInt(1)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SRLI, t)
+ v2.AuxInt = int64ToAuxInt(1)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64ANDI, t)
+ v3.AuxInt = int64ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64AND, t)
+ v4.AddArg2(x, y)
+ v3.AddArg(v4)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst32F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const32F [val])
+ // result: (FMVSX (MOVDconst [int64(math.Float32bits(val))]))
+ for {
+ val := auxIntToFloat32(v.AuxInt)
+ v.reset(OpRISCV64FMVSX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(math.Float32bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst64F(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64F [val])
+ // result: (FMVDX (MOVDconst [int64(math.Float64bits(val))]))
+ for {
+ val := auxIntToFloat64(v.AuxInt)
+ v.reset(OpRISCV64FMVDX)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(math.Float64bits(val)))
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstBool(v *Value) bool {
+ // match: (ConstBool [val])
+ // result: (MOVDconst [int64(b2i(val))])
+ for {
+ val := auxIntToBool(v.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(b2i(val)))
+ return true
+ }
+}
+func rewriteValueRISCV64_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y [false])
+ // result: (DIVW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div32 x y [false])
+ // result: (DIVW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y [false])
+ // result: (DIV x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIV)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64DIVUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Eq64 x y)
+ // result: (SEQZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (SEQZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (SEQZ (XOR <typ.Bool> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64XOR, typ.Bool)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (SEQZ (SUB <typ.Uintptr> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SEQZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRAI [32] (MUL (SignExt32to64 x) (SignExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRLI [32] (MUL (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MUL, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (Not (Less16 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (Not (Less16U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess16U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (Not (Less32 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (Not (Less32U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (Not (Less64 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (Not (Less64U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess64U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (Not (Less8 y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (Not (Less8U y x))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpLess8U, typ.Bool)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (SLT (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (SLTU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (SLT (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (SLTU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (SLT (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLT)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (SLTU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SLTU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: t.IsBoolean()
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean()) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: ( is8BitInt(t) && isSigned(t))
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: ( is8BitInt(t) && !isSigned(t))
+ // result: (MOVBUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && isSigned(t))
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is16BitInt(t) && !isSigned(t))
+ // result: (MOVHUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && isSigned(t))
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is32BitInt(t) && !isSigned(t))
+ // result: (MOVWUload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpRISCV64MOVaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x64 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 <t> x y)
+ // result: (AND (SLL <t> x y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SLL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y [false])
+ // result: (REMW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (REMUW (ZeroExt16to32 x) (ZeroExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod32 x y [false])
+ // result: (REMW x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y [false])
+ // result: (REM x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REM)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (REMW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (REMUW (ZeroExt8to32 x) (ZeroExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64REMUW)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore dst (MOVHload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(1)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore dst (MOVWload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVBstore [3] dst (MOVBload [3] src mem) (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(1)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [8] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] dst (MOVHload [6] src mem) (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(2)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AuxInt = int32ToAuxInt(2)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBload [2] src mem) (MOVBstore [1] dst (MOVBload [1] src mem) (MOVBstore dst (MOVBload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v2.AuxInt = int32ToAuxInt(1)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVBload, typ.Int8)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] {t} dst src mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] dst (MOVHload [4] src mem) (MOVHstore [2] dst (MOVHload [2] src mem) (MOVHstore dst (MOVHload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v2.AuxInt = int32ToAuxInt(2)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVHload, typ.Int16)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [12] {t} dst src mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] dst (MOVWload [8] src mem) (MOVWstore [4] dst (MOVWload [4] src mem) (MOVWstore dst (MOVWload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVWload, typ.Int32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [16] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [32] {t} dst src mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] dst (MOVDload [24] src mem) (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v0.AuxInt = int32ToAuxInt(24)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v2.AuxInt = int32ToAuxInt(16)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AuxInt = int32ToAuxInt(8)
+ v4 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v4.AuxInt = int32ToAuxInt(8)
+ v4.AddArg2(src, mem)
+ v5 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v6 := b.NewValue0(v.Pos, OpRISCV64MOVDload, typ.Int64)
+ v6.AddArg2(src, mem)
+ v5.AddArg3(dst, v6, mem)
+ v3.AddArg3(dst, v4, v5)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
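+ // The next rule lowers aligned moves of up to 8*128 = 1024 bytes to a
+ // Duff's device copy. The AuxInt 16*(128 - s/8) is the entry offset into
+ // the duffcopy routine: copying all 128 doublewords enters at offset 0,
+ // and each doubleword not copied skips what appears to be a 16-byte copy
+ // unit. For example, s = 64 copies 8 doublewords and enters at 16*120 = 1920.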
+ // match: (Move [s] {t} dst src mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)
+ // result: (DUFFCOPY [16 * (128 - s/8)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64DUFFCOPY)
+ v.AuxInt = int64ToAuxInt(16 * (128 - s/8))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
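+ // Generic fallback: the ADDI computes src + s - moveSize(alignment), which
+ // is presumably the address of the last element to copy and serves as the
+ // stopping point for the lowered move loop.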
+ // match: (Move [s] {t} dst src mem)
+ // cond: (s <= 16 || logLargeCopy(v, s))
+ // result: (LoweredMove [t.Alignment()] dst src (ADDI <src.Type> [s-moveSize(t.Alignment(), config)] src) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s <= 16 || logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpRISCV64LoweredMove)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADDI, src.Type)
+ v0.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg(src)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
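+ // The low 16 bits of the product do not depend on how the operands are
+ // extended, so sign-extending before the 32-bit multiply is as good as
+ // zero-extending here.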
+ // match: (Mul16 x y)
+ // result: (MULW (SignExt16to32 x) (SignExt16to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 x y)
+ // result: (MULW (SignExt8to32 x) (SignExt8to32 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64MULW)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
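+ // Zero-extending both operands makes the 64-bit values equal exactly when
+ // the original 16-bit values are equal, so the subtraction is non-zero
+ // exactly when they differ.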
+ // match: (Neq16 x y)
+ // result: (SNEZ (SUB <x.Type> (ZeroExt16to64 x) (ZeroExt16to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (SNEZ (SUB <x.Type> (ZeroExt32to64 x) (ZeroExt32to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neq64 x y)
+ // result: (SNEZ (SUB <x.Type> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (SNEZ (SUB <x.Type> (ZeroExt8to64 x) (ZeroExt8to64 y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, x.Type)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (SNEZ (SUB <typ.Uintptr> x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SNEZ)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SUB, typ.Uintptr)
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // cond: is32Bit(off)
+ // result: (MOVaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP || !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDI [off] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(off)
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpRISCV64ADD)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpRISCV64LoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ADDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ADDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDI [c] (MOVaddr [d] {s} x))
+ // cond: is32Bit(c+int64(d))
+ // result: (MOVaddr [int32(c)+d] {s} x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(is32Bit(c + int64(d))) {
+ break
+ }
+ v.reset(OpRISCV64MOVaddr)
+ v.AuxInt = int32ToAuxInt(int32(c) + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDI [x] (MOVDconst [y]))
+ // cond: is32Bit(x + y)
+ // result: (MOVDconst [x + y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(is32Bit(x + y)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x + y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64AND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AND (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ANDI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ANDI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ANDI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDI [0] x)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDI [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDI [x] (MOVDconst [y]))
+ // result: (MOVDconst [x & y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x & y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMADDD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMADDD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMADDD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMADDD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FMSUBD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FMSUBD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMSUBD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FNMSUBD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FMSUBD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FMADDD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMADDD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMADDD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMADDD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMADDD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FNMSUBD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64FNMSUBD(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FNMSUBD neg:(FNEGD x) y z)
+ // cond: neg.Uses == 1
+ // result: (FMSUBD x y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ neg := v_0
+ if neg.Op != OpRISCV64FNEGD {
+ continue
+ }
+ x := neg.Args[0]
+ y := v_1
+ z := v_2
+ if !(neg.Uses == 1) {
+ continue
+ }
+ v.reset(OpRISCV64FMSUBD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (FNMSUBD x y neg:(FNEGD z))
+ // cond: neg.Uses == 1
+ // result: (FNMADDD x y z)
+ for {
+ x := v_0
+ y := v_1
+ neg := v_2
+ if neg.Op != OpRISCV64FNEGD {
+ break
+ }
+ z := neg.Args[0]
+ if !(neg.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64FNMADDD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
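+ // SEQZ, SNEZ, SLT and SLTU always produce 0 or 1, so zero-extending
+ // their low byte is a no-op and the MOVBUreg can be dropped.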
+ // match: (MOVBUreg x:(SEQZ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SEQZ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SNEZ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SNEZ {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SLT _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SLT {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg x:(SLTU _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpRISCV64SLTU {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint8(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBUreg <t> x:(MOVBload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int8(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVBstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDnop(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDnop (MOVDconst [c]))
+ // result: (MOVDconst [c])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDreg(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (MOVDreg x)
+ // cond: x.Uses == 1
+ // result: (MOVDnop x)
+ for {
+ x := v_0
+ if !(x.Uses == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVDnop)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVDstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHUreg <t> x:(MOVHload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVHstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWUload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWUload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWUreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVHUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg x:(MOVWUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWUreg <t> x:(MOVWload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWUload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWUload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWload [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHUload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHUload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWload {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBUreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVBUreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVHreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWreg _))
+ // result: (MOVDreg x)
+ for {
+ x := v_0
+ if x.Op != OpRISCV64MOVWreg {
+ break
+ }
+ v.reset(OpRISCV64MOVDreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWUload [off] {sym} ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [off] {sym} ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpRISCV64MOVWUload {
+ break
+ }
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpRISCV64MOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(sym)
+ v0.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDI [off2] base) val mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstore [off1+int32(off2)] {sym} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [0]) mem)
+ // result: (MOVWstorezero [off] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWUreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpRISCV64MOVWUreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64MOVaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
+ // cond: is32Bit(int64(off1)+off2)
+ // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpRISCV64ADDI {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64NEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVDconst [x]))
+ // result: (MOVDconst [-x])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64NEGW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGW (MOVDconst [x]))
+ // result: (MOVDconst [int64(int32(-x))])
+ for {
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(-x)))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64OR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (OR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (ORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64ORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64ORI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORI [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORI [-1] x)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORI [x] (MOVDconst [y]))
+ // result: (MOVDconst [x | y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(x | y)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SLL x (MOVDconst [val]))
+ // result: (SLLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SLLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLLI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLLI [x] (MOVDconst [y]))
+ // cond: is32Bit(y << x)
+ // result: (MOVDconst [y << x])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ if !(is32Bit(y << x)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(y << x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLTI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLTI [x] (MOVDconst [y]))
+ // result: (MOVDconst [b2i(int64(y) < int64(x))])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(int64(y) < int64(x)))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SLTIU(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLTIU [x] (MOVDconst [y]))
+ // result: (MOVDconst [b2i(uint64(y) < uint64(x))])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(uint64(y) < uint64(x)))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRA(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRA x (MOVDconst [val]))
+ // result: (SRAI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRAI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRAI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAI [x] (MOVDconst [y]))
+ // result: (MOVDconst [int64(y) >> x])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(y) >> x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SRL x (MOVDconst [val]))
+ // result: (SRLI [int64(val&63)] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpRISCV64SRLI)
+ v.AuxInt = int64ToAuxInt(int64(val & 63))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SRLI(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRLI [x] (MOVDconst [y]))
+ // result: (MOVDconst [int64(uint64(y) >> x)])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpRISCV64MOVDconst {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpRISCV64MOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint64(y) >> x))
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUB x (MOVDconst [val]))
+ // cond: is32Bit(-val)
+ // result: (ADDI [-val] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ val := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(-val)) {
+ break
+ }
+ v.reset(OpRISCV64ADDI)
+ v.AuxInt = int64ToAuxInt(-val)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB x (MOVDconst [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [0]) x)
+ // result: (NEG x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEG)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64SUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBW x (MOVDconst [0]))
+ // result: (ADDIW [0] x)
+ for {
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRISCV64ADDIW)
+ v.AuxInt = int64ToAuxInt(0)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [0]) x)
+ // result: (NEGW x)
+ for {
+ if v_0.Op != OpRISCV64MOVDconst || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpRISCV64NEGW)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRISCV64XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR (MOVDconst [val]) x)
+ // cond: is32Bit(val)
+ // result: (XORI [val] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpRISCV64MOVDconst {
+ continue
+ }
+ val := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(val)) {
+ continue
+ }
+ v.reset(OpRISCV64XORI)
+ v.AuxInt = int64ToAuxInt(val)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
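+// The RotateLeft rules reflect that this backend targets the base RV64
+// instruction set, which has no rotate instruction: a rotate by constant c
+// is expanded into an OR of a left shift by c and a logical right shift by
+// the complementary amount, with both counts masked to the operand width.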
+func rewriteValueRISCV64_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft32 <t> x (MOVDconst [c]))
+ // result: (Or32 (Lsh32x64 <t> x (MOVDconst [c&31])) (Rsh32Ux64 <t> x (MOVDconst [-c&31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 31)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 31)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft64 <t> x (MOVDconst [c]))
+ // result: (Or64 (Lsh64x64 <t> x (MOVDconst [c&63])) (Rsh64Ux64 <t> x (MOVDconst [-c&63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 63)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 63)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueRISCV64_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpRISCV64MOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
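+// The Rsh rules below implement Go's shift semantics, where a count of 64
+// or more is well defined. Unsigned shifts AND the shifted value with
+// Neg(SLTIU [64] y), which is all ones when y < 64 and zero otherwise, so
+// oversized counts yield 0. Signed shifts OR the count with
+// ADDI [-1] (SLTIU [64] y), which leaves y unchanged when y < 64 and
+// otherwise produces -1; SRA consults only the low six bits, so the
+// effective count becomes 63 and the result fills with the sign bit.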
+func rewriteValueRISCV64_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt16to64 x) y) (Neg16 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg16, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 <t> x y)
+ // result: (SRA <t> (SignExt16to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt32to64 x) y) (Neg32 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg32, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 <t> x y)
+ // result: (SRA <t> (SignExt32to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (AND (SRL <t> x y) (Neg64 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 <t> x y)
+ // result: (SRA <t> x (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v2.AuxInt = int64ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(y, v1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt16to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt32to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] y)))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (AND (SRL <t> (ZeroExt8to64 x) y) (Neg8 <t> (SLTIU <t> [64] (ZeroExt8to64 y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64AND)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRL, t)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpNeg8, t)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, t)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt16to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt32to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] y))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueRISCV64_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 <t> x y)
+ // result: (SRA <t> (SignExt8to64 x) (OR <y.Type> y (ADDI <y.Type> [-1] (SLTIU <y.Type> [64] (ZeroExt8to64 y)))))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpRISCV64SRA)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpRISCV64OR, y.Type)
+ v2 := b.NewValue0(v.Pos, OpRISCV64ADDI, y.Type)
+ v2.AuxInt = int64ToAuxInt(-1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64SLTIU, y.Type)
+ v3.AuxInt = int64ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v2.AddArg(v3)
+ v1.AddArg2(y, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
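+// Slicemask yields 0 when x is 0 and all ones when x > 0: ADDI [-1] sets
+// the sign bit exactly when x is zero, SRAI [63] smears that sign bit
+// across the word, and NOT inverts the result.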
+func rewriteValueRISCV64_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (NOT (SRAI <t> [63] (ADDI <t> [-1] x)))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpRISCV64NOT)
+ v0 := b.NewValue0(v.Pos, OpRISCV64SRAI, t)
+ v0.AuxInt = int64ToAuxInt(63)
+ v1 := b.NewValue0(v.Pos, OpRISCV64ADDI, t)
+ v1.AuxInt = int64ToAuxInt(-1)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
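+// Store is lowered purely by element size and kind: 1, 2, 4 and 8 byte
+// integer stores become MOVBstore, MOVHstore, MOVWstore and MOVDstore,
+// while 4 and 8 byte floating-point stores become FMOVWstore and
+// FMOVDstore.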
+func rewriteValueRISCV64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && !is32BitFloat(val.Type)
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && !is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && !is64BitFloat(val.Type)
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && !is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpRISCV64FMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
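+// The Zero rules pick a strategy by size and alignment: small constant
+// sizes are unrolled into byte, half, word or doubleword stores of a zero
+// constant; 8-byte-aligned sizes that are multiples of 8 and at most
+// 8*128 bytes go through DUFFZERO (unless Duff's device is disabled); and
+// everything else falls back to the LoweredZero loop.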
+func rewriteValueRISCV64_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] ptr mem)
+ // result: (MOVBstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] ptr mem)
+ // result: (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(1)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [4] ptr mem)
+ // result: (MOVBstore [3] ptr (MOVDconst [0]) (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(1)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore ptr (MOVDconst [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [8] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [6] ptr (MOVDconst [0]) (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(2)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [3] ptr mem)
+ // result: (MOVBstore [2] ptr (MOVDconst [0]) (MOVBstore [1] ptr (MOVDconst [0]) (MOVBstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64MOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVBstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] {t} ptr mem)
+ // cond: t.Alignment()%2 == 0
+ // result: (MOVHstore [4] ptr (MOVDconst [0]) (MOVHstore [2] ptr (MOVDconst [0]) (MOVHstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%2 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(2)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVHstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [12] {t} ptr mem)
+ // cond: t.Alignment()%4 == 0
+ // result: (MOVWstore [8] ptr (MOVDconst [0]) (MOVWstore [4] ptr (MOVDconst [0]) (MOVWstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 12 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%4 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVWstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [16] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AddArg3(ptr, v0, mem)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AddArg3(ptr, v0, mem)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] {t} ptr mem)
+ // cond: t.Alignment()%8 == 0
+ // result: (MOVDstore [24] ptr (MOVDconst [0]) (MOVDstore [16] ptr (MOVDconst [0]) (MOVDstore [8] ptr (MOVDconst [0]) (MOVDstore ptr (MOVDconst [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(t.Alignment()%8 == 0) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+ v.AuxInt = int32ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v2.AuxInt = int32ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpRISCV64MOVDstore, types.TypeMem)
+ v3.AddArg3(ptr, v0, mem)
+ v2.AddArg3(ptr, v0, v3)
+ v1.AddArg3(ptr, v0, v2)
+ v.AddArg3(ptr, v0, v1)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // cond: s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice
+ // result: (DUFFZERO [8 * (128 - s/8)] ptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s <= 8*128 && t.Alignment()%8 == 0 && !config.noDuffDevice) {
+ break
+ }
+ v.reset(OpRISCV64DUFFZERO)
+ v.AuxInt = int64ToAuxInt(8 * (128 - s/8))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Zero [s] {t} ptr mem)
+ // result: (LoweredZero [t.Alignment()] ptr (ADD <ptr.Type> ptr (MOVDconst [s-moveSize(t.Alignment(), config)])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ ptr := v_0
+ mem := v_1
+ v.reset(OpRISCV64LoweredZero)
+ v.AuxInt = int64ToAuxInt(t.Alignment())
+ v0 := b.NewValue0(v.Pos, OpRISCV64ADD, ptr.Type)
+ v1 := b.NewValue0(v.Pos, OpRISCV64MOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(s - moveSize(t.Alignment(), config))
+ v0.AddArg2(ptr, v1)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
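+// rewriteBlockRISCV64 simplifies control flow: a branch comparing against
+// a zero constant collapses into the single-operand form (BEQZ, BNEZ,
+// BGEZ, BLEZ, BGTZ, BLTZ); SEQZ, SNEZ and single-use NEG values feeding a
+// zero test are folded away; SUB, SLT and SLTU feeding a zero test become
+// the two-operand branches BEQ/BNE, BGE/BLT and BGEU/BLTU; and a generic
+// If block becomes BNEZ on its condition zero-extended with MOVBUreg.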
+func rewriteBlockRISCV64(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockRISCV64BEQ:
+ // match: (BEQ (MOVDconst [0]) cond yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ // match: (BEQ cond (MOVDconst [0]) yes no)
+ // result: (BEQZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BEQZ, cond)
+ return true
+ }
+ case BlockRISCV64BEQZ:
+ // match: (BEQZ (SEQZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BEQZ (SNEZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BEQZ x:(NEG y) yes no)
+ // cond: x.Uses == 1
+ // result: (BEQZ y yes no)
+ for b.Controls[0].Op == OpRISCV64NEG {
+ x := b.Controls[0]
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BEQZ, y)
+ return true
+ }
+ // match: (BEQZ (SUB x y) yes no)
+ // result: (BEQ x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BEQ, x, y)
+ return true
+ }
+ // match: (BEQZ (SLT x y) yes no)
+ // result: (BGE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGE, x, y)
+ return true
+ }
+ // match: (BEQZ (SLTU x y) yes no)
+ // result: (BGEU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BGEU, x, y)
+ return true
+ }
+ case BlockRISCV64BGE:
+ // match: (BGE (MOVDconst [0]) cond yes no)
+ // result: (BLEZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BLEZ, cond)
+ return true
+ }
+ // match: (BGE cond (MOVDconst [0]) yes no)
+ // result: (BGEZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BGEZ, cond)
+ return true
+ }
+ case BlockRISCV64BLT:
+ // match: (BLT (MOVDconst [0]) cond yes no)
+ // result: (BGTZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BGTZ, cond)
+ return true
+ }
+ // match: (BLT cond (MOVDconst [0]) yes no)
+ // result: (BLTZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BLTZ, cond)
+ return true
+ }
+ case BlockRISCV64BNE:
+ // match: (BNE (MOVDconst [0]) cond yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[0].Op == OpRISCV64MOVDconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ cond := b.Controls[1]
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ // match: (BNE cond (MOVDconst [0]) yes no)
+ // result: (BNEZ cond yes no)
+ for b.Controls[1].Op == OpRISCV64MOVDconst {
+ cond := b.Controls[0]
+ v_1 := b.Controls[1]
+ if auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BNEZ, cond)
+ return true
+ }
+ case BlockRISCV64BNEZ:
+ // match: (BNEZ (SEQZ x) yes no)
+ // result: (BEQZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SEQZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BEQZ, x)
+ return true
+ }
+ // match: (BNEZ (SNEZ x) yes no)
+ // result: (BNEZ x yes no)
+ for b.Controls[0].Op == OpRISCV64SNEZ {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ b.resetWithControl(BlockRISCV64BNEZ, x)
+ return true
+ }
+ // match: (BNEZ x:(NEG y) yes no)
+ // cond: x.Uses == 1
+ // result: (BNEZ y yes no)
+ for b.Controls[0].Op == OpRISCV64NEG {
+ x := b.Controls[0]
+ y := x.Args[0]
+ if !(x.Uses == 1) {
+ break
+ }
+ b.resetWithControl(BlockRISCV64BNEZ, y)
+ return true
+ }
+ // match: (BNEZ (SUB x y) yes no)
+ // result: (BNE x y yes no)
+ for b.Controls[0].Op == OpRISCV64SUB {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BNE, x, y)
+ return true
+ }
+ // match: (BNEZ (SLT x y) yes no)
+ // result: (BLT x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLT {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLT, x, y)
+ return true
+ }
+ // match: (BNEZ (SLTU x y) yes no)
+ // result: (BLTU x y yes no)
+ for b.Controls[0].Op == OpRISCV64SLTU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ b.resetWithControl2(BlockRISCV64BLTU, x, y)
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (BNEZ (MOVBUreg <typ.UInt64> cond) yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpRISCV64MOVBUreg, typ.UInt64)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockRISCV64BNEZ, v0)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteS390X.go b/src/cmd/compile/internal/ssa/rewriteS390X.go
new file mode 100644
index 0000000..0d63586
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteS390X.go
@@ -0,0 +1,17915 @@
+// Code generated from gen/S390X.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+import "cmd/internal/obj/s390x"
+
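+// rewriteValueS390X dispatches on v.Op: generic ops with a direct S390X
+// equivalent are renamed in place, while ops that need pattern matching
+// call a per-op rewrite function below.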
+func rewriteValueS390X(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32:
+ v.Op = OpS390XADDW
+ return true
+ case OpAdd32F:
+ return rewriteValueS390X_OpAdd32F(v)
+ case OpAdd64:
+ v.Op = OpS390XADD
+ return true
+ case OpAdd64F:
+ return rewriteValueS390X_OpAdd64F(v)
+ case OpAdd8:
+ v.Op = OpS390XADDW
+ return true
+ case OpAddPtr:
+ v.Op = OpS390XADD
+ return true
+ case OpAddr:
+ return rewriteValueS390X_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd32:
+ v.Op = OpS390XANDW
+ return true
+ case OpAnd64:
+ v.Op = OpS390XAND
+ return true
+ case OpAnd8:
+ v.Op = OpS390XANDW
+ return true
+ case OpAndB:
+ v.Op = OpS390XANDW
+ return true
+ case OpAtomicAdd32:
+ return rewriteValueS390X_OpAtomicAdd32(v)
+ case OpAtomicAdd64:
+ return rewriteValueS390X_OpAtomicAdd64(v)
+ case OpAtomicAnd32:
+ v.Op = OpS390XLAN
+ return true
+ case OpAtomicAnd8:
+ return rewriteValueS390X_OpAtomicAnd8(v)
+ case OpAtomicCompareAndSwap32:
+ return rewriteValueS390X_OpAtomicCompareAndSwap32(v)
+ case OpAtomicCompareAndSwap64:
+ return rewriteValueS390X_OpAtomicCompareAndSwap64(v)
+ case OpAtomicExchange32:
+ return rewriteValueS390X_OpAtomicExchange32(v)
+ case OpAtomicExchange64:
+ return rewriteValueS390X_OpAtomicExchange64(v)
+ case OpAtomicLoad32:
+ return rewriteValueS390X_OpAtomicLoad32(v)
+ case OpAtomicLoad64:
+ return rewriteValueS390X_OpAtomicLoad64(v)
+ case OpAtomicLoad8:
+ return rewriteValueS390X_OpAtomicLoad8(v)
+ case OpAtomicLoadAcq32:
+ return rewriteValueS390X_OpAtomicLoadAcq32(v)
+ case OpAtomicLoadPtr:
+ return rewriteValueS390X_OpAtomicLoadPtr(v)
+ case OpAtomicOr32:
+ v.Op = OpS390XLAO
+ return true
+ case OpAtomicOr8:
+ return rewriteValueS390X_OpAtomicOr8(v)
+ case OpAtomicStore32:
+ return rewriteValueS390X_OpAtomicStore32(v)
+ case OpAtomicStore64:
+ return rewriteValueS390X_OpAtomicStore64(v)
+ case OpAtomicStore8:
+ return rewriteValueS390X_OpAtomicStore8(v)
+ case OpAtomicStorePtrNoWB:
+ return rewriteValueS390X_OpAtomicStorePtrNoWB(v)
+ case OpAtomicStoreRel32:
+ return rewriteValueS390X_OpAtomicStoreRel32(v)
+ case OpAvg64u:
+ return rewriteValueS390X_OpAvg64u(v)
+ case OpBitLen64:
+ return rewriteValueS390X_OpBitLen64(v)
+ case OpBswap32:
+ v.Op = OpS390XMOVWBR
+ return true
+ case OpBswap64:
+ v.Op = OpS390XMOVDBR
+ return true
+ case OpCeil:
+ return rewriteValueS390X_OpCeil(v)
+ case OpClosureCall:
+ v.Op = OpS390XCALLclosure
+ return true
+ case OpCom16:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom32:
+ v.Op = OpS390XNOTW
+ return true
+ case OpCom64:
+ v.Op = OpS390XNOT
+ return true
+ case OpCom8:
+ v.Op = OpS390XNOTW
+ return true
+ case OpConst16:
+ return rewriteValueS390X_OpConst16(v)
+ case OpConst32:
+ return rewriteValueS390X_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpS390XFMOVSconst
+ return true
+ case OpConst64:
+ return rewriteValueS390X_OpConst64(v)
+ case OpConst64F:
+ v.Op = OpS390XFMOVDconst
+ return true
+ case OpConst8:
+ return rewriteValueS390X_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueS390X_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueS390X_OpConstNil(v)
+ case OpCtz32:
+ return rewriteValueS390X_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpCtz32
+ return true
+ case OpCtz64:
+ return rewriteValueS390X_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpS390XCFEBRA
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpS390XCLFEBR
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpS390XCGEBRA
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpS390XLDEBR
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpS390XCLGEBR
+ return true
+ case OpCvt32Uto32F:
+ v.Op = OpS390XCELFBR
+ return true
+ case OpCvt32Uto64F:
+ v.Op = OpS390XCDLFBR
+ return true
+ case OpCvt32to32F:
+ v.Op = OpS390XCEFBRA
+ return true
+ case OpCvt32to64F:
+ v.Op = OpS390XCDFBRA
+ return true
+ case OpCvt64Fto32:
+ v.Op = OpS390XCFDBRA
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpS390XLEDBR
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpS390XCLFDBR
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpS390XCGDBRA
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpS390XCLGDBR
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpS390XCELGBR
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpS390XCDLGBR
+ return true
+ case OpCvt64to32F:
+ v.Op = OpS390XCEGBRA
+ return true
+ case OpCvt64to64F:
+ v.Op = OpS390XCDGBRA
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueS390X_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueS390X_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueS390X_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpS390XFDIVS
+ return true
+ case OpDiv32u:
+ return rewriteValueS390X_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueS390X_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpS390XFDIV
+ return true
+ case OpDiv64u:
+ v.Op = OpS390XDIVDU
+ return true
+ case OpDiv8:
+ return rewriteValueS390X_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueS390X_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueS390X_OpEq16(v)
+ case OpEq32:
+ return rewriteValueS390X_OpEq32(v)
+ case OpEq32F:
+ return rewriteValueS390X_OpEq32F(v)
+ case OpEq64:
+ return rewriteValueS390X_OpEq64(v)
+ case OpEq64F:
+ return rewriteValueS390X_OpEq64F(v)
+ case OpEq8:
+ return rewriteValueS390X_OpEq8(v)
+ case OpEqB:
+ return rewriteValueS390X_OpEqB(v)
+ case OpEqPtr:
+ return rewriteValueS390X_OpEqPtr(v)
+ case OpFMA:
+ return rewriteValueS390X_OpFMA(v)
+ case OpFloor:
+ return rewriteValueS390X_OpFloor(v)
+ case OpGetCallerPC:
+ v.Op = OpS390XLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpS390XLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpS390XLoweredGetClosurePtr
+ return true
+ case OpGetG:
+ v.Op = OpS390XLoweredGetG
+ return true
+ case OpHmul32:
+ return rewriteValueS390X_OpHmul32(v)
+ case OpHmul32u:
+ return rewriteValueS390X_OpHmul32u(v)
+ case OpHmul64:
+ v.Op = OpS390XMULHD
+ return true
+ case OpHmul64u:
+ v.Op = OpS390XMULHDU
+ return true
+ case OpITab:
+ return rewriteValueS390X_OpITab(v)
+ case OpInterCall:
+ v.Op = OpS390XCALLinter
+ return true
+ case OpIsInBounds:
+ return rewriteValueS390X_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValueS390X_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValueS390X_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValueS390X_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueS390X_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueS390X_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValueS390X_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValueS390X_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValueS390X_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValueS390X_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValueS390X_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValueS390X_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueS390X_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueS390X_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueS390X_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueS390X_OpLess32(v)
+ case OpLess32F:
+ return rewriteValueS390X_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValueS390X_OpLess32U(v)
+ case OpLess64:
+ return rewriteValueS390X_OpLess64(v)
+ case OpLess64F:
+ return rewriteValueS390X_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValueS390X_OpLess64U(v)
+ case OpLess8:
+ return rewriteValueS390X_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueS390X_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueS390X_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueS390X_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueS390X_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueS390X_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValueS390X_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValueS390X_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueS390X_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueS390X_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValueS390X_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValueS390X_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueS390X_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueS390X_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueS390X_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueS390X_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueS390X_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueS390X_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValueS390X_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValueS390X_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueS390X_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueS390X_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueS390X_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueS390X_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueS390X_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpS390XMODDU
+ return true
+ case OpMod8:
+ return rewriteValueS390X_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueS390X_OpMod8u(v)
+ case OpMove:
+ return rewriteValueS390X_OpMove(v)
+ case OpMul16:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32:
+ v.Op = OpS390XMULLW
+ return true
+ case OpMul32F:
+ v.Op = OpS390XFMULS
+ return true
+ case OpMul64:
+ v.Op = OpS390XMULLD
+ return true
+ case OpMul64F:
+ v.Op = OpS390XFMUL
+ return true
+ case OpMul64uhilo:
+ v.Op = OpS390XMLGR
+ return true
+ case OpMul8:
+ v.Op = OpS390XMULLW
+ return true
+ case OpNeg16:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeg32F:
+ v.Op = OpS390XFNEGS
+ return true
+ case OpNeg64:
+ v.Op = OpS390XNEG
+ return true
+ case OpNeg64F:
+ v.Op = OpS390XFNEG
+ return true
+ case OpNeg8:
+ v.Op = OpS390XNEGW
+ return true
+ case OpNeq16:
+ return rewriteValueS390X_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueS390X_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValueS390X_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValueS390X_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValueS390X_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValueS390X_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValueS390X_OpNeqB(v)
+ case OpNeqPtr:
+ return rewriteValueS390X_OpNeqPtr(v)
+ case OpNilCheck:
+ v.Op = OpS390XLoweredNilCheck
+ return true
+ case OpNot:
+ return rewriteValueS390X_OpNot(v)
+ case OpOffPtr:
+ return rewriteValueS390X_OpOffPtr(v)
+ case OpOr16:
+ v.Op = OpS390XORW
+ return true
+ case OpOr32:
+ v.Op = OpS390XORW
+ return true
+ case OpOr64:
+ v.Op = OpS390XOR
+ return true
+ case OpOr8:
+ v.Op = OpS390XORW
+ return true
+ case OpOrB:
+ v.Op = OpS390XORW
+ return true
+ case OpPanicBounds:
+ return rewriteValueS390X_OpPanicBounds(v)
+ case OpPopCount16:
+ return rewriteValueS390X_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueS390X_OpPopCount32(v)
+ case OpPopCount64:
+ return rewriteValueS390X_OpPopCount64(v)
+ case OpPopCount8:
+ return rewriteValueS390X_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueS390X_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpS390XRLL
+ return true
+ case OpRotateLeft64:
+ v.Op = OpS390XRLLG
+ return true
+ case OpRotateLeft8:
+ return rewriteValueS390X_OpRotateLeft8(v)
+ case OpRound:
+ return rewriteValueS390X_OpRound(v)
+ case OpRound32F:
+ v.Op = OpS390XLoweredRound32F
+ return true
+ case OpRound64F:
+ v.Op = OpS390XLoweredRound64F
+ return true
+ case OpRoundToEven:
+ return rewriteValueS390X_OpRoundToEven(v)
+ case OpRsh16Ux16:
+ return rewriteValueS390X_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueS390X_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueS390X_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueS390X_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueS390X_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueS390X_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueS390X_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueS390X_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueS390X_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueS390X_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueS390X_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueS390X_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueS390X_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueS390X_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueS390X_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueS390X_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueS390X_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueS390X_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueS390X_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueS390X_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueS390X_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueS390X_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueS390X_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueS390X_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueS390X_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueS390X_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueS390X_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueS390X_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueS390X_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueS390X_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueS390X_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueS390X_OpRsh8x8(v)
+ case OpS390XADD:
+ return rewriteValueS390X_OpS390XADD(v)
+ case OpS390XADDC:
+ return rewriteValueS390X_OpS390XADDC(v)
+ case OpS390XADDE:
+ return rewriteValueS390X_OpS390XADDE(v)
+ case OpS390XADDW:
+ return rewriteValueS390X_OpS390XADDW(v)
+ case OpS390XADDWconst:
+ return rewriteValueS390X_OpS390XADDWconst(v)
+ case OpS390XADDWload:
+ return rewriteValueS390X_OpS390XADDWload(v)
+ case OpS390XADDconst:
+ return rewriteValueS390X_OpS390XADDconst(v)
+ case OpS390XADDload:
+ return rewriteValueS390X_OpS390XADDload(v)
+ case OpS390XAND:
+ return rewriteValueS390X_OpS390XAND(v)
+ case OpS390XANDW:
+ return rewriteValueS390X_OpS390XANDW(v)
+ case OpS390XANDWconst:
+ return rewriteValueS390X_OpS390XANDWconst(v)
+ case OpS390XANDWload:
+ return rewriteValueS390X_OpS390XANDWload(v)
+ case OpS390XANDconst:
+ return rewriteValueS390X_OpS390XANDconst(v)
+ case OpS390XANDload:
+ return rewriteValueS390X_OpS390XANDload(v)
+ case OpS390XCMP:
+ return rewriteValueS390X_OpS390XCMP(v)
+ case OpS390XCMPU:
+ return rewriteValueS390X_OpS390XCMPU(v)
+ case OpS390XCMPUconst:
+ return rewriteValueS390X_OpS390XCMPUconst(v)
+ case OpS390XCMPW:
+ return rewriteValueS390X_OpS390XCMPW(v)
+ case OpS390XCMPWU:
+ return rewriteValueS390X_OpS390XCMPWU(v)
+ case OpS390XCMPWUconst:
+ return rewriteValueS390X_OpS390XCMPWUconst(v)
+ case OpS390XCMPWconst:
+ return rewriteValueS390X_OpS390XCMPWconst(v)
+ case OpS390XCMPconst:
+ return rewriteValueS390X_OpS390XCMPconst(v)
+ case OpS390XCPSDR:
+ return rewriteValueS390X_OpS390XCPSDR(v)
+ case OpS390XFCMP:
+ return rewriteValueS390X_OpS390XFCMP(v)
+ case OpS390XFCMPS:
+ return rewriteValueS390X_OpS390XFCMPS(v)
+ case OpS390XFMOVDload:
+ return rewriteValueS390X_OpS390XFMOVDload(v)
+ case OpS390XFMOVDstore:
+ return rewriteValueS390X_OpS390XFMOVDstore(v)
+ case OpS390XFMOVSload:
+ return rewriteValueS390X_OpS390XFMOVSload(v)
+ case OpS390XFMOVSstore:
+ return rewriteValueS390X_OpS390XFMOVSstore(v)
+ case OpS390XFNEG:
+ return rewriteValueS390X_OpS390XFNEG(v)
+ case OpS390XFNEGS:
+ return rewriteValueS390X_OpS390XFNEGS(v)
+ case OpS390XLDGR:
+ return rewriteValueS390X_OpS390XLDGR(v)
+ case OpS390XLEDBR:
+ return rewriteValueS390X_OpS390XLEDBR(v)
+ case OpS390XLGDR:
+ return rewriteValueS390X_OpS390XLGDR(v)
+ case OpS390XLOCGR:
+ return rewriteValueS390X_OpS390XLOCGR(v)
+ case OpS390XLTDBR:
+ return rewriteValueS390X_OpS390XLTDBR(v)
+ case OpS390XLTEBR:
+ return rewriteValueS390X_OpS390XLTEBR(v)
+ case OpS390XLoweredRound32F:
+ return rewriteValueS390X_OpS390XLoweredRound32F(v)
+ case OpS390XLoweredRound64F:
+ return rewriteValueS390X_OpS390XLoweredRound64F(v)
+ case OpS390XMOVBZload:
+ return rewriteValueS390X_OpS390XMOVBZload(v)
+ case OpS390XMOVBZreg:
+ return rewriteValueS390X_OpS390XMOVBZreg(v)
+ case OpS390XMOVBload:
+ return rewriteValueS390X_OpS390XMOVBload(v)
+ case OpS390XMOVBreg:
+ return rewriteValueS390X_OpS390XMOVBreg(v)
+ case OpS390XMOVBstore:
+ return rewriteValueS390X_OpS390XMOVBstore(v)
+ case OpS390XMOVBstoreconst:
+ return rewriteValueS390X_OpS390XMOVBstoreconst(v)
+ case OpS390XMOVDaddridx:
+ return rewriteValueS390X_OpS390XMOVDaddridx(v)
+ case OpS390XMOVDload:
+ return rewriteValueS390X_OpS390XMOVDload(v)
+ case OpS390XMOVDstore:
+ return rewriteValueS390X_OpS390XMOVDstore(v)
+ case OpS390XMOVDstoreconst:
+ return rewriteValueS390X_OpS390XMOVDstoreconst(v)
+ case OpS390XMOVHBRstore:
+ return rewriteValueS390X_OpS390XMOVHBRstore(v)
+ case OpS390XMOVHZload:
+ return rewriteValueS390X_OpS390XMOVHZload(v)
+ case OpS390XMOVHZreg:
+ return rewriteValueS390X_OpS390XMOVHZreg(v)
+ case OpS390XMOVHload:
+ return rewriteValueS390X_OpS390XMOVHload(v)
+ case OpS390XMOVHreg:
+ return rewriteValueS390X_OpS390XMOVHreg(v)
+ case OpS390XMOVHstore:
+ return rewriteValueS390X_OpS390XMOVHstore(v)
+ case OpS390XMOVHstoreconst:
+ return rewriteValueS390X_OpS390XMOVHstoreconst(v)
+ case OpS390XMOVWBRstore:
+ return rewriteValueS390X_OpS390XMOVWBRstore(v)
+ case OpS390XMOVWZload:
+ return rewriteValueS390X_OpS390XMOVWZload(v)
+ case OpS390XMOVWZreg:
+ return rewriteValueS390X_OpS390XMOVWZreg(v)
+ case OpS390XMOVWload:
+ return rewriteValueS390X_OpS390XMOVWload(v)
+ case OpS390XMOVWreg:
+ return rewriteValueS390X_OpS390XMOVWreg(v)
+ case OpS390XMOVWstore:
+ return rewriteValueS390X_OpS390XMOVWstore(v)
+ case OpS390XMOVWstoreconst:
+ return rewriteValueS390X_OpS390XMOVWstoreconst(v)
+ case OpS390XMULLD:
+ return rewriteValueS390X_OpS390XMULLD(v)
+ case OpS390XMULLDconst:
+ return rewriteValueS390X_OpS390XMULLDconst(v)
+ case OpS390XMULLDload:
+ return rewriteValueS390X_OpS390XMULLDload(v)
+ case OpS390XMULLW:
+ return rewriteValueS390X_OpS390XMULLW(v)
+ case OpS390XMULLWconst:
+ return rewriteValueS390X_OpS390XMULLWconst(v)
+ case OpS390XMULLWload:
+ return rewriteValueS390X_OpS390XMULLWload(v)
+ case OpS390XNEG:
+ return rewriteValueS390X_OpS390XNEG(v)
+ case OpS390XNEGW:
+ return rewriteValueS390X_OpS390XNEGW(v)
+ case OpS390XNOT:
+ return rewriteValueS390X_OpS390XNOT(v)
+ case OpS390XNOTW:
+ return rewriteValueS390X_OpS390XNOTW(v)
+ case OpS390XOR:
+ return rewriteValueS390X_OpS390XOR(v)
+ case OpS390XORW:
+ return rewriteValueS390X_OpS390XORW(v)
+ case OpS390XORWconst:
+ return rewriteValueS390X_OpS390XORWconst(v)
+ case OpS390XORWload:
+ return rewriteValueS390X_OpS390XORWload(v)
+ case OpS390XORconst:
+ return rewriteValueS390X_OpS390XORconst(v)
+ case OpS390XORload:
+ return rewriteValueS390X_OpS390XORload(v)
+ case OpS390XRISBGZ:
+ return rewriteValueS390X_OpS390XRISBGZ(v)
+ case OpS390XRLL:
+ return rewriteValueS390X_OpS390XRLL(v)
+ case OpS390XRLLG:
+ return rewriteValueS390X_OpS390XRLLG(v)
+ case OpS390XSLD:
+ return rewriteValueS390X_OpS390XSLD(v)
+ case OpS390XSLDconst:
+ return rewriteValueS390X_OpS390XSLDconst(v)
+ case OpS390XSLW:
+ return rewriteValueS390X_OpS390XSLW(v)
+ case OpS390XSLWconst:
+ return rewriteValueS390X_OpS390XSLWconst(v)
+ case OpS390XSRAD:
+ return rewriteValueS390X_OpS390XSRAD(v)
+ case OpS390XSRADconst:
+ return rewriteValueS390X_OpS390XSRADconst(v)
+ case OpS390XSRAW:
+ return rewriteValueS390X_OpS390XSRAW(v)
+ case OpS390XSRAWconst:
+ return rewriteValueS390X_OpS390XSRAWconst(v)
+ case OpS390XSRD:
+ return rewriteValueS390X_OpS390XSRD(v)
+ case OpS390XSRDconst:
+ return rewriteValueS390X_OpS390XSRDconst(v)
+ case OpS390XSRW:
+ return rewriteValueS390X_OpS390XSRW(v)
+ case OpS390XSRWconst:
+ return rewriteValueS390X_OpS390XSRWconst(v)
+ case OpS390XSTM2:
+ return rewriteValueS390X_OpS390XSTM2(v)
+ case OpS390XSTMG2:
+ return rewriteValueS390X_OpS390XSTMG2(v)
+ case OpS390XSUB:
+ return rewriteValueS390X_OpS390XSUB(v)
+ case OpS390XSUBE:
+ return rewriteValueS390X_OpS390XSUBE(v)
+ case OpS390XSUBW:
+ return rewriteValueS390X_OpS390XSUBW(v)
+ case OpS390XSUBWconst:
+ return rewriteValueS390X_OpS390XSUBWconst(v)
+ case OpS390XSUBWload:
+ return rewriteValueS390X_OpS390XSUBWload(v)
+ case OpS390XSUBconst:
+ return rewriteValueS390X_OpS390XSUBconst(v)
+ case OpS390XSUBload:
+ return rewriteValueS390X_OpS390XSUBload(v)
+ case OpS390XSumBytes2:
+ return rewriteValueS390X_OpS390XSumBytes2(v)
+ case OpS390XSumBytes4:
+ return rewriteValueS390X_OpS390XSumBytes4(v)
+ case OpS390XSumBytes8:
+ return rewriteValueS390X_OpS390XSumBytes8(v)
+ case OpS390XXOR:
+ return rewriteValueS390X_OpS390XXOR(v)
+ case OpS390XXORW:
+ return rewriteValueS390X_OpS390XXORW(v)
+ case OpS390XXORWconst:
+ return rewriteValueS390X_OpS390XXORWconst(v)
+ case OpS390XXORWload:
+ return rewriteValueS390X_OpS390XXORWload(v)
+ case OpS390XXORconst:
+ return rewriteValueS390X_OpS390XXORconst(v)
+ case OpS390XXORload:
+ return rewriteValueS390X_OpS390XXORload(v)
+ case OpSelect0:
+ return rewriteValueS390X_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValueS390X_OpSelect1(v)
+ case OpSignExt16to32:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt16to64:
+ v.Op = OpS390XMOVHreg
+ return true
+ case OpSignExt32to64:
+ v.Op = OpS390XMOVWreg
+ return true
+ case OpSignExt8to16:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to32:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSignExt8to64:
+ v.Op = OpS390XMOVBreg
+ return true
+ case OpSlicemask:
+ return rewriteValueS390X_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpS390XFSQRT
+ return true
+ case OpSqrt32:
+ v.Op = OpS390XFSQRTS
+ return true
+ case OpStaticCall:
+ v.Op = OpS390XCALLstatic
+ return true
+ case OpStore:
+ return rewriteValueS390X_OpStore(v)
+ case OpSub16:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSub32F:
+ return rewriteValueS390X_OpSub32F(v)
+ case OpSub64:
+ v.Op = OpS390XSUB
+ return true
+ case OpSub64F:
+ return rewriteValueS390X_OpSub64F(v)
+ case OpSub8:
+ v.Op = OpS390XSUBW
+ return true
+ case OpSubPtr:
+ v.Op = OpS390XSUB
+ return true
+ case OpTailCall:
+ v.Op = OpS390XCALLtail
+ return true
+ case OpTrunc:
+ return rewriteValueS390X_OpTrunc(v)
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpS390XLoweredWB
+ return true
+ case OpXor16:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor32:
+ v.Op = OpS390XXORW
+ return true
+ case OpXor64:
+ v.Op = OpS390XXOR
+ return true
+ case OpXor8:
+ v.Op = OpS390XXORW
+ return true
+ case OpZero:
+ return rewriteValueS390X_OpZero(v)
+ case OpZeroExt16to32:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt16to64:
+ v.Op = OpS390XMOVHZreg
+ return true
+ case OpZeroExt32to64:
+ v.Op = OpS390XMOVWZreg
+ return true
+ case OpZeroExt8to16:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to32:
+ v.Op = OpS390XMOVBZreg
+ return true
+ case OpZeroExt8to64:
+ v.Op = OpS390XMOVBZreg
+ return true
+ }
+ return false
+}
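+// The floating-point add lowerings below treat FADDS/FADD as producing a
+// (result, flags) tuple, as the NewTuple type indicates; only element 0, the
+// floating-point sum, is consumed via Select0, and the flags half is dropped.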
+func rewriteValueS390X_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add32F x y)
+ // result: (Select0 (FADDS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADDS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64F x y)
+ // result: (Select0 (FADD x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFADD, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd32 ptr val mem)
+ // result: (AddTupleFirst32 val (LAA ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst32)
+ v0 := b.NewValue0(v.Pos, OpS390XLAA, types.NewTuple(typ.UInt32, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicAdd64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAdd64 ptr val mem)
+ // result: (AddTupleFirst64 val (LAAG ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XAddTupleFirst64)
+ v0 := b.NewValue0(v.Pos, OpS390XLAAG, types.NewTuple(typ.UInt64, types.TypeMem))
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg2(val, v0)
+ return true
+ }
+}
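+// AtomicAnd8 has no single-byte interlocked instruction to map to; the rule
+// below appears to emulate it on the containing aligned 32-bit word: ORWconst
+// [-1<<8] sets every bit above the low byte so those lanes are unchanged by
+// the AND, the RXSBG/RLL pair rotates that mask into the byte's big-endian
+// lane within the word, and LANfloor performs the interlocked load-and-AND on
+// the word-aligned (floored) address.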
+func rewriteValueS390X_OpAtomicAnd8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicAnd8 ptr val mem)
+ // result: (LANfloor ptr (RLL <typ.UInt32> (ORWconst <typ.UInt32> val [-1<<8]) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLANfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XRLL, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XORWconst, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(-1 << 8)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap32(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap32 ptr old new_ mem)
+ // result: (LoweredAtomicCas32 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas32)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicCompareAndSwap64(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicCompareAndSwap64 ptr old new_ mem)
+ // result: (LoweredAtomicCas64 ptr old new_ mem)
+ for {
+ ptr := v_0
+ old := v_1
+ new_ := v_2
+ mem := v_3
+ v.reset(OpS390XLoweredAtomicCas64)
+ v.AddArg4(ptr, old, new_, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange32 ptr val mem)
+ // result: (LoweredAtomicExchange32 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicExchange64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicExchange64 ptr val mem)
+ // result: (LoweredAtomicExchange64 ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLoweredAtomicExchange64)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad64 ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoad8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoad8 ptr mem)
+ // result: (MOVBZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadAcq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadAcq32 ptr mem)
+ // result: (MOVWZatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWZatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicLoadPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicLoadPtr ptr mem)
+ // result: (MOVDatomicload ptr mem)
+ for {
+ ptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDatomicload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+}
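+// AtomicOr8 mirrors AtomicAnd8 above: MOVBZreg zero-extends the byte so the
+// other lanes are OR'ed with zero (left unchanged), SLW shifts it into the
+// byte's lane, and LAOfloor does the interlocked load-and-OR on the aligned
+// word.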
+func rewriteValueS390X_OpAtomicOr8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AtomicOr8 ptr val mem)
+ // result: (LAOfloor ptr (SLW <typ.UInt32> (MOVBZreg <typ.UInt32> val) (RXSBG <typ.UInt32> {s390x.NewRotateParams(59, 60, 3)} (MOVDconst [3<<3]) ptr)) mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XLAOfloor)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt32)
+ v1.AddArg(val)
+ v2 := b.NewValue0(v.Pos, OpS390XRXSBG, typ.UInt32)
+ v2.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(59, 60, 3))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(3 << 3)
+ v2.AddArg2(v3, ptr)
+ v0.AddArg2(v1, v2)
+ v.AddArg3(ptr, v0, mem)
+ return true
+ }
+}
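+// The sequentially consistent atomic stores wrap the store in a SYNC so a
+// serialization point follows it; the release-ordered AtomicStoreRel32 further
+// down emits the plain MOVWatomicstore without the fence, presumably because
+// release ordering on s390x does not require one.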
+func rewriteValueS390X_OpAtomicStore32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore32 ptr val mem)
+ // result: (SYNC (MOVWatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore64(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore64 ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStore8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStore8 ptr val mem)
+ // result: (SYNC (MOVBatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStorePtrNoWB(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AtomicStorePtrNoWB ptr val mem)
+ // result: (SYNC (MOVDatomicstore ptr val mem))
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XSYNC)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDatomicstore, types.TypeMem)
+ v0.AddArg3(ptr, val, mem)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpAtomicStoreRel32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AtomicStoreRel32 ptr val mem)
+ // result: (MOVWatomicstore ptr val mem)
+ for {
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWatomicstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+}
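+// Avg64u is lowered to (x-y)>>1 + y, which avoids forming the possibly
+// overflowing sum x+y and equals the unsigned average (x+y)/2 whenever x >= y.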
+func rewriteValueS390X_OpAvg64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Avg64u <t> x y)
+ // result: (ADD (SRDconst <t> (SUB <t> x y) [1]) y)
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSRDconst, t)
+ v0.AuxInt = uint8ToAuxInt(1)
+ v1 := b.NewValue0(v.Pos, OpS390XSUB, t)
+ v1.AddArg2(x, y)
+ v0.AddArg(v1)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
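+// BitLen64 relies on FLOGR (find leftmost one), which yields the index of the
+// most significant set bit counting from the top, i.e. the leading-zero count,
+// and 64 for a zero operand; 64 - FLOGR(x) is therefore the bit length of x.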
+func rewriteValueS390X_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (SUB (MOVDconst [64]) (FLOGR x))
+ for {
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpCeil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Ceil x)
+ // result: (FIDBR [6] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(6)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpConst16(v *Value) bool {
+ // match: (Const16 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt16(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst32(v *Value) bool {
+ // match: (Const32 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt32(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst64(v *Value) bool {
+ // match: (Const64 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt64(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConst8(v *Value) bool {
+ // match: (Const8 [val])
+ // result: (MOVDconst [int64(val)])
+ for {
+ val := auxIntToInt8(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(val))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstBool(v *Value) bool {
+ // match: (ConstBool [t])
+ // result: (MOVDconst [b2i(t)])
+ for {
+ t := auxIntToBool(v.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(b2i(t))
+ return true
+ }
+}
+func rewriteValueS390X_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (MOVDconst [0])
+ for {
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
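+// The Ctz lowerings below use the identity that (x-1) & ^x has ones in exactly
+// the trailing-zero positions of x, so 64 - FLOGR of that mask is the
+// trailing-zero count (64 for x == 0); the 32-bit variant zero-extends the
+// 32-bit mask first so a zero input yields 32 instead.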
+func rewriteValueS390X_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (MOVWZreg (ANDW <t> (SUBWconst <t> [1] x) (NOTW <t> x)))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpS390XANDW, t)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBWconst, t)
+ v4.AuxInt = int32ToAuxInt(1)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpS390XNOTW, t)
+ v5.AddArg(x)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 <t> x)
+ // result: (SUB (MOVDconst [64]) (FLOGR (AND <t> (SUBconst <t> [1] x) (NOT <t> x))))
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpS390XFLOGR, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpS390XAND, t)
+ v3 := b.NewValue0(v.Pos, OpS390XSUBconst, t)
+ v3.AuxInt = int32ToAuxInt(1)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XNOT, t)
+ v4.AddArg(x)
+ v2.AddArg2(v3, v4)
+ v1.AddArg(v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
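+// The narrow integer divisions below widen their inputs before dividing:
+// signed forms sign-extend (MOVBreg/MOVHreg/MOVWreg) and use DIVW, unsigned
+// forms zero-extend (...Zreg) and use DIVWU; for the 32-bit forms only the
+// dividend gets the explicit extension.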
+func rewriteValueS390X_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 x y)
+ // result: (DIVW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (DIVWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 x y)
+ // result: (DIVW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (DIVWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 x y)
+ // result: (DIVD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (DIVW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (DIVWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XDIVWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
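+// The comparison lowerings that follow all share one shape: materialize 0 and
+// 1 as MOVDconst values and pick between them with LOCGR (load on condition)
+// keyed by the condition-code mask in Aux, using CMP/CMPU/CMPW/CMPWU or
+// FCMP/FCMPS to set the flags; 8- and 16-bit operands are sign- or
+// zero-extended first so the 32-bit compare is exact.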
+func rewriteValueS390X_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64F x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqB x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x y)
+ // result: (LOCGR {s390x.Equal} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Equal)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
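+// FMA x y z computes x*y + z; the rewrite places the addend z first because
+// the FMADD machine op appears to accumulate into its first operand
+// (result-in-arg0 style), with the two factors following.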
+func rewriteValueS390X_OpFMA(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMA x y z)
+ // result: (FMADD z x y)
+ for {
+ x := v_0
+ y := v_1
+ z := v_2
+ v.reset(OpS390XFMADD)
+ v.AddArg3(z, x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpFloor(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Floor x)
+ // result: (FIDBR [7] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(7)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32 x y)
+ // result: (SRDconst [32] (MULLD (MOVWreg x) (MOVWreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpHmul32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Hmul32u x y)
+ // result: (SRDconst [32] (MULLD (MOVWZreg x) (MOVWZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(32)
+ v0 := b.NewValue0(v.Pos, OpS390XMULLD, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ITab (Load ptr mem))
+ // result: (MOVDload ptr mem)
+ for {
+ if v_0.Op != OpLoad {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsInBounds idx len)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPconst p [0]))
+ for {
+ p := v_0
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(0)
+ v2.AddArg(p)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsSliceInBounds idx len)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU idx len))
+ for {
+ idx := v_0
+ len := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(idx, len)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64F x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (LOCGR {s390x.LessOrEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVHZreg x) (MOVHZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64F x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPU x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (LOCGR {s390x.Less} (MOVDconst [0]) (MOVDconst [1]) (CMPWU (MOVBZreg x) (MOVBZreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.Less)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
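+// Load dispatches purely on the value's type: 64-bit ints and pointers use
+// MOVDload, narrower integers pick the sign- or zero-extending load that
+// matches their width and signedness (MOVW/MOVWZ, MOVH/MOVHZ, MOVB/MOVBZ,
+// with booleans treated as unsigned bytes), and floats use FMOVSload/FMOVDload.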
+func rewriteValueS390X_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: (is64BitInt(t) || isPtr(t))
+ // result: (MOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) || isPtr(t)) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && isSigned(t)
+ // result: (MOVWload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitInt(t) && !isSigned(t)
+ // result: (MOVWZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && isSigned(t)
+ // result: (MOVHload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is16BitInt(t) && !isSigned(t)
+ // result: (MOVHZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is16BitInt(t) && !isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is8BitInt(t) && isSigned(t)
+ // result: (MOVBload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is8BitInt(t) && isSigned(t)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: (t.IsBoolean() || (is8BitInt(t) && !isSigned(t)))
+ // result: (MOVBZload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsBoolean() || (is8BitInt(t) && !isSigned(t))) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (FMOVSload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (FMOVDload ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (MOVDaddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpS390XMOVDaddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh16x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh32x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SLW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh8x8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SLW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSLW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 x y)
+ // result: (MODW (MOVHreg x) (MOVHreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (MODWU (MOVHZreg x) (MOVHZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 x y)
+ // result: (MODW (MOVWreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (MODWU (MOVWZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 x y)
+ // result: (MODD x y)
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODD)
+ v.AddArg2(x, y)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (MODW (MOVBreg x) (MOVBreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (MODWU (MOVBZreg x) (MOVBZreg y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XMODWU)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (MOVBstore dst (MOVBZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (MOVHstore dst (MOVHZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (MOVWstore dst (MOVWZload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (MOVDstore dst (MOVDload src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [24] dst src mem)
+ // result: (MOVDstore [16] dst (MOVDload [16] src mem) (MOVDstore [8] dst (MOVDload [8] src mem) (MOVDstore dst (MOVDload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v0.AuxInt = int32ToAuxInt(16)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v2.AuxInt = int32ToAuxInt(8)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVDload, typ.UInt64)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (MOVBstore [2] dst (MOVBZload [2] src mem) (MOVHstore dst (MOVHZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (MOVBstore [4] dst (MOVBZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v0.AuxInt = int32ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (MOVBstore [6] dst (MOVBZload [6] src mem) (MOVHstore [4] dst (MOVHZload [4] src mem) (MOVWstore dst (MOVWZload src mem) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(6)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZload, typ.UInt8)
+ v0.AuxInt = int32ToAuxInt(6)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHstore, types.TypeMem)
+ v1.AuxInt = int32ToAuxInt(4)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(4)
+ v2.AddArg2(src, mem)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVWstore, types.TypeMem)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVWZload, typ.UInt32)
+ v4.AddArg2(src, mem)
+ v3.AddArg3(dst, v4, mem)
+ v1.AddArg3(dst, v2, v3)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 0 && s <= 256 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s), 0)] dst src mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 0 && s <= 256 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0))
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 256 && s <= 512 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 256 && s <= 512 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-256, 256))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v0.AddArg3(dst, src, mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 512 && s <= 768 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-512, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 512 && s <= 768 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-512, 512))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v1.AddArg3(dst, src, mem)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 768 && s <= 1024 && logLargeCopy(v, s)
+ // result: (MVC [makeValAndOff(int32(s)-768, 768)] dst src (MVC [makeValAndOff(256, 512)] dst src (MVC [makeValAndOff(256, 256)] dst src (MVC [makeValAndOff(256, 0)] dst src mem))))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 768 && s <= 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XMVC)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s)-768, 768))
+ v0 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 512))
+ v1 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v1.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 256))
+ v2 := b.NewValue0(v.Pos, OpS390XMVC, types.TypeMem)
+ v2.AuxInt = valAndOffToAuxInt(makeValAndOff(256, 0))
+ v2.AddArg3(dst, src, mem)
+ v1.AddArg3(dst, src, v2)
+ v0.AddArg3(dst, src, v1)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 1024 && logLargeCopy(v, s)
+ // result: (LoweredMove [s%256] dst src (ADD <src.Type> src (MOVDconst [(s/256)*256])) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 1024 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpS390XLoweredMove)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADD, src.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt((s / 256) * 256)
+ v0.AddArg2(src, v1)
+ v.AddArg4(dst, src, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVHreg x) (MOVHreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMPS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMPS, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64F x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (FCMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XFCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqB x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMPW (MOVBreg x) (MOVBreg y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v4.AddArg(y)
+ v2.AddArg2(v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqPtr x y)
+ // result: (LOCGR {s390x.NotEqual} (MOVDconst [0]) (MOVDconst [1]) (CMP x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(s390x.NotEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(1)
+ v2 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v2.AddArg2(x, y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not x)
+ // result: (XORWconst [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OffPtr [off] ptr:(SP))
+ // result: (MOVDaddr [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if ptr.Op != OpSP {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // cond: is32Bit(off)
+ // result: (ADDconst [int32(off)] ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ if !(is32Bit(off)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(ptr)
+ return true
+ }
+ // match: (OffPtr [off] ptr)
+ // result: (ADD (MOVDconst [off]) ptr)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ ptr := v_0
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(off)
+ v.AddArg2(v0, ptr)
+ return true
+ }
+}
+func rewriteValueS390X_OpPanicBounds(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 0
+ // result: (LoweredPanicBoundsA [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 0) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsA)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 1
+ // result: (LoweredPanicBoundsB [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 1) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsB)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ // match: (PanicBounds [kind] x y mem)
+ // cond: boundsABI(kind) == 2
+ // result: (LoweredPanicBoundsC [kind] x y mem)
+ for {
+ kind := auxIntToInt64(v.AuxInt)
+ x := v_0
+ y := v_1
+ mem := v_2
+ if !(boundsABI(kind) == 2) {
+ break
+ }
+ v.reset(OpS390XLoweredPanicBoundsC)
+ v.AuxInt = int64ToAuxInt(kind)
+ v.AddArg3(x, y, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (MOVBZreg (SumBytes2 (POPCNT <typ.UInt16> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes2, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt16)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (MOVBZreg (SumBytes4 (POPCNT <typ.UInt32> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes4, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount64 x)
+ // result: (MOVBZreg (SumBytes8 (POPCNT <typ.UInt64> x)))
+ for {
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XSumBytes8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpS390XPOPCNT, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (POPCNT (MOVBZreg x))
+ for {
+ x := v_0
+ v.reset(OpS390XPOPCNT)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (MOVDconst [c]))
+ // result: (Or16 (Lsh16x64 <t> x (MOVDconst [c&15])) (Rsh16Ux64 <t> x (MOVDconst [-c&15])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (MOVDconst [c]))
+ // result: (Or8 (Lsh8x64 <t> x (MOVDconst [c&7])) (Rsh8Ux64 <t> x (MOVDconst [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpRound(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round x)
+ // result: (FIDBR [1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRoundToEven(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (RoundToEven x)
+ // result: (FIDBR [4] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(4)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVHZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVHZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x16 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x32 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVHreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh16x8 x y)
+ // result: (SRAW (MOVHreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x16 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x32 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh32x8 x y)
+ // result: (SRAW x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRD <t> x y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRD, t)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x16 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x32 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v2.AddArg(y)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x8 x y)
+ // result: (SRAD x (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v0.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v2.AuxInt = int32ToAuxInt(64)
+ v3 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v3.AddArg(y)
+ v2.AddArg(v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux16 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVHZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux32 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPUconst y [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRW (MOVBZreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8Ux8 <t> x y)
+ // result: (LOCGR {s390x.GreaterOrEqual} <t> (SRW <t> (MOVBZreg x) y) (MOVDconst [0]) (CMPWUconst (MOVBZreg y) [64]))
+ for {
+ t := v.Type
+ x := v_0
+ y := v_1
+ v.reset(OpS390XLOCGR)
+ v.Type = t
+ v.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v0 := b.NewValue0(v.Pos, OpS390XSRW, t)
+ v1 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v1.AddArg(x)
+ v0.AddArg2(v1, y)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v.AddArg3(v0, v2, v3)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x16 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVHZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x32 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPUconst y [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v3.AddArg(y)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 x y)
+ // cond: shiftIsBounded(v)
+ // result: (SRAW (MOVBreg x) y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+ // match: (Rsh8x8 x y)
+ // result: (SRAW (MOVBreg x) (LOCGR {s390x.GreaterOrEqual} <y.Type> y (MOVDconst <y.Type> [63]) (CMPWUconst (MOVBZreg y) [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVBreg, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XLOCGR, y.Type)
+ v1.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, y.Type)
+ v2.AuxInt = int64ToAuxInt(63)
+ v3 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v3.AuxInt = int32ToAuxInt(64)
+ v4 := b.NewValue0(v.Pos, OpS390XMOVBZreg, typ.UInt64)
+ v4.AddArg(y)
+ v3.AddArg(v4)
+ v1.AddArg3(y, v2, v3)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XADD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (ADDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADD idx (MOVDaddr [c] {s} ptr))
+ // cond: ptr.Op != OpSB
+ // result: (MOVDaddridx [c] {s} ptr idx)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ idx := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ s := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ if !(ptr.Op != OpSB) {
+ continue
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c)
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, idx)
+ return true
+ }
+ break
+ }
+ // match: (ADD x (NEG y))
+ // result: (SUB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEG {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDC(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDC x (MOVDconst [c]))
+ // cond: is16Bit(c)
+ // result: (ADDCconst x [int16(c)])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is16Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XADDCconst)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDE x y (FlagEQ))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagEQ {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (FlagLT))
+ // result: (ADDC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagLT {
+ break
+ }
+ v.reset(OpS390XADDC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDE x y (Select1 (ADDCconst [-1] (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) c)))))
+ // result: (ADDE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XADDCconst || auxIntToInt16(v_2_0.AuxInt) != -1 {
+ break
+ }
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_0_0 := v_2_0_0.Args[0]
+ if v_2_0_0_0.Op != OpS390XADDE {
+ break
+ }
+ c := v_2_0_0_0.Args[2]
+ v_2_0_0_0_0 := v_2_0_0_0.Args[0]
+ if v_2_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_0_0_1 := v_2_0_0_0.Args[1]
+ if v_2_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XADDE)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDW x (MOVDconst [c]))
+ // result: (ADDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ADDW x (NEGW y))
+ // result: (SUBW x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XNEGW {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSUBW)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ADDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ADDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XADDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDWconst [c] (ADDWconst [d] x))
+ // result: (ADDWconst [int32(c+d)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c + d))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ADDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x:(SB)))
+ // cond: ((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if x.Op != OpSB || !(((c+d)&1 == 0) && is32Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddr [d] {s} x))
+ // cond: x.Op != OpSB && is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddr [c+d] {s} x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ if !(x.Op != OpSB && is20Bit(int64(c)+int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddr)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDaddridx [d] {s} x y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDaddridx {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ s := auxToSym(v_0.Aux)
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (ADDconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ADDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)+d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) + d)
+ return true
+ }
+ // match: (ADDconst [c] (ADDconst [d] x))
+ // cond: is32Bit(int64(c)+int64(d))
+ // result: (ADDconst [c+d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XADDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ADDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (ADD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ADDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ADDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ADDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ADDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XADDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XAND(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (AND x (MOVDconst [c]))
+ // cond: s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil
+ // result: (RISBGZ x {*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c))})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)) != nil) {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*s390x.NewRotateParams(0, 63, 0).OutMerge(uint64(c)))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c < 0
+ // result: (ANDconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c < 0) {
+ continue
+ }
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (AND x (MOVDconst [c]))
+ // cond: is32Bit(c) && c >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(c)] x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c) && c >= 0) {
+ continue
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (AND (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (AND x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (AND <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDW x (MOVDconst [c]))
+ // result: (ANDWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ANDW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ANDW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ANDWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XANDWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDWconst [c] (ANDWconst [d] x))
+ // result: (ANDWconst [c&d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDWconst)
+ v.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0x00ff] x)
+ // result: (MOVBZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0x00ff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [0xffff] x)
+ // result: (MOVHZreg x)
+ for {
+ if auxIntToInt32(v.AuxInt) != 0xffff {
+ break
+ }
+ x := v_0
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDWconst [c] _)
+ // cond: int32(c)==0
+ // result: (MOVDconst [0])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDWconst [c] x)
+ // cond: int32(c)==-1
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == -1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)&d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) & d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ANDWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ANDconst [c] (ANDconst [d] x))
+ // result: (ANDconst [c&d] x)
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XANDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XANDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg(x)
+ return true
+ }
+ // match: (ANDconst [0] _)
+ // result: (MOVDconst [0])
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (ANDconst [-1] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ANDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c&d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XANDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ANDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (AND x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XAND)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ANDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ANDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ANDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ANDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XANDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMP x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (CMPconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMP (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (InvertFlags (CMPconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMP x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMP y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMP, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPU x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (CMPUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPU (MOVDconst [c]) x)
+ // cond: isU32Bit(c)
+ // result: (InvertFlags (CMPUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(isU32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)==uint64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) == uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)<uint64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) < uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVDconst [x]) [y])
+ // cond: uint64(x)>uint64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint64(x) > uint64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPUconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 64 && (1<<uint(64-c)) <= uint64(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (RISBGZ x {r}) [c])
+ // cond: r.OutMask() < uint64(uint32(c))
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(r.OutMask() < uint64(uint32(c))) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVHZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst x:(MOVBZreg _) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPUconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPW x (MOVDconst [c]))
+ // result: (CMPWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPW (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPW y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPW, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPW x (MOVWreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW x (MOVWZreg y))
+ // result: (CMPW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPW (MOVWZreg x) y)
+ // result: (CMPW x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (CMPWU x (MOVDconst [c]))
+ // result: (CMPWUconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWU (MOVDconst [c]) x)
+ // result: (InvertFlags (CMPWUconst x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x y)
+ // cond: canonLessThan(x,y)
+ // result: (InvertFlags (CMPWU y x))
+ for {
+ x := v_0
+ y := v_1
+ if !(canonLessThan(x, y)) {
+ break
+ }
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XCMPWU, types.TypeFlags)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (CMPWU x (MOVWreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU x (MOVWZreg y))
+ // result: (CMPWU x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (CMPWU (MOVWZreg x) y)
+ // result: (CMPWU x y)
+ for {
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ y := v_1
+ v.reset(OpS390XCMPWU)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWUconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)==uint32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) == uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)<uint32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) < uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVDconst [x]) [y])
+ // cond: uint32(x)>uint32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(uint32(x) > uint32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWUconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && c < 32 && (1<<uint(32-c)) <= uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (ANDWconst _ [m]) [n])
+ // cond: uint32(m) < uint32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(uint32(m) < uint32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWUconst (MOVWreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWUconst (MOVWZreg x) [c])
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)==int32(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) == int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)<int32(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) < int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVDconst [x]) [y])
+ // cond: int32(x)>int32(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(int32(x) > int32(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (MOVBZreg _) [c])
+ // cond: 0xff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVBZreg || !(0xff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (MOVHZreg _) [c])
+ // cond: 0xffff < c
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVHZreg || !(0xffff < c) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst (SRWconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPWconst (ANDWconst _ [m]) [n])
+ // cond: int32(m) >= 0 && int32(m) < int32(n)
+ // result: (FlagLT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if !(int32(m) >= 0 && int32(m) < int32(n)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPWconst x:(SRWconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPWUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRWconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPWconst (MOVWZreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCMPconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x==int64(y)
+ // result: (FlagEQ)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x == int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x<int64(y)
+ // result: (FlagLT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x < int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVDconst [x]) [y])
+ // cond: x>int64(y)
+ // result: (FlagGT)
+ for {
+ y := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > int64(y)) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (SRDconst _ [c]) [n])
+ // cond: c > 0 && n < 0
+ // result: (FlagGT)
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ if !(c > 0 && n < 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (CMPconst (RISBGZ x {r}) [c])
+ // cond: c > 0 && r.OutMask() < uint64(c)
+ // result: (FlagLT)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ if !(c > 0 && r.OutMask() < uint64(c)) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVHZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVHZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(MOVBZreg _) [c])
+ // result: (CMPWconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XMOVBZreg {
+ break
+ }
+ v.reset(OpS390XCMPWconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWZreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst (MOVWreg x:(ANDWconst [m] _)) [c])
+ // cond: int32(m) >= 0 && c >= 0
+ // result: (CMPWUconst x [c])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(x.AuxInt)
+ if !(int32(m) >= 0 && c >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPWUconst)
+ v.AuxInt = int32ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (CMPconst x:(SRDconst _ [c]) [n])
+ // cond: c > 0 && n >= 0
+ // result: (CMPUconst x [n])
+ for {
+ n := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if x.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(x.AuxInt)
+ if !(c > 0 && n >= 0) {
+ break
+ }
+ v.reset(OpS390XCMPUconst)
+ v.AuxInt = int32ToAuxInt(n)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XCPSDR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: !math.Signbit(c)
+ // result: (LPDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v.AddArg(y)
+ return true
+ }
+ // match: (CPSDR y (FMOVDconst [c]))
+ // cond: math.Signbit(c)
+ // result: (LNDFR y)
+ for {
+ y := v_0
+ if v_1.Op != OpS390XFMOVDconst {
+ break
+ }
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(math.Signbit(c)) {
+ break
+ }
+ v.reset(OpS390XLNDFR)
+ v.AddArg(y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFCMP(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMP x (FMOVDconst [0.0]))
+ // result: (LTDBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVDconst || auxIntToFloat64(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTDBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMP (FMOVDconst [0.0]) x)
+ // result: (InvertFlags (LTDBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVDconst || auxIntToFloat64(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTDBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFCMPS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (FCMPS x (FMOVSconst [0.0]))
+ // result: (LTEBR x)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XFMOVSconst || auxIntToFloat32(v_1.AuxInt) != 0.0 {
+ break
+ }
+ v.reset(OpS390XLTEBR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FCMPS (FMOVSconst [0.0]) x)
+ // result: (InvertFlags (LTEBR <v.Type> x))
+ for {
+ if v_0.Op != OpS390XFMOVSconst || auxIntToFloat32(v_0.AuxInt) != 0.0 {
+ break
+ }
+ x := v_1
+ v.reset(OpS390XInvertFlags)
+ v0 := b.NewValue0(v.Pos, OpS390XLTEBR, v.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LDGR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLDGR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FMOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVDload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVDstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSload [off] {sym} ptr1 (FMOVSstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVSstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (FMOVSload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFMOVSstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (FMOVSstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (FMOVSstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (FMOVSstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (FMOVSstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEG (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEG (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XFNEGS(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (FNEGS (LPDFR x))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (FNEGS (LNDFR x))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLDGR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LDGR <t> (RISBGZ x {r}))
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LPDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLPDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (LDGR <t> (OR (MOVDconst [-1<<63]) x))
+ // result: (LNDFR (LDGR <t> x))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XOR {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != -1<<63 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpS390XLNDFR)
+ v0 := b.NewValue0(v.Pos, OpS390XLDGR, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (LDGR <t> x:(ORload <t1> [off] {sym} (MOVDconst [-1<<63]) ptr mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (LNDFR <t> (LDGR <t> (MOVDload <t1> [off] {sym} ptr mem)))
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XORload {
+ break
+ }
+ t1 := x.Type
+ off := auxIntToInt32(x.AuxInt)
+ sym := auxToSym(x.Aux)
+ mem := x.Args[2]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst || auxIntToInt64(x_0.AuxInt) != -1<<63 {
+ break
+ }
+ ptr := x.Args[1]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XLNDFR, t)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x.Pos, OpS390XLDGR, t)
+ v2 := b.NewValue0(x.Pos, OpS390XMOVDload, t1)
+ v2.AuxInt = int32ToAuxInt(off)
+ v2.Aux = symToAux(sym)
+ v2.AddArg2(ptr, mem)
+ v1.AddArg(v2)
+ v0.AddArg(v1)
+ return true
+ }
+ // match: (LDGR (LGDR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLEDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LEDBR (LPDFR (LDEBR x)))
+ // result: (LPDFR x)
+ for {
+ if v_0.Op != OpS390XLPDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLPDFR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LEDBR (LNDFR (LDEBR x)))
+ // result: (LNDFR x)
+ for {
+ if v_0.Op != OpS390XLNDFR {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLDEBR {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpS390XLNDFR)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLGDR(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LGDR (LDGR x))
+ // result: x
+ for {
+ if v_0.Op != OpS390XLDGR {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLOCGR(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (LOCGR {c} x y (InvertFlags cmp))
+ // result: (LOCGR {c.ReverseComparison()} x y cmp)
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XInvertFlags {
+ break
+ }
+ cmp := v_2.Args[0]
+ v.reset(OpS390XLOCGR)
+ v.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ v.AddArg3(x, y, cmp)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagEQ))
+ // cond: c&s390x.Equal != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagLT))
+ // cond: c&s390x.Less != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagGT))
+ // cond: c&s390x.Greater != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} _ x (FlagOV))
+ // cond: c&s390x.Unordered != 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_1
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered != 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagEQ))
+ // cond: c&s390x.Equal == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagEQ || !(c&s390x.Equal == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagLT))
+ // cond: c&s390x.Less == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagLT || !(c&s390x.Less == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagGT))
+ // cond: c&s390x.Greater == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagGT || !(c&s390x.Greater == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (LOCGR {c} x _ (FlagOV))
+ // cond: c&s390x.Unordered == 0
+ // result: x
+ for {
+ c := auxToS390xCCMask(v.Aux)
+ x := v_0
+ if v_2.Op != OpS390XFlagOV || !(c&s390x.Unordered == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTDBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTDBR (Select0 x:(FADD _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADD || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTDBR (Select0 x:(FSUB _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUB || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLTEBR(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (LTEBR (Select0 x:(FADDS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFADDS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ // match: (LTEBR (Select0 x:(FSUBS _ _)))
+ // cond: b == x.Block
+ // result: (Select1 x)
+ for {
+ if v_0.Op != OpSelect0 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpS390XFSUBS || !(b == x.Block) {
+ break
+ }
+ v.reset(OpSelect1)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound32F x:(FMOVSconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVSconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XLoweredRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LoweredRound64F x:(FMOVDconst))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XFMOVDconst {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBZload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBZload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBZreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg <t> x:(MOVBload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( uint8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (MOVBZreg x:(LOCGR (MOVDconst [c]) (MOVDconst [d]) _))
+ // cond: int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XLOCGR {
+ break
+ }
+ _ = x.Args[1]
+ x_0 := x.Args[0]
+ if x_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(x_0.AuxInt)
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(x_1.AuxInt)
+ if !(int64(uint8(c)) == c && int64(uint8(d)) == d && (!x.Type.IsSigned() || x.Type.Size() > 1)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x000000ff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBload [off] {sym} ptr1 (MOVBstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVBreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVBstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBload [off1] {sym1} (MOVDaddr [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVBreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVBreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg <t> x:(MOVBZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVBload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVBZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVBload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVBreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() == 1
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() == 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVBreg (MOVDconst [c]))
+ // result: (MOVDconst [int64( int8(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int8(c)))
+ return true
+ }
+ // match: (MOVBreg (ANDWconst [m] x))
+ // cond: int8(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32( uint8(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int8(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint8(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstore [off] {sym} ptr (MOVBreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVBZreg x) mem)
+ // result: (MOVBstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVBstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVBstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is20Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVBstoreconst [makeValAndOff(int32(int8(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is20Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int8(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstore [off1] {sym1} (MOVDaddr [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRDconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w0:(SRDconst [j] w) x:(MOVBstore [i-1] {s} p (SRDconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w x:(MOVBstore [i-1] {s} p (SRWconst [8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != 8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p w0:(SRWconst [j] w) x:(MOVBstore [i-1] {s} p (SRWconst [j+8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != j+8 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRDconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRDconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRDconst [j-8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRWconst [8] w) x:(MOVBstore [i-1] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 8 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVBstore [i] {s} p (SRWconst [j] w) x:(MOVBstore [i-1] {s} p w0:(SRWconst [j-8] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVHBRstore [i-1] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVBstore || auxIntToInt32(x.AuxInt) != i-1 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRWconst || auxIntToUint8(w0.AuxInt) != j-8 || w != w0.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHBRstore)
+ v.AuxInt = int32ToAuxInt(i - 1)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVBstore [7] {s} p1 (SRDconst w) x1:(MOVHBRstore [5] {s} p1 (SRDconst w) x2:(MOVWBRstore [1] {s} p1 (SRDconst w) x3:(MOVBstore [0] {s} p1 w mem))))
+ // cond: x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)
+ // result: (MOVDBRstore {s} p1 w mem)
+ for {
+ if auxIntToInt32(v.AuxInt) != 7 {
+ break
+ }
+ s := auxToSym(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ w := v_1.Args[0]
+ x1 := v_2
+ if x1.Op != OpS390XMOVHBRstore || auxIntToInt32(x1.AuxInt) != 5 || auxToSym(x1.Aux) != s {
+ break
+ }
+ _ = x1.Args[2]
+ if p1 != x1.Args[0] {
+ break
+ }
+ x1_1 := x1.Args[1]
+ if x1_1.Op != OpS390XSRDconst || w != x1_1.Args[0] {
+ break
+ }
+ x2 := x1.Args[2]
+ if x2.Op != OpS390XMOVWBRstore || auxIntToInt32(x2.AuxInt) != 1 || auxToSym(x2.Aux) != s {
+ break
+ }
+ _ = x2.Args[2]
+ if p1 != x2.Args[0] {
+ break
+ }
+ x2_1 := x2.Args[1]
+ if x2_1.Op != OpS390XSRDconst || w != x2_1.Args[0] {
+ break
+ }
+ x3 := x2.Args[2]
+ if x3.Op != OpS390XMOVBstore || auxIntToInt32(x3.AuxInt) != 0 || auxToSym(x3.Aux) != s {
+ break
+ }
+ mem := x3.Args[2]
+ if p1 != x3.Args[0] || w != x3.Args[1] || !(x1.Uses == 1 && x2.Uses == 1 && x3.Uses == 1 && clobber(x1, x2, x3)) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.Aux = symToAux(s)
+ v.AddArg3(p1, w, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVBstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVBstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: is20Bit(sc.Off64()+int64(off))
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVBstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVBstoreconst [c] {s} p x:(MOVBstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 1 == c.Off() && clobber(x)
+ // result: (MOVHstoreconst [makeValAndOff(c.Val()&0xff | a.Val()<<8, a.Off())] {s} p mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVBstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+1 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(c.Val()&0xff|a.Val()<<8, a.Off()))
+ v.Aux = symToAux(s)
+ v.AddArg2(p, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDaddridx(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDaddridx [c] {s} (ADDconst [d] x) y)
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [c] {s} x (ADDconst [d] y))
+ // cond: is20Bit(int64(c)+int64(d))
+ // result: (MOVDaddridx [c+d] {s} x y)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(is20Bit(int64(c) + int64(d))) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(c + d)
+ v.Aux = symToAux(s)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} (MOVDaddr [off2] {sym2} x) y)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ x := v_0.Args[0]
+ y := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && x.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (MOVDaddridx [off1] {sym1} x (MOVDaddr [off2] {sym2} y))
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB
+ // result: (MOVDaddridx [off1+off2] {mergeSym(sym1,sym2)} x y)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ sym2 := auxToSym(v_1.Aux)
+ y := v_1.Args[0]
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && y.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDaddridx)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDload [off] {sym} ptr1 (MOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: x
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVDload [off] {sym} ptr1 (FMOVDstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (LGDR x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XFMOVDstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVDload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVDstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVDstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVDstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%8 == 0 && (off1+off2)%8 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w1 x:(MOVDstore [i-8] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STMG2 [i-8] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVDstore || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG2)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w2 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // result: (STMG3 [i-16] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG3)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVDstore [i] {s} p w3 x:(STMG3 [i-24] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)
+ // result: (STMG4 [i-24] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTMG3 || auxIntToInt32(x.AuxInt) != i-24 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-24) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 24)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVDstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVDstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVDstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVDstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHBRstore [i] {s} p (SRDconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRDconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRDconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRWconst [16] w) x:(MOVHBRstore [i-2] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 16 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHBRstore [i] {s} p (SRWconst [j] w) x:(MOVHBRstore [i-2] {s} p w0:(SRWconst [j-16] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVWBRstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHBRstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRWconst || auxIntToUint8(w0.AuxInt) != j-16 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWBRstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHZload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg <t> x:(MOVHload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (MOVHZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0x0000ffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHZreg (ANDWconst [m] x))
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHload [off] {sym} ptr1 (MOVHstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVHreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVHstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg <t> x:(MOVHZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVHload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVHZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVHload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVHreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 2
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVHreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int16(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int16(c)))
+ return true
+ }
+ // match: (MOVHreg (ANDWconst [m] x))
+ // cond: int16(m) >= 0
+ // result: (MOVWZreg (ANDWconst <typ.UInt32> [int32(uint16(m))] x))
+ for {
+ if v_0.Op != OpS390XANDWconst {
+ break
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(int16(m) >= 0) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(uint16(m)))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVHstore [off] {sym} ptr (MOVHreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVHZreg x) mem)
+ // result: (MOVHstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVHstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVHstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVHstoreconst [makeValAndOff(int32(int16(c)),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(int16(c)), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%2 == 0 && (off1+off2)%2 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRDconst [16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w0:(SRDconst [j] w) x:(MOVHstore [i-2] {s} p (SRDconst [j+16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w x:(MOVHstore [i-2] {s} p (SRWconst [16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w := v_1
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != 16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVHstore [i] {s} p w0:(SRWconst [j] w) x:(MOVHstore [i-2] {s} p (SRWconst [j+16] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVWstore [i-2] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRWconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVHstore || auxIntToInt32(x.AuxInt) != i-2 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRWconst || auxIntToUint8(x_1.AuxInt) != j+16 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(i - 2)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVHstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVHstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVHstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVHstoreconst [c] {s} p x:(MOVHstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 2 == c.Off() && clobber(x)
+ // result: (MOVWstore [a.Off()] {s} p (MOVDconst [int64(c.Val()&0xffff | a.Val()<<16)]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVHstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+2 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(a.Off())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(int64(c.Val()&0xffff | a.Val()<<16))
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWBRstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWBRstore [i] {s} p (SRDconst [32] w) x:(MOVWBRstore [i-4] {s} p w mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDBRstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWBRstore [i] {s} p (SRDconst [j] w) x:(MOVWBRstore [i-4] {s} p w0:(SRDconst [j-32] w) mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: (MOVDBRstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(v_1.AuxInt)
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWBRstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if w0.Op != OpS390XSRDconst || auxIntToUint8(w0.AuxInt) != j-32 || w != w0.Args[0] || !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDBRstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWZload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWZreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWZload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWZload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWZload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWZload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWZreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWZreg e:(MOVBZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVHZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWZreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg x:(MOVWZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 4)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWZload || !(!x.Type.IsSigned() || x.Type.Size() > 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg <t> x:(MOVWload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWZload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWZload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWZreg x:(Arg <t>))
+ // cond: !t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(!t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWZreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(uint32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (MOVWZreg (RISBGZ x {r}))
+ // cond: r.OutMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.OutMerge(0xffffffff)})
+ for {
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(r.OutMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.OutMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWload [off] {sym} ptr1 (MOVWstore [off] {sym} ptr2 x _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MOVWreg x)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr1 := v_0
+ if v_1.Op != OpS390XMOVWstore || auxIntToInt32(v_1.AuxInt) != off || auxToSym(v_1.Aux) != sym {
+ break
+ }
+ x := v_1.Args[1]
+ ptr2 := v_1.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWload [off1] {sym} (ADDconst [off2] ptr) mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWload [off1+off2] {sym} ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWload [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWreg(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MOVWreg e:(MOVBreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVBreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVBreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVBreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVHreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVHreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVHreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVHreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg e:(MOVWZreg x))
+ // cond: clobberIfDead(e)
+ // result: (MOVWreg x)
+ for {
+ e := v_0
+ if e.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := e.Args[0]
+ if !(clobberIfDead(e)) {
+ break
+ }
+ v.reset(OpS390XMOVWreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVWload _ _))
+ // cond: (x.Type.IsSigned() || x.Type.Size() == 8)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVWload || !(x.Type.IsSigned() || x.Type.Size() == 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVBZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 1)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVBZload || !(!x.Type.IsSigned() || x.Type.Size() > 1) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg x:(MOVHZload _ _))
+ // cond: (!x.Type.IsSigned() || x.Type.Size() > 2)
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpS390XMOVHZload || !(!x.Type.IsSigned() || x.Type.Size() > 2) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg <t> x:(MOVWZload [o] {s} p mem))
+ // cond: x.Uses == 1 && clobber(x)
+ // result: @x.Block (MOVWload <t> [o] {s} p mem)
+ for {
+ t := v.Type
+ x := v_0
+ if x.Op != OpS390XMOVWZload {
+ break
+ }
+ o := auxIntToInt32(x.AuxInt)
+ s := auxToSym(x.Aux)
+ mem := x.Args[1]
+ p := x.Args[0]
+ if !(x.Uses == 1 && clobber(x)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(x.Pos, OpS390XMOVWload, t)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(o)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ // match: (MOVWreg x:(Arg <t>))
+ // cond: t.IsSigned() && t.Size() <= 4
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpArg {
+ break
+ }
+ t := x.Type
+ if !(t.IsSigned() && t.Size() <= 4) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (MOVWreg (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MOVWstore [off] {sym} ptr (MOVWreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVWZreg x) mem)
+ // result: (MOVWstore [off] {sym} ptr x mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_1.Args[0]
+ mem := v_2
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, x, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym} (ADDconst [off2] ptr) val mem)
+ // cond: is20Bit(int64(off1)+int64(off2))
+ // result: (MOVWstore [off1+off2] {sym} ptr val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is20Bit(int64(off1) + int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (MOVWstore [off] {sym} ptr (MOVDconst [c]) mem)
+ // cond: is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB
+ // result: (MOVWstoreconst [makeValAndOff(int32(c),off)] {sym} ptr mem)
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ ptr := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ mem := v_2
+ if !(is16Bit(c) && isU12Bit(int64(off)) && ptr.Op != OpSB) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(c), off))
+ v.Aux = symToAux(sym)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstore [off1] {sym1} (MOVDaddr <t> [off2] {sym2} base) val mem)
+ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ t := v_0.Type
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || (t.IsPtr() && t.Elem().Alignment()%4 == 0 && (off1+off2)%4 == 0))) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg3(base, val, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p (SRDconst [32] w) x:(MOVWstore [i-4] {s} p w mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVDstore [i-4] {s} p w mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ w := v_1.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] || w != x.Args[1] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w0:(SRDconst [j] w) x:(MOVWstore [i-4] {s} p (SRDconst [j+32] w) mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && clobber(x)
+ // result: (MOVDstore [i-4] {s} p w0 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w0 := v_1
+ if w0.Op != OpS390XSRDconst {
+ break
+ }
+ j := auxIntToUint8(w0.AuxInt)
+ w := w0.Args[0]
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ x_1 := x.Args[1]
+ if x_1.Op != OpS390XSRDconst || auxIntToUint8(x_1.AuxInt) != j+32 || w != x_1.Args[0] || !(p.Op != OpSB && x.Uses == 1 && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, w0, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w1 x:(MOVWstore [i-4] {s} p w0 mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)
+ // result: (STM2 [i-4] {s} p w0 w1 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w1 := v_1
+ x := v_2
+ if x.Op != OpS390XMOVWstore || auxIntToInt32(x.AuxInt) != i-4 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[2]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ if !(p.Op != OpSB && x.Uses == 1 && is20Bit(int64(i)-4) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM2)
+ v.AuxInt = int32ToAuxInt(i - 4)
+ v.Aux = symToAux(s)
+ v.AddArg4(p, w0, w1, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w2 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STM3 [i-8] {s} p w0 w1 w2 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM3)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg5(p, w0, w1, w2, mem)
+ return true
+ }
+ // match: (MOVWstore [i] {s} p w3 x:(STM3 [i-12] {s} p w0 w1 w2 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)
+ // result: (STM4 [i-12] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w3 := v_1
+ x := v_2
+ if x.Op != OpS390XSTM3 || auxIntToInt32(x.AuxInt) != i-12 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[4]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ w2 := x.Args[3]
+ if !(x.Uses == 1 && is20Bit(int64(i)-12) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 12)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMOVWstoreconst(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (MOVWstoreconst [sc] {s} (ADDconst [off] ptr) mem)
+ // cond: isU12Bit(sc.Off64()+int64(off))
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {s} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU12Bit(sc.Off64() + int64(off))) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(s)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [sc] {sym1} (MOVDaddr [off] {sym2} ptr) mem)
+ // cond: ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)
+ // result: (MOVWstoreconst [sc.addOffset32(off)] {mergeSym(sym1, sym2)} ptr mem)
+ for {
+ sc := auxIntToValAndOff(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+ if v_0.Op != OpS390XMOVDaddr {
+ break
+ }
+ off := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(ptr.Op != OpSB && canMergeSym(sym1, sym2) && sc.canAdd32(off)) {
+ break
+ }
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(sc.addOffset32(off))
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (MOVWstoreconst [c] {s} p x:(MOVWstoreconst [a] {s} p mem))
+ // cond: p.Op != OpSB && x.Uses == 1 && a.Off() + 4 == c.Off() && clobber(x)
+ // result: (MOVDstore [a.Off()] {s} p (MOVDconst [c.Val64()&0xffffffff | a.Val64()<<32]) mem)
+ for {
+ c := auxIntToValAndOff(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ x := v_1
+ if x.Op != OpS390XMOVWstoreconst {
+ break
+ }
+ a := auxIntToValAndOff(x.AuxInt)
+ if auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[1]
+ if p != x.Args[0] || !(p.Op != OpSB && x.Uses == 1 && a.Off()+4 == c.Off() && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(a.Off())
+ v.Aux = symToAux(s)
+ v0 := b.NewValue0(x.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c.Val64()&0xffffffff | a.Val64()<<32)
+ v.AddArg3(p, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLD x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (MULLDconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XMULLDconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLD <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLDload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLDload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADD (SLDconst <t> x [uint8(log32(c&(c-1)))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLDconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUB (SLDconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLDconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLDconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLDconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)*d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) * d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLDload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLDload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (MULLD x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XMULLD)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (MULLDload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLDload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLDload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLDload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLDload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLW x (MOVDconst [c]))
+ // result: (MULLWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMULLWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (MULLW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (MULLWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XMULLWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c&(c-1))
+ // result: (ADDW (SLWconst <t> x [uint8(log32(c&(c-1)))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c & (c - 1))) {
+ break
+ }
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c & (c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(c+(c&^(c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(c+(c&^(c-1))))]) (SLWconst <t> x [uint8(log32(c&^(c-1)))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(c + (c &^ (c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(c + (c &^ (c - 1)))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(c &^ (c - 1))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst <t> x [c])
+ // cond: isPowerOfTwo32(-c+(-c&^(-c-1)))
+ // result: (SUBW (SLWconst <t> x [uint8(log32(-c&^(-c-1)))]) (SLWconst <t> x [uint8(log32(-c+(-c&^(-c-1))))]))
+ for {
+ t := v.Type
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(isPowerOfTwo32(-c + (-c &^ (-c - 1)))) {
+ break
+ }
+ v.reset(OpS390XSUBW)
+ v0 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v0.AuxInt = uint8ToAuxInt(uint8(log32(-c &^ (-c - 1))))
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpS390XSLWconst, t)
+ v1.AuxInt = uint8ToAuxInt(uint8(log32(-c + (-c &^ (-c - 1)))))
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (MULLWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c*int32(d))])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c * int32(d)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XMULLWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (MULLWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (MULLWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (MULLWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (MULLWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XMULLWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEG(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEG (MOVDconst [c]))
+ // result: (MOVDconst [-c])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (NEG (ADDconst [c] (NEG x)))
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ if v_0.Op != OpS390XADDconst {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XNEG {
+ break
+ }
+ x := v_0_0.Args[0]
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNEGW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NEGW (MOVDconst [c]))
+ // result: (MOVDconst [int64(int32(-c))])
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(-c)))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XNOT(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NOT x)
+ // result: (XOR (MOVDconst [-1]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XNOTW(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (NOTW x)
+ // result: (XORWconst [-1] x)
+ for {
+ x := v_0
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(-1)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (OR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (ORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [-1<<63]) (LGDR <t> x))
+ // result: (LGDR <t> (LNDFR <x.Type> x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0.AuxInt) != -1<<63 || v_1.Op != OpS390XLGDR {
+ continue
+ }
+ t := v_1.Type
+ x := v_1.Args[0]
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLNDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (LGDR (LPDFR <t> y)))
+ // cond: r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <t> y x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XLGDR {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpS390XLPDFR {
+ continue
+ }
+ t := v_1_0.Type
+ y := v_1_0.Args[0]
+ if !(r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, t)
+ v0.AddArg2(y, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (RISBGZ (LGDR x) {r}) (MOVDconst [c]))
+ // cond: c >= 0 && r == s390x.NewRotateParams(0, 0, 0)
+ // result: (LGDR (CPSDR <x.Type> (FMOVDconst <x.Type> [math.Float64frombits(uint64(c))]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XRISBGZ {
+ continue
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XLGDR {
+ continue
+ }
+ x := v_0_0.Args[0]
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c >= 0 && r == s390x.NewRotateParams(0, 0, 0)) {
+ continue
+ }
+ v.reset(OpS390XLGDR)
+ v0 := b.NewValue0(v.Pos, OpS390XCPSDR, x.Type)
+ v1 := b.NewValue0(v.Pos, OpS390XFMOVDconst, x.Type)
+ v1.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(c)))
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (OR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (OR x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (OR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR x1:(MOVBZload [i1] {s} p mem) sh:(SLDconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR x1:(MOVHZload [i1] {s} p mem) sh:(SLDconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR x1:(MOVWZload [i1] {s} p mem) sh:(SLDconst [32] x0:(MOVWZload [i0] {s} p mem)))
+ // cond: i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVDload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVWZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 32 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVWZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+4 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVDload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR s0:(SLDconst [j0] x0:(MOVHZload [i0] {s} p mem)) or:(OR s1:(SLDconst [j1] x1:(MOVHZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+2 && j1 == j0-16 && j1 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j1] (MOVWZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0-16 && j1%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVWZload, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR x0:(MOVBZload [i0] {s} p mem) sh:(SLDconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLDconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZreg (MOVWBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (OR r0:(MOVWZreg x0:(MOVWBRload [i0] {s} p mem)) sh:(SLDconst [32] r1:(MOVWZreg x1:(MOVWBRload [i1] {s} p mem))))
+ // cond: i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVDBRload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVWZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVWBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLDconst || auxIntToUint8(sh.AuxInt) != 32 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVWZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVWBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+4 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVDBRload, typ.UInt64)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (OR s1:(SLDconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(OR s0:(SLDconst [j0] x0:(MOVBZload [i0] {s} p mem)) y))
+ // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (OR s1:(SLDconst [j1] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))) or:(OR s0:(SLDconst [j0] r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem))) y))
+ // cond: i1 == i0+2 && j1 == j0+16 && j0 % 32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (OR <v.Type> (SLDconst <v.Type> [j0] (MOVWZreg (MOVWBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLDconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ r1 := s1.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XOR {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLDconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ r0 := s0.Args[0]
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+2 && j1 == j0+16 && j0%32 == 0 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, r0, r1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XOR, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLDconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVWZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ORW x (MOVDconst [c]))
+ // result: (ORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (ORW x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (ORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW x1:(MOVBZload [i1] {s} p mem) sh:(SLWconst [8] x0:(MOVBZload [i0] {s} p mem)))
+ // cond: i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+1 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVHZload, typ.UInt16)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW x1:(MOVHZload [i1] {s} p mem) sh:(SLWconst [16] x0:(MOVHZload [i0] {s} p mem)))
+ // cond: i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWZload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x1 := v_0
+ if x1.Op != OpS390XMOVHZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ x0 := sh.Args[0]
+ if x0.Op != OpS390XMOVHZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] || !(i1 == i0+2 && p.Op != OpSB && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x0.Pos, OpS390XMOVWZload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) or:(ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) y))
+ // cond: i1 == i0+1 && j1 == j0-8 && j1 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j1] (MOVHZload [i0] {s} p mem)) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s0 := v_0
+ if s0.Op != OpS390XSLWconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ or := v_1
+ if or.Op != OpS390XORW {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s1 := or_0
+ if s1.Op != OpS390XSLWconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(i1 == i0+1 && j1 == j0-8 && j1%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x1.Pos, OpS390XORW, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XSLWconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j1)
+ v2 := b.NewValue0(x1.Pos, OpS390XMOVHZload, typ.UInt16)
+ v2.AuxInt = int32ToAuxInt(i0)
+ v2.Aux = symToAux(s)
+ v2.AddArg2(p, mem)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (ORW x0:(MOVBZload [i0] {s} p mem) sh:(SLWconst [8] x1:(MOVBZload [i1] {s} p mem)))
+ // cond: p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVHZreg (MOVHBRload [i0] {s} p mem))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x0 := v_0
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 8 {
+ continue
+ }
+ x1 := sh.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(p.Op != OpSB && i1 == i0+1 && x0.Uses == 1 && x1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x1.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v1.AuxInt = int32ToAuxInt(i0)
+ v1.Aux = symToAux(s)
+ v1.AddArg2(p, mem)
+ v0.AddArg(v1)
+ return true
+ }
+ break
+ }
+ // match: (ORW r0:(MOVHZreg x0:(MOVHBRload [i0] {s} p mem)) sh:(SLWconst [16] r1:(MOVHZreg x1:(MOVHBRload [i1] {s} p mem))))
+ // cond: i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b,x0,x1) != nil && clobber(x0, x1, r0, r1, sh)
+ // result: @mergePoint(b,x0,x1) (MOVWBRload [i0] {s} p mem)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ r0 := v_0
+ if r0.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x0 := r0.Args[0]
+ if x0.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ s := auxToSym(x0.Aux)
+ mem := x0.Args[1]
+ p := x0.Args[0]
+ sh := v_1
+ if sh.Op != OpS390XSLWconst || auxIntToUint8(sh.AuxInt) != 16 {
+ continue
+ }
+ r1 := sh.Args[0]
+ if r1.Op != OpS390XMOVHZreg {
+ continue
+ }
+ x1 := r1.Args[0]
+ if x1.Op != OpS390XMOVHBRload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ if auxToSym(x1.Aux) != s {
+ continue
+ }
+ _ = x1.Args[1]
+ if p != x1.Args[0] || mem != x1.Args[1] || !(i1 == i0+2 && x0.Uses == 1 && x1.Uses == 1 && r0.Uses == 1 && r1.Uses == 1 && sh.Uses == 1 && mergePoint(b, x0, x1) != nil && clobber(x0, x1, r0, r1, sh)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1)
+ v0 := b.NewValue0(x1.Pos, OpS390XMOVWBRload, typ.UInt32)
+ v.copyOf(v0)
+ v0.AuxInt = int32ToAuxInt(i0)
+ v0.Aux = symToAux(s)
+ v0.AddArg2(p, mem)
+ return true
+ }
+ break
+ }
+ // match: (ORW s1:(SLWconst [j1] x1:(MOVBZload [i1] {s} p mem)) or:(ORW s0:(SLWconst [j0] x0:(MOVBZload [i0] {s} p mem)) y))
+ // cond: p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0 % 16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b,x0,x1,y) != nil && clobber(x0, x1, s0, s1, or)
+ // result: @mergePoint(b,x0,x1,y) (ORW <v.Type> (SLWconst <v.Type> [j0] (MOVHZreg (MOVHBRload [i0] {s} p mem))) y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s1 := v_0
+ if s1.Op != OpS390XSLWconst {
+ continue
+ }
+ j1 := auxIntToUint8(s1.AuxInt)
+ x1 := s1.Args[0]
+ if x1.Op != OpS390XMOVBZload {
+ continue
+ }
+ i1 := auxIntToInt32(x1.AuxInt)
+ s := auxToSym(x1.Aux)
+ mem := x1.Args[1]
+ p := x1.Args[0]
+ or := v_1
+ if or.Op != OpS390XORW {
+ continue
+ }
+ _ = or.Args[1]
+ or_0 := or.Args[0]
+ or_1 := or.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, or_0, or_1 = _i1+1, or_1, or_0 {
+ s0 := or_0
+ if s0.Op != OpS390XSLWconst {
+ continue
+ }
+ j0 := auxIntToUint8(s0.AuxInt)
+ x0 := s0.Args[0]
+ if x0.Op != OpS390XMOVBZload {
+ continue
+ }
+ i0 := auxIntToInt32(x0.AuxInt)
+ if auxToSym(x0.Aux) != s {
+ continue
+ }
+ _ = x0.Args[1]
+ if p != x0.Args[0] || mem != x0.Args[1] {
+ continue
+ }
+ y := or_1
+ if !(p.Op != OpSB && i1 == i0+1 && j1 == j0+8 && j0%16 == 0 && x0.Uses == 1 && x1.Uses == 1 && s0.Uses == 1 && s1.Uses == 1 && or.Uses == 1 && mergePoint(b, x0, x1, y) != nil && clobber(x0, x1, s0, s1, or)) {
+ continue
+ }
+ b = mergePoint(b, x0, x1, y)
+ v0 := b.NewValue0(x0.Pos, OpS390XORW, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(x0.Pos, OpS390XSLWconst, v.Type)
+ v1.AuxInt = uint8ToAuxInt(j0)
+ v2 := b.NewValue0(x0.Pos, OpS390XMOVHZreg, typ.UInt64)
+ v3 := b.NewValue0(x0.Pos, OpS390XMOVHBRload, typ.UInt16)
+ v3.AuxInt = int32ToAuxInt(i0)
+ v3.Aux = symToAux(s)
+ v3.AddArg2(p, mem)
+ v2.AddArg(v3)
+ v1.AddArg(v2)
+ v0.AddArg2(v1, y)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORWconst [c] _)
+ // cond: int32(c)==-1
+ // result: (MOVDconst [-1])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if !(int32(c) == -1) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)|d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (ORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (ORconst [-1] _)
+ // result: (MOVDconst [-1])
+ for {
+ if auxIntToInt64(v.AuxInt) != -1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (ORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c|d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (ORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (OR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (ORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (ORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (ORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (ORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRISBGZ(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (RISBGZ (MOVWZreg x) {r})
+ // cond: r.InMerge(0xffffffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0xffffffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVWZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0xffffffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0xffffffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVHZreg x) {r})
+ // cond: r.InMerge(0x0000ffff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x0000ffff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVHZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x0000ffff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x0000ffff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (MOVBZreg x) {r})
+ // cond: r.InMerge(0x000000ff) != nil
+ // result: (RISBGZ x {*r.InMerge(0x000000ff)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XMOVBZreg {
+ break
+ }
+ x := v_0.Args[0]
+ if !(r.InMerge(0x000000ff) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(*r.InMerge(0x000000ff))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SLDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)<<c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)<<c)).RotateLeft(c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)<<c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) << c)).RotateLeft(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRDconst x [c]) {r})
+ // cond: r.InMerge(^uint64(0)>>c) != nil
+ // result: (RISBGZ x {(*r.InMerge(^uint64(0)>>c)).RotateLeft(-c)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.InMerge(^uint64(0)>>c) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*r.InMerge(^uint64(0) >> c)).RotateLeft(-c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (RISBGZ x {y}) {z})
+ // cond: z.InMerge(y.OutMask()) != nil
+ // result: (RISBGZ x {(*z.InMerge(y.OutMask())).RotateLeft(y.Amount)})
+ for {
+ z := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ y := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(z.InMerge(y.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*z.InMerge(y.OutMask())).RotateLeft(y.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.End == 63 && r.Start == -r.Amount&63
+ // result: (SRDconst x [-r.Amount&63])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.End == 63 && r.Start == -r.Amount&63) {
+ break
+ }
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(-r.Amount & 63)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r.Start == 0 && r.End == 63-r.Amount
+ // result: (SLDconst x [r.Amount])
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r.Start == 0 && r.End == 63-r.Amount) {
+ break
+ }
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(r.Amount)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (SRADconst x [c]) {r})
+ // cond: r.Start == r.End && (r.Start+r.Amount)&63 <= c
+ // result: (RISBGZ x {s390x.NewRotateParams(r.Start, r.Start, -r.Start&63)})
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XSRADconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(r.Start == r.End && (r.Start+r.Amount)&63 <= c) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(r.Start, r.Start, -r.Start&63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(56, 63, 0)
+ // result: (MOVBZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(56, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVBZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(48, 63, 0)
+ // result: (MOVHZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(48, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVHZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ x {r})
+ // cond: r == s390x.NewRotateParams(32, 63, 0)
+ // result: (MOVWZreg x)
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ x := v_0
+ if !(r == s390x.NewRotateParams(32, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XMOVWZreg)
+ v.AddArg(x)
+ return true
+ }
+ // match: (RISBGZ (LGDR <t> x) {r})
+ // cond: r == s390x.NewRotateParams(1, 63, 0)
+ // result: (LGDR <t> (LPDFR <x.Type> x))
+ for {
+ r := auxToS390xRotateParams(v.Aux)
+ if v_0.Op != OpS390XLGDR {
+ break
+ }
+ t := v_0.Type
+ x := v_0.Args[0]
+ if !(r == s390x.NewRotateParams(1, 63, 0)) {
+ break
+ }
+ v.reset(OpS390XLGDR)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpS390XLPDFR, x.Type)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLL(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLL x (MOVDconst [c]))
+ // result: (RLLconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XRLLG(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RLLG x (MOVDconst [c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, uint8(c&63))})
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, uint8(c&63)))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLD x (MOVDconst [c]))
+ // result: (SLDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSLDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (AND (MOVDconst [c]) y))
+ // result: (SLD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVWZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVHZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLD x (MOVBZreg y))
+ // result: (SLD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLDconst (SRDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSRDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(uint8(max8(0, int8(c-d))), 63-d, uint8(int8(d-c)&63)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(0, 63-c, c).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SLW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SLWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSLWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SLW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SLW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (AND (MOVDconst [c]) y))
+ // result: (SLW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSLW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SLW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVWZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVHZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SLW x (MOVBZreg y))
+ // result: (SLW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSLW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSLWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SLWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAD x (MOVDconst [c]))
+ // result: (SRADconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (AND (MOVDconst [c]) y))
+ // result: (SRAD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVWZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVHZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAD x (MOVBZreg y))
+ // result: (SRAD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRADconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRADconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRADconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [d>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRAWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (SRAWconst x [31])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XSRAWconst)
+ v.AuxInt = uint8ToAuxInt(31)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRAW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (AND (MOVDconst [c]) y))
+ // result: (SRAW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRAW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRAW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVWZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVHZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRAW x (MOVBZreg y))
+ // result: (SRAW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRAW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRAWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRAWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SRAWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(int32(d))>>uint64(c)])
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(int32(d)) >> uint64(c))
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRD(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRD x (MOVDconst [c]))
+ // result: (SRDconst x [uint8(c&63)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSRDconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 63))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRD x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (AND (MOVDconst [c]) y))
+ // result: (SRD x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRD)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRD x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVWZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVHZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRD x (MOVBZreg y))
+ // result: (SRD x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRD)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRDconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRDconst (SLDconst x [c]) [d])
+ // result: (RISBGZ x {s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63))})
+ for {
+ d := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XSLDconst {
+ break
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(d, uint8(min8(63, int8(63-c+d))), uint8(int8(c-d)&63)))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst (RISBGZ x {r}) [c])
+ // cond: s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil
+ // result: (RISBGZ x {(*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount)})
+ for {
+ c := auxIntToUint8(v.AuxInt)
+ if v_0.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_0.Aux)
+ x := v_0.Args[0]
+ if !(s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask()) != nil) {
+ break
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux((*s390x.NewRotateParams(c, 63, -c&63).InMerge(r.OutMask())).RotateLeft(r.Amount))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRDconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SRW x (MOVDconst [c]))
+ // cond: c&32 == 0
+ // result: (SRWconst x [uint8(c&31)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 == 0) {
+ break
+ }
+ v.reset(OpS390XSRWconst)
+ v.AuxInt = uint8ToAuxInt(uint8(c & 31))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SRW _ (MOVDconst [c]))
+ // cond: c&32 != 0
+ // result: (MOVDconst [0])
+ for {
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c&32 != 0) {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SRW x (RISBGZ y {r}))
+ // cond: r.Amount == 0 && r.OutMask()&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XRISBGZ {
+ break
+ }
+ r := auxToS390xRotateParams(v_1.Aux)
+ y := v_1.Args[0]
+ if !(r.Amount == 0 && r.OutMask()&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (AND (MOVDconst [c]) y))
+ // result: (SRW x (ANDWconst <typ.UInt32> [int32(c&63)] y))
+ for {
+ x := v_0
+ if v_1.Op != OpS390XAND {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ y := v_1_1
+ v.reset(OpS390XSRW)
+ v0 := b.NewValue0(v.Pos, OpS390XANDWconst, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c & 63))
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ break
+ }
+ // match: (SRW x (ANDWconst [c] y))
+ // cond: c&63 == 63
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XANDWconst {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ y := v_1.Args[0]
+ if !(c&63 == 63) {
+ break
+ }
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVWZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVWZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVHZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVHZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SRW x (MOVBZreg y))
+ // result: (SRW x y)
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVBZreg {
+ break
+ }
+ y := v_1.Args[0]
+ v.reset(OpS390XSRW)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSRWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SRWconst x [0])
+ // result: x
+ for {
+ if auxIntToUint8(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTM2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STM2 [i] {s} p w2 w3 x:(STM2 [i-8] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)
+ // result: (STM4 [i-8] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTM2 || auxIntToInt32(x.AuxInt) != i-8 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-8) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTM4)
+ v.AuxInt = int32ToAuxInt(i - 8)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ // match: (STM2 [i] {s} p (SRDconst [32] x) x mem)
+ // result: (MOVDstore [i] {s} p x mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 32 {
+ break
+ }
+ x := v_1.Args[0]
+ if x != v_2 {
+ break
+ }
+ mem := v_3
+ v.reset(OpS390XMOVDstore)
+ v.AuxInt = int32ToAuxInt(i)
+ v.Aux = symToAux(s)
+ v.AddArg3(p, x, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSTMG2(v *Value) bool {
+ v_3 := v.Args[3]
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (STMG2 [i] {s} p w2 w3 x:(STMG2 [i-16] {s} p w0 w1 mem))
+ // cond: x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)
+ // result: (STMG4 [i-16] {s} p w0 w1 w2 w3 mem)
+ for {
+ i := auxIntToInt32(v.AuxInt)
+ s := auxToSym(v.Aux)
+ p := v_0
+ w2 := v_1
+ w3 := v_2
+ x := v_3
+ if x.Op != OpS390XSTMG2 || auxIntToInt32(x.AuxInt) != i-16 || auxToSym(x.Aux) != s {
+ break
+ }
+ mem := x.Args[3]
+ if p != x.Args[0] {
+ break
+ }
+ w0 := x.Args[1]
+ w1 := x.Args[2]
+ if !(x.Uses == 1 && is20Bit(int64(i)-16) && clobber(x)) {
+ break
+ }
+ v.reset(OpS390XSTMG4)
+ v.AuxInt = int32ToAuxInt(i - 16)
+ v.Aux = symToAux(s)
+ v.AddArg6(p, w0, w1, w2, w3, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUB x (MOVDconst [c]))
+ // cond: is32Bit(c)
+ // result: (SUBconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XSUBconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUB (MOVDconst [c]) x)
+ // cond: is32Bit(c)
+ // result: (NEG (SUBconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ if !(is32Bit(c)) {
+ break
+ }
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUB x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUB <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBE(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBE x y (FlagGT))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagGT {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (FlagOV))
+ // result: (SUBC x y)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpS390XFlagOV {
+ break
+ }
+ v.reset(OpS390XSUBC)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (SUBE x y (Select1 (SUBC (MOVDconst [0]) (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) c))))))
+ // result: (SUBE x y c)
+ for {
+ x := v_0
+ y := v_1
+ if v_2.Op != OpSelect1 {
+ break
+ }
+ v_2_0 := v_2.Args[0]
+ if v_2_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_2_0.Args[1]
+ v_2_0_0 := v_2_0.Args[0]
+ if v_2_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1 := v_2_0.Args[1]
+ if v_2_0_1.Op != OpS390XNEG {
+ break
+ }
+ v_2_0_1_0 := v_2_0_1.Args[0]
+ if v_2_0_1_0.Op != OpSelect0 {
+ break
+ }
+ v_2_0_1_0_0 := v_2_0_1_0.Args[0]
+ if v_2_0_1_0_0.Op != OpS390XSUBE {
+ break
+ }
+ c := v_2_0_1_0_0.Args[2]
+ v_2_0_1_0_0_0 := v_2_0_1_0_0.Args[0]
+ if v_2_0_1_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_2_0_1_0_0_1 := v_2_0_1_0_0.Args[1]
+ if v_2_0_1_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_2_0_1_0_0_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpS390XSUBE)
+ v.AddArg3(x, y, c)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBW x (MOVDconst [c]))
+ // result: (SUBWconst x [int32(c)])
+ for {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XSUBWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBW (MOVDconst [c]) x)
+ // result: (NEGW (SUBWconst <v.Type> x [int32(c)]))
+ for {
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ x := v_1
+ v.reset(OpS390XNEGW)
+ v0 := b.NewValue0(v.Pos, OpS390XSUBWconst, v.Type)
+ v0.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (SUBW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (SUBWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ break
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBWconst [c] x)
+ // cond: int32(c) == 0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBWconst [c] x)
+ // result: (ADDWconst [-int32(c)] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ v.reset(OpS390XADDWconst)
+ v.AuxInt = int32ToAuxInt(-int32(c))
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSUBWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (SUBWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SUBconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt32(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (SUBconst [c] x)
+ // cond: c != -(1<<31)
+ // result: (ADDconst [-c] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(c != -(1 << 31)) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SUBconst (MOVDconst [d]) [c])
+ // result: (MOVDconst [d-int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(d - int64(c))
+ return true
+ }
+ // match: (SUBconst (SUBconst x [d]) [c])
+ // cond: is32Bit(-int64(c)-int64(d))
+ // result: (ADDconst [-c-d] x)
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XSUBconst {
+ break
+ }
+ d := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ if !(is32Bit(-int64(c) - int64(d))) {
+ break
+ }
+ v.reset(OpS390XADDconst)
+ v.AuxInt = int32ToAuxInt(-c - d)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSUBload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (SUBload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (SUB x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XSUB)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (SUBload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (SUBload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (SUBload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (SUBload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XSUBload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XSumBytes2(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes2 x)
+ // result: (ADDW (SRWconst <typ.UInt8> x [8]) x)
+ for {
+ x := v_0
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt8)
+ v0.AuxInt = uint8ToAuxInt(8)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes4(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes4 x)
+ // result: (SumBytes2 (ADDW <typ.UInt16> (SRWconst <typ.UInt16> x [16]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes2)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpS390XSRWconst, typ.UInt16)
+ v1.AuxInt = uint8ToAuxInt(16)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XSumBytes8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SumBytes8 x)
+ // result: (SumBytes4 (ADDW <typ.UInt32> (SRDconst <typ.UInt32> x [32]) x))
+ for {
+ x := v_0
+ v.reset(OpS390XSumBytes4)
+ v0 := b.NewValue0(v.Pos, OpS390XADDW, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpS390XSRDconst, typ.UInt32)
+ v1.AuxInt = uint8ToAuxInt(32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpS390XXOR(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XOR x (MOVDconst [c]))
+ // cond: isU32Bit(c)
+ // result: (XORconst [c] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isU32Bit(c)) {
+ continue
+ }
+ v.reset(OpS390XXORconst)
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (SLDconst x [c]) (SRDconst x [64-c]))
+ // result: (RISBGZ x {s390x.NewRotateParams(0, 63, c)})
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLDconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRDconst || auxIntToUint8(v_1.AuxInt) != 64-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRISBGZ)
+ v.Aux = s390xRotateParamsToAux(s390x.NewRotateParams(0, 63, c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XOR (MOVDconst [c]) (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (XOR x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XOR <t> x g:(MOVDload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVDload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORW(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORW x (MOVDconst [c]))
+ // result: (XORWconst [int32(c)] x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpS390XMOVDconst {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpS390XXORWconst)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORW (SLWconst x [c]) (SRWconst x [32-c]))
+ // result: (RLLconst x [c])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpS390XSLWconst {
+ continue
+ }
+ c := auxIntToUint8(v_0.AuxInt)
+ x := v_0.Args[0]
+ if v_1.Op != OpS390XSRWconst || auxIntToUint8(v_1.AuxInt) != 32-c || x != v_1.Args[0] {
+ continue
+ }
+ v.reset(OpS390XRLLconst)
+ v.AuxInt = uint8ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (XORW x x)
+ // result: (MOVDconst [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (XORW <t> x g:(MOVWload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ // match: (XORW <t> x g:(MOVWZload [off] {sym} ptr mem))
+ // cond: ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)
+ // result: (XORWload <t> [off] {sym} x ptr mem)
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ g := v_1
+ if g.Op != OpS390XMOVWZload {
+ continue
+ }
+ off := auxIntToInt32(g.AuxInt)
+ sym := auxToSym(g.Aux)
+ mem := g.Args[1]
+ ptr := g.Args[0]
+ if !(ptr.Op != OpSB && is20Bit(int64(off)) && canMergeLoadClobber(v, g, x) && clobber(g)) {
+ continue
+ }
+ v.reset(OpS390XXORWload)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(off)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORWconst [c] x)
+ // cond: int32(c)==0
+ // result: x
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ x := v_0
+ if !(int32(c) == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORWconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [int64(c)^d])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(int64(c) ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORWload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (XORWload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORWload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORWload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORWload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORWload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORconst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (XORconst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
+ // match: (XORconst [c] (MOVDconst [d]))
+ // result: (MOVDconst [c^d])
+ for {
+ c := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpS390XXORload(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (XORload <t> [off] {sym} x ptr1 (FMOVDstore [off] {sym} ptr2 y _))
+ // cond: isSamePtr(ptr1, ptr2)
+ // result: (XOR x (LGDR <t> y))
+ for {
+ t := v.Type
+ off := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ ptr1 := v_1
+ if v_2.Op != OpS390XFMOVDstore || auxIntToInt32(v_2.AuxInt) != off || auxToSym(v_2.Aux) != sym {
+ break
+ }
+ y := v_2.Args[1]
+ ptr2 := v_2.Args[0]
+ if !(isSamePtr(ptr1, ptr2)) {
+ break
+ }
+ v.reset(OpS390XXOR)
+ v0 := b.NewValue0(v_2.Pos, OpS390XLGDR, t)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (XORload [off1] {sym} x (ADDconst [off2] ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))
+ // result: (XORload [off1+off2] {sym} x ptr mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XADDconst {
+ break
+ }
+ off2 := auxIntToInt32(v_1.AuxInt)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(off1)+int64(off2))) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ // match: (XORload [o1] {s1} x (MOVDaddr [o2] {s2} ptr) mem)
+ // cond: ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)
+ // result: (XORload [o1+o2] {mergeSym(s1, s2)} x ptr mem)
+ for {
+ o1 := auxIntToInt32(v.AuxInt)
+ s1 := auxToSym(v.Aux)
+ x := v_0
+ if v_1.Op != OpS390XMOVDaddr {
+ break
+ }
+ o2 := auxIntToInt32(v_1.AuxInt)
+ s2 := auxToSym(v_1.Aux)
+ ptr := v_1.Args[0]
+ mem := v_2
+ if !(ptr.Op != OpSB && is20Bit(int64(o1)+int64(o2)) && canMergeSym(s1, s2)) {
+ break
+ }
+ v.reset(OpS390XXORload)
+ v.AuxInt = int32ToAuxInt(o1 + o2)
+ v.Aux = symToAux(mergeSym(s1, s2))
+ v.AddArg3(x, ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select0 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2.AuxInt = int16ToAuxInt(-1)
+ v2.AddArg(c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 (Sub64borrow x y c))
+ // result: (Select0 <typ.UInt64> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v2 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v3 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(0)
+ v2.AddArg2(v3, c)
+ v1.AddArg(v2)
+ v0.AddArg3(x, y, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst32 val tuple))
+ // result: (ADDW val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADDW)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 <t> (AddTupleFirst64 val tuple))
+ // result: (ADD val (Select0 <t> tuple))
+ for {
+ t := v.Type
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ val := v_0.Args[0]
+ v.reset(OpS390XADD)
+ v0 := b.NewValue0(v.Pos, OpSelect0, t)
+ v0.AddArg(tuple)
+ v.AddArg2(val, v0)
+ return true
+ }
+ // match: (Select0 (ADDCconst (MOVDconst [c]) [d]))
+ // result: (MOVDconst [c+int64(d)])
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c + int64(d))
+ return true
+ }
+ // match: (Select0 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // result: (MOVDconst [c-d])
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpS390XMOVDconst)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Select0 (FADD (FMUL y z) x))
+ // result: (FMADD x y z)
+ for {
+ if v_0.Op != OpS390XFADD {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMUL {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ v.reset(OpS390XFMADD)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUB (FMUL y z) x))
+ // result: (FMSUB x y z)
+ for {
+ if v_0.Op != OpS390XFSUB {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMUL {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ v.reset(OpS390XFMSUB)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ // match: (Select0 (FADDS (FMULS y z) x))
+ // result: (FMADDS x y z)
+ for {
+ if v_0.Op != OpS390XFADDS {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpS390XFMULS {
+ continue
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ x := v_0_1
+ v.reset(OpS390XFMADDS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ break
+ }
+ // match: (Select0 (FSUBS (FMULS y z) x))
+ // result: (FMSUBS x y z)
+ for {
+ if v_0.Op != OpS390XFSUBS {
+ break
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XFMULS {
+ break
+ }
+ z := v_0_0.Args[1]
+ y := v_0_0.Args[0]
+ v.reset(OpS390XFMSUBS)
+ v.AddArg3(x, y, z)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Select1 (Add64carry x y c))
+ // result: (Select0 <typ.UInt64> (ADDE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (ADDE x y (Select1 <types.TypeFlags> (ADDCconst c [-1]))))))
+ for {
+ if v_0.Op != OpAdd64carry {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpSelect0)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v1 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v3 := b.NewValue0(v.Pos, OpS390XADDE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v4 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v5 := b.NewValue0(v.Pos, OpS390XADDCconst, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5.AuxInt = int16ToAuxInt(-1)
+ v5.AddArg(c)
+ v4.AddArg(v5)
+ v3.AddArg3(x, y, v4)
+ v2.AddArg(v3)
+ v0.AddArg3(v1, v1, v2)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (Sub64borrow x y c))
+ // result: (NEG (Select0 <typ.UInt64> (SUBE (MOVDconst [0]) (MOVDconst [0]) (Select1 <types.TypeFlags> (SUBE x y (Select1 <types.TypeFlags> (SUBC (MOVDconst [0]) c)))))))
+ for {
+ if v_0.Op != OpSub64borrow {
+ break
+ }
+ c := v_0.Args[2]
+ x := v_0.Args[0]
+ y := v_0.Args[1]
+ v.reset(OpS390XNEG)
+ v0 := b.NewValue0(v.Pos, OpSelect0, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v2 := b.NewValue0(v.Pos, OpS390XMOVDconst, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpS390XSUBE, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v6 := b.NewValue0(v.Pos, OpS390XSUBC, types.NewTuple(typ.UInt64, types.TypeFlags))
+ v6.AddArg2(v2, c)
+ v5.AddArg(v6)
+ v4.AddArg3(x, y, v5)
+ v3.AddArg(v4)
+ v1.AddArg3(v2, v2, v3)
+ v0.AddArg(v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst32 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst32 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (AddTupleFirst64 _ tuple))
+ // result: (Select1 tuple)
+ for {
+ if v_0.Op != OpS390XAddTupleFirst64 {
+ break
+ }
+ tuple := v_0.Args[1]
+ v.reset(OpSelect1)
+ v.AddArg(tuple)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0
+ // result: (FlagEQ)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) == 0) {
+ break
+ }
+ v.reset(OpS390XFlagEQ)
+ return true
+ }
+ // match: (Select1 (ADDCconst (MOVDconst [c]) [d]))
+ // cond: uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0
+ // result: (FlagLT)
+ for {
+ if v_0.Op != OpS390XADDCconst {
+ break
+ }
+ d := auxIntToInt16(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if !(uint64(c+int64(d)) >= uint64(c) && c+int64(d) != 0) {
+ break
+ }
+ v.reset(OpS390XFlagLT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d == 0
+ // result: (FlagGT)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d == 0) {
+ break
+ }
+ v.reset(OpS390XFlagGT)
+ return true
+ }
+ // match: (Select1 (SUBC (MOVDconst [c]) (MOVDconst [d])))
+ // cond: uint64(d) <= uint64(c) && c-d != 0
+ // result: (FlagOV)
+ for {
+ if v_0.Op != OpS390XSUBC {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst {
+ break
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ d := auxIntToInt64(v_0_1.AuxInt)
+ if !(uint64(d) <= uint64(c) && c-d != 0) {
+ break
+ }
+ v.reset(OpS390XFlagOV)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Slicemask <t> x)
+ // result: (SRADconst (NEG <t> x) [63])
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpS390XSRADconst)
+ v.AuxInt = uint8ToAuxInt(63)
+ v0 := b.NewValue0(v.Pos, OpS390XNEG, t)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8 && is64BitFloat(val.Type)
+ // result: (FMOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8 && is64BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpS390XFMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4 && is32BitFloat(val.Type)
+ // result: (FMOVSstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4 && is32BitFloat(val.Type)) {
+ break
+ }
+ v.reset(OpS390XFMOVSstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (MOVDstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpS390XMOVDstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (MOVWstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpS390XMOVWstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (MOVHstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpS390XMOVHstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (MOVBstore ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpS390XMOVBstore)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueS390X_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub32F x y)
+ // result: (Select0 (FSUBS x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUBS, types.NewTuple(typ.Float32, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64F x y)
+ // result: (Select0 (FSUB x y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpSelect0)
+ v0 := b.NewValue0(v.Pos, OpS390XFSUB, types.NewTuple(typ.Float64, types.TypeFlags))
+ v0.AddArg2(x, y)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueS390X_OpTrunc(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc x)
+ // result: (FIDBR [5] x)
+ for {
+ x := v_0
+ v.reset(OpS390XFIDBR)
+ v.AuxInt = int8ToAuxInt(5)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValueS390X_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (MOVBstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (MOVHstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (MOVWstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (MOVDstoreconst [0] destptr mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVDstoreconst)
+ v.AuxInt = valAndOffToAuxInt(0)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,2)] destptr (MOVHstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 2))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVHstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (MOVBstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVBstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (MOVHstoreconst [makeValAndOff(0,4)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVHstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 4))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (MOVWstoreconst [makeValAndOff(0,3)] destptr (MOVWstoreconst [0] destptr mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpS390XMOVWstoreconst)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(0, 3))
+ v0 := b.NewValue0(v.Pos, OpS390XMOVWstoreconst, types.TypeMem)
+ v0.AuxInt = valAndOffToAuxInt(0)
+ v0.AddArg2(destptr, mem)
+ v.AddArg2(destptr, v0)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 0 && s <= 1024
+ // result: (CLEAR [makeValAndOff(int32(s), 0)] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 0 && s <= 1024) {
+ break
+ }
+ v.reset(OpS390XCLEAR)
+ v.AuxInt = valAndOffToAuxInt(makeValAndOff(int32(s), 0))
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ // match: (Zero [s] destptr mem)
+ // cond: s > 1024
+ // result: (LoweredZero [s%256] destptr (ADDconst <destptr.Type> destptr [(int32(s)/256)*256]) mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s > 1024) {
+ break
+ }
+ v.reset(OpS390XLoweredZero)
+ v.AuxInt = int64ToAuxInt(s % 256)
+ v0 := b.NewValue0(v.Pos, OpS390XADDconst, destptr.Type)
+ v0.AuxInt = int32ToAuxInt((int32(s) / 256) * 256)
+ v0.AddArg(destptr)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ return false
+}
+func rewriteBlockS390X(b *Block) bool {
+ typ := &b.Func.Config.Types
+ switch b.Kind {
+ case BlockS390XBRC:
+ // match: (BRC {c} x:(CMP _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPW _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWU _ _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} x:(CMPWUconst _) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (BRC {c&^s390x.Unordered} x yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ x := b.Controls[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, x)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMP x y) yes no)
+ // result: (CGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMP {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPW x y) yes no)
+ // result: (CRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPW {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPU x y) yes no)
+ // result: (CLGRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLGRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWU x y) yes no)
+ // result: (CLRJ {c&^s390x.Unordered} x y yes no)
+ for b.Controls[0].Op == OpS390XCMPWU {
+ v_0 := b.Controls[0]
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl2(BlockS390XCLRJ, x, y)
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CGIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32( int8(y))
+ // result: (CIJ {c&^s390x.Unordered} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLGIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32(uint8(y))
+ // result: (CLIJ {c&^s390x.Unordered} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y))) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c &^ s390x.Unordered)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.LessOrEqual} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.LessOrEqual} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.Less} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.LessOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.Less)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPconst x [-129]) yes no)
+ // result: (CGIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Greater} (CMPWconst x [-129]) yes no)
+ // result: (CIJ {s390x.GreaterOrEqual} x [-128] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != -129 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(-128)
+ b.Aux = s390xCCMaskToAux(s390x.GreaterOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPconst x [ 128]) yes no)
+ // result: (CGIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWconst x [ 128]) yes no)
+ // result: (CIJ {s390x.Greater} x [ 127] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 128 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(127)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.Less} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.LessOrEqual} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrEqual)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPWUconst x [256]) yes no)
+ // result: (CLIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {s390x.GreaterOrEqual} (CMPUconst x [256]) yes no)
+ // result: (CLGIJ {s390x.Greater} x [255] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ if auxIntToInt32(v_0.AuxInt) != 256 {
+ break
+ }
+ x := v_0.Args[0]
+ if auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(255)
+ b.Aux = s390xCCMaskToAux(s390x.Greater)
+ return true
+ }
+ // match: (BRC {c} (CMPconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWconst x [y]) yes no)
+ // cond: y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(uint8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (CMPWUconst x [y]) yes no)
+ // cond: y == int32( int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[0].Op == OpS390XCMPWUconst {
+ v_0 := b.Controls[0]
+ y := auxIntToInt32(v_0.AuxInt)
+ x := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ if !(y == int32(int8(y)) && (c == s390x.Equal || c == s390x.LessOrGreater)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (BRC {c} (InvertFlags cmp) yes no)
+ // result: (BRC {c.ReverseComparison()} cmp yes no)
+ for b.Controls[0].Op == OpS390XInvertFlags {
+ v_0 := b.Controls[0]
+ cmp := v_0.Args[0]
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered != 0
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (BRC {c} (FlagEQ) yes no)
+ // cond: c&s390x.Equal == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagEQ {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagLT) yes no)
+ // cond: c&s390x.Less == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagLT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagGT) yes no)
+ // cond: c&s390x.Greater == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagGT {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (BRC {c} (FlagOV) yes no)
+ // cond: c&s390x.Unordered == 0
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XFlagOV {
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Unordered == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCGIJ:
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int64(x) == int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int64(x) < int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int64(x) > int64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int64(x) == int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int64(x) == int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int64(x) < int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int64(x) < int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int64(x) > int64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int64(x) > int64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToInt8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCGRJ:
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CGIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CGIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCGIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCIJ:
+ // match: (CIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && int32(x) == int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && int32(x) < int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && int32(x) > int32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && int32(x) == int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && int32(x) == int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && int32(x) < int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && int32(x) < int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && int32(x) > int32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToInt8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && int32(x) > int32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLGIJ:
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint64(x) == uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint64(x) < uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint64(x) > uint64(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint64(x) == uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint64(x) == uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint64(x) < uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint64(x) < uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint64(x) > uint64(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint64(x) > uint64(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [1])
+ // result: (BRC {s390x.NoCarry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.NoCarry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (Select0 (ADDE (MOVDconst [0]) (MOVDconst [0]) carry)) [0])
+ // result: (BRC {s390x.Carry} carry)
+ for b.Controls[0].Op == OpSelect0 {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XADDE {
+ break
+ }
+ carry := v_0_0.Args[2]
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, carry)
+ b.Aux = s390xCCMaskToAux(s390x.Carry)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Equal} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.Equal {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.LessOrGreater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [1])
+ // result: (BRC {s390x.NoBorrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 1 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.NoBorrow)
+ return true
+ }
+ // match: (CLGIJ {s390x.Greater} (NEG (Select0 (SUBE (MOVDconst [0]) (MOVDconst [0]) borrow))) [0])
+ // result: (BRC {s390x.Borrow} borrow)
+ for b.Controls[0].Op == OpS390XNEG {
+ v_0 := b.Controls[0]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelect0 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpS390XSUBE {
+ break
+ }
+ borrow := v_0_0_0.Args[2]
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_0_0_1 := v_0_0_0.Args[1]
+ if v_0_0_0_1.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0_0_1.AuxInt) != 0 || auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Greater {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, borrow)
+ b.Aux = s390xCCMaskToAux(s390x.Borrow)
+ return true
+ }
+ case BlockS390XCLGRJ:
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLGIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLGIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLGIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLGRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLGRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLIJ:
+ // match: (CLIJ {s390x.LessOrGreater} (LOCGR {d} (MOVDconst [0]) (MOVDconst [x]) cmp) [0] yes no)
+ // cond: int32(x) != 0
+ // result: (BRC {d} cmp yes no)
+ for b.Controls[0].Op == OpS390XLOCGR {
+ v_0 := b.Controls[0]
+ d := auxToS390xCCMask(v_0.Aux)
+ cmp := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpS390XMOVDconst || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpS390XMOVDconst {
+ break
+ }
+ x := auxIntToInt64(v_0_1.AuxInt)
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.LessOrGreater || !(int32(x) != 0) {
+ break
+ }
+ b.resetWithControl(BlockS390XBRC, cmp)
+ b.Aux = s390xCCMaskToAux(d)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVWZreg x) [y] yes no)
+ // result: (CLIJ {c} x [y] yes no)
+ for b.Controls[0].Op == OpS390XMOVWZreg {
+ v_0 := b.Controls[0]
+ x := v_0.Args[0]
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(y)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal != 0 && uint32(x) == uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal != 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less != 0 && uint32(x) < uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less != 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater != 0 && uint32(x) > uint32(y)
+ // result: (First yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater != 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Equal == 0 && uint32(x) == uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Equal == 0 && uint32(x) == uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Less == 0 && uint32(x) < uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Less == 0 && uint32(x) < uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {c} (MOVDconst [x]) [y] yes no)
+ // cond: c&s390x.Greater == 0 && uint32(x) > uint32(y)
+ // result: (First no yes)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := auxIntToUint8(b.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(c&s390x.Greater == 0 && uint32(x) > uint32(y)) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (CLIJ {s390x.GreaterOrEqual} _ [0] yes no)
+ // result: (First yes no)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.GreaterOrEqual {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLIJ {s390x.Less} _ [0] yes no)
+ // result: (First no yes)
+ for {
+ if auxIntToUint8(b.AuxInt) != 0 || auxToS390xCCMask(b.Aux) != s390x.Less {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCLRJ:
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: isU8Bit(y)
+ // result: (CLIJ {c} x [uint8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, x)
+ b.AuxInt = uint8ToAuxInt(uint8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: isU8Bit(x)
+ // result: (CLIJ {c.ReverseComparison()} y [uint8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(isU8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCLIJ, y)
+ b.AuxInt = uint8ToAuxInt(uint8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !isU8Bit(y) && isU32Bit(y)
+ // result: (BRC {c} (CMPWUconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(y) && isU32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CLRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !isU8Bit(x) && isU32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWUconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!isU8Bit(x) && isU32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWUconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CLRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockS390XCRJ:
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: is8Bit(y)
+ // result: (CIJ {c} x [ int8(y)] yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(y)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, x)
+ b.AuxInt = int8ToAuxInt(int8(y))
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: is8Bit(x)
+ // result: (CIJ {c.ReverseComparison()} y [ int8(x)] yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(is8Bit(x)) {
+ break
+ }
+ b.resetWithControl(BlockS390XCIJ, y)
+ b.AuxInt = int8ToAuxInt(int8(x))
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x (MOVDconst [y]) yes no)
+ // cond: !is8Bit(y) && is32Bit(y)
+ // result: (BRC {c} (CMPWconst x [int32(y)]) yes no)
+ for b.Controls[1].Op == OpS390XMOVDconst {
+ x := b.Controls[0]
+ v_1 := b.Controls[1]
+ y := auxIntToInt64(v_1.AuxInt)
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(y) && is32Bit(y)) {
+ break
+ }
+ v0 := b.NewValue0(x.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(y))
+ v0.AddArg(x)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c)
+ return true
+ }
+ // match: (CRJ {c} (MOVDconst [x]) y yes no)
+ // cond: !is8Bit(x) && is32Bit(x)
+ // result: (BRC {c.ReverseComparison()} (CMPWconst y [int32(x)]) yes no)
+ for b.Controls[0].Op == OpS390XMOVDconst {
+ v_0 := b.Controls[0]
+ x := auxIntToInt64(v_0.AuxInt)
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(!is8Bit(x) && is32Bit(x)) {
+ break
+ }
+ v0 := b.NewValue0(v_0.Pos, OpS390XCMPWconst, types.TypeFlags)
+ v0.AuxInt = int32ToAuxInt(int32(x))
+ v0.AddArg(y)
+ b.resetWithControl(BlockS390XBRC, v0)
+ b.Aux = s390xCCMaskToAux(c.ReverseComparison())
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal != 0
+ // result: (First yes no)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal != 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (CRJ {c} x y yes no)
+ // cond: x == y && c&s390x.Equal == 0
+ // result: (First no yes)
+ for {
+ x := b.Controls[0]
+ y := b.Controls[1]
+ c := auxToS390xCCMask(b.Aux)
+ if !(x == y && c&s390x.Equal == 0) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ case BlockIf:
+ // match: (If cond yes no)
+ // result: (CLIJ {s390x.LessOrGreater} (MOVBZreg <typ.Bool> cond) [0] yes no)
+ for {
+ cond := b.Controls[0]
+ v0 := b.NewValue0(cond.Pos, OpS390XMOVBZreg, typ.Bool)
+ v0.AddArg(cond)
+ b.resetWithControl(BlockS390XCLIJ, v0)
+ b.AuxInt = uint8ToAuxInt(0)
+ b.Aux = s390xCCMaskToAux(s390x.LessOrGreater)
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewriteWasm.go b/src/cmd/compile/internal/ssa/rewriteWasm.go
new file mode 100644
index 0000000..defd40d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewriteWasm.go
@@ -0,0 +1,4909 @@
+// Code generated from gen/Wasm.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "internal/buildcfg"
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValueWasm(v *Value) bool {
+ switch v.Op {
+ case OpAbs:
+ v.Op = OpWasmF64Abs
+ return true
+ case OpAdd16:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd32F:
+ v.Op = OpWasmF32Add
+ return true
+ case OpAdd64:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAdd64F:
+ v.Op = OpWasmF64Add
+ return true
+ case OpAdd8:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddPtr:
+ v.Op = OpWasmI64Add
+ return true
+ case OpAddr:
+ return rewriteValueWasm_OpAddr(v)
+ case OpAnd16:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd32:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd64:
+ v.Op = OpWasmI64And
+ return true
+ case OpAnd8:
+ v.Op = OpWasmI64And
+ return true
+ case OpAndB:
+ v.Op = OpWasmI64And
+ return true
+ case OpBitLen64:
+ return rewriteValueWasm_OpBitLen64(v)
+ case OpCeil:
+ v.Op = OpWasmF64Ceil
+ return true
+ case OpClosureCall:
+ v.Op = OpWasmLoweredClosureCall
+ return true
+ case OpCom16:
+ return rewriteValueWasm_OpCom16(v)
+ case OpCom32:
+ return rewriteValueWasm_OpCom32(v)
+ case OpCom64:
+ return rewriteValueWasm_OpCom64(v)
+ case OpCom8:
+ return rewriteValueWasm_OpCom8(v)
+ case OpCondSelect:
+ v.Op = OpWasmSelect
+ return true
+ case OpConst16:
+ return rewriteValueWasm_OpConst16(v)
+ case OpConst32:
+ return rewriteValueWasm_OpConst32(v)
+ case OpConst32F:
+ v.Op = OpWasmF32Const
+ return true
+ case OpConst64:
+ v.Op = OpWasmI64Const
+ return true
+ case OpConst64F:
+ v.Op = OpWasmF64Const
+ return true
+ case OpConst8:
+ return rewriteValueWasm_OpConst8(v)
+ case OpConstBool:
+ return rewriteValueWasm_OpConstBool(v)
+ case OpConstNil:
+ return rewriteValueWasm_OpConstNil(v)
+ case OpConvert:
+ v.Op = OpWasmLoweredConvert
+ return true
+ case OpCopysign:
+ v.Op = OpWasmF64Copysign
+ return true
+ case OpCtz16:
+ return rewriteValueWasm_OpCtz16(v)
+ case OpCtz16NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz32:
+ return rewriteValueWasm_OpCtz32(v)
+ case OpCtz32NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz64NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCtz8:
+ return rewriteValueWasm_OpCtz8(v)
+ case OpCtz8NonZero:
+ v.Op = OpWasmI64Ctz
+ return true
+ case OpCvt32Fto32:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto32U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Fto64:
+ v.Op = OpWasmI64TruncSatF32S
+ return true
+ case OpCvt32Fto64F:
+ v.Op = OpWasmF64PromoteF32
+ return true
+ case OpCvt32Fto64U:
+ v.Op = OpWasmI64TruncSatF32U
+ return true
+ case OpCvt32Uto32F:
+ return rewriteValueWasm_OpCvt32Uto32F(v)
+ case OpCvt32Uto64F:
+ return rewriteValueWasm_OpCvt32Uto64F(v)
+ case OpCvt32to32F:
+ return rewriteValueWasm_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValueWasm_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto32F:
+ v.Op = OpWasmF32DemoteF64
+ return true
+ case OpCvt64Fto32U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Fto64:
+ v.Op = OpWasmI64TruncSatF64S
+ return true
+ case OpCvt64Fto64U:
+ v.Op = OpWasmI64TruncSatF64U
+ return true
+ case OpCvt64Uto32F:
+ v.Op = OpWasmF32ConvertI64U
+ return true
+ case OpCvt64Uto64F:
+ v.Op = OpWasmF64ConvertI64U
+ return true
+ case OpCvt64to32F:
+ v.Op = OpWasmF32ConvertI64S
+ return true
+ case OpCvt64to64F:
+ v.Op = OpWasmF64ConvertI64S
+ return true
+ case OpCvtBoolToUint8:
+ v.Op = OpCopy
+ return true
+ case OpDiv16:
+ return rewriteValueWasm_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValueWasm_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValueWasm_OpDiv32(v)
+ case OpDiv32F:
+ v.Op = OpWasmF32Div
+ return true
+ case OpDiv32u:
+ return rewriteValueWasm_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValueWasm_OpDiv64(v)
+ case OpDiv64F:
+ v.Op = OpWasmF64Div
+ return true
+ case OpDiv64u:
+ v.Op = OpWasmI64DivU
+ return true
+ case OpDiv8:
+ return rewriteValueWasm_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValueWasm_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValueWasm_OpEq16(v)
+ case OpEq32:
+ return rewriteValueWasm_OpEq32(v)
+ case OpEq32F:
+ v.Op = OpWasmF32Eq
+ return true
+ case OpEq64:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEq64F:
+ v.Op = OpWasmF64Eq
+ return true
+ case OpEq8:
+ return rewriteValueWasm_OpEq8(v)
+ case OpEqB:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpEqPtr:
+ v.Op = OpWasmI64Eq
+ return true
+ case OpFloor:
+ v.Op = OpWasmF64Floor
+ return true
+ case OpGetCallerPC:
+ v.Op = OpWasmLoweredGetCallerPC
+ return true
+ case OpGetCallerSP:
+ v.Op = OpWasmLoweredGetCallerSP
+ return true
+ case OpGetClosurePtr:
+ v.Op = OpWasmLoweredGetClosurePtr
+ return true
+ case OpInterCall:
+ v.Op = OpWasmLoweredInterCall
+ return true
+ case OpIsInBounds:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpIsNonNil:
+ return rewriteValueWasm_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq16:
+ return rewriteValueWasm_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValueWasm_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValueWasm_OpLeq32(v)
+ case OpLeq32F:
+ v.Op = OpWasmF32Le
+ return true
+ case OpLeq32U:
+ return rewriteValueWasm_OpLeq32U(v)
+ case OpLeq64:
+ v.Op = OpWasmI64LeS
+ return true
+ case OpLeq64F:
+ v.Op = OpWasmF64Le
+ return true
+ case OpLeq64U:
+ v.Op = OpWasmI64LeU
+ return true
+ case OpLeq8:
+ return rewriteValueWasm_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValueWasm_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValueWasm_OpLess16(v)
+ case OpLess16U:
+ return rewriteValueWasm_OpLess16U(v)
+ case OpLess32:
+ return rewriteValueWasm_OpLess32(v)
+ case OpLess32F:
+ v.Op = OpWasmF32Lt
+ return true
+ case OpLess32U:
+ return rewriteValueWasm_OpLess32U(v)
+ case OpLess64:
+ v.Op = OpWasmI64LtS
+ return true
+ case OpLess64F:
+ v.Op = OpWasmF64Lt
+ return true
+ case OpLess64U:
+ v.Op = OpWasmI64LtU
+ return true
+ case OpLess8:
+ return rewriteValueWasm_OpLess8(v)
+ case OpLess8U:
+ return rewriteValueWasm_OpLess8U(v)
+ case OpLoad:
+ return rewriteValueWasm_OpLoad(v)
+ case OpLocalAddr:
+ return rewriteValueWasm_OpLocalAddr(v)
+ case OpLsh16x16:
+ return rewriteValueWasm_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValueWasm_OpLsh16x32(v)
+ case OpLsh16x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh16x8:
+ return rewriteValueWasm_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValueWasm_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValueWasm_OpLsh32x32(v)
+ case OpLsh32x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh32x8:
+ return rewriteValueWasm_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValueWasm_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValueWasm_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValueWasm_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValueWasm_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValueWasm_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValueWasm_OpLsh8x32(v)
+ case OpLsh8x64:
+ v.Op = OpLsh64x64
+ return true
+ case OpLsh8x8:
+ return rewriteValueWasm_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValueWasm_OpMod16(v)
+ case OpMod16u:
+ return rewriteValueWasm_OpMod16u(v)
+ case OpMod32:
+ return rewriteValueWasm_OpMod32(v)
+ case OpMod32u:
+ return rewriteValueWasm_OpMod32u(v)
+ case OpMod64:
+ return rewriteValueWasm_OpMod64(v)
+ case OpMod64u:
+ v.Op = OpWasmI64RemU
+ return true
+ case OpMod8:
+ return rewriteValueWasm_OpMod8(v)
+ case OpMod8u:
+ return rewriteValueWasm_OpMod8u(v)
+ case OpMove:
+ return rewriteValueWasm_OpMove(v)
+ case OpMul16:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul32F:
+ v.Op = OpWasmF32Mul
+ return true
+ case OpMul64:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpMul64F:
+ v.Op = OpWasmF64Mul
+ return true
+ case OpMul8:
+ v.Op = OpWasmI64Mul
+ return true
+ case OpNeg16:
+ return rewriteValueWasm_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValueWasm_OpNeg32(v)
+ case OpNeg32F:
+ v.Op = OpWasmF32Neg
+ return true
+ case OpNeg64:
+ return rewriteValueWasm_OpNeg64(v)
+ case OpNeg64F:
+ v.Op = OpWasmF64Neg
+ return true
+ case OpNeg8:
+ return rewriteValueWasm_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValueWasm_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValueWasm_OpNeq32(v)
+ case OpNeq32F:
+ v.Op = OpWasmF32Ne
+ return true
+ case OpNeq64:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeq64F:
+ v.Op = OpWasmF64Ne
+ return true
+ case OpNeq8:
+ return rewriteValueWasm_OpNeq8(v)
+ case OpNeqB:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNeqPtr:
+ v.Op = OpWasmI64Ne
+ return true
+ case OpNilCheck:
+ v.Op = OpWasmLoweredNilCheck
+ return true
+ case OpNot:
+ v.Op = OpWasmI64Eqz
+ return true
+ case OpOffPtr:
+ v.Op = OpWasmI64AddConst
+ return true
+ case OpOr16:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr32:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr64:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOr8:
+ v.Op = OpWasmI64Or
+ return true
+ case OpOrB:
+ v.Op = OpWasmI64Or
+ return true
+ case OpPopCount16:
+ return rewriteValueWasm_OpPopCount16(v)
+ case OpPopCount32:
+ return rewriteValueWasm_OpPopCount32(v)
+ case OpPopCount64:
+ v.Op = OpWasmI64Popcnt
+ return true
+ case OpPopCount8:
+ return rewriteValueWasm_OpPopCount8(v)
+ case OpRotateLeft16:
+ return rewriteValueWasm_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ v.Op = OpWasmI32Rotl
+ return true
+ case OpRotateLeft64:
+ v.Op = OpWasmI64Rotl
+ return true
+ case OpRotateLeft8:
+ return rewriteValueWasm_OpRotateLeft8(v)
+ case OpRound32F:
+ v.Op = OpCopy
+ return true
+ case OpRound64F:
+ v.Op = OpCopy
+ return true
+ case OpRoundToEven:
+ v.Op = OpWasmF64Nearest
+ return true
+ case OpRsh16Ux16:
+ return rewriteValueWasm_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValueWasm_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValueWasm_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValueWasm_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValueWasm_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValueWasm_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValueWasm_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValueWasm_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValueWasm_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValueWasm_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValueWasm_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValueWasm_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValueWasm_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValueWasm_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValueWasm_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValueWasm_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValueWasm_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValueWasm_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValueWasm_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValueWasm_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValueWasm_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValueWasm_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValueWasm_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValueWasm_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValueWasm_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValueWasm_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValueWasm_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValueWasm_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValueWasm_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValueWasm_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValueWasm_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValueWasm_OpRsh8x8(v)
+ case OpSignExt16to32:
+ return rewriteValueWasm_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValueWasm_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValueWasm_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValueWasm_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValueWasm_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValueWasm_OpSignExt8to64(v)
+ case OpSlicemask:
+ return rewriteValueWasm_OpSlicemask(v)
+ case OpSqrt:
+ v.Op = OpWasmF64Sqrt
+ return true
+ case OpSqrt32:
+ v.Op = OpWasmF32Sqrt
+ return true
+ case OpStaticCall:
+ v.Op = OpWasmLoweredStaticCall
+ return true
+ case OpStore:
+ return rewriteValueWasm_OpStore(v)
+ case OpSub16:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub32F:
+ v.Op = OpWasmF32Sub
+ return true
+ case OpSub64:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSub64F:
+ v.Op = OpWasmF64Sub
+ return true
+ case OpSub8:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpSubPtr:
+ v.Op = OpWasmI64Sub
+ return true
+ case OpTailCall:
+ v.Op = OpWasmLoweredTailCall
+ return true
+ case OpTrunc:
+ v.Op = OpWasmF64Trunc
+ return true
+ case OpTrunc16to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc32to8:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to16:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to32:
+ v.Op = OpCopy
+ return true
+ case OpTrunc64to8:
+ v.Op = OpCopy
+ return true
+ case OpWB:
+ v.Op = OpWasmLoweredWB
+ return true
+ case OpWasmF64Add:
+ return rewriteValueWasm_OpWasmF64Add(v)
+ case OpWasmF64Mul:
+ return rewriteValueWasm_OpWasmF64Mul(v)
+ case OpWasmI64Add:
+ return rewriteValueWasm_OpWasmI64Add(v)
+ case OpWasmI64AddConst:
+ return rewriteValueWasm_OpWasmI64AddConst(v)
+ case OpWasmI64And:
+ return rewriteValueWasm_OpWasmI64And(v)
+ case OpWasmI64Eq:
+ return rewriteValueWasm_OpWasmI64Eq(v)
+ case OpWasmI64Eqz:
+ return rewriteValueWasm_OpWasmI64Eqz(v)
+ case OpWasmI64LeU:
+ return rewriteValueWasm_OpWasmI64LeU(v)
+ case OpWasmI64Load:
+ return rewriteValueWasm_OpWasmI64Load(v)
+ case OpWasmI64Load16S:
+ return rewriteValueWasm_OpWasmI64Load16S(v)
+ case OpWasmI64Load16U:
+ return rewriteValueWasm_OpWasmI64Load16U(v)
+ case OpWasmI64Load32S:
+ return rewriteValueWasm_OpWasmI64Load32S(v)
+ case OpWasmI64Load32U:
+ return rewriteValueWasm_OpWasmI64Load32U(v)
+ case OpWasmI64Load8S:
+ return rewriteValueWasm_OpWasmI64Load8S(v)
+ case OpWasmI64Load8U:
+ return rewriteValueWasm_OpWasmI64Load8U(v)
+ case OpWasmI64LtU:
+ return rewriteValueWasm_OpWasmI64LtU(v)
+ case OpWasmI64Mul:
+ return rewriteValueWasm_OpWasmI64Mul(v)
+ case OpWasmI64Ne:
+ return rewriteValueWasm_OpWasmI64Ne(v)
+ case OpWasmI64Or:
+ return rewriteValueWasm_OpWasmI64Or(v)
+ case OpWasmI64Shl:
+ return rewriteValueWasm_OpWasmI64Shl(v)
+ case OpWasmI64ShrS:
+ return rewriteValueWasm_OpWasmI64ShrS(v)
+ case OpWasmI64ShrU:
+ return rewriteValueWasm_OpWasmI64ShrU(v)
+ case OpWasmI64Store:
+ return rewriteValueWasm_OpWasmI64Store(v)
+ case OpWasmI64Store16:
+ return rewriteValueWasm_OpWasmI64Store16(v)
+ case OpWasmI64Store32:
+ return rewriteValueWasm_OpWasmI64Store32(v)
+ case OpWasmI64Store8:
+ return rewriteValueWasm_OpWasmI64Store8(v)
+ case OpWasmI64Xor:
+ return rewriteValueWasm_OpWasmI64Xor(v)
+ case OpXor16:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor32:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor64:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpXor8:
+ v.Op = OpWasmI64Xor
+ return true
+ case OpZero:
+ return rewriteValueWasm_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValueWasm_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValueWasm_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValueWasm_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValueWasm_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValueWasm_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValueWasm_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValueWasm_OpAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Addr {sym} base)
+ // result: (LoweredAddr {sym} [0] base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(0)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueWasm_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (I64Sub (I64Const [64]) (I64Clz x))
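+ // Wasm has no bit-length instruction, so BitLen64 is computed as 64 - clz(x).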
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Clz, typ.Int64)
+ v1.AddArg(x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com16 x)
+ // result: (I64Xor x (I64Const [-1]))
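+ // Wasm has no bitwise-NOT instruction, so the complement is lowered as x XOR -1.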
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com32 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com8 x)
+ // result: (I64Xor x (I64Const [-1]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(-1)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpConst16(v *Value) bool {
+ // match: (Const16 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt16(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst32(v *Value) bool {
+ // match: (Const32 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt32(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConst8(v *Value) bool {
+ // match: (Const8 [c])
+ // result: (I64Const [int64(c)])
+ for {
+ c := auxIntToInt8(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstBool(v *Value) bool {
+ // match: (ConstBool [c])
+ // result: (I64Const [b2i(c)])
+ for {
+ c := auxIntToBool(v.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(b2i(c))
+ return true
+ }
+}
+func rewriteValueWasm_OpConstNil(v *Value) bool {
+ // match: (ConstNil)
+ // result: (I64Const [0])
+ for {
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz16 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x10000])))
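+ // ORing in bit 16 guarantees a set bit at or below position 16, so Ctz16 of a
+ // zero input yields 16 (matching bits.TrailingZeros16) instead of 64.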
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x10000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz32 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100000000])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100000000)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz8 x)
+ // result: (I64Ctz (I64Or x (I64Const [0x100])))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Ctz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Or, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0x100)
+ v0.AddArg2(x, v1)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto32F x)
+ // result: (F32ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32Uto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32Uto64F x)
+ // result: (F64ConvertI64U (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64U)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to32F x)
+ // result: (F32ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF32ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Cvt32to64F x)
+ // result: (F64ConvertI64S (SignExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmF64ConvertI64S)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 [false] x y)
+ // result: (I64DivS (SignExt16to64 x) (SignExt16to64 y))
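+ // The aux bool flags divisions that need fix-up for the MinInt / -1 overflow
+ // case; only [false] (no fix-up needed) is matched here.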
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16u x y)
+ // result: (I64DivU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32 [false] x y)
+ // result: (I64DivS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div32u x y)
+ // result: (I64DivU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Div64 [false] x y)
+ // result: (I64DivS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 x y)
+ // result: (I64DivS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u x y)
+ // result: (I64DivU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64DivU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x y)
+ // result: (I64Eq (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x y)
+ // result: (I64Eq (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x y)
+ // result: (I64Eq (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (IsNonNil p)
+ // result: (I64Eqz (I64Eqz p))
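+ // Two I64Eqz operations booleanize the pointer: the result is 1 exactly when p is non-zero.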
+ for {
+ p := v_0
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16 x y)
+ // result: (I64LeS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq16U x y)
+ // result: (I64LeU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32 x y)
+ // result: (I64LeS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq32U x y)
+ // result: (I64LeU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8 x y)
+ // result: (I64LeS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq8U x y)
+ // result: (I64LeU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LeU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16 x y)
+ // result: (I64LtS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less16U x y)
+ // result: (I64LtU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32 x y)
+ // result: (I64LtS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less32U x y)
+ // result: (I64LtU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8 x y)
+ // result: (I64LtS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less8U x y)
+ // result: (I64LtU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64LtU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Load <t> ptr mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 8
+ // result: (I64Load ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && !t.IsSigned()
+ // result: (I64Load32U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 4 && t.IsSigned()
+ // result: (I64Load32S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 4 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && !t.IsSigned()
+ // result: (I64Load16U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 2 && t.IsSigned()
+ // result: (I64Load16S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 2 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && !t.IsSigned()
+ // result: (I64Load8U ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.Size() == 1 && t.IsSigned()
+ // result: (I64Load8S ptr mem)
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.Size() == 1 && t.IsSigned()) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpLocalAddr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (LocalAddr {sym} base _)
+ // result: (LoweredAddr {sym} base)
+ for {
+ sym := auxToSym(v.Aux)
+ base := v_0
+ v.reset(OpWasmLoweredAddr)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
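+ // All narrower shifts are re-expressed as Lsh64x64 with the shift count
+ // zero-extended; the Lsh64x64 rules further down then handle the bounds checks.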
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64Shl x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64Shl x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64Shl)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Select (I64Shl x y) (I64Const [0]) (I64LtU y (I64Const [64])))
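+ // Go defines x << y as 0 when y >= 64, whereas Wasm's i64.shl uses the count
+ // modulo 64, so a Select guards the shift with an explicit bounds check.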
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x16 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x32 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x8 [c] x y)
+ // result: (Lsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16 [false] x y)
+ // result: (I64RemS (SignExt16to64 x) (SignExt16to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod16u x y)
+ // result: (I64RemU (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32 [false] x y)
+ // result: (I64RemS (SignExt32to64 x) (SignExt32to64 y))
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod32u x y)
+ // result: (I64RemU (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mod64 [false] x y)
+ // result: (I64RemS x y)
+ for {
+ if auxIntToBool(v.AuxInt) != false {
+ break
+ }
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v.AddArg2(x, y)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8 x y)
+ // result: (I64RemS (SignExt8to64 x) (SignExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemS)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mod8u x y)
+ // result: (I64RemU (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64RemU)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Move [0] _ _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Move [1] dst src mem)
+ // result: (I64Store8 dst (I64Load8U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [2] dst src mem)
+ // result: (I64Store16 dst (I64Load16U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [4] dst src mem)
+ // result: (I64Store32 dst (I64Load32U src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [8] dst src mem)
+ // result: (I64Store dst (I64Load src mem) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AddArg2(src, mem)
+ v.AddArg3(dst, v0, mem)
+ return true
+ }
+ // match: (Move [16] dst src mem)
+ // result: (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [3] dst src mem)
+ // result: (I64Store8 [2] dst (I64Load8U [2] src mem) (I64Store16 dst (I64Load16U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(2)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [5] dst src mem)
+ // result: (I64Store8 [4] dst (I64Load8U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load8U, typ.UInt8)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [6] dst src mem)
+ // result: (I64Store16 [4] dst (I64Load16U [4] src mem) (I64Store32 dst (I64Load32U src mem) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load16U, typ.UInt16)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [7] dst src mem)
+ // result: (I64Store32 [3] dst (I64Load32U [3] src mem) (I64Store32 dst (I64Load32U src mem) mem))
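+ // Seven bytes are copied with two overlapping 4-byte loads/stores (at offsets
+ // 0 and 3) instead of a 4+2+1 sequence.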
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ dst := v_0
+ src := v_1
+ mem := v_2
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v0.AuxInt = int64ToAuxInt(3)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load32U, typ.UInt32)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 8 && s < 16
+ // result: (I64Store [s-8] dst (I64Load [s-8] src mem) (I64Store dst (I64Load src mem) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 8 && s < 16) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(s - 8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(s - 8)
+ v0.AddArg2(src, mem)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v2.AddArg2(src, mem)
+ v1.AddArg3(dst, v2, mem)
+ v.AddArg3(dst, v0, v1)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 <= 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (I64Store dst (I64Load src mem) mem))
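+ // One 8-byte copy covers the leading s%16 bytes (possibly overlapping what
+ // follows), and the remaining multiple of 16 is handled by a recursive Move
+ // starting at offset s%16.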
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 <= 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v3.AddArg2(src, mem)
+ v2.AddArg3(dst, v3, mem)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s > 16 && s%16 != 0 && s%16 > 8
+ // result: (Move [s-s%16] (OffPtr <dst.Type> dst [s%16]) (OffPtr <src.Type> src [s%16]) (I64Store [8] dst (I64Load [8] src mem) (I64Store dst (I64Load src mem) mem)))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s > 16 && s%16 != 0 && s%16 > 8) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s - s%16)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, dst.Type)
+ v0.AuxInt = int64ToAuxInt(s % 16)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, src.Type)
+ v1.AuxInt = int64ToAuxInt(s % 16)
+ v1.AddArg(src)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(8)
+ v3.AddArg2(src, mem)
+ v4 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v5 := b.NewValue0(v.Pos, OpWasmI64Load, typ.UInt64)
+ v5.AddArg2(src, mem)
+ v4.AddArg3(dst, v5, mem)
+ v2.AddArg3(dst, v3, v4)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+ // match: (Move [s] dst src mem)
+ // cond: s%8 == 0 && logLargeCopy(v, s)
+ // result: (LoweredMove [s/8] dst src mem)
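+ // Remaining sizes that are multiples of 8 fall through to LoweredMove, whose
+ // AuxInt is the number of 8-byte words to copy.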
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(s%8 == 0 && logLargeCopy(v, s)) {
+ break
+ }
+ v.reset(OpWasmLoweredMove)
+ v.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg16 x)
+ // result: (I64Sub (I64Const [0]) x)
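+ // Wasm has no integer negate instruction, so negation is lowered as 0 - x.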
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg32 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg64 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neg8 x)
+ // result: (I64Sub (I64Const [0]) x)
+ for {
+ x := v_0
+ v.reset(OpWasmI64Sub)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x y)
+ // result: (I64Ne (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x y)
+ // result: (I64Ne (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x y)
+ // result: (I64Ne (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount16 x)
+ // result: (I64Popcnt (ZeroExt16to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount32 x)
+ // result: (I64Popcnt (ZeroExt32to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpPopCount8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (PopCount8 x)
+ // result: (I64Popcnt (ZeroExt8to64 x))
+ for {
+ x := v_0
+ v.reset(OpWasmI64Popcnt)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft16 <t> x (I64Const [c]))
+ // result: (Or16 (Lsh16x64 <t> x (I64Const [c&15])) (Rsh16Ux64 <t> x (I64Const [-c&15])))
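+ // Wasm rotate instructions only exist at 32- and 64-bit widths, so a 16-bit
+ // rotate by a constant is expanded into two shifts ORed together.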
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 15)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 15)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (RotateLeft8 <t> x (I64Const [c]))
+ // result: (Or8 (Lsh8x64 <t> x (I64Const [c&7])) (Rsh8Ux64 <t> x (I64Const [-c&7])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(c & 7)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(-c & 7)
+ v2.AddArg2(x, v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt16to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt32to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrU x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrU x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrU)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64Const [0])
+ for {
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Select (I64ShrU x y) (I64Const [0]) (I64LtU y (I64Const [64])))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmSelect)
+ v0 := b.NewValue0(v.Pos, OpWasmI64ShrU, typ.Int64)
+ v0.AddArg2(x, y)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v.AddArg3(v0, v1, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x y)
+ // cond: shiftIsBounded(v)
+ // result: (I64ShrS x y)
+ for {
+ x := v_0
+ y := v_1
+ if !(shiftIsBounded(v)) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) < 64
+ // result: (I64ShrS x (I64Const [c]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) < 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x (I64Const [c]))
+ // cond: uint64(c) >= 64
+ // result: (I64ShrS x (I64Const [63]))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (I64ShrS x (Select <typ.Int64> y (I64Const [63]) (I64LtU y (I64Const [64]))))
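+ // For arithmetic right shifts the count is clamped to 63 rather than producing
+ // 0: shifting by 64 or more must still replicate the sign bit.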
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmSelect, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(63)
+ v2 := b.NewValue0(v.Pos, OpWasmI64LtU, typ.Bool)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v3.AuxInt = int64ToAuxInt(64)
+ v2.AddArg2(y, v3)
+ v0.AddArg3(y, v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 [c] x y)
+ // result: (Rsh64x64 [c] x (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(y)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux16 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux32 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux8 [c] x y)
+ // result: (Rsh64Ux64 [c] (ZeroExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x16 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt16to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x32 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt32to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) y)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v.AddArg2(v0, y)
+ return true
+ }
+}
+func rewriteValueWasm_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x8 [c] x y)
+ // result: (Rsh64x64 [c] (SignExt8to64 x) (ZeroExt8to64 y))
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x64)
+ v.AuxInt = boolToAuxInt(c)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to64, typ.Int64)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to64, typ.UInt64)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to32 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to32 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
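+ // Without the sign-extension extension (GOWASM.SignExt), sign-extend manually:
+ // shift left by 48 so the 16-bit value's sign bit lands in bit 63, then
+ // arithmetic shift right by 48 to smear it back down.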
+ // match: (SignExt16to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x:(I64Load16S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend16S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend16S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt16to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [48])) (I64Const [48]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(48)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x:(I64Load32S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend32S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend32S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt32to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [32])) (I64Const [32]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(32)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to16 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to16 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to32 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to32 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x:(I64Load8S _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8S {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // cond: buildcfg.GOWASM.SignExt
+ // result: (I64Extend8S x)
+ for {
+ x := v_0
+ if !(buildcfg.GOWASM.SignExt) {
+ break
+ }
+ v.reset(OpWasmI64Extend8S)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SignExt8to64 x)
+ // result: (I64ShrS (I64Shl x (I64Const [56])) (I64Const [56]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Shl, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(56)
+ v0.AddArg2(x, v1)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValueWasm_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Slicemask x)
+ // result: (I64ShrS (I64Sub (I64Const [0]) x) (I64Const [63]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64ShrS)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Sub, typ.Int64)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v1.AuxInt = int64ToAuxInt(0)
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValueWasm_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Store {t} ptr val mem)
+ // cond: is64BitFloat(t)
+ // result: (F64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is64BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: is32BitFloat(t)
+ // result: (F32Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(is32BitFloat(t)) {
+ break
+ }
+ v.reset(OpWasmF32Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 8
+ // result: (I64Store ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 4
+ // result: (I64Store32 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 4) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 2
+ // result: (I64Store16 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 2) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ // match: (Store {t} ptr val mem)
+ // cond: t.Size() == 1
+ // result: (I64Store8 ptr val mem)
+ for {
+ t := auxToType(v.Aux)
+ ptr := v_0
+ val := v_1
+ mem := v_2
+ if !(t.Size() == 1) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Add (F64Const [x]) (F64Const [y]))
+ // result: (F64Const [x + y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x + y)
+ return true
+ }
+ // match: (F64Add (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Add y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmF64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (F64Mul (F64Const [x]) (F64Const [y]))
+ // cond: !math.IsNaN(x * y)
+ // result: (F64Const [x * y])
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpWasmF64Const {
+ break
+ }
+ y := auxIntToFloat64(v_1.AuxInt)
+ if !(!math.IsNaN(x * y)) {
+ break
+ }
+ v.reset(OpWasmF64Const)
+ v.AuxInt = float64ToAuxInt(x * y)
+ return true
+ }
+ // match: (F64Mul (F64Const [x]) y)
+ // cond: y.Op != OpWasmF64Const
+ // result: (F64Mul y (F64Const [x]))
+ for {
+ if v_0.Op != OpWasmF64Const {
+ break
+ }
+ x := auxIntToFloat64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmF64Const) {
+ break
+ }
+ v.reset(OpWasmF64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmF64Const, typ.Float64)
+ v0.AuxInt = float64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Add(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Add (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x + y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x + y)
+ return true
+ }
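+ // Canonicalize: move a constant operand to the right-hand side so that later
+ // rules, such as the I64AddConst match below, only need to inspect Args[1].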
+ // match: (I64Add (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Add y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Add)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Add x (I64Const [y]))
+ // result: (I64AddConst [y] x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64AddConst)
+ v.AuxInt = int64ToAuxInt(y)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64AddConst(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64AddConst [0] x)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ x := v_0
+ v.copyOf(x)
+ return true
+ }
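+ // Fold the constant offset into an address computation as long as the combined
+ // offset still fits in an unsigned 32-bit value (Wasm addresses are 32-bit).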
+ // match: (I64AddConst [off] (LoweredAddr {sym} [off2] base))
+ // cond: isU32Bit(off+int64(off2))
+ // result: (LoweredAddr {sym} [int32(off)+off2] base)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ if !(isU32Bit(off + int64(off2))) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off) + off2)
+ v.Aux = symToAux(sym)
+ v.AddArg(base)
+ return true
+ }
+ // match: (I64AddConst [off] x:(SP))
+ // cond: isU32Bit(off)
+ // result: (LoweredAddr [int32(off)] x)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpSP || !(isU32Bit(off)) {
+ break
+ }
+ v.reset(OpWasmLoweredAddr)
+ v.AuxInt = int32ToAuxInt(int32(off))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64And(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64And (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x & y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x & y)
+ return true
+ }
+ // match: (I64And (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64And y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eq(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Eq (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Eq y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Eq)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Eq x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Eqz(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (I64Eqz (I64Eqz (I64Eqz x)))
+ // result: (I64Eqz x)
+ for {
+ if v_0.Op != OpWasmI64Eqz {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpWasmI64Eqz {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LeU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LeU x (I64Const [0]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ // match: (I64LeU (I64Const [1]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 1 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
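+ // A load from a constant offset into a read-only symbol is folded to a
+ // constant by reading the bytes out of the symbol's data at compile time.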
+ // match: (I64Load [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read64(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load16S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load16U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load16U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load16U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load16U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read16(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load32S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (I64Load32U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load32U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load32U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load32U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read32(sym, off+int64(off2), config.ctxt.Arch.ByteOrder)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8S(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8S [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8S [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8S)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Load8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Load8U [off] (I64AddConst [off2] ptr) mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Load8U [off+off2] ptr mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ mem := v_1
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Load8U)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg2(ptr, mem)
+ return true
+ }
+ // match: (I64Load8U [off] (LoweredAddr {sym} [off2] (SB)) _)
+ // cond: symIsRO(sym) && isU32Bit(off+int64(off2))
+ // result: (I64Const [int64(read8(sym, off+int64(off2)))])
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmLoweredAddr {
+ break
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym := auxToSym(v_0.Aux)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSB || !(symIsRO(sym) && isU32Bit(off+int64(off2))) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(read8(sym, off+int64(off2))))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64LtU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64LtU (I64Const [0]) x)
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ if v_0.Op != OpWasmI64Const || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (I64LtU x (I64Const [1]))
+ // result: (I64Eqz x)
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 1 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Mul(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Mul (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x * y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x * y)
+ return true
+ }
+ // match: (I64Mul (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Mul y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Mul)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Ne(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x == y
+ // result: (I64Const [0])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x == y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) (I64Const [y]))
+ // cond: x != y
+ // result: (I64Const [1])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ if !(x != y) {
+ break
+ }
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(1)
+ return true
+ }
+ // match: (I64Ne (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Ne y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Ne)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ // match: (I64Ne x (I64Const [0]))
+ // result: (I64Eqz (I64Eqz x))
+ for {
+ x := v_0
+ if v_1.Op != OpWasmI64Const || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpWasmI64Eqz)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Eqz, typ.Bool)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Or(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Or (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x | y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x | y)
+ return true
+ }
+ // match: (I64Or (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Or y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Or)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Shl(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Shl (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x << uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x << uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrS(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrS (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x >> uint64(y)])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x >> uint64(y))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64ShrU(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64ShrU (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [int64(uint64(x) >> uint64(y))])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(int64(uint64(x) >> uint64(y)))
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store16(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store16 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store16 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store32(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store32 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store32 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Store8(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (I64Store8 [off] (I64AddConst [off2] ptr) val mem)
+ // cond: isU32Bit(off+off2)
+ // result: (I64Store8 [off+off2] ptr val mem)
+ for {
+ off := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpWasmI64AddConst {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+ ptr := v_0.Args[0]
+ val := v_1
+ mem := v_2
+ if !(isU32Bit(off + off2)) {
+ break
+ }
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(off + off2)
+ v.AddArg3(ptr, val, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpWasmI64Xor(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (I64Xor (I64Const [x]) (I64Const [y]))
+ // result: (I64Const [x ^ y])
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpWasmI64Const {
+ break
+ }
+ y := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpWasmI64Const)
+ v.AuxInt = int64ToAuxInt(x ^ y)
+ return true
+ }
+ // match: (I64Xor (I64Const [x]) y)
+ // cond: y.Op != OpWasmI64Const
+ // result: (I64Xor y (I64Const [x]))
+ for {
+ if v_0.Op != OpWasmI64Const {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ y := v_1
+ if !(y.Op != OpWasmI64Const) {
+ break
+ }
+ v.reset(OpWasmI64Xor)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(x)
+ v.AddArg2(y, v0)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Zero [0] _ mem)
+ // result: mem
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ mem := v_1
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero [1] destptr mem)
+ // result: (I64Store8 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [2] destptr mem)
+ // result: (I64Store16 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [4] destptr mem)
+ // result: (I64Store32 destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 4 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [8] destptr mem)
+ // result: (I64Store destptr (I64Const [0]) mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 8 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(destptr, v0, mem)
+ return true
+ }
+ // match: (Zero [3] destptr mem)
+ // result: (I64Store8 [2] destptr (I64Const [0]) (I64Store16 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(2)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store16, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [5] destptr mem)
+ // result: (I64Store8 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 5 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store8)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [6] destptr mem)
+ // result: (I64Store16 [4] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 6 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store16)
+ v.AuxInt = int64ToAuxInt(4)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [7] destptr mem)
+ // result: (I64Store32 [3] destptr (I64Const [0]) (I64Store32 destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 7 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store32)
+ v.AuxInt = int64ToAuxInt(3)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store32, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
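+ // For sizes over 8 bytes that are not a multiple of 8, store one full zero
+ // word at the start (covering the s%8 leftover bytes) and recurse on the
+ // remaining multiple-of-8 region starting at offset s%8; the overlap is harmless.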
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 != 0 && s > 8
+ // result: (Zero [s-s%8] (OffPtr <destptr.Type> destptr [s%8]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 != 0 && s > 8) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(s - s%8)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, destptr.Type)
+ v0.AuxInt = int64ToAuxInt(s % 8)
+ v0.AddArg(destptr)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(0)
+ v1.AddArg3(destptr, v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Zero [16] destptr mem)
+ // result: (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))
+ for {
+ if auxIntToInt64(v.AuxInt) != 16 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(8)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AddArg3(destptr, v0, mem)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [24] destptr mem)
+ // result: (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem)))
+ for {
+ if auxIntToInt64(v.AuxInt) != 24 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(16)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(8)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AddArg3(destptr, v0, mem)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
+ // match: (Zero [32] destptr mem)
+ // result: (I64Store [24] destptr (I64Const [0]) (I64Store [16] destptr (I64Const [0]) (I64Store [8] destptr (I64Const [0]) (I64Store destptr (I64Const [0]) mem))))
+ for {
+ if auxIntToInt64(v.AuxInt) != 32 {
+ break
+ }
+ destptr := v_0
+ mem := v_1
+ v.reset(OpWasmI64Store)
+ v.AuxInt = int64ToAuxInt(24)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0)
+ v1 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(16)
+ v2 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v2.AuxInt = int64ToAuxInt(8)
+ v3 := b.NewValue0(v.Pos, OpWasmI64Store, types.TypeMem)
+ v3.AddArg3(destptr, v0, mem)
+ v2.AddArg3(destptr, v0, v3)
+ v1.AddArg3(destptr, v0, v2)
+ v.AddArg3(destptr, v0, v1)
+ return true
+ }
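+ // Zeroings larger than 32 bytes (and a multiple of 8) use the LoweredZero
+ // loop; its AuxInt is the number of 8-byte words to clear, hence s/8.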
+ // match: (Zero [s] destptr mem)
+ // cond: s%8 == 0 && s > 32
+ // result: (LoweredZero [s/8] destptr mem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ destptr := v_0
+ mem := v_1
+ if !(s%8 == 0 && s > 32) {
+ break
+ }
+ v.reset(OpWasmLoweredZero)
+ v.AuxInt = int64ToAuxInt(s / 8)
+ v.AddArg2(destptr, mem)
+ return true
+ }
+ return false
+}
+func rewriteValueWasm_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to32 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to32 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x:(I64Load16U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load16U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt16to64 x)
+ // result: (I64And x (I64Const [0xffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x:(I64Load32U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load32U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt32to64 x)
+ // result: (I64And x (I64Const [0xffffffff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xffffffff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to16 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to16 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to32 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to32 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValueWasm_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x:(I64Load8U _ _))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpWasmI64Load8U {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (ZeroExt8to64 x)
+ // result: (I64And x (I64Const [0xff]))
+ for {
+ x := v_0
+ v.reset(OpWasmI64And)
+ v0 := b.NewValue0(v.Pos, OpWasmI64Const, typ.Int64)
+ v0.AuxInt = int64ToAuxInt(0xff)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteBlockWasm(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewrite_test.go b/src/cmd/compile/internal/ssa/rewrite_test.go
new file mode 100644
index 0000000..357fe11
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewrite_test.go
@@ -0,0 +1,220 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+// The compiler emits a memmove call for copy(x[1:], x[:]), but because the size
+// is known it may lower the call to an OpMove instead. Check that the resulting
+// OpMove handles the overlapping ranges correctly, i.e. that it behaves like memmove.
+func TestMove(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+ t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
+func TestMoveSmall(t *testing.T) {
+ x := [...]byte{1, 2, 3, 4, 5, 6, 7}
+ copy(x[1:], x[:])
+ for i := 1; i < len(x); i++ {
+ if int(x[i]) != i {
+ t.Errorf("Memmove got converted to OpMove in alias-unsafe way. Got %d instead of %d in position %d", int(x[i]), i, i+1)
+ }
+ }
+}
+
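+// TestSubFlags checks the flag constants computed by subFlags32, which models
+// the condition flags of a 32-bit subtraction: 0 - 1 must set both the signed
+// (lt) and unsigned (ult) less-than flags.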
+func TestSubFlags(t *testing.T) {
+ if !subFlags32(0, 1).lt() {
+ t.Errorf("subFlags32(0,1).lt() returned false")
+ }
+ if !subFlags32(0, 1).ult() {
+ t.Errorf("subFlags32(0,1).ult() returned false")
+ }
+}
+
+func TestIsPPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ input int64
+ expected bool
+ }{
+ {0x00000001, true},
+ {0x80000001, true},
+ {0x80010001, false},
+ {0xFFFFFFFA, false},
+ {0xF0F0F0F0, false},
+ {0xFFFFFFFD, true},
+ {0x80000000, true},
+ {0x00000000, false},
+ {0xFFFFFFFF, true},
+ {0x0000FFFF, true},
+ {0xFF0000FF, true},
+ {0x00FFFF00, true},
+ }
+
+ for _, v := range tests {
+ if v.expected != isPPC64WordRotateMask(v.input) {
+ t.Errorf("isPPC64WordRotateMask(0x%x) failed", v.input)
+ }
+ }
+}
+
+func TestEncodeDecodePPC64WordRotateMask(t *testing.T) {
+ tests := []struct {
+ rotate int64
+ mask uint64
+ nbits,
+ mb,
+ me,
+ encoded int64
+ }{
+ {1, 0x00000001, 32, 31, 31, 0x20011f20},
+ {2, 0x80000001, 32, 31, 0, 0x20021f01},
+ {3, 0xFFFFFFFD, 32, 31, 29, 0x20031f1e},
+ {4, 0x80000000, 32, 0, 0, 0x20040001},
+ {5, 0xFFFFFFFF, 32, 0, 31, 0x20050020},
+ {6, 0x0000FFFF, 32, 16, 31, 0x20061020},
+ {7, 0xFF0000FF, 32, 24, 7, 0x20071808},
+ {8, 0x00FFFF00, 32, 8, 23, 0x20080818},
+
+ {9, 0x0000000000FFFF00, 64, 40, 55, 0x40092838},
+ {10, 0xFFFF000000000000, 64, 0, 15, 0x400A0010},
+ {10, 0xFFFF000000000001, 64, 63, 15, 0x400A3f10},
+ }
+
+ for i, v := range tests {
+ result := encodePPC64RotateMask(v.rotate, int64(v.mask), v.nbits)
+ if result != v.encoded {
+ t.Errorf("encodePPC64RotateMask(%d,0x%x,%d) = 0x%x, expected 0x%x", v.rotate, v.mask, v.nbits, result, v.encoded)
+ }
+ rotate, mb, me, mask := DecodePPC64RotateMask(result)
+ if rotate != v.rotate || mb != v.mb || me != v.me || mask != v.mask {
+ t.Errorf("DecodePPC64RotateMask(Test %d) got (%d, %d, %d, %x) expected (%d, %d, %d, %x)", i, rotate, mb, me, mask, v.rotate, v.mb, v.me, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiSrw(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), 4, true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), 4, true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 4, false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), 4, true, 12, 0xFFFF0000},
+ // ((x>>32)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiSrw(int64(v.clrlsldi), v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64ClrlsldiRlwinm(t *testing.T) {
+ tests := []struct {
+ clrlsldi int32
+ rlwinm int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ // ((x<<4)&0xFF00)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(4, 0xFF00, 32), false, 0, 0},
+ // ((x>>4)&0xFF)<<4
+ {newPPC64ShiftAuxInt(4, 56, 63, 64), encodePPC64RotateMask(28, 0x0FFFFFFF, 32), true, 0, 0xFF0},
+ // ((x>>4)&0xFFFF)<<4
+ {newPPC64ShiftAuxInt(4, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 0, 0xFFFF0},
+ // ((x>>4)&0xFFFF)<<17
+ {newPPC64ShiftAuxInt(17, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), false, 0, 0},
+ // ((x>>4)&0xFFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xFFFF, 32), true, 12, 0xFFFF0000},
+ // ((x>>4)&0xF000FFFF)<<16
+ {newPPC64ShiftAuxInt(16, 48, 63, 64), encodePPC64RotateMask(28, 0xF000FFFF, 32), true, 12, 0xFFFF0000},
+ }
+ for i, v := range tests {
+ result := mergePPC64ClrlsldiRlwinm(v.clrlsldi, v.rlwinm)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64ClrlsldiRlwinm(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64SldiSrw(t *testing.T) {
+ tests := []struct {
+ sld int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {4, 4, true, 0, 0xFFFFFFF0},
+ {4, 8, true, 28, 0x0FFFFFF0},
+ {0, 0, true, 0, 0xFFFFFFFF},
+ {8, 4, false, 0, 0},
+ {0, 32, false, 0, 0},
+ {0, 31, true, 1, 0x1},
+ {31, 31, true, 0, 0x80000000},
+ {32, 32, false, 0, 0},
+ }
+ for i, v := range tests {
+ result := mergePPC64SldiSrw(v.sld, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64SldiSrw(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64SldiSrw(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
+
+func TestMergePPC64AndSrwi(t *testing.T) {
+ tests := []struct {
+ and int64
+ srw int64
+ valid bool
+ rotate int64
+ mask uint64
+ }{
+ {0x000000FF, 8, true, 24, 0xFF},
+ {0xF00000FF, 8, true, 24, 0xFF},
+ {0x0F0000FF, 4, false, 0, 0},
+ {0x00000000, 4, false, 0, 0},
+ {0xF0000000, 4, false, 0, 0},
+ {0xF0000000, 32, false, 0, 0},
+ {0xFFFFFFFF, 0, true, 0, 0xFFFFFFFF},
+ }
+ for i, v := range tests {
+ result := mergePPC64AndSrwi(v.and, v.srw)
+ if v.valid && result == 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) did not merge", i)
+ } else if !v.valid && result != 0 {
+ t.Errorf("mergePPC64AndSrwi(Test %d) should return 0", i)
+ } else if r, _, _, m := DecodePPC64RotateMask(result); v.rotate != r || v.mask != m {
+ t.Errorf("mergePPC64AndSrwi(Test %d) got (%d,0x%x) expected (%d,0x%x)", i, r, m, v.rotate, v.mask)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec.go b/src/cmd/compile/internal/ssa/rewritedec.go
new file mode 100644
index 0000000..2a73a5d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec.go
@@ -0,0 +1,429 @@
+// Code generated from gen/dec.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec(v *Value) bool {
+ switch v.Op {
+ case OpComplexImag:
+ return rewriteValuedec_OpComplexImag(v)
+ case OpComplexReal:
+ return rewriteValuedec_OpComplexReal(v)
+ case OpIData:
+ return rewriteValuedec_OpIData(v)
+ case OpITab:
+ return rewriteValuedec_OpITab(v)
+ case OpLoad:
+ return rewriteValuedec_OpLoad(v)
+ case OpSliceCap:
+ return rewriteValuedec_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuedec_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuedec_OpSlicePtr(v)
+ case OpSlicePtrUnchecked:
+ return rewriteValuedec_OpSlicePtrUnchecked(v)
+ case OpStore:
+ return rewriteValuedec_OpStore(v)
+ case OpStringLen:
+ return rewriteValuedec_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuedec_OpStringPtr(v)
+ }
+ return false
+}
+func rewriteValuedec_OpComplexImag(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ComplexImag (ComplexMake _ imag ))
+ // result: imag
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ imag := v_0.Args[1]
+ v.copyOf(imag)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpComplexReal(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ComplexReal (ComplexMake real _ ))
+ // result: real
+ for {
+ if v_0.Op != OpComplexMake {
+ break
+ }
+ real := v_0.Args[0]
+ v.copyOf(real)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpIData(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (IData (IMake _ data))
+ // result: data
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ data := v_0.Args[1]
+ v.copyOf(data)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpITab(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ITab (IMake itab _))
+ // result: itab
+ for {
+ if v_0.Op != OpIMake {
+ break
+ }
+ itab := v_0.Args[0]
+ v.copyOf(itab)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
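+ // Loads of composite values are decomposed into loads of their components:
+ // complex numbers into two floats, strings into pointer and length, slices
+ // into pointer, length and capacity, and interfaces into itab and data words.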
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 8
+ // result: (ComplexMake (Load <typ.Float32> ptr mem) (Load <typ.Float32> (OffPtr <typ.Float32Ptr> [4] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 8) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsComplex() && t.Size() == 16
+ // result: (ComplexMake (Load <typ.Float64> ptr mem) (Load <typ.Float64> (OffPtr <typ.Float64Ptr> [8] ptr) mem) )
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsComplex() && t.Size() == 16) {
+ break
+ }
+ v.reset(OpComplexMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Float64)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v2.AuxInt = int64ToAuxInt(8)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsString()
+ // result: (StringMake (Load <typ.BytePtr> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsString()) {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsSlice()
+ // result: (SliceMake (Load <t.Elem().PtrTo()> ptr mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [config.PtrSize] ptr) mem) (Load <typ.Int> (OffPtr <typ.IntPtr> [2*config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsSlice()) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem().PtrTo())
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v3 := b.NewValue0(v.Pos, OpLoad, typ.Int)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v4.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v4.AddArg(ptr)
+ v3.AddArg2(v4, mem)
+ v.AddArg3(v0, v1, v3)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsInterface()
+ // result: (IMake (Load <typ.Uintptr> ptr mem) (Load <typ.BytePtr> (OffPtr <typ.BytePtrPtr> [config.PtrSize] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsInterface()) {
+ break
+ }
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Uintptr)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.BytePtr)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceCap (SliceMake _ _ cap))
+ // result: cap
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ cap := v_0.Args[2]
+ v.copyOf(cap)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceLen (SliceMake _ len _))
+ // result: len
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtr (SliceMake ptr _ _ ))
+ // result: ptr
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpSlicePtrUnchecked(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtrUnchecked (SliceMake ptr _ _ ))
+ // result: ptr
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
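+ // Stores of composite values are split into a chain of component stores: the
+ // first field is written at offset 0 (innermost in the memory chain) and the
+ // remaining fields at their respective offsets.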
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 8
+ // result: (Store {typ.Float32} (OffPtr <typ.Float32Ptr> [4] dst) imag (Store {typ.Float32} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float32)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float32Ptr)
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float32)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store {t} dst (ComplexMake real imag) mem)
+ // cond: t.Size() == 16
+ // result: (Store {typ.Float64} (OffPtr <typ.Float64Ptr> [8] dst) imag (Store {typ.Float64} dst real mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpComplexMake {
+ break
+ }
+ imag := v_1.Args[1]
+ real := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 16) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Float64)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.Float64Ptr)
+ v0.AuxInt = int64ToAuxInt(8)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Float64)
+ v1.AddArg3(dst, real, mem)
+ v.AddArg3(v0, imag, v1)
+ return true
+ }
+ // match: (Store dst (StringMake ptr len) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {typ.BytePtr} dst ptr mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStringMake {
+ break
+ }
+ len := v_1.Args[1]
+ ptr := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.BytePtr)
+ v1.AddArg3(dst, ptr, mem)
+ v.AddArg3(v0, len, v1)
+ return true
+ }
+ // match: (Store {t} dst (SliceMake ptr len cap) mem)
+ // result: (Store {typ.Int} (OffPtr <typ.IntPtr> [2*config.PtrSize] dst) cap (Store {typ.Int} (OffPtr <typ.IntPtr> [config.PtrSize] dst) len (Store {t.Elem().PtrTo()} dst ptr mem)))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpSliceMake {
+ break
+ }
+ cap := v_1.Args[2]
+ ptr := v_1.Args[0]
+ len := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.Int)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v0.AuxInt = int64ToAuxInt(2 * config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Int)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.IntPtr)
+ v2.AuxInt = int64ToAuxInt(config.PtrSize)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.Elem().PtrTo())
+ v3.AddArg3(dst, ptr, mem)
+ v1.AddArg3(v2, len, v3)
+ v.AddArg3(v0, cap, v1)
+ return true
+ }
+ // match: (Store dst (IMake itab data) mem)
+ // result: (Store {typ.BytePtr} (OffPtr <typ.BytePtrPtr> [config.PtrSize] dst) data (Store {typ.Uintptr} dst itab mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpIMake {
+ break
+ }
+ data := v_1.Args[1]
+ itab := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(typ.BytePtr)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, typ.BytePtrPtr)
+ v0.AuxInt = int64ToAuxInt(config.PtrSize)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(typ.Uintptr)
+ v1.AddArg3(dst, itab, mem)
+ v.AddArg3(v0, data, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringLen (StringMake _ len))
+ // result: len
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ len := v_0.Args[1]
+ v.copyOf(len)
+ return true
+ }
+ return false
+}
+func rewriteValuedec_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringPtr (StringMake ptr _))
+ // result: ptr
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ ptr := v_0.Args[0]
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteBlockdec(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritedec64.go b/src/cmd/compile/internal/ssa/rewritedec64.go
new file mode 100644
index 0000000..7d9656a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritedec64.go
@@ -0,0 +1,2462 @@
+// Code generated from gen/dec64.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+func rewriteValuedec64(v *Value) bool {
+ switch v.Op {
+ case OpAdd64:
+ return rewriteValuedec64_OpAdd64(v)
+ case OpAnd64:
+ return rewriteValuedec64_OpAnd64(v)
+ case OpArg:
+ return rewriteValuedec64_OpArg(v)
+ case OpBitLen64:
+ return rewriteValuedec64_OpBitLen64(v)
+ case OpBswap64:
+ return rewriteValuedec64_OpBswap64(v)
+ case OpCom64:
+ return rewriteValuedec64_OpCom64(v)
+ case OpConst64:
+ return rewriteValuedec64_OpConst64(v)
+ case OpCtz64:
+ return rewriteValuedec64_OpCtz64(v)
+ case OpCtz64NonZero:
+ v.Op = OpCtz64
+ return true
+ case OpEq64:
+ return rewriteValuedec64_OpEq64(v)
+ case OpInt64Hi:
+ return rewriteValuedec64_OpInt64Hi(v)
+ case OpInt64Lo:
+ return rewriteValuedec64_OpInt64Lo(v)
+ case OpLeq64:
+ return rewriteValuedec64_OpLeq64(v)
+ case OpLeq64U:
+ return rewriteValuedec64_OpLeq64U(v)
+ case OpLess64:
+ return rewriteValuedec64_OpLess64(v)
+ case OpLess64U:
+ return rewriteValuedec64_OpLess64U(v)
+ case OpLoad:
+ return rewriteValuedec64_OpLoad(v)
+ case OpLsh16x64:
+ return rewriteValuedec64_OpLsh16x64(v)
+ case OpLsh32x64:
+ return rewriteValuedec64_OpLsh32x64(v)
+ case OpLsh64x16:
+ return rewriteValuedec64_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuedec64_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuedec64_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuedec64_OpLsh64x8(v)
+ case OpLsh8x64:
+ return rewriteValuedec64_OpLsh8x64(v)
+ case OpMul64:
+ return rewriteValuedec64_OpMul64(v)
+ case OpNeg64:
+ return rewriteValuedec64_OpNeg64(v)
+ case OpNeq64:
+ return rewriteValuedec64_OpNeq64(v)
+ case OpOr32:
+ return rewriteValuedec64_OpOr32(v)
+ case OpOr64:
+ return rewriteValuedec64_OpOr64(v)
+ case OpRsh16Ux64:
+ return rewriteValuedec64_OpRsh16Ux64(v)
+ case OpRsh16x64:
+ return rewriteValuedec64_OpRsh16x64(v)
+ case OpRsh32Ux64:
+ return rewriteValuedec64_OpRsh32Ux64(v)
+ case OpRsh32x64:
+ return rewriteValuedec64_OpRsh32x64(v)
+ case OpRsh64Ux16:
+ return rewriteValuedec64_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuedec64_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuedec64_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuedec64_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuedec64_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuedec64_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuedec64_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuedec64_OpRsh64x8(v)
+ case OpRsh8Ux64:
+ return rewriteValuedec64_OpRsh8Ux64(v)
+ case OpRsh8x64:
+ return rewriteValuedec64_OpRsh8x64(v)
+ case OpSignExt16to64:
+ return rewriteValuedec64_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuedec64_OpSignExt32to64(v)
+ case OpSignExt8to64:
+ return rewriteValuedec64_OpSignExt8to64(v)
+ case OpStore:
+ return rewriteValuedec64_OpStore(v)
+ case OpSub64:
+ return rewriteValuedec64_OpSub64(v)
+ case OpTrunc64to16:
+ return rewriteValuedec64_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuedec64_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuedec64_OpTrunc64to8(v)
+ case OpXor64:
+ return rewriteValuedec64_OpXor64(v)
+ case OpZeroExt16to64:
+ return rewriteValuedec64_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuedec64_OpZeroExt32to64(v)
+ case OpZeroExt8to64:
+ return rewriteValuedec64_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuedec64_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Add64 x y)
+ // result: (Int64Make (Add32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Add32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Add32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpAdd32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (And64 x y)
+ // result: (Int64Make (And32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (And32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpArg(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off+4]) (Arg <typ.UInt32> {n} [off]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && !config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off + 4)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.Int32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Arg {n} [off])
+ // cond: is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")
+ // result: (Int64Make (Arg <typ.UInt32> {n} [off]) (Arg <typ.UInt32> {n} [off+4]))
+ for {
+ off := auxIntToInt32(v.AuxInt)
+ n := auxToSym(v.Aux)
+ if !(is64BitInt(v.Type) && config.BigEndian && !v.Type.IsSigned() && !(b.Func.pass.name == "decompose builtin")) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(off)
+ v0.Aux = symToAux(n)
+ v1 := b.NewValue0(v.Pos, OpArg, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(off + 4)
+ v1.Aux = symToAux(n)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpBitLen64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (BitLen64 x)
+ // result: (Add32 <typ.Int> (BitLen32 <typ.Int> (Int64Hi x)) (BitLen32 <typ.Int> (Or32 <typ.UInt32> (Int64Lo x) (Zeromask (Int64Hi x)))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.Int
+ v0 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBitLen32, typ.Int)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v5.AddArg(v1)
+ v3.AddArg2(v4, v5)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpBswap64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Bswap64 x)
+ // result: (Int64Make (Bswap32 <typ.UInt32> (Int64Lo x)) (Bswap32 <typ.UInt32> (Int64Hi x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpBswap32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Com64 x)
+ // result: (Int64Make (Com32 <typ.UInt32> (Int64Hi x)) (Com32 <typ.UInt32> (Int64Lo x)))
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg(v3)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpConst64(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Const64 <t> [c])
+ // cond: t.IsSigned()
+ // result: (Int64Make (Const32 <typ.Int32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Const64 <t> [c])
+ // cond: !t.IsSigned()
+ // result: (Int64Make (Const32 <typ.UInt32> [int32(c>>32)]) (Const32 <typ.UInt32> [int32(c)]))
+ for {
+ t := v.Type
+ c := auxIntToInt64(v.AuxInt)
+ if !(!t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(int32(c >> 32))
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(c))
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Ctz64 x)
+ // result: (Add32 <typ.UInt32> (Ctz32 <typ.UInt32> (Int64Lo x)) (And32 <typ.UInt32> (Com32 <typ.UInt32> (Zeromask (Int64Lo x))) (Ctz32 <typ.UInt32> (Int64Hi x))))
+ for {
+ x := v_0
+ v.reset(OpAdd32)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpCom32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v4.AddArg(v1)
+ v3.AddArg(v4)
+ v5 := b.NewValue0(v.Pos, OpCtz32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v5.AddArg(v6)
+ v2.AddArg2(v3, v5)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x y)
+ // result: (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Eq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpAndB)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpInt64Hi(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Hi (Int64Make hi _))
+ // result: hi
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ hi := v_0.Args[0]
+ v.copyOf(hi)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpInt64Lo(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Int64Lo (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Leq64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Leq32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLeq32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64 x y)
+ // result: (OrB (Less32 (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Less64U x y)
+ // result: (OrB (Less32U (Int64Hi x) (Int64Hi y)) (AndB (Eq32 (Int64Hi x) (Int64Hi y)) (Less32U (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpAndB, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v4.AddArg2(v1, v2)
+ v5 := b.NewValue0(v.Pos, OpLess32U, typ.Bool)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> (OffPtr <typ.Int32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.Int32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && !config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem) (Load <typ.UInt32> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && !config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v1.AuxInt = int64ToAuxInt(4)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2.AddArg2(ptr, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && t.IsSigned()
+ // result: (Int64Make (Load <typ.Int32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: is64BitInt(t) && config.BigEndian && !t.IsSigned()
+ // result: (Int64Make (Load <typ.UInt32> ptr mem) (Load <typ.UInt32> (OffPtr <typ.UInt32Ptr> [4] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(is64BitInt(t) && config.BigEndian && !t.IsSigned()) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v0.AddArg2(ptr, mem)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, typ.UInt32Ptr)
+ v2.AuxInt = int64ToAuxInt(4)
+ v2.AddArg(ptr)
+ v1.AddArg2(v2, mem)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 x y)
+ // result: (Lsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 x y)
+ // result: (Lsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x16 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x16 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Lsh32x16 <typ.UInt32> (Int64Lo x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))) (Lsh32x16 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v7 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v7.AuxInt = int16ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x32 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x32 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Lsh32x32 <typ.UInt32> (Int64Lo x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))) (Lsh32x32 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v7.AuxInt = int32ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 x y)
+ // result: (Lsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x8 x s)
+ // result: (Int64Make (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Lsh32x8 <typ.UInt32> (Int64Hi x) s) (Rsh32Ux8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Lsh32x8 <typ.UInt32> (Int64Lo x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))) (Lsh32x8 <typ.UInt32> (Int64Lo x) s))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(x)
+ v2.AddArg2(v3, s)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v7 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v7.AuxInt = int8ToAuxInt(32)
+ v6.AddArg2(v7, s)
+ v4.AddArg2(v5, v6)
+ v1.AddArg2(v2, v4)
+ v8 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v9.AddArg2(s, v7)
+ v8.AddArg2(v5, v9)
+ v0.AddArg2(v1, v8)
+ v10 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v10.AddArg2(v5, s)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Lsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Lsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 x y)
+ // result: (Lsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpLsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 x y)
+ // result: (Int64Make (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Lo x) (Int64Hi y)) (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Int64Hi x) (Int64Lo y)) (Select0 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))) (Select1 <typ.UInt32> (Mul32uhilo (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v2.AddArg(x)
+ v3 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v3.AddArg(y)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v6.AddArg(x)
+ v7 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v7.AddArg(y)
+ v5.AddArg2(v6, v7)
+ v8 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v9 := b.NewValue0(v.Pos, OpMul32uhilo, types.NewTuple(typ.UInt32, typ.UInt32))
+ v9.AddArg2(v2, v7)
+ v8.AddArg(v9)
+ v4.AddArg2(v5, v8)
+ v0.AddArg2(v1, v4)
+ v10 := b.NewValue0(v.Pos, OpSelect1, typ.UInt32)
+ v10.AddArg(v9)
+ v.AddArg2(v0, v10)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 <t> x)
+ // result: (Sub64 (Const64 <t> [0]) x)
+ for {
+ t := v.Type
+ x := v_0
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x y)
+ // result: (OrB (Neq32 (Int64Hi x) (Int64Hi y)) (Neq32 (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpOrB)
+ v0 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpNeq32, typ.Bool)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c == 0
+ // result: y
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ y := v_1
+ if !(c == 0) {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Or32 <typ.UInt32> (Zeromask (Const32 [c])) y)
+ // cond: c != 0
+ // result: (Const32 <typ.UInt32> [-1])
+ for {
+ if v.Type != typ.UInt32 {
+ break
+ }
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpZeromask {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if !(c != 0) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.Type = typ.UInt32
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuedec64_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Or64 x y)
+ // result: (Int64Make (Or32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Or32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 x y)
+ // result: (Rsh16Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt16to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh16x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh16x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh16x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 x y)
+ // result: (Rsh16x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh16x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 x y)
+ // result: (Rsh32Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask x)
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Rsh32x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh32x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh32x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 x y)
+ // result: (Rsh32x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh32x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux16 x s)
+ // result: (Int64Make (Rsh32Ux16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (Rsh32Ux16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux32 x s)
+ // result: (Int64Make (Rsh32Ux32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (Rsh32Ux32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 x y)
+ // result: (Rsh64Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux8 x s)
+ // result: (Int64Make (Rsh32Ux8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (Rsh32Ux8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32])))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v10.AddArg2(s, v8)
+ v9.AddArg2(v1, v10)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x16 x s)
+ // result: (Int64Make (Rsh32x16 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux16 <typ.UInt32> (Int64Lo x) s) (Lsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> (Const16 <typ.UInt16> [32]) s))) (And32 <typ.UInt32> (Rsh32x16 <typ.UInt32> (Int64Hi x) (Sub16 <typ.UInt16> s (Const16 <typ.UInt16> [32]))) (Zeromask (ZeroExt16to32 (Rsh16Ux32 <typ.UInt16> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux16, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x16, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v8 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v8.AuxInt = int16ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x16, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub16, typ.UInt16)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh16Ux32, typ.UInt16)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x32 x s)
+ // result: (Int64Make (Rsh32x32 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux32 <typ.UInt32> (Int64Lo x) s) (Lsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> (Const32 <typ.UInt32> [32]) s))) (And32 <typ.UInt32> (Rsh32x32 <typ.UInt32> (Int64Hi x) (Sub32 <typ.UInt32> s (Const32 <typ.UInt32> [32]))) (Zeromask (Rsh32Ux32 <typ.UInt32> s (Const32 <typ.UInt32> [5]))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x32, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x32, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub32, typ.UInt32)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpRsh32Ux32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v14.AuxInt = int32ToAuxInt(5)
+ v13.AddArg2(s, v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Int64Make (Signmask (Int64Hi x)) (Signmask (Int64Hi x)))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg(v1)
+ v.AddArg2(v0, v0)
+ return true
+ }
+ // match: (Rsh64x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh64x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh64x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 x y)
+ // result: (Rsh64x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh64x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x8 x s)
+ // result: (Int64Make (Rsh32x8 <typ.UInt32> (Int64Hi x) s) (Or32 <typ.UInt32> (Or32 <typ.UInt32> (Rsh32Ux8 <typ.UInt32> (Int64Lo x) s) (Lsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> (Const8 <typ.UInt8> [32]) s))) (And32 <typ.UInt32> (Rsh32x8 <typ.UInt32> (Int64Hi x) (Sub8 <typ.UInt8> s (Const8 <typ.UInt8> [32]))) (Zeromask (ZeroExt8to32 (Rsh8Ux32 <typ.UInt8> s (Const32 <typ.UInt32> [5])))))))
+ for {
+ x := v_0
+ s := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v0.AddArg2(v1, s)
+ v2 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpRsh32Ux8, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v4.AddArg2(v5, s)
+ v6 := b.NewValue0(v.Pos, OpLsh32x8, typ.UInt32)
+ v7 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v8 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v8.AuxInt = int8ToAuxInt(32)
+ v7.AddArg2(v8, s)
+ v6.AddArg2(v1, v7)
+ v3.AddArg2(v4, v6)
+ v9 := b.NewValue0(v.Pos, OpAnd32, typ.UInt32)
+ v10 := b.NewValue0(v.Pos, OpRsh32x8, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpSub8, typ.UInt8)
+ v11.AddArg2(s, v8)
+ v10.AddArg2(v1, v11)
+ v12 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v13 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v14 := b.NewValue0(v.Pos, OpRsh8Ux32, typ.UInt8)
+ v15 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v15.AuxInt = int32ToAuxInt(5)
+ v14.AddArg2(s, v15)
+ v13.AddArg(v14)
+ v12.AddArg(v13)
+ v9.AddArg2(v10, v12)
+ v2.AddArg2(v3, v9)
+ v.AddArg2(v0, v2)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 _ (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8Ux32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8Ux64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 x y)
+ // result: (Rsh8Ux32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8Ux32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8x64 x (Int64Make (Const32 [c]) _))
+ // cond: c != 0
+ // result: (Signmask (SignExt8to32 x))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpSignmask)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh8x64 [c] x (Int64Make (Const32 [0]) lo))
+ // result: (Rsh8x32 [c] x lo)
+ for {
+ c := auxIntToBool(v.AuxInt)
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || auxIntToInt32(v_1_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v.AuxInt = boolToAuxInt(c)
+ v.AddArg2(x, lo)
+ return true
+ }
+ // match: (Rsh8x64 x (Int64Make hi lo))
+ // cond: hi.Op != OpConst32
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask hi) lo))
+ for {
+ x := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ if !(hi.Op != OpConst32) {
+ break
+ }
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v1.AddArg(hi)
+ v0.AddArg2(v1, lo)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x64 x y)
+ // result: (Rsh8x32 x (Or32 <typ.UInt32> (Zeromask (Int64Hi y)) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpRsh8x32)
+ v0 := b.NewValue0(v.Pos, OpOr32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeromask, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v1.AddArg(v2)
+ v3 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v3.AddArg(y)
+ v0.AddArg2(v1, v3)
+ v.AddArg2(x, v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt16to64 x)
+ // result: (SignExt32to64 (SignExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt32to64 x)
+ // result: (Int64Make (Signmask x) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSignmask, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (SignExt8to64 x)
+ // result: (SignExt32to64 (SignExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && !config.BigEndian
+ // result: (Store {hi.Type} (OffPtr <hi.Type.PtrTo()> [4] dst) hi (Store {lo.Type} dst lo mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && !config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(hi.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, hi.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(lo.Type)
+ v1.AddArg3(dst, lo, mem)
+ v.AddArg3(v0, hi, v1)
+ return true
+ }
+ // match: (Store {t} dst (Int64Make hi lo) mem)
+ // cond: t.Size() == 8 && config.BigEndian
+ // result: (Store {lo.Type} (OffPtr <lo.Type.PtrTo()> [4] dst) lo (Store {hi.Type} dst hi mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpInt64Make {
+ break
+ }
+ lo := v_1.Args[1]
+ hi := v_1.Args[0]
+ mem := v_2
+ if !(t.Size() == 8 && config.BigEndian) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(lo.Type)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, lo.Type.PtrTo())
+ v0.AuxInt = int64ToAuxInt(4)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(hi.Type)
+ v1.AddArg3(dst, hi, mem)
+ v.AddArg3(v0, lo, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuedec64_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Sub64 x y)
+ // result: (Int64Make (Sub32withcarry <typ.Int32> (Int64Hi x) (Int64Hi y) (Select1 <types.TypeFlags> (Sub32carry (Int64Lo x) (Int64Lo y)))) (Select0 <typ.UInt32> (Sub32carry (Int64Lo x) (Int64Lo y))))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpSub32withcarry, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v3 := b.NewValue0(v.Pos, OpSelect1, types.TypeFlags)
+ v4 := b.NewValue0(v.Pos, OpSub32carry, types.NewTuple(typ.UInt32, types.TypeFlags))
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(x)
+ v6 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v6.AddArg(y)
+ v4.AddArg2(v5, v6)
+ v3.AddArg(v4)
+ v0.AddArg3(v1, v2, v3)
+ v7 := b.NewValue0(v.Pos, OpSelect0, typ.UInt32)
+ v7.AddArg(v4)
+ v.AddArg2(v0, v7)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to16 (Int64Make _ lo))
+ // result: (Trunc32to16 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to16)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to16 x)
+ // result: (Trunc32to16 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Int64Make _ lo))
+ // result: lo
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.copyOf(lo)
+ return true
+ }
+ // match: (Trunc64to32 x)
+ // result: (Int64Lo x)
+ for {
+ x := v_0
+ v.reset(OpInt64Lo)
+ v.AddArg(x)
+ return true
+ }
+}
+func rewriteValuedec64_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Trunc64to8 (Int64Make _ lo))
+ // result: (Trunc32to8 lo)
+ for {
+ if v_0.Op != OpInt64Make {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpTrunc32to8)
+ v.AddArg(lo)
+ return true
+ }
+ // match: (Trunc64to8 x)
+ // result: (Trunc32to8 (Int64Lo x))
+ for {
+ x := v_0
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Xor64 x y)
+ // result: (Int64Make (Xor32 <typ.UInt32> (Int64Hi x) (Int64Hi y)) (Xor32 <typ.UInt32> (Int64Lo x) (Int64Lo y)))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpInt64Hi, typ.UInt32)
+ v2.AddArg(y)
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpXor32, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpInt64Lo, typ.UInt32)
+ v5.AddArg(y)
+ v3.AddArg2(v4, v5)
+ v.AddArg2(v0, v3)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt16to64 x)
+ // result: (ZeroExt32to64 (ZeroExt16to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt32to64 x)
+ // result: (Int64Make (Const32 <typ.UInt32> [0]) x)
+ for {
+ x := v_0
+ v.reset(OpInt64Make)
+ v0 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+}
+func rewriteValuedec64_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ZeroExt8to64 x)
+ // result: (ZeroExt32to64 (ZeroExt8to32 x))
+ for {
+ x := v_0
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+}
+func rewriteBlockdec64(b *Block) bool {
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/rewritegeneric.go b/src/cmd/compile/internal/ssa/rewritegeneric.go
new file mode 100644
index 0000000..434402b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/rewritegeneric.go
@@ -0,0 +1,25604 @@
+// Code generated from gen/generic.rules; DO NOT EDIT.
+// generated with: cd gen; go run *.go
+
+package ssa
+
+import "math"
+import "cmd/compile/internal/types"
+
+func rewriteValuegeneric(v *Value) bool {
+ switch v.Op {
+ case OpAdd16:
+ return rewriteValuegeneric_OpAdd16(v)
+ case OpAdd32:
+ return rewriteValuegeneric_OpAdd32(v)
+ case OpAdd32F:
+ return rewriteValuegeneric_OpAdd32F(v)
+ case OpAdd64:
+ return rewriteValuegeneric_OpAdd64(v)
+ case OpAdd64F:
+ return rewriteValuegeneric_OpAdd64F(v)
+ case OpAdd8:
+ return rewriteValuegeneric_OpAdd8(v)
+ case OpAddPtr:
+ return rewriteValuegeneric_OpAddPtr(v)
+ case OpAnd16:
+ return rewriteValuegeneric_OpAnd16(v)
+ case OpAnd32:
+ return rewriteValuegeneric_OpAnd32(v)
+ case OpAnd64:
+ return rewriteValuegeneric_OpAnd64(v)
+ case OpAnd8:
+ return rewriteValuegeneric_OpAnd8(v)
+ case OpAndB:
+ return rewriteValuegeneric_OpAndB(v)
+ case OpArraySelect:
+ return rewriteValuegeneric_OpArraySelect(v)
+ case OpCom16:
+ return rewriteValuegeneric_OpCom16(v)
+ case OpCom32:
+ return rewriteValuegeneric_OpCom32(v)
+ case OpCom64:
+ return rewriteValuegeneric_OpCom64(v)
+ case OpCom8:
+ return rewriteValuegeneric_OpCom8(v)
+ case OpConstInterface:
+ return rewriteValuegeneric_OpConstInterface(v)
+ case OpConstSlice:
+ return rewriteValuegeneric_OpConstSlice(v)
+ case OpConstString:
+ return rewriteValuegeneric_OpConstString(v)
+ case OpConvert:
+ return rewriteValuegeneric_OpConvert(v)
+ case OpCtz16:
+ return rewriteValuegeneric_OpCtz16(v)
+ case OpCtz32:
+ return rewriteValuegeneric_OpCtz32(v)
+ case OpCtz64:
+ return rewriteValuegeneric_OpCtz64(v)
+ case OpCtz8:
+ return rewriteValuegeneric_OpCtz8(v)
+ case OpCvt32Fto32:
+ return rewriteValuegeneric_OpCvt32Fto32(v)
+ case OpCvt32Fto64:
+ return rewriteValuegeneric_OpCvt32Fto64(v)
+ case OpCvt32Fto64F:
+ return rewriteValuegeneric_OpCvt32Fto64F(v)
+ case OpCvt32to32F:
+ return rewriteValuegeneric_OpCvt32to32F(v)
+ case OpCvt32to64F:
+ return rewriteValuegeneric_OpCvt32to64F(v)
+ case OpCvt64Fto32:
+ return rewriteValuegeneric_OpCvt64Fto32(v)
+ case OpCvt64Fto32F:
+ return rewriteValuegeneric_OpCvt64Fto32F(v)
+ case OpCvt64Fto64:
+ return rewriteValuegeneric_OpCvt64Fto64(v)
+ case OpCvt64to32F:
+ return rewriteValuegeneric_OpCvt64to32F(v)
+ case OpCvt64to64F:
+ return rewriteValuegeneric_OpCvt64to64F(v)
+ case OpCvtBoolToUint8:
+ return rewriteValuegeneric_OpCvtBoolToUint8(v)
+ case OpDiv16:
+ return rewriteValuegeneric_OpDiv16(v)
+ case OpDiv16u:
+ return rewriteValuegeneric_OpDiv16u(v)
+ case OpDiv32:
+ return rewriteValuegeneric_OpDiv32(v)
+ case OpDiv32F:
+ return rewriteValuegeneric_OpDiv32F(v)
+ case OpDiv32u:
+ return rewriteValuegeneric_OpDiv32u(v)
+ case OpDiv64:
+ return rewriteValuegeneric_OpDiv64(v)
+ case OpDiv64F:
+ return rewriteValuegeneric_OpDiv64F(v)
+ case OpDiv64u:
+ return rewriteValuegeneric_OpDiv64u(v)
+ case OpDiv8:
+ return rewriteValuegeneric_OpDiv8(v)
+ case OpDiv8u:
+ return rewriteValuegeneric_OpDiv8u(v)
+ case OpEq16:
+ return rewriteValuegeneric_OpEq16(v)
+ case OpEq32:
+ return rewriteValuegeneric_OpEq32(v)
+ case OpEq32F:
+ return rewriteValuegeneric_OpEq32F(v)
+ case OpEq64:
+ return rewriteValuegeneric_OpEq64(v)
+ case OpEq64F:
+ return rewriteValuegeneric_OpEq64F(v)
+ case OpEq8:
+ return rewriteValuegeneric_OpEq8(v)
+ case OpEqB:
+ return rewriteValuegeneric_OpEqB(v)
+ case OpEqInter:
+ return rewriteValuegeneric_OpEqInter(v)
+ case OpEqPtr:
+ return rewriteValuegeneric_OpEqPtr(v)
+ case OpEqSlice:
+ return rewriteValuegeneric_OpEqSlice(v)
+ case OpIMake:
+ return rewriteValuegeneric_OpIMake(v)
+ case OpInterLECall:
+ return rewriteValuegeneric_OpInterLECall(v)
+ case OpIsInBounds:
+ return rewriteValuegeneric_OpIsInBounds(v)
+ case OpIsNonNil:
+ return rewriteValuegeneric_OpIsNonNil(v)
+ case OpIsSliceInBounds:
+ return rewriteValuegeneric_OpIsSliceInBounds(v)
+ case OpLeq16:
+ return rewriteValuegeneric_OpLeq16(v)
+ case OpLeq16U:
+ return rewriteValuegeneric_OpLeq16U(v)
+ case OpLeq32:
+ return rewriteValuegeneric_OpLeq32(v)
+ case OpLeq32F:
+ return rewriteValuegeneric_OpLeq32F(v)
+ case OpLeq32U:
+ return rewriteValuegeneric_OpLeq32U(v)
+ case OpLeq64:
+ return rewriteValuegeneric_OpLeq64(v)
+ case OpLeq64F:
+ return rewriteValuegeneric_OpLeq64F(v)
+ case OpLeq64U:
+ return rewriteValuegeneric_OpLeq64U(v)
+ case OpLeq8:
+ return rewriteValuegeneric_OpLeq8(v)
+ case OpLeq8U:
+ return rewriteValuegeneric_OpLeq8U(v)
+ case OpLess16:
+ return rewriteValuegeneric_OpLess16(v)
+ case OpLess16U:
+ return rewriteValuegeneric_OpLess16U(v)
+ case OpLess32:
+ return rewriteValuegeneric_OpLess32(v)
+ case OpLess32F:
+ return rewriteValuegeneric_OpLess32F(v)
+ case OpLess32U:
+ return rewriteValuegeneric_OpLess32U(v)
+ case OpLess64:
+ return rewriteValuegeneric_OpLess64(v)
+ case OpLess64F:
+ return rewriteValuegeneric_OpLess64F(v)
+ case OpLess64U:
+ return rewriteValuegeneric_OpLess64U(v)
+ case OpLess8:
+ return rewriteValuegeneric_OpLess8(v)
+ case OpLess8U:
+ return rewriteValuegeneric_OpLess8U(v)
+ case OpLoad:
+ return rewriteValuegeneric_OpLoad(v)
+ case OpLsh16x16:
+ return rewriteValuegeneric_OpLsh16x16(v)
+ case OpLsh16x32:
+ return rewriteValuegeneric_OpLsh16x32(v)
+ case OpLsh16x64:
+ return rewriteValuegeneric_OpLsh16x64(v)
+ case OpLsh16x8:
+ return rewriteValuegeneric_OpLsh16x8(v)
+ case OpLsh32x16:
+ return rewriteValuegeneric_OpLsh32x16(v)
+ case OpLsh32x32:
+ return rewriteValuegeneric_OpLsh32x32(v)
+ case OpLsh32x64:
+ return rewriteValuegeneric_OpLsh32x64(v)
+ case OpLsh32x8:
+ return rewriteValuegeneric_OpLsh32x8(v)
+ case OpLsh64x16:
+ return rewriteValuegeneric_OpLsh64x16(v)
+ case OpLsh64x32:
+ return rewriteValuegeneric_OpLsh64x32(v)
+ case OpLsh64x64:
+ return rewriteValuegeneric_OpLsh64x64(v)
+ case OpLsh64x8:
+ return rewriteValuegeneric_OpLsh64x8(v)
+ case OpLsh8x16:
+ return rewriteValuegeneric_OpLsh8x16(v)
+ case OpLsh8x32:
+ return rewriteValuegeneric_OpLsh8x32(v)
+ case OpLsh8x64:
+ return rewriteValuegeneric_OpLsh8x64(v)
+ case OpLsh8x8:
+ return rewriteValuegeneric_OpLsh8x8(v)
+ case OpMod16:
+ return rewriteValuegeneric_OpMod16(v)
+ case OpMod16u:
+ return rewriteValuegeneric_OpMod16u(v)
+ case OpMod32:
+ return rewriteValuegeneric_OpMod32(v)
+ case OpMod32u:
+ return rewriteValuegeneric_OpMod32u(v)
+ case OpMod64:
+ return rewriteValuegeneric_OpMod64(v)
+ case OpMod64u:
+ return rewriteValuegeneric_OpMod64u(v)
+ case OpMod8:
+ return rewriteValuegeneric_OpMod8(v)
+ case OpMod8u:
+ return rewriteValuegeneric_OpMod8u(v)
+ case OpMove:
+ return rewriteValuegeneric_OpMove(v)
+ case OpMul16:
+ return rewriteValuegeneric_OpMul16(v)
+ case OpMul32:
+ return rewriteValuegeneric_OpMul32(v)
+ case OpMul32F:
+ return rewriteValuegeneric_OpMul32F(v)
+ case OpMul64:
+ return rewriteValuegeneric_OpMul64(v)
+ case OpMul64F:
+ return rewriteValuegeneric_OpMul64F(v)
+ case OpMul8:
+ return rewriteValuegeneric_OpMul8(v)
+ case OpNeg16:
+ return rewriteValuegeneric_OpNeg16(v)
+ case OpNeg32:
+ return rewriteValuegeneric_OpNeg32(v)
+ case OpNeg32F:
+ return rewriteValuegeneric_OpNeg32F(v)
+ case OpNeg64:
+ return rewriteValuegeneric_OpNeg64(v)
+ case OpNeg64F:
+ return rewriteValuegeneric_OpNeg64F(v)
+ case OpNeg8:
+ return rewriteValuegeneric_OpNeg8(v)
+ case OpNeq16:
+ return rewriteValuegeneric_OpNeq16(v)
+ case OpNeq32:
+ return rewriteValuegeneric_OpNeq32(v)
+ case OpNeq32F:
+ return rewriteValuegeneric_OpNeq32F(v)
+ case OpNeq64:
+ return rewriteValuegeneric_OpNeq64(v)
+ case OpNeq64F:
+ return rewriteValuegeneric_OpNeq64F(v)
+ case OpNeq8:
+ return rewriteValuegeneric_OpNeq8(v)
+ case OpNeqB:
+ return rewriteValuegeneric_OpNeqB(v)
+ case OpNeqInter:
+ return rewriteValuegeneric_OpNeqInter(v)
+ case OpNeqPtr:
+ return rewriteValuegeneric_OpNeqPtr(v)
+ case OpNeqSlice:
+ return rewriteValuegeneric_OpNeqSlice(v)
+ case OpNilCheck:
+ return rewriteValuegeneric_OpNilCheck(v)
+ case OpNot:
+ return rewriteValuegeneric_OpNot(v)
+ case OpOffPtr:
+ return rewriteValuegeneric_OpOffPtr(v)
+ case OpOr16:
+ return rewriteValuegeneric_OpOr16(v)
+ case OpOr32:
+ return rewriteValuegeneric_OpOr32(v)
+ case OpOr64:
+ return rewriteValuegeneric_OpOr64(v)
+ case OpOr8:
+ return rewriteValuegeneric_OpOr8(v)
+ case OpOrB:
+ return rewriteValuegeneric_OpOrB(v)
+ case OpPhi:
+ return rewriteValuegeneric_OpPhi(v)
+ case OpPtrIndex:
+ return rewriteValuegeneric_OpPtrIndex(v)
+ case OpRotateLeft16:
+ return rewriteValuegeneric_OpRotateLeft16(v)
+ case OpRotateLeft32:
+ return rewriteValuegeneric_OpRotateLeft32(v)
+ case OpRotateLeft64:
+ return rewriteValuegeneric_OpRotateLeft64(v)
+ case OpRotateLeft8:
+ return rewriteValuegeneric_OpRotateLeft8(v)
+ case OpRound32F:
+ return rewriteValuegeneric_OpRound32F(v)
+ case OpRound64F:
+ return rewriteValuegeneric_OpRound64F(v)
+ case OpRsh16Ux16:
+ return rewriteValuegeneric_OpRsh16Ux16(v)
+ case OpRsh16Ux32:
+ return rewriteValuegeneric_OpRsh16Ux32(v)
+ case OpRsh16Ux64:
+ return rewriteValuegeneric_OpRsh16Ux64(v)
+ case OpRsh16Ux8:
+ return rewriteValuegeneric_OpRsh16Ux8(v)
+ case OpRsh16x16:
+ return rewriteValuegeneric_OpRsh16x16(v)
+ case OpRsh16x32:
+ return rewriteValuegeneric_OpRsh16x32(v)
+ case OpRsh16x64:
+ return rewriteValuegeneric_OpRsh16x64(v)
+ case OpRsh16x8:
+ return rewriteValuegeneric_OpRsh16x8(v)
+ case OpRsh32Ux16:
+ return rewriteValuegeneric_OpRsh32Ux16(v)
+ case OpRsh32Ux32:
+ return rewriteValuegeneric_OpRsh32Ux32(v)
+ case OpRsh32Ux64:
+ return rewriteValuegeneric_OpRsh32Ux64(v)
+ case OpRsh32Ux8:
+ return rewriteValuegeneric_OpRsh32Ux8(v)
+ case OpRsh32x16:
+ return rewriteValuegeneric_OpRsh32x16(v)
+ case OpRsh32x32:
+ return rewriteValuegeneric_OpRsh32x32(v)
+ case OpRsh32x64:
+ return rewriteValuegeneric_OpRsh32x64(v)
+ case OpRsh32x8:
+ return rewriteValuegeneric_OpRsh32x8(v)
+ case OpRsh64Ux16:
+ return rewriteValuegeneric_OpRsh64Ux16(v)
+ case OpRsh64Ux32:
+ return rewriteValuegeneric_OpRsh64Ux32(v)
+ case OpRsh64Ux64:
+ return rewriteValuegeneric_OpRsh64Ux64(v)
+ case OpRsh64Ux8:
+ return rewriteValuegeneric_OpRsh64Ux8(v)
+ case OpRsh64x16:
+ return rewriteValuegeneric_OpRsh64x16(v)
+ case OpRsh64x32:
+ return rewriteValuegeneric_OpRsh64x32(v)
+ case OpRsh64x64:
+ return rewriteValuegeneric_OpRsh64x64(v)
+ case OpRsh64x8:
+ return rewriteValuegeneric_OpRsh64x8(v)
+ case OpRsh8Ux16:
+ return rewriteValuegeneric_OpRsh8Ux16(v)
+ case OpRsh8Ux32:
+ return rewriteValuegeneric_OpRsh8Ux32(v)
+ case OpRsh8Ux64:
+ return rewriteValuegeneric_OpRsh8Ux64(v)
+ case OpRsh8Ux8:
+ return rewriteValuegeneric_OpRsh8Ux8(v)
+ case OpRsh8x16:
+ return rewriteValuegeneric_OpRsh8x16(v)
+ case OpRsh8x32:
+ return rewriteValuegeneric_OpRsh8x32(v)
+ case OpRsh8x64:
+ return rewriteValuegeneric_OpRsh8x64(v)
+ case OpRsh8x8:
+ return rewriteValuegeneric_OpRsh8x8(v)
+ case OpSelect0:
+ return rewriteValuegeneric_OpSelect0(v)
+ case OpSelect1:
+ return rewriteValuegeneric_OpSelect1(v)
+ case OpSelectN:
+ return rewriteValuegeneric_OpSelectN(v)
+ case OpSignExt16to32:
+ return rewriteValuegeneric_OpSignExt16to32(v)
+ case OpSignExt16to64:
+ return rewriteValuegeneric_OpSignExt16to64(v)
+ case OpSignExt32to64:
+ return rewriteValuegeneric_OpSignExt32to64(v)
+ case OpSignExt8to16:
+ return rewriteValuegeneric_OpSignExt8to16(v)
+ case OpSignExt8to32:
+ return rewriteValuegeneric_OpSignExt8to32(v)
+ case OpSignExt8to64:
+ return rewriteValuegeneric_OpSignExt8to64(v)
+ case OpSliceCap:
+ return rewriteValuegeneric_OpSliceCap(v)
+ case OpSliceLen:
+ return rewriteValuegeneric_OpSliceLen(v)
+ case OpSlicePtr:
+ return rewriteValuegeneric_OpSlicePtr(v)
+ case OpSlicemask:
+ return rewriteValuegeneric_OpSlicemask(v)
+ case OpSqrt:
+ return rewriteValuegeneric_OpSqrt(v)
+ case OpStaticLECall:
+ return rewriteValuegeneric_OpStaticLECall(v)
+ case OpStore:
+ return rewriteValuegeneric_OpStore(v)
+ case OpStringLen:
+ return rewriteValuegeneric_OpStringLen(v)
+ case OpStringPtr:
+ return rewriteValuegeneric_OpStringPtr(v)
+ case OpStructSelect:
+ return rewriteValuegeneric_OpStructSelect(v)
+ case OpSub16:
+ return rewriteValuegeneric_OpSub16(v)
+ case OpSub32:
+ return rewriteValuegeneric_OpSub32(v)
+ case OpSub32F:
+ return rewriteValuegeneric_OpSub32F(v)
+ case OpSub64:
+ return rewriteValuegeneric_OpSub64(v)
+ case OpSub64F:
+ return rewriteValuegeneric_OpSub64F(v)
+ case OpSub8:
+ return rewriteValuegeneric_OpSub8(v)
+ case OpTrunc16to8:
+ return rewriteValuegeneric_OpTrunc16to8(v)
+ case OpTrunc32to16:
+ return rewriteValuegeneric_OpTrunc32to16(v)
+ case OpTrunc32to8:
+ return rewriteValuegeneric_OpTrunc32to8(v)
+ case OpTrunc64to16:
+ return rewriteValuegeneric_OpTrunc64to16(v)
+ case OpTrunc64to32:
+ return rewriteValuegeneric_OpTrunc64to32(v)
+ case OpTrunc64to8:
+ return rewriteValuegeneric_OpTrunc64to8(v)
+ case OpXor16:
+ return rewriteValuegeneric_OpXor16(v)
+ case OpXor32:
+ return rewriteValuegeneric_OpXor32(v)
+ case OpXor64:
+ return rewriteValuegeneric_OpXor64(v)
+ case OpXor8:
+ return rewriteValuegeneric_OpXor8(v)
+ case OpZero:
+ return rewriteValuegeneric_OpZero(v)
+ case OpZeroExt16to32:
+ return rewriteValuegeneric_OpZeroExt16to32(v)
+ case OpZeroExt16to64:
+ return rewriteValuegeneric_OpZeroExt16to64(v)
+ case OpZeroExt32to64:
+ return rewriteValuegeneric_OpZeroExt32to64(v)
+ case OpZeroExt8to16:
+ return rewriteValuegeneric_OpZeroExt8to16(v)
+ case OpZeroExt8to32:
+ return rewriteValuegeneric_OpZeroExt8to32(v)
+ case OpZeroExt8to64:
+ return rewriteValuegeneric_OpZeroExt8to64(v)
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Add16 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Const16 [1]) (Com16 x))
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 || v_1.Op != OpCom16 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add16 x (Sub16 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add16 x (Add16 y (Sub16 z x)))
+ // result: (Add16 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd16)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Add16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Add16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub16 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Add32 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Const32 [1]) (Com32 x))
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 || v_1.Op != OpCom32 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add32 x (Sub32 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add32 x (Add32 y (Sub32 z x)))
+ // result: (Add32 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd32)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Add32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Add32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub32 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add32F (Const32F [c]) (Const32F [d]))
+ // cond: c+d == c+d
+ // result: (Const32F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Add64 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Const64 [1]) (Com64 x))
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 || v_1.Op != OpCom64 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add64 x (Sub64 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add64 x (Add64 y (Sub64 z x)))
+ // result: (Add64 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd64)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Add64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Add64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub64 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Add64F (Const64F [c]) (Const64F [d]))
+ // cond: c+d == c+d
+ // result: (Const64F [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c+d == c+d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAdd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Add8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c+d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c + d)
+ return true
+ }
+ break
+ }
+ // match: (Add8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Add8 <t> y z))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_0, v_1_1 = _i2+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Const8 [1]) (Com8 x))
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 || v_1.Op != OpCom8 {
+ continue
+ }
+ x := v_1.Args[0]
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Add8 x (Sub8 y x))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ y := v_1.Args[0]
+ if x != v_1.Args[1] {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Add8 x (Add8 y (Sub8 z x)))
+ // result: (Add8 y z)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ y := v_1_0
+ if v_1_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ z := v_1_1.Args[0]
+ if x != v_1_1.Args[1] {
+ continue
+ }
+ v.reset(OpAdd8)
+ v.AddArg2(y, z)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Add8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Add8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAdd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpSub8 {
+ continue
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Add8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c+d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ continue
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c + d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAddPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (AddPtr <t> x (Const64 [c]))
+ // result: (OffPtr <t> x [c])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ v.AddArg(x)
+ return true
+ }
+ // match: (AddPtr <t> x (Const32 [c]))
+ // result: (OffPtr <t> x [int64(c)])
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpOffPtr)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(int64(c))
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c >= int64(16-ntz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpRsh16Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-ntz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [m]) (Lsh16x64 _ (Const64 [c])))
+ // cond: c >= int64(16-nlz16(m))
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ m := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(16-nlz16(m))) {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And16 (Const16 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And16 x (And16 x y))
+ // result: (And16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (And16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (And16 i (And16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And16 (Const16 <t> [c]) (And16 (Const16 <t> [d]) x))
+ // result: (And16 (Const16 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAnd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c >= int64(32-ntz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-ntz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [m]) (Lsh32x64 _ (Const64 [c])))
+ // cond: c >= int64(32-nlz32(m))
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(32-nlz32(m))) {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And32 (Const32 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And32 x (And32 x y))
+ // result: (And32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (And32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (And32 i (And32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And32 (Const32 <t> [c]) (And32 (Const32 <t> [d]) x))
+ // result: (And32 (Const32 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAnd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c >= int64(64-ntz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-ntz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [m]) (Lsh64x64 _ (Const64 [c])))
+ // cond: c >= int64(64-nlz64(m))
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(64-nlz64(m))) {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And64 (Const64 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And64 x (And64 x y))
+ // result: (And64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (And64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (And64 i (And64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And64 (Const64 <t> [c]) (And64 (Const64 <t> [d]) x))
+ // result: (And64 (Const64 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAnd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAnd8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (And8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c&d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c & d)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c >= int64(8-ntz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpRsh8Ux64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-ntz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [m]) (Lsh8x64 _ (Const64 [c])))
+ // cond: c >= int64(8-nlz8(m))
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ m := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= int64(8-nlz8(m))) {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (And8 (Const8 [-1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (And8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (And8 x (And8 x y))
+ // result: (And8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpAnd8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (And8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (And8 i (And8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (And8 (Const8 <t> [c]) (And8 (Const8 <t> [d]) x))
+ // result: (And8 (Const8 <t> [c&d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAnd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c & d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpAndB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (AndB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: d >= c+1 && c+1 > c
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(d >= c+1 && c+1 > c) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c])) (Const64 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c])) (Const32 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c])) (Const16 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c])) (Const8 <x.Type> [d-c]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Less64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)
+ // result: (Leq64U (Sub64 <x.Type> x (Const64 <x.Type> [c+1])) (Const64 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(d) >= uint64(c+1) && uint64(c+1) > uint64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v1.AuxInt = int64ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Less32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)
+ // result: (Leq32U (Sub32 <x.Type> x (Const32 <x.Type> [c+1])) (Const32 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(d) >= uint32(c+1) && uint32(c+1) > uint32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v1.AuxInt = int32ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Less16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)
+ // result: (Leq16U (Sub16 <x.Type> x (Const16 <x.Type> [c+1])) (Const16 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(d) >= uint16(c+1) && uint16(c+1) > uint16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v1.AuxInt = int16ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Less8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ // match: (AndB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)
+ // result: (Leq8U (Sub8 <x.Type> x (Const8 <x.Type> [c+1])) (Const8 <x.Type> [d-c-1]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(d) >= uint8(c+1) && uint8(c+1) > uint8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v1 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v1.AuxInt = int8ToAuxInt(c + 1)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d - c - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpArraySelect(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ArraySelect (ArrayMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpArrayMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (ArraySelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com16 (Com16 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com16 (Const16 [c]))
+ // result: (Const16 [^c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(^c)
+ return true
+ }
+ // match: (Com16 (Add16 (Const16 [-1]) x))
+ // result: (Neg16 x)
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 || auxIntToInt16(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com32 (Com32 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com32 (Const32 [c]))
+ // result: (Const32 [^c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(^c)
+ return true
+ }
+ // match: (Com32 (Add32 (Const32 [-1]) x))
+ // result: (Neg32 x)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 || auxIntToInt32(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com64 (Com64 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com64 (Const64 [c]))
+ // result: (Const64 [^c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(^c)
+ return true
+ }
+ // match: (Com64 (Add64 (Const64 [-1]) x))
+ // result: (Neg64 x)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpCom8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Com8 (Com8 x))
+ // result: x
+ for {
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Com8 (Const8 [c]))
+ // result: (Const8 [^c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(^c)
+ return true
+ }
+ // match: (Com8 (Add8 (Const8 [-1]) x))
+ // result: (Neg8 x)
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 || auxIntToInt8(v_0_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_0_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpConstInterface(v *Value) bool {
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (ConstInterface)
+ // result: (IMake (ConstNil <typ.Uintptr>) (ConstNil <typ.BytePtr>))
+ for {
+ v.reset(OpIMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.Uintptr)
+ v1 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpConstSlice(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 4
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const32 <typ.Int> [0]) (Const32 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ // match: (ConstSlice)
+ // cond: config.PtrSize == 8
+ // result: (SliceMake (ConstNil <v.Type.Elem().PtrTo()>) (Const64 <typ.Int> [0]) (Const64 <typ.Int> [0]))
+ for {
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpSliceMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, v.Type.Elem().PtrTo())
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg3(v0, v1, v1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConstString(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ fe := b.Func.fe
+ typ := &b.Func.Config.Types
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str == ""
+ // result: (StringMake (ConstNil) (Const32 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str == ""
+ // result: (StringMake (ConstNil) (Const64 <typ.Int> [0]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str == "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpConstNil, typ.BytePtr)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 4 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const32 <typ.Int> [int32(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 4 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v2.AuxInt = int32ToAuxInt(int32(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (ConstString {str})
+ // cond: config.PtrSize == 8 && str != ""
+ // result: (StringMake (Addr <typ.BytePtr> {fe.StringData(str)} (SB)) (Const64 <typ.Int> [int64(len(str))]))
+ for {
+ str := auxToString(v.Aux)
+ if !(config.PtrSize == 8 && str != "") {
+ break
+ }
+ v.reset(OpStringMake)
+ v0 := b.NewValue0(v.Pos, OpAddr, typ.BytePtr)
+ v0.Aux = symToAux(fe.StringData(str))
+ v1 := b.NewValue0(v.Pos, OpSB, typ.Uintptr)
+ v0.AddArg(v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v2.AuxInt = int64ToAuxInt(int64(len(str)))
+ v.AddArg2(v0, v2)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpConvert(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Convert (Add64 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Add32 (Convert ptr mem) off) mem)
+ // result: (AddPtr ptr off)
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConvert {
+ continue
+ }
+ mem := v_0_0.Args[1]
+ ptr := v_0_0.Args[0]
+ off := v_0_1
+ if mem != v_1 {
+ continue
+ }
+ v.reset(OpAddPtr)
+ v.AddArg2(ptr, off)
+ return true
+ }
+ break
+ }
+ // match: (Convert (Convert ptr mem) mem)
+ // result: ptr
+ for {
+ if v_0.Op != OpConvert {
+ break
+ }
+ mem := v_0.Args[1]
+ ptr := v_0.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(ptr)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz16(c)))
+ return true
+ }
+ // match: (Ctz16 (Const16 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz16(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz32(c)))
+ return true
+ }
+ // match: (Ctz32 (Const32 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz32(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz64(c)))
+ return true
+ }
+ // match: (Ctz64 (Const64 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz64(c))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz64(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCtz8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 4
+ // result: (Const32 [int32(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(ntz8(c)))
+ return true
+ }
+ // match: (Ctz8 (Const8 [c]))
+ // cond: config.PtrSize == 8
+ // result: (Const64 [int64(ntz8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(ntz8(c)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto32 (Const32F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64 (Const32F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32Fto64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32Fto64F (Const32F [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to32F (Const32 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt32to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt32to64F (Const32 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32 (Const64F [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto32F (Const64F [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ // match: (Cvt64Fto32F sqrt0:(Sqrt (Cvt32Fto64F x)))
+ // cond: sqrt0.Uses==1
+ // result: (Sqrt32 x)
+ for {
+ sqrt0 := v_0
+ if sqrt0.Op != OpSqrt {
+ break
+ }
+ sqrt0_0 := sqrt0.Args[0]
+ if sqrt0_0.Op != OpCvt32Fto64F {
+ break
+ }
+ x := sqrt0_0.Args[0]
+ if !(sqrt0.Uses == 1) {
+ break
+ }
+ v.reset(OpSqrt32)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64Fto64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64Fto64 (Const64F [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to32F (Const64 [c]))
+ // result: (Const32F [float32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(float32(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvt64to64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Cvt64to64F (Const64 [c]))
+ // result: (Const64F [float64(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(float64(c))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpCvtBoolToUint8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (CvtBoolToUint8 (ConstBool [false]))
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (CvtBoolToUint8 (ConstBool [true]))
+ // result: (Const8 [1])
+ for {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(1)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c/d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div16 n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Neg16 (Div16 <t> n (Const16 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpDiv16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [-1<<15]))
+ // result: (Rsh16Ux64 (And16 <t> x (Neg16 <t> x)) (Const64 <typ.UInt64> [15]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != -1<<15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpNeg16, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [int64(16-log16(c))]))) (Const64 <typ.UInt64> [int64(log16(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v1 := b.NewValue0(v.Pos, OpRsh16Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh16x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(15)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(16 - log16(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log16(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div16 <t> x (Const16 [c]))
+ // cond: smagicOK16(c)
+ // result: (Sub16 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic16(c).m)]) (SignExt16to32 x)) (Const64 <typ.UInt64> [16+smagic16(c).s])) (Rsh32x64 <t> (SignExt16to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(smagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + smagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
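The Div16 rules above fold constant operands and then strength-reduce division by a constant: a power-of-two divisor becomes a sign-dependent bias followed by an arithmetic shift, and other divisors fall through to the signed "magic" multiplier form. The following stand-alone sketch (illustrative only, not the compiler's code; the helper name is invented) mirrors the power-of-two rewrite and checks it against Go's truncating division over the whole int16 range.

package main

import "fmt"

// div16ByPow2 mirrors the Div16 power-of-two rule: n / (1<<k) is computed
// by adding a bias of (1<<k)-1 when n is negative, then arithmetic-shifting.
func div16ByPow2(n int16, k uint) int16 {
	signMask := n >> 15                         // Rsh16x64 n 15: 0 or -1
	bias := int16(uint16(signMask) >> (16 - k)) // Rsh16Ux64: 0 or (1<<k)-1
	return (n + bias) >> k                      // Rsh16x64 by log16(c)
}

func main() {
	for k := uint(1); k <= 14; k++ {
		c := int16(1) << k
		for n := -32768; n <= 32767; n++ {
			if got, want := div16ByPow2(int16(n), k), int16(n)/c; got != want {
				fmt.Printf("mismatch: %d/%d: got %d, want %d\n", n, c, got, want)
				return
			}
		}
	}
	fmt.Println("bias-and-shift matches truncating division for all int16 inputs")
}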
+func rewriteValuegeneric_OpDiv16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c)/uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) / uint16(d)))
+ return true
+ }
+ // match: (Div16u n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Rsh16Ux64 n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 8
+ // result: (Trunc64to16 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<16+umagic16(c).m)]) (ZeroExt16to64 x)) (Const64 <typ.UInt64> [16+umagic16(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpTrunc64to16)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<16 + umagic16(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+umagic16(c).m/2)]) (ZeroExt16to32 x)) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && umagic16(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + umagic16(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && c&1 == 0
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<15+(umagic16(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [16+umagic16(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<15 + (umagic16(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v4 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div16u x (Const16 [c]))
+ // cond: umagicOK16(c) && config.RegSize == 4 && config.useAvg
+ // result: (Trunc32to16 (Rsh32Ux64 <typ.UInt32> (Avg32u (Lsh32x64 <typ.UInt32> (ZeroExt16to32 x) (Const64 <typ.UInt64> [16])) (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic16(c).m)]) (ZeroExt16to32 x))) (Const64 <typ.UInt64> [16+umagic16(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(umagicOK16(c) && config.RegSize == 4 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc32to16)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpLsh32x64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(16)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(umagic16(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(16 + umagic16(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
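For divisors that are not powers of two, the Div16u rules above replace the division with a widening multiply by a precomputed "magic" constant followed by a shift; in the 64-bit-register case the multiplier is 1<<16 + umagic16(c).m and the shift is 16 + umagic16(c).s. A minimal sketch of the same identity (not the compiler's code; the constant is recomputed directly as ceil(2^(16+s)/c) instead of via the umagic16 helper), with a brute-force check against plain division:

package main

import (
	"fmt"
	"math/bits"
)

// divByMagic computes x/c as a widening multiply-and-shift:
// with s = ceil(log2(c)) and M = ceil(2^(16+s)/c), x/c == (x*M) >> (16+s).
func divByMagic(x, c uint16) uint16 {
	s := uint(bits.Len16(c - 1))                         // s = ceil(log2(c))
	M := (uint64(1)<<(16+s) + uint64(c) - 1) / uint64(c) // M = ceil(2^(16+s)/c)
	return uint16((uint64(x) * M) >> (16 + s))
}

func main() {
	for c := uint16(3); c <= 100; c++ {
		if c&(c-1) == 0 {
			continue // powers of two are handled by a plain shift instead
		}
		for x := 0; x <= 0xFFFF; x++ {
			if got, want := divByMagic(uint16(x), c), uint16(x)/c; got != want {
				fmt.Printf("mismatch: %d/%d: got %d, want %d\n", x, c, got, want)
				return
			}
		}
	}
	fmt.Println("widen, multiply, shift matches plain division for all tested divisors")
}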
+func rewriteValuegeneric_OpDiv32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c/d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32 n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Neg32 (Div32 <t> n (Const32 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpDiv32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [-1<<31]))
+ // result: (Rsh32Ux64 (And32 <t> x (Neg32 <t> x)) (Const64 <typ.UInt64> [31]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != -1<<31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpNeg32, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [int64(32-log32(c))]))) (Const64 <typ.UInt64> [int64(log32(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v1 := b.NewValue0(v.Pos, OpRsh32Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(31)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(32 - log32(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log32(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 8
+ // result: (Sub32 <t> (Rsh64x64 <t> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(smagic32(c).m)]) (SignExt32to64 x)) (Const64 <typ.UInt64> [32+smagic32(c).s])) (Rsh64x64 <t> (SignExt32to64 x) (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 8) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic32(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt32to64, typ.Int64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m/2)]) x) (Const64 <typ.UInt64> [smagic32(c).s-1])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic32(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic32(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(31)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32 <t> x (Const32 [c]))
+ // cond: smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul
+ // result: (Sub32 <t> (Rsh32x64 <t> (Add32 <t> (Hmul32 <t> (Const32 <typ.UInt32> [int32(smagic32(c).m)]) x) x) (Const64 <typ.UInt64> [smagic32(c).s])) (Rsh32x64 <t> x (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(smagicOK32(c) && config.RegSize == 4 && smagic32(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd32, t)
+ v2 := b.NewValue0(v.Pos, OpHmul32, t)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(smagic32(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic32(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div32F (Const32F [c]) (Const32F [d]))
+ // cond: c/d == c/d
+ // result: (Const32F [c/d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div32F x (Const32F <t> [c]))
+ // cond: reciprocalExact32(c)
+ // result: (Mul32F x (Const32F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat32(v_1.AuxInt)
+ if !(reciprocalExact32(c)) {
+ break
+ }
+ v.reset(OpMul32F)
+ v0 := b.NewValue0(v.Pos, OpConst32F, t)
+ v0.AuxInt = float32ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c)/uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) / uint32(d)))
+ return true
+ }
+ // match: (Div32u n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Rsh32Ux64 n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+umagic32(c).m/2)]) x) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && umagic32(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + umagic32(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<31+(umagic32(c).m+1)/2)]) (Rsh32Ux64 <typ.UInt32> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic32(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v1.AuxInt = int32ToAuxInt(int32(1<<31 + (umagic32(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic32(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul
+ // result: (Rsh32Ux64 <typ.UInt32> (Avg32u x (Hmul32u <typ.UInt32> (Const32 <typ.UInt32> [int32(umagic32(c).m)]) x)) (Const64 <typ.UInt64> [umagic32(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 4 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v.Type = typ.UInt32
+ v0 := b.NewValue0(v.Pos, OpAvg32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpHmul32u, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(umagic32(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic32(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+umagic32(c).m/2)]) (ZeroExt32to64 x)) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && umagic32(c).m&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + umagic32(c).m/2))
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && c&1 == 0
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<31+(umagic32(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [32+umagic32(c).s-2])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && c&1 == 0) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(1<<31 + (umagic32(c).m+1)/2))
+ v3 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4.AddArg(x)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(1)
+ v3.AddArg2(v4, v5)
+ v1.AddArg2(v2, v3)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 2)
+ v0.AddArg2(v1, v6)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div32u x (Const32 [c]))
+ // cond: umagicOK32(c) && config.RegSize == 8 && config.useAvg
+ // result: (Trunc64to32 (Rsh64Ux64 <typ.UInt64> (Avg64u (Lsh64x64 <typ.UInt64> (ZeroExt32to64 x) (Const64 <typ.UInt64> [32])) (Mul64 <typ.UInt64> (Const64 <typ.UInt32> [int64(umagic32(c).m)]) (ZeroExt32to64 x))) (Const64 <typ.UInt64> [32+umagic32(c).s-1])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(umagicOK32(c) && config.RegSize == 8 && config.useAvg) {
+ break
+ }
+ v.reset(OpTrunc64to32)
+ v0 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v3.AddArg(x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(32)
+ v2.AddArg2(v3, v4)
+ v5 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt32)
+ v6.AuxInt = int64ToAuxInt(int64(umagic32(c).m))
+ v5.AddArg2(v6, v3)
+ v1.AddArg2(v2, v5)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32 + umagic32(c).s - 1)
+ v0.AddArg2(v1, v7)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c/d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64 n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: (Const64 [0])
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Neg64 (Div64 <t> n (Const64 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpDiv64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 (And64 <t> x (Neg64 <t> x)) (Const64 <typ.UInt64> [63]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpNeg64, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [int64(64-log64(c))]))) (Const64 <typ.UInt64> [int64(log64(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v1 := b.NewValue0(v.Pos, OpRsh64Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(63)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(64 - log64(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log64(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m/2)]) x) (Const64 <typ.UInt64> [smagic64(c).s-1])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpHmul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(smagic64(c).m / 2))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(smagic64(c).s - 1)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(63)
+ v4.AddArg2(x, v5)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64 <t> x (Const64 [c]))
+ // cond: smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul
+ // result: (Sub64 <t> (Rsh64x64 <t> (Add64 <t> (Hmul64 <t> (Const64 <typ.UInt64> [int64(smagic64(c).m)]) x) x) (Const64 <typ.UInt64> [smagic64(c).s])) (Rsh64x64 <t> x (Const64 <typ.UInt64> [63])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(smagicOK64(c) && smagic64(c).m&1 != 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpSub64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpAdd64, t)
+ v2 := b.NewValue0(v.Pos, OpHmul64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(smagic64(c).m))
+ v2.AddArg2(v3, x)
+ v1.AddArg2(v2, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(smagic64(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh64x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(63)
+ v5.AddArg2(x, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Div64F (Const64F [c]) (Const64F [d]))
+ // cond: c/d == c/d
+ // result: (Const64F [c/d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c/d == c/d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div64F x (Const64F <t> [c]))
+ // cond: reciprocalExact64(c)
+ // result: (Mul64F x (Const64F <t> [1/c]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64F {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToFloat64(v_1.AuxInt)
+ if !(reciprocalExact64(c)) {
+ break
+ }
+ v.reset(OpMul64F)
+ v0 := b.NewValue0(v.Pos, OpConst64F, t)
+ v0.AuxInt = float64ToAuxInt(1 / c)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
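The Div32F and Div64F rules above turn division by a constant into multiplication by its reciprocal only when reciprocalExact reports that 1/c is exactly representable (in effect, a suitable power of two); otherwise the two forms can round differently and the rewrite would change program results. A small illustration (not compiler code) of why the guard is needed:

package main

import "fmt"

func main() {
	x := 3.0

	// Safe: 1/4 is exact in float64, so x/4 and x*(1/4) always agree.
	fmt.Println(x/4.0 == x*(1.0/4.0)) // true

	// Unsafe: 1/10 is not exactly representable, and the results can differ.
	c := 10.0
	r := 1.0 / c            // rounded reciprocal of 10
	fmt.Println(x/c, x*r)   // 0.3 0.30000000000000004
	fmt.Println(x/c == x*r) // false
}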
+func rewriteValuegeneric_OpDiv64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Div64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c)/uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) / uint64(d)))
+ return true
+ }
+ // match: (Div64u n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u n (Const64 [-1<<63]))
+ // result: (Rsh64Ux64 n (Const64 <typ.UInt64> [63]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul
+ // result: (Add64 (Add64 <typ.UInt64> (Add64 <typ.UInt64> (Lsh64x64 <typ.UInt64> (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [32])) (ZeroExt32to64 (Div32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])))) (Mul64 <typ.UInt64> (ZeroExt32to64 <typ.UInt64> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)]))) (Const64 <typ.UInt64> [int64((1<<32)/c)]))) (ZeroExt32to64 (Div32u <typ.UInt32> (Add32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(c)])) (Mul32 <typ.UInt32> (Mod32u <typ.UInt32> (Trunc64to32 <typ.UInt32> (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [32]))) (Const32 <typ.UInt32> [int32(c)])) (Const32 <typ.UInt32> [int32((1<<32)%c)]))) (Const32 <typ.UInt32> [int32(c)]))))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c > 0 && c <= 0xFFFF && umagicOK32(int32(c)) && config.RegSize == 4 && config.useHmul) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpLsh64x64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v4 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v5 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v6 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v7 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v7.AuxInt = int64ToAuxInt(32)
+ v6.AddArg2(x, v7)
+ v5.AddArg(v6)
+ v8 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v8.AuxInt = int32ToAuxInt(int32(c))
+ v4.AddArg2(v5, v8)
+ v3.AddArg(v4)
+ v2.AddArg2(v3, v7)
+ v9 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v10 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v11 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v11.AddArg(x)
+ v10.AddArg2(v11, v8)
+ v9.AddArg(v10)
+ v1.AddArg2(v2, v9)
+ v12 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v13 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v14 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v14.AddArg2(v5, v8)
+ v13.AddArg(v14)
+ v15 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v15.AuxInt = int64ToAuxInt(int64((1 << 32) / c))
+ v12.AddArg2(v13, v15)
+ v0.AddArg2(v1, v12)
+ v16 := b.NewValue0(v.Pos, OpZeroExt32to64, typ.UInt64)
+ v17 := b.NewValue0(v.Pos, OpDiv32u, typ.UInt32)
+ v18 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v19 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v19.AddArg2(v11, v8)
+ v20 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v21 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v21.AuxInt = int32ToAuxInt(int32((1 << 32) % c))
+ v20.AddArg2(v14, v21)
+ v18.AddArg2(v19, v20)
+ v17.AddArg2(v18, v8)
+ v16.AddArg(v17)
+ v.AddArg2(v0, v16)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+umagic64(c).m/2)]) x) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && umagic64(c).m&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + umagic64(c).m/2))
+ v0.AddArg2(v1, x)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(1<<63+(umagic64(c).m+1)/2)]) (Rsh64Ux64 <typ.UInt64> x (Const64 <typ.UInt64> [1]))) (Const64 <typ.UInt64> [umagic64(c).s-2]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && c&1 == 0 && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(int64(1<<63 + (umagic64(c).m+1)/2))
+ v2 := b.NewValue0(v.Pos, OpRsh64Ux64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(1)
+ v2.AddArg2(x, v3)
+ v0.AddArg2(v1, v2)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(umagic64(c).s - 2)
+ v.AddArg2(v0, v4)
+ return true
+ }
+ // match: (Div64u x (Const64 [c]))
+ // cond: umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul
+ // result: (Rsh64Ux64 <typ.UInt64> (Avg64u x (Hmul64u <typ.UInt64> (Const64 <typ.UInt64> [int64(umagic64(c).m)]) x)) (Const64 <typ.UInt64> [umagic64(c).s-1]))
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(umagicOK64(c) && config.RegSize == 8 && config.useAvg && config.useHmul) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v.Type = typ.UInt64
+ v0 := b.NewValue0(v.Pos, OpAvg64u, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpHmul64u, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(umagic64(c).m))
+ v1.AddArg2(v2, x)
+ v0.AddArg2(x, v1)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(umagic64(c).s - 1)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ return false
+}
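Besides the magic-multiply forms, the Div64u rules above include a rule for 32-bit targets (config.RegSize == 4) that rebuilds a 64-bit division by a small constant c <= 0xFFFF from 32-bit operations on the two halves of the dividend: writing x = hi*2^32 + lo, x/c = (hi/c)<<32 + lo/c + (hi%c)*(2^32/c) + (lo%c + (hi%c)*(2^32%c))/c. A stand-alone sketch of that identity (the compiler additionally requires umagicOK32(c) so the resulting 32-bit divisions can themselves be strength-reduced; the function name here is invented), checked against direct 64-bit division on random inputs:

package main

import (
	"fmt"
	"math/rand"
)

// div64ViaDiv32 divides a 64-bit value by a small constant using only
// 32-bit divisions of the high and low halves plus a correction term
// built from the carried remainders.
func div64ViaDiv32(x uint64, c uint32) uint64 {
	hi, lo := uint32(x>>32), uint32(x)
	q := uint64(hi/c)<<32 + uint64(lo/c)
	q += uint64(hi%c) * ((1 << 32) / uint64(c))
	// (hi%c)*(2^32%c) + lo%c < c*c <= 0xFFFF*0xFFFF, so it fits in 32 bits.
	rem := lo%c + uint32((1<<32)%uint64(c))*(hi%c)
	return q + uint64(rem/c)
}

func main() {
	for i := 0; i < 1000000; i++ {
		x := rand.Uint64()
		c := uint32(rand.Intn(0xFFFF) + 1)
		if got, want := div64ViaDiv32(x, c), x/uint64(c); got != want {
			fmt.Printf("mismatch: %d/%d: got %d, want %d\n", x, c, got, want)
			return
		}
	}
	fmt.Println("32-bit reconstruction matches 64-bit division on sampled inputs")
}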
+func rewriteValuegeneric_OpDiv8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c/d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c / d)
+ return true
+ }
+ // match: (Div8 n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Neg8 (Div8 <t> n (Const8 <t> [-c])))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpDiv8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(-c)
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [-1<<7 ]))
+ // result: (Rsh8Ux64 (And8 <t> x (Neg8 <t> x)) (Const64 <typ.UInt64> [7 ]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != -1<<7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpNeg8, t)
+ v1.AddArg(x)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Div8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [int64( 8-log8(c))]))) (Const64 <typ.UInt64> [int64(log8(c))]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v1 := b.NewValue0(v.Pos, OpRsh8Ux64, t)
+ v2 := b.NewValue0(v.Pos, OpRsh8x64, t)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(7)
+ v2.AddArg2(n, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(8 - log8(c)))
+ v1.AddArg2(v2, v4)
+ v0.AddArg2(n, v1)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(int64(log8(c)))
+ v.AddArg2(v0, v5)
+ return true
+ }
+ // match: (Div8 <t> x (Const8 [c]))
+ // cond: smagicOK8(c)
+ // result: (Sub8 <t> (Rsh32x64 <t> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(smagic8(c).m)]) (SignExt8to32 x)) (Const64 <typ.UInt64> [8+smagic8(c).s])) (Rsh32x64 <t> (SignExt8to32 x) (Const64 <typ.UInt64> [31])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(smagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(smagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + smagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v5 := b.NewValue0(v.Pos, OpRsh32x64, t)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(31)
+ v5.AddArg2(v3, v6)
+ v.AddArg2(v0, v5)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpDiv8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Div8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c)/uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) / uint8(d)))
+ return true
+ }
+ // match: (Div8u n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Rsh8Ux64 n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Div8u x (Const8 [c]))
+ // cond: umagicOK8(c)
+ // result: (Trunc32to8 (Rsh32Ux64 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(1<<8+umagic8(c).m)]) (ZeroExt8to32 x)) (Const64 <typ.UInt64> [8+umagic8(c).s])))
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(umagicOK8(c)) {
+ break
+ }
+ v.reset(OpTrunc32to8)
+ v0 := b.NewValue0(v.Pos, OpRsh32Ux64, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(1<<8 + umagic8(c).m))
+ v3 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v3.AddArg(x)
+ v1.AddArg2(v2, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(8 + umagic8(c).s)
+ v0.AddArg2(v1, v4)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq16 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Eq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16u x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt16to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint16(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && udivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt16to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (Mod16 x (Const16 [c])) (Const16 [0]))
+ // cond: x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt16to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(x.Op != OpConst16 && sdivisibleOK16(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt16to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc64to16 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt16to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<16+umagic16(c).m) && s == 16+umagic16(c).s && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+umagic16(c).m/2) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (Rsh32Ux64 (ZeroExt16to32 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt16to32 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<15+(umagic16(c).m+1)/2) && s == 16+umagic16(c).s-2 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Trunc32to16 (Rsh32Ux64 (Avg32u (Lsh32x64 (ZeroExt16to32 x) (Const64 [16])) mul:(Mul32 (Const32 [m]) (ZeroExt16to32 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(udivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(16-udivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(udivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to16 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt16to32 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 16 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic16(c).m) && s == 16+umagic16(c).s-1 && x.Op != OpConst16 && udivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v2.AuxInt = int16ToAuxInt(int16(udivisible16(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(16 - udivisible16(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(udivisible16(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 x (Mul16 (Const16 [c]) (Sub16 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt16to32 x)) (Const64 [s])) (Rsh32x64 (SignExt16to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)
+ // result: (Leq16U (RotateLeft16 <typ.UInt16> (Add16 <typ.UInt16> (Mul16 <typ.UInt16> (Const16 <typ.UInt16> [int16(sdivisible16(c).m)]) x) (Const16 <typ.UInt16> [int16(sdivisible16(c).a)]) ) (Const16 <typ.UInt16> [int16(16-sdivisible16(c).k)]) ) (Const16 <typ.UInt16> [int16(sdivisible16(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub16 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt16to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt16to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic16(c).m) && s == 16+smagic16(c).s && x.Op != OpConst16 && sdivisibleOK16(c)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft16, typ.UInt16)
+ v1 := b.NewValue0(v.Pos, OpAdd16, typ.UInt16)
+ v2 := b.NewValue0(v.Pos, OpMul16, typ.UInt16)
+ v3 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v3.AuxInt = int16ToAuxInt(int16(sdivisible16(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v4.AuxInt = int16ToAuxInt(int16(sdivisible16(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v5.AuxInt = int16ToAuxInt(int16(16 - sdivisible16(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst16, typ.UInt16)
+ v6.AuxInt = int16ToAuxInt(int16(sdivisible16(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Eq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq32 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Eq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+umagic32(c).m/2) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 mul:(Hmul32u (Const32 <typ.UInt32> [m]) (Rsh32Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 || mul_0.Type != typ.UInt32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<31+(umagic32(c).m+1)/2) && s == umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Rsh32Ux64 (Avg32u x mul:(Hmul32u (Const32 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg32u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul32u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(umagic32(c).m) && s == umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+umagic32(c).m/2) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 mul:(Mul64 (Const64 [m]) (Rsh64Ux64 (ZeroExt32to64 x) (Const64 [1]))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ mul_1_0 := mul_1.Args[0]
+ if mul_1_0.Op != OpZeroExt32to64 || x != mul_1_0.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<31+(umagic32(c).m+1)/2) && s == 32+umagic32(c).s-2 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Trunc64to32 (Rsh64Ux64 (Avg64u (Lsh64x64 (ZeroExt32to64 x) (Const64 [32])) mul:(Mul64 (Const64 [m]) (ZeroExt32to64 x))) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(udivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(32-udivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(udivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc64to32 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ if v_1_1_0_0_0.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1_1_0_0_0.Args[1]
+ v_1_1_0_0_0_0 := v_1_1_0_0_0.Args[0]
+ if v_1_1_0_0_0_0.Op != OpZeroExt32to64 || x != v_1_1_0_0_0_0.Args[0] {
+ continue
+ }
+ v_1_1_0_0_0_1 := v_1_1_0_0_0.Args[1]
+ if v_1_1_0_0_0_1.Op != OpConst64 || auxIntToInt64(v_1_1_0_0_0_1.AuxInt) != 32 {
+ continue
+ }
+ mul := v_1_1_0_0.Args[1]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic32(c).m) && s == 32+umagic32(c).s-1 && x.Op != OpConst32 && udivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(udivisible32(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(32 - udivisible32(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(udivisible32(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh64x64 mul:(Mul64 (Const64 [m]) (SignExt32to64 x)) (Const64 [s])) (Rsh64x64 (SignExt32to64 x) (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt32to64 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt32to64 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic32(c).m) && s == 32+smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 mul:(Hmul32 (Const32 [m]) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m/2) && s == smagic32(c).s-1 && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq32 x (Mul32 (Const32 [c]) (Sub32 (Rsh32x64 (Add32 mul:(Hmul32 (Const32 [m]) x) x) (Const64 [s])) (Rsh32x64 x (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)
+ // result: (Leq32U (RotateLeft32 <typ.UInt32> (Add32 <typ.UInt32> (Mul32 <typ.UInt32> (Const32 <typ.UInt32> [int32(sdivisible32(c).m)]) x) (Const32 <typ.UInt32> [int32(sdivisible32(c).a)]) ) (Const32 <typ.UInt32> [int32(32-sdivisible32(c).k)]) ) (Const32 <typ.UInt32> [int32(sdivisible32(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub32 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic32(c).m) && s == smagic32(c).s && x.Op != OpConst32 && sdivisibleOK32(c)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft32, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpAdd32, typ.UInt32)
+ v2 := b.NewValue0(v.Pos, OpMul32, typ.UInt32)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(int32(sdivisible32(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v4.AuxInt = int32ToAuxInt(int32(sdivisible32(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v5.AuxInt = int32ToAuxInt(int32(32 - sdivisible32(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v6.AuxInt = int32ToAuxInt(int32(sdivisible32(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
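+	// The Eq32 divisibility rules above replace x == c*(x/c) with
+	// rotl32(x*m, 32-k) <= max, using m, k and max from udivisible32(c)
+	// (sdivisible32(c) for the signed forms, which also add the offset a
+	// before rotating). The underlying trick for an odd divisor c is the
+	// modular-inverse check; a minimal sketch, not the compiler's exact
+	// constants, for c == 5:
+	//
+	//	const m = 0xCCCCCCCD // inverse of 5 mod 1<<32, since 5*m == 1 (mod 1<<32)
+	//	divisibleBy5 := func(x uint32) bool { return x*m <= 0xFFFFFFFF/5 }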
+ // match: (Eq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Eq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Eq64 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Eq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) x) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+umagic64(c).m/2) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 mul:(Hmul64u (Const64 [m]) (Rsh64Ux64 x (Const64 [1]))) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ mul := v_1_1.Args[0]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if mul_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = mul_1.Args[1]
+ if x != mul_1.Args[0] {
+ continue
+ }
+ mul_1_1 := mul_1.Args[1]
+ if mul_1_1.Op != OpConst64 || auxIntToInt64(mul_1_1.AuxInt) != 1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(1<<63+(umagic64(c).m+1)/2) && s == umagic64(c).s-2 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Rsh64Ux64 (Avg64u x mul:(Hmul64u (Const64 [m]) x)) (Const64 [s])) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(udivisible64(c).m)]) x) (Const64 <typ.UInt64> [64-udivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(udivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpRsh64Ux64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpAvg64u {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ if x != v_1_1_0.Args[0] {
+ continue
+ }
+ mul := v_1_1_0.Args[1]
+ if mul.Op != OpHmul64u {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(umagic64(c).m) && s == umagic64(c).s-1 && x.Op != OpConst64 && udivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v2.AuxInt = int64ToAuxInt(int64(udivisible64(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(64 - udivisible64(c).k)
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(udivisible64(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 mul:(Hmul64 (Const64 [m]) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m/2) && s == smagic64(c).s-1 && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq64 x (Mul64 (Const64 [c]) (Sub64 (Rsh64x64 (Add64 mul:(Hmul64 (Const64 [m]) x) x) (Const64 [s])) (Rsh64x64 x (Const64 [63]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)
+ // result: (Leq64U (RotateLeft64 <typ.UInt64> (Add64 <typ.UInt64> (Mul64 <typ.UInt64> (Const64 <typ.UInt64> [int64(sdivisible64(c).m)]) x) (Const64 <typ.UInt64> [int64(sdivisible64(c).a)]) ) (Const64 <typ.UInt64> [64-sdivisible64(c).k]) ) (Const64 <typ.UInt64> [int64(sdivisible64(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub64 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ v_1_1_0_0 := v_1_1_0.Args[0]
+ if v_1_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1_1_0_0.Args[1]
+ v_1_1_0_0_0 := v_1_1_0_0.Args[0]
+ v_1_1_0_0_1 := v_1_1_0_0.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, v_1_1_0_0_0, v_1_1_0_0_1 = _i2+1, v_1_1_0_0_1, v_1_1_0_0_0 {
+ mul := v_1_1_0_0_0
+ if mul.Op != OpHmul64 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i3 := 0; _i3 <= 1; _i3, mul_0, mul_1 = _i3+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst64 {
+ continue
+ }
+ m := auxIntToInt64(mul_0.AuxInt)
+ if x != mul_1 || x != v_1_1_0_0_1 {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ if x != v_1_1_1.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 63 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int64(smagic64(c).m) && s == smagic64(c).s && x.Op != OpConst64 && sdivisibleOK64(c)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft64, typ.UInt64)
+ v1 := b.NewValue0(v.Pos, OpAdd64, typ.UInt64)
+ v2 := b.NewValue0(v.Pos, OpMul64, typ.UInt64)
+ v3 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v3.AuxInt = int64ToAuxInt(int64(sdivisible64(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v4.AuxInt = int64ToAuxInt(int64(sdivisible64(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v5.AuxInt = int64ToAuxInt(64 - sdivisible64(c).k)
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v6.AuxInt = int64ToAuxInt(int64(sdivisible64(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ }
+ break
+ }
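+	// The Eq64 divisibility rules mirror the 32-bit ones but match the
+	// Hmul64/Hmul64u (and Avg64u) expansions of 64-bit constant division,
+	// folding them into a Mul64/RotateLeft64/Leq64U sequence (plus Add64 in
+	// the signed cases) built from udivisible64(c) / sdivisible64(c).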
+ // match: (Eq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Eq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Eq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (Eq8 x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (Eq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Eq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8u x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32u <typ.UInt32> (ZeroExt8to32 <typ.UInt32> x) (Const32 <typ.UInt32> [int32(uint8(c))])) (Const32 <typ.UInt32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8u {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && udivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32u, typ.UInt32)
+ v1 := b.NewValue0(v.Pos, OpZeroExt8to32, typ.UInt32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v2.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.UInt32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (Mod8 x (Const8 [c])) (Const8 [0]))
+ // cond: x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)
+ // result: (Eq32 (Mod32 <typ.Int32> (SignExt8to32 <typ.Int32> x) (Const32 <typ.Int32> [int32(c)])) (Const32 <typ.Int32> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMod8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(x.Op != OpConst8 && sdivisibleOK8(c) && !hasSmallRotate(config)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpMod32, typ.Int32)
+ v1 := b.NewValue0(v.Pos, OpSignExt8to32, typ.Int32)
+ v1.AddArg(x)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(c))
+ v0.AddArg2(v1, v2)
+ v3 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v3.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v3)
+ return true
+ }
+ break
+ }
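+	// On targets without small rotates (!hasSmallRotate(config)), the two
+	// rules above widen 8-bit modulus-by-constant checks to Mod32/Mod32u
+	// with explicit zero/sign extension instead of producing a
+	// RotateLeft8-based divisibility test.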
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Trunc32to8 (Rsh32Ux64 mul:(Mul32 (Const32 [m]) (ZeroExt8to32 x)) (Const64 [s]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(udivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(8-udivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(udivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpTrunc32to8 {
+ continue
+ }
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32Ux64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpZeroExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ if !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(1<<8+umagic8(c).m) && s == 8+umagic8(c).s && x.Op != OpConst8 && udivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v2.AuxInt = int8ToAuxInt(int8(udivisible8(c).m))
+ v1.AddArg2(v2, x)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(8 - udivisible8(c).k))
+ v0.AddArg2(v1, v3)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(udivisible8(c).max))
+ v.AddArg2(v0, v4)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 x (Mul8 (Const8 [c]) (Sub8 (Rsh32x64 mul:(Mul32 (Const32 [m]) (SignExt8to32 x)) (Const64 [s])) (Rsh32x64 (SignExt8to32 x) (Const64 [31]))) ) )
+ // cond: v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)
+ // result: (Leq8U (RotateLeft8 <typ.UInt8> (Add8 <typ.UInt8> (Mul8 <typ.UInt8> (Const8 <typ.UInt8> [int8(sdivisible8(c).m)]) x) (Const8 <typ.UInt8> [int8(sdivisible8(c).a)]) ) (Const8 <typ.UInt8> [int8(8-sdivisible8(c).k)]) ) (Const8 <typ.UInt8> [int8(sdivisible8(c).max)]) )
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_0.AuxInt)
+ if v_1_1.Op != OpSub8 {
+ continue
+ }
+ _ = v_1_1.Args[1]
+ v_1_1_0 := v_1_1.Args[0]
+ if v_1_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_0.Args[1]
+ mul := v_1_1_0.Args[0]
+ if mul.Op != OpMul32 {
+ continue
+ }
+ _ = mul.Args[1]
+ mul_0 := mul.Args[0]
+ mul_1 := mul.Args[1]
+ for _i2 := 0; _i2 <= 1; _i2, mul_0, mul_1 = _i2+1, mul_1, mul_0 {
+ if mul_0.Op != OpConst32 {
+ continue
+ }
+ m := auxIntToInt32(mul_0.AuxInt)
+ if mul_1.Op != OpSignExt8to32 || x != mul_1.Args[0] {
+ continue
+ }
+ v_1_1_0_1 := v_1_1_0.Args[1]
+ if v_1_1_0_1.Op != OpConst64 {
+ continue
+ }
+ s := auxIntToInt64(v_1_1_0_1.AuxInt)
+ v_1_1_1 := v_1_1.Args[1]
+ if v_1_1_1.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_1_1.Args[1]
+ v_1_1_1_0 := v_1_1_1.Args[0]
+ if v_1_1_1_0.Op != OpSignExt8to32 || x != v_1_1_1_0.Args[0] {
+ continue
+ }
+ v_1_1_1_1 := v_1_1_1.Args[1]
+ if v_1_1_1_1.Op != OpConst64 || auxIntToInt64(v_1_1_1_1.AuxInt) != 31 || !(v.Block.Func.pass.name != "opt" && mul.Uses == 1 && m == int32(smagic8(c).m) && s == 8+smagic8(c).s && x.Op != OpConst8 && sdivisibleOK8(c)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpRotateLeft8, typ.UInt8)
+ v1 := b.NewValue0(v.Pos, OpAdd8, typ.UInt8)
+ v2 := b.NewValue0(v.Pos, OpMul8, typ.UInt8)
+ v3 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v3.AuxInt = int8ToAuxInt(int8(sdivisible8(c).m))
+ v2.AddArg2(v3, x)
+ v4 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v4.AuxInt = int8ToAuxInt(int8(sdivisible8(c).a))
+ v1.AddArg2(v2, v4)
+ v5 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v5.AuxInt = int8ToAuxInt(int8(8 - sdivisible8(c).k))
+ v0.AddArg2(v1, v5)
+ v6 := b.NewValue0(v.Pos, OpConst8, typ.UInt8)
+ v6.AuxInt = int8ToAuxInt(int8(sdivisible8(c).max))
+ v.AddArg2(v0, v6)
+ return true
+ }
+ }
+ }
+ break
+ }
+ // match: (Eq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Eq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Eq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Eq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (EqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [false]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (EqB (ConstBool [true]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqInter x y)
+ // result: (EqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpEqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqPtr x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (EqPtr (Addr {x} _) (Addr {y} _))
+ // result: (ConstBool [x == y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _)))
+ // result: (ConstBool [x == y && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _)))
+ // result: (ConstBool [x == y && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _))
+ // result: (ConstBool [x == y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x == y && o == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x == y && o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x == y && o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == 0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 == o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 == o2)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c == d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c == d)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [false])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (Not (IsNonNil o1))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(o1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const32 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (Const64 [0]) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (EqPtr (ConstNil) p)
+ // result: (Not (IsNonNil p))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpNot)
+ v0 := b.NewValue0(v.Pos, OpIsNonNil, typ.Bool)
+ v0.AddArg(p)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpEqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (EqSlice x y)
+ // result: (EqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpEqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpIMake(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IMake _typ (StructMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
+ // match: (IMake _typ (ArrayMake1 val))
+ // result: (IMake _typ val)
+ for {
+ _typ := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ val := v_1.Args[0]
+ v.reset(OpIMake)
+ v.AddArg2(_typ, val)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpInterLECall(v *Value) bool {
+ // match: (InterLECall [argsize] {auxCall} (Load (OffPtr [off] (ITab (IMake (Addr {itab} (SB)) _))) _) ___)
+ // cond: devirtLESym(v, auxCall, itab, off) != nil
+ // result: devirtLECall(v, devirtLESym(v, auxCall, itab, off))
+ for {
+ if len(v.Args) < 1 {
+ break
+ }
+ auxCall := auxToCall(v.Aux)
+ v_0 := v.Args[0]
+ if v_0.Op != OpLoad {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpOffPtr {
+ break
+ }
+ off := auxIntToInt64(v_0_0.AuxInt)
+ v_0_0_0 := v_0_0.Args[0]
+ if v_0_0_0.Op != OpITab {
+ break
+ }
+ v_0_0_0_0 := v_0_0_0.Args[0]
+ if v_0_0_0_0.Op != OpIMake {
+ break
+ }
+ v_0_0_0_0_0 := v_0_0_0_0.Args[0]
+ if v_0_0_0_0_0.Op != OpAddr {
+ break
+ }
+ itab := auxToSym(v_0_0_0_0_0.Aux)
+ v_0_0_0_0_0_0 := v_0_0_0_0_0.Args[0]
+ if v_0_0_0_0_0_0.Op != OpSB || !(devirtLESym(v, auxCall, itab, off) != nil) {
+ break
+ }
+ v.copyOf(devirtLECall(v, devirtLESym(v, auxCall, itab, off)))
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsInBounds (ZeroExt8to32 _) (Const32 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 _) (Const64 [c]))
+ // cond: (1 << 8) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 8) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 _) (Const32 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 || v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 _) (Const64 [c]))
+ // cond: (1 << 16) <= c
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 || v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !((1 << 16) <= c) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsInBounds (And8 (Const8 [c]) _) (Const8 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to16 (And8 (Const8 [c]) _)) (Const16 [d]))
+ // cond: 0 <= c && int16(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && int16(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to32 (And8 (Const8 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt8to64 (And8 (Const8 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd8 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And16 (Const16 [c]) _) (Const16 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to32 (And16 (Const16 [c]) _)) (Const32 [d]))
+ // cond: 0 <= c && int32(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && int32(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt16to64 (And16 (Const16 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd16 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (ZeroExt32to64 (And32 (Const32 [c]) _)) (Const64 [d]))
+ // cond: 0 <= c && int64(c) < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0_0 := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0_0, v_0_0_1 = _i0+1, v_0_0_1, v_0_0_0 {
+ if v_0_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && int64(c) < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c < d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c < d)
+ return true
+ }
+ // match: (IsInBounds (Mod32u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod32u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Mod64u _ y) y)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpMod64u {
+ break
+ }
+ y := v_0.Args[1]
+ if y != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to64 (Rsh8Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to32 (Rsh8Ux64 _ (Const64 [c]))) (Const32 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt8to16 (Rsh8Ux64 _ (Const64 [c]))) (Const16 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh8Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 8 && 1<<uint( 8-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 8 && 1<<uint(8-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to64 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt16to32 (Rsh16Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh16Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 16 && 1<<uint(16-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 16 && 1<<uint(16-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (ZeroExt32to64 (Rsh32Ux64 _ (Const64 [c]))) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh32Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 32 && 1<<uint(32-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 32 && 1<<uint(32-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsInBounds (Rsh64Ux64 _ (Const64 [c])) (Const64 [d]))
+ // cond: 0 < c && c < 64 && 1<<uint(64-c)-1 < d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 < c && c < 64 && 1<<uint(64-c)-1 < d) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsNonNil(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (IsNonNil (ConstNil))
+ // result: (ConstBool [false])
+ for {
+ if v_0.Op != OpConstNil {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (IsNonNil (Const32 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Const64 [c]))
+ // result: (ConstBool [c != 0])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != 0)
+ return true
+ }
+ // match: (IsNonNil (Addr _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsNonNil (LocalAddr _ _))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpLocalAddr {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpIsSliceInBounds(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (IsSliceInBounds x x)
+ // result: (ConstBool [true])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (And32 (Const32 [c]) _) (Const32 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (And64 (Const64 [c]) _) (Const64 [d]))
+ // cond: 0 <= c && c <= d
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(0 <= c && c <= d) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (IsSliceInBounds (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ // match: (IsSliceInBounds (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [0 <= c && c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(0 <= c && c <= d)
+ return true
+ }
+ // match: (IsSliceInBounds (SliceLen x) (SliceCap x))
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpSliceLen {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpSliceCap || x != v_1.Args[0] {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq16 (Const16 [0]) (And16 _ (Const16 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpAnd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq16 (Const16 [0]) (Rsh16Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 || v_1.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) <= uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) <= uint16(d))
+ return true
+ }
+ // match: (Leq16U (Const16 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq32 (Const32 [0]) (And32 _ (Const32 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpAnd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq32 (Const32 [0]) (Rsh32Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 || v_1.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) <= uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) <= uint32(d))
+ return true
+ }
+ // match: (Leq32U (Const32 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq64 (Const64 [0]) (And64 _ (Const64 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpAnd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq64 (Const64 [0]) (Rsh64Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 || v_1.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) <= uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) <= uint64(d))
+ return true
+ }
+ // match: (Leq64U (Const64 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c <= d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c <= d)
+ return true
+ }
+ // match: (Leq8 (Const8 [0]) (And8 _ (Const8 [c])))
+ // cond: c >= 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpAnd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= 0) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (Leq8 (Const8 [0]) (Rsh8Ux64 _ (Const64 [c])))
+ // cond: c > 0
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 || v_1.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1_1.AuxInt)
+ if !(c > 0) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLeq8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Leq8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) <= uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) <= uint8(d))
+ return true
+ }
+ // match: (Leq8U (Const8 [0]) _)
+ // result: (ConstBool [true])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less16 (Const16 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq16 (Const16 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less16 x (Const16 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq16 (Const16 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt16(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess16U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less16U (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [uint16(c) < uint16(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint16(c) < uint16(d))
+ return true
+ }
+ // match: (Less16U _ (Const16 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less32 (Const32 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq32 (Const32 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less32 x (Const32 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq32 (Const32 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt32(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess32U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less32U (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [uint32(c) < uint32(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint32(c) < uint32(d))
+ return true
+ }
+ // match: (Less32U _ (Const32 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less64 (Const64 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq64 (Const64 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less64 x (Const64 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq64 (Const64 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess64U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less64U (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [uint64(c) < uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint64(c) < uint64(d))
+ return true
+ }
+ // match: (Less64U _ (Const64 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Less8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c < d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c < d)
+ return true
+ }
+ // match: (Less8 (Const8 <t> [0]) x)
+ // cond: isNonNegative(x)
+ // result: (Neq8 (Const8 <t> [0]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ if auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ x := v_1
+ if !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Less8 x (Const8 <t> [1]))
+ // cond: isNonNegative(x)
+ // result: (Eq8 (Const8 <t> [0]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt8(v_1.AuxInt) != 1 || !(isNonNegative(x)) {
+ break
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLess8U(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Less8U (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [ uint8(c) < uint8(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(uint8(c) < uint8(d))
+ return true
+ }
+ // match: (Less8U _ (Const8 [0]))
+ // result: (ConstBool [false])
+ for {
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLoad(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (Load <t1> p1 (Store {t2} p2 x _))
+ // cond: isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ x := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size()) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 x _)))
+ // cond: isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ x := v_1_2.Args[1]
+ p3 := v_1_2.Args[0]
+ if !(isSamePtr(p1, p3) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p3, t3.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 x _))))
+ // cond: isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ x := v_1_2_2.Args[1]
+ p4 := v_1_2_2.Args[0]
+ if !(isSamePtr(p1, p4) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p4, t4.Size(), p2, t2.Size()) && disjoint(p4, t4.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 x _)))))
+ // cond: isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())
+ // result: x
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ x := v_1_2_2_2.Args[1]
+ p5 := v_1_2_2_2.Args[0]
+ if !(isSamePtr(p1, p5) && t1.Compare(x.Type) == types.CMPeq && t1.Size() == t2.Size() && disjoint(p5, t5.Size(), p2, t2.Size()) && disjoint(p5, t5.Size(), p3, t3.Size()) && disjoint(p5, t5.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))
+ // result: (Const64F [math.Float64frombits(uint64(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitFloat(t1) && !math.IsNaN(math.Float64frombits(uint64(x)))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Float64frombits(uint64(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32 [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))
+ // result: (Const32F [math.Float32frombits(uint32(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitFloat(t1) && !math.IsNaN(float64(math.Float32frombits(uint32(x))))) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(math.Float32frombits(uint32(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const64F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 8 && is64BitInt(t1)
+ // result: (Const64 [int64(math.Float64bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64F {
+ break
+ }
+ x := auxIntToFloat64(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 8 && is64BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(math.Float64bits(x)))
+ return true
+ }
+ // match: (Load <t1> p1 (Store {t2} p2 (Const32F [x]) _))
+ // cond: isSamePtr(p1,p2) && sizeof(t2) == 4 && is32BitInt(t1)
+ // result: (Const32 [int32(math.Float32bits(x))])
+ for {
+ t1 := v.Type
+ p1 := v_0
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[1]
+ p2 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32F {
+ break
+ }
+ x := auxIntToFloat32(v_1_1.AuxInt)
+ if !(isSamePtr(p1, p2) && sizeof(t2) == 4 && is32BitInt(t1)) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(math.Float32bits(x)))
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ mem:(Zero [n] p3 _)))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p3) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ mem := v_1.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p3 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p3) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p3)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ mem:(Zero [n] p4 _))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p4) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ mem := v_1_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p4 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p4) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p4)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ mem:(Zero [n] p5 _)))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p5) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ mem := v_1_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p5 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p5) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p5)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> op:(OffPtr [o1] p1) (Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ mem:(Zero [n] p6 _))))))
+ // cond: o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())
+ // result: @mem.Block (Load <t1> (OffPtr <op.Type> [o1] p6) mem)
+ for {
+ t1 := v.Type
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ if v_1.Op != OpStore {
+ break
+ }
+ t2 := auxToType(v_1.Aux)
+ _ = v_1.Args[2]
+ p2 := v_1.Args[0]
+ v_1_2 := v_1.Args[2]
+ if v_1_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(v_1_2.Aux)
+ _ = v_1_2.Args[2]
+ p3 := v_1_2.Args[0]
+ v_1_2_2 := v_1_2.Args[2]
+ if v_1_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(v_1_2_2.Aux)
+ _ = v_1_2_2.Args[2]
+ p4 := v_1_2_2.Args[0]
+ v_1_2_2_2 := v_1_2_2.Args[2]
+ if v_1_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(v_1_2_2_2.Aux)
+ _ = v_1_2_2_2.Args[2]
+ p5 := v_1_2_2_2.Args[0]
+ mem := v_1_2_2_2.Args[2]
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p6 := mem.Args[0]
+ if !(o1 >= 0 && o1+t1.Size() <= n && isSamePtr(p1, p6) && fe.CanSSA(t1) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size()) && disjoint(op, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ b = mem.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, t1)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, op.Type)
+ v1.AuxInt = int64ToAuxInt(o1)
+ v1.AddArg(p6)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: t1.IsBoolean() && isSamePtr(p1, p2) && n >= o + 1
+ // result: (ConstBool [false])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(t1.IsBoolean() && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is8BitInt(t1) && isSamePtr(p1, p2) && n >= o + 1
+ // result: (Const8 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is8BitInt(t1) && isSamePtr(p1, p2) && n >= o+1) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is16BitInt(t1) && isSamePtr(p1, p2) && n >= o + 2
+ // result: (Const16 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is16BitInt(t1) && isSamePtr(p1, p2) && n >= o+2) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitInt(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitInt(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitInt(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64 [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitInt(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 4
+ // result: (Const32F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is32BitFloat(t1) && isSamePtr(p1, p2) && n >= o+4) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t1> (OffPtr [o] p1) (Zero [n] p2 _))
+ // cond: is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o + 8
+ // result: (Const64F [0])
+ for {
+ t1 := v.Type
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(is64BitFloat(t1) && isSamePtr(p1, p2) && n >= o+8) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(0)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)
+ // result: (StructMake0)
+ for {
+ t := v.Type
+ if !(t.IsStruct() && t.NumFields() == 0 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)
+ // result: (StructMake1 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)
+ // result: (StructMake2 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 2 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake2)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)
+ // result: (StructMake3 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 3 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake3)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v.AddArg3(v0, v2, v4)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)
+ // result: (StructMake4 (Load <t.FieldType(0)> (OffPtr <t.FieldType(0).PtrTo()> [0] ptr) mem) (Load <t.FieldType(1)> (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] ptr) mem) (Load <t.FieldType(2)> (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] ptr) mem) (Load <t.FieldType(3)> (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] ptr) mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsStruct() && t.NumFields() == 4 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpStructMake4)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.FieldType(0))
+ v1 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v1.AuxInt = int64ToAuxInt(0)
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ v2 := b.NewValue0(v.Pos, OpLoad, t.FieldType(1))
+ v3 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v3.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v3.AddArg(ptr)
+ v2.AddArg2(v3, mem)
+ v4 := b.NewValue0(v.Pos, OpLoad, t.FieldType(2))
+ v5 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v5.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v5.AddArg(ptr)
+ v4.AddArg2(v5, mem)
+ v6 := b.NewValue0(v.Pos, OpLoad, t.FieldType(3))
+ v7 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v7.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v7.AddArg(ptr)
+ v6.AddArg2(v7, mem)
+ v.AddArg4(v0, v2, v4, v6)
+ return true
+ }
+ // match: (Load <t> _ _)
+ // cond: t.IsArray() && t.NumElem() == 0
+ // result: (ArrayMake0)
+ for {
+ t := v.Type
+ if !(t.IsArray() && t.NumElem() == 0) {
+ break
+ }
+ v.reset(OpArrayMake0)
+ return true
+ }
+ // match: (Load <t> ptr mem)
+ // cond: t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)
+ // result: (ArrayMake1 (Load <t.Elem()> ptr mem))
+ for {
+ t := v.Type
+ ptr := v_0
+ mem := v_1
+ if !(t.IsArray() && t.NumElem() == 1 && fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpArrayMake1)
+ v0 := b.NewValue0(v.Pos, OpLoad, t.Elem())
+ v0.AddArg2(ptr, mem)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
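Editorial sketch, not part of the generated diff: the Load-of-Store rules near the top of this function forward a constant that was just stored to memory directly to the load, reinterpreting its bits when the load type differs from the stored type (for example, loading a stored Const64F as a 64-bit integer via math.Float64bits). A minimal standalone Go program, with hypothetical values, showing the bit round-trip these rules rely on:

package main

import (
	"fmt"
	"math"
)

func main() {
	f := 3.25
	// What the rewrite's (Const64 [int64(math.Float64bits(x))]) would hold.
	bits := int64(math.Float64bits(f))
	// Reading the same bytes back as a float recovers the original value.
	back := math.Float64frombits(uint64(bits))
	fmt.Println(bits, back == f) // prints the raw bit pattern and "true"
}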
+func rewriteValuegeneric_OpLsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x16 <t> x (Const16 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x32 <t> x (Const32 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh16x64 <t> (Lsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x64 (Rsh16Ux64 (Lsh16x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh16x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
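Editorial sketch, not part of the generated diff: the Lsh16x64 rules above fold shifts of constants, drop shifts by zero, turn shift counts of 16 or more into a zero result, and the narrower Lsh16x8/x16/x32 variants first zero-extend the count to 64 bits. A minimal standalone Go program, with hypothetical values, checking the two arithmetic facts this depends on:

package main

import "fmt"

func main() {
	var x int16 = 0x1234
	var c int16 = 3
	// Zero-extending the shift count does not change the shift amount.
	fmt.Println(x<<uint16(c) == x<<uint64(uint16(c))) // true
	// Shifting a 16-bit value left by 16 or more always yields 0 in Go.
	fmt.Println(x<<20 == 0) // true
}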
+func rewriteValuegeneric_OpLsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh16x8 <t> x (Const8 [c]))
+ // result: (Lsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x16 <t> x (Const16 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x32 <t> x (Const32 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh32x64 <t> (Lsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x64 (Rsh32Ux64 (Lsh32x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh32x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh32x8 <t> x (Const8 [c]))
+ // result: (Lsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x16 <t> x (Const16 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x32 <t> x (Const32 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh64x64 <t> (Lsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x64 (Rsh64Ux64 (Lsh64x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh64x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh64x8 <t> x (Const8 [c]))
+ // result: (Lsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x16 <t> x (Const16 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x32 <t> x (Const32 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Lsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c << uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c << uint64(d))
+ return true
+ }
+ // match: (Lsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Lsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Lsh8x64 <t> (Lsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Lsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x64 (Rsh8Ux64 (Lsh8x64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Lsh8x64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpLsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Lsh8x8 <t> x (Const8 [c]))
+ // result: (Lsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpLsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Lsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16 (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [c % d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> n (Const16 [c]))
+ // cond: c < 0 && c != -1<<15
+ // result: (Mod16 <t> n (Const16 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c < 0 && c != -1<<15) {
+ break
+ }
+ v.reset(OpMod16)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16 <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && (c > 0 || c == -1<<15)
+ // result: (Sub16 x (Mul16 <t> (Div16 <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && (c > 0 || c == -1<<15)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod16u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod16u (Const16 [c]) (Const16 [d]))
+ // cond: d != 0
+ // result: (Const16 [int16(uint16(c) % uint16(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) % uint16(d)))
+ return true
+ }
+ // match: (Mod16u <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (And16 n (Const16 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ break
+ }
+ v.reset(OpAnd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod16u <t> x (Const16 [c]))
+ // cond: x.Op != OpConst16 && c > 0 && umagicOK16(c)
+ // result: (Sub16 x (Mul16 <t> (Div16u <t> x (Const16 <t> [c])) (Const16 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16 && c > 0 && umagicOK16(c)) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v1 := b.NewValue0(v.Pos, OpDiv16u, t)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32 (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [c % d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> n (Const32 [c]))
+ // cond: c < 0 && c != -1<<31
+ // result: (Mod32 <t> n (Const32 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c < 0 && c != -1<<31) {
+ break
+ }
+ v.reset(OpMod32)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32 <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && (c > 0 || c == -1<<31)
+ // result: (Sub32 x (Mul32 <t> (Div32 <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && (c > 0 || c == -1<<31)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod32u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod32u (Const32 [c]) (Const32 [d]))
+ // cond: d != 0
+ // result: (Const32 [int32(uint32(c) % uint32(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) % uint32(d)))
+ return true
+ }
+ // match: (Mod32u <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (And32 n (Const32 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ break
+ }
+ v.reset(OpAnd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod32u <t> x (Const32 [c]))
+ // cond: x.Op != OpConst32 && c > 0 && umagicOK32(c)
+ // result: (Sub32 x (Mul32 <t> (Div32u <t> x (Const32 <t> [c])) (Const32 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32 && c > 0 && umagicOK32(c)) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v1 := b.NewValue0(v.Pos, OpDiv32u, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64 (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [c % d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 n (Const64 [-1<<63]))
+ // cond: isNonNegative(n)
+ // result: n
+ for {
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 || !(isNonNegative(n)) {
+ break
+ }
+ v.copyOf(n)
+ return true
+ }
+ // match: (Mod64 <t> n (Const64 [c]))
+ // cond: c < 0 && c != -1<<63
+ // result: (Mod64 <t> n (Const64 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c < 0 && c != -1<<63) {
+ break
+ }
+ v.reset(OpMod64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64 <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && (c > 0 || c == -1<<63)
+ // result: (Sub64 x (Mul64 <t> (Div64 <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && (c > 0 || c == -1<<63)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod64u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod64u (Const64 [c]) (Const64 [d]))
+ // cond: d != 0
+ // result: (Const64 [int64(uint64(c) % uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) % uint64(d)))
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (And64 n (Const64 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64u <t> n (Const64 [-1<<63]))
+ // result: (And64 n (Const64 <t> [1<<63-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != -1<<63 {
+ break
+ }
+ v.reset(OpAnd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1<<63 - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod64u <t> x (Const64 [c]))
+ // cond: x.Op != OpConst64 && c > 0 && umagicOK64(c)
+ // result: (Sub64 x (Mul64 <t> (Div64u <t> x (Const64 <t> [c])) (Const64 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64 && c > 0 && umagicOK64(c)) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v1 := b.NewValue0(v.Pos, OpDiv64u, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8 (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [c % d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c % d)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: isNonNegative(n) && isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isNonNegative(n) && isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> n (Const8 [c]))
+ // cond: c < 0 && c != -1<<7
+ // result: (Mod8 <t> n (Const8 <t> [-c]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c < 0 && c != -1<<7) {
+ break
+ }
+ v.reset(OpMod8)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8 <t> x (Const8 [c]))
+ // cond: x.Op != OpConst8 && (c > 0 || c == -1<<7)
+ // result: (Sub8 x (Mul8 <t> (Div8 <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && (c > 0 || c == -1<<7)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMod8u(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Mod8u (Const8 [c]) (Const8 [d]))
+ // cond: d != 0
+ // result: (Const8 [int8(uint8(c) % uint8(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ if !(d != 0) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) % uint8(d)))
+ return true
+ }
+ // match: (Mod8u <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (And8 n (Const8 <t> [c-1]))
+ for {
+ t := v.Type
+ n := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ break
+ }
+ v.reset(OpAnd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - 1)
+ v.AddArg2(n, v0)
+ return true
+ }
+ // match: (Mod8u <t> x (Const8 [c]))
+	// cond: x.Op != OpConst8 && c > 0 && umagicOK8(c)
+ // result: (Sub8 x (Mul8 <t> (Div8u <t> x (Const8 <t> [c])) (Const8 <t> [c])))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8 && c > 0 && umagicOK8(c)) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v1 := b.NewValue0(v.Pos, OpDiv8u, t)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(c)
+ v1.AddArg2(x, v2)
+ v0.AddArg2(v1, v2)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
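Editorial sketch, not part of the generated diff: the Mod* rules above rest on two identities, checked below with hypothetical values. For an unsigned (or known non-negative) dividend and a power-of-two divisor c, x % c equals x & (c-1); in general x % c equals x - (x/c)*c, which is how the remaining cases are lowered to Div, Mul and Sub.

package main

import "fmt"

func main() {
	var x uint16 = 1000
	var c uint16 = 8 // power of two
	// Unsigned modulus by a power of two is a mask.
	fmt.Println(x%c == x&(c-1)) // true

	var a, b int16 = -1000, 7
	// Go's % truncates toward zero, so the general lowering holds for any sign.
	fmt.Println(a%b == a-(a/b)*b) // true
}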
+func rewriteValuegeneric_OpMove(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (Move {t} [n] dst1 src mem:(Zero {t} [n] dst2 _))
+ // cond: isSamePtr(src, dst2)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpZero || auxIntToInt64(mem.AuxInt) != n || auxToType(mem.Aux) != t {
+ break
+ }
+ dst2 := mem.Args[0]
+ if !(isSamePtr(src, dst2)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src mem:(VarDef (Zero {t} [n] dst0 _)))
+ // cond: isSamePtr(src, dst0)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpZero || auxIntToInt64(mem_0.AuxInt) != n || auxToType(mem_0.Aux) != t {
+ break
+ }
+ dst0 := mem_0.Args[0]
+ if !(isSamePtr(src, dst0)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst (Addr {sym} (SB)) mem)
+ // cond: symIsROZero(sym)
+ // result: (Zero {t} [n] dst mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpAddr {
+ break
+ }
+ sym := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ mem := v_2
+ if !(symIsROZero(sym)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst, mem)
+ return true
+ }
+ // match: (Move {t1} [n] dst1 src1 store:(Store {t2} op:(OffPtr [o2] dst2) _ mem))
+ // cond: isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2 + t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)
+ // result: (Move {t1} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ store := v_2
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ op := store.Args[0]
+ if op.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op.AuxInt)
+ dst2 := op.Args[0]
+ if !(isSamePtr(dst1, dst2) && store.Uses == 1 && n >= o2+t2.Size() && disjoint(src1, n, op, t2.Size()) && clobber(store)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ move := v_2
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 zero:(Zero {t} [n] dst2 mem))
+ // cond: zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)
+ // result: (Move {t} [n] dst1 src1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ zero := v_2
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst1, src1, mem)
+ return true
+ }
+ // match: (Move {t} [n] dst1 src1 vardef:(VarDef {x} zero:(Zero {t} [n] dst2 mem)))
+ // cond: zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)
+ // result: (Move {t} [n] dst1 src1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ src1 := v_1
+ vardef := v_2
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ zero := vardef.Args[0]
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != n || auxToType(zero.Aux) != t {
+ break
+ }
+ mem := zero.Args[1]
+ dst2 := zero.Args[0]
+ if !(zero.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && disjoint(src1, n, dst2, n) && clobber(zero, vardef)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst1, src1, v0)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ d2 := mem_2.Args[1]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ d3 := mem_2_2.Args[1]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ op3 := mem_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ op4 := mem_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ d4 := mem_2_2_2.Args[1]
+ op5 := mem_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [0] p3) d2 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size() + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [0] dst) d2 mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ d2 := mem_0_2.Args[1]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ if auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && o2 == t3.Size() && n == t2.Size()+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, d2, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [0] p4) d3 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [0] dst) d3 mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ d3 := mem_0_2_2.Args[1]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ if auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && o3 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, d3, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Store {t3} op3:(OffPtr <tt3> [o3] p3) d2 (Store {t4} op4:(OffPtr <tt4> [o4] p4) d3 (Store {t5} op5:(OffPtr <tt5> [0] p5) d4 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size() + t3.Size() + t4.Size() + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [0] dst) d4 mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ op3 := mem_0_2.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ tt3 := op3.Type
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ op4 := mem_0_2_2.Args[0]
+ if op4.Op != OpOffPtr {
+ break
+ }
+ tt4 := op4.Type
+ o4 := auxIntToInt64(op4.AuxInt)
+ p4 := op4.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ d4 := mem_0_2_2_2.Args[1]
+ op5 := mem_0_2_2_2.Args[0]
+ if op5.Op != OpOffPtr {
+ break
+ }
+ tt5 := op5.Type
+ if auxIntToInt64(op5.AuxInt) != 0 {
+ break
+ }
+ p5 := op5.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && o4 == t5.Size() && o3-o4 == t4.Size() && o2-o3 == t3.Size() && n == t2.Size()+t3.Size()+t4.Size()+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, d4, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _)))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ op2 := mem.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero || auxIntToInt64(mem_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ p3 := mem_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero || auxIntToInt64(mem_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ p4 := mem_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ p5 := mem_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0.Type
+ o2 := auxIntToInt64(mem_0.AuxInt)
+ p2 := mem_0.Args[0]
+ d1 := mem.Args[1]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ mem_2_0 := mem_2.Args[0]
+ if mem_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_2_0.Type
+ o3 := auxIntToInt64(mem_2_0.AuxInt)
+ p3 := mem_2_0.Args[0]
+ d2 := mem_2.Args[1]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ mem_2_2_0 := mem_2_2.Args[0]
+ if mem_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_2_2_0.Type
+ o4 := auxIntToInt64(mem_2_2_0.AuxInt)
+ p4 := mem_2_2_0.Args[0]
+ d3 := mem_2_2.Args[1]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2_2.Aux)
+ _ = mem_2_2_2.Args[2]
+ mem_2_2_2_0 := mem_2_2_2.Args[0]
+ if mem_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_2_2_2_0.AuxInt)
+ p5 := mem_2_2_2_0.Args[0]
+ d4 := mem_2_2_2.Args[1]
+ mem_2_2_2_2 := mem_2_2_2.Args[2]
+ if mem_2_2_2_2.Op != OpZero || auxIntToInt64(mem_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_2_2_2_2.Aux)
+ p6 := mem_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} op2:(OffPtr <tt2> [o2] p2) d1 (Zero {t3} [n] p3 _))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2 + t2.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Zero {t1} [n] dst mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ op2 := mem_0.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ tt2 := op2.Type
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpZero || auxIntToInt64(mem_0_2.AuxInt) != n {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ p3 := mem_0_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && registerizable(b, t2) && n >= o2+t2.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v1.AuxInt = int64ToAuxInt(n)
+ v1.Aux = typeToAux(t1)
+ v1.AddArg2(dst, mem)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Zero {t4} [n] p4 _)))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2 + t2.Size() && n >= o3 + t3.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Zero {t1} [n] dst mem)))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2.AuxInt) != n {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ p4 := mem_0_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && n >= o2+t2.Size() && n >= o3+t3.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v3.AuxInt = int64ToAuxInt(n)
+ v3.Aux = typeToAux(t1)
+ v3.AddArg2(dst, mem)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Zero {t5} [n] p5 _))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Zero {t1} [n] dst mem))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2.AuxInt) != n {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ p5 := mem_0_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v5.AuxInt = int64ToAuxInt(n)
+ v5.Aux = typeToAux(t1)
+ v5.AddArg2(dst, mem)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [n] dst p1 mem:(VarDef (Store {t2} (OffPtr <tt2> [o2] p2) d1 (Store {t3} (OffPtr <tt3> [o3] p3) d2 (Store {t4} (OffPtr <tt4> [o4] p4) d3 (Store {t5} (OffPtr <tt5> [o5] p5) d4 (Zero {t6} [n] p6 _)))))))
+ // cond: isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2 + t2.Size() && n >= o3 + t3.Size() && n >= o4 + t4.Size() && n >= o5 + t5.Size()
+ // result: (Store {t2} (OffPtr <tt2> [o2] dst) d1 (Store {t3} (OffPtr <tt3> [o3] dst) d2 (Store {t4} (OffPtr <tt4> [o4] dst) d3 (Store {t5} (OffPtr <tt5> [o5] dst) d4 (Zero {t1} [n] dst mem)))))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ p1 := v_1
+ mem := v_2
+ if mem.Op != OpVarDef {
+ break
+ }
+ mem_0 := mem.Args[0]
+ if mem_0.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem_0.Aux)
+ _ = mem_0.Args[2]
+ mem_0_0 := mem_0.Args[0]
+ if mem_0_0.Op != OpOffPtr {
+ break
+ }
+ tt2 := mem_0_0.Type
+ o2 := auxIntToInt64(mem_0_0.AuxInt)
+ p2 := mem_0_0.Args[0]
+ d1 := mem_0.Args[1]
+ mem_0_2 := mem_0.Args[2]
+ if mem_0_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_0_2.Aux)
+ _ = mem_0_2.Args[2]
+ mem_0_2_0 := mem_0_2.Args[0]
+ if mem_0_2_0.Op != OpOffPtr {
+ break
+ }
+ tt3 := mem_0_2_0.Type
+ o3 := auxIntToInt64(mem_0_2_0.AuxInt)
+ p3 := mem_0_2_0.Args[0]
+ d2 := mem_0_2.Args[1]
+ mem_0_2_2 := mem_0_2.Args[2]
+ if mem_0_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_0_2_2.Aux)
+ _ = mem_0_2_2.Args[2]
+ mem_0_2_2_0 := mem_0_2_2.Args[0]
+ if mem_0_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt4 := mem_0_2_2_0.Type
+ o4 := auxIntToInt64(mem_0_2_2_0.AuxInt)
+ p4 := mem_0_2_2_0.Args[0]
+ d3 := mem_0_2_2.Args[1]
+ mem_0_2_2_2 := mem_0_2_2.Args[2]
+ if mem_0_2_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_0_2_2_2.Aux)
+ _ = mem_0_2_2_2.Args[2]
+ mem_0_2_2_2_0 := mem_0_2_2_2.Args[0]
+ if mem_0_2_2_2_0.Op != OpOffPtr {
+ break
+ }
+ tt5 := mem_0_2_2_2_0.Type
+ o5 := auxIntToInt64(mem_0_2_2_2_0.AuxInt)
+ p5 := mem_0_2_2_2_0.Args[0]
+ d4 := mem_0_2_2_2.Args[1]
+ mem_0_2_2_2_2 := mem_0_2_2_2.Args[2]
+ if mem_0_2_2_2_2.Op != OpZero || auxIntToInt64(mem_0_2_2_2_2.AuxInt) != n {
+ break
+ }
+ t6 := auxToType(mem_0_2_2_2_2.Aux)
+ p6 := mem_0_2_2_2_2.Args[0]
+ if !(isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && isSamePtr(p5, p6) && t2.Alignment() <= t1.Alignment() && t3.Alignment() <= t1.Alignment() && t4.Alignment() <= t1.Alignment() && t5.Alignment() <= t1.Alignment() && t6.Alignment() <= t1.Alignment() && registerizable(b, t2) && registerizable(b, t3) && registerizable(b, t4) && registerizable(b, t5) && n >= o2+t2.Size() && n >= o3+t3.Size() && n >= o4+t4.Size() && n >= o5+t5.Size()) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t2)
+ v0 := b.NewValue0(v.Pos, OpOffPtr, tt2)
+ v0.AuxInt = int64ToAuxInt(o2)
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpOffPtr, tt3)
+ v2.AuxInt = int64ToAuxInt(o3)
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t4)
+ v4 := b.NewValue0(v.Pos, OpOffPtr, tt4)
+ v4.AuxInt = int64ToAuxInt(o4)
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t5)
+ v6 := b.NewValue0(v.Pos, OpOffPtr, tt5)
+ v6.AuxInt = int64ToAuxInt(o5)
+ v6.AddArg(dst)
+ v7 := b.NewValue0(v.Pos, OpZero, types.TypeMem)
+ v7.AuxInt = int64ToAuxInt(n)
+ v7.Aux = typeToAux(t1)
+ v7.AddArg2(dst, mem)
+ v5.AddArg3(v6, d4, v7)
+ v3.AddArg3(v4, d3, v5)
+ v1.AddArg3(v2, d2, v3)
+ v.AddArg3(v0, d1, v1)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(Move {t2} [s] tmp2 src _))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpMove || auxIntToInt64(midmem.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem.Aux)
+ src := midmem.Args[1]
+ tmp2 := midmem.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move {t1} [s] dst tmp1 midmem:(VarDef (Move {t2} [s] tmp2 src _)))
+ // cond: t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))
+ // result: (Move {t1} [s] dst src midmem)
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ dst := v_0
+ tmp1 := v_1
+ midmem := v_2
+ if midmem.Op != OpVarDef {
+ break
+ }
+ midmem_0 := midmem.Args[0]
+ if midmem_0.Op != OpMove || auxIntToInt64(midmem_0.AuxInt) != s {
+ break
+ }
+ t2 := auxToType(midmem_0.Aux)
+ src := midmem_0.Args[1]
+ tmp2 := midmem_0.Args[0]
+ if !(t1.Compare(t2) == types.CMPeq && isSamePtr(tmp1, tmp2) && isStackPtr(src) && !isVolatile(src) && disjoint(src, s, tmp2, s) && (disjoint(src, s, dst, s) || isInlinableMemmove(dst, src, s, config))) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(s)
+ v.Aux = typeToAux(t1)
+ v.AddArg3(dst, src, midmem)
+ return true
+ }
+ // match: (Move dst src mem)
+ // cond: isSamePtr(dst, src)
+ // result: mem
+ for {
+ dst := v_0
+ src := v_1
+ mem := v_2
+ if !(isSamePtr(dst, src)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [-1]) x)
+ // result: (Neg16 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: isPowerOfTwo16(c)
+ // result: (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(isPowerOfTwo16(c)) {
+ continue
+ }
+ v.reset(OpLsh16x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log16(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 <t> n (Const16 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo16(-c)
+ // result: (Neg16 (Lsh16x64 <t> n (Const64 <typ.UInt64> [log16(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo16(-c)) {
+ continue
+ }
+ v.reset(OpNeg16)
+ v0 := b.NewValue0(v.Pos, OpLsh16x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log16(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul16 (Mul16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Mul16 i (Mul16 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpMul16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul16 (Const16 <t> [c]) (Mul16 (Const16 <t> [d]) x))
+ // result: (Mul16 (Const16 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [-1]) x)
+ // result: (Neg32 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: isPowerOfTwo32(c)
+ // result: (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(isPowerOfTwo32(c)) {
+ continue
+ }
+ v.reset(OpLsh32x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log32(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 <t> n (Const32 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo32(-c)
+ // result: (Neg32 (Lsh32x64 <t> n (Const64 <typ.UInt64> [log32(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo32(-c)) {
+ continue
+ }
+ v.reset(OpNeg32)
+ v0 := b.NewValue0(v.Pos, OpLsh32x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log32(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Add32 <t> (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c*d]) (Mul32 <t> (Const32 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul32, t)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul32 (Mul32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Mul32 i (Mul32 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpMul32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul32 (Const32 <t> [c]) (Mul32 (Const32 <t> [d]) x))
+ // result: (Mul32 (Const32 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul32F (Const32F [c]) (Const32F [d]))
+ // cond: c*d == c*d
+ // result: (Const32F [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [-1]))
+ // result: (Neg32F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg32F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul32F x (Const32F [2]))
+ // result: (Add32F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst32F || auxIntToFloat32(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd32F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [-1]) x)
+ // result: (Neg64 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg64)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: isPowerOfTwo64(c)
+ // result: (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(isPowerOfTwo64(c)) {
+ continue
+ }
+ v.reset(OpLsh64x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log64(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 <t> n (Const64 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo64(-c)
+ // result: (Neg64 (Lsh64x64 <t> n (Const64 <typ.UInt64> [log64(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo64(-c)) {
+ continue
+ }
+ v.reset(OpNeg64)
+ v0 := b.NewValue0(v.Pos, OpLsh64x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log64(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Add64 <t> (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c*d]) (Mul64 <t> (Const64 <t> [c]) x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 || v_1.Type != t {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v1 := b.NewValue0(v.Pos, OpMul64, t)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(c)
+ v1.AddArg2(v2, x)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul64 (Mul64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Mul64 i (Mul64 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpMul64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul64 (Const64 <t> [c]) (Mul64 (Const64 <t> [d]) x))
+ // result: (Mul64 (Const64 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Mul64F (Const64F [c]) (Const64F [d]))
+ // cond: c*d == c*d
+ // result: (Const64F [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c*d == c*d) {
+ continue
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [1]))
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [-1]))
+ // result: (Neg64F x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpNeg64F)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul64F x (Const64F [2]))
+ // result: (Add64F x x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpConst64F || auxIntToFloat64(v_1.AuxInt) != 2 {
+ continue
+ }
+ v.reset(OpAdd64F)
+ v.AddArg2(x, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpMul8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Mul8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c*d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c * d)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [1]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 1 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [-1]) x)
+ // result: (Neg8 x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ x := v_1
+ v.reset(OpNeg8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: isPowerOfTwo8(c)
+ // result: (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(c)]))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(isPowerOfTwo8(c)) {
+ continue
+ }
+ v.reset(OpLsh8x64)
+ v.Type = t
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(log8(c))
+ v.AddArg2(n, v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 <t> n (Const8 [c]))
+ // cond: t.IsSigned() && isPowerOfTwo8(-c)
+ // result: (Neg8 (Lsh8x64 <t> n (Const64 <typ.UInt64> [log8(-c)])))
+ for {
+ t := v.Type
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(t.IsSigned() && isPowerOfTwo8(-c)) {
+ continue
+ }
+ v.reset(OpNeg8)
+ v0 := b.NewValue0(v.Pos, OpLsh8x64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v1.AuxInt = int64ToAuxInt(log8(-c))
+ v0.AddArg2(n, v1)
+ v.AddArg(v0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ break
+ }
+ // match: (Mul8 (Mul8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Mul8 i (Mul8 <t> x z))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpMul8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpMul8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Mul8 (Const8 <t> [c]) (Mul8 (Const8 <t> [d]) x))
+ // result: (Mul8 (Const8 <t> [c*d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c * d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg16(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg16 (Const16 [c]))
+ // result: (Const16 [-c])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg16 (Sub16 x y))
+ // result: (Sub16 y x)
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg16 (Neg16 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg16 <t> (Com16 x))
+ // result: (Add16 (Const16 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg32 (Const32 [c]))
+ // result: (Const32 [-c])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg32 (Sub32 x y))
+ // result: (Sub32 y x)
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg32 (Neg32 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg32 <t> (Com32 x))
+ // result: (Add32 (Const32 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg32F (Const32F [c]))
+ // cond: c != 0
+ // result: (Const32F [-c])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg64 (Const64 [c]))
+ // result: (Const64 [-c])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg64 (Sub64 x y))
+ // result: (Sub64 y x)
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg64 (Neg64 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg64 <t> (Com64 x))
+ // result: (Add64 (Const64 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Neg64F (Const64F [c]))
+ // cond: c != 0
+ // result: (Const64F [-c])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(c != 0) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(-c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeg8(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Neg8 (Const8 [c]))
+ // result: (Const8 [-c])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-c)
+ return true
+ }
+ // match: (Neg8 (Sub8 x y))
+ // result: (Sub8 y x)
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpSub8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Neg8 (Neg8 x))
+ // result: x
+ for {
+ if v_0.Op != OpNeg8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Neg8 <t> (Com8 x))
+ // result: (Add8 (Const8 <t> [1]) x)
+ for {
+ t := v.Type
+ if v_0.Op != OpCom8 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq16 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Neq16 (Const16 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 (Const16 [c]) (Const16 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 n (Lsh16x64 (Rsh16x64 (Add16 <t> n (Rsh16Ux64 <t> (Rsh16x64 <t> n (Const64 <typ.UInt64> [15])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 15 && kbar == 16 - k
+ // result: (Neq16 (And16 <t> n (Const16 <t> [1<<uint(k)-1])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh16x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh16x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd16 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh16Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh16x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 15 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 15 && kbar == 16-k) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq16 s:(Sub16 x y) (Const16 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub16 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [y]))
+ // cond: oneBit16(y)
+ // result: (Eq16 (And16 <t> x (Const16 <t> [y])) (Const16 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 || v_1.Type != t || auxIntToInt16(v_1.AuxInt) != y || !(oneBit16(y)) {
+ continue
+ }
+ v.reset(OpEq16)
+ v0 := b.NewValue0(v.Pos, OpAnd16, t)
+ v1 := b.NewValue0(v.Pos, OpConst16, t)
+ v1.AuxInt = int16ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst16, t)
+ v2.AuxInt = int16ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq32 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Neq32 (Const32 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 n (Lsh32x64 (Rsh32x64 (Add32 <t> n (Rsh32Ux64 <t> (Rsh32x64 <t> n (Const64 <typ.UInt64> [31])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 31 && kbar == 32 - k
+ // result: (Neq32 (And32 <t> n (Const32 <t> [1<<uint(k)-1])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh32x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh32x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd32 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh32Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh32x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 31 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 31 && kbar == 32-k) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq32 s:(Sub32 x y) (Const32 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub32 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [y]))
+ // cond: oneBit32(y)
+ // result: (Eq32 (And32 <t> x (Const32 <t> [y])) (Const32 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 || v_1.Type != t || auxIntToInt32(v_1.AuxInt) != y || !(oneBit32(y)) {
+ continue
+ }
+ v.reset(OpEq32)
+ v0 := b.NewValue0(v.Pos, OpAnd32, t)
+ v1 := b.NewValue0(v.Pos, OpConst32, t)
+ v1.AuxInt = int32ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst32, t)
+ v2.AuxInt = int32ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq32F (Const32F [c]) (Const32F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32F {
+ continue
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ continue
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq64 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Neq64 (Const64 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 n (Lsh64x64 (Rsh64x64 (Add64 <t> n (Rsh64Ux64 <t> (Rsh64x64 <t> n (Const64 <typ.UInt64> [63])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 63 && kbar == 64 - k
+ // result: (Neq64 (And64 <t> n (Const64 <t> [1<<uint(k)-1])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh64x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh64x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd64 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh64Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh64x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 63 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 63 && kbar == 64-k) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq64 s:(Sub64 x y) (Const64 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub64 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [y]))
+ // cond: oneBit64(y)
+ // result: (Eq64 (And64 <t> x (Const64 <t> [y])) (Const64 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 || v_1.Type != t || auxIntToInt64(v_1.AuxInt) != y || !(oneBit64(y)) {
+ continue
+ }
+ v.reset(OpEq64)
+ v0 := b.NewValue0(v.Pos, OpAnd64, t)
+ v1 := b.NewValue0(v.Pos, OpConst64, t)
+ v1.AuxInt = int64ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst64, t)
+ v2.AuxInt = int64ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Neq64F (Const64F [c]) (Const64F [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64F {
+ continue
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ continue
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeq8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Neq8 x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (Neq8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Neq8 (Const8 <t> [c-d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 (Const8 [c]) (Const8 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 n (Lsh8x64 (Rsh8x64 (Add8 <t> n (Rsh8Ux64 <t> (Rsh8x64 <t> n (Const64 <typ.UInt64> [ 7])) (Const64 <typ.UInt64> [kbar]))) (Const64 <typ.UInt64> [k])) (Const64 <typ.UInt64> [k])) )
+ // cond: k > 0 && k < 7 && kbar == 8 - k
+ // result: (Neq8 (And8 <t> n (Const8 <t> [1<<uint(k)-1])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ n := v_0
+ if v_1.Op != OpLsh8x64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpRsh8x64 {
+ continue
+ }
+ _ = v_1_0.Args[1]
+ v_1_0_0 := v_1_0.Args[0]
+ if v_1_0_0.Op != OpAdd8 {
+ continue
+ }
+ t := v_1_0_0.Type
+ _ = v_1_0_0.Args[1]
+ v_1_0_0_0 := v_1_0_0.Args[0]
+ v_1_0_0_1 := v_1_0_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0_0_0, v_1_0_0_1 = _i1+1, v_1_0_0_1, v_1_0_0_0 {
+ if n != v_1_0_0_0 || v_1_0_0_1.Op != OpRsh8Ux64 || v_1_0_0_1.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1.Args[1]
+ v_1_0_0_1_0 := v_1_0_0_1.Args[0]
+ if v_1_0_0_1_0.Op != OpRsh8x64 || v_1_0_0_1_0.Type != t {
+ continue
+ }
+ _ = v_1_0_0_1_0.Args[1]
+ if n != v_1_0_0_1_0.Args[0] {
+ continue
+ }
+ v_1_0_0_1_0_1 := v_1_0_0_1_0.Args[1]
+ if v_1_0_0_1_0_1.Op != OpConst64 || v_1_0_0_1_0_1.Type != typ.UInt64 || auxIntToInt64(v_1_0_0_1_0_1.AuxInt) != 7 {
+ continue
+ }
+ v_1_0_0_1_1 := v_1_0_0_1.Args[1]
+ if v_1_0_0_1_1.Op != OpConst64 || v_1_0_0_1_1.Type != typ.UInt64 {
+ continue
+ }
+ kbar := auxIntToInt64(v_1_0_0_1_1.AuxInt)
+ v_1_0_1 := v_1_0.Args[1]
+ if v_1_0_1.Op != OpConst64 || v_1_0_1.Type != typ.UInt64 {
+ continue
+ }
+ k := auxIntToInt64(v_1_0_1.AuxInt)
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 || v_1_1.Type != typ.UInt64 || auxIntToInt64(v_1_1.AuxInt) != k || !(k > 0 && k < 7 && kbar == 8-k) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(1<<uint(k) - 1)
+ v0.AddArg2(n, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Neq8 s:(Sub8 x y) (Const8 [0]))
+ // cond: s.Uses == 1
+ // result: (Neq8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ s := v_0
+ if s.Op != OpSub8 {
+ continue
+ }
+ y := s.Args[1]
+ x := s.Args[0]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != 0 || !(s.Uses == 1) {
+ continue
+ }
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ // match: (Neq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [y]))
+ // cond: oneBit8(y)
+ // result: (Eq8 (And8 <t> x (Const8 <t> [y])) (Const8 <t> [0]))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ t := v_0.Type
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 || v_0_1.Type != t {
+ continue
+ }
+ y := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 || v_1.Type != t || auxIntToInt8(v_1.AuxInt) != y || !(oneBit8(y)) {
+ continue
+ }
+ v.reset(OpEq8)
+ v0 := b.NewValue0(v.Pos, OpAnd8, t)
+ v1 := b.NewValue0(v.Pos, OpConst8, t)
+ v1.AuxInt = int8ToAuxInt(y)
+ v0.AddArg2(x, v1)
+ v2 := b.NewValue0(v.Pos, OpConst8, t)
+ v2.AuxInt = int8ToAuxInt(0)
+ v.AddArg2(v0, v2)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqB (ConstBool [c]) (ConstBool [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool {
+ continue
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ if v_1.Op != OpConstBool {
+ continue
+ }
+ d := auxIntToBool(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [false]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != false {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (ConstBool [true]) x)
+ // result: (Not x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstBool || auxIntToBool(v_0.AuxInt) != true {
+ continue
+ }
+ x := v_1
+ v.reset(OpNot)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ // match: (NeqB (Not x) (Not y))
+ // result: (NeqB x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpNot {
+ continue
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpNot {
+ continue
+ }
+ y := v_1.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqInter(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqInter x y)
+ // result: (NeqPtr (ITab x) (ITab y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpITab, typ.Uintptr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNeqPtr(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (NeqPtr x x)
+ // result: (ConstBool [false])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(false)
+ return true
+ }
+ // match: (NeqPtr (Addr {x} _) (Addr {y} _))
+ // result: (ConstBool [x != y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Addr {x} _) (OffPtr [o] (Addr {y} _)))
+ // result: (ConstBool [x != y || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (Addr {x} _)) (OffPtr [o2] (Addr {y} _)))
+ // result: (ConstBool [x != y || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {x} _ _) (LocalAddr {y} _ _))
+ // result: (ConstBool [x != y])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr {x} _ _) (OffPtr [o] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x != y || o != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] (LocalAddr {x} _ _)) (OffPtr [o2] (LocalAddr {y} _ _)))
+ // result: (ConstBool [x != y || o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr {
+ continue
+ }
+ x := auxToSym(v_0_0.Aux)
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpLocalAddr {
+ continue
+ }
+ y := auxToSym(v_1_0.Aux)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(x != y || o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != 0])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != 0)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr [o1] p1) (OffPtr [o2] p2))
+ // cond: isSamePtr(p1, p2)
+ // result: (ConstBool [o1 != o2])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ o1 := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ if v_1.Op != OpOffPtr {
+ continue
+ }
+ o2 := auxIntToInt64(v_1.AuxInt)
+ p2 := v_1.Args[0]
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(o1 != o2)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [c]) (Const32 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [c]) (Const64 [d]))
+ // result: (ConstBool [c != d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(c != d)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (Addr _))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (LocalAddr _ _) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (OffPtr (LocalAddr _ _)) (OffPtr (Addr _)))
+ // result: (ConstBool [true])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOffPtr {
+ continue
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpLocalAddr || v_1.Op != OpOffPtr {
+ continue
+ }
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpAddr {
+ continue
+ }
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(true)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (AddPtr p1 o1) p2)
+ // cond: isSamePtr(p1, p2)
+ // result: (IsNonNil o1)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAddPtr {
+ continue
+ }
+ o1 := v_0.Args[1]
+ p1 := v_0.Args[0]
+ p2 := v_1
+ if !(isSamePtr(p1, p2)) {
+ continue
+ }
+ v.reset(OpIsNonNil)
+ v.AddArg(o1)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const32 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (Const64 [0]) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ // match: (NeqPtr (ConstNil) p)
+ // result: (IsNonNil p)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConstNil {
+ continue
+ }
+ p := v_1
+ v.reset(OpIsNonNil)
+ v.AddArg(p)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpNeqSlice(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (NeqSlice x y)
+ // result: (NeqPtr (SlicePtr x) (SlicePtr y))
+ for {
+ x := v_0
+ y := v_1
+ v.reset(OpNeqPtr)
+ v0 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v0.AddArg(x)
+ v1 := b.NewValue0(v.Pos, OpSlicePtr, typ.BytePtr)
+ v1.AddArg(y)
+ v.AddArg2(v0, v1)
+ return true
+ }
+}
+func rewriteValuegeneric_OpNilCheck(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (NilCheck (GetG mem) mem)
+ // result: mem
+ for {
+ if v_0.Op != OpGetG {
+ break
+ }
+ mem := v_0.Args[0]
+ if mem != v_1 {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (NilCheck (SelectN [0] call:(StaticLECall _ _)) _)
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ // match: (NilCheck (OffPtr (SelectN [0] call:(StaticLECall _ _))) _)
+ // cond: isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")
+ // result: (Invalid)
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 || !(isSameCall(call.Aux, "runtime.newobject") && warnRule(fe.Debug_checknil(), v, "removed nil check")) {
+ break
+ }
+ v.reset(OpInvalid)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpNot(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Not (ConstBool [c]))
+ // result: (ConstBool [!c])
+ for {
+ if v_0.Op != OpConstBool {
+ break
+ }
+ c := auxIntToBool(v_0.AuxInt)
+ v.reset(OpConstBool)
+ v.AuxInt = boolToAuxInt(!c)
+ return true
+ }
+ // match: (Not (Eq64 x y))
+ // result: (Neq64 x y)
+ for {
+ if v_0.Op != OpEq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32 x y))
+ // result: (Neq32 x y)
+ for {
+ if v_0.Op != OpEq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq16 x y))
+ // result: (Neq16 x y)
+ for {
+ if v_0.Op != OpEq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq8 x y))
+ // result: (Neq8 x y)
+ for {
+ if v_0.Op != OpEq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqB x y))
+ // result: (NeqB x y)
+ for {
+ if v_0.Op != OpEqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (EqPtr x y))
+ // result: (NeqPtr x y)
+ for {
+ if v_0.Op != OpEqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq64F x y))
+ // result: (Neq64F x y)
+ for {
+ if v_0.Op != OpEq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Eq32F x y))
+ // result: (Neq32F x y)
+ for {
+ if v_0.Op != OpEq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpNeq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64 x y))
+ // result: (Eq64 x y)
+ for {
+ if v_0.Op != OpNeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32 x y))
+ // result: (Eq32 x y)
+ for {
+ if v_0.Op != OpNeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq16 x y))
+ // result: (Eq16 x y)
+ for {
+ if v_0.Op != OpNeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq16)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq8 x y))
+ // result: (Eq8 x y)
+ for {
+ if v_0.Op != OpNeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq8)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqB x y))
+ // result: (EqB x y)
+ for {
+ if v_0.Op != OpNeqB {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqB)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (NeqPtr x y))
+ // result: (EqPtr x y)
+ for {
+ if v_0.Op != OpNeqPtr {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEqPtr)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq64F x y))
+ // result: (Eq64F x y)
+ for {
+ if v_0.Op != OpNeq64F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq64F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Neq32F x y))
+ // result: (Eq32F x y)
+ for {
+ if v_0.Op != OpNeq32F {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpEq32F)
+ v.AddArg2(x, y)
+ return true
+ }
+ // match: (Not (Less64 x y))
+ // result: (Leq64 y x)
+ for {
+ if v_0.Op != OpLess64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32 x y))
+ // result: (Leq32 y x)
+ for {
+ if v_0.Op != OpLess32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16 x y))
+ // result: (Leq16 y x)
+ for {
+ if v_0.Op != OpLess16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8 x y))
+ // result: (Leq8 y x)
+ for {
+ if v_0.Op != OpLess8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less64U x y))
+ // result: (Leq64U y x)
+ for {
+ if v_0.Op != OpLess64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less32U x y))
+ // result: (Leq32U y x)
+ for {
+ if v_0.Op != OpLess32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less16U x y))
+ // result: (Leq16U y x)
+ for {
+ if v_0.Op != OpLess16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Less8U x y))
+ // result: (Leq8U y x)
+ for {
+ if v_0.Op != OpLess8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLeq8U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64 x y))
+ // result: (Less64 y x)
+ for {
+ if v_0.Op != OpLeq64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32 x y))
+ // result: (Less32 y x)
+ for {
+ if v_0.Op != OpLeq32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16 x y))
+ // result: (Less16 y x)
+ for {
+ if v_0.Op != OpLeq16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8 x y))
+ // result: (Less8 y x)
+ for {
+ if v_0.Op != OpLeq8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq64U x y))
+ // result: (Less64U y x)
+ for {
+ if v_0.Op != OpLeq64U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess64U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq32U x y))
+ // result: (Less32U y x)
+ for {
+ if v_0.Op != OpLeq32U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess32U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq16U x y))
+ // result: (Less16U y x)
+ for {
+ if v_0.Op != OpLeq16U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess16U)
+ v.AddArg2(y, x)
+ return true
+ }
+ // match: (Not (Leq8U x y))
+ // result: (Less8U y x)
+ for {
+ if v_0.Op != OpLeq8U {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ v.reset(OpLess8U)
+ v.AddArg2(y, x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpOffPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (OffPtr (OffPtr p [y]) [x])
+ // result: (OffPtr p [x+y])
+ for {
+ x := auxIntToInt64(v.AuxInt)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ y := auxIntToInt64(v_0.AuxInt)
+ p := v_0.Args[0]
+ v.reset(OpOffPtr)
+ v.AuxInt = int64ToAuxInt(x + y)
+ v.AddArg(p)
+ return true
+ }
+ // match: (OffPtr p [0])
+ // cond: v.Type.Compare(p.Type) == types.CMPeq
+ // result: p
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ p := v_0
+ if !(v.Type.Compare(p.Type) == types.CMPeq) {
+ break
+ }
+ v.copyOf(p)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or16 (Const16 [-1]) _)
+ // result: (Const16 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or16 x (Or16 x y))
+ // result: (Or16 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr16)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (And16 x (Const16 [c2])) (Const16 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or16 (Const16 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst16 {
+ continue
+ }
+ c2 := auxIntToInt16(v_0_1.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt16(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Or16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Or16 i (Or16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpOr16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or16 (Const16 <t> [c]) (Or16 (Const16 <t> [d]) x))
+ // result: (Or16 (Const16 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpOr16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or32 (Const32 [-1]) _)
+ // result: (Const32 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or32 x (Or32 x y))
+ // result: (Or32 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr32)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (And32 x (Const32 [c2])) (Const32 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or32 (Const32 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst32 {
+ continue
+ }
+ c2 := auxIntToInt32(v_0_1.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt32(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Or32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Or32 i (Or32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpOr32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or32 (Const32 <t> [c]) (Or32 (Const32 <t> [d]) x))
+ // result: (Or32 (Const32 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpOr32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or64 (Const64 [-1]) _)
+ // result: (Const64 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or64 x (Or64 x y))
+ // result: (Or64 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr64)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (And64 x (Const64 [c2])) (Const64 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or64 (Const64 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst64 {
+ continue
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt64(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Or64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Or64 i (Or64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpOr64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or64 (Const64 <t> [c]) (Or64 (Const64 <t> [d]) x))
+ // result: (Or64 (Const64 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpOr64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOr8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Or8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c|d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c | d)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x x)
+ // result: x
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Or8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Or8 (Const8 [-1]) _)
+ // result: (Const8 [-1])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != -1 {
+ continue
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(-1)
+ return true
+ }
+ break
+ }
+ // match: (Or8 x (Or8 x y))
+ // result: (Or8 x y)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpOr8)
+ v.AddArg2(x, y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (And8 x (Const8 [c2])) (Const8 <t> [c1]))
+ // cond: ^(c1 | c2) == 0
+ // result: (Or8 (Const8 <t> [c1]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpAnd8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ x := v_0_0
+ if v_0_1.Op != OpConst8 {
+ continue
+ }
+ c2 := auxIntToInt8(v_0_1.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ t := v_1.Type
+ c1 := auxIntToInt8(v_1.AuxInt)
+ if !(^(c1 | c2) == 0) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c1)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Or8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Or8 i (Or8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpOr8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpOr8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Or8 (Const8 <t> [c]) (Or8 (Const8 <t> [d]) x))
+ // result: (Or8 (Const8 <t> [c|d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpOr8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpOr8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c | d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpOrB(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (OrB (Less64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64 (Const64 [c]) x) (Less64 x (Const64 [d])))
+ // cond: c >= d
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Less32 x (Const32 [d])))
+ // cond: c >= d
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Less16 x (Const16 [d])))
+ // cond: c >= d
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Less8 x (Const8 [d])))
+ // cond: c >= d
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
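+	// Annotation (editorial, not produced by the rule generator): the OrB rules
+	// in this function fuse a pair of signed comparisons against constants into
+	// a single unsigned range check. With c >= d, "c < x || x < d" is equivalent
+	// to "uint(x-d) > uint(c-d)", because subtracting d maps the signed interval
+	// [d, c] onto the unsigned interval [0, c-d] and everything outside it to
+	// larger unsigned values. For example, with c = 10 and d = 0,
+	// "10 < x || x < 0" becomes "uint64(x) > 10". The rules below handle the
+	// "x <= d" forms by treating them as "x < d+1", guarded by "d+1 > d" so the
+	// increment cannot overflow.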
+ // match: (OrB (Less64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64 (Const64 [c]) x) (Leq64 x (Const64 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32 (Const32 [c]) x) (Leq32 x (Const32 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16 (Const16 [c]) x) (Leq16 x (Const16 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8 (Const8 [c]) x) (Leq8 x (Const8 [d])))
+ // cond: c >= d+1 && d+1 > d
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8 {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(c >= d+1 && d+1 > d) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
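+	// Annotation (editorial): the remaining OrB rules are the unsigned
+	// counterparts of the fusions above. The arithmetic is the same; only the
+	// side conditions use unsigned comparisons (e.g. uint64(c) >= uint64(d))
+	// because c and d are already interpreted as unsigned values.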
+ // match: (OrB (Less64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Less64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d]) (Sub64 <x.Type> x (Const64 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLess64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Less32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d]) (Sub32 <x.Type> x (Const32 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLess32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Less16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d]) (Sub16 <x.Type> x (Const16 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLess16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Less8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d]) (Sub8 <x.Type> x (Const8 <x.Type> [d])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLess8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Less64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLess64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq64U (Const64 [c]) x) (Leq64U x (Const64 [d])))
+ // cond: uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)
+ // result: (Leq64U (Const64 <x.Type> [c-d-1]) (Sub64 <x.Type> x (Const64 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq64U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0_0.AuxInt)
+ if v_1.Op != OpLeq64U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1_1.AuxInt)
+ if !(uint64(c) >= uint64(d+1) && uint64(d+1) > uint64(d)) {
+ continue
+ }
+ v.reset(OpLeq64U)
+ v0 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v0.AuxInt = int64ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub64, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst64, x.Type)
+ v2.AuxInt = int64ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Less32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLess32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq32U (Const32 [c]) x) (Leq32U x (Const32 [d])))
+ // cond: uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)
+ // result: (Leq32U (Const32 <x.Type> [c-d-1]) (Sub32 <x.Type> x (Const32 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq32U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0_0.AuxInt)
+ if v_1.Op != OpLeq32U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1_1.AuxInt)
+ if !(uint32(c) >= uint32(d+1) && uint32(d+1) > uint32(d)) {
+ continue
+ }
+ v.reset(OpLeq32U)
+ v0 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v0.AuxInt = int32ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub32, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst32, x.Type)
+ v2.AuxInt = int32ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Less16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLess16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq16U (Const16 [c]) x) (Leq16U x (Const16 [d])))
+ // cond: uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)
+ // result: (Leq16U (Const16 <x.Type> [c-d-1]) (Sub16 <x.Type> x (Const16 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq16U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0_0.AuxInt)
+ if v_1.Op != OpLeq16U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1_1.AuxInt)
+ if !(uint16(c) >= uint16(d+1) && uint16(d+1) > uint16(d)) {
+ continue
+ }
+ v.reset(OpLeq16U)
+ v0 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v0.AuxInt = int16ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub16, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst16, x.Type)
+ v2.AuxInt = int16ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Less8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Less8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLess8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLess8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ // match: (OrB (Leq8U (Const8 [c]) x) (Leq8U x (Const8 [d])))
+ // cond: uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)
+ // result: (Leq8U (Const8 <x.Type> [c-d-1]) (Sub8 <x.Type> x (Const8 <x.Type> [d+1])))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpLeq8U {
+ continue
+ }
+ x := v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0_0.AuxInt)
+ if v_1.Op != OpLeq8U {
+ continue
+ }
+ _ = v_1.Args[1]
+ if x != v_1.Args[0] {
+ continue
+ }
+ v_1_1 := v_1.Args[1]
+ if v_1_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1_1.AuxInt)
+ if !(uint8(c) >= uint8(d+1) && uint8(d+1) > uint8(d)) {
+ continue
+ }
+ v.reset(OpLeq8U)
+ v0 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v0.AuxInt = int8ToAuxInt(c - d - 1)
+ v1 := b.NewValue0(v.Pos, OpSub8, x.Type)
+ v2 := b.NewValue0(v.Pos, OpConst8, x.Type)
+ v2.AuxInt = int8ToAuxInt(d + 1)
+ v1.AddArg2(x, v2)
+ v.AddArg2(v0, v1)
+ return true
+ }
+ break
+ }
+ return false
+}
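+// Annotation (editorial, not generated): a Phi whose two arguments are the
+// same constant is just that constant, so the rules below collapse
+// (Phi (ConstN [c]) (ConstN [c])) to (ConstN [c]) for each integer width.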
+func rewriteValuegeneric_OpPhi(v *Value) bool {
+ // match: (Phi (Const8 [c]) (Const8 [c]))
+ // result: (Const8 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst8 || auxIntToInt8(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const16 [c]) (Const16 [c]))
+ // result: (Const16 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst16 || auxIntToInt16(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const32 [c]) (Const32 [c]))
+ // result: (Const32 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst32 || auxIntToInt32(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (Phi (Const64 [c]) (Const64 [c]))
+ // result: (Const64 [c])
+ for {
+ if len(v.Args) != 2 {
+ break
+ }
+ _ = v.Args[1]
+ v_0 := v.Args[0]
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v_1 := v.Args[1]
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != c {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
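+// Annotation (editorial, not generated): PtrIndex is lowered to explicit
+// pointer arithmetic, ptr + idx*elemsize, using a 32-bit or 64-bit multiply
+// depending on config.PtrSize. Roughly, indexing element i of type T becomes
+// AddPtr(ptr, Mul(idx, Const(t.Elem().Size()))).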
+func rewriteValuegeneric_OpPtrIndex(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 4 && is32Bit(t.Elem().Size())
+ // result: (AddPtr ptr (Mul32 <typ.Int> idx (Const32 <typ.Int> [int32(t.Elem().Size())])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 4 && is32Bit(t.Elem().Size())) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul32, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst32, typ.Int)
+ v1.AuxInt = int32ToAuxInt(int32(t.Elem().Size()))
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ // match: (PtrIndex <t> ptr idx)
+ // cond: config.PtrSize == 8
+ // result: (AddPtr ptr (Mul64 <typ.Int> idx (Const64 <typ.Int> [t.Elem().Size()])))
+ for {
+ t := v.Type
+ ptr := v_0
+ idx := v_1
+ if !(config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpAddPtr)
+ v0 := b.NewValue0(v.Pos, OpMul64, typ.Int)
+ v1 := b.NewValue0(v.Pos, OpConst64, typ.Int)
+ v1.AuxInt = int64ToAuxInt(t.Elem().Size())
+ v0.AddArg2(idx, v1)
+ v.AddArg2(ptr, v0)
+ return true
+ }
+ return false
+}
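+// Annotation (editorial, not generated): rotating an N-bit value by a
+// multiple of N is a no-op, so the four RotateLeft rules below replace the
+// rotate with its first operand when the constant count satisfies c%N == 0.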
+func rewriteValuegeneric_OpRotateLeft16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft16 x (Const16 [c]))
+ // cond: c%16 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(c%16 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft32 x (Const32 [c]))
+ // cond: c%32 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(c%32 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft64 x (Const64 [c]))
+ // cond: c%64 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(c%64 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRotateLeft8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (RotateLeft8 x (Const8 [c]))
+ // cond: c%8 == 0
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(c%8 == 0) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
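+// Annotation (editorial, not generated): Round32F/Round64F applied to an
+// operand that is already a constant of the target precision is the identity,
+// so the rules below simply forward that constant operand.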
+func rewriteValuegeneric_OpRound32F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round32F x:(Const32F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst32F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRound64F(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Round64F x:(Const64F))
+ // result: x
+ for {
+ x := v_0
+ if x.Op != OpConst64F {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
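+// Annotation (editorial, not generated): the right-shift rules that follow
+// share a common shape for every width and signedness:
+//   - shift counts of other widths are normalized to a Const64 count,
+//   - shifts of constants and shifts by 0 are folded away,
+//   - unsigned shifts by >= the operand width become 0, and shifting a 0 stays 0,
+//   - nested shifts by constants are merged when the combined count cannot
+//     overflow (the !uaddOvf(c,d) condition), and
+//   - a left shift followed by a right shift by the same constant is
+//     recognized as a zero extension (unsigned) or sign extension (signed)
+//     of the low bits.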
+func rewriteValuegeneric_OpRsh16Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux16 <t> x (Const16 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux32 <t> x (Const32 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16Ux64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [int16(uint16(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint16(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh16Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16Ux64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 16
+ // result: (Const16 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 16) {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16Ux64 <t> (Rsh16Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Rsh16x64 x _) (Const64 <t> [15]))
+ // result: (Rsh16Ux64 x (Const64 <t> [15]))
+ for {
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 15 {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(15)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 (Rsh16Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh16Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (ZeroExt8to16 (Trunc16to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpZeroExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16Ux8 <t> x (Const8 [c]))
+ // result: (Rsh16Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16Ux8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x16 <t> x (Const16 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x16 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x32 <t> x (Const32 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x32 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh16x64 (Const16 [c]) (Const64 [d]))
+ // result: (Const16 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh16x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh16x64 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh16x64 <t> (Rsh16x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh16x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x64 (Lsh16x64 x (Const64 [8])) (Const64 [8]))
+ // result: (SignExt8to16 (Trunc16to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh16x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 8 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 8 {
+ break
+ }
+ v.reset(OpSignExt8to16)
+ v0 := b.NewValue0(v.Pos, OpTrunc16to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh16x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh16x8 <t> x (Const8 [c]))
+ // result: (Rsh16x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh16x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh16x8 (Const16 [0]) _)
+ // result: (Const16 [0])
+ for {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux16 <t> x (Const16 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux32 <t> x (Const32 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32Ux64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [int32(uint32(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint32(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh32Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32Ux64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 32
+ // result: (Const32 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 32) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32Ux64 <t> (Rsh32Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Rsh32x64 x _) (Const64 <t> [31]))
+ // result: (Rsh32Ux64 x (Const64 <t> [31]))
+ for {
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 31 {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(31)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 (Rsh32Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh32Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (ZeroExt8to32 (Trunc32to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpZeroExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32Ux64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (ZeroExt16to32 (Trunc32to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpZeroExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32Ux8 <t> x (Const8 [c]))
+ // result: (Rsh32Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32Ux8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x16 <t> x (Const16 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x16 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x32 <t> x (Const32 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x32 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh32x64 (Const32 [c]) (Const64 [d]))
+ // result: (Const32 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh32x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh32x64 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh32x64 <t> (Rsh32x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh32x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [24])) (Const64 [24]))
+ // result: (SignExt8to32 (Trunc32to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 24 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 24 {
+ break
+ }
+ v.reset(OpSignExt8to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh32x64 (Lsh32x64 x (Const64 [16])) (Const64 [16]))
+ // result: (SignExt16to32 (Trunc32to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh32x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 16 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 16 {
+ break
+ }
+ v.reset(OpSignExt16to32)
+ v0 := b.NewValue0(v.Pos, OpTrunc32to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh32x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh32x8 <t> x (Const8 [c]))
+ // result: (Rsh32x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh32x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh32x8 (Const32 [0]) _)
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux16 <t> x (Const16 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux32 <t> x (Const32 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64Ux64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [int64(uint64(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint64(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh64Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64Ux64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 64
+ // result: (Const64 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 64) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64Ux64 <t> (Rsh64Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Rsh64x64 x _) (Const64 <t> [63]))
+ // result: (Rsh64Ux64 x (Const64 <t> [63]))
+ for {
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 63 {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(63)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 (Rsh64Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh64Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (ZeroExt8to64 (Trunc64to8 <typ.UInt8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpZeroExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.UInt8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (ZeroExt16to64 (Trunc64to16 <typ.UInt16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpZeroExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.UInt16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64Ux64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (ZeroExt32to64 (Trunc64to32 <typ.UInt32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpZeroExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.UInt32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64Ux8 <t> x (Const8 [c]))
+ // result: (Rsh64Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64Ux8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x16 <t> x (Const16 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x16 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x32 <t> x (Const32 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x32 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
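+// rewriteValuegeneric_OpRsh64x64 handles arithmetic right shifts with 64-bit
+// counts: it folds shifts of constants, drops shifts by 0, merges two
+// consecutive constant shifts into one when the combined count cannot
+// overflow (!uaddOvf), and recognizes a left shift followed by an arithmetic
+// right shift by the same constant (56, 48, or 32) as a sign extension of a
+// truncation, e.g. (x << 56) >> 56 is int64(int8(x)).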
+func rewriteValuegeneric_OpRsh64x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh64x64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh64x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh64x64 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh64x64 <t> (Rsh64x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh64x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [56])) (Const64 [56]))
+ // result: (SignExt8to64 (Trunc64to8 <typ.Int8> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 56 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 56 {
+ break
+ }
+ v.reset(OpSignExt8to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to8, typ.Int8)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [48])) (Const64 [48]))
+ // result: (SignExt16to64 (Trunc64to16 <typ.Int16> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 48 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 48 {
+ break
+ }
+ v.reset(OpSignExt16to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to16, typ.Int16)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ // match: (Rsh64x64 (Lsh64x64 x (Const64 [32])) (Const64 [32]))
+ // result: (SignExt32to64 (Trunc64to32 <typ.Int32> x))
+ for {
+ if v_0.Op != OpLsh64x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 || auxIntToInt64(v_0_1.AuxInt) != 32 || v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 32 {
+ break
+ }
+ v.reset(OpSignExt32to64)
+ v0 := b.NewValue0(v.Pos, OpTrunc64to32, typ.Int32)
+ v0.AddArg(x)
+ v.AddArg(v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh64x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh64x8 <t> x (Const8 [c]))
+ // result: (Rsh64x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh64x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh64x8 (Const64 [0]) _)
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux16 <t> x (Const16 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux32 <t> x (Const32 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ typ := &b.Func.Config.Types
+ // match: (Rsh8Ux64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [int8(uint8(c) >> uint64(d))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(uint8(c) >> uint64(d)))
+ return true
+ }
+ // match: (Rsh8Ux64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8Ux64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 _ (Const64 [c]))
+ // cond: uint64(c) >= 8
+ // result: (Const8 [0])
+ for {
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c) >= 8) {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8Ux64 <t> (Rsh8Ux64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8Ux64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Rsh8x64 x _) (Const64 <t> [7]))
+ // result: (Rsh8Ux64 x (Const64 <t> [7]))
+ for {
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ x := v_0.Args[0]
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ if auxIntToInt64(v_1.AuxInt) != 7 {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(7)
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux64 (Lsh8x64 (Rsh8Ux64 x (Const64 [c1])) (Const64 [c2])) (Const64 [c3]))
+ // cond: uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)
+ // result: (Rsh8Ux64 x (Const64 <typ.UInt64> [c1-c2+c3]))
+ for {
+ if v_0.Op != OpLsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpRsh8Ux64 {
+ break
+ }
+ _ = v_0_0.Args[1]
+ x := v_0_0.Args[0]
+ v_0_0_1 := v_0_0.Args[1]
+ if v_0_0_1.Op != OpConst64 {
+ break
+ }
+ c1 := auxIntToInt64(v_0_0_1.AuxInt)
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c2 := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ c3 := auxIntToInt64(v_1.AuxInt)
+ if !(uint64(c1) >= uint64(c2) && uint64(c3) >= uint64(c2) && !uaddOvf(c1-c2, c3)) {
+ break
+ }
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, typ.UInt64)
+ v0.AuxInt = int64ToAuxInt(c1 - c2 + c3)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8Ux8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8Ux8 <t> x (Const8 [c]))
+ // result: (Rsh8Ux64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8Ux64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8Ux8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x16 <t> x (Const16 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint16(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x16 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x32 <t> x (Const32 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint32(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x32 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x64 (Const8 [c]) (Const64 [d]))
+ // result: (Const8 [c >> uint64(d)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c >> uint64(d))
+ return true
+ }
+ // match: (Rsh8x64 x (Const64 [0]))
+ // result: x
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 || auxIntToInt64(v_1.AuxInt) != 0 {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (Rsh8x64 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Rsh8x64 <t> (Rsh8x64 x (Const64 [c])) (Const64 [d]))
+ // cond: !uaddOvf(c,d)
+ // result: (Rsh8x64 x (Const64 <t> [c+d]))
+ for {
+ t := v.Type
+ if v_0.Op != OpRsh8x64 {
+ break
+ }
+ _ = v_0.Args[1]
+ x := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0_1.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ if !(!uaddOvf(c, d)) {
+ break
+ }
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c + d)
+ v.AddArg2(x, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpRsh8x8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Rsh8x8 <t> x (Const8 [c]))
+ // result: (Rsh8x64 x (Const64 <t> [int64(uint8(c))]))
+ for {
+ t := v.Type
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpRsh8x64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ v.AddArg2(x, v0)
+ return true
+ }
+ // match: (Rsh8x8 (Const8 [0]) _)
+ // result: (Const8 [0])
+ for {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ return false
+}
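+// rewriteValuegeneric_OpSelect0 simplifies the quotient of a 128-by-64-bit
+// unsigned division whose high 64 dividend bits are zero: (Div128u 0 lo y)
+// reduces to an ordinary 64-bit division, Div64u lo y. Select1, the
+// remainder projection, gets the matching Mod64u rewrite below.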
+func rewriteValuegeneric_OpSelect0(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select0 (Div128u (Const64 [0]) lo y))
+ // result: (Div64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpDiv64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSelect1(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Select1 (Div128u (Const64 [0]) lo y))
+ // result: (Mod64u lo y)
+ for {
+ if v_0.Op != OpDiv128u {
+ break
+ }
+ y := v_0.Args[2]
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpConst64 || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ lo := v_0.Args[1]
+ v.reset(OpMod64u)
+ v.AddArg2(lo, y)
+ return true
+ }
+ return false
+}
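+// rewriteValuegeneric_OpSelectN projects results straight out of MakeResult,
+// and inlines calls to runtime.memmove with a small constant length as a Move
+// when isInlinableMemmove allows it. The memmove rules come in two shapes for
+// the same call: one where the arguments reach the call through a chain of
+// stack Stores, and one where dst, src, and the length are direct call
+// arguments (StaticCall/StaticLECall). The final rules drop calls that
+// needRaceCleanup identifies as unnecessary race-detector bookkeeping.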
+func rewriteValuegeneric_OpSelectN(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ config := b.Func.Config
+ // match: (SelectN [0] (MakeResult x ___))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpMakeResult || len(v_0.Args) < 1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (SelectN [1] (MakeResult x y ___))
+ // result: y
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpMakeResult || len(v_0.Args) < 2 {
+ break
+ }
+ y := v_0.Args[1]
+ v.copyOf(y)
+ return true
+ }
+ // match: (SelectN [2] (MakeResult x y z ___))
+ // result: z
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpMakeResult || len(v_0.Args) < 3 {
+ break
+ }
+ z := v_0.Args[2]
+ v.copyOf(z)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const64 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} s1:(Store _ (Const32 [sz]) s2:(Store _ src s3:(Store {t} _ dst mem)))))
+ // cond: sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ s1 := call.Args[0]
+ if s1.Op != OpStore {
+ break
+ }
+ _ = s1.Args[2]
+ s1_1 := s1.Args[1]
+ if s1_1.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(s1_1.AuxInt)
+ s2 := s1.Args[2]
+ if s2.Op != OpStore {
+ break
+ }
+ _ = s2.Args[2]
+ src := s2.Args[1]
+ s3 := s2.Args[2]
+ if s3.Op != OpStore {
+ break
+ }
+ mem := s3.Args[2]
+ dst := s3.Args[1]
+ if !(sz >= 0 && isSameCall(sym, "runtime.memmove") && s1.Uses == 1 && s2.Uses == 1 && s3.Uses == 1 && isInlinableMemmove(dst, src, int64(sz), config) && clobber(s1, s2, s3, call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticCall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticCall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const64 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst64 {
+ break
+ }
+ sz := auxIntToInt64(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} dst src (Const32 [sz]) mem))
+ // cond: sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)
+ // result: (Move {types.Types[types.TUINT8]} [int64(sz)] dst src mem)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 4 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ mem := call.Args[3]
+ dst := call.Args[0]
+ src := call.Args[1]
+ call_2 := call.Args[2]
+ if call_2.Op != OpConst32 {
+ break
+ }
+ sz := auxIntToInt32(call_2.AuxInt)
+ if !(sz >= 0 && call.Uses == 1 && isSameCall(sym, "runtime.memmove") && isInlinableMemmove(dst, src, int64(sz), config) && clobber(call)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(int64(sz))
+ v.Aux = typeToAux(types.Types[types.TUINT8])
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} a x))
+ // cond: needRaceCleanup(sym, call) && clobber(call)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ x := call.Args[1]
+ if !(needRaceCleanup(sym, call) && clobber(call)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ // match: (SelectN [0] call:(StaticLECall {sym} x))
+ // cond: needRaceCleanup(sym, call) && clobber(call)
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 {
+ break
+ }
+ call := v_0
+ if call.Op != OpStaticLECall || len(call.Args) != 1 {
+ break
+ }
+ sym := auxToCall(call.Aux)
+ x := call.Args[0]
+ if !(needRaceCleanup(sym, call) && clobber(call)) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
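+// rewriteValuegeneric_OpSignExt16to32 and the SignExt functions that follow
+// fold sign extensions of constants, and drop a truncate/sign-extend pair
+// when the truncated value came from an arithmetic right shift by at least
+// the number of discarded bits: the high bits are then already copies of the
+// sign bit, so for an int32 x, int32(int16(x >> 16)) is just x >> 16.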
+func rewriteValuegeneric_OpSignExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt16to32 (Trunc32to16 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt16to64 (Trunc64to16 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt32to64 (Trunc64to32 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to16 (Const8 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (SignExt8to16 (Trunc16to8 x:(Rsh16x64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to32 (Const8 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (SignExt8to32 (Trunc32to8 x:(Rsh32x64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSignExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SignExt8to64 (Const8 [c]))
+ // result: (Const64 [int64(c)])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(c))
+ return true
+ }
+ // match: (SignExt8to64 (Trunc64to8 x:(Rsh64x64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64x64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceCap(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceCap (SliceMake _ _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst64 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt64(v_0_2.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (Const32 <t> [c])))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpConst32 {
+ break
+ }
+ t := v_0_2.Type
+ c := auxIntToInt32(v_0_2.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceCap x)))
+ // result: (SliceCap x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceCap {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceCap)
+ v.AddArg(x)
+ return true
+ }
+ // match: (SliceCap (SliceMake _ _ (SliceLen x)))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[2]
+ v_0_2 := v_0.Args[2]
+ if v_0_2.Op != OpSliceLen {
+ break
+ }
+ x := v_0_2.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSliceLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SliceLen (SliceMake _ (Const64 <t> [c]) _))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (Const32 <t> [c]) _))
+ // result: (Const32 <t> [c])
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst32 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt32(v_0_1.AuxInt)
+ v.reset(OpConst32)
+ v.Type = t
+ v.AuxInt = int32ToAuxInt(c)
+ return true
+ }
+ // match: (SliceLen (SliceMake _ (SliceLen x) _))
+ // result: (SliceLen x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpSliceLen {
+ break
+ }
+ x := v_0_1.Args[0]
+ v.reset(OpSliceLen)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSlicePtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (SlicePtr (SliceMake (SlicePtr x) _ _))
+ // result: (SlicePtr x)
+ for {
+ if v_0.Op != OpSliceMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSlicePtr {
+ break
+ }
+ x := v_0_0.Args[0]
+ v.reset(OpSlicePtr)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
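+// rewriteValuegeneric_OpSlicemask folds Slicemask of constants: a constant
+// length greater than zero yields an all-ones mask and a zero length yields
+// zero, matching Slicemask's role of zeroing the pointer computed for an
+// empty slicing result.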
+func rewriteValuegeneric_OpSlicemask(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Slicemask (Const32 [x]))
+ // cond: x > 0
+ // result: (Const32 [-1])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ x := auxIntToInt32(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const32 [0]))
+ // result: (Const32 [0])
+ for {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Slicemask (Const64 [x]))
+ // cond: x > 0
+ // result: (Const64 [-1])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ x := auxIntToInt64(v_0.AuxInt)
+ if !(x > 0) {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(-1)
+ return true
+ }
+ // match: (Slicemask (Const64 [0]))
+ // result: (Const64 [0])
+ for {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSqrt(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Sqrt (Const64F [c]))
+ // cond: !math.IsNaN(math.Sqrt(c))
+ // result: (Const64F [math.Sqrt(c)])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if !(!math.IsNaN(math.Sqrt(c))) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(math.Sqrt(c))
+ return true
+ }
+ return false
+}
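+// rewriteValuegeneric_OpStaticLECall folds runtime.memequal calls that compare
+// a pointer against a read-only global (symIsRO) of constant size 1, 2, 4, or
+// 8 bytes: the symbol's bytes are read at compile time and the call becomes a
+// single load and Eq8/Eq16/Eq32/Eq64 comparison. The wider cases additionally
+// require canLoadUnaligned, and the 8-byte case a 64-bit target
+// (config.PtrSize == 8).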
+func rewriteValuegeneric_OpStaticLECall(v *Value) bool {
+ b := v.Block
+ config := b.Func.Config
+ typ := &b.Func.Config.Types
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [1]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon)
+ // result: (MakeResult (Eq8 (Load <typ.Int8> sptr mem) (Const8 <typ.Int8> [int8(read8(scon,0))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 1 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq8, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int8)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst8, typ.Int8)
+ v2.AuxInt = int8ToAuxInt(int8(read8(scon, 0)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [2]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq16 (Load <typ.Int16> sptr mem) (Const16 <typ.Int16> [int16(read16(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 2 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq16, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int16)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst16, typ.Int16)
+ v2.AuxInt = int16ToAuxInt(int16(read16(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [4]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)
+ // result: (MakeResult (Eq32 (Load <typ.Int32> sptr mem) (Const32 <typ.Int32> [int32(read32(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 4 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config)) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq32, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int32)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst32, typ.Int32)
+ v2.AuxInt = int32ToAuxInt(int32(read32(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ // match: (StaticLECall {callAux} sptr (Addr {scon} (SB)) (Const64 [8]) mem)
+ // cond: isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8
+ // result: (MakeResult (Eq64 (Load <typ.Int64> sptr mem) (Const64 <typ.Int64> [int64(read64(scon,0,config.ctxt.Arch.ByteOrder))])) mem)
+ for {
+ if len(v.Args) != 4 {
+ break
+ }
+ callAux := auxToCall(v.Aux)
+ mem := v.Args[3]
+ sptr := v.Args[0]
+ v_1 := v.Args[1]
+ if v_1.Op != OpAddr {
+ break
+ }
+ scon := auxToSym(v_1.Aux)
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpSB {
+ break
+ }
+ v_2 := v.Args[2]
+ if v_2.Op != OpConst64 || auxIntToInt64(v_2.AuxInt) != 8 || !(isSameCall(callAux, "runtime.memequal") && symIsRO(scon) && canLoadUnaligned(config) && config.PtrSize == 8) {
+ break
+ }
+ v.reset(OpMakeResult)
+ v0 := b.NewValue0(v.Pos, OpEq64, typ.Bool)
+ v1 := b.NewValue0(v.Pos, OpLoad, typ.Int64)
+ v1.AddArg2(sptr, mem)
+ v2 := b.NewValue0(v.Pos, OpConst64, typ.Int64)
+ v2.AuxInt = int64ToAuxInt(int64(read64(scon, 0, config.ctxt.Arch.ByteOrder)))
+ v0.AddArg2(v1, v2)
+ v.AddArg2(v0, mem)
+ return true
+ }
+ return false
+}
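+// rewriteValuegeneric_OpStore removes stores that cannot change memory
+// (storing a value just loaded from the same address, or storing zero into
+// memory freshly cleared by Zero or runtime.newobject), decomposes stores of
+// StructMakeN/ArrayMake values into per-field stores, turns a Load feeding a
+// Store of a non-SSA-able type into a Move, and drops a Move or Zero when the
+// stores stacked on top of it completely overwrite its destination.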
+func rewriteValuegeneric_OpStore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (Store {t1} p1 (Load <t2> p2 mem) mem)
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size()
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ mem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ if mem != v_2 || !(isSamePtr(p1, p2) && t2.Size() == t1.Size()) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ oldmem))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ if oldmem != mem.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ oldmem)))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ if oldmem != mem_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} p1 (Load <t2> p2 oldmem) mem:(Store {t3} p3 _ (Store {t4} p4 _ (Store {t5} p5 _ oldmem))))
+ // cond: isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ t2 := v_1.Type
+ oldmem := v_1.Args[1]
+ p2 := v_1.Args[0]
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p3 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p4 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t5 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p5 := mem_2_2.Args[0]
+ if oldmem != mem_2_2.Args[2] || !(isSamePtr(p1, p2) && t2.Size() == t1.Size() && disjoint(p1, t1.Size(), p3, t3.Size()) && disjoint(p1, t1.Size(), p4, t4.Size()) && disjoint(p1, t1.Size(), p5, t5.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t} (OffPtr [o] p1) x mem:(Zero [n] p2 _))
+ // cond: isConstZero(x) && o >= 0 && t.Size() + o <= n && isSamePtr(p1, p2)
+ // result: mem
+ for {
+ t := auxToType(v.Aux)
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ o := auxIntToInt64(v_0.AuxInt)
+ p1 := v_0.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem.AuxInt)
+ p2 := mem.Args[0]
+ if !(isConstZero(x) && o >= 0 && t.Size()+o <= n && isSamePtr(p1, p2)) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Zero [n] p3 _)))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2.AuxInt)
+ p3 := mem_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p3) && disjoint(op, t1.Size(), p2, t2.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Zero [n] p4 _))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2.AuxInt)
+ p4 := mem_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p4) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op:(OffPtr [o1] p1) x mem:(Store {t2} p2 _ (Store {t3} p3 _ (Store {t4} p4 _ (Zero [n] p5 _)))))
+ // cond: isConstZero(x) && o1 >= 0 && t1.Size() + o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())
+ // result: mem
+ for {
+ t1 := auxToType(v.Aux)
+ op := v_0
+ if op.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op.AuxInt)
+ p1 := op.Args[0]
+ x := v_1
+ mem := v_2
+ if mem.Op != OpStore {
+ break
+ }
+ t2 := auxToType(mem.Aux)
+ _ = mem.Args[2]
+ p2 := mem.Args[0]
+ mem_2 := mem.Args[2]
+ if mem_2.Op != OpStore {
+ break
+ }
+ t3 := auxToType(mem_2.Aux)
+ _ = mem_2.Args[2]
+ p3 := mem_2.Args[0]
+ mem_2_2 := mem_2.Args[2]
+ if mem_2_2.Op != OpStore {
+ break
+ }
+ t4 := auxToType(mem_2_2.Aux)
+ _ = mem_2_2.Args[2]
+ p4 := mem_2_2.Args[0]
+ mem_2_2_2 := mem_2_2.Args[2]
+ if mem_2_2_2.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(mem_2_2_2.AuxInt)
+ p5 := mem_2_2_2.Args[0]
+ if !(isConstZero(x) && o1 >= 0 && t1.Size()+o1 <= n && isSamePtr(p1, p5) && disjoint(op, t1.Size(), p2, t2.Size()) && disjoint(op, t1.Size(), p3, t3.Size()) && disjoint(op, t1.Size(), p4, t4.Size())) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store _ (StructMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpStructMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (StructMake1 <t> f0) mem)
+ // result: (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake1 {
+ break
+ }
+ t := v_1.Type
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(0))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v0.AuxInt = int64ToAuxInt(0)
+ v0.AddArg(dst)
+ v.AddArg3(v0, f0, mem)
+ return true
+ }
+ // match: (Store dst (StructMake2 <t> f0 f1) mem)
+ // result: (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake2 {
+ break
+ }
+ t := v_1.Type
+ f1 := v_1.Args[1]
+ f0 := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(1))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(0))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v2.AuxInt = int64ToAuxInt(0)
+ v2.AddArg(dst)
+ v1.AddArg3(v2, f0, mem)
+ v.AddArg3(v0, f1, v1)
+ return true
+ }
+ // match: (Store dst (StructMake3 <t> f0 f1 f2) mem)
+ // result: (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem)))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake3 {
+ break
+ }
+ t := v_1.Type
+ f2 := v_1.Args[2]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(2))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(1))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(0))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v4.AuxInt = int64ToAuxInt(0)
+ v4.AddArg(dst)
+ v3.AddArg3(v4, f0, mem)
+ v1.AddArg3(v2, f1, v3)
+ v.AddArg3(v0, f2, v1)
+ return true
+ }
+ // match: (Store dst (StructMake4 <t> f0 f1 f2 f3) mem)
+ // result: (Store {t.FieldType(3)} (OffPtr <t.FieldType(3).PtrTo()> [t.FieldOff(3)] dst) f3 (Store {t.FieldType(2)} (OffPtr <t.FieldType(2).PtrTo()> [t.FieldOff(2)] dst) f2 (Store {t.FieldType(1)} (OffPtr <t.FieldType(1).PtrTo()> [t.FieldOff(1)] dst) f1 (Store {t.FieldType(0)} (OffPtr <t.FieldType(0).PtrTo()> [0] dst) f0 mem))))
+ for {
+ dst := v_0
+ if v_1.Op != OpStructMake4 {
+ break
+ }
+ t := v_1.Type
+ f3 := v_1.Args[3]
+ f0 := v_1.Args[0]
+ f1 := v_1.Args[1]
+ f2 := v_1.Args[2]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(t.FieldType(3))
+ v0 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(3).PtrTo())
+ v0.AuxInt = int64ToAuxInt(t.FieldOff(3))
+ v0.AddArg(dst)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t.FieldType(2))
+ v2 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(2).PtrTo())
+ v2.AuxInt = int64ToAuxInt(t.FieldOff(2))
+ v2.AddArg(dst)
+ v3 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v3.Aux = typeToAux(t.FieldType(1))
+ v4 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(1).PtrTo())
+ v4.AuxInt = int64ToAuxInt(t.FieldOff(1))
+ v4.AddArg(dst)
+ v5 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v5.Aux = typeToAux(t.FieldType(0))
+ v6 := b.NewValue0(v.Pos, OpOffPtr, t.FieldType(0).PtrTo())
+ v6.AuxInt = int64ToAuxInt(0)
+ v6.AddArg(dst)
+ v5.AddArg3(v6, f0, mem)
+ v3.AddArg3(v4, f1, v5)
+ v1.AddArg3(v2, f2, v3)
+ v.AddArg3(v0, f3, v1)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) mem)
+ // cond: !fe.CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src mem)
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if mem != v_2 || !(!fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v.AddArg3(dst, src, mem)
+ return true
+ }
+ // match: (Store {t} dst (Load src mem) (VarDef {x} mem))
+ // cond: !fe.CanSSA(t)
+ // result: (Move {t} [t.Size()] dst src (VarDef {x} mem))
+ for {
+ t := auxToType(v.Aux)
+ dst := v_0
+ if v_1.Op != OpLoad {
+ break
+ }
+ mem := v_1.Args[1]
+ src := v_1.Args[0]
+ if v_2.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(v_2.Aux)
+ if mem != v_2.Args[0] || !(!fe.CanSSA(t)) {
+ break
+ }
+ v.reset(OpMove)
+ v.AuxInt = int64ToAuxInt(t.Size())
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg3(dst, src, v0)
+ return true
+ }
+ // match: (Store _ (ArrayMake0) mem)
+ // result: mem
+ for {
+ if v_1.Op != OpArrayMake0 {
+ break
+ }
+ mem := v_2
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store dst (ArrayMake1 e) mem)
+ // result: (Store {e.Type} dst e mem)
+ for {
+ dst := v_0
+ if v_1.Op != OpArrayMake1 {
+ break
+ }
+ e := v_1.Args[0]
+ mem := v_2
+ v.reset(OpStore)
+ v.Aux = typeToAux(e.Type)
+ v.AddArg3(dst, e, mem)
+ return true
+ }
+ // match: (Store (SelectN [0] call:(StaticLECall _ _)) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store (OffPtr (SelectN [0] call:(StaticLECall _ _))) x mem:(SelectN [1] call))
+ // cond: isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpOffPtr {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpSelectN || auxIntToInt64(v_0_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ x := v_1
+ mem := v_2
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isConstZero(x) && isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Move [n] p3 _ mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[2]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Move [n] p4 _ mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[2]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Move [n] p5 _ mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpMove {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[2]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [0] p2) d2 m3:(Zero [n] p3 mem)))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 mem))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr || auxIntToInt64(op2.AuxInt) != 0 {
+ break
+ }
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m3.AuxInt)
+ mem := m3.Args[1]
+ p3 := m3.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && o1 == t2.Size() && n == t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && clobber(m2, m3)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v0.AddArg3(op2, d2, mem)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [0] p3) d3 m4:(Zero [n] p4 mem))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 mem)))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr || auxIntToInt64(op3.AuxInt) != 0 {
+ break
+ }
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m4.AuxInt)
+ mem := m4.Args[1]
+ p4 := m4.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && o2 == t3.Size() && o1-o2 == t2.Size() && n == t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && clobber(m2, m3, m4)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v1.AddArg3(op3, d3, mem)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ // match: (Store {t1} op1:(OffPtr [o1] p1) d1 m2:(Store {t2} op2:(OffPtr [o2] p2) d2 m3:(Store {t3} op3:(OffPtr [o3] p3) d3 m4:(Store {t4} op4:(OffPtr [0] p4) d4 m5:(Zero [n] p5 mem)))))
+ // cond: m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size() + t3.Size() + t2.Size() + t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)
+ // result: (Store {t1} op1 d1 (Store {t2} op2 d2 (Store {t3} op3 d3 (Store {t4} op4 d4 mem))))
+ for {
+ t1 := auxToType(v.Aux)
+ op1 := v_0
+ if op1.Op != OpOffPtr {
+ break
+ }
+ o1 := auxIntToInt64(op1.AuxInt)
+ p1 := op1.Args[0]
+ d1 := v_1
+ m2 := v_2
+ if m2.Op != OpStore {
+ break
+ }
+ t2 := auxToType(m2.Aux)
+ _ = m2.Args[2]
+ op2 := m2.Args[0]
+ if op2.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(op2.AuxInt)
+ p2 := op2.Args[0]
+ d2 := m2.Args[1]
+ m3 := m2.Args[2]
+ if m3.Op != OpStore {
+ break
+ }
+ t3 := auxToType(m3.Aux)
+ _ = m3.Args[2]
+ op3 := m3.Args[0]
+ if op3.Op != OpOffPtr {
+ break
+ }
+ o3 := auxIntToInt64(op3.AuxInt)
+ p3 := op3.Args[0]
+ d3 := m3.Args[1]
+ m4 := m3.Args[2]
+ if m4.Op != OpStore {
+ break
+ }
+ t4 := auxToType(m4.Aux)
+ _ = m4.Args[2]
+ op4 := m4.Args[0]
+ if op4.Op != OpOffPtr || auxIntToInt64(op4.AuxInt) != 0 {
+ break
+ }
+ p4 := op4.Args[0]
+ d4 := m4.Args[1]
+ m5 := m4.Args[2]
+ if m5.Op != OpZero {
+ break
+ }
+ n := auxIntToInt64(m5.AuxInt)
+ mem := m5.Args[1]
+ p5 := m5.Args[0]
+ if !(m2.Uses == 1 && m3.Uses == 1 && m4.Uses == 1 && m5.Uses == 1 && o3 == t4.Size() && o2-o3 == t3.Size() && o1-o2 == t2.Size() && n == t4.Size()+t3.Size()+t2.Size()+t1.Size() && isSamePtr(p1, p2) && isSamePtr(p2, p3) && isSamePtr(p3, p4) && isSamePtr(p4, p5) && clobber(m2, m3, m4, m5)) {
+ break
+ }
+ v.reset(OpStore)
+ v.Aux = typeToAux(t1)
+ v0 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v0.Aux = typeToAux(t2)
+ v1 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v1.Aux = typeToAux(t3)
+ v2 := b.NewValue0(v.Pos, OpStore, types.TypeMem)
+ v2.Aux = typeToAux(t4)
+ v2.AddArg3(op4, d4, mem)
+ v1.AddArg3(op3, d3, v2)
+ v0.AddArg3(op2, d2, v1)
+ v.AddArg3(op1, d1, v0)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringLen(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringLen (StringMake _ (Const64 <t> [c])))
+ // result: (Const64 <t> [c])
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_1 := v_0.Args[1]
+ if v_0_1.Op != OpConst64 {
+ break
+ }
+ t := v_0_1.Type
+ c := auxIntToInt64(v_0_1.AuxInt)
+ v.reset(OpConst64)
+ v.Type = t
+ v.AuxInt = int64ToAuxInt(c)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStringPtr(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (StringPtr (StringMake (Addr <t> {s} base) _))
+ // result: (Addr <t> {s} base)
+ for {
+ if v_0.Op != OpStringMake {
+ break
+ }
+ v_0_0 := v_0.Args[0]
+ if v_0_0.Op != OpAddr {
+ break
+ }
+ t := v_0_0.Type
+ s := auxToSym(v_0_0.Aux)
+ base := v_0_0.Args[0]
+ v.reset(OpAddr)
+ v.Type = t
+ v.Aux = symToAux(s)
+ v.AddArg(base)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpStructSelect(v *Value) bool {
+ v_0 := v.Args[0]
+ b := v.Block
+ fe := b.Func.fe
+ // match: (StructSelect (StructMake1 x))
+ // result: x
+ for {
+ if v_0.Op != OpStructMake1 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake2 x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake2 _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake2 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake3 x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake3 _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake3 _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake3 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [0] (StructMake4 x _ _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [1] (StructMake4 _ x _ _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 1 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[1]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [2] (StructMake4 _ _ x _))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 2 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[2]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [3] (StructMake4 _ _ _ x))
+ // result: x
+ for {
+ if auxIntToInt64(v.AuxInt) != 3 || v_0.Op != OpStructMake4 {
+ break
+ }
+ x := v_0.Args[3]
+ v.copyOf(x)
+ return true
+ }
+ // match: (StructSelect [i] x:(Load <t> ptr mem))
+ // cond: !fe.CanSSA(t)
+ // result: @x.Block (Load <v.Type> (OffPtr <v.Type.PtrTo()> [t.FieldOff(int(i))] ptr) mem)
+ for {
+ i := auxIntToInt64(v.AuxInt)
+ x := v_0
+ if x.Op != OpLoad {
+ break
+ }
+ t := x.Type
+ mem := x.Args[1]
+ ptr := x.Args[0]
+ if !(!fe.CanSSA(t)) {
+ break
+ }
+ b = x.Block
+ v0 := b.NewValue0(v.Pos, OpLoad, v.Type)
+ v.copyOf(v0)
+ v1 := b.NewValue0(v.Pos, OpOffPtr, v.Type.PtrTo())
+ v1.AuxInt = int64ToAuxInt(t.FieldOff(int(i)))
+ v1.AddArg(ptr)
+ v0.AddArg2(v1, mem)
+ return true
+ }
+ // match: (StructSelect [0] (IData x))
+ // result: (IData x)
+ for {
+ if auxIntToInt64(v.AuxInt) != 0 || v_0.Op != OpIData {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpIData)
+ v.AddArg(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c-d])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ break
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub16 x (Const16 <t> [c]))
+ // cond: x.Op != OpConst16
+ // result: (Add16 (Const16 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst16 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt16(v_1.AuxInt)
+ if !(x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 <t> (Mul16 x y) (Mul16 x z))
+ // result: (Mul16 x (Sub16 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Sub16 (Add16 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Add16 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Sub16 x y) x)
+ // result: (Neg16 y)
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg16)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub16 x (Add16 x y))
+ // result: (Neg16 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg16)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 x (Sub16 i:(Const16 <t>) z))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Add16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub16 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub16 x (Add16 z i:(Const16 <t>)))
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 (Sub16 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Sub16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Sub16 i (Add16 <t> z x))
+ for {
+ if v_0.Op != OpSub16 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst16 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ break
+ }
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpAdd16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub16 (Add16 z i:(Const16 <t>)) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Add16 i (Sub16 <t> z x))
+ for {
+ if v_0.Op != OpAdd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpSub16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Sub16 (Const16 <t> [d]) x))
+ // result: (Add16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpSub16 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ v.reset(OpAdd16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub16 (Const16 <t> [c]) (Add16 (Const16 <t> [d]) x))
+ // result: (Sub16 (Const16 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpAdd16 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c-d])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ break
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub32 x (Const32 <t> [c]))
+ // cond: x.Op != OpConst32
+ // result: (Add32 (Const32 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst32 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt32(v_1.AuxInt)
+ if !(x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 <t> (Mul32 x y) (Mul32 x z))
+ // result: (Mul32 x (Sub32 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Sub32 (Add32 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Add32 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Sub32 x y) x)
+ // result: (Neg32 y)
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg32)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub32 x (Add32 x y))
+ // result: (Neg32 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg32)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 x (Sub32 i:(Const32 <t>) z))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Add32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub32 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub32 x (Add32 z i:(Const32 <t>)))
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 (Sub32 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Sub32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Sub32 i (Add32 <t> z x))
+ for {
+ if v_0.Op != OpSub32 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst32 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ break
+ }
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpAdd32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub32 (Add32 z i:(Const32 <t>)) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Add32 i (Sub32 <t> z x))
+ for {
+ if v_0.Op != OpAdd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpSub32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Sub32 (Const32 <t> [d]) x))
+ // result: (Add32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpSub32 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ v.reset(OpAdd32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub32 (Const32 <t> [c]) (Add32 (Const32 <t> [d]) x))
+ // result: (Sub32 (Const32 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpAdd32 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub32F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub32F (Const32F [c]) (Const32F [d]))
+ // cond: c-d == c-d
+ // result: (Const32F [c-d])
+ for {
+ if v_0.Op != OpConst32F {
+ break
+ }
+ c := auxIntToFloat32(v_0.AuxInt)
+ if v_1.Op != OpConst32F {
+ break
+ }
+ d := auxIntToFloat32(v_1.AuxInt)
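+		// c-d == c-d is false only when c-d is NaN, so constant folding is skipped when the result would be NaN.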
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst32F)
+ v.AuxInt = float32ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c-d])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ break
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub64 x (Const64 <t> [c]))
+ // cond: x.Op != OpConst64
+ // result: (Add64 (Const64 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst64 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt64(v_1.AuxInt)
+ if !(x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 <t> (Mul64 x y) (Mul64 x z))
+ // result: (Mul64 x (Sub64 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Sub64 (Add64 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Add64 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Sub64 x y) x)
+ // result: (Neg64 y)
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg64)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub64 x (Add64 x y))
+ // result: (Neg64 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg64)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 x (Sub64 i:(Const64 <t>) z))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Add64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub64 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub64 x (Add64 z i:(Const64 <t>)))
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 (Sub64 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Sub64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Sub64 i (Add64 <t> z x))
+ for {
+ if v_0.Op != OpSub64 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst64 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ break
+ }
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpAdd64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub64 (Add64 z i:(Const64 <t>)) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Add64 i (Sub64 <t> z x))
+ for {
+ if v_0.Op != OpAdd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpSub64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Sub64 (Const64 <t> [d]) x))
+ // result: (Add64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpSub64 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ v.reset(OpAdd64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub64 (Const64 <t> [c]) (Add64 (Const64 <t> [d]) x))
+ // result: (Sub64 (Const64 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpAdd64 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub64F(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ // match: (Sub64F (Const64F [c]) (Const64F [d]))
+ // cond: c-d == c-d
+ // result: (Const64F [c-d])
+ for {
+ if v_0.Op != OpConst64F {
+ break
+ }
+ c := auxIntToFloat64(v_0.AuxInt)
+ if v_1.Op != OpConst64F {
+ break
+ }
+ d := auxIntToFloat64(v_1.AuxInt)
+ if !(c-d == c-d) {
+ break
+ }
+ v.reset(OpConst64F)
+ v.AuxInt = float64ToAuxInt(c - d)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpSub8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Sub8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c-d])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ break
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c - d)
+ return true
+ }
+ // match: (Sub8 x (Const8 <t> [c]))
+ // cond: x.Op != OpConst8
+ // result: (Add8 (Const8 <t> [-c]) x)
+ for {
+ x := v_0
+ if v_1.Op != OpConst8 {
+ break
+ }
+ t := v_1.Type
+ c := auxIntToInt8(v_1.AuxInt)
+ if !(x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(-c)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 <t> (Mul8 x y) (Mul8 x z))
+ // result: (Mul8 x (Sub8 <t> y z))
+ for {
+ t := v.Type
+ if v_0.Op != OpMul8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if v_1.Op != OpMul8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ z := v_1_1
+ v.reset(OpMul8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(y, z)
+ v.AddArg2(x, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Sub8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Sub8 (Add8 x y) x)
+ // result: y
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if x != v_1 {
+ continue
+ }
+ v.copyOf(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Add8 x y) y)
+ // result: x
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ x := v_0_0
+ y := v_0_1
+ if y != v_1 {
+ continue
+ }
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Sub8 x y) x)
+ // result: (Neg8 y)
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ y := v_0.Args[1]
+ x := v_0.Args[0]
+ if x != v_1 {
+ break
+ }
+ v.reset(OpNeg8)
+ v.AddArg(y)
+ return true
+ }
+ // match: (Sub8 x (Add8 x y))
+ // result: (Neg8 y)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.reset(OpNeg8)
+ v.AddArg(y)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 x (Sub8 i:(Const8 <t>) z))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Add8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpSub8 {
+ break
+ }
+ z := v_1.Args[1]
+ i := v_1.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ // match: (Sub8 x (Add8 z i:(Const8 <t>)))
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 (Sub8 <t> x z) i)
+ for {
+ x := v_0
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ z := v_1_0
+ i := v_1_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(x, z)
+ v.AddArg2(v0, i)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Sub8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Sub8 i (Add8 <t> z x))
+ for {
+ if v_0.Op != OpSub8 {
+ break
+ }
+ z := v_0.Args[1]
+ i := v_0.Args[0]
+ if i.Op != OpConst8 {
+ break
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ break
+ }
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpAdd8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ // match: (Sub8 (Add8 z i:(Const8 <t>)) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Add8 i (Sub8 <t> z x))
+ for {
+ if v_0.Op != OpAdd8 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ z := v_0_0
+ i := v_0_1
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpSub8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ break
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Sub8 (Const8 <t> [d]) x))
+ // result: (Add8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpSub8 {
+ break
+ }
+ x := v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ break
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ v.reset(OpAdd8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ // match: (Sub8 (Const8 <t> [c]) (Add8 (Const8 <t> [d]) x))
+ // result: (Sub8 (Const8 <t> [c-d]) x)
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpAdd8 {
+ break
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_1_0, v_1_1 = _i0+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpSub8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c - d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc16to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc16to8 (Const16 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc16to8 (ZeroExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (SignExt8to16 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to16 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc16to8 (And16 (Const16 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc16to8 x)
+ for {
+ if v_0.Op != OpAnd16 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst16 {
+ continue
+ }
+ y := auxIntToInt16(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc16to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to16 (Const32 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt8to32 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (ZeroExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt8to32 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc32to16 (SignExt16to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to16 (And32 (Const32 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc32to16 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc32to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc32to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc32to8 (Const32 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc32to8 (ZeroExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (SignExt8to32 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to32 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc32to8 (And32 (Const32 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc32to8 x)
+ for {
+ if v_0.Op != OpAnd32 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst32 {
+ continue
+ }
+ y := auxIntToInt32(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc32to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to16 (Const64 [c]))
+ // result: (Const16 [int16(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(c))
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt8to64 x))
+ // result: (ZeroExt8to16 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (ZeroExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt8to64 x))
+ // result: (SignExt8to16 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to16)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to16 (SignExt16to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to16 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFF == 0xFFFF
+ // result: (Trunc64to16 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFF == 0xFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to16)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to32 (Const64 [c]))
+ // result: (Const32 [int32(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(c))
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt8to64 x))
+ // result: (ZeroExt8to32 x)
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt16to64 x))
+ // result: (ZeroExt16to32 x)
+ for {
+ if v_0.Op != OpZeroExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpZeroExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (ZeroExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt8to64 x))
+ // result: (SignExt8to32 x)
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt8to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt16to64 x))
+ // result: (SignExt16to32 x)
+ for {
+ if v_0.Op != OpSignExt16to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.reset(OpSignExt16to32)
+ v.AddArg(x)
+ return true
+ }
+ // match: (Trunc64to32 (SignExt32to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt32to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to32 (And64 (Const64 [y]) x))
+ // cond: y&0xFFFFFFFF == 0xFFFFFFFF
+ // result: (Trunc64to32 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFFFFFFFF == 0xFFFFFFFF) {
+ continue
+ }
+ v.reset(OpTrunc64to32)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpTrunc64to8(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (Trunc64to8 (Const64 [c]))
+ // result: (Const8 [int8(c)])
+ for {
+ if v_0.Op != OpConst64 {
+ break
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(int8(c))
+ return true
+ }
+ // match: (Trunc64to8 (ZeroExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpZeroExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (SignExt8to64 x))
+ // result: x
+ for {
+ if v_0.Op != OpSignExt8to64 {
+ break
+ }
+ x := v_0.Args[0]
+ v.copyOf(x)
+ return true
+ }
+ // match: (Trunc64to8 (And64 (Const64 [y]) x))
+ // cond: y&0xFF == 0xFF
+ // result: (Trunc64to8 x)
+ for {
+ if v_0.Op != OpAnd64 {
+ break
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i0 := 0; _i0 <= 1; _i0, v_0_0, v_0_1 = _i0+1, v_0_1, v_0_0 {
+ if v_0_0.Op != OpConst64 {
+ continue
+ }
+ y := auxIntToInt64(v_0_0.AuxInt)
+ x := v_0_1
+ if !(y&0xFF == 0xFF) {
+ continue
+ }
+ v.reset(OpTrunc64to8)
+ v.AddArg(x)
+ return true
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor16(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor16 (Const16 [c]) (Const16 [d]))
+ // result: (Const16 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpConst16 {
+ continue
+ }
+ d := auxIntToInt16(v_1.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x x)
+ // result: (Const16 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(0)
+ return true
+ }
+ // match: (Xor16 (Const16 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 || auxIntToInt16(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor16 x (Xor16 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Xor16 i:(Const16 <t>) z) x)
+ // cond: (z.Op != OpConst16 && x.Op != OpConst16)
+ // result: (Xor16 i (Xor16 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor16 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst16 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst16 && x.Op != OpConst16) {
+ continue
+ }
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpXor16, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor16 (Const16 <t> [c]) (Xor16 (Const16 <t> [d]) x))
+ // result: (Xor16 (Const16 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst16 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt16(v_0.AuxInt)
+ if v_1.Op != OpXor16 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst16 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt16(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor16)
+ v0 := b.NewValue0(v.Pos, OpConst16, t)
+ v0.AuxInt = int16ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor32(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor32 (Const32 [c]) (Const32 [d]))
+ // result: (Const32 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpConst32 {
+ continue
+ }
+ d := auxIntToInt32(v_1.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x x)
+ // result: (Const32 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(0)
+ return true
+ }
+ // match: (Xor32 (Const32 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 || auxIntToInt32(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor32 x (Xor32 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Xor32 i:(Const32 <t>) z) x)
+ // cond: (z.Op != OpConst32 && x.Op != OpConst32)
+ // result: (Xor32 i (Xor32 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor32 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst32 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst32 && x.Op != OpConst32) {
+ continue
+ }
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpXor32, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor32 (Const32 <t> [c]) (Xor32 (Const32 <t> [d]) x))
+ // result: (Xor32 (Const32 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst32 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt32(v_0.AuxInt)
+ if v_1.Op != OpXor32 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst32 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt32(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor32)
+ v0 := b.NewValue0(v.Pos, OpConst32, t)
+ v0.AuxInt = int32ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor64(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor64 (Const64 [c]) (Const64 [d]))
+ // result: (Const64 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpConst64 {
+ continue
+ }
+ d := auxIntToInt64(v_1.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x x)
+ // result: (Const64 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(0)
+ return true
+ }
+ // match: (Xor64 (Const64 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 || auxIntToInt64(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor64 x (Xor64 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Xor64 i:(Const64 <t>) z) x)
+ // cond: (z.Op != OpConst64 && x.Op != OpConst64)
+ // result: (Xor64 i (Xor64 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor64 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst64 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst64 && x.Op != OpConst64) {
+ continue
+ }
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpXor64, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor64 (Const64 <t> [c]) (Xor64 (Const64 <t> [d]) x))
+ // result: (Xor64 (Const64 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst64 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt64(v_0.AuxInt)
+ if v_1.Op != OpXor64 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst64 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt64(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor64)
+ v0 := b.NewValue0(v.Pos, OpConst64, t)
+ v0.AuxInt = int64ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpXor8(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Xor8 (Const8 [c]) (Const8 [d]))
+ // result: (Const8 [c^d])
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpConst8 {
+ continue
+ }
+ d := auxIntToInt8(v_1.AuxInt)
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(c ^ d)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x x)
+ // result: (Const8 [0])
+ for {
+ x := v_0
+ if x != v_1 {
+ break
+ }
+ v.reset(OpConst8)
+ v.AuxInt = int8ToAuxInt(0)
+ return true
+ }
+ // match: (Xor8 (Const8 [0]) x)
+ // result: x
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 || auxIntToInt8(v_0.AuxInt) != 0 {
+ continue
+ }
+ x := v_1
+ v.copyOf(x)
+ return true
+ }
+ break
+ }
+ // match: (Xor8 x (Xor8 x y))
+ // result: y
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ x := v_0
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if x != v_1_0 {
+ continue
+ }
+ y := v_1_1
+ v.copyOf(y)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Xor8 i:(Const8 <t>) z) x)
+ // cond: (z.Op != OpConst8 && x.Op != OpConst8)
+ // result: (Xor8 i (Xor8 <t> z x))
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpXor8 {
+ continue
+ }
+ _ = v_0.Args[1]
+ v_0_0 := v_0.Args[0]
+ v_0_1 := v_0.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_0_0, v_0_1 = _i1+1, v_0_1, v_0_0 {
+ i := v_0_0
+ if i.Op != OpConst8 {
+ continue
+ }
+ t := i.Type
+ z := v_0_1
+ x := v_1
+ if !(z.Op != OpConst8 && x.Op != OpConst8) {
+ continue
+ }
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpXor8, t)
+ v0.AddArg2(z, x)
+ v.AddArg2(i, v0)
+ return true
+ }
+ }
+ break
+ }
+ // match: (Xor8 (Const8 <t> [c]) (Xor8 (Const8 <t> [d]) x))
+ // result: (Xor8 (Const8 <t> [c^d]) x)
+ for {
+ for _i0 := 0; _i0 <= 1; _i0, v_0, v_1 = _i0+1, v_1, v_0 {
+ if v_0.Op != OpConst8 {
+ continue
+ }
+ t := v_0.Type
+ c := auxIntToInt8(v_0.AuxInt)
+ if v_1.Op != OpXor8 {
+ continue
+ }
+ _ = v_1.Args[1]
+ v_1_0 := v_1.Args[0]
+ v_1_1 := v_1.Args[1]
+ for _i1 := 0; _i1 <= 1; _i1, v_1_0, v_1_1 = _i1+1, v_1_1, v_1_0 {
+ if v_1_0.Op != OpConst8 || v_1_0.Type != t {
+ continue
+ }
+ d := auxIntToInt8(v_1_0.AuxInt)
+ x := v_1_1
+ v.reset(OpXor8)
+ v0 := b.NewValue0(v.Pos, OpConst8, t)
+ v0.AuxInt = int8ToAuxInt(c ^ d)
+ v.AddArg2(v0, x)
+ return true
+ }
+ }
+ break
+ }
+ return false
+}
+func rewriteValuegeneric_OpZero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+ b := v.Block
+ // match: (Zero (SelectN [0] call:(StaticLECall _ _)) mem:(SelectN [1] call))
+ // cond: isSameCall(call.Aux, "runtime.newobject")
+ // result: mem
+ for {
+ if v_0.Op != OpSelectN || auxIntToInt64(v_0.AuxInt) != 0 {
+ break
+ }
+ call := v_0.Args[0]
+ if call.Op != OpStaticLECall || len(call.Args) != 2 {
+ break
+ }
+ mem := v_1
+ if mem.Op != OpSelectN || auxIntToInt64(mem.AuxInt) != 1 || call != mem.Args[0] || !(isSameCall(call.Aux, "runtime.newobject")) {
+ break
+ }
+ v.copyOf(mem)
+ return true
+ }
+ // match: (Zero {t1} [n] p1 store:(Store {t2} (OffPtr [o2] p2) _ mem))
+ // cond: isSamePtr(p1, p2) && store.Uses == 1 && n >= o2 + t2.Size() && clobber(store)
+ // result: (Zero {t1} [n] p1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t1 := auxToType(v.Aux)
+ p1 := v_0
+ store := v_1
+ if store.Op != OpStore {
+ break
+ }
+ t2 := auxToType(store.Aux)
+ mem := store.Args[2]
+ store_0 := store.Args[0]
+ if store_0.Op != OpOffPtr {
+ break
+ }
+ o2 := auxIntToInt64(store_0.AuxInt)
+ p2 := store_0.Args[0]
+ if !(isSamePtr(p1, p2) && store.Uses == 1 && n >= o2+t2.Size() && clobber(store)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t1)
+ v.AddArg2(p1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 move:(Move {t} [n] dst2 _ mem))
+ // cond: move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)
+ // result: (Zero {t} [n] dst1 mem)
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ move := v_1
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v.AddArg2(dst1, mem)
+ return true
+ }
+ // match: (Zero {t} [n] dst1 vardef:(VarDef {x} move:(Move {t} [n] dst2 _ mem)))
+ // cond: move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)
+ // result: (Zero {t} [n] dst1 (VarDef {x} mem))
+ for {
+ n := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ x := auxToSym(vardef.Aux)
+ move := vardef.Args[0]
+ if move.Op != OpMove || auxIntToInt64(move.AuxInt) != n || auxToType(move.Aux) != t {
+ break
+ }
+ mem := move.Args[2]
+ dst2 := move.Args[0]
+ if !(move.Uses == 1 && vardef.Uses == 1 && isSamePtr(dst1, dst2) && clobber(move, vardef)) {
+ break
+ }
+ v.reset(OpZero)
+ v.AuxInt = int64ToAuxInt(n)
+ v.Aux = typeToAux(t)
+ v0 := b.NewValue0(v.Pos, OpVarDef, types.TypeMem)
+ v0.Aux = symToAux(x)
+ v0.AddArg(mem)
+ v.AddArg2(dst1, v0)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 zero:(Zero {t} [s] dst2 _))
+ // cond: isSamePtr(dst1, dst2)
+ // result: zero
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ zero := v_1
+ if zero.Op != OpZero || auxIntToInt64(zero.AuxInt) != s || auxToType(zero.Aux) != t {
+ break
+ }
+ dst2 := zero.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(zero)
+ return true
+ }
+ // match: (Zero {t} [s] dst1 vardef:(VarDef (Zero {t} [s] dst2 _)))
+ // cond: isSamePtr(dst1, dst2)
+ // result: vardef
+ for {
+ s := auxIntToInt64(v.AuxInt)
+ t := auxToType(v.Aux)
+ dst1 := v_0
+ vardef := v_1
+ if vardef.Op != OpVarDef {
+ break
+ }
+ vardef_0 := vardef.Args[0]
+ if vardef_0.Op != OpZero || auxIntToInt64(vardef_0.AuxInt) != s || auxToType(vardef_0.Aux) != t {
+ break
+ }
+ dst2 := vardef_0.Args[0]
+ if !(isSamePtr(dst1, dst2)) {
+ break
+ }
+ v.copyOf(vardef)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to32 (Const16 [c]))
+ // result: (Const32 [int32(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to32 (Trunc32to16 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 16
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 16) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt16to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt16to64 (Const16 [c]))
+ // result: (Const64 [int64(uint16(c))])
+ for {
+ if v_0.Op != OpConst16 {
+ break
+ }
+ c := auxIntToInt16(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint16(c)))
+ return true
+ }
+ // match: (ZeroExt16to64 (Trunc64to16 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 48
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to16 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 48) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt32to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt32to64 (Const32 [c]))
+ // result: (Const64 [int64(uint32(c))])
+ for {
+ if v_0.Op != OpConst32 {
+ break
+ }
+ c := auxIntToInt32(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint32(c)))
+ return true
+ }
+ // match: (ZeroExt32to64 (Trunc64to32 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 32
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to32 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 32) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to16(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to16 (Const8 [c]))
+ // result: (Const16 [int16( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst16)
+ v.AuxInt = int16ToAuxInt(int16(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to16 (Trunc16to8 x:(Rsh16Ux64 _ (Const64 [s]))))
+ // cond: s >= 8
+ // result: x
+ for {
+ if v_0.Op != OpTrunc16to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh16Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 8) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to32(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to32 (Const8 [c]))
+ // result: (Const32 [int32( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst32)
+ v.AuxInt = int32ToAuxInt(int32(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to32 (Trunc32to8 x:(Rsh32Ux64 _ (Const64 [s]))))
+ // cond: s >= 24
+ // result: x
+ for {
+ if v_0.Op != OpTrunc32to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh32Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 24) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteValuegeneric_OpZeroExt8to64(v *Value) bool {
+ v_0 := v.Args[0]
+ // match: (ZeroExt8to64 (Const8 [c]))
+ // result: (Const64 [int64( uint8(c))])
+ for {
+ if v_0.Op != OpConst8 {
+ break
+ }
+ c := auxIntToInt8(v_0.AuxInt)
+ v.reset(OpConst64)
+ v.AuxInt = int64ToAuxInt(int64(uint8(c)))
+ return true
+ }
+ // match: (ZeroExt8to64 (Trunc64to8 x:(Rsh64Ux64 _ (Const64 [s]))))
+ // cond: s >= 56
+ // result: x
+ for {
+ if v_0.Op != OpTrunc64to8 {
+ break
+ }
+ x := v_0.Args[0]
+ if x.Op != OpRsh64Ux64 {
+ break
+ }
+ _ = x.Args[1]
+ x_1 := x.Args[1]
+ if x_1.Op != OpConst64 {
+ break
+ }
+ s := auxIntToInt64(x_1.AuxInt)
+ if !(s >= 56) {
+ break
+ }
+ v.copyOf(x)
+ return true
+ }
+ return false
+}
+func rewriteBlockgeneric(b *Block) bool {
+ switch b.Kind {
+ case BlockIf:
+ // match: (If (Not cond) yes no)
+ // result: (If cond no yes)
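+ // (Dropping the Not is compensated for by swapping the yes/no successors.)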
+ for b.Controls[0].Op == OpNot {
+ v_0 := b.Controls[0]
+ cond := v_0.Args[0]
+ b.resetWithControl(BlockIf, cond)
+ b.swapSuccessors()
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: c
+ // result: (First yes no)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ return true
+ }
+ // match: (If (ConstBool [c]) yes no)
+ // cond: !c
+ // result: (First no yes)
+ for b.Controls[0].Op == OpConstBool {
+ v_0 := b.Controls[0]
+ c := auxIntToBool(v_0.AuxInt)
+ if !(!c) {
+ break
+ }
+ b.Reset(BlockFirst)
+ b.swapSuccessors()
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/ssa/schedule.go b/src/cmd/compile/internal/ssa/schedule.go
new file mode 100644
index 0000000..c5130b2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule.go
@@ -0,0 +1,524 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "container/heap"
+ "sort"
+)
+
+const (
+ ScorePhi = iota // towards top of block
+ ScoreArg
+ ScoreNilCheck
+ ScoreReadTuple
+ ScoreVarDef
+ ScoreMemory
+ ScoreReadFlags
+ ScoreDefault
+ ScoreFlags
+ ScoreControl // towards bottom of block
+)
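+
+// A lower score schedules a value closer to the top of its block: for example,
+// phis (ScorePhi) are placed first, a flag-producing comparison (ScoreFlags)
+// lands near the bottom, and the block's control value (ScoreControl) comes
+// last, which keeps the flags register live for as short a span as possible.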
+
+type ValHeap struct {
+ a []*Value
+ score []int8
+}
+
+func (h ValHeap) Len() int { return len(h.a) }
+func (h ValHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *ValHeap) Push(x interface{}) {
+ // Push and Pop use pointer receivers because they modify the slice's length,
+ // not just its contents.
+ v := x.(*Value)
+ h.a = append(h.a, v)
+}
+func (h *ValHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[0 : n-1]
+ return x
+}
+func (h ValHeap) Less(i, j int) bool {
+ x := h.a[i]
+ y := h.a[j]
+ sx := h.score[x.ID]
+ sy := h.score[y.ID]
+ if c := sx - sy; c != 0 {
+ return c > 0 // higher score comes later.
+ }
+ if x.Pos != y.Pos { // Favor in-order line stepping
+ return x.Pos.After(y.Pos)
+ }
+ if x.Op != OpPhi {
+ if c := len(x.Args) - len(y.Args); c != 0 {
+ return c < 0 // fewer args come later
+ }
+ }
+ if c := x.Uses - y.Uses; c != 0 {
+ return c < 0 // fewer uses come later
+ }
+ // These comparisons are fairly arbitrary.
+ // The goal here is stability in the face
+ // of unrelated changes elsewhere in the compiler.
+ if c := x.AuxInt - y.AuxInt; c != 0 {
+ return c > 0
+ }
+ if cmp := x.Type.Compare(y.Type); cmp != types.CMPeq {
+ return cmp == types.CMPgt
+ }
+ return x.ID > y.ID
+}
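+
+// Note that Less orders the heap so that values which should be emitted last
+// compare as least: heap.Pop therefore yields them first, schedule appends the
+// popped values to order, and order is finally reversed into b.Values, putting
+// the low-score values (phis, args) back at the top of the block.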
+
+func (op Op) isLoweredGetClosurePtr() bool {
+ switch op {
+ case OpAMD64LoweredGetClosurePtr, OpPPC64LoweredGetClosurePtr, OpARMLoweredGetClosurePtr, OpARM64LoweredGetClosurePtr,
+ Op386LoweredGetClosurePtr, OpMIPS64LoweredGetClosurePtr, OpS390XLoweredGetClosurePtr, OpMIPSLoweredGetClosurePtr,
+ OpRISCV64LoweredGetClosurePtr, OpWasmLoweredGetClosurePtr:
+ return true
+ }
+ return false
+}
+
+// Schedule the Values in each Block. After this phase returns, the
+// order of b.Values matters and is the order in which those values
+// will appear in the assembly output. For now it generates a
+// reasonable valid schedule using a priority queue. TODO(khr):
+// schedule smarter.
+func schedule(f *Func) {
+ // For each value, the number of times it is used in the block
+ // by values that have not been scheduled yet.
+ uses := make([]int32, f.NumValues())
+
+ // reusable priority queue
+ priq := new(ValHeap)
+
+ // "priority" for a value
+ score := make([]int8, f.NumValues())
+
+ // scheduling order. We queue values in this list in reverse order.
+ // A constant bound allows this to be stack-allocated. 64 is
+ // enough to cover almost every schedule call.
+ order := make([]*Value, 0, 64)
+
+ // maps mem values to the next live memory value
+ nextMem := make([]*Value, f.NumValues())
+ // additional pretend arguments for each Value. Used to enforce load/store ordering.
+ additionalArgs := make([][]*Value, f.NumValues())
+
+ for _, b := range f.Blocks {
+ // Compute score. Larger numbers are scheduled closer to the end of the block.
+ for _, v := range b.Values {
+ switch {
+ case v.Op.isLoweredGetClosurePtr():
+ // We score LoweredGetClosurePtr like a phi, as early as possible, to ensure that the
+ // context register is not stomped. LoweredGetClosurePtr should only appear
+ // in the entry block where there are no phi functions, so there is no
+ // conflict or ambiguity here.
+ if b != f.Entry {
+ f.Fatalf("LoweredGetClosurePtr appeared outside of entry block, b=%s", b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpAMD64LoweredNilCheck || v.Op == OpPPC64LoweredNilCheck ||
+ v.Op == OpARMLoweredNilCheck || v.Op == OpARM64LoweredNilCheck ||
+ v.Op == Op386LoweredNilCheck || v.Op == OpMIPS64LoweredNilCheck ||
+ v.Op == OpS390XLoweredNilCheck || v.Op == OpMIPSLoweredNilCheck ||
+ v.Op == OpRISCV64LoweredNilCheck || v.Op == OpWasmLoweredNilCheck:
+ // Nil checks must come before loads from the same address.
+ score[v.ID] = ScoreNilCheck
+ case v.Op == OpPhi:
+ // We want all the phis first.
+ score[v.ID] = ScorePhi
+ case v.Op == OpVarDef:
+ // We want all the vardefs next.
+ score[v.ID] = ScoreVarDef
+ case v.Op == OpArgIntReg || v.Op == OpArgFloatReg:
+ // In-register args must be scheduled as early as possible to ensure that the
+ // context register is not stomped. They should only appear in the entry block.
+ if b != f.Entry {
+ f.Fatalf("%s appeared outside of entry block, b=%s", v.Op, b.String())
+ }
+ score[v.ID] = ScorePhi
+ case v.Op == OpArg:
+ // We want all the args as early as possible, for better debugging.
+ score[v.ID] = ScoreArg
+ case v.Type.IsMemory():
+ // Schedule stores as early as possible. This tends to
+ // reduce register pressure. It also helps make sure
+ // VARDEF ops are scheduled before the corresponding LEA.
+ score[v.ID] = ScoreMemory
+ case v.Op == OpSelect0 || v.Op == OpSelect1 || v.Op == OpSelectN:
+ // Schedule the pseudo-op of reading part of a tuple
+ // immediately after the tuple-generating op, since
+ // this value is already live. This also removes its
+ // false dependency on the other part of the tuple.
+ // Also ensures tuple is never spilled.
+ score[v.ID] = ScoreReadTuple
+ case v.Type.IsFlags() || v.Type.IsTuple() && v.Type.FieldType(1).IsFlags():
+ // Schedule flag register generation as late as possible.
+ // This makes sure that we only have one live flags
+ // value at a time.
+ score[v.ID] = ScoreFlags
+ default:
+ score[v.ID] = ScoreDefault
+ // If we're reading flags, schedule earlier to keep flag lifetime short.
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ score[v.ID] = ScoreReadFlags
+ }
+ }
+ }
+ }
+ }
+
+ for _, b := range f.Blocks {
+ // Find store chain for block.
+ // Store chains for different blocks overwrite each other, so
+ // the calculated store chain is good only for this block.
+ for _, v := range b.Values {
+ if v.Op != OpPhi && v.Type.IsMemory() {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ nextMem[w.ID] = v
+ }
+ }
+ }
+ }
+
+ // Compute uses.
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ // If a value is used by a phi, it does not induce
+ // a scheduling edge because that use is from the
+ // previous iteration.
+ continue
+ }
+ for _, w := range v.Args {
+ if w.Block == b {
+ uses[w.ID]++
+ }
+ // Any load must come before the following store.
+ if !v.Type.IsMemory() && w.Type.IsMemory() {
+ // v is a load.
+ s := nextMem[w.ID]
+ if s == nil || s.Block != b {
+ continue
+ }
+ additionalArgs[s.ID] = append(additionalArgs[s.ID], v)
+ uses[v.ID]++
+ }
+ }
+ }
+
+ for _, c := range b.ControlValues() {
+ // Force the control values to be scheduled at the end,
+ // unless they are phi values (which must be first).
+ // OpArg also goes first -- if it is on the stack it is register-allocated
+ // to a LoadReg; if it is in a register it is there from the beginning anyway.
+ if score[c.ID] == ScorePhi || score[c.ID] == ScoreArg {
+ continue
+ }
+ score[c.ID] = ScoreControl
+
+ // Schedule values dependent on the control values at the end.
+ // This reduces the number of register spills. We don't find
+ // all values that depend on the controls, just values with a
+ // direct dependency. This is cheaper and in testing there
+ // was no difference in the number of spills.
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ for _, a := range v.Args {
+ if a == c {
+ score[v.ID] = ScoreControl
+ }
+ }
+ }
+ }
+
+ }
+
+ // To put things into the priority queue,
+ // the values that should come last must compare as least.
+ priq.score = score
+ priq.a = priq.a[:0]
+
+ // Initialize priority queue with schedulable values.
+ for _, v := range b.Values {
+ if uses[v.ID] == 0 {
+ heap.Push(priq, v)
+ }
+ }
+
+ // Schedule highest priority value, update use counts, repeat.
+ order = order[:0]
+ tuples := make(map[ID][]*Value)
+ for priq.Len() > 0 {
+ // Find highest priority schedulable value.
+ // Note that schedule is assembled backwards.
+
+ v := heap.Pop(priq).(*Value)
+
+ // Add it to the schedule.
+ // Do not emit tuple-reading ops until we're ready to emit the tuple-generating op.
+ // TODO: maybe remove the ReadTuple score above if it does not help performance.
+ switch {
+ case v.Op == OpSelect0:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][0] = v
+ case v.Op == OpSelect1:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, 2)
+ }
+ tuples[v.Args[0].ID][1] = v
+ case v.Op == OpSelectN:
+ if tuples[v.Args[0].ID] == nil {
+ tuples[v.Args[0].ID] = make([]*Value, v.Args[0].Type.NumFields())
+ }
+ tuples[v.Args[0].ID][v.AuxInt] = v
+ case v.Type.IsResults() && tuples[v.ID] != nil:
+ tup := tuples[v.ID]
+ for i := len(tup) - 1; i >= 0; i-- {
+ if tup[i] != nil {
+ order = append(order, tup[i])
+ }
+ }
+ delete(tuples, v.ID)
+ order = append(order, v)
+ case v.Type.IsTuple() && tuples[v.ID] != nil:
+ if tuples[v.ID][1] != nil {
+ order = append(order, tuples[v.ID][1])
+ }
+ if tuples[v.ID][0] != nil {
+ order = append(order, tuples[v.ID][0])
+ }
+ delete(tuples, v.ID)
+ fallthrough
+ default:
+ order = append(order, v)
+ }
+
+ // Update use counts of arguments.
+ for _, w := range v.Args {
+ if w.Block != b {
+ continue
+ }
+ uses[w.ID]--
+ if uses[w.ID] == 0 {
+ // All uses scheduled, w is now schedulable.
+ heap.Push(priq, w)
+ }
+ }
+ for _, w := range additionalArgs[v.ID] {
+ uses[w.ID]--
+ if uses[w.ID] == 0 {
+ // All uses scheduled, w is now schedulable.
+ heap.Push(priq, w)
+ }
+ }
+ }
+ if len(order) != len(b.Values) {
+ f.Fatalf("schedule does not include all values in block %s", b)
+ }
+ for i := 0; i < len(b.Values); i++ {
+ b.Values[i] = order[len(b.Values)-1-i]
+ }
+ }
+
+ f.scheduled = true
+}
+
+// storeOrder orders values with respect to stores. That is,
+// if v transitively depends on store s, v is ordered after s,
+// otherwise v is ordered before s.
+// Specifically, values are ordered like
+// store1
+// NilCheck that depends on store1
+ // other values that depend on store1
+// store2
+// NilCheck that depends on store2
+ // other values that depend on store2
+// ...
+ // The order of non-store and non-NilCheck values is undefined
+// (not necessarily dependency order). This should be cheaper
+// than a full scheduling as done above.
+// Note that simple dependency order won't work: there is no
+// dependency between NilChecks and values like IsNonNil.
+// Auxiliary data structures are passed in as arguments, so
+// that they can be allocated in the caller and be reused.
+ // This function takes care of resetting them.
+func storeOrder(values []*Value, sset *sparseSet, storeNumber []int32) []*Value {
+ if len(values) == 0 {
+ return values
+ }
+
+ f := values[0].Block.Func
+
+ // find all stores
+
+ // Members of values that are store values.
+ // A constant bound allows this to be stack-allocated. 64 is
+ // enough to cover almost every storeOrder call.
+ stores := make([]*Value, 0, 64)
+ hasNilCheck := false
+ sset.clear() // sset is the set of stores that are used in other values
+ for _, v := range values {
+ if v.Type.IsMemory() {
+ stores = append(stores, v)
+ if v.Op == OpInitMem || v.Op == OpPhi {
+ continue
+ }
+ sset.add(v.MemoryArg().ID) // record that v's memory arg is used
+ }
+ if v.Op == OpNilCheck {
+ hasNilCheck = true
+ }
+ }
+ if len(stores) == 0 || !hasNilCheck && f.pass.name == "nilcheckelim" {
+ // there is no store, the order does not matter
+ return values
+ }
+
+ // find last store, which is the one that is not used by other stores
+ var last *Value
+ for _, v := range stores {
+ if !sset.contains(v.ID) {
+ if last != nil {
+ f.Fatalf("two stores live simultaneously: %v and %v", v, last)
+ }
+ last = v
+ }
+ }
+
+ // We assign a store number to each value. Store number is the
+ // index of the latest store that this value transitively depends on.
+ // The i-th store in the current block gets store number 3*i. A nil
+ // check that depends on the i-th store gets store number 3*i+1.
+ // Other values that depend on the i-th store get store number 3*i+2.
+ // Special case: 0 -- unassigned, 1 or 2 -- the latest store it depends on
+ // is in the previous block (or no store at all, e.g. the value is a Const).
+ // First we assign the number to all stores by walking back the store chain,
+ // then assign the number to other values in DFS order.
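+ // For example, with two stores in the block the first store gets number 3
+ // and the second store 6; a nil check that depends on the first store gets 4,
+ // another value that depends on it gets 5, and a value that depends on no
+ // store in this block gets 2.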
+ count := make([]int32, 3*(len(stores)+1))
+ sset.clear() // reuse sparse set to ensure that a value is pushed to stack only once
+ for n, w := len(stores), last; n > 0; n-- {
+ storeNumber[w.ID] = int32(3 * n)
+ count[3*n]++
+ sset.add(w.ID)
+ if w.Op == OpInitMem || w.Op == OpPhi {
+ if n != 1 {
+ f.Fatalf("store order is wrong: there are stores before %v", w)
+ }
+ break
+ }
+ w = w.MemoryArg()
+ }
+ var stack []*Value
+ for _, v := range values {
+ if sset.contains(v.ID) {
+ // in sset means v is a store, or already pushed to stack, or already assigned a store number
+ continue
+ }
+ stack = append(stack, v)
+ sset.add(v.ID)
+
+ for len(stack) > 0 {
+ w := stack[len(stack)-1]
+ if storeNumber[w.ID] != 0 {
+ stack = stack[:len(stack)-1]
+ continue
+ }
+ if w.Op == OpPhi {
+ // Phi value doesn't depend on store in the current block.
+ // Do this early to avoid dependency cycle.
+ storeNumber[w.ID] = 2
+ count[2]++
+ stack = stack[:len(stack)-1]
+ continue
+ }
+
+ max := int32(0) // latest store dependency
+ argsdone := true
+ for _, a := range w.Args {
+ if a.Block != w.Block {
+ continue
+ }
+ if !sset.contains(a.ID) {
+ stack = append(stack, a)
+ sset.add(a.ID)
+ argsdone = false
+ break
+ }
+ if storeNumber[a.ID]/3 > max {
+ max = storeNumber[a.ID] / 3
+ }
+ }
+ if !argsdone {
+ continue
+ }
+
+ n := 3*max + 2
+ if w.Op == OpNilCheck {
+ n = 3*max + 1
+ }
+ storeNumber[w.ID] = n
+ count[n]++
+ stack = stack[:len(stack)-1]
+ }
+ }
+
+ // convert count to a prefix sum of counts: count'[i] = sum_{j<=i} count[j]
+ for i := range count {
+ if i == 0 {
+ continue
+ }
+ count[i] += count[i-1]
+ }
+ if count[len(count)-1] != int32(len(values)) {
+ f.Fatalf("storeOrder: value is missing, total count = %d, values = %v", count[len(count)-1], values)
+ }
+
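+ // After the prefix sum, count[s-1] is the index at which the next value with
+ // store number s should be placed; it is incremented as each value is emitted.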
+ // place values in count-indexed bins, which are in the desired store order
+ order := make([]*Value, len(values))
+ for _, v := range values {
+ s := storeNumber[v.ID]
+ order[count[s-1]] = v
+ count[s-1]++
+ }
+
+ // Order nil checks in source order. We want the first in source order to trigger.
+ // If two are on the same line, we don't really care which happens first.
+ // See issue 18169.
+ if hasNilCheck {
+ start := -1
+ for i, v := range order {
+ if v.Op == OpNilCheck {
+ if start == -1 {
+ start = i
+ }
+ } else {
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:i]))
+ start = -1
+ }
+ }
+ }
+ if start != -1 {
+ sort.Sort(bySourcePos(order[start:]))
+ }
+ }
+
+ return order
+}
+
+type bySourcePos []*Value
+
+func (s bySourcePos) Len() int { return len(s) }
+func (s bySourcePos) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+func (s bySourcePos) Less(i, j int) bool { return s[i].Pos.Before(s[j].Pos) }
diff --git a/src/cmd/compile/internal/ssa/schedule_test.go b/src/cmd/compile/internal/ssa/schedule_test.go
new file mode 100644
index 0000000..f7177dd
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/schedule_test.go
@@ -0,0 +1,101 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestSchedule(t *testing.T) {
+ c := testConfig(t)
+ cases := []fun{
+ c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"),
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem1"),
+ Valu("mem3", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "sum", "mem2"),
+ Valu("l1", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"),
+ Valu("l2", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem2"),
+ Valu("sum", OpAdd64, c.config.Types.Int64, 0, nil, "l1", "l2"),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem3"))),
+ }
+ for _, c := range cases {
+ schedule(c.f)
+ if !isSingleLiveMem(c.f) {
+ t.Error("single-live-mem restriction not enforced by schedule for func:")
+ printFunc(c.f)
+ }
+ }
+}
+
+func isSingleLiveMem(f *Func) bool {
+ for _, b := range f.Blocks {
+ var liveMem *Value
+ for _, v := range b.Values {
+ for _, w := range v.Args {
+ if w.Type.IsMemory() {
+ if liveMem == nil {
+ liveMem = w
+ continue
+ }
+ if w != liveMem {
+ return false
+ }
+ }
+ }
+ if v.Type.IsMemory() {
+ liveMem = v
+ }
+ }
+ }
+ return true
+}
+
+func TestStoreOrder(t *testing.T) {
+ // In the function below, v2 depends on v3 and v4, v4 depends on v3, and v3 depends on store v5.
+ // storeOrder did not handle this case correctly.
+ c := testConfig(t)
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem0", OpInitMem, types.TypeMem, 0, nil),
+ Valu("a", OpAdd64, c.config.Types.Int64, 0, nil, "b", "c"), // v2
+ Valu("b", OpLoad, c.config.Types.Int64, 0, nil, "ptr", "mem1"), // v3
+ Valu("c", OpNeg64, c.config.Types.Int64, 0, nil, "b"), // v4
+ Valu("mem1", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "v", "mem0"), // v5
+ Valu("mem2", OpStore, types.TypeMem, 0, c.config.Types.Int64, "ptr", "a", "mem1"),
+ Valu("ptr", OpConst64, c.config.Types.Int64, 0xABCD, nil),
+ Valu("v", OpConst64, c.config.Types.Int64, 12, nil),
+ Goto("exit")),
+ Bloc("exit",
+ Exit("mem2")))
+
+ CheckFunc(fun.f)
+ order := storeOrder(fun.f.Blocks[0].Values, fun.f.newSparseSet(fun.f.NumValues()), make([]int32, fun.f.NumValues()))
+
+ // check that v2, v3, v4 is sorted after v5
+ var ai, bi, ci, si int
+ for i, v := range order {
+ switch v.ID {
+ case 2:
+ ai = i
+ case 3:
+ bi = i
+ case 4:
+ ci = i
+ case 5:
+ si = i
+ }
+ }
+ if ai < si || bi < si || ci < si {
+ t.Logf("Func: %s", fun.f)
+ t.Errorf("store order is wrong: got %v, want v2 v3 v4 after v5", order)
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/shift_test.go b/src/cmd/compile/internal/ssa/shift_test.go
new file mode 100644
index 0000000..3876d8d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shift_test.go
@@ -0,0 +1,107 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShiftConstAMD64(t *testing.T) {
+ c := testConfig(t)
+ fun := makeConstShiftFunc(c, 18, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpLsh64x64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHLQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 1, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64Ux64, c.config.Types.UInt64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SHRQconst: 0, OpAMD64CMPQconst: 0, OpAMD64ANDQconst: 0})
+
+ fun = makeConstShiftFunc(c, 18, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+
+ fun = makeConstShiftFunc(c, 66, OpRsh64x64, c.config.Types.Int64)
+ checkOpcodeCounts(t, fun.f, map[Op]int{OpAMD64SARQconst: 1, OpAMD64CMPQconst: 0})
+}
+
+func makeConstShiftFunc(c *Conf, amount int64, op Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("shift", op, typ, 0, nil, "load", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "shift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
+
+func TestShiftToExtensionAMD64(t *testing.T) {
+ c := testConfig(t)
+ // Test that eligible pairs of constant shifts are converted to extensions.
+ // For example:
+ // (uint64(x) << 32) >> 32 -> uint64(uint32(x))
+ ops := map[Op]int{
+ OpAMD64SHLQconst: 0, OpAMD64SHLLconst: 0,
+ OpAMD64SHRQconst: 0, OpAMD64SHRLconst: 0,
+ OpAMD64SARQconst: 0, OpAMD64SARLconst: 0,
+ }
+ tests := [...]struct {
+ amount int64
+ left, right Op
+ typ *types.Type
+ }{
+ // unsigned
+ {56, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {48, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {32, OpLsh64x64, OpRsh64Ux64, c.config.Types.UInt64},
+ {24, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {16, OpLsh32x64, OpRsh32Ux64, c.config.Types.UInt32},
+ {8, OpLsh16x64, OpRsh16Ux64, c.config.Types.UInt16},
+ // signed
+ {56, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {48, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {32, OpLsh64x64, OpRsh64x64, c.config.Types.Int64},
+ {24, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {16, OpLsh32x64, OpRsh32x64, c.config.Types.Int32},
+ {8, OpLsh16x64, OpRsh16x64, c.config.Types.Int16},
+ }
+ for _, tc := range tests {
+ fun := makeShiftExtensionFunc(c, tc.amount, tc.left, tc.right, tc.typ)
+ checkOpcodeCounts(t, fun.f, ops)
+ }
+}
+
+// makeShiftExtensionFunc generates a function containing:
+//
+// (rshift (lshift load (Const64 [amount])) (Const64 [amount]))
+//
+// This may be equivalent to a sign or zero extension.
+func makeShiftExtensionFunc(c *Conf, amount int64, lshift, rshift Op, typ *types.Type) fun {
+ ptyp := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("SP", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("argptr", OpOffPtr, ptyp, 8, nil, "SP"),
+ Valu("resptr", OpOffPtr, ptyp, 16, nil, "SP"),
+ Valu("load", OpLoad, typ, 0, nil, "argptr", "mem"),
+ Valu("c", OpConst64, c.config.Types.UInt64, amount, nil),
+ Valu("lshift", lshift, typ, 0, nil, "load", "c"),
+ Valu("rshift", rshift, typ, 0, nil, "lshift", "c"),
+ Valu("store", OpStore, types.TypeMem, 0, c.config.Types.UInt64, "resptr", "rshift", "mem"),
+ Exit("store")))
+ Compile(fun.f)
+ return fun
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit.go b/src/cmd/compile/internal/ssa/shortcircuit.go
new file mode 100644
index 0000000..c0b9eac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit.go
@@ -0,0 +1,513 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// Shortcircuit finds situations where branch directions
+// are always correlated and rewrites the CFG to take
+// advantage of that fact.
+// This optimization is useful for compiling && and || expressions.
+func shortcircuit(f *Func) {
+ // Step 1: Replace a phi arg with a constant if that arg
+ // is the control value of a preceding If block.
+ // b1:
+ // If a goto b2 else b3
+ // b2: <- b1 ...
+ // x = phi(a, ...)
+ //
+ // We can replace the "a" in the phi with the constant true.
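+ //
+ // This is roughly the shape produced for "x := a || b": the merge block's
+ // phi takes a from the If block and b from the fallthrough block, and the
+ // edge coming straight from the If block is only taken when a is true, so
+ // that phi arg can be replaced with the constant true.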
+ var ct, cf *Value
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ if !v.Type.IsBoolean() {
+ continue
+ }
+ for i, a := range v.Args {
+ e := b.Preds[i]
+ p := e.b
+ if p.Kind != BlockIf {
+ continue
+ }
+ if p.Controls[0] != a {
+ continue
+ }
+ if e.i == 0 {
+ if ct == nil {
+ ct = f.ConstBool(f.Config.Types.Bool, true)
+ }
+ v.SetArg(i, ct)
+ } else {
+ if cf == nil {
+ cf = f.ConstBool(f.Config.Types.Bool, false)
+ }
+ v.SetArg(i, cf)
+ }
+ }
+ }
+ }
+
+ // Step 2: Redirect control flow around known branches.
+ // p:
+ // ... goto b ...
+ // b: <- p ...
+ // v = phi(true, ...)
+ // if v goto t else u
+ // We can redirect p to go directly to t instead of b.
+ // (If v is not live after b).
+ fuse(f, fuseTypePlain|fuseTypeShortCircuit)
+}
+
+// shortcircuitBlock checks for a CFG in which an If block
+// has as its control value a Phi that has a ConstBool arg.
+// In some such cases, we can rewrite the CFG into a flatter form.
+//
+// (1) Look for a CFG of the form
+//
+// p other pred(s)
+// \ /
+// b
+// / \
+// t other succ
+//
+// in which b is an If block containing a single phi value with a single use (b's Control),
+// which has a ConstBool arg.
+// p is the predecessor corresponding to the argument slot in which the ConstBool is found.
+// t is the successor corresponding to the value of the ConstBool arg.
+//
+// Rewrite this into
+//
+// p other pred(s)
+// | /
+// | b
+// |/ \
+// t u
+//
+// and remove the appropriate phi arg(s).
+//
+// (2) Look for a CFG of the form
+//
+// p q
+// \ /
+// b
+// / \
+// t u
+//
+// in which b is as described in (1).
+// However, b may also contain other phi values.
+// The CFG will be modified as described in (1).
+// However, in order to handle those other phi values,
+// for each other phi value w, we must be able to eliminate w from b.
+// We can do that through a combination of moving w to a different block
+// and rewriting uses of w to use a different value instead.
+// See shortcircuitPhiPlan for details.
+func shortcircuitBlock(b *Block) bool {
+ if b.Kind != BlockIf {
+ return false
+ }
+ // Look for control values of the form Copy(Not(Copy(Phi(const, ...)))).
+ // Those must be the only values in b, and each must be used only by b.
+ // Track the negations so that we can swap successors as needed later.
+ ctl := b.Controls[0]
+ nval := 1 // the control value
+ var swap int64
+ for ctl.Uses == 1 && ctl.Block == b && (ctl.Op == OpCopy || ctl.Op == OpNot) {
+ if ctl.Op == OpNot {
+ swap = 1 ^ swap
+ }
+ ctl = ctl.Args[0]
+ nval++ // wrapper around control value
+ }
+ if ctl.Op != OpPhi || ctl.Block != b || ctl.Uses != 1 {
+ return false
+ }
+ nOtherPhi := 0
+ for _, w := range b.Values {
+ if w.Op == OpPhi && w != ctl {
+ nOtherPhi++
+ }
+ }
+ if nOtherPhi > 0 && len(b.Preds) != 2 {
+ // We rely on b having exactly two preds in shortcircuitPhiPlan
+ // to reason about the values of phis.
+ return false
+ }
+ if len(b.Values) != nval+nOtherPhi {
+ return false
+ }
+ if nOtherPhi > 0 {
+ // Check for any phi which is the argument of another phi.
+ // These cases are tricky, as substitutions done by replaceUses
+ // are no longer trivial to do in any ordering. See issue 45175.
+ m := make(map[*Value]bool, 1+nOtherPhi)
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ m[v] = true
+ }
+ }
+ for v := range m {
+ for _, a := range v.Args {
+ if a != v && m[a] {
+ return false
+ }
+ }
+ }
+ }
+
+ // Locate index of first const phi arg.
+ cidx := -1
+ for i, a := range ctl.Args {
+ if a.Op == OpConstBool {
+ cidx = i
+ break
+ }
+ }
+ if cidx == -1 {
+ return false
+ }
+
+ // p is the predecessor corresponding to cidx.
+ pe := b.Preds[cidx]
+ p := pe.b
+ pi := pe.i
+
+ // t is the "taken" branch: the successor we always go to when coming in from p.
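+ // ConstBool's AuxInt is 1 for true and 0 for false; 1^AuxInt turns that into
+ // a successor index (Succs[0] is the branch taken when the control is true),
+ // and xoring with swap accounts for any OpNot wrappers stripped above.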
+ ti := 1 ^ ctl.Args[cidx].AuxInt ^ swap
+ te := b.Succs[ti]
+ t := te.b
+ if p == b || t == b {
+ // This is an infinite loop; we can't remove it. See issue 33903.
+ return false
+ }
+
+ var fixPhi func(*Value, int)
+ if nOtherPhi > 0 {
+ fixPhi = shortcircuitPhiPlan(b, ctl, cidx, ti)
+ if fixPhi == nil {
+ return false
+ }
+ }
+
+ // We're committed. Update CFG and Phis.
+ // If you modify this section, update shortcircuitPhiPlan correspondingly.
+
+ // Remove b's incoming edge from p.
+ b.removePred(cidx)
+ b.removePhiArg(ctl, cidx)
+
+ // Redirect p's outgoing edge to t.
+ p.Succs[pi] = Edge{t, len(t.Preds)}
+
+ // Fix up t to have one more predecessor.
+ t.Preds = append(t.Preds, Edge{p, pi})
+ for _, v := range t.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ v.AddArg(v.Args[te.i])
+ }
+
+ if nOtherPhi != 0 {
+ // Adjust all other phis as necessary.
+ // Use a plain for loop instead of range because fixPhi may move phis,
+ // thus modifying b.Values.
+ for i := 0; i < len(b.Values); i++ {
+ phi := b.Values[i]
+ if phi.Uses == 0 || phi == ctl || phi.Op != OpPhi {
+ continue
+ }
+ fixPhi(phi, i)
+ if phi.Block == b {
+ continue
+ }
+ // phi got moved to a different block with v.moveTo.
+ // Adjust phi values in this new block that refer
+ // to phi to refer to the corresponding phi arg instead.
+ // phi used to be evaluated prior to this block,
+ // and now it is evaluated in this block.
+ for _, v := range phi.Block.Values {
+ if v.Op != OpPhi || v == phi {
+ continue
+ }
+ for j, a := range v.Args {
+ if a == phi {
+ v.SetArg(j, phi.Args[j])
+ }
+ }
+ }
+ if phi.Uses != 0 {
+ phielimValue(phi)
+ } else {
+ phi.reset(OpInvalid)
+ }
+ i-- // v.moveTo put a new value at index i; reprocess
+ }
+
+ // We may have left behind some phi values with no uses
+ // but the wrong number of arguments. Eliminate those.
+ for _, v := range b.Values {
+ if v.Uses == 0 {
+ v.reset(OpInvalid)
+ }
+ }
+ }
+
+ if len(b.Preds) == 0 {
+ // Block is now dead.
+ b.Kind = BlockInvalid
+ }
+
+ phielimValue(ctl)
+ return true
+}
+
+// shortcircuitPhiPlan returns a function to handle non-ctl phi values in b,
+// where b is as described in shortcircuitBlock.
+// The returned function accepts a value v
+// and the index i of v in v.Block: v.Block.Values[i] == v.
+// If the returned function moves v to a different block, it will use v.moveTo.
+// cidx is the index in ctl of the ConstBool arg.
+// ti is the index in b.Succs of the always taken branch when arriving from p.
+// If shortcircuitPhiPlan returns nil, there is no plan available,
+// and the CFG modifications must not proceed.
+// The returned function assumes that shortcircuitBlock has completed its CFG modifications.
+func shortcircuitPhiPlan(b *Block, ctl *Value, cidx int, ti int64) func(*Value, int) {
+ // t is the "taken" branch: the successor we always go to when coming in from p.
+ t := b.Succs[ti].b
+ // u is the "untaken" branch: the successor we never go to when coming in from p.
+ u := b.Succs[1^ti].b
+
+ // In the following CFG matching, ensure that b's preds are entirely distinct from b's succs.
+ // This is probably a stronger condition than required, but this happens extremely rarely,
+ // and it makes it easier to avoid getting deceived by pretty ASCII charts. See #44465.
+ if p0, p1 := b.Preds[0].b, b.Preds[1].b; p0 == t || p1 == t || p0 == u || p1 == u {
+ return nil
+ }
+
+ // Look for some common CFG structures
+ // in which the outbound paths from b merge,
+ // with no other preds joining them.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the successor blocks.
+
+ if len(t.Preds) == 1 && len(t.Succs) == 1 &&
+ len(u.Preds) == 1 && len(u.Succs) == 1 &&
+ t.Succs[0].b == u.Succs[0].b && len(t.Succs[0].b.Preds) == 2 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ // \ /
+ // m
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ // \ /
+ // m
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ m := t.Succs[0].b
+ return func(v *Value, i int) {
+ // Replace any uses of v in t and u with the value v must have,
+ // given that we have arrived at that block.
+ // Then move v to m and adjust its value accordingly;
+ // this handles all other uses of v.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(m, i)
+ // The phi in m belongs to whichever pred idx corresponds to t.
+ if m.Preds[0].b == t {
+ v.SetArgs2(phi, argQ)
+ } else {
+ v.SetArgs2(argQ, phi)
+ }
+ }
+ }
+
+ if len(t.Preds) == 2 && len(u.Preds) == 1 && len(u.Succs) == 1 && u.Succs[0].b == t {
+ // p q
+ // \ /
+ // b
+ // |\
+ // | u
+ // |/
+ // t
+ //
+ // After the CFG modifications, this will look like
+ //
+ // q
+ // /
+ // b
+ // |\
+ // p | u
+ // \|/
+ // t
+ //
+ // NB: t.Preds is (b or u, b or u, p).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u. Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs3(argQ, argQ, argP)
+ }
+ }
+
+ if len(u.Preds) == 2 && len(t.Preds) == 1 && len(t.Succs) == 1 && t.Succs[0].b == u {
+ // p q
+ // \ /
+ // b
+ // /|
+ // t |
+ // \|
+ // u
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/|
+ // t |
+ // \|
+ // u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs2(argQ, phi)
+ }
+ }
+
+ // Look for some common CFG structures
+ // in which one outbound path from b exits,
+ // with no other preds joining.
+ // In these cases, we can reconstruct what the value
+ // of any phi in b must be in the path leading to exit,
+ // and move the phi to the non-exit path.
+
+ if len(t.Preds) == 1 && len(u.Preds) == 1 && len(t.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where t is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in t and x. Then move v to u.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ // If there are no uses of v in t or x, this phi will be unused.
+ // That's OK; it's not worth the cost to prevent that.
+ phi := t.Func.newValue(OpPhi, v.Type, t, v.Pos)
+ phi.AddArg2(argQ, argP)
+ t.replaceUses(v, phi)
+ if v.Uses == 0 {
+ return
+ }
+ v.moveTo(u, i)
+ v.SetArgs1(argQ)
+ }
+ }
+
+ if len(u.Preds) == 1 && len(t.Preds) == 1 && len(u.Succs) == 0 {
+ // p q
+ // \ /
+ // b
+ // / \
+ // t u
+ //
+ // where u is an Exit/Ret block.
+ //
+ // After the CFG modifications, this will look like
+ //
+ // p q
+ // | /
+ // | b
+ // |/ \
+ // t u
+ //
+ // NB: t.Preds is (b, p), not (p, b).
+ return func(v *Value, i int) {
+ // Replace any uses of v in u (and x). Then move v to t.
+ argP, argQ := v.Args[cidx], v.Args[1^cidx]
+ u.replaceUses(v, argQ)
+ v.moveTo(t, i)
+ v.SetArgs2(argQ, argP)
+ }
+ }
+
+ // TODO: handle more cases; shortcircuit optimizations turn out to be reasonably high impact
+ return nil
+}
+
+// replaceUses replaces all uses of old in b with new.
+func (b *Block) replaceUses(old, new *Value) {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if a == old {
+ v.SetArg(i, new)
+ }
+ }
+ }
+ for i, v := range b.ControlValues() {
+ if v == old {
+ b.ReplaceControl(i, new)
+ }
+ }
+}
+
+// moveTo moves v to dst, adjusting the appropriate Block.Values slices.
+// The caller is responsible for ensuring that this is safe.
+// i is the index of v in v.Block.Values.
+func (v *Value) moveTo(dst *Block, i int) {
+ if dst.Func.scheduled {
+ v.Fatalf("moveTo after scheduling")
+ }
+ src := v.Block
+ if src.Values[i] != v {
+ v.Fatalf("moveTo bad index %d", v, i)
+ }
+ if src == dst {
+ return
+ }
+ v.Block = dst
+ dst.Values = append(dst.Values, v)
+ last := len(src.Values) - 1
+ src.Values[i] = src.Values[last]
+ src.Values[last] = nil
+ src.Values = src.Values[:last]
+}
diff --git a/src/cmd/compile/internal/ssa/shortcircuit_test.go b/src/cmd/compile/internal/ssa/shortcircuit_test.go
new file mode 100644
index 0000000..b25eeb4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/shortcircuit_test.go
@@ -0,0 +1,53 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestShortCircuit(t *testing.T) {
+ c := testConfig(t)
+
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("mem", OpInitMem, types.TypeMem, 0, nil),
+ Valu("arg1", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg2", OpArg, c.config.Types.Int64, 0, nil),
+ Valu("arg3", OpArg, c.config.Types.Int64, 0, nil),
+ Goto("b1")),
+ Bloc("b1",
+ Valu("cmp1", OpLess64, c.config.Types.Bool, 0, nil, "arg1", "arg2"),
+ If("cmp1", "b2", "b3")),
+ Bloc("b2",
+ Valu("cmp2", OpLess64, c.config.Types.Bool, 0, nil, "arg2", "arg3"),
+ Goto("b3")),
+ Bloc("b3",
+ Valu("phi2", OpPhi, c.config.Types.Bool, 0, nil, "cmp1", "cmp2"),
+ If("phi2", "b4", "b5")),
+ Bloc("b4",
+ Valu("cmp3", OpLess64, c.config.Types.Bool, 0, nil, "arg3", "arg1"),
+ Goto("b5")),
+ Bloc("b5",
+ Valu("phi3", OpPhi, c.config.Types.Bool, 0, nil, "phi2", "cmp3"),
+ If("phi3", "b6", "b7")),
+ Bloc("b6",
+ Exit("mem")),
+ Bloc("b7",
+ Exit("mem")))
+
+ CheckFunc(fun.f)
+ shortcircuit(fun.f)
+ CheckFunc(fun.f)
+
+ for _, b := range fun.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ t.Errorf("phi %s remains", v)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/sizeof_test.go b/src/cmd/compile/internal/ssa/sizeof_test.go
new file mode 100644
index 0000000..a27002e
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sizeof_test.go
@@ -0,0 +1,39 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Value{}, 72, 112},
+ {Block{}, 164, 304},
+ {LocalSlot{}, 28, 40},
+ {valState{}, 28, 40},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/softfloat.go b/src/cmd/compile/internal/ssa/softfloat.go
new file mode 100644
index 0000000..351f824
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/softfloat.go
@@ -0,0 +1,80 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "math"
+)
+
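+// softfloat retypes float-typed values that merely move or reinterpret bits
+// (phis, loads, args, constants, negation, and rounding copies, plus the aux
+// types of Store/Zero/Move) as plain integers when the target has no hardware
+// floating point support.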
+func softfloat(f *Func) {
+ if !f.Config.SoftFloat {
+ return
+ }
+ newInt64 := false
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Type.IsFloat() {
+ f.unCache(v)
+ switch v.Op {
+ case OpPhi, OpLoad, OpArg:
+ if v.Type.Size() == 4 {
+ v.Type = f.Config.Types.UInt32
+ } else {
+ v.Type = f.Config.Types.UInt64
+ }
+ case OpConst32F:
+ v.Op = OpConst32
+ v.Type = f.Config.Types.UInt32
+ v.AuxInt = int64(int32(math.Float32bits(auxTo32F(v.AuxInt))))
+ case OpConst64F:
+ v.Op = OpConst64
+ v.Type = f.Config.Types.UInt64
+ case OpNeg32F:
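+ // Negating an IEEE float just flips the sign bit, so under softfloat it
+ // becomes an integer XOR with a constant that has only the top bit set
+ // (and likewise for OpNeg64F below).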
+ arg0 := v.Args[0]
+ v.reset(OpXor32)
+ v.Type = f.Config.Types.UInt32
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst32, v.Type)
+ mask.AuxInt = -0x80000000
+ v.AddArg(mask)
+ case OpNeg64F:
+ arg0 := v.Args[0]
+ v.reset(OpXor64)
+ v.Type = f.Config.Types.UInt64
+ v.AddArg(arg0)
+ mask := v.Block.NewValue0(v.Pos, OpConst64, v.Type)
+ mask.AuxInt = -0x8000000000000000
+ v.AddArg(mask)
+ case OpRound32F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt32
+ case OpRound64F:
+ v.Op = OpCopy
+ v.Type = f.Config.Types.UInt64
+ }
+ newInt64 = newInt64 || v.Type.Size() == 8
+ } else if (v.Op == OpStore || v.Op == OpZero || v.Op == OpMove) && v.Aux.(*types.Type).IsFloat() {
+ switch size := v.Aux.(*types.Type).Size(); size {
+ case 4:
+ v.Aux = f.Config.Types.UInt32
+ case 8:
+ v.Aux = f.Config.Types.UInt64
+ newInt64 = true
+ default:
+ v.Fatalf("bad float type with size %d", size)
+ }
+ }
+ }
+ }
+
+ if newInt64 && f.Config.RegSize == 4 {
+ // On 32-bit architectures, decompose the Uint64 values introduced in the switch above.
+ decomposeBuiltIn(f)
+ applyRewrite(f, rewriteBlockdec64, rewriteValuedec64, removeDeadValues)
+ }
+
+}
diff --git a/src/cmd/compile/internal/ssa/sparsemap.go b/src/cmd/compile/internal/ssa/sparsemap.go
new file mode 100644
index 0000000..f55db54
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsemap.go
@@ -0,0 +1,93 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseEntry struct {
+ key ID
+ val int32
+ aux src.XPos
+}
+
+type sparseMap struct {
+ dense []sparseEntry
+ sparse []int32
+}
+
+// newSparseMap returns a sparseMap that can map
+// integers between 0 and n-1 to int32s.
+func newSparseMap(n int) *sparseMap {
+ return &sparseMap{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseMap) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseMap) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseMap) contains(k ID) bool {
+ i := s.sparse[k]
+ return i < int32(len(s.dense)) && s.dense[i].key == k
+}
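+
+// Note that clear only truncates dense; stale indexes left behind in sparse are
+// harmless because contains, get, and set always verify dense[i].key == k before
+// trusting them, which is what makes clear O(1).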
+
+// get returns the value for key k, or -1 if k does
+// not appear in the map.
+func (s *sparseMap) get(k ID) int32 {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ return s.dense[i].val
+ }
+ return -1
+}
+
+func (s *sparseMap) set(k ID, v int32, a src.XPos) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val = v
+ s.dense[i].aux = a
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, v, a})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+// setBit sets the v'th bit of k's value, where 0 <= v < 32
+func (s *sparseMap) setBit(k ID, v uint) {
+ if v >= 32 {
+ panic("bit index too large.")
+ }
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ s.dense[i].val |= 1 << v
+ return
+ }
+ s.dense = append(s.dense, sparseEntry{k, 1 << v, src.NoXPos})
+ s.sparse[k] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseMap) remove(k ID) {
+ i := s.sparse[k]
+ if i < int32(len(s.dense)) && s.dense[i].key == k {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y.key] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+func (s *sparseMap) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseMap) contents() []sparseEntry {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparseset.go b/src/cmd/compile/internal/ssa/sparseset.go
new file mode 100644
index 0000000..395931d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparseset.go
@@ -0,0 +1,79 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// from https://research.swtch.com/sparse
+// in turn, from Briggs and Torczon
+
+type sparseSet struct {
+ dense []ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) cap() int {
+ return len(s.sparse)
+}
+
+func (s *sparseSet) size() int {
+ return len(s.dense)
+}
+
+func (s *sparseSet) contains(x ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) addAll(a []ID) {
+ for _, x := range a {
+ s.add(x)
+ }
+}
+
+func (s *sparseSet) addAllValues(a []*Value) {
+ for _, v := range a {
+ s.add(v.ID)
+ }
+}
+
+func (s *sparseSet) remove(x ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ y := s.dense[len(s.dense)-1]
+ s.dense[i] = y
+ s.sparse[y] = i
+ s.dense = s.dense[:len(s.dense)-1]
+ }
+}
+
+// pop removes an arbitrary element from the set.
+// The set must be nonempty.
+func (s *sparseSet) pop() ID {
+ x := s.dense[len(s.dense)-1]
+ s.dense = s.dense[:len(s.dense)-1]
+ return x
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+func (s *sparseSet) contents() []ID {
+ return s.dense
+}
diff --git a/src/cmd/compile/internal/ssa/sparsetree.go b/src/cmd/compile/internal/ssa/sparsetree.go
new file mode 100644
index 0000000..be914c8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/sparsetree.go
@@ -0,0 +1,241 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "fmt"
+ "strings"
+)
+
+type SparseTreeNode struct {
+ child *Block
+ sibling *Block
+ parent *Block
+
+ // Every block has 6 numbers associated with it:
+ // entry-1, entry, entry+1, exit-1, exit, and exit+1.
+ // entry and exit are conceptually the top of the block (phi functions)
+ // entry+1 and exit-1 are conceptually the bottom of the block (ordinary defs)
+ // entry-1 and exit+1 are conceptually "just before" the block (conditions flowing in)
+ //
+ // This simplifies life if we wish to query information about x
+ // when x is both an input to and output of a block.
+ entry, exit int32
+}
+
+func (s *SparseTreeNode) String() string {
+ return fmt.Sprintf("[%d,%d]", s.entry, s.exit)
+}
+
+func (s *SparseTreeNode) Entry() int32 {
+ return s.entry
+}
+
+func (s *SparseTreeNode) Exit() int32 {
+ return s.exit
+}
+
+const (
+ // When used to look up definitions in a sparse tree,
+ // these adjustments to a block's entry (+adjust) and
+ // exit (-adjust) numbers allow a distinction to be made
+ // between assignments (typically branch-dependent
+ // conditionals) occurring "before" the block (e.g., as inputs
+ // to the block and its phi functions), "within" the block,
+ // and "after" the block.
+ AdjustBefore = -1 // defined before phi
+ AdjustWithin = 0 // defined by phi
+ AdjustAfter = 1 // defined within block
+)
+
+// A SparseTree is a tree of Blocks.
+// It allows rapid ancestor queries,
+// such as whether one block dominates another.
+type SparseTree []SparseTreeNode
+
+// newSparseTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+func newSparseTree(f *Func, parentOf []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range f.Blocks {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// newSparseOrderedTree creates a SparseTree from a block-to-parent map (array indexed by Block.ID)
+// children will appear in the reverse of their order in reverseOrder;
+// in particular, if reverseOrder is a dfs-reversePostOrder, then the root-to-children
+// walk of the tree will yield a pre-order.
+func newSparseOrderedTree(f *Func, parentOf, reverseOrder []*Block) SparseTree {
+ t := make(SparseTree, f.NumBlocks())
+ for _, b := range reverseOrder {
+ n := &t[b.ID]
+ if p := parentOf[b.ID]; p != nil {
+ n.parent = p
+ n.sibling = t[p.ID].child
+ t[p.ID].child = b
+ }
+ }
+ t.numberBlock(f.Entry, 1)
+ return t
+}
+
+// treestructure provides a string description of the dominator
+// tree and flow structure of block b and all blocks that it
+// dominates.
+func (t SparseTree) treestructure(b *Block) string {
+ return t.treestructure1(b, 0)
+}
+func (t SparseTree) treestructure1(b *Block, i int) string {
+ s := "\n" + strings.Repeat("\t", i) + b.String() + "->["
+ for i, e := range b.Succs {
+ if i > 0 {
+ s += ","
+ }
+ s += e.b.String()
+ }
+ s += "]"
+ if c0 := t[b.ID].child; c0 != nil {
+ s += "("
+ for c := c0; c != nil; c = t[c.ID].sibling {
+ if c != c0 {
+ s += " "
+ }
+ s += t.treestructure1(c, i+1)
+ }
+ s += ")"
+ }
+ return s
+}
+
+// numberBlock assigns entry and exit numbers for b and b's
+// children in an in-order walk from a gappy sequence, where n
+// is the first number not yet assigned or reserved. N should
+// be larger than zero. For each entry and exit number, the
+// values one larger and smaller are reserved to indicate
+// "strictly above" and "strictly below". numberBlock returns
+// the smallest number not yet assigned or reserved (i.e., the
+// exit number of the last block visited, plus two, because
+// last.exit+1 is a reserved value.)
+//
+// examples:
+//
+// single node tree Root, call with n=1
+// entry=2 Root exit=5; returns 7
+//
+// two node tree, Root->Child, call with n=1
+// entry=2 Root exit=11; returns 13
+// entry=5 Child exit=8
+//
+// three node tree, Root->(Left, Right), call with n=1
+// entry=2 Root exit=17; returns 19
+// entry=5 Left exit=8; entry=11 Right exit=14
+//
+// This is the in-order sequence of assigned and reserved numbers
+// for the last example:
+// root left left right right root
+// 1 2e 3 | 4 5e 6 | 7 8x 9 | 10 11e 12 | 13 14x 15 | 16 17x 18
+
+func (t SparseTree) numberBlock(b *Block, n int32) int32 {
+ // reserve n for entry-1, assign n+1 to entry
+ n++
+ t[b.ID].entry = n
+ // reserve n+1 for entry+1, n+2 is next free number
+ n += 2
+ for c := t[b.ID].child; c != nil; c = t[c.ID].sibling {
+ n = t.numberBlock(c, n) // preserves n = next free number
+ }
+ // reserve n for exit-1, assign n+1 to exit
+ n++
+ t[b.ID].exit = n
+ // reserve n+1 for exit+1, n+2 is next free number, returned.
+ return n + 2
+}
+
+// Sibling returns a sibling of x in the dominator tree (i.e.,
+// a node with the same immediate dominator) or nil if there
+// are no remaining siblings in the arbitrary but repeatable
+// order chosen. Because the Child-Sibling order is used
+// to assign entry and exit numbers in the treewalk, those
+// numbers are also consistent with this order (i.e.,
+// Sibling(x) has entry number larger than x's exit number).
+func (t SparseTree) Sibling(x *Block) *Block {
+ return t[x.ID].sibling
+}
+
+// Child returns a child of x in the dominator tree, or
+// nil if there are none. The choice of first child is
+// arbitrary but repeatable.
+func (t SparseTree) Child(x *Block) *Block {
+ return t[x.ID].child
+}
+
+// Parent returns the parent of x in the dominator tree, or
+// nil if x is the function's entry.
+func (t SparseTree) Parent(x *Block) *Block {
+ return t[x.ID].parent
+}
+
+// IsAncestorEq reports whether x is an ancestor of or equal to y.
+func (t SparseTree) IsAncestorEq(x, y *Block) bool {
+ if x == y {
+ return true
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry <= yy.entry && yy.exit <= xx.exit
+}
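+
+// For example, with the numbering from numberBlock's three-node example
+// (Root entry=2 exit=17, Left entry=5 exit=8, Right entry=11 exit=14),
+// IsAncestorEq(Root, Left) holds because 2 <= 5 and 8 <= 17, while
+// isAncestor(Left, Right) does not, since Right's interval [11,14] is not
+// nested inside Left's [5,8].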
+
+// isAncestor reports whether x is a strict ancestor of y.
+func (t SparseTree) isAncestor(x, y *Block) bool {
+ if x == y {
+ return false
+ }
+ xx := &t[x.ID]
+ yy := &t[y.ID]
+ return xx.entry < yy.entry && yy.exit < xx.exit
+}
+
+// domorder returns a value for dominator-oriented sorting.
+// Block domination does not provide a total ordering,
+// but domorder has two useful properties.
+// (1) If domorder(x) > domorder(y) then x does not dominate y.
+// (2) If domorder(x) < domorder(y) and domorder(y) < domorder(z) and x does not dominate y,
+// then x does not dominate z.
+// Property (1) means that blocks sorted by domorder always have a maximal dominant block first.
+// Property (2) allows searches for dominated blocks to exit early.
+func (t SparseTree) domorder(x *Block) int32 {
+ // Here is an argument that entry(x) provides the properties documented above.
+ //
+ // Entry and exit values are assigned in a depth-first dominator tree walk.
+ // For all blocks x and y, one of the following holds:
+ //
+ // (x-dom-y) x dominates y => entry(x) < entry(y) < exit(y) < exit(x)
+ // (y-dom-x) y dominates x => entry(y) < entry(x) < exit(x) < exit(y)
+ // (x-then-y) neither x nor y dominates the other and x walked before y => entry(x) < exit(x) < entry(y) < exit(y)
+ // (y-then-x) neither x nor y dominates the other and y walked before y => entry(y) < exit(y) < entry(x) < exit(x)
+ //
+ // entry(x) > entry(y) eliminates case x-dom-y. This provides property (1) above.
+ //
+ // For property (2), assume entry(x) < entry(y) and entry(y) < entry(z) and x does not dominate y.
+ // entry(x) < entry(y) allows cases x-dom-y and x-then-y.
+ // But by supposition, x does not dominate y. So we have x-then-y.
+ //
+ // For contradiction, assume x dominates z.
+ // Then entry(x) < entry(z) < exit(z) < exit(x).
+ // But we know x-then-y, so entry(x) < exit(x) < entry(y) < exit(y).
+ // Combining those, entry(x) < entry(z) < exit(z) < exit(x) < entry(y) < exit(y).
+ // By supposition, entry(y) < entry(z), which allows cases y-dom-z and y-then-z.
+ // y-dom-z requires entry(y) < entry(z), but we have entry(z) < entry(y).
+ // y-then-z requires exit(y) < entry(z), but we have entry(z) < exit(y).
+ // We have a contradiction, so x does not dominate z, as required.
+ return t[x.ID].entry
+}
diff --git a/src/cmd/compile/internal/ssa/stackalloc.go b/src/cmd/compile/internal/ssa/stackalloc.go
new file mode 100644
index 0000000..d41f399
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stackalloc.go
@@ -0,0 +1,472 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// TODO: live at start of block instead?
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+)
+
+type stackAllocState struct {
+ f *Func
+
+ // live is the output of stackalloc.
+ // live[b.id] = live values at the end of block b.
+ live [][]ID
+
+ // The following slices are reused across multiple users
+ // of stackAllocState.
+ values []stackValState
+ interfere [][]ID // interfere[v.id] = values that interfere with v.
+ names []LocalSlot
+ slots []int
+ used []bool
+
+ nArgSlot, // Number of Values sourced to arg slot
+ nNotNeed, // Number of Values not needing a stack slot
+ nNamedSlot, // Number of Values using a named stack slot
+ nReuse, // Number of values reusing a stack slot
+ nAuto, // Number of autos allocated for stack slots.
+ nSelfInterfere int32 // Number of self-interferences
+}
+
+func newStackAllocState(f *Func) *stackAllocState {
+ s := f.Cache.stackAllocState
+ if s == nil {
+ return new(stackAllocState)
+ }
+ if s.f != nil {
+ f.fe.Fatalf(src.NoXPos, "newStackAllocState called without previous free")
+ }
+ return s
+}
+
+func putStackAllocState(s *stackAllocState) {
+ for i := range s.values {
+ s.values[i] = stackValState{}
+ }
+ for i := range s.interfere {
+ s.interfere[i] = nil
+ }
+ for i := range s.names {
+ s.names[i] = LocalSlot{}
+ }
+ for i := range s.slots {
+ s.slots[i] = 0
+ }
+ for i := range s.used {
+ s.used[i] = false
+ }
+ s.f.Cache.stackAllocState = s
+ s.f = nil
+ s.live = nil
+ s.nArgSlot, s.nNotNeed, s.nNamedSlot, s.nReuse, s.nAuto, s.nSelfInterfere = 0, 0, 0, 0, 0, 0
+}
+
+type stackValState struct {
+ typ *types.Type
+ spill *Value
+ needSlot bool
+ isArg bool
+}
+
+// stackalloc allocates storage in the stack frame for
+// all Values that did not get a register.
+// Returns a map from block ID to the stack values live at the end of that block.
+func stackalloc(f *Func, spillLive [][]ID) [][]ID {
+ if f.pass.debug > stackDebug {
+ fmt.Println("before stackalloc")
+ fmt.Println(f.String())
+ }
+ s := newStackAllocState(f)
+ s.init(f, spillLive)
+ defer putStackAllocState(s)
+
+ s.stackalloc()
+ if f.pass.stats > 0 {
+ f.LogStat("stack_alloc_stats",
+ s.nArgSlot, "arg_slots", s.nNotNeed, "slot_not_needed",
+ s.nNamedSlot, "named_slots", s.nAuto, "auto_slots",
+ s.nReuse, "reused_slots", s.nSelfInterfere, "self_interfering")
+ }
+
+ return s.live
+}
+
+func (s *stackAllocState) init(f *Func, spillLive [][]ID) {
+ s.f = f
+
+ // Initialize value information.
+ if n := f.NumValues(); cap(s.values) >= n {
+ s.values = s.values[:n]
+ } else {
+ s.values = make([]stackValState, n)
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ s.values[v.ID].typ = v.Type
+ s.values[v.ID].needSlot = !v.Type.IsMemory() && !v.Type.IsVoid() && !v.Type.IsFlags() && f.getHome(v.ID) == nil && !v.rematerializeable() && !v.OnWasmStack
+ s.values[v.ID].isArg = hasAnyArgOp(v)
+ if f.pass.debug > stackDebug && s.values[v.ID].needSlot {
+ fmt.Printf("%s needs a stack slot\n", v)
+ }
+ if v.Op == OpStoreReg {
+ s.values[v.Args[0].ID].spill = v
+ }
+ }
+ }
+
+ // Compute liveness info for values needing a slot.
+ s.computeLive(spillLive)
+
+ // Build interference graph among values needing a slot.
+ s.buildInterferenceGraph()
+}
+
+func (s *stackAllocState) stackalloc() {
+ f := s.f
+
+ // Build map from values to their names, if any.
+ // A value may be associated with more than one name (e.g. after
+ // the assignment i=j). This step picks one name per value arbitrarily.
+ if n := f.NumValues(); cap(s.names) >= n {
+ s.names = s.names[:n]
+ } else {
+ s.names = make([]LocalSlot, n)
+ }
+ names := s.names
+ empty := LocalSlot{}
+ for _, name := range f.Names {
+ // Note: not "range f.NamedValues" above, because
+ // that would be nondeterministic.
+ for _, v := range f.NamedValues[*name] {
+ if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ aux := v.Aux.(*AuxNameOffset)
+ // Never let an arg be bound to a differently named thing.
+ if name.N != aux.Name || name.Off != aux.Offset {
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc register arg %s skipping name %s\n", v, name)
+ }
+ continue
+ }
+ } else if name.N.Class == ir.PPARAM && v.Op != OpArg {
+ // PPARAM's only bind to OpArg
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc PPARAM name %s skipping non-Arg %s\n", name, v)
+ }
+ continue
+ }
+
+ if names[v.ID] == empty {
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc value %s to name %s\n", v, *name)
+ }
+ names[v.ID] = *name
+ }
+ }
+ }
+
+ // Allocate args to their assigned locations.
+ for _, v := range f.Entry.Values {
+ if !hasAnyArgOp(v) {
+ continue
+ }
+ if v.Aux == nil {
+ f.Fatalf("%s has nil Aux\n", v.LongString())
+ }
+ if v.Op == OpArg {
+ loc := LocalSlot{N: v.Aux.(*ir.Name), Type: v.Type, Off: v.AuxInt}
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc OpArg %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ continue
+ }
+ // You might think this below would be the right idea, but you would be wrong.
+ // It almost works; as of 105a6e9518 - 2021-04-23,
+ // GOSSAHASH=11011011001011111 == cmd/compile/internal/noder.(*noder).embedded
+ // is compiled incorrectly. I believe the cause is one of those SSA-to-registers
+ // puzzles that the register allocator untangles; in the event that a register
+ // parameter does not end up bound to a name, "fixing" it is a bad idea.
+ //
+ //if f.DebugTest {
+ // if v.Op == OpArgIntReg || v.Op == OpArgFloatReg {
+ // aux := v.Aux.(*AuxNameOffset)
+ // loc := LocalSlot{N: aux.Name, Type: v.Type, Off: aux.Offset}
+ // if f.pass.debug > stackDebug {
+ // fmt.Printf("stackalloc Op%s %s to %s\n", v.Op, v, loc)
+ // }
+ // names[v.ID] = loc
+ // continue
+ // }
+ //}
+
+ }
+
+ // For each type, we keep track of all the stack slots we
+ // have allocated for that type.
+ // TODO: share slots among equivalent types. We would need to
+ // only share among types with the same GC signature. See the
+ // type.Equal calls below for where this matters.
+ locations := map[*types.Type][]LocalSlot{}
+
+ // Each time we assign a stack slot to a value v, we remember
+ // the slot we used via an index into locations[v.Type].
+ slots := s.slots
+ if n := f.NumValues(); cap(slots) >= n {
+ slots = slots[:n]
+ } else {
+ slots = make([]int, n)
+ s.slots = slots
+ }
+ for i := range slots {
+ slots[i] = -1
+ }
+
+ // Pick a stack slot for each value needing one.
+ var used []bool
+ if n := f.NumValues(); cap(s.used) >= n {
+ used = s.used[:n]
+ } else {
+ used = make([]bool, n)
+ s.used = used
+ }
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if !s.values[v.ID].needSlot {
+ s.nNotNeed++
+ continue
+ }
+ if hasAnyArgOp(v) {
+ s.nArgSlot++
+ continue // already picked
+ }
+
+ // If this is a named value, try to use the name as
+ // the spill location.
+ var name LocalSlot
+ if v.Op == OpStoreReg {
+ name = names[v.Args[0].ID]
+ } else {
+ name = names[v.ID]
+ }
+ if name.N != nil && v.Type.Compare(name.Type) == types.CMPeq {
+ for _, id := range s.interfere[v.ID] {
+ h := f.getHome(id)
+ if h != nil && h.(LocalSlot).N == name.N && h.(LocalSlot).Off == name.Off {
+ // A variable can interfere with itself.
+ // It is rare, but it can happen.
+ s.nSelfInterfere++
+ goto noname
+ }
+ }
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, name)
+ }
+ s.nNamedSlot++
+ f.setHome(v, name)
+ continue
+ }
+
+ noname:
+ // Set of stack slots we could reuse.
+ locs := locations[v.Type]
+ // Mark all positions in locs used by interfering values.
+ for i := 0; i < len(locs); i++ {
+ used[i] = false
+ }
+ for _, xid := range s.interfere[v.ID] {
+ slot := slots[xid]
+ if slot >= 0 {
+ used[slot] = true
+ }
+ }
+ // Find an unused stack slot.
+ var i int
+ for i = 0; i < len(locs); i++ {
+ if !used[i] {
+ s.nReuse++
+ break
+ }
+ }
+ // If there is no unused stack slot, allocate a new one.
+ if i == len(locs) {
+ s.nAuto++
+ locs = append(locs, LocalSlot{N: f.fe.Auto(v.Pos, v.Type), Type: v.Type, Off: 0})
+ locations[v.Type] = locs
+ }
+ // Use the stack variable at that index for v.
+ loc := locs[i]
+ if f.pass.debug > stackDebug {
+ fmt.Printf("stackalloc %s to %s\n", v, loc)
+ }
+ f.setHome(v, loc)
+ slots[v.ID] = i
+ }
+ }
+}
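+
+// An illustrative trace of the reuse path above (hypothetical values): the
+// first same-typed spill v5 allocates a fresh auto at locations[typ][0]
+// (counted under nAuto); a second spill v9 that does not interfere with v5
+// reuses index 0 (counted under nReuse); a third same-typed spill that does
+// interfere with v5 sees used[0] == true, falls off the end of the loop, and
+// gets a new auto at index 1.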
+
+// computeLive computes a map from block ID to a list of
+// stack-slot-needing value IDs live at the end of that block.
+// TODO: this could be quadratic if lots of variables are live across lots of
+// basic blocks. Figure out a way to make this function (or, more precisely, the user
+// of this function) require only linear size & time.
+func (s *stackAllocState) computeLive(spillLive [][]ID) {
+ s.live = make([][]ID, s.f.NumBlocks())
+ var phis []*Value
+ live := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(live)
+ t := s.f.newSparseSet(s.f.NumValues())
+ defer s.f.retSparseSet(t)
+
+ // Instead of iterating over f.Blocks, iterate over their postordering.
+ // Liveness information flows backward, so starting at the end
+ // increases the probability that we will stabilize quickly.
+ po := s.f.postorder()
+ for {
+ changed := false
+ for _, b := range po {
+ // Start with known live values at the end of the block
+ live.clear()
+ live.addAll(s.live[b.ID])
+
+ // Propagate backwards to the start of the block
+ phis = phis[:0]
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ live.remove(v.ID)
+ if v.Op == OpPhi {
+ // Save phi for later.
+ // Note: its args might need a stack slot even though
+ // the phi itself doesn't. So don't use needSlot.
+ if !v.Type.IsMemory() && !v.Type.IsVoid() {
+ phis = append(phis, v)
+ }
+ continue
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ }
+
+ // for each predecessor of b, expand its list of live-at-end values
+ // invariant: live contains the values live at the start of b (excluding phi inputs)
+ for i, e := range b.Preds {
+ p := e.b
+ t.clear()
+ t.addAll(s.live[p.ID])
+ t.addAll(live.contents())
+ t.addAll(spillLive[p.ID])
+ for _, v := range phis {
+ a := v.Args[i]
+ if s.values[a.ID].needSlot {
+ t.add(a.ID)
+ }
+ if spill := s.values[a.ID].spill; spill != nil {
+ //TODO: remove? Subsumed by SpillUse?
+ t.add(spill.ID)
+ }
+ }
+ if t.size() == len(s.live[p.ID]) {
+ continue
+ }
+ // grow p's live set
+ s.live[p.ID] = append(s.live[p.ID][:0], t.contents()...)
+ changed = true
+ }
+ }
+
+ if !changed {
+ break
+ }
+ }
+ if s.f.pass.debug > stackDebug {
+ for _, b := range s.f.Blocks {
+ fmt.Printf("stacklive %s %v\n", b, s.live[b.ID])
+ }
+ }
+}
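+
+// A rough worked instance of the fixed point above (hypothetical two-block
+// function, not real SSA): with b1 -> b2 and a slot-needing value v defined
+// in b1 but used in b2, processing b2 adds v.ID to its start-of-block live
+// set and then, via the predecessor-expansion step, to live[b1.ID]; the
+// second sweep finds nothing new, so changed stays false and the loop exits.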
+
+func (f *Func) getHome(vid ID) Location {
+ if int(vid) >= len(f.RegAlloc) {
+ return nil
+ }
+ return f.RegAlloc[vid]
+}
+
+func (f *Func) setHome(v *Value, loc Location) {
+ for v.ID >= ID(len(f.RegAlloc)) {
+ f.RegAlloc = append(f.RegAlloc, nil)
+ }
+ f.RegAlloc[v.ID] = loc
+}
+
+func (s *stackAllocState) buildInterferenceGraph() {
+ f := s.f
+ if n := f.NumValues(); cap(s.interfere) >= n {
+ s.interfere = s.interfere[:n]
+ } else {
+ s.interfere = make([][]ID, n)
+ }
+ live := f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(live)
+ for _, b := range f.Blocks {
+ // Propagate liveness backwards to the start of the block.
+ // Two values interfere if one is defined while the other is live.
+ live.clear()
+ live.addAll(s.live[b.ID])
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if s.values[v.ID].needSlot {
+ live.remove(v.ID)
+ for _, id := range live.contents() {
+ // Note: args can have different types and still interfere
+ // (with each other or with other values). See issue 23522.
+ if s.values[v.ID].typ.Compare(s.values[id].typ) == types.CMPeq || hasAnyArgOp(v) || s.values[id].isArg {
+ s.interfere[v.ID] = append(s.interfere[v.ID], id)
+ s.interfere[id] = append(s.interfere[id], v.ID)
+ }
+ }
+ }
+ for _, a := range v.Args {
+ if s.values[a.ID].needSlot {
+ live.add(a.ID)
+ }
+ }
+ if hasAnyArgOp(v) && s.values[v.ID].needSlot {
+ // OpArg is an input argument which is pre-spilled.
+ // We add back v.ID here because we want this value
+ // to appear live even before this point. Being live
+ // all the way to the start of the entry block prevents other
+ // values from being allocated to the same slot and clobbering
+ // the input value before we have a chance to load it.
+
+ // TODO(register args) this is apparently not wrong for register args -- is it necessary?
+ live.add(v.ID)
+ }
+ }
+ }
+ if f.pass.debug > stackDebug {
+ for vid, i := range s.interfere {
+ if len(i) > 0 {
+ fmt.Printf("v%d interferes with", vid)
+ for _, x := range i {
+ fmt.Printf(" v%d", x)
+ }
+ fmt.Println()
+ }
+ }
+ }
+}
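+
+// A minimal sketch of the rule above (hypothetical value IDs, not from a real
+// compilation): if v7 is still live when v3 is defined and both need slots of
+// the same type (or either is an arg), the loop records the conflict in both
+// directions,
+//
+//	s.interfere[3] = append(s.interfere[3], 7)
+//	s.interfere[7] = append(s.interfere[7], 3)
+//
+// so whichever value is assigned a slot first makes that slot unavailable to
+// the other in stackalloc's reuse loop.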
+
+func hasAnyArgOp(v *Value) bool {
+ return v.Op == OpArg || v.Op == OpArgIntReg || v.Op == OpArgFloatReg
+}
diff --git a/src/cmd/compile/internal/ssa/stackframe.go b/src/cmd/compile/internal/ssa/stackframe.go
new file mode 100644
index 0000000..08be62a
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stackframe.go
@@ -0,0 +1,10 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// stackframe calls back into the frontend to assign frame offsets.
+func stackframe(f *Func) {
+ f.fe.AllocFrame(f)
+}
diff --git a/src/cmd/compile/internal/ssa/stmtlines_test.go b/src/cmd/compile/internal/ssa/stmtlines_test.go
new file mode 100644
index 0000000..088f980
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/stmtlines_test.go
@@ -0,0 +1,141 @@
+package ssa_test
+
+import (
+ cmddwarf "cmd/internal/dwarf"
+ "cmd/internal/quoted"
+ "debug/dwarf"
+ "debug/elf"
+ "debug/macho"
+ "debug/pe"
+ "fmt"
+ "internal/testenv"
+ "internal/xcoff"
+ "io"
+ "os"
+ "runtime"
+ "sort"
+ "testing"
+)
+
+func open(path string) (*dwarf.Data, error) {
+ if fh, err := elf.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := pe.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := macho.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ if fh, err := xcoff.Open(path); err == nil {
+ return fh.DWARF()
+ }
+
+ return nil, fmt.Errorf("unrecognized executable format")
+}
+
+func must(err error) {
+ if err != nil {
+ panic(err)
+ }
+}
+
+type Line struct {
+ File string
+ Line int
+}
+
+func TestStmtLines(t *testing.T) {
+ if runtime.GOOS == "plan9" {
+ t.Skip("skipping on plan9; no DWARF symbol table in executables")
+ }
+
+ if runtime.GOOS == "aix" {
+ extld := os.Getenv("CC")
+ if extld == "" {
+ extld = "gcc"
+ }
+ extldArgs, err := quoted.Split(extld)
+ if err != nil {
+ t.Fatal(err)
+ }
+ enabled, err := cmddwarf.IsDWARFEnabledOnAIXLd(extldArgs)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if !enabled {
+ t.Skip("skipping on aix: no DWARF with ld version < 7.2.2 ")
+ }
+ }
+
+ lines := map[Line]bool{}
+ dw, err := open(testenv.GoToolPath(t))
+ must(err)
+ rdr := dw.Reader()
+ rdr.Seek(0)
+ for {
+ e, err := rdr.Next()
+ must(err)
+ if e == nil {
+ break
+ }
+ if e.Tag != dwarf.TagCompileUnit {
+ continue
+ }
+ pkgname, _ := e.Val(dwarf.AttrName).(string)
+ if pkgname == "runtime" {
+ continue
+ }
+ if pkgname == "crypto/elliptic/internal/fiat" {
+ continue // golang.org/issue/49372
+ }
+ if e.Val(dwarf.AttrStmtList) == nil {
+ continue
+ }
+ lrdr, err := dw.LineReader(e)
+ must(err)
+
+ var le dwarf.LineEntry
+
+ for {
+ err := lrdr.Next(&le)
+ if err == io.EOF {
+ break
+ }
+ must(err)
+ fl := Line{le.File.Name, le.Line}
+ lines[fl] = lines[fl] || le.IsStmt
+ }
+ }
+
+ nonStmtLines := []Line{}
+ for line, isstmt := range lines {
+ if !isstmt {
+ nonStmtLines = append(nonStmtLines, line)
+ }
+ }
+
+ if runtime.GOARCH == "amd64" {
+ if len(nonStmtLines)*100 > len(lines) { // > 99% obtained on amd64, no backsliding
+ t.Errorf("Saw too many (amd64, > 1%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines))
+ }
+ } else if len(nonStmtLines)*100 > 2*len(lines) { // expect 98% elsewhere.
+ t.Errorf("Saw too many (not amd64, > 2%%) lines without statement marks, total=%d, nostmt=%d ('-run TestStmtLines -v' lists failing lines)\n", len(lines), len(nonStmtLines))
+ }
+ t.Logf("Saw %d out of %d lines without statement marks", len(nonStmtLines), len(lines))
+ if testing.Verbose() {
+ sort.Slice(nonStmtLines, func(i, j int) bool {
+ if nonStmtLines[i].File != nonStmtLines[j].File {
+ return nonStmtLines[i].File < nonStmtLines[j].File
+ }
+ return nonStmtLines[i].Line < nonStmtLines[j].Line
+ })
+ for _, l := range nonStmtLines {
+ t.Logf("%s:%d has no DWARF is_stmt mark\n", l.File, l.Line)
+ }
+ }
+ t.Logf("total=%d, nostmt=%d\n", len(lines), len(nonStmtLines))
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/convertline.go b/src/cmd/compile/internal/ssa/testdata/convertline.go
new file mode 100644
index 0000000..08f3ae8
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/convertline.go
@@ -0,0 +1,16 @@
+package main
+
+import "fmt"
+
+func F[T any](n T) {
+ fmt.Printf("called\n")
+}
+
+func G[T any](n T) {
+ F(n)
+ fmt.Printf("after\n")
+}
+
+func main() {
+ G(3)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
new file mode 100644
index 0000000..a0404e4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-dbg.nexts
@@ -0,0 +1,99 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
new file mode 100644
index 0000000..2be83ce
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.dlv-opt.nexts
@@ -0,0 +1,94 @@
+ ./testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
new file mode 100644
index 0000000..72df60c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-dbg.nexts
@@ -0,0 +1,123 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l.begin.x = 1
+l.end.y = 4
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+61: sink = dx + dy //gdb-opt=(dx,dy)
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+hist = {array = <A>, len = 7, cap = 7}
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+84: t := 0
+85: n := 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 3
+i = 1
+t = 3
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 6
+i = 2
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 8
+i = 4
+t = 17
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+90: t += i * a
+91: n += a
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+n = 9
+i = 5
+t = 22
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+88: continue
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
new file mode 100644
index 0000000..d3a34ac
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.gdb-opt.nexts
@@ -0,0 +1,143 @@
+ src/cmd/compile/internal/ssa/testdata/hist.go
+55: func test() {
+57: l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+58: tinycall() // this forces l etc to stack
+59: dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+l = {begin = {x = 1, y = 2}, end = {x = 3, y = 4}}
+dx = <Optimized out, as expected>
+dy = <Optimized out, as expected>
+60: dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+dx = 2
+dy = <Optimized out, as expected>
+61: sink = dx + dy //gdb-opt=(dx,dy)
+dx = 2
+dy = 2
+63: hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+dx = 2
+dy = <Optimized out, as expected>
+64: var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+65: if len(os.Args) > 1 {
+73: scanner := bufio.NewScanner(reader)
+74: for scanner.Scan() { //gdb-opt=(scanner/A)
+scanner = (bufio.Scanner *) <A>
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 1
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 2
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 4
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+75: s := scanner.Text()
+76: i, err := strconv.ParseInt(s, 10, 64)
+77: if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+err = {tab = 0x0, data = 0x0}
+hist = {array = 0xc00005ae50, len = 7, cap = 7}
+i = 5
+81: hist = ensure(int(i), hist)
+82: hist[int(i)]++
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 0
+t = 0
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 0
+t = 0
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 3
+n = 3
+t = 3
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 6
+t = 9
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 2
+n = 6
+t = 9
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 1
+n = 8
+t = 17
+92: fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+91: n += a
+90: t += i * a
+86: for i, a := range hist {
+87: if a == 0 { //gdb-opt=(a,n,t)
+a = 0
+n = 9
+t = 22
+86: for i, a := range hist {
+99: }
diff --git a/src/cmd/compile/internal/ssa/testdata/hist.go b/src/cmd/compile/internal/ssa/testdata/hist.go
new file mode 100644
index 0000000..f8fa6e6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/hist.go
@@ -0,0 +1,106 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is the input program for an end-to-end test of the DWARF produced
+// by the compiler. It is compiled with various flags, then the resulting
+// binary is "debugged" under the control of a harness. Because the compile+debug
+// step is time-consuming, the tests for different bugs are all accumulated here
+// so that their cost is only the time to "n" through the additional code.
+
+package main
+
+import (
+ "bufio"
+ "fmt"
+ "io"
+ "os"
+ "strconv"
+ "strings"
+)
+
+type point struct {
+ x, y int
+}
+
+type line struct {
+ begin, end point
+}
+
+var zero int
+var sink int
+
+//go:noinline
+func tinycall() {
+}
+
+func ensure(n int, sl []int) []int {
+ for len(sl) <= n {
+ sl = append(sl, 0)
+ }
+ return sl
+}
+
+var cannedInput string = `1
+1
+1
+2
+2
+2
+4
+4
+5
+`
+
+func test() {
+ // For #19868
+ l := line{point{1 + zero, 2 + zero}, point{3 + zero, 4 + zero}}
+ tinycall() // this forces l etc to stack
+ dx := l.end.x - l.begin.x //gdb-dbg=(l.begin.x,l.end.y)//gdb-opt=(l,dx/O,dy/O)
+ dy := l.end.y - l.begin.y //gdb-opt=(dx,dy/O)
+ sink = dx + dy //gdb-opt=(dx,dy)
+ // For #21098
+ hist := make([]int, 7) //gdb-opt=(dx/O,dy/O) // TODO sink is missing if this code is in 'test' instead of 'main'
+ var reader io.Reader = strings.NewReader(cannedInput) //gdb-dbg=(hist/A) // TODO cannedInput/A is missing if this code is in 'test' instead of 'main'
+ if len(os.Args) > 1 {
+ var err error
+ reader, err = os.Open(os.Args[1])
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "There was an error opening %s: %v\n", os.Args[1], err)
+ return
+ }
+ }
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() { //gdb-opt=(scanner/A)
+ s := scanner.Text()
+ i, err := strconv.ParseInt(s, 10, 64)
+ if err != nil { //gdb-dbg=(i) //gdb-opt=(err,hist,i)
+ fmt.Fprintf(os.Stderr, "There was an error: %v\n", err)
+ return
+ }
+ hist = ensure(int(i), hist)
+ hist[int(i)]++
+ }
+ t := 0
+ n := 0
+ for i, a := range hist {
+ if a == 0 { //gdb-opt=(a,n,t)
+ continue
+ }
+ t += i * a
+ n += a
+ fmt.Fprintf(os.Stderr, "%d\t%d\t%d\t%d\t%d\n", i, a, n, i*a, t) //gdb-dbg=(n,i,t)
+ }
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
new file mode 100644
index 0000000..a00934b
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.dlv-dbg.nexts
@@ -0,0 +1,11 @@
+ ./testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
new file mode 100644
index 0000000..70dfa07
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.gdb-dbg.nexts
@@ -0,0 +1,11 @@
+ src/cmd/compile/internal/ssa/testdata/i22558.go
+19: func test(t *thing, u *thing) {
+20: if t.next != nil {
+23: fmt.Fprintf(os.Stderr, "%s\n", t.name)
+24: u.self = u
+25: t.self = t
+26: t.next = u
+27: for _, p := range t.stuff {
+28: if isFoo(t, p) {
+29: return
+44: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22558.go b/src/cmd/compile/internal/ssa/testdata/i22558.go
new file mode 100644
index 0000000..8aea76c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22558.go
@@ -0,0 +1,51 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+type big struct {
+ pile [768]int8
+}
+
+type thing struct {
+ name string
+ next *thing
+ self *thing
+ stuff []big
+}
+
+func test(t *thing, u *thing) {
+ if t.next != nil {
+ return
+ }
+ fmt.Fprintf(os.Stderr, "%s\n", t.name)
+ u.self = u
+ t.self = t
+ t.next = u
+ for _, p := range t.stuff {
+ if isFoo(t, p) {
+ return
+ }
+ }
+}
+
+//go:noinline
+func isFoo(t *thing, b big) bool {
+ return true
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ t := &thing{name: "t", self: nil, next: nil, stuff: make([]big, 1)}
+ u := thing{name: "u", self: t, next: t, stuff: make([]big, 1)}
+ test(t, &u)
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
new file mode 100644
index 0000000..18a5ff9
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.dlv-dbg-race.nexts
@@ -0,0 +1,7 @@
+ ./testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
new file mode 100644
index 0000000..46285e2
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.gdb-dbg-race.nexts
@@ -0,0 +1,7 @@
+ src/cmd/compile/internal/ssa/testdata/i22600.go
+8: func test() {
+9: pwd, err := os.Getwd()
+10: if err != nil {
+14: fmt.Println(pwd)
+15: }
+20: }
diff --git a/src/cmd/compile/internal/ssa/testdata/i22600.go b/src/cmd/compile/internal/ssa/testdata/i22600.go
new file mode 100644
index 0000000..27f0d3d
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/i22600.go
@@ -0,0 +1,27 @@
+package main
+
+import (
+ "fmt"
+ "os"
+)
+
+func test() {
+ pwd, err := os.Getwd()
+ if err != nil {
+ fmt.Println(err)
+ os.Exit(1)
+ }
+ fmt.Println(pwd)
+}
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+var snk string
+
+//go:noinline
+func growstack() {
+ snk = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
new file mode 100644
index 0000000..0b9f06f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.dlv-opt.nexts
@@ -0,0 +1,12 @@
+ ./testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
+1: package main
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
new file mode 100644
index 0000000..d465ad1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.gdb-opt.nexts
@@ -0,0 +1,4 @@
+ src/cmd/compile/internal/ssa/testdata/infloop.go
+6: func test() {
+8: go func() {}()
+10: for {
diff --git a/src/cmd/compile/internal/ssa/testdata/infloop.go b/src/cmd/compile/internal/ssa/testdata/infloop.go
new file mode 100644
index 0000000..cdb374f
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/infloop.go
@@ -0,0 +1,16 @@
+package main
+
+var sink int
+
+//go:noinline
+func test() {
+ // This is for #30167, incorrect line numbers in an infinite loop
+ go func() {}()
+
+ for {
+ }
+}
+
+func main() {
+ test()
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/inline-dump.go b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
new file mode 100644
index 0000000..97893b6
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/inline-dump.go
@@ -0,0 +1,17 @@
+package foo
+
+func f(m, n int) int {
+ a := g(n)
+ b := g(m)
+ return a + b
+}
+
+func g(x int) int {
+ y := h(x + 1)
+ z := h(x - 1)
+ return y + z
+}
+
+func h(x int) int {
+ return x * x
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/pushback.go b/src/cmd/compile/internal/ssa/testdata/pushback.go
new file mode 100644
index 0000000..754e6cb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/pushback.go
@@ -0,0 +1,30 @@
+package main
+
+type Node struct {
+ Circular bool
+}
+
+type ExtNode[V any] struct {
+ v V
+ Node
+}
+
+type List[V any] struct {
+ root *ExtNode[V]
+ len int
+}
+
+func (list *List[V]) PushBack(arg V) {
+ if list.len == 0 {
+ list.root = &ExtNode[V]{v: arg}
+ list.root.Circular = true
+ list.len++
+ return
+ }
+ list.len++
+}
+
+func main() {
+ var v List[int]
+ v.PushBack(1)
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/sayhi.go b/src/cmd/compile/internal/ssa/testdata/sayhi.go
new file mode 100644
index 0000000..680e1eb
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/sayhi.go
@@ -0,0 +1,12 @@
+package foo
+
+import (
+ "fmt"
+ "sync"
+)
+
+func sayhi(n int, wg *sync.WaitGroup) {
+ fmt.Println("hi", n)
+ fmt.Println("hi", n)
+ wg.Done()
+}
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
new file mode 100644
index 0000000..f182ff4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-dbg.nexts
@@ -0,0 +1,56 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
new file mode 100644
index 0000000..b5e41aa
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.dlv-opt.nexts
@@ -0,0 +1,46 @@
+ ./testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+31: fmt.Println(x, y)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
new file mode 100644
index 0000000..6eb4903
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-dbg.nexts
@@ -0,0 +1,64 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+40: c := 0
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+34: a := y
+35: f1(a)
+37: b := 0
+38: f2(b)
+39: if gretbool() {
+43: c := 1.1
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+57: j += j * (j ^ 3) / 100
+58: if i == f {
+59: fmt.Println("foo")
+60: break
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
new file mode 100644
index 0000000..5a186b5
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.gdb-opt.nexts
@@ -0,0 +1,55 @@
+ src/cmd/compile/internal/ssa/testdata/scopes.go
+22: func test() {
+23: x := id(0)
+24: y := id(0)
+25: fmt.Println(x)
+0:
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 1
+y = 0
+26: for i := x; i < 3; i++ {
+27: x := i * i
+28: y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 4
+y = 1
+26: for i := x; i < 3; i++ {
+31: fmt.Println(x, y)
+30: y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+x = 0
+y = 5
+31: fmt.Println(x, y)
+0: 5
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+41: f3(c)
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+35: f1(a)
+38: f2(b)
+39: if gretbool() {
+44: f4(int(c))
+46: f5(b)
+48: f6(a)
+33: for x := 0; x <= 1; x++ { // From delve scopetest.go
+53: j = id(1)
+54: f = id(2)
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+62: sleepytime()
+56: for i := 0; i <= 5; i++ {
+58: if i == f {
+59: fmt.Println("foo")
+64: helloworld()
+66: }
+15: }
diff --git a/src/cmd/compile/internal/ssa/testdata/scopes.go b/src/cmd/compile/internal/ssa/testdata/scopes.go
new file mode 100644
index 0000000..e93d699
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/testdata/scopes.go
@@ -0,0 +1,107 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "time"
+)
+
+func main() {
+ growstack() // Use stack early to prevent growth during test, which confuses gdb
+ test()
+}
+
+//go:noinline
+func id(x int) int {
+ return x
+}
+
+func test() {
+ x := id(0)
+ y := id(0)
+ fmt.Println(x)
+ for i := x; i < 3; i++ {
+ x := i * i
+ y += id(x) //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ }
+ y = x + y //gdb-dbg=(x,y)//gdb-opt=(x,y)
+ fmt.Println(x, y)
+
+ for x := 0; x <= 1; x++ { // From delve scopetest.go
+ a := y
+ f1(a)
+ {
+ b := 0
+ f2(b)
+ if gretbool() {
+ c := 0
+ f3(c)
+ } else {
+ c := 1.1
+ f4(int(c))
+ }
+ f5(b)
+ }
+ f6(a)
+ }
+
+ { // From delve testnextprog.go
+ var (
+ j = id(1)
+ f = id(2)
+ )
+ for i := 0; i <= 5; i++ {
+ j += j * (j ^ 3) / 100
+ if i == f {
+ fmt.Println("foo")
+ break
+ }
+ sleepytime()
+ }
+ helloworld()
+ }
+}
+
+func sleepytime() {
+ time.Sleep(5 * time.Millisecond)
+}
+
+func helloworld() {
+ fmt.Println("Hello, World!")
+}
+
+//go:noinline
+func f1(x int) {}
+
+//go:noinline
+func f2(x int) {}
+
+//go:noinline
+func f3(x int) {}
+
+//go:noinline
+func f4(x int) {}
+
+//go:noinline
+func f5(x int) {}
+
+//go:noinline
+func f6(x int) {}
+
+var boolvar = true
+
+func gretbool() bool {
+ x := boolvar
+ boolvar = !boolvar
+ return x
+}
+
+var sink string
+
+//go:noinline
+func growstack() {
+ sink = fmt.Sprintf("%#v,%#v,%#v", 1, true, "cat")
+}
diff --git a/src/cmd/compile/internal/ssa/tighten.go b/src/cmd/compile/internal/ssa/tighten.go
new file mode 100644
index 0000000..214bf62
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tighten.go
@@ -0,0 +1,165 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// tighten moves Values closer to the Blocks in which they are used.
+// This can reduce the amount of register spilling required,
+// if it doesn't also create more live values.
+// A Value can be moved to any block that
+// dominates all blocks in which it is used.
+func tighten(f *Func) {
+ canMove := make([]bool, f.NumValues())
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op.isLoweredGetClosurePtr() {
+ // Must stay in the entry block.
+ continue
+ }
+ switch v.Op {
+ case OpPhi, OpArg, OpArgIntReg, OpArgFloatReg, OpSelect0, OpSelect1, OpSelectN:
+ // Phis need to stay in their block.
+ // Arg must stay in the entry block.
+ // Tuple selectors must stay with the tuple generator.
+ // SelectN is typically, ultimately, a register.
+ continue
+ }
+ if v.MemoryArg() != nil {
+ // We can't move values which have a memory arg - it might
+ // make two memory values live across a block boundary.
+ continue
+ }
+ // Count arguments which will need a register.
+ narg := 0
+ for _, a := range v.Args {
+ if !a.rematerializeable() {
+ narg++
+ }
+ }
+ if narg >= 2 && !v.Type.IsFlags() {
+ // Don't move values with more than one input, as that may
+ // increase register pressure.
+ // We make an exception for flags, as we want flag generators
+ // moved next to uses (because we only have 1 flag register).
+ continue
+ }
+ canMove[v.ID] = true
+ }
+ }
+
+ // Build data structure for fast least-common-ancestor queries.
+ lca := makeLCArange(f)
+
+ // For each moveable value, record the block that dominates all uses found so far.
+ target := make([]*Block, f.NumValues())
+
+ // Grab loop information.
+ // We use this to make sure we don't tighten a value into a (deeper) loop.
+ idom := f.Idom()
+ loops := f.loopnest()
+ loops.calculateDepths()
+
+ changed := true
+ for changed {
+ changed = false
+
+ // Reset target
+ for i := range target {
+ target[i] = nil
+ }
+
+ // Compute target locations (for moveable values only).
+ // target location = the least common ancestor of all uses in the dominator tree.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if !canMove[a.ID] {
+ continue
+ }
+ use := b
+ if v.Op == OpPhi {
+ use = b.Preds[i].b
+ }
+ if target[a.ID] == nil {
+ target[a.ID] = use
+ } else {
+ target[a.ID] = lca.find(target[a.ID], use)
+ }
+ }
+ }
+ for _, c := range b.ControlValues() {
+ if !canMove[c.ID] {
+ continue
+ }
+ if target[c.ID] == nil {
+ target[c.ID] = b
+ } else {
+ target[c.ID] = lca.find(target[c.ID], b)
+ }
+ }
+ }
+
+ // If the target location is inside a loop,
+ // move the target location up to just before the loop head.
+ for _, b := range f.Blocks {
+ origloop := loops.b2l[b.ID]
+ for _, v := range b.Values {
+ t := target[v.ID]
+ if t == nil {
+ continue
+ }
+ targetloop := loops.b2l[t.ID]
+ for targetloop != nil && (origloop == nil || targetloop.depth > origloop.depth) {
+ t = idom[targetloop.header.ID]
+ target[v.ID] = t
+ targetloop = loops.b2l[t.ID]
+ }
+ }
+ }
+
+ // Move values to target locations.
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ t := target[v.ID]
+ if t == nil || t == b {
+ // v is not moveable, or is already in correct place.
+ continue
+ }
+ // Move v to the block which dominates its uses.
+ t.Values = append(t.Values, v)
+ v.Block = t
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+ changed = true
+ i--
+ }
+ }
+ }
+}
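+
+// A small illustrative example of the effect of this pass (hypothetical SSA,
+// not produced by this code): for
+//
+//	b1: v2 = OffPtr <*int> [8] v1
+//	b4: v3 = Load <int> v2 mem
+//
+// where b4 is the only block using v2, v2 is not a phi, arg, or selector,
+// has no memory arg, and has fewer than two non-rematerializeable args, so
+// canMove[v2.ID] is true; its target becomes b4 and the move loop appends v2
+// to b4.Values, provided b4 is not in a deeper loop than b1.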
+
+// phiTighten moves constants closer to phi users.
+// This pass avoids having lots of constants live for lots of the program.
+// See issue 16407.
+func phiTighten(f *Func) {
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ continue
+ }
+ for i, a := range v.Args {
+ if !a.rematerializeable() {
+ continue // not a constant we can move around
+ }
+ if a.Block == b.Preds[i].b {
+ continue // already in the right place
+ }
+ // Make a copy of a, put in predecessor block.
+ v.SetArg(i, a.copyInto(b.Preds[i].b))
+ }
+ }
+ }
+}
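+
+// Sketch of the effect on a hypothetical phi (illustrative only): for
+//
+//	b3: v4 = Phi <int> v1 v2    // v1 = Const64 <int> [0], defined in b1
+//
+// where the predecessor for v4's first argument is b5 (not b1), the constant
+// v1 is rematerializeable, so SetArg replaces that argument with a copy of v1
+// placed in b5, keeping the original constant from having to stay live all
+// the way from b1 to the edge into b3.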
diff --git a/src/cmd/compile/internal/ssa/trim.go b/src/cmd/compile/internal/ssa/trim.go
new file mode 100644
index 0000000..c930a20
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/trim.go
@@ -0,0 +1,172 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/internal/src"
+
+// trim removes blocks with no code in them.
+// These blocks were inserted to remove critical edges.
+func trim(f *Func) {
+ n := 0
+ for _, b := range f.Blocks {
+ if !trimmableBlock(b) {
+ f.Blocks[n] = b
+ n++
+ continue
+ }
+
+ bPos := b.Pos
+ bIsStmt := bPos.IsStmt() == src.PosIsStmt
+
+ // Splice b out of the graph. NOTE: `mergePhi` depends on the
+ // order in which the predecessor edges are merged here.
+ p, i := b.Preds[0].b, b.Preds[0].i
+ s, j := b.Succs[0].b, b.Succs[0].i
+ ns := len(s.Preds)
+ p.Succs[i] = Edge{s, j}
+ s.Preds[j] = Edge{p, i}
+
+ for _, e := range b.Preds[1:] {
+ p, i := e.b, e.i
+ p.Succs[i] = Edge{s, len(s.Preds)}
+ s.Preds = append(s.Preds, Edge{p, i})
+ }
+
+ // Attempt to preserve a statement boundary
+ if bIsStmt {
+ sawStmt := false
+ for _, v := range s.Values {
+ if isPoorStatementOp(v.Op) {
+ continue
+ }
+ if v.Pos.SameFileAndLine(bPos) {
+ v.Pos = v.Pos.WithIsStmt()
+ }
+ sawStmt = true
+ break
+ }
+ if !sawStmt && s.Pos.SameFileAndLine(bPos) {
+ s.Pos = s.Pos.WithIsStmt()
+ }
+ }
+ // If `s` had more than one predecessor, update its phi-ops to
+ // account for the merge.
+ if ns > 1 {
+ for _, v := range s.Values {
+ if v.Op == OpPhi {
+ mergePhi(v, j, b)
+ }
+
+ }
+ // Remove the phi-ops from `b` if they were merged into the
+ // phi-ops of `s`.
+ k := 0
+ for _, v := range b.Values {
+ if v.Op == OpPhi {
+ if v.Uses == 0 {
+ v.resetArgs()
+ continue
+ }
+ // Pad the arguments of the remaining phi-ops so
+ // they match the new predecessor count of `s`.
+ // Since s did not have a Phi op corresponding to
+ // the phi op in b, the other edges coming into s
+ // must be loopback edges from s, so v is the right
+ // argument to v!
+ args := make([]*Value, len(v.Args))
+ copy(args, v.Args)
+ v.resetArgs()
+ for x := 0; x < j; x++ {
+ v.AddArg(v)
+ }
+ v.AddArg(args[0])
+ for x := j + 1; x < ns; x++ {
+ v.AddArg(v)
+ }
+ for _, a := range args[1:] {
+ v.AddArg(a)
+ }
+ }
+ b.Values[k] = v
+ k++
+ }
+ b.Values = b.Values[:k]
+ }
+
+ // Merge the blocks' values.
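+ // b's values are prepended to s.Values: grow s.Values by k
+ // slots, shift the existing m values to the tail, then copy b's
+ // values into the front.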
+ for _, v := range b.Values {
+ v.Block = s
+ }
+ k := len(b.Values)
+ m := len(s.Values)
+ for i := 0; i < k; i++ {
+ s.Values = append(s.Values, nil)
+ }
+ copy(s.Values[k:], s.Values[:m])
+ copy(s.Values, b.Values)
+ }
+ if n < len(f.Blocks) {
+ f.invalidateCFG()
+ tail := f.Blocks[n:]
+ for i := range tail {
+ tail[i] = nil
+ }
+ f.Blocks = f.Blocks[:n]
+ }
+}
+
+// emptyBlock reports whether the block contains no actual
+// instructions (phi-ops are ignored).
+func emptyBlock(b *Block) bool {
+ for _, v := range b.Values {
+ if v.Op != OpPhi {
+ return false
+ }
+ }
+ return true
+}
+
+// trimmableBlock reports whether the block can be trimmed from the CFG,
+// subject to the following criteria:
+// - it should not be the first block
+// - it should be BlockPlain
+// - it should not loop back to itself
+// - it either is the single predecessor of the successor block or
+// contains no actual instructions
+func trimmableBlock(b *Block) bool {
+ if b.Kind != BlockPlain || b == b.Func.Entry {
+ return false
+ }
+ s := b.Succs[0].b
+ return s != b && (len(s.Preds) == 1 || emptyBlock(b))
+}
+
+// mergePhi adjusts the number of `v`'s arguments to account for the
+// merge of `b`, which was the `i`th predecessor of `v`'s block.
+func mergePhi(v *Value, i int, b *Block) {
+ u := v.Args[i]
+ if u.Block == b {
+ if u.Op != OpPhi {
+ b.Func.Fatalf("value %s is not a phi operation", u.LongString())
+ }
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., u, ..., vk)
+ // then the merged phi is
+ // v = φ(v0, v1, ..., u0, ..., vk, u1, ..., un)
+ v.SetArg(i, u.Args[0])
+ v.AddArgs(u.Args[1:]...)
+ } else {
+ // If the original block contained u = φ(u0, u1, ..., un) and
+ // the current phi is
+ // v = φ(v0, v1, ..., vi, ..., vk)
+ // i.e. it does not use a value from the predecessor block,
+ // then the merged phi is
+ // v = φ(v0, v1, ..., vk, vi, vi, ...)
+ for j := 1; j < len(b.Preds); j++ {
+ v.AddArg(v.Args[i])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/tuple.go b/src/cmd/compile/internal/ssa/tuple.go
new file mode 100644
index 0000000..289df40
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/tuple.go
@@ -0,0 +1,71 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+// tightenTupleSelectors ensures that tuple selectors (Select0, Select1,
+// and SelectN ops) are in the same block as their tuple generator. The
+// function also ensures that there are no duplicate tuple selectors.
+// These properties are expected by the scheduler but may not have
+// been maintained by the optimization pipeline up to this point.
+//
+// See issues 16741 and 39472.
+func tightenTupleSelectors(f *Func) {
+ selectors := make(map[struct {
+ id ID
+ which int
+ }]*Value)
+ for _, b := range f.Blocks {
+ for _, selector := range b.Values {
+ // Key fields for de-duplication
+ var tuple *Value
+ idx := 0
+ switch selector.Op {
+ default:
+ continue
+ case OpSelect1:
+ idx = 1
+ fallthrough
+ case OpSelect0:
+ tuple = selector.Args[0]
+ if !tuple.Type.IsTuple() {
+ f.Fatalf("arg of tuple selector %s is not a tuple: %s", selector.String(), tuple.LongString())
+ }
+ case OpSelectN:
+ tuple = selector.Args[0]
+ idx = int(selector.AuxInt)
+ if !tuple.Type.IsResults() {
+ f.Fatalf("arg of result selector %s is not a results: %s", selector.String(), tuple.LongString())
+ }
+ }
+
+ // If there is a pre-existing selector in the target block then
+ // use that. Do this even if the selector is already in the
+ // target block to avoid duplicate tuple selectors.
+ key := struct {
+ id ID
+ which int
+ }{tuple.ID, idx}
+ if t := selectors[key]; t != nil {
+ if selector != t {
+ selector.copyOf(t)
+ }
+ continue
+ }
+
+ // If the selector is in the wrong block copy it into the target
+ // block.
+ if selector.Block != tuple.Block {
+ t := selector.copyInto(tuple.Block)
+ selector.copyOf(t)
+ selectors[key] = t
+ continue
+ }
+
+ // The selector is in the target block. Add it to the map so it
+ // cannot be duplicated.
+ selectors[key] = selector
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/value.go b/src/cmd/compile/internal/ssa/value.go
new file mode 100644
index 0000000..7b411a4
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/value.go
@@ -0,0 +1,559 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "math"
+ "sort"
+ "strings"
+)
+
+// A Value represents a value in the SSA representation of the program.
+// The ID and Type fields must not be modified. The remainder may be modified
+// if they preserve the value of the Value (e.g. changing a (mul 2 x) to an (add x x)).
+type Value struct {
+ // A unique identifier for the value. For performance we allocate these IDs
+ // densely starting at 1. There is no guarantee that there won't be occasional holes, though.
+ ID ID
+
+ // The operation that computes this value. See op.go.
+ Op Op
+
+ // The type of this value. Normally this will be a Go type, but there
+ // are a few other pseudo-types, see ../types/type.go.
+ Type *types.Type
+
+ // Auxiliary info for this value. The type of this information depends on the opcode and type.
+ // AuxInt is used for integer values, Aux is used for other values.
+ // Floats are stored in AuxInt using math.Float64bits(f).
+ // Unused portions of AuxInt are filled by sign-extending the used portion,
+ // even if the represented value is unsigned.
+ // Users of AuxInt which interpret AuxInt as unsigned (e.g. shifts) must be careful.
+ // Use Value.AuxUnsigned to get the zero-extended value of AuxInt.
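+ // For example, an OpConst8 representing the byte 0xff stores AuxInt == -1,
+ // and Value.AuxUnsigned then returns 0xff.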
+ AuxInt int64
+ Aux Aux
+
+ // Arguments of this value
+ Args []*Value
+
+ // Containing basic block
+ Block *Block
+
+ // Source position
+ Pos src.XPos
+
+ // Use count. Each appearance in Value.Args and Block.Controls counts once.
+ Uses int32
+
+ // wasm: Value stays on the WebAssembly stack. This value will not get a "register" (WebAssembly variable)
+ // nor a slot on Go stack, and the generation of this value is delayed to its use time.
+ OnWasmStack bool
+
+ // Is this value in the per-function constant cache? If so, remove from cache before changing it or recycling it.
+ InCache bool
+
+ // Storage for the first three args
+ argstorage [3]*Value
+}
+
+// Examples:
+//  Opcode    aux     args
+//  OpAdd     nil     2
+//  OpConst   string  0     string constant
+//  OpConst   int64   0     int64 constant
+//  OpAddcq   int64   1     amd64 op: v = arg[0] + constant
+
+// short form print. Just v#.
+func (v *Value) String() string {
+ if v == nil {
+ return "nil" // should never happen, but not panicking helps with debugging
+ }
+ return fmt.Sprintf("v%d", v.ID)
+}
+
+func (v *Value) AuxInt8() int8 {
+ if opcodeTable[v.Op].auxType != auxInt8 && opcodeTable[v.Op].auxType != auxNameOffsetInt8 {
+ v.Fatalf("op %s doesn't have an int8 aux field", v.Op)
+ }
+ return int8(v.AuxInt)
+}
+
+func (v *Value) AuxInt16() int16 {
+ if opcodeTable[v.Op].auxType != auxInt16 {
+ v.Fatalf("op %s doesn't have an int16 aux field", v.Op)
+ }
+ return int16(v.AuxInt)
+}
+
+func (v *Value) AuxInt32() int32 {
+ if opcodeTable[v.Op].auxType != auxInt32 {
+ v.Fatalf("op %s doesn't have an int32 aux field", v.Op)
+ }
+ return int32(v.AuxInt)
+}
+
+// AuxUnsigned returns v.AuxInt as an unsigned value for OpConst*.
+// v.AuxInt is always sign-extended to 64 bits, even if the
+// represented value is unsigned. This undoes that sign extension.
+func (v *Value) AuxUnsigned() uint64 {
+ c := v.AuxInt
+ switch v.Op {
+ case OpConst64:
+ return uint64(c)
+ case OpConst32:
+ return uint64(uint32(c))
+ case OpConst16:
+ return uint64(uint16(c))
+ case OpConst8:
+ return uint64(uint8(c))
+ }
+ v.Fatalf("op %s isn't OpConst*", v.Op)
+ return 0
+}
+
+func (v *Value) AuxFloat() float64 {
+ if opcodeTable[v.Op].auxType != auxFloat32 && opcodeTable[v.Op].auxType != auxFloat64 {
+ v.Fatalf("op %s doesn't have a float aux field", v.Op)
+ }
+ return math.Float64frombits(uint64(v.AuxInt))
+}
+func (v *Value) AuxValAndOff() ValAndOff {
+ if opcodeTable[v.Op].auxType != auxSymValAndOff {
+ v.Fatalf("op %s doesn't have a ValAndOff aux field", v.Op)
+ }
+ return ValAndOff(v.AuxInt)
+}
+
+func (v *Value) AuxArm64BitField() arm64BitField {
+ if opcodeTable[v.Op].auxType != auxARM64BitField {
+ v.Fatalf("op %s doesn't have an ARM64 bitfield aux field", v.Op)
+ }
+ return arm64BitField(v.AuxInt)
+}
+
+// long form print. v# = opcode <type> [aux] args [: reg] (names)
+func (v *Value) LongString() string {
+ if v == nil {
+ return "<NIL VALUE>"
+ }
+ s := fmt.Sprintf("v%d = %s", v.ID, v.Op)
+ s += " <" + v.Type.String() + ">"
+ s += v.auxString()
+ for _, a := range v.Args {
+ s += fmt.Sprintf(" %v", a)
+ }
+ var r []Location
+ if v.Block != nil {
+ r = v.Block.Func.RegAlloc
+ }
+ if int(v.ID) < len(r) && r[v.ID] != nil {
+ s += " : " + r[v.ID].String()
+ }
+ var names []string
+ if v.Block != nil {
+ for name, values := range v.Block.Func.NamedValues {
+ for _, value := range values {
+ if value == v {
+ names = append(names, name.String())
+ break // drop duplicates.
+ }
+ }
+ }
+ }
+ if len(names) != 0 {
+ sort.Strings(names) // Otherwise a source of variation in debugging output.
+ s += " (" + strings.Join(names, ", ") + ")"
+ }
+ return s
+}
+
+func (v *Value) auxString() string {
+ switch opcodeTable[v.Op].auxType {
+ case auxBool:
+ if v.AuxInt == 0 {
+ return " [false]"
+ } else {
+ return " [true]"
+ }
+ case auxInt8:
+ return fmt.Sprintf(" [%d]", v.AuxInt8())
+ case auxInt16:
+ return fmt.Sprintf(" [%d]", v.AuxInt16())
+ case auxInt32:
+ return fmt.Sprintf(" [%d]", v.AuxInt32())
+ case auxInt64, auxInt128:
+ return fmt.Sprintf(" [%d]", v.AuxInt)
+ case auxARM64BitField:
+ lsb := v.AuxArm64BitField().getARM64BFlsb()
+ width := v.AuxArm64BitField().getARM64BFwidth()
+ return fmt.Sprintf(" [lsb=%d,width=%d]", lsb, width)
+ case auxFloat32, auxFloat64:
+ return fmt.Sprintf(" [%g]", v.AuxFloat())
+ case auxString:
+ return fmt.Sprintf(" {%q}", v.Aux)
+ case auxSym, auxCall, auxTyp:
+ if v.Aux != nil {
+ return fmt.Sprintf(" {%v}", v.Aux)
+ }
+ case auxSymOff, auxCallOff, auxTypSize, auxNameOffsetInt8:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ if v.AuxInt != 0 || opcodeTable[v.Op].auxType == auxNameOffsetInt8 {
+ s += fmt.Sprintf(" [%v]", v.AuxInt)
+ }
+ return s
+ case auxSymValAndOff:
+ s := ""
+ if v.Aux != nil {
+ s = fmt.Sprintf(" {%v}", v.Aux)
+ }
+ return s + fmt.Sprintf(" [%s]", v.AuxValAndOff())
+ case auxCCop:
+ return fmt.Sprintf(" {%s}", Op(v.AuxInt))
+ case auxS390XCCMask, auxS390XRotateParams:
+ return fmt.Sprintf(" {%v}", v.Aux)
+ case auxFlagConstant:
+ return fmt.Sprintf(" [%s]", flagConstant(v.AuxInt))
+ }
+ return ""
+}
+
+// If/when midstack inlining is enabled (-l=4), the compiler gets both larger and slower.
+// Not-inlining this method is a help (*Value.reset and *Block.NewValue0 are similar).
+//go:noinline
+func (v *Value) AddArg(w *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w)
+ w.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg2(w1, w2 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2)
+ w1.Uses++
+ w2.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg3(w1, w2, w3 *Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, w1, w2, w3)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg4(w1, w2, w3, w4 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg5(w1, w2, w3, w4, w5 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+}
+
+//go:noinline
+func (v *Value) AddArg6(w1, w2, w3, w4, w5, w6 *Value) {
+ v.Args = append(v.Args, w1, w2, w3, w4, w5, w6)
+ w1.Uses++
+ w2.Uses++
+ w3.Uses++
+ w4.Uses++
+ w5.Uses++
+ w6.Uses++
+}
+
+func (v *Value) AddArgs(a ...*Value) {
+ if v.Args == nil {
+ v.resetArgs() // use argstorage
+ }
+ v.Args = append(v.Args, a...)
+ for _, x := range a {
+ x.Uses++
+ }
+}
+func (v *Value) SetArg(i int, w *Value) {
+ v.Args[i].Uses--
+ v.Args[i] = w
+ w.Uses++
+}
+func (v *Value) SetArgs1(a *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+}
+func (v *Value) SetArgs2(a, b *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+}
+func (v *Value) SetArgs3(a, b, c *Value) {
+ v.resetArgs()
+ v.AddArg(a)
+ v.AddArg(b)
+ v.AddArg(c)
+}
+
+func (v *Value) resetArgs() {
+ for _, a := range v.Args {
+ a.Uses--
+ }
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.argstorage[2] = nil
+ v.Args = v.argstorage[:0]
+}
+
+// reset is called from most rewrite rules.
+// Allowing it to be inlined increases the size
+// of cmd/compile by almost 10%, and slows it down.
+//go:noinline
+func (v *Value) reset(op Op) {
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = op
+ v.resetArgs()
+ v.AuxInt = 0
+ v.Aux = nil
+}
+
+// invalidateRecursively marks a value as invalid (unused)
+// and after decrementing reference counts on its Args,
+// also recursively invalidates any of those whose use
+// count goes to zero. It returns whether any of the
+// invalidated values was marked with IsStmt.
+//
+// BEWARE of doing this *before* you've applied intended
+// updates to SSA.
+func (v *Value) invalidateRecursively() bool {
+ lostStmt := v.Pos.IsStmt() == src.PosIsStmt
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = OpInvalid
+
+ for _, a := range v.Args {
+ a.Uses--
+ if a.Uses == 0 {
+ lost := a.invalidateRecursively()
+ lostStmt = lost || lostStmt
+ }
+ }
+
+ v.argstorage[0] = nil
+ v.argstorage[1] = nil
+ v.argstorage[2] = nil
+ v.Args = v.argstorage[:0]
+
+ v.AuxInt = 0
+ v.Aux = nil
+ return lostStmt
+}
+
+// copyOf is called from rewrite rules.
+// It modifies v to be (Copy a).
+//go:noinline
+func (v *Value) copyOf(a *Value) {
+ if v == a {
+ return
+ }
+ if v.InCache {
+ v.Block.Func.unCache(v)
+ }
+ v.Op = OpCopy
+ v.resetArgs()
+ v.AddArg(a)
+ v.AuxInt = 0
+ v.Aux = nil
+ v.Type = a.Type
+}
+
+// copyInto makes a new value identical to v and adds it to the end of b.
+// Unlike copyIntoWithXPos, this does not check for v.Pos being a statement.
+func (v *Value) copyInto(b *Block) *Value {
+ c := b.NewValue0(v.Pos.WithNotStmt(), v.Op, v.Type) // Lose the position, this causes line number churn otherwise.
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+// copyIntoWithXPos makes a new value identical to v and adds it to the end of b.
+// The supplied position is used as the position of the new value.
+// Because this is used for rematerialization, check for the case where the
+// (rematerialized) input value carried a statement mark while the supplied
+// position (of the instruction using the rematerialized value) does not, and
+// preserve that mark if the two positions share a file and line.
+func (v *Value) copyIntoWithXPos(b *Block, pos src.XPos) *Value {
+ if v.Pos.IsStmt() == src.PosIsStmt && pos.IsStmt() != src.PosIsStmt && v.Pos.SameFileAndLine(pos) {
+ pos = pos.WithIsStmt()
+ }
+ c := b.NewValue0(pos, v.Op, v.Type)
+ c.Aux = v.Aux
+ c.AuxInt = v.AuxInt
+ c.AddArgs(v.Args...)
+ for _, a := range v.Args {
+ if a.Type.IsMemory() {
+ v.Fatalf("can't move a value with a memory arg %s", v.LongString())
+ }
+ }
+ return c
+}
+
+func (v *Value) Logf(msg string, args ...interface{}) { v.Block.Logf(msg, args...) }
+func (v *Value) Log() bool { return v.Block.Log() }
+func (v *Value) Fatalf(msg string, args ...interface{}) {
+ v.Block.Func.fe.Fatalf(v.Pos, msg, args...)
+}
+
+// isGenericIntConst reports whether v is a generic integer constant.
+func (v *Value) isGenericIntConst() bool {
+ return v != nil && (v.Op == OpConst64 || v.Op == OpConst32 || v.Op == OpConst16 || v.Op == OpConst8)
+}
+
+// ResultReg returns the result register assigned to v, in cmd/internal/obj/$ARCH numbering.
+// It is similar to Reg and Reg0, except that it is usable interchangeably for all Value Ops.
+// If you know v.Op, using Reg or Reg0 (as appropriate) will be more efficient.
+func (v *Value) ResultReg() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil reg for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ if pair, ok := reg.(LocPair); ok {
+ reg = pair[0]
+ }
+ if reg == nil {
+ v.Fatalf("nil reg0 for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg returns the register assigned to v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg0 returns the register assigned to the first output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg0() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[0]
+ if reg == nil {
+ v.Fatalf("nil first register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+// Reg1 returns the register assigned to the second output of v, in cmd/internal/obj/$ARCH numbering.
+func (v *Value) Reg1() int16 {
+ reg := v.Block.Func.RegAlloc[v.ID].(LocPair)[1]
+ if reg == nil {
+ v.Fatalf("nil second register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).objNum
+}
+
+func (v *Value) RegName() string {
+ reg := v.Block.Func.RegAlloc[v.ID]
+ if reg == nil {
+ v.Fatalf("nil register for value: %s\n%s\n", v.LongString(), v.Block.Func)
+ }
+ return reg.(*Register).name
+}
+
+// MemoryArg returns the memory argument for the Value.
+// The returned value, if non-nil, will be memory-typed (or a tuple with a memory-typed second part).
+// Otherwise, nil is returned.
+func (v *Value) MemoryArg() *Value {
+ if v.Op == OpPhi {
+ v.Fatalf("MemoryArg on Phi")
+ }
+ na := len(v.Args)
+ if na == 0 {
+ return nil
+ }
+ if m := v.Args[na-1]; m.Type.IsMemory() {
+ return m
+ }
+ return nil
+}
+
+// LackingPos indicates whether v is a value that is unlikely to have a correct
+// position assigned to it. Ignoring such values leads to more user-friendly positions
+// assigned to nearby values and the blocks containing them.
+func (v *Value) LackingPos() bool {
+ // The exact definition of LackingPos is somewhat heuristically defined and may change
+ // in the future, for example if some of these operations are generated more carefully
+ // with respect to their source position.
+ return v.Op == OpVarDef || v.Op == OpVarKill || v.Op == OpVarLive || v.Op == OpPhi ||
+ (v.Op == OpFwdRef || v.Op == OpCopy) && v.Type == types.TypeMem
+}
+
+// removeable reports whether the value v can be removed from the SSA graph entirely
+// if its use count drops to 0.
+func (v *Value) removeable() bool {
+ if v.Type.IsVoid() {
+ // Void ops, like nil pointer checks, must stay.
+ return false
+ }
+ if v.Type.IsMemory() {
+ // We don't need to preserve all memory ops, but we do need
+ // to keep calls at least (because they might have
+ // synchronization operations we can't see).
+ return false
+ }
+ if v.Op.HasSideEffects() {
+ // These are mostly synchronization operations.
+ return false
+ }
+ return true
+}
+
+// TODO(mdempsky): Shouldn't be necessary; see discussion at golang.org/cl/275756
+func (*Value) CanBeAnSSAAux() {}
+
+// AutoVar returns a *Name and int64 representing the auto variable and offset within it
+// where v should be spilled.
+func AutoVar(v *Value) (*ir.Name, int64) {
+ if loc, ok := v.Block.Func.RegAlloc[v.ID].(LocalSlot); ok {
+ if v.Type.Size() > loc.Type.Size() {
+ v.Fatalf("spill/restore type %s doesn't fit in slot type %s", v.Type, loc.Type)
+ }
+ return loc.N, loc.Off
+ }
+ // Assume it is a register, return its spill slot, which needs to be live
+ nameOff := v.Aux.(*AuxNameOffset)
+ return nameOff.Name, nameOff.Offset
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier.go b/src/cmd/compile/internal/ssa/writebarrier.go
new file mode 100644
index 0000000..5120cd1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier.go
@@ -0,0 +1,665 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "fmt"
+)
+
+// A ZeroRegion records parts of an object which are known to be zero.
+// A ZeroRegion only applies to a single memory state.
+// Each bit in mask is set if the corresponding pointer-sized word of
+// the base object is known to be zero.
+// In other words, if mask & (1<<i) != 0, then [base+i*ptrSize, base+(i+1)*ptrSize)
+// is known to be zero.
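+// For example, with an 8-byte pointer size, mask == 0b101 means that
+// [base, base+8) and [base+16, base+24) are known to be zero.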
+type ZeroRegion struct {
+ base *Value
+ mask uint64
+}
+
+// needwb reports whether we need write barrier for store op v.
+// v must be Store/Move/Zero.
+// zeroes provides known zero information (keyed by ID of memory-type values).
+func needwb(v *Value, zeroes map[ID]ZeroRegion) bool {
+ t, ok := v.Aux.(*types.Type)
+ if !ok {
+ v.Fatalf("store aux is not a type: %s", v.LongString())
+ }
+ if !t.HasPointers() {
+ return false
+ }
+ if IsStackAddr(v.Args[0]) {
+ return false // write on stack doesn't need write barrier
+ }
+ if v.Op == OpMove && IsReadOnlyGlobalAddr(v.Args[1]) {
+ if mem, ok := IsNewObject(v.Args[0]); ok && mem == v.MemoryArg() {
+ // Copying data from readonly memory into a fresh object doesn't need a write barrier.
+ return false
+ }
+ }
+ if v.Op == OpStore && IsGlobalAddr(v.Args[1]) {
+ // Storing pointers to non-heap locations into zeroed memory doesn't need a write barrier.
+ ptr := v.Args[0]
+ var off int64
+ size := v.Aux.(*types.Type).Size()
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ ptrSize := v.Block.Func.Config.PtrSize
+ if off%ptrSize != 0 || size%ptrSize != 0 {
+ v.Fatalf("unaligned pointer write")
+ }
+ if off < 0 || off+size > 64*ptrSize {
+ // write goes off end of tracked offsets
+ return true
+ }
+ z := zeroes[v.MemoryArg().ID]
+ if ptr != z.base {
+ return true
+ }
+ for i := off; i < off+size; i += ptrSize {
+ if z.mask>>uint(i/ptrSize)&1 == 0 {
+ return true // not known to be zero
+ }
+ }
+ // All written locations are known to be zero - write barrier not needed.
+ return false
+ }
+ return true
+}
+
+// writebarrier pass inserts write barriers for store ops (Store, Move, Zero)
+// when necessary (the condition above). It rewrites store ops to branches
+// and runtime calls, like
+//
+// if writeBarrier.enabled {
+// gcWriteBarrier(ptr, val) // Not a regular Go call
+// } else {
+// *ptr = val
+// }
+//
+// A sequence of WB stores for many pointer fields of a single type will
+// be emitted together, with a single branch.
+func writebarrier(f *Func) {
+ if !f.fe.UseWriteBarrier() {
+ return
+ }
+
+ var sb, sp, wbaddr, const0 *Value
+ var typedmemmove, typedmemclr, gcWriteBarrier *obj.LSym
+ var stores, after []*Value
+ var sset *sparseSet
+ var storeNumber []int32
+
+ zeroes := f.computeZeroMap()
+ for _, b := range f.Blocks { // range loop is safe since the blocks we added contain no stores to expand
+ // first, identify all the stores that need to insert a write barrier.
+ // mark them with WB ops temporarily. record presence of WB ops.
+ nWBops := 0 // count of temporarily created WB ops remaining to be rewritten in the current block
+ for _, v := range b.Values {
+ switch v.Op {
+ case OpStore, OpMove, OpZero:
+ if needwb(v, zeroes) {
+ switch v.Op {
+ case OpStore:
+ v.Op = OpStoreWB
+ case OpMove:
+ v.Op = OpMoveWB
+ case OpZero:
+ v.Op = OpZeroWB
+ }
+ nWBops++
+ }
+ }
+ }
+ if nWBops == 0 {
+ continue
+ }
+
+ if wbaddr == nil {
+ // lazily initialize global values for write barrier test and calls
+ // find SB and SP values in entry block
+ initpos := f.Entry.Pos
+ sp, sb = f.spSb()
+ wbsym := f.fe.Syslook("writeBarrier")
+ wbaddr = f.Entry.NewValue1A(initpos, OpAddr, f.Config.Types.UInt32Ptr, wbsym, sb)
+ gcWriteBarrier = f.fe.Syslook("gcWriteBarrier")
+ typedmemmove = f.fe.Syslook("typedmemmove")
+ typedmemclr = f.fe.Syslook("typedmemclr")
+ const0 = f.ConstInt32(f.Config.Types.UInt32, 0)
+
+ // allocate auxiliary data structures for computing store order
+ sset = f.newSparseSet(f.NumValues())
+ defer f.retSparseSet(sset)
+ storeNumber = make([]int32, f.NumValues())
+ }
+
+ // order values in store order
+ b.Values = storeOrder(b.Values, sset, storeNumber)
+
+ firstSplit := true
+ again:
+ // find the start and end of the last contiguous WB store sequence.
+ // a branch will be inserted there. values after it will be moved
+ // to a new block.
+ var last *Value
+ var start, end int
+ values := b.Values
+ FindSeq:
+ for i := len(values) - 1; i >= 0; i-- {
+ w := values[i]
+ switch w.Op {
+ case OpStoreWB, OpMoveWB, OpZeroWB:
+ start = i
+ if last == nil {
+ last = w
+ end = i + 1
+ }
+ case OpVarDef, OpVarLive, OpVarKill:
+ continue
+ default:
+ if last == nil {
+ continue
+ }
+ break FindSeq
+ }
+ }
+ stores = append(stores[:0], b.Values[start:end]...) // copy to avoid aliasing
+ after = append(after[:0], b.Values[end:]...)
+ b.Values = b.Values[:start]
+
+ // find the memory before the WB stores
+ mem := stores[0].MemoryArg()
+ pos := stores[0].Pos
+ bThen := f.NewBlock(BlockPlain)
+ bElse := f.NewBlock(BlockPlain)
+ bEnd := f.NewBlock(b.Kind)
+ bThen.Pos = pos
+ bElse.Pos = pos
+ bEnd.Pos = b.Pos
+ b.Pos = pos
+
+ // set up control flow for end block
+ bEnd.CopyControls(b)
+ bEnd.Likely = b.Likely
+ for _, e := range b.Succs {
+ bEnd.Succs = append(bEnd.Succs, e)
+ e.b.Preds[e.i].b = bEnd
+ }
+
+ // set up control flow for write barrier test
+ // load word, test word, avoiding partial register write from load byte.
+ cfgtypes := &f.Config.Types
+ flag := b.NewValue2(pos, OpLoad, cfgtypes.UInt32, wbaddr, mem)
+ flag = b.NewValue2(pos, OpNeq32, cfgtypes.Bool, flag, const0)
+ b.Kind = BlockIf
+ b.SetControl(flag)
+ b.Likely = BranchUnlikely
+ b.Succs = b.Succs[:0]
+ b.AddEdgeTo(bThen)
+ b.AddEdgeTo(bElse)
+ // TODO: For OpStoreWB and the buffered write barrier,
+ // we could move the write out of the write barrier,
+ // which would lead to fewer branches. We could do
+ // something similar to OpZeroWB, since the runtime
+ // could provide just the barrier half and then we
+ // could unconditionally do an OpZero (which could
+ // also generate better zeroing code). OpMoveWB is
+ // trickier and would require changing how
+ // cgoCheckMemmove works.
+ bThen.AddEdgeTo(bEnd)
+ bElse.AddEdgeTo(bEnd)
+
+ // for each write barrier store, append write barrier version to bThen
+ // and simple store version to bElse
+ memThen := mem
+ memElse := mem
+
+ // If the source of a MoveWB is volatile (will be clobbered by a
+ // function call), we need to copy it to a temporary location, as
+ // marshaling the args of typedmemmove might clobber the value we're
+ // trying to move.
+ // Look for volatile source, copy it to temporary before we emit any
+ // call.
+ // It is unlikely to have more than one of them. Just do a linear
+ // search instead of using a map.
+ type volatileCopy struct {
+ src *Value // address of original volatile value
+ tmp *Value // address of temporary we've copied the volatile value into
+ }
+ var volatiles []volatileCopy
+ copyLoop:
+ for _, w := range stores {
+ if w.Op == OpMoveWB {
+ val := w.Args[1]
+ if isVolatile(val) {
+ for _, c := range volatiles {
+ if val == c.src {
+ continue copyLoop // already copied
+ }
+ }
+
+ t := val.Type.Elem()
+ tmp := f.fe.Auto(w.Pos, t)
+ memThen = bThen.NewValue1A(w.Pos, OpVarDef, types.TypeMem, tmp, memThen)
+ tmpaddr := bThen.NewValue2A(w.Pos, OpLocalAddr, t.PtrTo(), tmp, sp, memThen)
+ siz := t.Size()
+ memThen = bThen.NewValue3I(w.Pos, OpMove, types.TypeMem, siz, tmpaddr, val, memThen)
+ memThen.Aux = t
+ volatiles = append(volatiles, volatileCopy{val, tmpaddr})
+ }
+ }
+ }
+
+ for _, w := range stores {
+ ptr := w.Args[0]
+ pos := w.Pos
+
+ var fn *obj.LSym
+ var typ *obj.LSym
+ var val *Value
+ switch w.Op {
+ case OpStoreWB:
+ val = w.Args[1]
+ nWBops--
+ case OpMoveWB:
+ fn = typedmemmove
+ val = w.Args[1]
+ typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
+ nWBops--
+ case OpZeroWB:
+ fn = typedmemclr
+ typ = reflectdata.TypeLinksym(w.Aux.(*types.Type))
+ nWBops--
+ case OpVarDef, OpVarLive, OpVarKill:
+ }
+
+ // then block: emit write barrier call
+ switch w.Op {
+ case OpStoreWB, OpMoveWB, OpZeroWB:
+ if w.Op == OpStoreWB {
+ memThen = bThen.NewValue3A(pos, OpWB, types.TypeMem, gcWriteBarrier, ptr, val, memThen)
+ } else {
+ srcval := val
+ if w.Op == OpMoveWB && isVolatile(srcval) {
+ for _, c := range volatiles {
+ if srcval == c.src {
+ srcval = c.tmp
+ break
+ }
+ }
+ }
+ memThen = wbcall(pos, bThen, fn, typ, ptr, srcval, memThen, sp, sb)
+ }
+ // Note that we set up a writebarrier function call.
+ f.fe.SetWBPos(pos)
+ case OpVarDef, OpVarLive, OpVarKill:
+ memThen = bThen.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memThen)
+ }
+
+ // else block: normal store
+ switch w.Op {
+ case OpStoreWB:
+ memElse = bElse.NewValue3A(pos, OpStore, types.TypeMem, w.Aux, ptr, val, memElse)
+ case OpMoveWB:
+ memElse = bElse.NewValue3I(pos, OpMove, types.TypeMem, w.AuxInt, ptr, val, memElse)
+ memElse.Aux = w.Aux
+ case OpZeroWB:
+ memElse = bElse.NewValue2I(pos, OpZero, types.TypeMem, w.AuxInt, ptr, memElse)
+ memElse.Aux = w.Aux
+ case OpVarDef, OpVarLive, OpVarKill:
+ memElse = bElse.NewValue1A(pos, w.Op, types.TypeMem, w.Aux, memElse)
+ }
+ }
+
+ // mark volatile temps dead
+ for _, c := range volatiles {
+ tmpNode := c.tmp.Aux
+ memThen = bThen.NewValue1A(memThen.Pos, OpVarKill, types.TypeMem, tmpNode, memThen)
+ }
+
+ // merge memory
+ // Splice memory Phi into the last memory of the original sequence,
+ // which may be used in subsequent blocks. Other memories in the
+ // sequence must be dead after this block since there can be only
+ // one memory live.
+ bEnd.Values = append(bEnd.Values, last)
+ last.Block = bEnd
+ last.reset(OpPhi)
+ last.Pos = last.Pos.WithNotStmt()
+ last.Type = types.TypeMem
+ last.AddArg(memThen)
+ last.AddArg(memElse)
+ for _, w := range stores {
+ if w != last {
+ w.resetArgs()
+ }
+ }
+ for _, w := range stores {
+ if w != last {
+ f.freeValue(w)
+ }
+ }
+
+ // put values after the store sequence into the end block
+ bEnd.Values = append(bEnd.Values, after...)
+ for _, w := range after {
+ w.Block = bEnd
+ }
+
+ // Preemption is unsafe between loading the write
+ // barrier-enabled flag and performing the write
+ // because that would allow a GC phase transition,
+ // which would invalidate the flag. Remember the
+ // conditional block so liveness analysis can disable
+ // safe-points. This is somewhat subtle because we're
+ // splitting b bottom-up.
+ if firstSplit {
+ // Add b itself.
+ b.Func.WBLoads = append(b.Func.WBLoads, b)
+ firstSplit = false
+ } else {
+ // We've already split b, so we just pushed a
+ // write barrier test into bEnd.
+ b.Func.WBLoads = append(b.Func.WBLoads, bEnd)
+ }
+
+ // if we have more stores in this block, do this block again
+ if nWBops > 0 {
+ goto again
+ }
+ }
+}
+
+// computeZeroMap returns a map from an ID of a memory value to
+// a set of locations that are known to be zeroed at that memory value.
+func (f *Func) computeZeroMap() map[ID]ZeroRegion {
+ ptrSize := f.Config.PtrSize
+ // Keep track of which parts of memory are known to be zero.
+ // This helps with removing write barriers for various initialization patterns.
+ // This analysis is conservative. We only keep track, for each memory state, of
+ // which of the first 64 words of a single object are known to be zero.
+ zeroes := map[ID]ZeroRegion{}
+ // Find new objects.
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if mem, ok := IsNewObject(v); ok {
+ nptr := v.Type.Elem().Size() / ptrSize
+ if nptr > 64 {
+ nptr = 64
+ }
+ zeroes[mem.ID] = ZeroRegion{base: v, mask: 1<<uint(nptr) - 1}
+ }
+ }
+ }
+ // Find stores to those new objects.
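+ // Propagate the zero information forward through chains of stores,
+ // iterating until no entry in the map changes (a fixed point).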
+ for {
+ changed := false
+ for _, b := range f.Blocks {
+ // Note: iterating forwards helps convergence, as values are
+ // typically (but not always!) in store order.
+ for _, v := range b.Values {
+ if v.Op != OpStore {
+ continue
+ }
+ z, ok := zeroes[v.MemoryArg().ID]
+ if !ok {
+ continue
+ }
+ ptr := v.Args[0]
+ var off int64
+ size := v.Aux.(*types.Type).Size()
+ for ptr.Op == OpOffPtr {
+ off += ptr.AuxInt
+ ptr = ptr.Args[0]
+ }
+ if ptr != z.base {
+ // Different base object - we don't know anything.
+ // We could even be writing to the base object we know
+ // about, but through an aliased but offset pointer.
+ // So we have to throw all the zero information we have away.
+ continue
+ }
+ // Round to cover any partially written pointer slots.
+ // Pointer writes should never be unaligned like this, but non-pointer
+ // writes to pointer-containing types will do this.
+ if d := off % ptrSize; d != 0 {
+ off -= d
+ size += d
+ }
+ if d := size % ptrSize; d != 0 {
+ size += ptrSize - d
+ }
+ // Clip to the 64 words that we track.
+ min := off
+ max := off + size
+ if min < 0 {
+ min = 0
+ }
+ if max > 64*ptrSize {
+ max = 64 * ptrSize
+ }
+ // Clear bits for parts that we are writing (and hence
+ // will no longer necessarily be zero).
+ for i := min; i < max; i += ptrSize {
+ bit := i / ptrSize
+ z.mask &^= 1 << uint(bit)
+ }
+ if z.mask == 0 {
+ // No more known zeros - don't bother keeping.
+ continue
+ }
+ // Save updated known zero contents for new store.
+ if zeroes[v.ID] != z {
+ zeroes[v.ID] = z
+ changed = true
+ }
+ }
+ }
+ if !changed {
+ break
+ }
+ }
+ if f.pass.debug > 0 {
+ fmt.Printf("func %s\n", f.Name)
+ for mem, z := range zeroes {
+ fmt.Printf(" memory=v%d ptr=%v zeromask=%b\n", mem, z.base, z.mask)
+ }
+ }
+ return zeroes
+}
+
+// wbcall emits a write barrier runtime call in b and returns the resulting memory value.
+func wbcall(pos src.XPos, b *Block, fn, typ *obj.LSym, ptr, val, mem, sp, sb *Value) *Value {
+ config := b.Func.Config
+
+ var wbargs []*Value
+ // TODO (register args) this is a bit of a hack.
+ inRegs := b.Func.ABIDefault == b.Func.ABI1 && len(config.intParamRegs) >= 3
+
+ // put arguments on stack
+ off := config.ctxt.FixedFrameSize()
+
+ var argTypes []*types.Type
+ if typ != nil { // for typedmemmove
+ taddr := b.NewValue1A(pos, OpAddr, b.Func.Config.Types.Uintptr, typ, sb)
+ argTypes = append(argTypes, b.Func.Config.Types.Uintptr)
+ off = round(off, taddr.Type.Alignment())
+ if inRegs {
+ wbargs = append(wbargs, taddr)
+ } else {
+ arg := b.NewValue1I(pos, OpOffPtr, taddr.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, taddr, mem)
+ }
+ off += taddr.Type.Size()
+ }
+
+ argTypes = append(argTypes, ptr.Type)
+ off = round(off, ptr.Type.Alignment())
+ if inRegs {
+ wbargs = append(wbargs, ptr)
+ } else {
+ arg := b.NewValue1I(pos, OpOffPtr, ptr.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, ptr.Type, arg, ptr, mem)
+ }
+ off += ptr.Type.Size()
+
+ if val != nil {
+ argTypes = append(argTypes, val.Type)
+ off = round(off, val.Type.Alignment())
+ if inRegs {
+ wbargs = append(wbargs, val)
+ } else {
+ arg := b.NewValue1I(pos, OpOffPtr, val.Type.PtrTo(), off, sp)
+ mem = b.NewValue3A(pos, OpStore, types.TypeMem, val.Type, arg, val, mem)
+ }
+ off += val.Type.Size()
+ }
+ off = round(off, config.PtrSize)
+ wbargs = append(wbargs, mem)
+
+ // issue call
+ call := b.NewValue0A(pos, OpStaticCall, types.TypeResultMem, StaticAuxCall(fn, b.Func.ABIDefault.ABIAnalyzeTypes(nil, argTypes, nil)))
+ call.AddArgs(wbargs...)
+ call.AuxInt = off - config.ctxt.FixedFrameSize()
+ return b.NewValue1I(pos, OpSelectN, types.TypeMem, 0, call)
+}
+
+// round rounds o up to a multiple of r; r must be a power of 2.
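+// For example, round(9, 8) == 16 and round(16, 8) == 16.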
+func round(o int64, r int64) int64 {
+ return (o + r - 1) &^ (r - 1)
+}
+
+// IsStackAddr reports whether v is known to be an address of a stack slot.
+func IsStackAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr, OpGetCallerSP:
+ return true
+ }
+ return false
+}
+
+// IsGlobalAddr reports whether v is known to be an address of a global (or nil).
+func IsGlobalAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ if v.Op == OpAddr && v.Args[0].Op == OpSB {
+ return true // address of a global
+ }
+ if v.Op == OpConstNil {
+ return true
+ }
+ if v.Op == OpLoad && IsReadOnlyGlobalAddr(v.Args[0]) {
+ return true // loading from a read-only global - the resulting address can't be a heap address.
+ }
+ return false
+}
+
+// IsReadOnlyGlobalAddr reports whether v is known to be an address of a read-only global.
+func IsReadOnlyGlobalAddr(v *Value) bool {
+ if v.Op == OpConstNil {
+ // Nil pointers are read only. See issue 33438.
+ return true
+ }
+ if v.Op == OpAddr && v.Aux.(*obj.LSym).Type == objabi.SRODATA {
+ return true
+ }
+ return false
+}
+
+// IsNewObject reports whether v is a pointer to a freshly allocated & zeroed object.
+// If so, it also returns the memory state mem at which v is known to be zero.
+func IsNewObject(v *Value) (mem *Value, ok bool) {
+ f := v.Block.Func
+ c := f.Config
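+ // Under the register ABI, the result of runtime.newobject is an
+ // OpSelectN of the call; otherwise it is an OpLoad from the call's
+ // stack result slot.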
+ if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
+ if v.Op != OpSelectN || v.AuxInt != 0 {
+ return nil, false
+ }
+ // Find the memory
+ for _, w := range v.Block.Values {
+ if w.Op == OpSelectN && w.AuxInt == 1 && w.Args[0] == v.Args[0] {
+ mem = w
+ break
+ }
+ }
+ if mem == nil {
+ return nil, false
+ }
+ } else {
+ if v.Op != OpLoad {
+ return nil, false
+ }
+ mem = v.MemoryArg()
+ if mem.Op != OpSelectN {
+ return nil, false
+ }
+ if mem.Type != types.TypeMem {
+ return nil, false
+ } // assume it is the right selection if true
+ }
+ call := mem.Args[0]
+ if call.Op != OpStaticCall {
+ return nil, false
+ }
+ if !isSameCall(call.Aux, "runtime.newobject") {
+ return nil, false
+ }
+ if f.ABIDefault == f.ABI1 && len(c.intParamRegs) >= 1 {
+ if v.Args[0] == call {
+ return mem, true
+ }
+ return nil, false
+ }
+ if v.Args[0].Op != OpOffPtr {
+ return nil, false
+ }
+ if v.Args[0].Args[0].Op != OpSP {
+ return nil, false
+ }
+ if v.Args[0].AuxInt != c.ctxt.FixedFrameSize()+c.RegSize { // offset of return value
+ return nil, false
+ }
+ return mem, true
+}
+
+// IsSanitizerSafeAddr reports whether v is known to be an address
+// that doesn't need instrumentation.
+func IsSanitizerSafeAddr(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy {
+ v = v.Args[0]
+ }
+ switch v.Op {
+ case OpSP, OpLocalAddr, OpSelectNAddr:
+ // Stack addresses are always safe.
+ return true
+ case OpITab, OpStringPtr, OpGetClosurePtr:
+ // Itabs, string data, and closure fields are
+ // read-only once initialized.
+ return true
+ case OpAddr:
+ return v.Aux.(*obj.LSym).Type == objabi.SRODATA || v.Aux.(*obj.LSym).Type == objabi.SLIBFUZZER_EXTRA_COUNTER
+ }
+ return false
+}
+
+// isVolatile reports whether v is a pointer to argument region on stack which
+// will be clobbered by a function call.
+func isVolatile(v *Value) bool {
+ for v.Op == OpOffPtr || v.Op == OpAddPtr || v.Op == OpPtrIndex || v.Op == OpCopy || v.Op == OpSelectNAddr {
+ v = v.Args[0]
+ }
+ return v.Op == OpSP
+}
diff --git a/src/cmd/compile/internal/ssa/writebarrier_test.go b/src/cmd/compile/internal/ssa/writebarrier_test.go
new file mode 100644
index 0000000..0b11afc
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/writebarrier_test.go
@@ -0,0 +1,56 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/compile/internal/types"
+ "testing"
+)
+
+func TestWriteBarrierStoreOrder(t *testing.T) {
+ // Make sure the writebarrier phase works even when StoreWB ops are not in dependency order.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr1", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb2", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "wb1"),
+ Valu("wb1", OpStore, types.TypeMem, 0, ptrType, "addr1", "v", "start"), // wb1 and wb2 are out of order
+ Goto("exit")),
+ Bloc("exit",
+ Exit("wb2")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
+
+func TestWriteBarrierPhi(t *testing.T) {
+ // Make sure the writebarrier phase works for a single-block loop, where
+ // a Phi op takes the store in the same block as an argument.
+ // See issue #19067.
+ c := testConfig(t)
+ ptrType := c.config.Types.BytePtr
+ fun := c.Fun("entry",
+ Bloc("entry",
+ Valu("start", OpInitMem, types.TypeMem, 0, nil),
+ Valu("sb", OpSB, c.config.Types.Uintptr, 0, nil),
+ Valu("sp", OpSP, c.config.Types.Uintptr, 0, nil),
+ Goto("loop")),
+ Bloc("loop",
+ Valu("phi", OpPhi, types.TypeMem, 0, nil, "start", "wb"),
+ Valu("v", OpConstNil, ptrType, 0, nil),
+ Valu("addr", OpAddr, ptrType, 0, nil, "sb"),
+ Valu("wb", OpStore, types.TypeMem, 0, ptrType, "addr", "v", "phi"), // has write barrier
+ Goto("loop")))
+
+ CheckFunc(fun.f)
+ writebarrier(fun.f)
+ CheckFunc(fun.f)
+}
diff --git a/src/cmd/compile/internal/ssa/xposmap.go b/src/cmd/compile/internal/ssa/xposmap.go
new file mode 100644
index 0000000..93582e1
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/xposmap.go
@@ -0,0 +1,116 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import (
+ "cmd/internal/src"
+ "fmt"
+)
+
+type lineRange struct {
+ first, last uint32
+}
+
+// An xposmap is a map from fileindex and line of src.XPos to int32,
+// implemented sparsely to save space (column and statement status are ignored).
+// The sparse skeleton is constructed once, and then reused by ssa phases
+// that (re)move values with statements attached.
+type xposmap struct {
+ // A map from file index to maps from line range to integers (block numbers)
+ maps map[int32]*biasedSparseMap
+ // The next two fields provide a single-item cache for the common case of repeated lines from the same file.
+ lastIndex int32 // -1 means no entry in cache
+ lastMap *biasedSparseMap // map found at maps[lastIndex]
+}
+
+// newXposmap constructs an xposmap valid for inputs which have a file index in the keys of x,
+// and line numbers in the range x[file index].
+// The resulting xposmap will panic if a caller attempts to set or add an XPos not in that range.
+func newXposmap(x map[int]lineRange) *xposmap {
+ maps := make(map[int32]*biasedSparseMap)
+ for i, p := range x {
+ maps[int32(i)] = newBiasedSparseMap(int(p.first), int(p.last))
+ }
+ return &xposmap{maps: maps, lastIndex: -1} // zero for the rest is okay
+}
+
+// clear removes data from the map but leaves the sparse skeleton.
+func (m *xposmap) clear() {
+ for _, l := range m.maps {
+ if l != nil {
+ l.clear()
+ }
+ }
+ m.lastIndex = -1
+ m.lastMap = nil
+}
+
+// mapFor returns the line range map for a given file index.
+func (m *xposmap) mapFor(index int32) *biasedSparseMap {
+ if index == m.lastIndex {
+ return m.lastMap
+ }
+ mf := m.maps[index]
+ m.lastIndex = index
+ m.lastMap = mf
+ return mf
+}
+
+// set inserts p->v into the map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+func (m *xposmap) set(p src.XPos, v int32) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ panic(fmt.Sprintf("xposmap.set(%d), file index not found in map\n", p.FileIndex()))
+ }
+ s.set(p.Line(), v)
+}
+
+// get returns the int32 associated with the file index and line of p.
+func (m *xposmap) get(p src.XPos) int32 {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return -1
+ }
+ return s.get(p.Line())
+}
+
+// add adds p to m, treating m as a set instead of as a map.
+// If p does not fall within the set of fileindex->lineRange used to construct m, this will panic.
+// Use clear() in between set/map interpretations of m.
+func (m *xposmap) add(p src.XPos) {
+ m.set(p, 0)
+}
+
+// contains returns whether the file index and line of p are in m,
+// treating m as a set instead of as a map.
+func (m *xposmap) contains(p src.XPos) bool {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return false
+ }
+ return s.contains(p.Line())
+}
+
+// remove removes the file index and line for p from m,
+// whether m is currently treated as a map or set.
+func (m *xposmap) remove(p src.XPos) {
+ s := m.mapFor(p.FileIndex())
+ if s == nil {
+ return
+ }
+ s.remove(p.Line())
+}
+
+// foreachEntry applies f to each (fileindex, line, value) triple in m.
+func (m *xposmap) foreachEntry(f func(j int32, l uint, v int32)) {
+ for j, mm := range m.maps {
+ s := mm.size()
+ for i := 0; i < s; i++ {
+ l, v := mm.getEntry(i)
+ f(j, l, v)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zcse.go b/src/cmd/compile/internal/ssa/zcse.go
new file mode 100644
index 0000000..e08272c
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zcse.go
@@ -0,0 +1,79 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "cmd/compile/internal/types"
+
+// zcse does an initial pass of common-subexpression elimination on the
+// function for values with zero arguments to allow the more expensive cse
+// to begin with a reduced number of values. Values are just relinked,
+// nothing is deleted. A subsequent deadcode pass is required to actually
+// remove duplicate expressions.
+func zcse(f *Func) {
+ vals := make(map[vkey]*Value)
+
+ for _, b := range f.Blocks {
+ for i := 0; i < len(b.Values); i++ {
+ v := b.Values[i]
+ if opcodeTable[v.Op].argLen == 0 {
+ key := vkey{v.Op, keyFor(v), v.Aux, v.Type}
+ if vals[key] == nil {
+ vals[key] = v
+ if b != f.Entry {
+ // Move v to the entry block so it will dominate every block
+ // where we might use it. This prevents the need for any dominator
+ // calculations in this pass.
+ v.Block = f.Entry
+ f.Entry.Values = append(f.Entry.Values, v)
+ last := len(b.Values) - 1
+ b.Values[i] = b.Values[last]
+ b.Values[last] = nil
+ b.Values = b.Values[:last]
+
+ i-- // process b.Values[i] again
+ }
+ }
+ }
+ }
+ }
+
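+ // Rewrite every use of a duplicate zero-arg value to refer to the
+ // canonical copy recorded in vals.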
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ for i, a := range v.Args {
+ if opcodeTable[a.Op].argLen == 0 {
+ key := vkey{a.Op, keyFor(a), a.Aux, a.Type}
+ if rv, ok := vals[key]; ok {
+ v.SetArg(i, rv)
+ }
+ }
+ }
+ }
+ }
+}
+
+// vkey is a type used to uniquely identify a zero arg value.
+type vkey struct {
+ op Op
+ ai int64 // aux int
+ ax Aux // aux
+ t *types.Type // type
+}
+
+// keyFor returns the AuxInt portion of a key structure uniquely identifying a
+// zero arg value for the supported ops.
+func keyFor(v *Value) int64 {
+ switch v.Op {
+ case OpConst64, OpConst64F, OpConst32F:
+ return v.AuxInt
+ case OpConst32:
+ return int64(int32(v.AuxInt))
+ case OpConst16:
+ return int64(int16(v.AuxInt))
+ case OpConst8, OpConstBool:
+ return int64(int8(v.AuxInt))
+ default:
+ return v.AuxInt
+ }
+}
diff --git a/src/cmd/compile/internal/ssa/zeroextension_test.go b/src/cmd/compile/internal/ssa/zeroextension_test.go
new file mode 100644
index 0000000..2e31621
--- /dev/null
+++ b/src/cmd/compile/internal/ssa/zeroextension_test.go
@@ -0,0 +1,34 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssa
+
+import "testing"
+
+type extTest struct {
+ f func(uint64, uint64) uint64
+ arg1 uint64
+ arg2 uint64
+ res uint64
+ name string
+}
+
+var extTests = [...]extTest{
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 / op2)) }, arg1: 0x1, arg2: 0xfffffffeffffffff, res: 0xffffffff, name: "div"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 * op2)) }, arg1: 0x1, arg2: 0x100000001, res: 0x1, name: "mul"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 + op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x0, name: "add"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 - op2)) }, arg1: 0x1, arg2: 0xeeeeeeeeffffffff, res: 0x2, name: "sub"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 | op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xffffffff, name: "or"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 ^ op2)) }, arg1: 0x100000000000001, arg2: 0xfffffffffffffff, res: 0xfffffffe, name: "xor"},
+ {f: func(a, b uint64) uint64 { op1 := int32(a); op2 := int32(b); return uint64(uint32(op1 & op2)) }, arg1: 0x100000000000001, arg2: 0x100000000000001, res: 0x1, name: "and"},
+}
+
+func TestZeroExtension(t *testing.T) {
+ for _, x := range extTests {
+ r := x.f(x.arg1, x.arg2)
+ if x.res != r {
+ t.Errorf("%s: got %d want %d", x.name, r, x.res)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/abi.go b/src/cmd/compile/internal/ssagen/abi.go
new file mode 100644
index 0000000..3a653e4
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/abi.go
@@ -0,0 +1,434 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "fmt"
+ "internal/buildcfg"
+ "io/ioutil"
+ "log"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+)
+
+// SymABIs records information provided by the assembler about symbol
+// definition ABIs and reference ABIs.
+type SymABIs struct {
+ defs map[string]obj.ABI
+ refs map[string]obj.ABISet
+
+ localPrefix string
+}
+
+func NewSymABIs(myimportpath string) *SymABIs {
+ var localPrefix string
+ if myimportpath != "" {
+ localPrefix = objabi.PathToPrefix(myimportpath) + "."
+ }
+
+ return &SymABIs{
+ defs: make(map[string]obj.ABI),
+ refs: make(map[string]obj.ABISet),
+ localPrefix: localPrefix,
+ }
+}
+
+// canonicalize returns the canonical name used for a linker symbol in
+// s's maps. Symbols in this package may be written either as "".X or
+// with the package's import path already in the symbol. This rewrites
+// both to `"".`, which matches compiler-generated linker symbol names.
+func (s *SymABIs) canonicalize(linksym string) string {
+ // If the symbol is already prefixed with localPrefix,
+ // rewrite it to start with "" so it matches the
+ // compiler's internal symbol names.
+ if s.localPrefix != "" && strings.HasPrefix(linksym, s.localPrefix) {
+ return `"".` + linksym[len(s.localPrefix):]
+ }
+ return linksym
+}
+
+// ReadSymABIs reads a symabis file that specifies definitions and
+// references of text symbols by ABI.
+//
+// The symabis format is a set of lines, where each line is a sequence
+// of whitespace-separated fields. The first field is a verb and is
+// either "def" for defining a symbol ABI or "ref" for referencing a
+// symbol using an ABI. For both "def" and "ref", the second field is
+// the symbol name and the third field is the ABI name, as one of the
+// named cmd/internal/obj.ABI constants.
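+//
+// For example (symbol names here are purely illustrative):
+//
+//  def mypkg.MyAsmFunc ABI0
+//  ref mypkg.goHelper ABIInternal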
+func (s *SymABIs) ReadSymABIs(file string) {
+ data, err := ioutil.ReadFile(file)
+ if err != nil {
+ log.Fatalf("-symabis: %v", err)
+ }
+
+ for lineNum, line := range strings.Split(string(data), "\n") {
+ lineNum++ // 1-based
+ line = strings.TrimSpace(line)
+ if line == "" || strings.HasPrefix(line, "#") {
+ continue
+ }
+
+ parts := strings.Fields(line)
+ switch parts[0] {
+ case "def", "ref":
+ // Parse line.
+ if len(parts) != 3 {
+ log.Fatalf(`%s:%d: invalid symabi: syntax is "%s sym abi"`, file, lineNum, parts[0])
+ }
+ sym, abistr := parts[1], parts[2]
+ abi, valid := obj.ParseABI(abistr)
+ if !valid {
+ log.Fatalf(`%s:%d: invalid symabi: unknown abi "%s"`, file, lineNum, abistr)
+ }
+
+ sym = s.canonicalize(sym)
+
+ // Record for later.
+ if parts[0] == "def" {
+ s.defs[sym] = abi
+ } else {
+ s.refs[sym] |= obj.ABISetOf(abi)
+ }
+ default:
+ log.Fatalf(`%s:%d: invalid symabi type "%s"`, file, lineNum, parts[0])
+ }
+ }
+}
+
+// GenABIWrappers applies ABI information to Funcs and generates ABI
+// wrapper functions where necessary.
+func (s *SymABIs) GenABIWrappers() {
+ // For cgo exported symbols, we tell the linker to export the
+ // definition ABI to C. That also means that we don't want to
+ // create ABI wrappers even if there's a linkname.
+ //
+ // TODO(austin): Maybe we want to create the ABI wrappers, but
+ // ensure the linker exports the right ABI definition under
+ // the unmangled name?
+ cgoExports := make(map[string][]*[]string)
+ for i, prag := range typecheck.Target.CgoPragmas {
+ switch prag[0] {
+ case "cgo_export_static", "cgo_export_dynamic":
+ symName := s.canonicalize(prag[1])
+ pprag := &typecheck.Target.CgoPragmas[i]
+ cgoExports[symName] = append(cgoExports[symName], pprag)
+ }
+ }
+
+ // Apply ABI defs and refs to Funcs and generate wrappers.
+ //
+ // This may generate new decls for the wrappers, but we
+ // specifically *don't* want to visit those, lest we create
+ // wrappers for wrappers.
+ for _, fn := range typecheck.Target.Decls {
+ if fn.Op() != ir.ODCLFUNC {
+ continue
+ }
+ fn := fn.(*ir.Func)
+ nam := fn.Nname
+ if ir.IsBlank(nam) {
+ continue
+ }
+ sym := nam.Sym()
+ var symName string
+ if sym.Linkname != "" {
+ symName = s.canonicalize(sym.Linkname)
+ } else {
+ // These names will already be canonical.
+ symName = sym.Pkg.Prefix + "." + sym.Name
+ }
+
+ // Apply definitions.
+ defABI, hasDefABI := s.defs[symName]
+ if hasDefABI {
+ if len(fn.Body) != 0 {
+ base.ErrorfAt(fn.Pos(), "%v defined in both Go and assembly", fn)
+ }
+ fn.ABI = defABI
+ }
+
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ // CgoUnsafeArgs indicates the function (or its callee) uses
+ // offsets to dispatch arguments, which currently assumes the ABI0
+ // frame layout. Pin it to ABI0.
+ fn.ABI = obj.ABI0
+ }
+
+ // If cgo-exported, add the definition ABI to the cgo
+ // pragmas.
+ cgoExport := cgoExports[symName]
+ for _, pprag := range cgoExport {
+ // The export pragmas have the form:
+ //
+ // cgo_export_* <local> [<remote>]
+ //
+ // If <remote> is omitted, it's the same as
+ // <local>.
+ //
+ // Expand to
+ //
+ // cgo_export_* <local> <remote> <ABI>
+ if len(*pprag) == 2 {
+ *pprag = append(*pprag, (*pprag)[1])
+ }
+ // Add the ABI argument.
+ *pprag = append(*pprag, fn.ABI.String())
+ }
+
+ // Apply references.
+ if abis, ok := s.refs[symName]; ok {
+ fn.ABIRefs |= abis
+ }
+ // Assume all functions are referenced at least as
+ // ABIInternal, since they may be referenced from
+ // other packages.
+ fn.ABIRefs.Set(obj.ABIInternal, true)
+
+ // If a symbol is defined in this package (either in
+ // Go or assembly) and given a linkname, it may be
+ // referenced from another package, so make it
+ // callable via any ABI. It's important that we know
+ // it's defined in this package since other packages
+ // may "pull" symbols using linkname and we don't want
+ // to create duplicate ABI wrappers.
+ //
+ // However, if it's given a linkname for exporting to
+ // C, then we don't make ABI wrappers because the cgo
+ // tool wants the original definition.
+ hasBody := len(fn.Body) != 0
+ if sym.Linkname != "" && (hasBody || hasDefABI) && len(cgoExport) == 0 {
+ fn.ABIRefs |= obj.ABISetCallable
+ }
+
+ // Double check that cgo-exported symbols don't get
+ // any wrappers.
+ if len(cgoExport) > 0 && fn.ABIRefs&^obj.ABISetOf(fn.ABI) != 0 {
+ base.Fatalf("cgo exported function %s cannot have ABI wrappers", fn)
+ }
+
+ if !buildcfg.Experiment.RegabiWrappers {
+ continue
+ }
+
+ forEachWrapperABI(fn, makeABIWrapper)
+ }
+}
+
+// InitLSym defines f's obj.LSym and initializes it based on the
+// properties of f. This includes setting the symbol flags and ABI and
+// creating and initializing related DWARF symbols.
+//
+// InitLSym must be called exactly once per function and must be
+// called for both functions with bodies and functions without bodies.
+// For body-less functions, we only create the LSym; for functions
+// with bodies we call a helper to set up / populate the LSym.
+func InitLSym(f *ir.Func, hasBody bool) {
+ if f.LSym != nil {
+ base.FatalfAt(f.Pos(), "InitLSym called twice on %v", f)
+ }
+
+ if nam := f.Nname; !ir.IsBlank(nam) {
+ f.LSym = nam.LinksymABI(f.ABI)
+ if f.Pragma&ir.Systemstack != 0 {
+ f.LSym.Set(obj.AttrCFunc, true)
+ }
+ if f.ABI == obj.ABIInternal || !buildcfg.Experiment.RegabiWrappers {
+ // Function values can only point to
+ // ABIInternal entry points. This will create
+ // the funcsym for either the defining
+ // function or its wrapper as appropriate.
+ //
+ // If we're not using ABI wrappers, we only
+ // InitLSym for the defining ABI of a function,
+ // so we make the funcsym when we see that.
+ staticdata.NeedFuncSym(f)
+ }
+ }
+ if hasBody {
+ setupTextLSym(f, 0)
+ }
+}
+
+func forEachWrapperABI(fn *ir.Func, cb func(fn *ir.Func, wrapperABI obj.ABI)) {
+ need := fn.ABIRefs &^ obj.ABISetOf(fn.ABI)
+ if need == 0 {
+ return
+ }
+
+ for wrapperABI := obj.ABI(0); wrapperABI < obj.ABICount; wrapperABI++ {
+ if !need.Get(wrapperABI) {
+ continue
+ }
+ cb(fn, wrapperABI)
+ }
+}
+
+// makeABIWrapper creates a new function that will be called with
+// wrapperABI and calls "f" using f.ABI.
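+//
+// For example, an assembly (ABI0) function that is also referenced as
+// ABIInternal gets an ABIInternal wrapper that simply forwards its
+// arguments to the ABI0 definition.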
+func makeABIWrapper(f *ir.Func, wrapperABI obj.ABI) {
+ if base.Debug.ABIWrap != 0 {
+ fmt.Fprintf(os.Stderr, "=-= %v to %v wrapper for %v\n", wrapperABI, f.ABI, f)
+ }
+
+ // Q: is this needed?
+ savepos := base.Pos
+ savedclcontext := typecheck.DeclContext
+ savedcurfn := ir.CurFunc
+
+ base.Pos = base.AutogeneratedPos
+ typecheck.DeclContext = ir.PEXTERN
+
+ // At the moment we don't support wrapping a method; we'd need machinery
+ // below to handle the receiver. Panic if we see this scenario.
+ ft := f.Nname.Type()
+ if ft.NumRecvs() != 0 {
+ panic("makeABIWrapper support for wrapping methods not implemented")
+ }
+
+ // Manufacture a new func type to use for the wrapper.
+ var noReceiver *ir.Field
+ tfn := ir.NewFuncType(base.Pos,
+ noReceiver,
+ typecheck.NewFuncParams(ft.Params(), true),
+ typecheck.NewFuncParams(ft.Results(), false))
+
+ // Reuse f's types.Sym to create a new ODCLFUNC/function.
+ fn := typecheck.DeclFunc(f.Nname.Sym(), tfn)
+ fn.ABI = wrapperABI
+
+ fn.SetABIWrapper(true)
+ fn.SetDupok(true)
+
+ // ABI0-to-ABIInternal wrappers will be mainly loading params from
+ // stack into registers (and/or storing stack locations back to
+ // registers after the wrapped call); in most cases they won't
+ // need to allocate stack space, so it should be OK to mark them
+ // as NOSPLIT in these cases. In addition, my assumption is that
+ // functions written in assembly are NOSPLIT in most (but not all)
+ // cases. In the case of an ABIInternal target that has too many
+ // parameters to fit into registers, the wrapper would need to
+ // allocate stack space, but this seems like an unlikely scenario.
+ // Hence: mark these wrappers NOSPLIT.
+ //
+ // ABIInternal-to-ABI0 wrappers on the other hand will be taking
+ // things in registers and pushing them onto the stack prior to
+ // the ABI0 call, meaning that they will always need to allocate
+ // stack space. If the compiler marks them as NOSPLIT this seems
+ // as though it could lead to situations where the linker's
+ // nosplit-overflow analysis would trigger a link failure. On the
+ // other hand, if they are not tagged NOSPLIT then this could cause
+ // problems when building the runtime (since there may be calls to
+ // asm routines in cases where it's not safe to grow the stack). In
+ // most cases the wrapper would be (in effect) inlined, but there
+ // may be indirect calls from the runtime that could run into
+ // trouble here.
+ // FIXME: at the moment all.bash does not pass when I leave out
+ // NOSPLIT for these wrappers, so all are currently tagged with NOSPLIT.
+ fn.Pragma |= ir.Nosplit
+
+ // Generate call. Use tail call if no params and no returns,
+ // but a regular call otherwise.
+ //
+ // Note: ideally we would be using a tail call in cases where
+ // there are params but no returns for ABI0->ABIInternal wrappers,
+ // provided that all params fit into registers (e.g. we don't have
+ // to allocate any stack space). Doing this will require some
+ // extra work in typecheck/walk/ssa, might want to add a new node
+ // OTAILCALL or something to this effect.
+ tailcall := tfn.Type().NumResults() == 0 && tfn.Type().NumParams() == 0 && tfn.Type().NumRecvs() == 0
+ if base.Ctxt.Arch.Name == "ppc64le" && base.Ctxt.Flag_dynlink {
+ // cannot tailcall on PPC64 with dynamic linking, as we need
+ // to restore R2 after call.
+ tailcall = false
+ }
+ if base.Ctxt.Arch.Name == "amd64" && wrapperABI == obj.ABIInternal {
+ // cannot tailcall from ABIInternal to ABI0 on AMD64, as we need
+ // to restore special registers (X15) when returning to ABIInternal.
+ tailcall = false
+ }
+
+ var tail ir.Node
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, f.Nname, nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+ tail = call
+ if tailcall {
+ tail = ir.NewTailCallStmt(base.Pos, call)
+ } else if tfn.Type().NumResults() > 0 {
+ n := ir.NewReturnStmt(base.Pos, nil)
+ n.Results = []ir.Node{call}
+ tail = n
+ }
+ fn.Body.Append(tail)
+
+ typecheck.FinishFuncBody()
+ if base.Debug.DclStack != 0 {
+ types.CheckDclstack()
+ }
+
+ typecheck.Func(fn)
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+
+ // Restore previous context.
+ base.Pos = savepos
+ typecheck.DeclContext = savedclcontext
+ ir.CurFunc = savedcurfn
+}
+
+// setupTextLSym initializes the LSym for a text symbol that has a body.
+func setupTextLSym(f *ir.Func, flag int) {
+ if f.Dupok() {
+ flag |= obj.DUPOK
+ }
+ if f.Wrapper() {
+ flag |= obj.WRAPPER
+ }
+ if f.ABIWrapper() {
+ flag |= obj.ABIWRAPPER
+ }
+ if f.Needctxt() {
+ flag |= obj.NEEDCTXT
+ }
+ if f.Pragma&ir.Nosplit != 0 {
+ flag |= obj.NOSPLIT
+ }
+ if f.ReflectMethod() {
+ flag |= obj.REFLECTMETHOD
+ }
+
+ // Clumsy but important.
+ // For functions that could be on the path of invoking a deferred
+ // function that can recover (runtime.reflectcall, reflect.callReflect,
+ // and reflect.callMethod), we want the panic+recover special handling.
+ // See test/recover.go for test cases and src/reflect/value.go
+ // for the actual functions being considered.
+ //
+ // runtime.reflectcall is an assembly function which tailcalls
+ // WRAPPER functions (runtime.callNN). Its ABI wrapper needs the
+ // WRAPPER flag as well.
+ fnname := f.Sym().Name
+ if base.Ctxt.Pkgpath == "runtime" && fnname == "reflectcall" {
+ flag |= obj.WRAPPER
+ } else if base.Ctxt.Pkgpath == "reflect" {
+ switch fnname {
+ case "callReflect", "callMethod":
+ flag |= obj.WRAPPER
+ }
+ }
+
+ base.Ctxt.InitTextSym(f.LSym, flag)
+}
diff --git a/src/cmd/compile/internal/ssagen/arch.go b/src/cmd/compile/internal/ssagen/arch.go
new file mode 100644
index 0000000..483e45c
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/arch.go
@@ -0,0 +1,51 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+var Arch ArchInfo
+
+// interface to back end
+
+type ArchInfo struct {
+ LinkArch *obj.LinkArch
+
+ REGSP int
+ MAXWIDTH int64
+ SoftFloat bool
+
+ PadFrame func(int64) int64
+
+ // ZeroRange zeroes a range of memory on the stack. It is only inserted
+ // at function entry, and it is ok to clobber registers.
+ ZeroRange func(*objw.Progs, *obj.Prog, int64, int64, *uint32) *obj.Prog
+
+ Ginsnop func(*objw.Progs) *obj.Prog
+
+ // SSAMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+ SSAMarkMoves func(*State, *ssa.Block)
+
+ // SSAGenValue emits Prog(s) for the Value.
+ SSAGenValue func(*State, *ssa.Value)
+
+ // SSAGenBlock emits end-of-block Progs. SSAGenValue should be called
+ // for all values in the block before SSAGenBlock.
+ SSAGenBlock func(s *State, b, next *ssa.Block)
+
+ // LoadRegResult emits instructions that load the register-assigned result
+ // at n+off (n is PPARAMOUT) to register reg. The result is already in
+ // memory. Used in the open-coded defer return path.
+ LoadRegResult func(s *State, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
+
+ // SpillArgReg emits instructions that spill reg to n+off.
+ SpillArgReg func(pp *objw.Progs, p *obj.Prog, f *ssa.Func, t *types.Type, reg int16, n *ir.Name, off int64) *obj.Prog
+}
diff --git a/src/cmd/compile/internal/ssagen/nowb.go b/src/cmd/compile/internal/ssagen/nowb.go
new file mode 100644
index 0000000..1fbc6a8
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/nowb.go
@@ -0,0 +1,206 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "bytes"
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+func EnableNoWriteBarrierRecCheck() {
+ nowritebarrierrecCheck = newNowritebarrierrecChecker()
+}
+
+func NoWriteBarrierRecCheck() {
+ // Write barriers are now known. Check the
+ // call graph.
+ nowritebarrierrecCheck.check()
+ nowritebarrierrecCheck = nil
+}
+
+var nowritebarrierrecCheck *nowritebarrierrecChecker
+
+type nowritebarrierrecChecker struct {
+ // extraCalls contains extra function calls that may not be
+ // visible during later analysis. It maps from the ODCLFUNC of
+ // the caller to a list of callees.
+ extraCalls map[*ir.Func][]nowritebarrierrecCall
+
+ // curfn is the current function during AST walks.
+ curfn *ir.Func
+}
+
+type nowritebarrierrecCall struct {
+ target *ir.Func // caller or callee
+ lineno src.XPos // line of call
+}
+
+// newNowritebarrierrecChecker creates a nowritebarrierrecChecker. It
+// must be called before walk.
+func newNowritebarrierrecChecker() *nowritebarrierrecChecker {
+ c := &nowritebarrierrecChecker{
+ extraCalls: make(map[*ir.Func][]nowritebarrierrecCall),
+ }
+
+ // Find all systemstack calls and record their targets. In
+ // general, flow analysis can't see into systemstack, but it's
+ // important to handle it for this check, so we model it
+ // directly. This has to happen before transforming closures in walk since
+ // it's a lot harder to work out the argument after.
+ for _, n := range typecheck.Target.Decls {
+ if n.Op() != ir.ODCLFUNC {
+ continue
+ }
+ c.curfn = n.(*ir.Func)
+ if c.curfn.ABIWrapper() {
+ // We only want "real" calls to these
+ // functions, not the generated ones within
+ // their own ABI wrappers.
+ continue
+ }
+ ir.Visit(n, c.findExtraCalls)
+ }
+ c.curfn = nil
+ return c
+}
+
+func (c *nowritebarrierrecChecker) findExtraCalls(nn ir.Node) {
+ if nn.Op() != ir.OCALLFUNC {
+ return
+ }
+ n := nn.(*ir.CallExpr)
+ if n.X == nil || n.X.Op() != ir.ONAME {
+ return
+ }
+ fn := n.X.(*ir.Name)
+ if fn.Class != ir.PFUNC || fn.Defn == nil {
+ return
+ }
+ if !types.IsRuntimePkg(fn.Sym().Pkg) || fn.Sym().Name != "systemstack" {
+ return
+ }
+
+ var callee *ir.Func
+ arg := n.Args[0]
+ switch arg.Op() {
+ case ir.ONAME:
+ arg := arg.(*ir.Name)
+ callee = arg.Defn.(*ir.Func)
+ case ir.OCLOSURE:
+ arg := arg.(*ir.ClosureExpr)
+ callee = arg.Func
+ default:
+ base.Fatalf("expected ONAME or OCLOSURE node, got %+v", arg)
+ }
+ if callee.Op() != ir.ODCLFUNC {
+ base.Fatalf("expected ODCLFUNC node, got %+v", callee)
+ }
+ c.extraCalls[c.curfn] = append(c.extraCalls[c.curfn], nowritebarrierrecCall{callee, n.Pos()})
+}
+
+// recordCall records a call from ODCLFUNC node "from", to function
+// symbol "to" at position pos.
+//
+// This should be done as late as possible during compilation to
+// capture precise call graphs. The target of the call is an LSym
+// because that's all we know after we start SSA.
+//
+// This can be called concurrently for different "from" Nodes.
+func (c *nowritebarrierrecChecker) recordCall(fn *ir.Func, to *obj.LSym, pos src.XPos) {
+ // We record this information on the *Func so this is concurrent-safe.
+ if fn.NWBRCalls == nil {
+ fn.NWBRCalls = new([]ir.SymAndPos)
+ }
+ *fn.NWBRCalls = append(*fn.NWBRCalls, ir.SymAndPos{Sym: to, Pos: pos})
+}
+
+func (c *nowritebarrierrecChecker) check() {
+ // We walk the call graph as late as possible so we can
+ // capture all calls created by lowering, but this means we
+ // only get to see the obj.LSyms of calls. symToFunc lets us
+ // get back to the ODCLFUNCs.
+ symToFunc := make(map[*obj.LSym]*ir.Func)
+ // funcs records the back-edges of the BFS call graph walk. It
+ // maps from the ODCLFUNC of each function that must not have
+ // write barriers to the call that inhibits them. Functions
+ // that are directly marked go:nowritebarrierrec are in this
+ // map with a zero-valued nowritebarrierrecCall. This also
+ // acts as the set of marks for the BFS of the call graph.
+ funcs := make(map[*ir.Func]nowritebarrierrecCall)
+ // q is the queue of ODCLFUNC Nodes to visit in BFS order.
+ var q ir.NameQueue
+
+ for _, n := range typecheck.Target.Decls {
+ if n.Op() != ir.ODCLFUNC {
+ continue
+ }
+ fn := n.(*ir.Func)
+
+ symToFunc[fn.LSym] = fn
+
+ // Make nowritebarrierrec functions BFS roots.
+ if fn.Pragma&ir.Nowritebarrierrec != 0 {
+ funcs[fn] = nowritebarrierrecCall{}
+ q.PushRight(fn.Nname)
+ }
+ // Check go:nowritebarrier functions.
+ if fn.Pragma&ir.Nowritebarrier != 0 && fn.WBPos.IsKnown() {
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited")
+ }
+ }
+
+ // Perform a BFS of the call graph from all
+ // go:nowritebarrierrec functions.
+ enqueue := func(src, target *ir.Func, pos src.XPos) {
+ if target.Pragma&ir.Yeswritebarrierrec != 0 {
+ // Don't flow into this function.
+ return
+ }
+ if _, ok := funcs[target]; ok {
+ // Already found a path to target.
+ return
+ }
+
+ // Record the path.
+ funcs[target] = nowritebarrierrecCall{target: src, lineno: pos}
+ q.PushRight(target.Nname)
+ }
+ for !q.Empty() {
+ fn := q.PopLeft().Func
+
+ // Check fn.
+ if fn.WBPos.IsKnown() {
+ var err bytes.Buffer
+ call := funcs[fn]
+ for call.target != nil {
+ fmt.Fprintf(&err, "\n\t%v: called by %v", base.FmtPos(call.lineno), call.target.Nname)
+ call = funcs[call.target]
+ }
+ base.ErrorfAt(fn.WBPos, "write barrier prohibited by caller; %v%s", fn.Nname, err.String())
+ continue
+ }
+
+ // Enqueue fn's calls.
+ for _, callee := range c.extraCalls[fn] {
+ enqueue(fn, callee.target, callee.lineno)
+ }
+ if fn.NWBRCalls == nil {
+ continue
+ }
+ for _, callee := range *fn.NWBRCalls {
+ target := symToFunc[callee.Sym]
+ if target != nil {
+ enqueue(fn, target, callee.Pos)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/pgen.go b/src/cmd/compile/internal/ssagen/pgen.go
new file mode 100644
index 0000000..86d40e2
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/pgen.go
@@ -0,0 +1,291 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "internal/buildcfg"
+ "internal/race"
+ "math/rand"
+ "sort"
+ "sync"
+ "time"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// cmpstackvarlt reports whether the stack variable a sorts before b.
+//
+// Sort the list of stack variables: autos after anything else;
+// within autos, unused after used; within used, things with
+// pointers first, zeroed things first, and then decreasing size.
+// Because autos are laid out at decreasing addresses on the stack,
+// "pointers first, zeroed things first, decreasing size" really means
+// that, in memory, things with pointers needing zeroing are at the
+// top of the stack, increasing in size.
+// Non-autos sort on offset.
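+//
+// For example, among used autos a pointer-containing slot sorts before a
+// pointer-free one, regardless of size.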
+func cmpstackvarlt(a, b *ir.Name) bool {
+ if needAlloc(a) != needAlloc(b) {
+ return needAlloc(b)
+ }
+
+ if !needAlloc(a) {
+ return a.FrameOffset() < b.FrameOffset()
+ }
+
+ if a.Used() != b.Used() {
+ return a.Used()
+ }
+
+ ap := a.Type().HasPointers()
+ bp := b.Type().HasPointers()
+ if ap != bp {
+ return ap
+ }
+
+ ap = a.Needzero()
+ bp = b.Needzero()
+ if ap != bp {
+ return ap
+ }
+
+ if a.Type().Size() != b.Type().Size() {
+ return a.Type().Size() > b.Type().Size()
+ }
+
+ return a.Sym().Name < b.Sym().Name
+}
+
+// byStackVar implements sort.Interface for []*ir.Name using cmpstackvarlt.
+type byStackVar []*ir.Name
+
+func (s byStackVar) Len() int { return len(s) }
+func (s byStackVar) Less(i, j int) bool { return cmpstackvarlt(s[i], s[j]) }
+func (s byStackVar) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
+
+// needAlloc reports whether n is within the current frame, for which we need to
+// allocate space. In particular, it excludes arguments and results, which are in
+// the caller's frame.
+func needAlloc(n *ir.Name) bool {
+ if n.Op() != ir.ONAME {
+ base.FatalfAt(n.Pos(), "%v has unexpected Op %v", n, n.Op())
+ }
+
+ switch n.Class {
+ case ir.PAUTO:
+ return true
+ case ir.PPARAM:
+ return false
+ case ir.PPARAMOUT:
+ return n.IsOutputParamInRegisters()
+
+ default:
+ base.FatalfAt(n.Pos(), "%v has unexpected Class %v", n, n.Class)
+ return false
+ }
+}
+
+func (s *ssafn) AllocFrame(f *ssa.Func) {
+ s.stksize = 0
+ s.stkptrsize = 0
+ fn := s.curfn
+
+ // Mark the PAUTO's unused.
+ for _, ln := range fn.Dcl {
+ if needAlloc(ln) {
+ ln.SetUsed(false)
+ }
+ }
+
+ for _, l := range f.RegAlloc {
+ if ls, ok := l.(ssa.LocalSlot); ok {
+ ls.N.SetUsed(true)
+ }
+ }
+
+ for _, b := range f.Blocks {
+ for _, v := range b.Values {
+ if n, ok := v.Aux.(*ir.Name); ok {
+ switch n.Class {
+ case ir.PPARAMOUT:
+ if n.IsOutputParamInRegisters() && v.Op == ssa.OpVarDef {
+ // ignore VarDef, look for "real" uses.
+ // TODO: maybe do this for PAUTO as well?
+ continue
+ }
+ fallthrough
+ case ir.PPARAM, ir.PAUTO:
+ n.SetUsed(true)
+ }
+ }
+ }
+ }
+
+ // Use sort.Stable instead of sort.Sort so stack layout (and thus
+ // compiler output) is less sensitive to frontend changes that
+ // introduce or remove unused variables.
+ sort.Stable(byStackVar(fn.Dcl))
+
+ // Reassign stack offsets of the locals that are used.
+ lastHasPtr := false
+ for i, n := range fn.Dcl {
+ if n.Op() != ir.ONAME || n.Class != ir.PAUTO && !(n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters()) {
+ // i.e., stack assign if AUTO, or if PARAMOUT in registers (which has no predefined spill locations)
+ continue
+ }
+ if !n.Used() {
+ fn.Dcl = fn.Dcl[:i]
+ break
+ }
+
+ types.CalcSize(n.Type())
+ w := n.Type().Size()
+ if w >= types.MaxWidth || w < 0 {
+ base.Fatalf("bad width")
+ }
+ if w == 0 && lastHasPtr {
+ // Pad between a pointer-containing object and a zero-sized object.
+ // This prevents a pointer to the zero-sized object from being interpreted
+ // as a pointer to the pointer-containing object (and causing it
+ // to be scanned when it shouldn't be). See issue 24993.
+ w = 1
+ }
+ s.stksize += w
+ s.stksize = types.Rnd(s.stksize, n.Type().Alignment())
+ if n.Type().HasPointers() {
+ s.stkptrsize = s.stksize
+ lastHasPtr = true
+ } else {
+ lastHasPtr = false
+ }
+ n.SetFrameOffset(-s.stksize)
+ }
+
+ s.stksize = types.Rnd(s.stksize, int64(types.RegSize))
+ s.stkptrsize = types.Rnd(s.stkptrsize, int64(types.RegSize))
+}
+
+const maxStackSize = 1 << 30
+
+// Compile builds an SSA backend function,
+// uses it to generate a plist,
+// and flushes that plist to machine code.
+// worker indicates which of the backend workers is doing the processing.
+func Compile(fn *ir.Func, worker int) {
+ f := buildssa(fn, worker)
+ // Note: check arg size to fix issue 25507.
+ if f.Frontend().(*ssafn).stksize >= maxStackSize || f.OwnAux.ArgWidth() >= maxStackSize {
+ largeStackFramesMu.Lock()
+ largeStackFrames = append(largeStackFrames, largeStack{locals: f.Frontend().(*ssafn).stksize, args: f.OwnAux.ArgWidth(), pos: fn.Pos()})
+ largeStackFramesMu.Unlock()
+ return
+ }
+ pp := objw.NewProgs(fn, worker)
+ defer pp.Free()
+ genssa(f, pp)
+ // Check frame size again.
+ // The check above included only the space needed for local variables.
+ // After genssa, the space needed includes local variables and the callee arg region.
+ // We must do this check prior to calling pp.Flush.
+ // If there are any oversized stack frames,
+ // the assembler may emit inscrutable complaints about invalid instructions.
+ if pp.Text.To.Offset >= maxStackSize {
+ largeStackFramesMu.Lock()
+ locals := f.Frontend().(*ssafn).stksize
+ largeStackFrames = append(largeStackFrames, largeStack{locals: locals, args: f.OwnAux.ArgWidth(), callee: pp.Text.To.Offset - locals, pos: fn.Pos()})
+ largeStackFramesMu.Unlock()
+ return
+ }
+
+ pp.Flush() // assemble, fill in boilerplate, etc.
+ // fieldtrack must be called after pp.Flush. See issue 20014.
+ fieldtrack(pp.Text.From.Sym, fn.FieldTrack)
+}
+
+func init() {
+ if race.Enabled {
+ rand.Seed(time.Now().UnixNano())
+ }
+}
+
+// StackOffset returns the stack location of a LocalSlot relative to the
+// stack pointer, suitable for use in a DWARF location entry. This has nothing
+// to do with its offset in the user variable.
+func StackOffset(slot ssa.LocalSlot) int32 {
+ n := slot.N
+ var off int64
+ switch n.Class {
+ case ir.PPARAM, ir.PPARAMOUT:
+ if !n.IsOutputParamInRegisters() {
+ off = n.FrameOffset() + base.Ctxt.FixedFrameSize()
+ break
+ }
+ fallthrough // PPARAMOUT in registers allocates like an AUTO
+ case ir.PAUTO:
+ off = n.FrameOffset()
+ if base.Ctxt.FixedFrameSize() == 0 {
+ off -= int64(types.PtrSize)
+ }
+ if buildcfg.FramePointerEnabled {
+ off -= int64(types.PtrSize)
+ }
+ }
+ return int32(off + slot.Off)
+}
+
+// fieldtrack adds R_USEFIELD relocations to fnsym to record any
+// struct fields that it used.
+func fieldtrack(fnsym *obj.LSym, tracked map[*obj.LSym]struct{}) {
+ if fnsym == nil {
+ return
+ }
+ if !buildcfg.Experiment.FieldTrack || len(tracked) == 0 {
+ return
+ }
+
+ trackSyms := make([]*obj.LSym, 0, len(tracked))
+ for sym := range tracked {
+ trackSyms = append(trackSyms, sym)
+ }
+ sort.Slice(trackSyms, func(i, j int) bool { return trackSyms[i].Name < trackSyms[j].Name })
+ for _, sym := range trackSyms {
+ r := obj.Addrel(fnsym)
+ r.Sym = sym
+ r.Type = objabi.R_USEFIELD
+ }
+}
+
+// largeStack is info about a function whose stack frame is too large (rare).
+type largeStack struct {
+ locals int64
+ args int64
+ callee int64
+ pos src.XPos
+}
+
+var (
+ largeStackFramesMu sync.Mutex // protects largeStackFrames
+ largeStackFrames []largeStack
+)
+
+func CheckLargeStacks() {
+ // Check whether any of the functions we have compiled have gigantic stack frames.
+ sort.Slice(largeStackFrames, func(i, j int) bool {
+ return largeStackFrames[i].pos.Before(largeStackFrames[j].pos)
+ })
+ for _, large := range largeStackFrames {
+ if large.callee != 0 {
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args + %d MB callee", large.locals>>20, large.args>>20, large.callee>>20)
+ } else {
+ base.ErrorfAt(large.pos, "stack frame too large (>1GB): %d MB locals + %d MB args", large.locals>>20, large.args>>20)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/ssagen/phi.go b/src/cmd/compile/internal/ssagen/phi.go
new file mode 100644
index 0000000..01ad211
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/phi.go
@@ -0,0 +1,557 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "container/heap"
+ "fmt"
+
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// This file contains the algorithm to place phi nodes in a function.
+// For small functions, we use Braun, Buchwald, Hack, Leißa, Mallon, and Zwinkau.
+// https://pp.info.uni-karlsruhe.de/uploads/publikationen/braun13cc.pdf
+// For large functions, we use Sreedhar & Gao: A Linear Time Algorithm for Placing Φ-Nodes.
+// http://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.8.1979&rep=rep1&type=pdf
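+//
+// "Small" here means at most smallBlocks (500) basic blocks: insertPhis
+// dispatches to simplePhiState for such functions and to phiState otherwise.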
+
+const smallBlocks = 500
+
+const debugPhi = false
+
+// fwdRefAux wraps an arbitrary ir.Node as an ssa.Aux for use with OpFwdref.
+type fwdRefAux struct {
+ _ [0]func() // ensure ir.Node isn't compared for equality
+ N ir.Node
+}
+
+func (fwdRefAux) CanBeAnSSAAux() {}
+
+// insertPhis finds all the places in the function where a phi is
+// necessary and inserts them.
+// Uses FwdRef ops to find all uses of variables, and s.defvars to find
+// all definitions.
+// Phi values are inserted, and all FwdRefs are changed to a Copy
+// of the appropriate phi or definition.
+// TODO: make this part of cmd/compile/internal/ssa somehow?
+func (s *state) insertPhis() {
+ if len(s.f.Blocks) <= smallBlocks {
+ sps := simplePhiState{s: s, f: s.f, defvars: s.defvars}
+ sps.insertPhis()
+ return
+ }
+ ps := phiState{s: s, f: s.f, defvars: s.defvars}
+ ps.insertPhis()
+}
+
+type phiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+
+ varnum map[ir.Node]int32 // variable numbering
+
+ // properties of the dominator tree
+ idom []*ssa.Block // dominator parents
+ tree []domBlock // dominator child+sibling
+ level []int32 // level in dominator tree (0 = root or unreachable, 1 = children of root, ...)
+
+ // scratch locations
+ priq blockHeap // priority queue of blocks, higher level (toward leaves) = higher priority
+ q []*ssa.Block // inner loop queue
+ queued *sparseSet // has been put in q
+ hasPhi *sparseSet // has a phi
+ hasDef *sparseSet // has a write of the variable we're processing
+
+ // miscellaneous
+ placeholder *ssa.Value // value to use as a "not set yet" placeholder.
+}
+
+func (s *phiState) insertPhis() {
+ if debugPhi {
+ fmt.Println(s.f.String())
+ }
+
+ // Find all the variables for which we need to match up reads & writes.
+ // This step prunes any basic-block-only variables from consideration.
+ // Generate a numbering for these variables.
+ s.varnum = map[ir.Node]int32{}
+ var vars []ir.Node
+ var vartypes []*types.Type
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ var_ := v.Aux.(fwdRefAux).N
+
+ // Optimization: look back 1 block for the definition.
+ if len(b.Preds) == 1 {
+ c := b.Preds[0].Block()
+ if w := s.defvars[c.ID][var_]; w != nil {
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ continue
+ }
+ }
+
+ if _, ok := s.varnum[var_]; ok {
+ continue
+ }
+ s.varnum[var_] = int32(len(vartypes))
+ if debugPhi {
+ fmt.Printf("var%d = %v\n", len(vartypes), var_)
+ }
+ vars = append(vars, var_)
+ vartypes = append(vartypes, v.Type)
+ }
+ }
+
+ if len(vartypes) == 0 {
+ return
+ }
+
+ // Find all definitions of the variables we need to process.
+ // defs[n] contains all the blocks in which variable number n is assigned.
+ defs := make([][]*ssa.Block, len(vartypes))
+ for _, b := range s.f.Blocks {
+ for var_ := range s.defvars[b.ID] { // TODO: encode defvars some other way (explicit ops)? make defvars[n] a slice instead of a map.
+ if n, ok := s.varnum[var_]; ok {
+ defs[n] = append(defs[n], b)
+ }
+ }
+ }
+
+ // Make dominator tree.
+ s.idom = s.f.Idom()
+ s.tree = make([]domBlock, s.f.NumBlocks())
+ for _, b := range s.f.Blocks {
+ p := s.idom[b.ID]
+ if p != nil {
+ s.tree[b.ID].sibling = s.tree[p.ID].firstChild
+ s.tree[p.ID].firstChild = b
+ }
+ }
+ // Compute levels in dominator tree.
+ // With parent pointers we can do a depth-first walk without
+ // any auxiliary storage.
+ s.level = make([]int32, s.f.NumBlocks())
+ b := s.f.Entry
+levels:
+ for {
+ if p := s.idom[b.ID]; p != nil {
+ s.level[b.ID] = s.level[p.ID] + 1
+ if debugPhi {
+ fmt.Printf("level %s = %d\n", b, s.level[b.ID])
+ }
+ }
+ if c := s.tree[b.ID].firstChild; c != nil {
+ b = c
+ continue
+ }
+ for {
+ if c := s.tree[b.ID].sibling; c != nil {
+ b = c
+ continue levels
+ }
+ b = s.idom[b.ID]
+ if b == nil {
+ break levels
+ }
+ }
+ }
+
+ // Allocate scratch locations.
+ s.priq.level = s.level
+ s.q = make([]*ssa.Block, 0, s.f.NumBlocks())
+ s.queued = newSparseSet(s.f.NumBlocks())
+ s.hasPhi = newSparseSet(s.f.NumBlocks())
+ s.hasDef = newSparseSet(s.f.NumBlocks())
+ s.placeholder = s.s.entryNewValue0(ssa.OpUnknown, types.TypeInvalid)
+
+ // Generate phi ops for each variable.
+ for n := range vartypes {
+ s.insertVarPhis(n, vars[n], defs[n], vartypes[n])
+ }
+
+ // Resolve FwdRefs to the correct write or phi.
+ s.resolveFwdRefs()
+
+ // Erase variable numbers stored in AuxInt fields of phi ops. They are no longer needed.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op == ssa.OpPhi {
+ v.AuxInt = 0
+ }
+ // Any remaining FwdRefs are dead code.
+ if v.Op == ssa.OpFwdRef {
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ }
+ }
+ }
+}
+
+func (s *phiState) insertVarPhis(n int, var_ ir.Node, defs []*ssa.Block, typ *types.Type) {
+ priq := &s.priq
+ q := s.q
+ queued := s.queued
+ queued.clear()
+ hasPhi := s.hasPhi
+ hasPhi.clear()
+ hasDef := s.hasDef
+ hasDef.clear()
+
+ // Add defining blocks to priority queue.
+ for _, b := range defs {
+ priq.a = append(priq.a, b)
+ hasDef.add(b.ID)
+ if debugPhi {
+ fmt.Printf("def of var%d in %s\n", n, b)
+ }
+ }
+ heap.Init(priq)
+
+ // Visit blocks defining variable n, from deepest to shallowest.
+ for len(priq.a) > 0 {
+ currentRoot := heap.Pop(priq).(*ssa.Block)
+ if debugPhi {
+ fmt.Printf("currentRoot %s\n", currentRoot)
+ }
+ // Walk subtree below definition.
+ // Skip subtrees we've done in previous iterations.
+ // Find edges exiting tree dominated by definition (the dominance frontier).
+ // Insert phis at target blocks.
+ if queued.contains(currentRoot.ID) {
+ s.s.Fatalf("root already in queue")
+ }
+ q = append(q, currentRoot)
+ queued.add(currentRoot.ID)
+ for len(q) > 0 {
+ b := q[len(q)-1]
+ q = q[:len(q)-1]
+ if debugPhi {
+ fmt.Printf(" processing %s\n", b)
+ }
+
+ currentRootLevel := s.level[currentRoot.ID]
+ for _, e := range b.Succs {
+ c := e.Block()
+ // TODO: if the variable is dead at c, skip it.
+ if s.level[c.ID] > currentRootLevel {
+ // a D-edge, or an edge whose target is in currentRoot's subtree.
+ continue
+ }
+ if hasPhi.contains(c.ID) {
+ continue
+ }
+ // Add a phi to block c for variable n.
+ hasPhi.add(c.ID)
+ v := c.NewValue0I(currentRoot.Pos, ssa.OpPhi, typ, int64(n)) // TODO: line number right?
+ // Note: we store the variable number in the phi's AuxInt field. Used temporarily by phi building.
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
+ for range c.Preds {
+ v.AddArg(s.placeholder) // Actual args will be filled in by resolveFwdRefs.
+ }
+ if debugPhi {
+ fmt.Printf("new phi for var%d in %s: %s\n", n, c, v)
+ }
+ if !hasDef.contains(c.ID) {
+ // There's now a new definition of this variable in block c.
+ // Add it to the priority queue to explore.
+ heap.Push(priq, c)
+ hasDef.add(c.ID)
+ }
+ }
+
+ // Visit children if they have not been visited yet.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ if !queued.contains(c.ID) {
+ q = append(q, c)
+ queued.add(c.ID)
+ }
+ }
+ }
+ }
+}
+
+// resolveFwdRefs links all FwdRef uses up to their nearest dominating definition.
+func (s *phiState) resolveFwdRefs() {
+ // Do a depth-first walk of the dominator tree, keeping track
+ // of the most-recently-seen value for each variable.
+
+ // Map from variable ID to SSA value at the current point of the walk.
+ values := make([]*ssa.Value, len(s.varnum))
+ for i := range values {
+ values[i] = s.placeholder
+ }
+
+ // Stack of work to do.
+ type stackEntry struct {
+ b *ssa.Block // block to explore
+
+ // variable/value pair to reinstate on exit
+ n int32 // variable ID
+ v *ssa.Value
+
+ // Note: only one of b or n,v will be set.
+ }
+ var stk []stackEntry
+
+ stk = append(stk, stackEntry{b: s.f.Entry})
+ for len(stk) > 0 {
+ work := stk[len(stk)-1]
+ stk = stk[:len(stk)-1]
+
+ b := work.b
+ if b == nil {
+ // On exit from a block, this case will undo any assignments done below.
+ values[work.n] = work.v
+ continue
+ }
+
+ // Process phis as new defs. They come before FwdRefs in this block.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpPhi {
+ continue
+ }
+ n := int32(v.AuxInt)
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace a FwdRef op with the current incoming value for its variable.
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ n := s.varnum[v.Aux.(fwdRefAux).N]
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(values[n])
+ }
+
+ // Establish values for variables defined in b.
+ for var_, v := range s.defvars[b.ID] {
+ n, ok := s.varnum[var_]
+ if !ok {
+ // some variable not live across a basic block boundary.
+ continue
+ }
+ // Remember the old assignment so we can undo it when we exit b.
+ stk = append(stk, stackEntry{n: n, v: values[n]})
+ // Record the new assignment.
+ values[n] = v
+ }
+
+ // Replace phi args in successors with the current incoming value.
+ for _, e := range b.Succs {
+ c, i := e.Block(), e.Index()
+ for j := len(c.Values) - 1; j >= 0; j-- {
+ v := c.Values[j]
+ if v.Op != ssa.OpPhi {
+ break // All phis will be at the end of the block during phi building.
+ }
+ // Only set arguments that have been resolved.
+ // For very wide CFGs, this significantly speeds up phi resolution.
+ // See golang.org/issue/8225.
+ if w := values[v.AuxInt]; w.Op != ssa.OpUnknown {
+ v.SetArg(i, w)
+ }
+ }
+ }
+
+ // Walk children in dominator tree.
+ for c := s.tree[b.ID].firstChild; c != nil; c = s.tree[c.ID].sibling {
+ stk = append(stk, stackEntry{b: c})
+ }
+ }
+}
+
+// domBlock contains extra per-block information to record the dominator tree.
+type domBlock struct {
+ firstChild *ssa.Block // first child of block in dominator tree
+ sibling *ssa.Block // next child of parent in dominator tree
+}
+
+// A block heap is used as a priority queue to implement the PiggyBank
+// from Sreedhar and Gao. That paper uses an array which is better
+// asymptotically but worse in the common case when the PiggyBank
+// holds a sparse set of blocks.
+type blockHeap struct {
+ a []*ssa.Block // block IDs in heap
+ level []int32 // depth in dominator tree (static, used for determining priority)
+}
+
+func (h *blockHeap) Len() int { return len(h.a) }
+func (h *blockHeap) Swap(i, j int) { a := h.a; a[i], a[j] = a[j], a[i] }
+
+func (h *blockHeap) Push(x interface{}) {
+ v := x.(*ssa.Block)
+ h.a = append(h.a, v)
+}
+func (h *blockHeap) Pop() interface{} {
+ old := h.a
+ n := len(old)
+ x := old[n-1]
+ h.a = old[:n-1]
+ return x
+}
+func (h *blockHeap) Less(i, j int) bool {
+ return h.level[h.a[i].ID] > h.level[h.a[j].ID]
+}
+
+// TODO: stop walking the iterated dominance frontier when
+// the variable is dead. Maybe detect that by checking if the
+// node we're on is reverse dominated by all the reads?
+// Reverse dominated by the highest common successor of all the reads?
+
+// copy of ../ssa/sparseset.go
+// TODO: move this file to ../ssa, then use sparseSet there.
+type sparseSet struct {
+ dense []ssa.ID
+ sparse []int32
+}
+
+// newSparseSet returns a sparseSet that can represent
+// integers between 0 and n-1
+func newSparseSet(n int) *sparseSet {
+ return &sparseSet{dense: nil, sparse: make([]int32, n)}
+}
+
+func (s *sparseSet) contains(x ssa.ID) bool {
+ i := s.sparse[x]
+ return i < int32(len(s.dense)) && s.dense[i] == x
+}
+
+func (s *sparseSet) add(x ssa.ID) {
+ i := s.sparse[x]
+ if i < int32(len(s.dense)) && s.dense[i] == x {
+ return
+ }
+ s.dense = append(s.dense, x)
+ s.sparse[x] = int32(len(s.dense)) - 1
+}
+
+func (s *sparseSet) clear() {
+ s.dense = s.dense[:0]
+}
+
+// Variant to use for small functions.
+type simplePhiState struct {
+ s *state // SSA state
+ f *ssa.Func // function to work on
+ fwdrefs []*ssa.Value // list of FwdRefs to be processed
+ defvars []map[ir.Node]*ssa.Value // defined variables at end of each block
+ reachable []bool // which blocks are reachable
+}
+
+func (s *simplePhiState) insertPhis() {
+ s.reachable = ssa.ReachableBlocks(s.f)
+
+ // Find FwdRef ops.
+ for _, b := range s.f.Blocks {
+ for _, v := range b.Values {
+ if v.Op != ssa.OpFwdRef {
+ continue
+ }
+ s.fwdrefs = append(s.fwdrefs, v)
+ var_ := v.Aux.(fwdRefAux).N
+ if _, ok := s.defvars[b.ID][var_]; !ok {
+ s.defvars[b.ID][var_] = v // treat FwdRefs as definitions.
+ }
+ }
+ }
+
+ var args []*ssa.Value
+
+loop:
+ for len(s.fwdrefs) > 0 {
+ v := s.fwdrefs[len(s.fwdrefs)-1]
+ s.fwdrefs = s.fwdrefs[:len(s.fwdrefs)-1]
+ b := v.Block
+ var_ := v.Aux.(fwdRefAux).N
+ if b == s.f.Entry {
+ // No variable should be live at entry.
+ s.s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, var_, v)
+ }
+ if !s.reachable[b.ID] {
+ // This block is dead.
+ // It doesn't matter what we use here as long as it is well-formed.
+ v.Op = ssa.OpUnknown
+ v.Aux = nil
+ continue
+ }
+ // Find variable value on each predecessor.
+ args = args[:0]
+ for _, e := range b.Preds {
+ args = append(args, s.lookupVarOutgoing(e.Block(), v.Type, var_, v.Pos))
+ }
+
+ // Decide if we need a phi or not. We need a phi if there
+ // are two different args (which are both not v).
+ var w *ssa.Value
+ for _, a := range args {
+ if a == v {
+ continue // self-reference
+ }
+ if a == w {
+ continue // already have this witness
+ }
+ if w != nil {
+ // two witnesses, need a phi value
+ v.Op = ssa.OpPhi
+ v.AddArgs(args...)
+ v.Aux = nil
+ continue loop
+ }
+ w = a // save witness
+ }
+ if w == nil {
+ s.s.Fatalf("no witness for reachable phi %s", v)
+ }
+ // One witness. Make v a copy of w.
+ v.Op = ssa.OpCopy
+ v.Aux = nil
+ v.AddArg(w)
+ }
+}
+
+// lookupVarOutgoing finds the variable's value at the end of block b.
+func (s *simplePhiState) lookupVarOutgoing(b *ssa.Block, t *types.Type, var_ ir.Node, line src.XPos) *ssa.Value {
+ for {
+ if v := s.defvars[b.ID][var_]; v != nil {
+ return v
+ }
+ // The variable is not defined by b and we haven't looked it up yet.
+ // If b has exactly one predecessor, loop to look it up there.
+ // Otherwise, give up and insert a new FwdRef and resolve it later.
+ if len(b.Preds) != 1 {
+ break
+ }
+ b = b.Preds[0].Block()
+ if !s.reachable[b.ID] {
+ // This is rare; it happens with oddly interleaved infinite loops in dead code.
+ // See issue 19783.
+ break
+ }
+ }
+ // Generate a FwdRef for the variable and return that.
+ v := b.NewValue0A(line, ssa.OpFwdRef, t, fwdRefAux{N: var_})
+ s.defvars[b.ID][var_] = v
+ if var_.Op() == ir.ONAME {
+ s.s.addNamedValue(var_.(*ir.Name), v)
+ }
+ s.fwdrefs = append(s.fwdrefs, v)
+ return v
+}
diff --git a/src/cmd/compile/internal/ssagen/ssa.go b/src/cmd/compile/internal/ssagen/ssa.go
new file mode 100644
index 0000000..b19f3c8
--- /dev/null
+++ b/src/cmd/compile/internal/ssagen/ssa.go
@@ -0,0 +1,7943 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package ssagen
+
+import (
+ "bufio"
+ "bytes"
+ "cmd/compile/internal/abi"
+ "fmt"
+ "go/constant"
+ "html"
+ "internal/buildcfg"
+ "os"
+ "path/filepath"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/liveness"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+var ssaConfig *ssa.Config
+var ssaCaches []ssa.Cache
+
+var ssaDump string // early copy of $GOSSAFUNC; the func name to dump output for
+var ssaDir string // optional destination for ssa dump file
+var ssaDumpStdout bool // whether to dump to stdout
+var ssaDumpCFG string // generate CFGs for these phases
+const ssaDumpFile = "ssa.html"
+
+// ssaDumpInlined holds all inlined functions when ssaDump contains a function name.
+var ssaDumpInlined []*ir.Func
+
+func DumpInline(fn *ir.Func) {
+ if ssaDump != "" && ssaDump == ir.FuncName(fn) {
+ ssaDumpInlined = append(ssaDumpInlined, fn)
+ }
+}
+
+func InitEnv() {
+ ssaDump = os.Getenv("GOSSAFUNC")
+ ssaDir = os.Getenv("GOSSADIR")
+ if ssaDump != "" {
+ if strings.HasSuffix(ssaDump, "+") {
+ ssaDump = ssaDump[:len(ssaDump)-1]
+ ssaDumpStdout = true
+ }
+ spl := strings.Split(ssaDump, ":")
+ if len(spl) > 1 {
+ ssaDump = spl[0]
+ ssaDumpCFG = spl[1]
+ }
+ }
+}
+
+func InitConfig() {
+ types_ := ssa.NewTypes()
+
+ if Arch.SoftFloat {
+ softfloatInit()
+ }
+
+ // Generate a few pointer types that are uncommon in the frontend but common in the backend.
+ // Caching is disabled in the backend, so generating these here avoids allocations.
+ _ = types.NewPtr(types.Types[types.TINTER]) // *interface{}
+ _ = types.NewPtr(types.NewPtr(types.Types[types.TSTRING])) // **string
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TINTER])) // *[]interface{}
+ _ = types.NewPtr(types.NewPtr(types.ByteType)) // **byte
+ _ = types.NewPtr(types.NewSlice(types.ByteType)) // *[]byte
+ _ = types.NewPtr(types.NewSlice(types.Types[types.TSTRING])) // *[]string
+ _ = types.NewPtr(types.NewPtr(types.NewPtr(types.Types[types.TUINT8]))) // ***uint8
+ _ = types.NewPtr(types.Types[types.TINT16]) // *int16
+ _ = types.NewPtr(types.Types[types.TINT64]) // *int64
+ _ = types.NewPtr(types.ErrorType) // *error
+ types.NewPtrCacheEnabled = false
+ ssaConfig = ssa.NewConfig(base.Ctxt.Arch.Name, *types_, base.Ctxt, base.Flag.N == 0, Arch.SoftFloat)
+ ssaConfig.Race = base.Flag.Race
+ ssaCaches = make([]ssa.Cache, base.Flag.LowerC)
+
+ // Set up some runtime functions we'll need to call.
+ ir.Syms.AssertE2I = typecheck.LookupRuntimeFunc("assertE2I")
+ ir.Syms.AssertE2I2 = typecheck.LookupRuntimeFunc("assertE2I2")
+ ir.Syms.AssertI2I = typecheck.LookupRuntimeFunc("assertI2I")
+ ir.Syms.AssertI2I2 = typecheck.LookupRuntimeFunc("assertI2I2")
+ ir.Syms.CheckPtrAlignment = typecheck.LookupRuntimeFunc("checkptrAlignment")
+ ir.Syms.Deferproc = typecheck.LookupRuntimeFunc("deferproc")
+ ir.Syms.DeferprocStack = typecheck.LookupRuntimeFunc("deferprocStack")
+ ir.Syms.Deferreturn = typecheck.LookupRuntimeFunc("deferreturn")
+ ir.Syms.Duffcopy = typecheck.LookupRuntimeFunc("duffcopy")
+ ir.Syms.Duffzero = typecheck.LookupRuntimeFunc("duffzero")
+ ir.Syms.GCWriteBarrier = typecheck.LookupRuntimeFunc("gcWriteBarrier")
+ ir.Syms.Goschedguarded = typecheck.LookupRuntimeFunc("goschedguarded")
+ ir.Syms.Growslice = typecheck.LookupRuntimeFunc("growslice")
+ ir.Syms.Memmove = typecheck.LookupRuntimeFunc("memmove")
+ ir.Syms.Msanread = typecheck.LookupRuntimeFunc("msanread")
+ ir.Syms.Msanwrite = typecheck.LookupRuntimeFunc("msanwrite")
+ ir.Syms.Msanmove = typecheck.LookupRuntimeFunc("msanmove")
+ ir.Syms.Asanread = typecheck.LookupRuntimeFunc("asanread")
+ ir.Syms.Asanwrite = typecheck.LookupRuntimeFunc("asanwrite")
+ ir.Syms.Newobject = typecheck.LookupRuntimeFunc("newobject")
+ ir.Syms.Newproc = typecheck.LookupRuntimeFunc("newproc")
+ ir.Syms.Panicdivide = typecheck.LookupRuntimeFunc("panicdivide")
+ ir.Syms.PanicdottypeE = typecheck.LookupRuntimeFunc("panicdottypeE")
+ ir.Syms.PanicdottypeI = typecheck.LookupRuntimeFunc("panicdottypeI")
+ ir.Syms.Panicnildottype = typecheck.LookupRuntimeFunc("panicnildottype")
+ ir.Syms.Panicoverflow = typecheck.LookupRuntimeFunc("panicoverflow")
+ ir.Syms.Panicshift = typecheck.LookupRuntimeFunc("panicshift")
+ ir.Syms.Raceread = typecheck.LookupRuntimeFunc("raceread")
+ ir.Syms.Racereadrange = typecheck.LookupRuntimeFunc("racereadrange")
+ ir.Syms.Racewrite = typecheck.LookupRuntimeFunc("racewrite")
+ ir.Syms.Racewriterange = typecheck.LookupRuntimeFunc("racewriterange")
+ ir.Syms.X86HasPOPCNT = typecheck.LookupRuntimeVar("x86HasPOPCNT") // bool
+ ir.Syms.X86HasSSE41 = typecheck.LookupRuntimeVar("x86HasSSE41") // bool
+ ir.Syms.X86HasFMA = typecheck.LookupRuntimeVar("x86HasFMA") // bool
+ ir.Syms.ARMHasVFPv4 = typecheck.LookupRuntimeVar("armHasVFPv4") // bool
+ ir.Syms.ARM64HasATOMICS = typecheck.LookupRuntimeVar("arm64HasATOMICS") // bool
+ ir.Syms.Staticuint64s = typecheck.LookupRuntimeVar("staticuint64s")
+ ir.Syms.Typedmemclr = typecheck.LookupRuntimeFunc("typedmemclr")
+ ir.Syms.Typedmemmove = typecheck.LookupRuntimeFunc("typedmemmove")
+ ir.Syms.Udiv = typecheck.LookupRuntimeVar("udiv") // asm func with special ABI
+ ir.Syms.WriteBarrier = typecheck.LookupRuntimeVar("writeBarrier") // struct { bool; ... }
+ ir.Syms.Zerobase = typecheck.LookupRuntimeVar("zerobase")
+
+ // asm funcs with special ABI
+ if base.Ctxt.Arch.Name == "amd64" {
+ GCWriteBarrierReg = map[int16]*obj.LSym{
+ x86.REG_AX: typecheck.LookupRuntimeFunc("gcWriteBarrier"),
+ x86.REG_CX: typecheck.LookupRuntimeFunc("gcWriteBarrierCX"),
+ x86.REG_DX: typecheck.LookupRuntimeFunc("gcWriteBarrierDX"),
+ x86.REG_BX: typecheck.LookupRuntimeFunc("gcWriteBarrierBX"),
+ x86.REG_BP: typecheck.LookupRuntimeFunc("gcWriteBarrierBP"),
+ x86.REG_SI: typecheck.LookupRuntimeFunc("gcWriteBarrierSI"),
+ x86.REG_R8: typecheck.LookupRuntimeFunc("gcWriteBarrierR8"),
+ x86.REG_R9: typecheck.LookupRuntimeFunc("gcWriteBarrierR9"),
+ }
+ }
+
+ if Arch.LinkArch.Family == sys.Wasm {
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("goPanicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("goPanicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("goPanicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("goPanicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("goPanicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("goPanicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("goPanicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("goPanicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("goPanicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("goPanicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("goPanicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("goPanicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("goPanicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("goPanicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("goPanicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("goPanicSlice3CU")
+ BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("goPanicSliceConvert")
+ } else {
+ BoundsCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeFunc("panicIndex")
+ BoundsCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeFunc("panicIndexU")
+ BoundsCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeFunc("panicSliceAlen")
+ BoundsCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeFunc("panicSliceAlenU")
+ BoundsCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeFunc("panicSliceAcap")
+ BoundsCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeFunc("panicSliceAcapU")
+ BoundsCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeFunc("panicSliceB")
+ BoundsCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeFunc("panicSliceBU")
+ BoundsCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeFunc("panicSlice3Alen")
+ BoundsCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeFunc("panicSlice3AlenU")
+ BoundsCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeFunc("panicSlice3Acap")
+ BoundsCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeFunc("panicSlice3AcapU")
+ BoundsCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeFunc("panicSlice3B")
+ BoundsCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeFunc("panicSlice3BU")
+ BoundsCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeFunc("panicSlice3C")
+ BoundsCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeFunc("panicSlice3CU")
+ BoundsCheckFunc[ssa.BoundsConvert] = typecheck.LookupRuntimeFunc("panicSliceConvert")
+ }
+ if Arch.LinkArch.PtrSize == 4 {
+ ExtendCheckFunc[ssa.BoundsIndex] = typecheck.LookupRuntimeVar("panicExtendIndex")
+ ExtendCheckFunc[ssa.BoundsIndexU] = typecheck.LookupRuntimeVar("panicExtendIndexU")
+ ExtendCheckFunc[ssa.BoundsSliceAlen] = typecheck.LookupRuntimeVar("panicExtendSliceAlen")
+ ExtendCheckFunc[ssa.BoundsSliceAlenU] = typecheck.LookupRuntimeVar("panicExtendSliceAlenU")
+ ExtendCheckFunc[ssa.BoundsSliceAcap] = typecheck.LookupRuntimeVar("panicExtendSliceAcap")
+ ExtendCheckFunc[ssa.BoundsSliceAcapU] = typecheck.LookupRuntimeVar("panicExtendSliceAcapU")
+ ExtendCheckFunc[ssa.BoundsSliceB] = typecheck.LookupRuntimeVar("panicExtendSliceB")
+ ExtendCheckFunc[ssa.BoundsSliceBU] = typecheck.LookupRuntimeVar("panicExtendSliceBU")
+ ExtendCheckFunc[ssa.BoundsSlice3Alen] = typecheck.LookupRuntimeVar("panicExtendSlice3Alen")
+ ExtendCheckFunc[ssa.BoundsSlice3AlenU] = typecheck.LookupRuntimeVar("panicExtendSlice3AlenU")
+ ExtendCheckFunc[ssa.BoundsSlice3Acap] = typecheck.LookupRuntimeVar("panicExtendSlice3Acap")
+ ExtendCheckFunc[ssa.BoundsSlice3AcapU] = typecheck.LookupRuntimeVar("panicExtendSlice3AcapU")
+ ExtendCheckFunc[ssa.BoundsSlice3B] = typecheck.LookupRuntimeVar("panicExtendSlice3B")
+ ExtendCheckFunc[ssa.BoundsSlice3BU] = typecheck.LookupRuntimeVar("panicExtendSlice3BU")
+ ExtendCheckFunc[ssa.BoundsSlice3C] = typecheck.LookupRuntimeVar("panicExtendSlice3C")
+ ExtendCheckFunc[ssa.BoundsSlice3CU] = typecheck.LookupRuntimeVar("panicExtendSlice3CU")
+ }
+
+ // Wasm (all asm funcs with special ABIs)
+ ir.Syms.WasmMove = typecheck.LookupRuntimeVar("wasmMove")
+ ir.Syms.WasmZero = typecheck.LookupRuntimeVar("wasmZero")
+ ir.Syms.WasmDiv = typecheck.LookupRuntimeVar("wasmDiv")
+ ir.Syms.WasmTruncS = typecheck.LookupRuntimeVar("wasmTruncS")
+ ir.Syms.WasmTruncU = typecheck.LookupRuntimeVar("wasmTruncU")
+ ir.Syms.SigPanic = typecheck.LookupRuntimeFunc("sigpanic")
+}
+
+// AbiForBodylessFuncStackMap returns the ABI for a bodyless function's stack map.
+// This is not necessarily the ABI used to call it.
+// Currently (1.17 dev) such a stack map is always ABI0;
+// any ABI wrapper that is present is nosplit, hence a precise
+// stack map is not needed there (the parameters survive only long
+// enough to call the wrapped assembly function).
+// This always returns a freshly copied ABI.
+func AbiForBodylessFuncStackMap(fn *ir.Func) *abi.ABIConfig {
+ return ssaConfig.ABI0.Copy() // No idea what races will result, be safe
+}
+
+// These are disabled but remain ready for use in case they are needed for the next regabi port.
+// TODO if they are not needed for 1.18 / next register abi port, delete them.
+const magicNameDotSuffix = ".*disabled*MagicMethodNameForTestingRegisterABI"
+const magicLastTypeName = "*disabled*MagicLastTypeNameForTestingRegisterABI"
+
+// abiForFunc implements ABI policy for a function, but does not return a copy of the ABI.
+// Passing a nil function returns the default ABI based on experiment configuration.
+func abiForFunc(fn *ir.Func, abi0, abi1 *abi.ABIConfig) *abi.ABIConfig {
+ if buildcfg.Experiment.RegabiArgs {
+ // Select the ABI based on the function's defining ABI.
+ if fn == nil {
+ return abi1
+ }
+ switch fn.ABI {
+ case obj.ABI0:
+ return abi0
+ case obj.ABIInternal:
+ // TODO(austin): Clean up the nomenclature here.
+ // It's not clear that "abi1" is ABIInternal.
+ return abi1
+ }
+ base.Fatalf("function %v has unknown ABI %v", fn, fn.ABI)
+ panic("not reachable")
+ }
+
+ a := abi0
+ if fn != nil {
+ name := ir.FuncName(fn)
+ magicName := strings.HasSuffix(name, magicNameDotSuffix)
+ if fn.Pragma&ir.RegisterParams != 0 { // TODO(register args) remove after register abi is working
+ if strings.Contains(name, ".") {
+ if !magicName {
+ base.ErrorfAt(fn.Pos(), "Calls to //go:registerparams method %s won't work, remove the pragma from the declaration.", name)
+ }
+ }
+ a = abi1
+ } else if magicName {
+ if base.FmtPos(fn.Pos()) == "<autogenerated>:1" {
+ // no way to put a pragma here, and it will error out in the real source code if they did not do it there.
+ a = abi1
+ } else {
+ base.ErrorfAt(fn.Pos(), "Methods with magic name %s (method %s) must also specify //go:registerparams", magicNameDotSuffix[1:], name)
+ }
+ }
+ if regAbiForFuncType(fn.Type().FuncType()) {
+ // fmt.Printf("Saw magic last type name for function %s\n", name)
+ a = abi1
+ }
+ }
+ return a
+}
+
+func regAbiForFuncType(ft *types.Func) bool {
+ np := ft.Params.NumFields()
+ return np > 0 && strings.Contains(ft.Params.FieldType(np-1).String(), magicLastTypeName)
+}
+
+// dvarint writes a varint v to the funcdata in symbol x and returns the new offset
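+// The encoding is little-endian base-128 (7 bits per byte, with the high bit
+// set on all but the last byte); for example, v=300 is written as the bytes
+// 0xAC 0x02.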
+func dvarint(x *obj.LSym, off int, v int64) int {
+ if v < 0 || v > 1e9 {
+ panic(fmt.Sprintf("dvarint: bad offset for funcdata - %v", v))
+ }
+ if v < 1<<7 {
+ return objw.Uint8(x, off, uint8(v))
+ }
+ off = objw.Uint8(x, off, uint8((v&127)|128))
+ if v < 1<<14 {
+ return objw.Uint8(x, off, uint8(v>>7))
+ }
+ off = objw.Uint8(x, off, uint8(((v>>7)&127)|128))
+ if v < 1<<21 {
+ return objw.Uint8(x, off, uint8(v>>14))
+ }
+ off = objw.Uint8(x, off, uint8(((v>>14)&127)|128))
+ if v < 1<<28 {
+ return objw.Uint8(x, off, uint8(v>>21))
+ }
+ off = objw.Uint8(x, off, uint8(((v>>21)&127)|128))
+ return objw.Uint8(x, off, uint8(v>>28))
+}
+
+// emitOpenDeferInfo emits FUNCDATA information about the defers in a function
+// that is using open-coded defers. This funcdata is used to determine the active
+// defers in a function and execute those defers during panic processing.
+//
+// The funcdata is all encoded in varints (since values will almost always be less than
+// 128, but stack offsets could potentially be up to 2Gbyte). All "locations" (offsets)
+// for stack variables are specified as the number of bytes below varp (pointer to the
+// top of the local variables) for their starting address. The format is:
+//
+// - Offset of the deferBits variable
+// - Number of defers in the function
+// - Information about each defer call, in reverse order of appearance in the function:
+// - Offset of the closure value to call
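+//
+// For example (illustrative), a function with two open-coded defers emits
+// four varints: the deferBits offset, the count 2, then the closure offsets
+// in reverse order (second defer first, then the first).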
+func (s *state) emitOpenDeferInfo() {
+ x := base.Ctxt.Lookup(s.curfn.LSym.Name + ".opendefer")
+ x.Set(obj.AttrContentAddressable, true)
+ s.curfn.LSym.Func().OpenCodedDeferInfo = x
+ off := 0
+ off = dvarint(x, off, -s.deferBitsTemp.FrameOffset())
+ off = dvarint(x, off, int64(len(s.openDefers)))
+
+ // Write in reverse-order, for ease of running in that order at runtime
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ off = dvarint(x, off, -r.closureNode.FrameOffset())
+ }
+}
+
+func okOffset(offset int64) int64 {
+ if offset == types.BOGUS_FUNARG_OFFSET {
+ panic(fmt.Errorf("Bogus offset %d", offset))
+ }
+ return offset
+}
+
+// buildssa builds an SSA function for fn.
+// worker indicates which of the backend workers is doing the processing.
+func buildssa(fn *ir.Func, worker int) *ssa.Func {
+ name := ir.FuncName(fn)
+ printssa := false
+ if ssaDump != "" { // match either a simple name e.g. "(*Reader).Reset", package.name e.g. "compress/gzip.(*Reader).Reset", or subpackage name "gzip.(*Reader).Reset"
+ pkgDotName := base.Ctxt.Pkgpath + "." + name
+ printssa = name == ssaDump ||
+ strings.HasSuffix(pkgDotName, ssaDump) && (pkgDotName == ssaDump || strings.HasSuffix(pkgDotName, "/"+ssaDump))
+ }
+ var astBuf *bytes.Buffer
+ if printssa {
+ astBuf = &bytes.Buffer{}
+ ir.FDumpList(astBuf, "buildssa-enter", fn.Enter)
+ ir.FDumpList(astBuf, "buildssa-body", fn.Body)
+ ir.FDumpList(astBuf, "buildssa-exit", fn.Exit)
+ if ssaDumpStdout {
+ fmt.Println("generating SSA for", name)
+ fmt.Print(astBuf.String())
+ }
+ }
+
+ var s state
+ s.pushLine(fn.Pos())
+ defer s.popLine()
+
+ s.hasdefer = fn.HasDefer()
+ if fn.Pragma&ir.CgoUnsafeArgs != 0 {
+ s.cgoUnsafeArgs = true
+ }
+ s.checkPtrEnabled = ir.ShouldCheckPtr(fn, 1)
+
+ fe := ssafn{
+ curfn: fn,
+ log: printssa && ssaDumpStdout,
+ }
+ s.curfn = fn
+
+ s.f = ssa.NewFunc(&fe)
+ s.config = ssaConfig
+ s.f.Type = fn.Type()
+ s.f.Config = ssaConfig
+ s.f.Cache = &ssaCaches[worker]
+ s.f.Cache.Reset()
+ s.f.Name = name
+ s.f.DebugTest = s.f.DebugHashMatch("GOSSAHASH")
+ s.f.PrintOrHtmlSSA = printssa
+ if fn.Pragma&ir.Nosplit != 0 {
+ s.f.NoSplit = true
+ }
+ s.f.ABI0 = ssaConfig.ABI0.Copy() // Make a copy to avoid racy map operations in type-register-width cache.
+ s.f.ABI1 = ssaConfig.ABI1.Copy()
+ s.f.ABIDefault = abiForFunc(nil, s.f.ABI0, s.f.ABI1)
+ s.f.ABISelf = abiForFunc(fn, s.f.ABI0, s.f.ABI1)
+
+ s.panics = map[funcLine]*ssa.Block{}
+ s.softFloat = s.config.SoftFloat
+
+ // Allocate starting block
+ s.f.Entry = s.f.NewBlock(ssa.BlockPlain)
+ s.f.Entry.Pos = fn.Pos()
+
+ if printssa {
+ ssaDF := ssaDumpFile
+ if ssaDir != "" {
+ ssaDF = filepath.Join(ssaDir, base.Ctxt.Pkgpath+"."+name+".html")
+ ssaD := filepath.Dir(ssaDF)
+ os.MkdirAll(ssaD, 0755)
+ }
+ s.f.HTMLWriter = ssa.NewHTMLWriter(ssaDF, s.f, ssaDumpCFG)
+ // TODO: generate and print a mapping from nodes to values and blocks
+ dumpSourcesColumn(s.f.HTMLWriter, fn)
+ s.f.HTMLWriter.WriteAST("AST", astBuf)
+ }
+
+ // Allocate starting values
+ s.labels = map[string]*ssaLabel{}
+ s.fwdVars = map[ir.Node]*ssa.Value{}
+ s.startmem = s.entryNewValue0(ssa.OpInitMem, types.TypeMem)
+
+ s.hasOpenDefers = base.Flag.N == 0 && s.hasdefer && !s.curfn.OpenCodedDeferDisallowed()
+ switch {
+ case base.Debug.NoOpenDefer != 0:
+ s.hasOpenDefers = false
+ case s.hasOpenDefers && (base.Ctxt.Flag_shared || base.Ctxt.Flag_dynlink) && base.Ctxt.Arch.Name == "386":
+ // Don't support open-coded defers for 386 ONLY when using shared
+ // libraries, because there is extra code (added by rewriteToUseGot())
+ // preceding the deferreturn/ret code that we don't track correctly.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers && len(s.curfn.Exit) > 0 {
+ // Skip doing open defers if there is any extra exit code (likely
+ // race detection), since we will not generate that code in the
+ // case of the extra deferreturn/ret segment.
+ s.hasOpenDefers = false
+ }
+ if s.hasOpenDefers {
+ // Similarly, skip if there are any heap-allocated result
+ // parameters that need to be copied back to their stack slots.
+ for _, f := range s.curfn.Type().Results().FieldSlice() {
+ if !f.Nname.(*ir.Name).OnStack() {
+ s.hasOpenDefers = false
+ break
+ }
+ }
+ }
+ if s.hasOpenDefers &&
+ s.curfn.NumReturns*s.curfn.NumDefers > 15 {
+ // Since we are generating defer calls at every exit for
+ // open-coded defers, skip doing open-coded defers if there are
+ // too many returns (especially if there are multiple defers).
+ // Open-coded defers are most important for improving performance
+ // for smaller functions (which don't have many returns).
+ s.hasOpenDefers = false
+ }
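+
+ // To summarize (descriptive note, not exhaustive -- further checks apply
+ // elsewhere): open-coded defers are used only when optimizing, the function
+ // actually defers, open-coding is not disallowed for it, there is no extra
+ // exit code, every result lives on the stack, and
+ // NumReturns*NumDefers <= 15.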
+
+ s.sp = s.entryNewValue0(ssa.OpSP, types.Types[types.TUINTPTR]) // TODO: use generic pointer type (unsafe.Pointer?) instead
+ s.sb = s.entryNewValue0(ssa.OpSB, types.Types[types.TUINTPTR])
+
+ s.startBlock(s.f.Entry)
+ s.vars[memVar] = s.startmem
+ if s.hasOpenDefers {
+ // Create the deferBits variable and stack slot. deferBits is a
+ // bitmask showing which of the open-coded defers in this function
+ // have been activated.
+ deferBitsTemp := typecheck.TempAt(src.NoXPos, s.curfn, types.Types[types.TUINT8])
+ deferBitsTemp.SetAddrtaken(true)
+ s.deferBitsTemp = deferBitsTemp
+ // For this value, AuxInt is initialized to zero by default
+ startDeferBits := s.entryNewValue0(ssa.OpConst8, types.Types[types.TUINT8])
+ s.vars[deferBitsVar] = startDeferBits
+ s.deferBitsAddr = s.addr(deferBitsTemp)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, startDeferBits)
+ // Make sure that the deferBits stack slot is kept alive (for use
+ // by panics) and stores to deferBits are not eliminated, even if
+ // all checking code on deferBits in the function exit can be
+ // eliminated, because the defer statements were all
+ // unconditional.
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, deferBitsTemp, s.mem(), false)
+ }
+
+ var params *abi.ABIParamResultInfo
+ params = s.f.ABISelf.ABIAnalyze(fn.Type(), true)
+
+ // The backend's stackframe pass prunes away entries from the fn's
+ // Dcl list, including PARAMOUT nodes that correspond to output
+ // params passed in registers. Walk the Dcl list and capture these
+ // nodes to a side list, so that we'll have them available during
+ // DWARF-gen later on. See issue 48573 for more details.
+ var debugInfo ssa.FuncDebug
+ for _, n := range fn.Dcl {
+ if n.Class == ir.PPARAMOUT && n.IsOutputParamInRegisters() {
+ debugInfo.RegOutputParams = append(debugInfo.RegOutputParams, n)
+ }
+ }
+ fn.DebugInfo = &debugInfo
+
+ // Generate addresses of local declarations
+ s.decladdrs = map[*ir.Name]*ssa.Value{}
+ for _, n := range fn.Dcl {
+ switch n.Class {
+ case ir.PPARAM:
+ // Be aware that blank and unnamed input parameters will not appear here, but do appear in the type
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ case ir.PPARAMOUT:
+ s.decladdrs[n] = s.entryNewValue2A(ssa.OpLocalAddr, types.NewPtr(n.Type()), n, s.sp, s.startmem)
+ case ir.PAUTO:
+ // processed at each use, to prevent Addr coming
+ // before the decl.
+ default:
+ s.Fatalf("local variable with class %v unimplemented", n.Class)
+ }
+ }
+
+ s.f.OwnAux = ssa.OwnAuxCall(fn.LSym, params)
+
+ // Populate SSAable arguments.
+ for _, n := range fn.Dcl {
+ if n.Class == ir.PPARAM {
+ if s.canSSA(n) {
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
+ s.vars[n] = v
+ s.addNamedValue(n, v) // This helps with debugging information, not needed for compilation itself.
+ } else { // address was taken AND/OR too large for SSA
+ paramAssignment := ssa.ParamAssignmentForArgName(s.f, n)
+ if len(paramAssignment.Registers) > 0 {
+ if TypeOK(n.Type()) { // SSA-able type, so address was taken -- receive value in OpArg, DO NOT bind to var, store immediately to memory.
+ v := s.newValue0A(ssa.OpArg, n.Type(), n)
+ s.store(n.Type(), s.decladdrs[n], v)
+ } else { // Too big for SSA.
+ // Brute force, and early, do a bunch of stores from registers
+ // TODO fix the nasty storeArgOrLoad recursion in ssa/expand_calls.go so this Just Works with store of a big Arg.
+ s.storeParameterRegsToStack(s.f.ABISelf, paramAssignment, n, s.decladdrs[n], false)
+ }
+ }
+ }
+ }
+ }
+
+ // Populate closure variables.
+ if fn.Needctxt() {
+ clo := s.entryNewValue0(ssa.OpGetClosurePtr, s.f.Config.Types.BytePtr)
+ offset := int64(types.PtrSize) // PtrSize to skip past function entry PC field
+ for _, n := range fn.ClosureVars {
+ typ := n.Type()
+ if !n.Byval() {
+ typ = types.NewPtr(typ)
+ }
+
+ offset = types.Rnd(offset, typ.Alignment())
+ ptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(typ), offset, clo)
+ offset += typ.Size()
+
+ // If n is a small variable captured by value, promote
+ // it to PAUTO so it can be converted to SSA.
+ //
+ // Note: While we never capture a variable by value if
+ // the user took its address, we may have generated
+ // runtime calls that did (#43701). Since we don't
+ // convert Addrtaken variables to SSA anyway, no point
+ // in promoting them either.
+ if n.Byval() && !n.Addrtaken() && TypeOK(n.Type()) {
+ n.Class = ir.PAUTO
+ fn.Dcl = append(fn.Dcl, n)
+ s.assign(n, s.load(n.Type(), ptr), false, 0)
+ continue
+ }
+
+ if !n.Byval() {
+ ptr = s.load(typ, ptr)
+ }
+ s.setHeapaddr(fn.Pos(), n, ptr)
+ }
+ }
+
+ // Convert the AST-based IR to the SSA-based IR
+ s.stmtList(fn.Enter)
+ s.zeroResults()
+ s.paramsToHeap()
+ s.stmtList(fn.Body)
+
+ // fallthrough to exit
+ if s.curBlock != nil {
+ s.pushLine(fn.Endlineno)
+ s.exit()
+ s.popLine()
+ }
+
+ for _, b := range s.f.Blocks {
+ if b.Pos != src.NoXPos {
+ s.updateUnsetPredPos(b)
+ }
+ }
+
+ s.f.HTMLWriter.WritePhase("before insert phis", "before insert phis")
+
+ s.insertPhis()
+
+ // Main call to ssa package to compile function
+ ssa.Compile(s.f)
+
+ if s.hasOpenDefers {
+ s.emitOpenDeferInfo()
+ }
+
+ // Record incoming parameter spill information for morestack calls emitted in the assembler.
+ // This is done here, using all the parameters (used, partially used, and unused) because
+ // it mimics the behavior of the former ABI (everything stored) and because it's not 100%
+ // clear if naming conventions are respected in autogenerated code.
+ // TODO figure out exactly what's unused, don't spill it. Make liveness fine-grained, also.
+ for _, p := range params.InParams() {
+ typs, offs := p.RegisterTypesAndOffsets()
+ for i, t := range typs {
+ o := offs[i] // offset within parameter
+ fo := p.FrameOffset(params) // offset of parameter in frame
+ reg := ssa.ObjRegForAbiReg(p.Registers[i], s.f.Config)
+ s.f.RegArgs = append(s.f.RegArgs, ssa.Spill{Reg: reg, Offset: fo + o, Type: t})
+ }
+ }
+
+ return s.f
+}
+
+func (s *state) storeParameterRegsToStack(abi *abi.ABIConfig, paramAssignment *abi.ABIParamAssignment, n *ir.Name, addr *ssa.Value, pointersOnly bool) {
+ typs, offs := paramAssignment.RegisterTypesAndOffsets()
+ for i, t := range typs {
+ if pointersOnly && !t.IsPtrShaped() {
+ continue
+ }
+ r := paramAssignment.Registers[i]
+ o := offs[i]
+ op, reg := ssa.ArgOpAndRegisterFor(r, abi)
+ aux := &ssa.AuxNameOffset{Name: n, Offset: o}
+ v := s.newValue0I(op, t, reg)
+ v.Aux = aux
+ p := s.newValue1I(ssa.OpOffPtr, types.NewPtr(t), o, addr)
+ s.store(t, p, v)
+ }
+}
+
+// zeroResults zeros the return values at the start of the function.
+// We need to do this very early in the function. Defer might stop a
+// panic and show the return values as they exist at the time of
+// panic. For precise stacks, the garbage collector assumes results
+// are always live, so we need to zero them before any allocations,
+// even allocations to move params/results to the heap.
+func (s *state) zeroResults() {
+ for _, f := range s.curfn.Type().Results().FieldSlice() {
+ n := f.Nname.(*ir.Name)
+ if !n.OnStack() {
+ // The local which points to the return value is the
+ // thing that needs zeroing. This is already handled
+ // by a Needzero annotation in plive.go:(*liveness).epilogue.
+ continue
+ }
+ // Zero the stack location containing f.
+ if typ := n.Type(); TypeOK(typ) {
+ s.assign(n, s.zeroVal(typ), false, 0)
+ } else {
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ s.zero(n.Type(), s.decladdrs[n])
+ }
+ }
+}
+
+// paramsToHeap produces code to allocate memory for heap-escaped parameters
+// and to copy non-result parameters' values from the stack.
+func (s *state) paramsToHeap() {
+ do := func(params *types.Type) {
+ for _, f := range params.FieldSlice() {
+ if f.Nname == nil {
+ continue // anonymous or blank parameter
+ }
+ n := f.Nname.(*ir.Name)
+ if ir.IsBlank(n) || n.OnStack() {
+ continue
+ }
+ s.newHeapaddr(n)
+ if n.Class == ir.PPARAM {
+ s.move(n.Type(), s.expr(n.Heapaddr), s.decladdrs[n])
+ }
+ }
+ }
+
+ typ := s.curfn.Type()
+ do(typ.Recvs())
+ do(typ.Params())
+ do(typ.Results())
+}
+
+// newHeapaddr allocates heap memory for n and sets its heap address.
+func (s *state) newHeapaddr(n *ir.Name) {
+ s.setHeapaddr(n.Pos(), n, s.newObject(n.Type()))
+}
+
+// setHeapaddr allocates a new PAUTO variable to store ptr (which must be non-nil)
+// and then sets it as n's heap address.
+func (s *state) setHeapaddr(pos src.XPos, n *ir.Name, ptr *ssa.Value) {
+ if !ptr.Type.IsPtr() || !types.Identical(n.Type(), ptr.Type.Elem()) {
+ base.FatalfAt(n.Pos(), "setHeapaddr %L with type %v", n, ptr.Type)
+ }
+
+ // Declare variable to hold address.
+ addr := ir.NewNameAt(pos, &types.Sym{Name: "&" + n.Sym().Name, Pkg: types.LocalPkg})
+ addr.SetType(types.NewPtr(n.Type()))
+ addr.Class = ir.PAUTO
+ addr.SetUsed(true)
+ addr.Curfn = s.curfn
+ s.curfn.Dcl = append(s.curfn.Dcl, addr)
+ types.CalcSize(addr.Type())
+
+ if n.Class == ir.PPARAMOUT {
+ addr.SetIsOutputParamHeapAddr(true)
+ }
+
+ n.Heapaddr = addr
+ s.assign(addr, ptr, false, 0)
+}
+
+// newObject returns an SSA value denoting new(typ).
+func (s *state) newObject(typ *types.Type) *ssa.Value {
+ if typ.Size() == 0 {
+ return s.newValue1A(ssa.OpAddr, types.NewPtr(typ), ir.Syms.Zerobase, s.sb)
+ }
+ return s.rtcall(ir.Syms.Newobject, true, []*types.Type{types.NewPtr(typ)}, s.reflectType(typ))[0]
+}
+
+func (s *state) checkPtrAlignment(n *ir.ConvExpr, v *ssa.Value, count *ssa.Value) {
+ if !n.Type().IsPtr() {
+ s.Fatalf("expected pointer type: %v", n.Type())
+ }
+ elem := n.Type().Elem()
+ if count != nil {
+ if !elem.IsArray() {
+ s.Fatalf("expected array type: %v", elem)
+ }
+ elem = elem.Elem()
+ }
+ size := elem.Size()
+ // Casting from larger type to smaller one is ok, so for smallest type, do nothing.
+ if elem.Alignment() == 1 && (size == 0 || size == 1 || count == nil) {
+ return
+ }
+ if count == nil {
+ count = s.constInt(types.Types[types.TUINTPTR], 1)
+ }
+ if count.Type.Size() != s.config.PtrSize {
+ s.Fatalf("expected count fit to an uintptr size, have: %d, want: %d", count.Type.Size(), s.config.PtrSize)
+ }
+ s.rtcall(ir.Syms.CheckPtrAlignment, true, nil, v, s.reflectType(elem), count)
+}
+
+// reflectType returns an SSA value representing a pointer to typ's
+// reflection type descriptor.
+func (s *state) reflectType(typ *types.Type) *ssa.Value {
+ lsym := reflectdata.TypeLinksym(typ)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(types.Types[types.TUINT8]), lsym, s.sb)
+}
+
+func dumpSourcesColumn(writer *ssa.HTMLWriter, fn *ir.Func) {
+ // Read sources of target function fn.
+ fname := base.Ctxt.PosTable.Pos(fn.Pos()).Filename()
+ targetFn, err := readFuncLines(fname, fn.Pos().Line(), fn.Endlineno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for function %v: %v", fn, err)
+ }
+
+ // Read sources of inlined functions.
+ var inlFns []*ssa.FuncLines
+ for _, fi := range ssaDumpInlined {
+ elno := fi.Endlineno
+ fname := base.Ctxt.PosTable.Pos(fi.Pos()).Filename()
+ fnLines, err := readFuncLines(fname, fi.Pos().Line(), elno.Line())
+ if err != nil {
+ writer.Logf("cannot read sources for inlined function %v: %v", fi, err)
+ continue
+ }
+ inlFns = append(inlFns, fnLines)
+ }
+
+ sort.Sort(ssa.ByTopo(inlFns))
+ if targetFn != nil {
+ inlFns = append([]*ssa.FuncLines{targetFn}, inlFns...)
+ }
+
+ writer.WriteSources("sources", inlFns)
+}
+
+func readFuncLines(file string, start, end uint) (*ssa.FuncLines, error) {
+ f, err := os.Open(os.ExpandEnv(file))
+ if err != nil {
+ return nil, err
+ }
+ defer f.Close()
+ var lines []string
+ ln := uint(1)
+ scanner := bufio.NewScanner(f)
+ for scanner.Scan() && ln <= end {
+ if ln >= start {
+ lines = append(lines, scanner.Text())
+ }
+ ln++
+ }
+ return &ssa.FuncLines{Filename: file, StartLineno: start, Lines: lines}, nil
+}
+
+// updateUnsetPredPos propagates the earliest-value position information for b
+// towards all of b's predecessors that need a position, and recurs on that
+// predecessor if its position is updated. B should have a non-empty position.
+func (s *state) updateUnsetPredPos(b *ssa.Block) {
+ if b.Pos == src.NoXPos {
+ s.Fatalf("Block %s should have a position", b)
+ }
+ bestPos := src.NoXPos
+ for _, e := range b.Preds {
+ p := e.Block()
+ if !p.LackingPos() {
+ continue
+ }
+ if bestPos == src.NoXPos {
+ bestPos = b.Pos
+ for _, v := range b.Values {
+ if v.LackingPos() {
+ continue
+ }
+ if v.Pos != src.NoXPos {
+ // Assume values are still in roughly textual order;
+ // TODO: could also seek minimum position?
+ bestPos = v.Pos
+ break
+ }
+ }
+ }
+ p.Pos = bestPos
+ s.updateUnsetPredPos(p) // We do not expect long chains of these, thus recursion is okay.
+ }
+}
+
+// Information about each open-coded defer.
+type openDeferInfo struct {
+ // The node representing the call of the defer
+ n *ir.CallExpr
+ // If the defer call is a closure call, the address of the argtmp where the
+ // closure is stored.
+ closure *ssa.Value
+ // The node representing the argtmp where the closure is stored - used for
+ // function, method, or interface call, to store a closure that panic
+ // processing can use for this defer.
+ closureNode *ir.Name
+}
+
+type state struct {
+ // configuration (arch) information
+ config *ssa.Config
+
+ // function we're building
+ f *ssa.Func
+
+ // Node for function
+ curfn *ir.Func
+
+ // labels in f
+ labels map[string]*ssaLabel
+
+ // unlabeled break and continue statement tracking
+ breakTo *ssa.Block // current target for plain break statement
+ continueTo *ssa.Block // current target for plain continue statement
+
+ // current location where we're interpreting the AST
+ curBlock *ssa.Block
+
+ // variable assignments in the current block (map from variable symbol to ssa value)
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ // TODO: keep a single varnum map, then make all of these maps slices instead?
+ vars map[ir.Node]*ssa.Value
+
+ // fwdVars are variables that are used before they are defined in the current block.
+ // This map exists just to coalesce multiple references into a single FwdRef op.
+ // *Node is the unique identifier (an ONAME Node) for the variable.
+ fwdVars map[ir.Node]*ssa.Value
+
+ // all defined variables at the end of each block. Indexed by block ID.
+ defvars []map[ir.Node]*ssa.Value
+
+ // addresses of PPARAM and PPARAMOUT variables on the stack.
+ decladdrs map[*ir.Name]*ssa.Value
+
+ // starting values. Memory, stack pointer, and globals pointer
+ startmem *ssa.Value
+ sp *ssa.Value
+ sb *ssa.Value
+ // value representing address of where deferBits autotmp is stored
+ deferBitsAddr *ssa.Value
+ deferBitsTemp *ir.Name
+
+ // line number stack. The current line number is top of stack
+ line []src.XPos
+ // the last line number processed; it may have been popped
+ lastPos src.XPos
+
+ // list of panic calls by function name and line number.
+ // Used to deduplicate panic calls.
+ panics map[funcLine]*ssa.Block
+
+ cgoUnsafeArgs bool
+ hasdefer bool // whether the function contains a defer statement
+ softFloat bool
+ hasOpenDefers bool // whether we are doing open-coded defers
+ checkPtrEnabled bool // whether to insert checkptr instrumentation
+
+ // If doing open-coded defers, list of info about the defer calls in
+ // scanning order. Hence, at exit we should run these defers in reverse
+ // order of this list
+ openDefers []*openDeferInfo
+ // For open-coded defers, this is the beginning and end blocks of the last
+ // defer exit code that we have generated so far. We use these to share
+ // code between exits if the shareDeferExits option (disabled by default)
+ // is on.
+ lastDeferExit *ssa.Block // Entry block of last defer exit code we generated
+ lastDeferFinalBlock *ssa.Block // Final block of last defer exit code we generated
+ lastDeferCount int // Number of defers encountered at that point
+
+ prevCall *ssa.Value // the previous call; use this to tie results to the call op.
+}
+
+type funcLine struct {
+ f *obj.LSym
+ base *src.PosBase
+ line uint
+}
+
+type ssaLabel struct {
+ target *ssa.Block // block identified by this label
+ breakTarget *ssa.Block // block to break to in control flow node identified by this label
+ continueTarget *ssa.Block // block to continue to in control flow node identified by this label
+}
+
+// label returns the label associated with sym, creating it if necessary.
+func (s *state) label(sym *types.Sym) *ssaLabel {
+ lab := s.labels[sym.Name]
+ if lab == nil {
+ lab = new(ssaLabel)
+ s.labels[sym.Name] = lab
+ }
+ return lab
+}
+
+func (s *state) Logf(msg string, args ...interface{}) { s.f.Logf(msg, args...) }
+func (s *state) Log() bool { return s.f.Log() }
+func (s *state) Fatalf(msg string, args ...interface{}) {
+ s.f.Frontend().Fatalf(s.peekPos(), msg, args...)
+}
+func (s *state) Warnl(pos src.XPos, msg string, args ...interface{}) { s.f.Warnl(pos, msg, args...) }
+func (s *state) Debug_checknil() bool { return s.f.Frontend().Debug_checknil() }
+
+func ssaMarker(name string) *ir.Name {
+ return typecheck.NewName(&types.Sym{Name: name})
+}
+
+var (
+ // marker node for the memory variable
+ memVar = ssaMarker("mem")
+
+ // marker nodes for temporary variables
+ ptrVar = ssaMarker("ptr")
+ lenVar = ssaMarker("len")
+ newlenVar = ssaMarker("newlen")
+ capVar = ssaMarker("cap")
+ typVar = ssaMarker("typ")
+ okVar = ssaMarker("ok")
+ deferBitsVar = ssaMarker("deferBits")
+)
+
+// startBlock sets the current block we're generating code in to b.
+func (s *state) startBlock(b *ssa.Block) {
+ if s.curBlock != nil {
+ s.Fatalf("starting block %v when block %v has not ended", b, s.curBlock)
+ }
+ s.curBlock = b
+ s.vars = map[ir.Node]*ssa.Value{}
+ for n := range s.fwdVars {
+ delete(s.fwdVars, n)
+ }
+}
+
+// endBlock marks the end of generating code for the current block.
+// Returns the (former) current block. Returns nil if there is no current
+// block, i.e. if no code flows to the current execution point.
+func (s *state) endBlock() *ssa.Block {
+ b := s.curBlock
+ if b == nil {
+ return nil
+ }
+ for len(s.defvars) <= int(b.ID) {
+ s.defvars = append(s.defvars, nil)
+ }
+ s.defvars[b.ID] = s.vars
+ s.curBlock = nil
+ s.vars = nil
+ if b.LackingPos() {
+ // Empty plain blocks get the line of their successor (handled after all blocks created),
+ // except for increment blocks in For statements (handled in ssa conversion of OFOR),
+ // and for blocks ending in GOTO/BREAK/CONTINUE.
+ b.Pos = src.NoXPos
+ } else {
+ b.Pos = s.lastPos
+ }
+ return b
+}
+
+// pushLine pushes a line number on the line number stack.
+func (s *state) pushLine(line src.XPos) {
+ if !line.IsKnown() {
+ // the frontend may emit a node with a missing line number;
+ // use the parent's line number in this case.
+ line = s.peekPos()
+ if base.Flag.K != 0 {
+ base.Warn("buildssa: unknown position (line 0)")
+ }
+ } else {
+ s.lastPos = line
+ }
+
+ s.line = append(s.line, line)
+}
+
+// popLine pops the top of the line number stack.
+func (s *state) popLine() {
+ s.line = s.line[:len(s.line)-1]
+}
+
+// peekPos peeks the top of the line number stack.
+func (s *state) peekPos() src.XPos {
+ return s.line[len(s.line)-1]
+}
+
+// newValue0 adds a new value with no arguments to the current block.
+func (s *state) newValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.curBlock.NewValue0(s.peekPos(), op, t)
+}
+
+// newValue0A adds a new value with no arguments and an aux value to the current block.
+func (s *state) newValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+ return s.curBlock.NewValue0A(s.peekPos(), op, t, aux)
+}
+
+// newValue0I adds a new value with no arguments and an auxint value to the current block.
+func (s *state) newValue0I(op ssa.Op, t *types.Type, auxint int64) *ssa.Value {
+ return s.curBlock.NewValue0I(s.peekPos(), op, t, auxint)
+}
+
+// newValue1 adds a new value with one argument to the current block.
+func (s *state) newValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1(s.peekPos(), op, t, arg)
+}
+
+// newValue1A adds a new value with one argument and an aux value to the current block.
+func (s *state) newValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue1Apos adds a new value with one argument and an aux value to the current block.
+// isStmt determines whether the created value may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue1Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue1A(s.peekPos(), op, t, aux, arg)
+ }
+ return s.curBlock.NewValue1A(s.peekPos().WithNotStmt(), op, t, aux, arg)
+}
+
+// newValue1I adds a new value with one argument and an auxint value to the current block.
+func (s *state) newValue1I(op ssa.Op, t *types.Type, aux int64, arg *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue1I(s.peekPos(), op, t, aux, arg)
+}
+
+// newValue2 adds a new value with two arguments to the current block.
+func (s *state) newValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2(s.peekPos(), op, t, arg0, arg1)
+}
+
+// newValue2A adds a new value with two arguments and an aux value to the current block.
+func (s *state) newValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue2Apos adds a new value with two arguments and an aux value to the current block.
+// isStmt determines whether the created value may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue2Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue2A(s.peekPos(), op, t, aux, arg0, arg1)
+ }
+ return s.curBlock.NewValue2A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1)
+}
+
+// newValue2I adds a new value with two arguments and an auxint value to the current block.
+func (s *state) newValue2I(op ssa.Op, t *types.Type, aux int64, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue2I(s.peekPos(), op, t, aux, arg0, arg1)
+}
+
+// newValue3 adds a new value with three arguments to the current block.
+func (s *state) newValue3(op ssa.Op, t *types.Type, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3(s.peekPos(), op, t, arg0, arg1, arg2)
+}
+
+// newValue3I adds a new value with three arguments and an auxint value to the current block.
+func (s *state) newValue3I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3I(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3A adds a new value with three arguments and an aux value to the current block.
+func (s *state) newValue3A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue3Apos adds a new value with three arguments and an aux value to the current block.
+// isStmt determines whether the created value may be a statement or not
+// (i.e., false means never, true means maybe).
+func (s *state) newValue3Apos(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1, arg2 *ssa.Value, isStmt bool) *ssa.Value {
+ if isStmt {
+ return s.curBlock.NewValue3A(s.peekPos(), op, t, aux, arg0, arg1, arg2)
+ }
+ return s.curBlock.NewValue3A(s.peekPos().WithNotStmt(), op, t, aux, arg0, arg1, arg2)
+}
+
+// newValue4 adds a new value with four arguments to the current block.
+func (s *state) newValue4(op ssa.Op, t *types.Type, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4(s.peekPos(), op, t, arg0, arg1, arg2, arg3)
+}
+
+// newValue4I adds a new value with four arguments and an auxint value to the current block.
+func (s *state) newValue4I(op ssa.Op, t *types.Type, aux int64, arg0, arg1, arg2, arg3 *ssa.Value) *ssa.Value {
+ return s.curBlock.NewValue4I(s.peekPos(), op, t, aux, arg0, arg1, arg2, arg3)
+}
+
+func (s *state) entryBlock() *ssa.Block {
+ b := s.f.Entry
+ if base.Flag.N > 0 && s.curBlock != nil {
+ // If optimizations are off, allocate in current block instead. Since with -N
+ // we're not doing the CSE or tighten passes, putting lots of stuff in the
+ // entry block leads to O(n^2) entries in the live value map during regalloc.
+ // See issue 45897.
+ b = s.curBlock
+ }
+ return b
+}
+
+// entryNewValue0 adds a new value with no arguments to the entry block.
+func (s *state) entryNewValue0(op ssa.Op, t *types.Type) *ssa.Value {
+ return s.entryBlock().NewValue0(src.NoXPos, op, t)
+}
+
+// entryNewValue0A adds a new value with no arguments and an aux value to the entry block.
+func (s *state) entryNewValue0A(op ssa.Op, t *types.Type, aux ssa.Aux) *ssa.Value {
+ return s.entryBlock().NewValue0A(src.NoXPos, op, t, aux)
+}
+
+// entryNewValue1 adds a new value with one argument to the entry block.
+func (s *state) entryNewValue1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1(src.NoXPos, op, t, arg)
+}
+
+// entryNewValue1I adds a new value with one argument and an auxint value to the entry block.
+func (s *state) entryNewValue1I(op ssa.Op, t *types.Type, auxint int64, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1I(src.NoXPos, op, t, auxint, arg)
+}
+
+// entryNewValue1A adds a new value with one argument and an aux value to the entry block.
+func (s *state) entryNewValue1A(op ssa.Op, t *types.Type, aux ssa.Aux, arg *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue1A(src.NoXPos, op, t, aux, arg)
+}
+
+// entryNewValue2 adds a new value with two arguments to the entry block.
+func (s *state) entryNewValue2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue2(src.NoXPos, op, t, arg0, arg1)
+}
+
+// entryNewValue2A adds a new value with two arguments and an aux value to the entry block.
+func (s *state) entryNewValue2A(op ssa.Op, t *types.Type, aux ssa.Aux, arg0, arg1 *ssa.Value) *ssa.Value {
+ return s.entryBlock().NewValue2A(src.NoXPos, op, t, aux, arg0, arg1)
+}
+
+// const* routines add a new const value to the entry block.
+func (s *state) constSlice(t *types.Type) *ssa.Value {
+ return s.f.ConstSlice(t)
+}
+func (s *state) constInterface(t *types.Type) *ssa.Value {
+ return s.f.ConstInterface(t)
+}
+func (s *state) constNil(t *types.Type) *ssa.Value { return s.f.ConstNil(t) }
+func (s *state) constEmptyString(t *types.Type) *ssa.Value {
+ return s.f.ConstEmptyString(t)
+}
+func (s *state) constBool(c bool) *ssa.Value {
+ return s.f.ConstBool(types.Types[types.TBOOL], c)
+}
+func (s *state) constInt8(t *types.Type, c int8) *ssa.Value {
+ return s.f.ConstInt8(t, c)
+}
+func (s *state) constInt16(t *types.Type, c int16) *ssa.Value {
+ return s.f.ConstInt16(t, c)
+}
+func (s *state) constInt32(t *types.Type, c int32) *ssa.Value {
+ return s.f.ConstInt32(t, c)
+}
+func (s *state) constInt64(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstInt64(t, c)
+}
+func (s *state) constFloat32(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat32(t, c)
+}
+func (s *state) constFloat64(t *types.Type, c float64) *ssa.Value {
+ return s.f.ConstFloat64(t, c)
+}
+func (s *state) constInt(t *types.Type, c int64) *ssa.Value {
+ if s.config.PtrSize == 8 {
+ return s.constInt64(t, c)
+ }
+ if int64(int32(c)) != c {
+ s.Fatalf("integer constant too big %d", c)
+ }
+ return s.constInt32(t, int32(c))
+}
+func (s *state) constOffPtrSP(t *types.Type, c int64) *ssa.Value {
+ return s.f.ConstOffPtrSP(t, c, s.sp)
+}
+
+// newValueOrSfCall* are wrappers around newValue*, which may create a call to a
+// soft-float runtime function instead (when emitting soft-float code).
+func (s *state) newValueOrSfCall1(op ssa.Op, t *types.Type, arg *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg); ok {
+ return c
+ }
+ }
+ return s.newValue1(op, t, arg)
+}
+func (s *state) newValueOrSfCall2(op ssa.Op, t *types.Type, arg0, arg1 *ssa.Value) *ssa.Value {
+ if s.softFloat {
+ if c, ok := s.sfcall(op, arg0, arg1); ok {
+ return c
+ }
+ }
+ return s.newValue2(op, t, arg0, arg1)
+}
+
+type instrumentKind uint8
+
+const (
+ instrumentRead = iota
+ instrumentWrite
+ instrumentMove
+)
+
+func (s *state) instrument(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ s.instrument2(t, addr, nil, kind)
+}
+
+// instrumentFields instruments a read/write operation on addr.
+// If it is instrumenting for MSAN or ASAN and t is a struct type, it instruments
+// the operation for each field, instead of for the whole struct.
+func (s *state) instrumentFields(t *types.Type, addr *ssa.Value, kind instrumentKind) {
+ if !(base.Flag.MSan || base.Flag.ASan) || !t.IsStruct() {
+ s.instrument(t, addr, kind)
+ return
+ }
+ for _, f := range t.Fields().Slice() {
+ if f.Sym.IsBlank() {
+ continue
+ }
+ offptr := s.newValue1I(ssa.OpOffPtr, types.NewPtr(f.Type), f.Offset, addr)
+ s.instrumentFields(f.Type, offptr, kind)
+ }
+}
+
+func (s *state) instrumentMove(t *types.Type, dst, src *ssa.Value) {
+ if base.Flag.MSan {
+ s.instrument2(t, dst, src, instrumentMove)
+ } else {
+ s.instrument(t, src, instrumentRead)
+ s.instrument(t, dst, instrumentWrite)
+ }
+}
+
+func (s *state) instrument2(t *types.Type, addr, addr2 *ssa.Value, kind instrumentKind) {
+ if !s.curfn.InstrumentBody() {
+ return
+ }
+
+ w := t.Size()
+ if w == 0 {
+ return // can't race on zero-sized things
+ }
+
+ if ssa.IsSanitizerSafeAddr(addr) {
+ return
+ }
+
+ var fn *obj.LSym
+ needWidth := false
+
+ if addr2 != nil && kind != instrumentMove {
+ panic("instrument2: non-nil addr2 for non-move instrumentation")
+ }
+
+ if base.Flag.MSan {
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Msanread
+ case instrumentWrite:
+ fn = ir.Syms.Msanwrite
+ case instrumentMove:
+ fn = ir.Syms.Msanmove
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if base.Flag.Race && t.NumComponents(types.CountBlankFields) > 1 {
+ // for composite objects we have to write every address
+ // because a write might happen to any subobject.
+ // composites with only one element don't have subobjects, though.
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Racereadrange
+ case instrumentWrite:
+ fn = ir.Syms.Racewriterange
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else if base.Flag.Race {
+ // for non-composite objects we can write just the start
+ // address, as any write must write the first byte.
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Raceread
+ case instrumentWrite:
+ fn = ir.Syms.Racewrite
+ default:
+ panic("unreachable")
+ }
+ } else if base.Flag.ASan {
+ switch kind {
+ case instrumentRead:
+ fn = ir.Syms.Asanread
+ case instrumentWrite:
+ fn = ir.Syms.Asanwrite
+ default:
+ panic("unreachable")
+ }
+ needWidth = true
+ } else {
+ panic("unreachable")
+ }
+
+ args := []*ssa.Value{addr}
+ if addr2 != nil {
+ args = append(args, addr2)
+ }
+ if needWidth {
+ args = append(args, s.constInt(types.Types[types.TUINTPTR], w))
+ }
+ s.rtcall(fn, true, nil, args...)
+}
+
+func (s *state) load(t *types.Type, src *ssa.Value) *ssa.Value {
+ s.instrumentFields(t, src, instrumentRead)
+ return s.rawLoad(t, src)
+}
+
+func (s *state) rawLoad(t *types.Type, src *ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpLoad, t, src, s.mem())
+}
+
+func (s *state) store(t *types.Type, dst, val *ssa.Value) {
+ s.vars[memVar] = s.newValue3A(ssa.OpStore, types.TypeMem, t, dst, val, s.mem())
+}
+
+func (s *state) zero(t *types.Type, dst *ssa.Value) {
+ s.instrument(t, dst, instrumentWrite)
+ store := s.newValue2I(ssa.OpZero, types.TypeMem, t.Size(), dst, s.mem())
+ store.Aux = t
+ s.vars[memVar] = store
+}
+
+func (s *state) move(t *types.Type, dst, src *ssa.Value) {
+ s.moveWhichMayOverlap(t, dst, src, false)
+}
+func (s *state) moveWhichMayOverlap(t *types.Type, dst, src *ssa.Value, mayOverlap bool) {
+ s.instrumentMove(t, dst, src)
+ if mayOverlap && t.IsArray() && t.NumElem() > 1 && !ssa.IsInlinableMemmove(dst, src, t.Size(), s.f.Config) {
+ // Normally, when moving Go values of type T from one location to another,
+ // we don't need to worry about partial overlaps. The two Ts must either be
+ // in disjoint (nonoverlapping) memory or in exactly the same location.
+ // There are 2 cases where this isn't true:
+ // 1) Using unsafe you can arrange partial overlaps.
+ // 2) Since Go 1.17, you can use a cast from a slice to a ptr-to-array.
+ // https://go.dev/ref/spec#Conversions_from_slice_to_array_pointer
+ // This feature can be used to construct partial overlaps of array types.
+ // var a [3]int
+ // p := (*[2]int)(a[:])
+ // q := (*[2]int)(a[1:])
+ // *p = *q
+ // We don't care about solving 1. Or at least, we haven't historically
+ // and no one has complained.
+ // For 2, we need to ensure that if there might be partial overlap,
+ // then we can't use OpMove; we must use memmove instead.
+ // (memmove handles partial overlap by copying in the correct
+ // direction. OpMove does not.)
+ //
+ // Note that we have to be careful here not to introduce a call when
+ // we're marshaling arguments to a call or unmarshaling results from a call.
+ // Cases where this is happening must pass mayOverlap as false.
+ // (Currently this only happens when unmarshaling results of a call.)
+ if t.HasPointers() {
+ s.rtcall(ir.Syms.Typedmemmove, true, nil, s.reflectType(t), dst, src)
+ // We would have otherwise implemented this move with straightline code,
+ // including a write barrier. Pretend we issue a write barrier here,
+ // so that the write barrier tests work. (Otherwise they'd need to know
+ // the details of IsInlinableMemmove.)
+ s.curfn.SetWBPos(s.peekPos())
+ } else {
+ s.rtcall(ir.Syms.Memmove, true, nil, dst, src, s.constInt(types.Types[types.TUINTPTR], t.Size()))
+ }
+ ssa.LogLargeCopy(s.f.Name, s.peekPos(), t.Size())
+ return
+ }
+ store := s.newValue3I(ssa.OpMove, types.TypeMem, t.Size(), dst, src, s.mem())
+ store.Aux = t
+ s.vars[memVar] = store
+}
+
+// stmtList converts the statement list n to SSA and adds it to s.
+func (s *state) stmtList(l ir.Nodes) {
+ for _, n := range l {
+ s.stmt(n)
+ }
+}
+
+// stmt converts the statement n to SSA and adds it to s.
+func (s *state) stmt(n ir.Node) {
+ if !(n.Op() == ir.OVARKILL || n.Op() == ir.OVARLIVE || n.Op() == ir.OVARDEF) {
+ // OVARKILL, OVARLIVE, and OVARDEF are invisible to the programmer, so we don't use their line numbers to avoid confusion in debugging.
+ s.pushLine(n.Pos())
+ defer s.popLine()
+ }
+
+ // If s.curBlock is nil, and n isn't a label (which might have an associated goto somewhere),
+ // then this code is dead. Stop here.
+ if s.curBlock == nil && n.Op() != ir.OLABEL {
+ return
+ }
+
+ s.stmtList(n.Init())
+ switch n.Op() {
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ s.stmtList(n.List)
+
+ // No-ops
+ case ir.ODCLCONST, ir.ODCLTYPE, ir.OFALL:
+
+ // Expression statements
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if ir.IsIntrinsicCall(n) {
+ s.intrinsicCall(n)
+ return
+ }
+ fallthrough
+
+ case ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ s.callResult(n, callNormal)
+ if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME && n.X.(*ir.Name).Class == ir.PFUNC {
+ if fn := n.X.Sym().Name; base.Flag.CompilingRuntime && fn == "throw" ||
+ n.X.Sym().Pkg == ir.Pkgs.Runtime && (fn == "throwinit" || fn == "gopanic" || fn == "panicwrap" || fn == "block" || fn == "panicmakeslicelen" || fn == "panicmakeslicecap") {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ // TODO: never rewrite OPANIC to OCALLFUNC in the
+ // first place. Need to wait until all backends
+ // go through SSA.
+ }
+ }
+ case ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ if base.Debug.Defer > 0 {
+ var defertype string
+ if s.hasOpenDefers {
+ defertype = "open-coded"
+ } else if n.Esc() == ir.EscNever {
+ defertype = "stack-allocated"
+ } else {
+ defertype = "heap-allocated"
+ }
+ base.WarnfAt(n.Pos(), "%s defer", defertype)
+ }
+ if s.hasOpenDefers {
+ s.openDeferRecord(n.Call.(*ir.CallExpr))
+ } else {
+ d := callDefer
+ if n.Esc() == ir.EscNever {
+ d = callDeferStack
+ }
+ s.callResult(n.Call.(*ir.CallExpr), d)
+ }
+ case ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ s.callResult(n.Call.(*ir.CallExpr), callGo)
+
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ var res, resok *ssa.Value
+ if n.Rhs[0].Op() == ir.ODOTTYPE2 {
+ res, resok = s.dottype(n.Rhs[0].(*ir.TypeAssertExpr), true)
+ } else {
+ res, resok = s.dynamicDottype(n.Rhs[0].(*ir.DynamicTypeAssertExpr), true)
+ }
+ deref := false
+ if !TypeOK(n.Rhs[0].Type()) {
+ if res.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ mem := s.mem()
+ if mem.Op == ssa.OpVarKill {
+ mem = mem.Args[0]
+ }
+ if res.Args[1] != mem {
+ s.Fatalf("memory no longer live from 2-result dottype load")
+ }
+ deref = true
+ res = res.Args[0]
+ }
+ s.assign(n.Lhs[0], res, deref, 0)
+ s.assign(n.Lhs[1], resok, false, 0)
+ return
+
+ case ir.OAS2FUNC:
+ // We come here only when it is an intrinsic call returning two values.
+ n := n.(*ir.AssignListStmt)
+ call := n.Rhs[0].(*ir.CallExpr)
+ if !ir.IsIntrinsicCall(call) {
+ s.Fatalf("non-intrinsic AS2FUNC not expanded %v", call)
+ }
+ v := s.intrinsicCall(call)
+ v1 := s.newValue1(ssa.OpSelect0, n.Lhs[0].Type(), v)
+ v2 := s.newValue1(ssa.OpSelect1, n.Lhs[1].Type(), v)
+ s.assign(n.Lhs[0], v1, false, 0)
+ s.assign(n.Lhs[1], v2, false, 0)
+ return
+
+ case ir.ODCL:
+ n := n.(*ir.Decl)
+ if v := n.X; v.Esc() == ir.EscHeap {
+ s.newHeapaddr(v)
+ }
+
+ case ir.OLABEL:
+ n := n.(*ir.LabelStmt)
+ sym := n.Label
+ lab := s.label(sym)
+
+ // The label might already have a target block via a goto.
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ // Go to that label.
+ // (We pretend "label:" is preceded by "goto label", unless the predecessor is unreachable.)
+ if s.curBlock != nil {
+ b := s.endBlock()
+ b.AddEdgeTo(lab.target)
+ }
+ s.startBlock(lab.target)
+
+ case ir.OGOTO:
+ n := n.(*ir.BranchStmt)
+ sym := n.Label
+
+ lab := s.label(sym)
+ if lab.target == nil {
+ lab.target = s.f.NewBlock(ssa.BlockPlain)
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(lab.target)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if n.X == n.Y && n.X.Op() == ir.ONAME {
+ // An x=x assignment. No point in doing anything
+ // here. In addition, skipping this assignment
+ // prevents generating:
+ // VARDEF x
+ // COPY x -> x
+ // which is bad because x is incorrectly considered
+ // dead before the vardef. See issue #14904.
+ return
+ }
+
+ // mayOverlap keeps track of whether the LHS and RHS might
+ // refer to overlapping memory.
+ mayOverlap := true
+ if n.Y == nil {
+ // Not a move at all, mayOverlap is not relevant.
+ } else if n.Def {
+ // A variable being defined cannot overlap anything else.
+ mayOverlap = false
+ } else if n.X.Op() == ir.ONAME && n.Y.Op() == ir.ONAME {
+ // Two named things never overlap.
+ // (Or they are identical, which we treat as nonoverlapping.)
+ mayOverlap = false
+ } else if n.Y.Op() == ir.ODEREF {
+ p := n.Y.(*ir.StarExpr).X
+ for p.Op() == ir.OCONVNOP {
+ p = p.(*ir.ConvExpr).X
+ }
+ if p.Op() == ir.OSPTR && p.(*ir.UnaryExpr).X.Type().IsString() {
+ // Pointer fields of strings point to unmodifiable memory.
+ // That memory can't overlap with the memory being written.
+ mayOverlap = false
+ }
+ } else if n.Y.Op() == ir.ORESULT || n.Y.Op() == ir.OCALLFUNC || n.Y.Op() == ir.OCALLINTER {
+ // When copying values out of the return area of a call, we know
+ // the source and destination don't overlap. Importantly, we must
+ // set mayOverlap so we don't introduce a call to memmove while
+ // we still have live data in the argument area.
+ mayOverlap = false
+ }
+
+ // Evaluate RHS.
+ rhs := n.Y
+ if rhs != nil {
+ switch rhs.Op() {
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !ir.IsZero(rhs) {
+ s.Fatalf("literal with nonzero value in SSA: %v", rhs)
+ }
+ rhs = nil
+ case ir.OAPPEND:
+ rhs := rhs.(*ir.CallExpr)
+ // Check whether we're writing the result of an append back to the same slice.
+ // If so, we handle it specially to avoid write barriers on the fast
+ // (non-growth) path.
+ if !ir.SameSafeExpr(n.X, rhs.Args[0]) || base.Flag.N != 0 {
+ break
+ }
+ // If the slice can be SSA'd, it'll be on the stack,
+ // so there will be no write barriers,
+ // so there's no need to attempt to prevent them.
+ if s.canSSA(n.X) {
+ if base.Debug.Append > 0 { // replicating old diagnostic message
+ base.WarnfAt(n.Pos(), "append: len-only update (in local slice)")
+ }
+ break
+ }
+ if base.Debug.Append > 0 {
+ base.WarnfAt(n.Pos(), "append: len-only update")
+ }
+ s.append(rhs, true)
+ return
+ }
+ }
+
+ if ir.IsBlank(n.X) {
+ // _ = rhs
+ // Just evaluate rhs for side-effects.
+ if rhs != nil {
+ s.expr(rhs)
+ }
+ return
+ }
+
+ var t *types.Type
+ if n.Y != nil {
+ t = n.Y.Type()
+ } else {
+ t = n.X.Type()
+ }
+
+ var r *ssa.Value
+ deref := !TypeOK(t)
+ if deref {
+ if rhs == nil {
+ r = nil // Signal assign to use OpZero.
+ } else {
+ r = s.addr(rhs)
+ }
+ } else {
+ if rhs == nil {
+ r = s.zeroVal(t)
+ } else {
+ r = s.expr(rhs)
+ }
+ }
+
+ var skip skipMask
+ if rhs != nil && (rhs.Op() == ir.OSLICE || rhs.Op() == ir.OSLICE3 || rhs.Op() == ir.OSLICESTR) && ir.SameSafeExpr(rhs.(*ir.SliceExpr).X, n.X) {
+ // We're assigning a slicing operation back to its source.
+ // Don't write back fields we aren't changing. See issue #14855.
+ rhs := rhs.(*ir.SliceExpr)
+ i, j, k := rhs.Low, rhs.High, rhs.Max
+ if i != nil && (i.Op() == ir.OLITERAL && i.Val().Kind() == constant.Int && ir.Int64Val(i) == 0) {
+ // [0:...] is the same as [:...]
+ i = nil
+ }
+ // TODO: detect defaults for len/cap also.
+ // Currently doesn't really work because (*p)[:len(*p)] appears here as:
+ // tmp = len(*p)
+ // (*p)[:tmp]
+ //if j != nil && (j.Op == OLEN && SameSafeExpr(j.Left, n.Left)) {
+ // j = nil
+ //}
+ //if k != nil && (k.Op == OCAP && SameSafeExpr(k.Left, n.Left)) {
+ // k = nil
+ //}
+ if i == nil {
+ skip |= skipPtr
+ if j == nil {
+ skip |= skipLen
+ }
+ if k == nil {
+ skip |= skipCap
+ }
+ }
+ }
+
+ s.assignWhichMayOverlap(n.X, r, deref, skip, mayOverlap)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ if ir.IsConst(n.Cond, constant.Bool) {
+ s.stmtList(n.Cond.Init())
+ if ir.BoolVal(n.Cond) {
+ s.stmtList(n.Body)
+ } else {
+ s.stmtList(n.Else)
+ }
+ break
+ }
+
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ var likely int8
+ if n.Likely {
+ likely = 1
+ }
+ var bThen *ssa.Block
+ if len(n.Body) != 0 {
+ bThen = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bThen = bEnd
+ }
+ var bElse *ssa.Block
+ if len(n.Else) != 0 {
+ bElse = s.f.NewBlock(ssa.BlockPlain)
+ } else {
+ bElse = bEnd
+ }
+ s.condBranch(n.Cond, bThen, bElse, likely)
+
+ if len(n.Body) != 0 {
+ s.startBlock(bThen)
+ s.stmtList(n.Body)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ if len(n.Else) != 0 {
+ s.startBlock(bElse)
+ s.stmtList(n.Else)
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bEnd)
+ }
+ }
+ s.startBlock(bEnd)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ s.stmtList(n.Results)
+ b := s.exit()
+ b.Pos = s.lastPos.WithIsStmt()
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ s.callResult(n.Call, callTail)
+ call := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockRetJmp // could use BlockExit. BlockRetJmp is mostly for clarity.
+ b.SetControl(call)
+
+ case ir.OCONTINUE, ir.OBREAK:
+ n := n.(*ir.BranchStmt)
+ var to *ssa.Block
+ if n.Label == nil {
+ // plain break/continue
+ switch n.Op() {
+ case ir.OCONTINUE:
+ to = s.continueTo
+ case ir.OBREAK:
+ to = s.breakTo
+ }
+ } else {
+ // labeled break/continue; look up the target
+ sym := n.Label
+ lab := s.label(sym)
+ switch n.Op() {
+ case ir.OCONTINUE:
+ to = lab.continueTarget
+ case ir.OBREAK:
+ to = lab.breakTarget
+ }
+ }
+
+ b := s.endBlock()
+ b.Pos = s.lastPos.WithIsStmt() // Do this even if b is an empty block.
+ b.AddEdgeTo(to)
+
+ case ir.OFOR, ir.OFORUNTIL:
+ // OFOR: for Ninit; Left; Right { Nbody }
+ // cond (Left); body (Nbody); incr (Right)
+ //
+ // OFORUNTIL: for Ninit; Left; Right; List { Nbody }
+ // => body: { Nbody }; incr: Right; if Left { lateincr: List; goto body }; end:
+ n := n.(*ir.ForStmt)
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bBody := s.f.NewBlock(ssa.BlockPlain)
+ bIncr := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ // ensure empty for loops have correct position; issue #30167
+ bBody.Pos = n.Pos()
+
+ // first, jump to condition test (OFOR) or body (OFORUNTIL)
+ b := s.endBlock()
+ if n.Op() == ir.OFOR {
+ b.AddEdgeTo(bCond)
+ // generate code to test condition
+ s.startBlock(bCond)
+ if n.Cond != nil {
+ s.condBranch(n.Cond, bBody, bEnd, 1)
+ } else {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ b.AddEdgeTo(bBody)
+ }
+
+ } else {
+ b.AddEdgeTo(bBody)
+ }
+
+ // set up for continue/break in body
+ prevContinue := s.continueTo
+ prevBreak := s.breakTo
+ s.continueTo = bIncr
+ s.breakTo = bEnd
+ var lab *ssaLabel
+ if sym := n.Label; sym != nil {
+ // labeled for loop
+ lab = s.label(sym)
+ lab.continueTarget = bIncr
+ lab.breakTarget = bEnd
+ }
+
+ // generate body
+ s.startBlock(bBody)
+ s.stmtList(n.Body)
+
+ // tear down continue/break
+ s.continueTo = prevContinue
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.continueTarget = nil
+ lab.breakTarget = nil
+ }
+
+ // done with body, goto incr
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bIncr)
+ }
+
+ // generate incr (and, for OFORUNTIL, condition)
+ s.startBlock(bIncr)
+ if n.Post != nil {
+ s.stmt(n.Post)
+ }
+ if n.Op() == ir.OFOR {
+ if b := s.endBlock(); b != nil {
+ b.AddEdgeTo(bCond)
+ // It can happen that bIncr ends in a block containing only VARKILL,
+ // and that muddles the debugging experience.
+ if b.Pos == src.NoXPos {
+ b.Pos = bCond.Pos
+ }
+ }
+ } else {
+ // bCond is unused in OFORUNTIL, so repurpose it.
+ bLateIncr := bCond
+ // test condition
+ s.condBranch(n.Cond, bLateIncr, bEnd, 1)
+ // generate late increment
+ s.startBlock(bLateIncr)
+ s.stmtList(n.Late)
+ s.endBlock().AddEdgeTo(bBody)
+ }
+
+ s.startBlock(bEnd)
+
+ case ir.OSWITCH, ir.OSELECT:
+ // These have been mostly rewritten by the front end into their Nbody fields.
+ // Our main task is to correctly hook up any break statements.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ prevBreak := s.breakTo
+ s.breakTo = bEnd
+ var sym *types.Sym
+ var body ir.Nodes
+ if n.Op() == ir.OSWITCH {
+ n := n.(*ir.SwitchStmt)
+ sym = n.Label
+ body = n.Compiled
+ } else {
+ n := n.(*ir.SelectStmt)
+ sym = n.Label
+ body = n.Compiled
+ }
+
+ var lab *ssaLabel
+ if sym != nil {
+ // labeled
+ lab = s.label(sym)
+ lab.breakTarget = bEnd
+ }
+
+ // generate body code
+ s.stmtList(body)
+
+ s.breakTo = prevBreak
+ if lab != nil {
+ lab.breakTarget = nil
+ }
+
+ // walk adds explicit OBREAK nodes to the end of all reachable code paths.
+ // If we still have a current block here, then mark it unreachable.
+ if s.curBlock != nil {
+ m := s.mem()
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(m)
+ }
+ s.startBlock(bEnd)
+
+ case ir.OVARDEF:
+ n := n.(*ir.UnaryExpr)
+ if !s.canSSA(n.X) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
+ }
+ case ir.OVARKILL:
+ // Insert a varkill op to record that a variable is no longer live.
+ // We only care about liveness info at call sites, so putting the
+ // varkill in the store chain is enough to keep it correctly ordered
+ // with respect to call ops.
+ n := n.(*ir.UnaryExpr)
+ if !s.canSSA(n.X) {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarKill, types.TypeMem, n.X.(*ir.Name), s.mem(), false)
+ }
+
+ case ir.OVARLIVE:
+ // Insert a varlive op to record that a variable is still live.
+ n := n.(*ir.UnaryExpr)
+ v := n.X.(*ir.Name)
+ if !v.Addrtaken() {
+ s.Fatalf("VARLIVE variable %v must have Addrtaken set", v)
+ }
+ switch v.Class {
+ case ir.PAUTO, ir.PPARAM, ir.PPARAMOUT:
+ default:
+ s.Fatalf("VARLIVE variable %v must be Auto or Arg", v)
+ }
+ s.vars[memVar] = s.newValue1A(ssa.OpVarLive, types.TypeMem, v, s.mem())
+
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ p := s.expr(n.X)
+ s.nilCheck(p)
+
+ case ir.OINLMARK:
+ n := n.(*ir.InlineMarkStmt)
+ s.newValue1I(ssa.OpInlMark, types.TypeVoid, n.Index, s.mem())
+
+ default:
+ s.Fatalf("unhandled stmt %v", n.Op())
+ }
+}
+
+// If true, share as many open-coded defer exits as possible (with the downside of
+// worse line-number information)
+const shareDeferExits = false
+
+// exit processes any code that needs to be generated just before returning.
+// It returns a BlockRet block that ends the control flow. Its control value
+// will be set to the final memory state.
+func (s *state) exit() *ssa.Block {
+ if s.hasdefer {
+ if s.hasOpenDefers {
+ if shareDeferExits && s.lastDeferExit != nil && len(s.openDefers) == s.lastDeferCount {
+ if s.curBlock.Kind != ssa.BlockPlain {
+ panic("Block for an exit should be BlockPlain")
+ }
+ s.curBlock.AddEdgeTo(s.lastDeferExit)
+ s.endBlock()
+ return s.lastDeferFinalBlock
+ }
+ s.openDeferExit()
+ } else {
+ s.rtcall(ir.Syms.Deferreturn, true, nil)
+ }
+ }
+
+ var b *ssa.Block
+ var m *ssa.Value
+ // Do actual return.
+ // These currently turn into self-copies (in many cases).
+ resultFields := s.curfn.Type().Results().FieldSlice()
+ results := make([]*ssa.Value, len(resultFields)+1, len(resultFields)+1)
+ m = s.newValue0(ssa.OpMakeResult, s.f.OwnAux.LateExpansionResultType())
+ // Store SSAable and heap-escaped PPARAMOUT variables back to stack locations.
+ for i, f := range resultFields {
+ n := f.Nname.(*ir.Name)
+ if s.canSSA(n) { // result is in some SSA variable
+ if !n.IsOutputParamInRegisters() {
+ // We are about to store to the result slot.
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ }
+ results[i] = s.variable(n, n.Type())
+ } else if !n.OnStack() { // result is actually heap allocated
+ // We are about to copy the in-heap result to the result slot.
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, n, s.mem())
+ ha := s.expr(n.Heapaddr)
+ s.instrumentFields(n.Type(), ha, instrumentRead)
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), ha, s.mem())
+ } else { // result is not SSA-able; not escaped, so not on heap, but too large for SSA.
+ // Before register ABI this ought to be a self-move, home=dest,
+ // With register ABI, it's still a self-move if parameter is on stack (i.e., too big or overflowed)
+ // No VarDef, as the result slot is already holding live value.
+ results[i] = s.newValue2(ssa.OpDereference, n.Type(), s.addr(n), s.mem())
+ }
+ }
+
+ // Run exit code. Today, this is just racefuncexit, in -race mode.
+ // TODO(register args) this seems risky here with a register-ABI, but not clear it is right to do it earlier either.
+ // Spills in register allocation might just fix it.
+ s.stmtList(s.curfn.Exit)
+
+ results[len(results)-1] = s.mem()
+ m.AddArgs(results...)
+
+ b = s.endBlock()
+ b.Kind = ssa.BlockRet
+ b.SetControl(m)
+ if s.hasdefer && s.hasOpenDefers {
+ s.lastDeferFinalBlock = b
+ }
+ return b
+}
+
+type opAndType struct {
+ op ir.Op
+ etype types.Kind
+}
+
+var opToSSA = map[opAndType]ssa.Op{
+ opAndType{ir.OADD, types.TINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TUINT8}: ssa.OpAdd8,
+ opAndType{ir.OADD, types.TINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TUINT16}: ssa.OpAdd16,
+ opAndType{ir.OADD, types.TINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TUINT32}: ssa.OpAdd32,
+ opAndType{ir.OADD, types.TINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TUINT64}: ssa.OpAdd64,
+ opAndType{ir.OADD, types.TFLOAT32}: ssa.OpAdd32F,
+ opAndType{ir.OADD, types.TFLOAT64}: ssa.OpAdd64F,
+
+ opAndType{ir.OSUB, types.TINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TUINT8}: ssa.OpSub8,
+ opAndType{ir.OSUB, types.TINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TUINT16}: ssa.OpSub16,
+ opAndType{ir.OSUB, types.TINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TUINT32}: ssa.OpSub32,
+ opAndType{ir.OSUB, types.TINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TUINT64}: ssa.OpSub64,
+ opAndType{ir.OSUB, types.TFLOAT32}: ssa.OpSub32F,
+ opAndType{ir.OSUB, types.TFLOAT64}: ssa.OpSub64F,
+
+ opAndType{ir.ONOT, types.TBOOL}: ssa.OpNot,
+
+ opAndType{ir.ONEG, types.TINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TUINT8}: ssa.OpNeg8,
+ opAndType{ir.ONEG, types.TINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TUINT16}: ssa.OpNeg16,
+ opAndType{ir.ONEG, types.TINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TUINT32}: ssa.OpNeg32,
+ opAndType{ir.ONEG, types.TINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TUINT64}: ssa.OpNeg64,
+ opAndType{ir.ONEG, types.TFLOAT32}: ssa.OpNeg32F,
+ opAndType{ir.ONEG, types.TFLOAT64}: ssa.OpNeg64F,
+
+ opAndType{ir.OBITNOT, types.TINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TUINT8}: ssa.OpCom8,
+ opAndType{ir.OBITNOT, types.TINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TUINT16}: ssa.OpCom16,
+ opAndType{ir.OBITNOT, types.TINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TUINT32}: ssa.OpCom32,
+ opAndType{ir.OBITNOT, types.TINT64}: ssa.OpCom64,
+ opAndType{ir.OBITNOT, types.TUINT64}: ssa.OpCom64,
+
+ opAndType{ir.OIMAG, types.TCOMPLEX64}: ssa.OpComplexImag,
+ opAndType{ir.OIMAG, types.TCOMPLEX128}: ssa.OpComplexImag,
+ opAndType{ir.OREAL, types.TCOMPLEX64}: ssa.OpComplexReal,
+ opAndType{ir.OREAL, types.TCOMPLEX128}: ssa.OpComplexReal,
+
+ opAndType{ir.OMUL, types.TINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TUINT8}: ssa.OpMul8,
+ opAndType{ir.OMUL, types.TINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TUINT16}: ssa.OpMul16,
+ opAndType{ir.OMUL, types.TINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TUINT32}: ssa.OpMul32,
+ opAndType{ir.OMUL, types.TINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TUINT64}: ssa.OpMul64,
+ opAndType{ir.OMUL, types.TFLOAT32}: ssa.OpMul32F,
+ opAndType{ir.OMUL, types.TFLOAT64}: ssa.OpMul64F,
+
+ opAndType{ir.ODIV, types.TFLOAT32}: ssa.OpDiv32F,
+ opAndType{ir.ODIV, types.TFLOAT64}: ssa.OpDiv64F,
+
+ opAndType{ir.ODIV, types.TINT8}: ssa.OpDiv8,
+ opAndType{ir.ODIV, types.TUINT8}: ssa.OpDiv8u,
+ opAndType{ir.ODIV, types.TINT16}: ssa.OpDiv16,
+ opAndType{ir.ODIV, types.TUINT16}: ssa.OpDiv16u,
+ opAndType{ir.ODIV, types.TINT32}: ssa.OpDiv32,
+ opAndType{ir.ODIV, types.TUINT32}: ssa.OpDiv32u,
+ opAndType{ir.ODIV, types.TINT64}: ssa.OpDiv64,
+ opAndType{ir.ODIV, types.TUINT64}: ssa.OpDiv64u,
+
+ opAndType{ir.OMOD, types.TINT8}: ssa.OpMod8,
+ opAndType{ir.OMOD, types.TUINT8}: ssa.OpMod8u,
+ opAndType{ir.OMOD, types.TINT16}: ssa.OpMod16,
+ opAndType{ir.OMOD, types.TUINT16}: ssa.OpMod16u,
+ opAndType{ir.OMOD, types.TINT32}: ssa.OpMod32,
+ opAndType{ir.OMOD, types.TUINT32}: ssa.OpMod32u,
+ opAndType{ir.OMOD, types.TINT64}: ssa.OpMod64,
+ opAndType{ir.OMOD, types.TUINT64}: ssa.OpMod64u,
+
+ opAndType{ir.OAND, types.TINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TUINT8}: ssa.OpAnd8,
+ opAndType{ir.OAND, types.TINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TUINT16}: ssa.OpAnd16,
+ opAndType{ir.OAND, types.TINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TUINT32}: ssa.OpAnd32,
+ opAndType{ir.OAND, types.TINT64}: ssa.OpAnd64,
+ opAndType{ir.OAND, types.TUINT64}: ssa.OpAnd64,
+
+ opAndType{ir.OOR, types.TINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TUINT8}: ssa.OpOr8,
+ opAndType{ir.OOR, types.TINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TUINT16}: ssa.OpOr16,
+ opAndType{ir.OOR, types.TINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TUINT32}: ssa.OpOr32,
+ opAndType{ir.OOR, types.TINT64}: ssa.OpOr64,
+ opAndType{ir.OOR, types.TUINT64}: ssa.OpOr64,
+
+ opAndType{ir.OXOR, types.TINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TUINT8}: ssa.OpXor8,
+ opAndType{ir.OXOR, types.TINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TUINT16}: ssa.OpXor16,
+ opAndType{ir.OXOR, types.TINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TUINT32}: ssa.OpXor32,
+ opAndType{ir.OXOR, types.TINT64}: ssa.OpXor64,
+ opAndType{ir.OXOR, types.TUINT64}: ssa.OpXor64,
+
+ opAndType{ir.OEQ, types.TBOOL}: ssa.OpEqB,
+ opAndType{ir.OEQ, types.TINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TUINT8}: ssa.OpEq8,
+ opAndType{ir.OEQ, types.TINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TUINT16}: ssa.OpEq16,
+ opAndType{ir.OEQ, types.TINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TUINT32}: ssa.OpEq32,
+ opAndType{ir.OEQ, types.TINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TUINT64}: ssa.OpEq64,
+ opAndType{ir.OEQ, types.TINTER}: ssa.OpEqInter,
+ opAndType{ir.OEQ, types.TSLICE}: ssa.OpEqSlice,
+ opAndType{ir.OEQ, types.TFUNC}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TMAP}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TCHAN}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUINTPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TUNSAFEPTR}: ssa.OpEqPtr,
+ opAndType{ir.OEQ, types.TFLOAT64}: ssa.OpEq64F,
+ opAndType{ir.OEQ, types.TFLOAT32}: ssa.OpEq32F,
+
+ opAndType{ir.ONE, types.TBOOL}: ssa.OpNeqB,
+ opAndType{ir.ONE, types.TINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TUINT8}: ssa.OpNeq8,
+ opAndType{ir.ONE, types.TINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TUINT16}: ssa.OpNeq16,
+ opAndType{ir.ONE, types.TINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TUINT32}: ssa.OpNeq32,
+ opAndType{ir.ONE, types.TINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TUINT64}: ssa.OpNeq64,
+ opAndType{ir.ONE, types.TINTER}: ssa.OpNeqInter,
+ opAndType{ir.ONE, types.TSLICE}: ssa.OpNeqSlice,
+ opAndType{ir.ONE, types.TFUNC}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TMAP}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TCHAN}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUINTPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TUNSAFEPTR}: ssa.OpNeqPtr,
+ opAndType{ir.ONE, types.TFLOAT64}: ssa.OpNeq64F,
+ opAndType{ir.ONE, types.TFLOAT32}: ssa.OpNeq32F,
+
+ opAndType{ir.OLT, types.TINT8}: ssa.OpLess8,
+ opAndType{ir.OLT, types.TUINT8}: ssa.OpLess8U,
+ opAndType{ir.OLT, types.TINT16}: ssa.OpLess16,
+ opAndType{ir.OLT, types.TUINT16}: ssa.OpLess16U,
+ opAndType{ir.OLT, types.TINT32}: ssa.OpLess32,
+ opAndType{ir.OLT, types.TUINT32}: ssa.OpLess32U,
+ opAndType{ir.OLT, types.TINT64}: ssa.OpLess64,
+ opAndType{ir.OLT, types.TUINT64}: ssa.OpLess64U,
+ opAndType{ir.OLT, types.TFLOAT64}: ssa.OpLess64F,
+ opAndType{ir.OLT, types.TFLOAT32}: ssa.OpLess32F,
+
+ opAndType{ir.OLE, types.TINT8}: ssa.OpLeq8,
+ opAndType{ir.OLE, types.TUINT8}: ssa.OpLeq8U,
+ opAndType{ir.OLE, types.TINT16}: ssa.OpLeq16,
+ opAndType{ir.OLE, types.TUINT16}: ssa.OpLeq16U,
+ opAndType{ir.OLE, types.TINT32}: ssa.OpLeq32,
+ opAndType{ir.OLE, types.TUINT32}: ssa.OpLeq32U,
+ opAndType{ir.OLE, types.TINT64}: ssa.OpLeq64,
+ opAndType{ir.OLE, types.TUINT64}: ssa.OpLeq64U,
+ opAndType{ir.OLE, types.TFLOAT64}: ssa.OpLeq64F,
+ opAndType{ir.OLE, types.TFLOAT32}: ssa.OpLeq32F,
+}
+
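+// concreteEtype returns the kind of t, with the platform-dependent kinds
+// TINT, TUINT, and TUINTPTR mapped to their fixed-size equivalents.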
+func (s *state) concreteEtype(t *types.Type) types.Kind {
+ e := t.Kind()
+ switch e {
+ default:
+ return e
+ case types.TINT:
+ if s.config.PtrSize == 8 {
+ return types.TINT64
+ }
+ return types.TINT32
+ case types.TUINT:
+ if s.config.PtrSize == 8 {
+ return types.TUINT64
+ }
+ return types.TUINT32
+ case types.TUINTPTR:
+ if s.config.PtrSize == 8 {
+ return types.TUINT64
+ }
+ return types.TUINT32
+ }
+}
+
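+// ssaOp returns the machine-independent SSA op for applying op to operands of type t.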
+func (s *state) ssaOp(op ir.Op, t *types.Type) ssa.Op {
+ etype := s.concreteEtype(t)
+ x, ok := opToSSA[opAndType{op, etype}]
+ if !ok {
+ s.Fatalf("unhandled binary op %v %s", op, etype)
+ }
+ return x
+}
+
+type opAndTwoTypes struct {
+ op ir.Op
+ etype1 types.Kind
+ etype2 types.Kind
+}
+
+type twoTypes struct {
+ etype1 types.Kind
+ etype2 types.Kind
+}
+
+type twoOpsAndType struct {
+ op1 ssa.Op
+ op2 ssa.Op
+ intermediateType types.Kind
+}
+
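+// fpConvOpToSSA maps a (from, to) kind pair to the pair of SSA conversion ops
+// (and the intermediate kind) used to convert between floating-point and
+// integer types. OpInvalid marks the uint64 cases that are instead expanded
+// into branchy code; see conv.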
+var fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+
+ twoTypes{types.TINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to32F, types.TINT64},
+
+ twoTypes{types.TINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpSignExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64to64F, types.TINT64},
+
+ twoTypes{types.TFLOAT32, types.TINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT32}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TINT64}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpCopy, types.TINT64},
+
+ twoTypes{types.TFLOAT64, types.TINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT32}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpCopy, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TINT64}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpCopy, types.TINT64},
+ // unsigned
+ twoTypes{types.TUINT8, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to32F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to32F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto32F, branchy code expansion instead
+
+ twoTypes{types.TUINT8, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt8to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT16, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt16to32, ssa.OpCvt32to64F, types.TINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpZeroExt32to64, ssa.OpCvt64to64F, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpInvalid, types.TUINT64}, // Cvt64Uto64F, branchy code expansion instead
+
+ twoTypes{types.TFLOAT32, types.TUINT8}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT16}: twoOpsAndType{ssa.OpCvt32Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt32Fto64U, branchy code expansion instead
+
+ twoTypes{types.TFLOAT64, types.TUINT8}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to8, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT16}: twoOpsAndType{ssa.OpCvt64Fto32, ssa.OpTrunc32to16, types.TINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto64, ssa.OpTrunc64to32, types.TINT64}, // go wide to dodge unsigned
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpInvalid, ssa.OpCopy, types.TUINT64}, // Cvt64Fto64U, branchy code expansion instead
+
+ // float
+ twoTypes{types.TFLOAT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCvt64Fto32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT64, types.TFLOAT64}: twoOpsAndType{ssa.OpRound64F, ssa.OpCopy, types.TFLOAT64},
+ twoTypes{types.TFLOAT32, types.TFLOAT32}: twoOpsAndType{ssa.OpRound32F, ssa.OpCopy, types.TFLOAT32},
+ twoTypes{types.TFLOAT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCvt32Fto64F, ssa.OpCopy, types.TFLOAT64},
+}
+
+// fpConvOpToSSA32 is used only on 32-bit archs and contains only the entries
+// that differ from fpConvOpToSSA: on 32-bit archs, don't use int64<->float
+// conversions for uint32.
+var fpConvOpToSSA32 = map[twoTypes]twoOpsAndType{
+ twoTypes{types.TUINT32, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto32F, types.TUINT32},
+ twoTypes{types.TUINT32, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt32Uto64F, types.TUINT32},
+ twoTypes{types.TFLOAT32, types.TUINT32}: twoOpsAndType{ssa.OpCvt32Fto32U, ssa.OpCopy, types.TUINT32},
+ twoTypes{types.TFLOAT64, types.TUINT32}: twoOpsAndType{ssa.OpCvt64Fto32U, ssa.OpCopy, types.TUINT32},
+}
+
+// uint64<->float conversions, only on machines that have instructions for that
+var uint64fpConvOpToSSA = map[twoTypes]twoOpsAndType{
+ twoTypes{types.TUINT64, types.TFLOAT32}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto32F, types.TUINT64},
+ twoTypes{types.TUINT64, types.TFLOAT64}: twoOpsAndType{ssa.OpCopy, ssa.OpCvt64Uto64F, types.TUINT64},
+ twoTypes{types.TFLOAT32, types.TUINT64}: twoOpsAndType{ssa.OpCvt32Fto64U, ssa.OpCopy, types.TUINT64},
+ twoTypes{types.TFLOAT64, types.TUINT64}: twoOpsAndType{ssa.OpCvt64Fto64U, ssa.OpCopy, types.TUINT64},
+}
+
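+// shiftOpToSSA maps a shift operator plus the kinds of the shifted operand and
+// the shift count to the corresponding SSA op.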
+var shiftOpToSSA = map[opAndTwoTypes]ssa.Op{
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT8}: ssa.OpLsh8x8,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT16}: ssa.OpLsh8x16,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT32}: ssa.OpLsh8x32,
+ opAndTwoTypes{ir.OLSH, types.TINT8, types.TUINT64}: ssa.OpLsh8x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT8, types.TUINT64}: ssa.OpLsh8x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT8}: ssa.OpLsh16x8,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT16}: ssa.OpLsh16x16,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT32}: ssa.OpLsh16x32,
+ opAndTwoTypes{ir.OLSH, types.TINT16, types.TUINT64}: ssa.OpLsh16x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT16, types.TUINT64}: ssa.OpLsh16x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT8}: ssa.OpLsh32x8,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT16}: ssa.OpLsh32x16,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT32}: ssa.OpLsh32x32,
+ opAndTwoTypes{ir.OLSH, types.TINT32, types.TUINT64}: ssa.OpLsh32x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT32, types.TUINT64}: ssa.OpLsh32x64,
+
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT8}: ssa.OpLsh64x8,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT16}: ssa.OpLsh64x16,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT32}: ssa.OpLsh64x32,
+ opAndTwoTypes{ir.OLSH, types.TINT64, types.TUINT64}: ssa.OpLsh64x64,
+ opAndTwoTypes{ir.OLSH, types.TUINT64, types.TUINT64}: ssa.OpLsh64x64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT8}: ssa.OpRsh8x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT8}: ssa.OpRsh8Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT16}: ssa.OpRsh8x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT16}: ssa.OpRsh8Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT32}: ssa.OpRsh8x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT32}: ssa.OpRsh8Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT8, types.TUINT64}: ssa.OpRsh8x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT8, types.TUINT64}: ssa.OpRsh8Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT8}: ssa.OpRsh16x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT8}: ssa.OpRsh16Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT16}: ssa.OpRsh16x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT16}: ssa.OpRsh16Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT32}: ssa.OpRsh16x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT32}: ssa.OpRsh16Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT16, types.TUINT64}: ssa.OpRsh16x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT16, types.TUINT64}: ssa.OpRsh16Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT8}: ssa.OpRsh32x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT8}: ssa.OpRsh32Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT16}: ssa.OpRsh32x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT16}: ssa.OpRsh32Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT32}: ssa.OpRsh32x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT32}: ssa.OpRsh32Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT32, types.TUINT64}: ssa.OpRsh32x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT32, types.TUINT64}: ssa.OpRsh32Ux64,
+
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT8}: ssa.OpRsh64x8,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT8}: ssa.OpRsh64Ux8,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT16}: ssa.OpRsh64x16,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT16}: ssa.OpRsh64Ux16,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT32}: ssa.OpRsh64x32,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT32}: ssa.OpRsh64Ux32,
+ opAndTwoTypes{ir.ORSH, types.TINT64, types.TUINT64}: ssa.OpRsh64x64,
+ opAndTwoTypes{ir.ORSH, types.TUINT64, types.TUINT64}: ssa.OpRsh64Ux64,
+}
+
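+// ssaShiftOp returns the SSA op for shifting a value of type t by a count of type u.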
+func (s *state) ssaShiftOp(op ir.Op, t *types.Type, u *types.Type) ssa.Op {
+ etype1 := s.concreteEtype(t)
+ etype2 := s.concreteEtype(u)
+ x, ok := shiftOpToSSA[opAndTwoTypes{op, etype1, etype2}]
+ if !ok {
+ s.Fatalf("unhandled shift op %v etype=%s/%s", op, etype1, etype2)
+ }
+ return x
+}
+
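+// conv converts v, which has type ft, to type tt and returns the converted value.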
+func (s *state) conv(n ir.Node, v *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ if ft.IsBoolean() && tt.IsKind(types.TUINT8) {
+ // Bool -> uint8 is generated internally when indexing into runtime.staticbyte.
+ return s.newValue1(ssa.OpCopy, tt, v)
+ }
+ if ft.IsInteger() && tt.IsInteger() {
+ var op ssa.Op
+ if tt.Size() == ft.Size() {
+ op = ssa.OpCopy
+ } else if tt.Size() < ft.Size() {
+ // truncation
+ switch 10*ft.Size() + tt.Size() {
+ case 21:
+ op = ssa.OpTrunc16to8
+ case 41:
+ op = ssa.OpTrunc32to8
+ case 42:
+ op = ssa.OpTrunc32to16
+ case 81:
+ op = ssa.OpTrunc64to8
+ case 82:
+ op = ssa.OpTrunc64to16
+ case 84:
+ op = ssa.OpTrunc64to32
+ default:
+ s.Fatalf("weird integer truncation %v -> %v", ft, tt)
+ }
+ } else if ft.IsSigned() {
+ // sign extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpSignExt8to16
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad integer sign extension %v -> %v", ft, tt)
+ }
+ } else {
+ // zero extension
+ switch 10*ft.Size() + tt.Size() {
+ case 12:
+ op = ssa.OpZeroExt8to16
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("weird integer sign extension %v -> %v", ft, tt)
+ }
+ }
+ return s.newValue1(op, tt, v)
+ }
+
+ if ft.IsComplex() && tt.IsComplex() {
+ var op ssa.Op
+ if ft.Size() == tt.Size() {
+ switch ft.Size() {
+ case 8:
+ op = ssa.OpRound32F
+ case 16:
+ op = ssa.OpRound64F
+ default:
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ } else if ft.Size() == 8 && tt.Size() == 16 {
+ op = ssa.OpCvt32Fto64F
+ } else if ft.Size() == 16 && tt.Size() == 8 {
+ op = ssa.OpCvt64Fto32F
+ } else {
+ s.Fatalf("weird complex conversion %v -> %v", ft, tt)
+ }
+ ftp := types.FloatForComplex(ft)
+ ttp := types.FloatForComplex(tt)
+ return s.newValue2(ssa.OpComplexMake, tt,
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexReal, ftp, v)),
+ s.newValueOrSfCall1(op, ttp, s.newValue1(ssa.OpComplexImag, ftp, v)))
+ }
+
+ if tt.IsComplex() { // and ft is not complex
+ // Needed for generics support - can't happen in normal Go code.
+ et := types.FloatForComplex(tt)
+ v = s.conv(n, v, ft, et)
+ return s.newValue2(ssa.OpComplexMake, tt, v, s.zeroVal(et))
+ }
+
+ if ft.IsFloat() || tt.IsFloat() {
+ conv, ok := fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]
+ if s.config.RegSize == 4 && Arch.LinkArch.Family != sys.MIPS && !s.softFloat {
+ if conv1, ok1 := fpConvOpToSSA32[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+ if Arch.LinkArch.Family == sys.ARM64 || Arch.LinkArch.Family == sys.Wasm || Arch.LinkArch.Family == sys.S390X || s.softFloat {
+ if conv1, ok1 := uint64fpConvOpToSSA[twoTypes{s.concreteEtype(ft), s.concreteEtype(tt)}]; ok1 {
+ conv = conv1
+ }
+ }
+
+ if Arch.LinkArch.Family == sys.MIPS && !s.softFloat {
+ if ft.Size() == 4 && ft.IsInteger() && !ft.IsSigned() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint32Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint32Tofloat64(n, v, ft, tt)
+ }
+ } else if tt.Size() == 4 && tt.IsInteger() && !tt.IsSigned() {
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint32(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint32(n, v, ft, tt)
+ }
+ }
+ }
+
+ if !ok {
+ s.Fatalf("weird float conversion %v -> %v", ft, tt)
+ }
+ op1, op2, it := conv.op1, conv.op2, conv.intermediateType
+
+ if op1 != ssa.OpInvalid && op2 != ssa.OpInvalid {
+ // normal case, not tripping over unsigned 64
+ if op1 == ssa.OpCopy {
+ if op2 == ssa.OpCopy {
+ return v
+ }
+ return s.newValueOrSfCall1(op2, tt, v)
+ }
+ if op2 == ssa.OpCopy {
+ return s.newValueOrSfCall1(op1, tt, v)
+ }
+ return s.newValueOrSfCall1(op2, tt, s.newValueOrSfCall1(op1, types.Types[it], v))
+ }
+ // Tricky 64-bit unsigned cases.
+ if ft.IsInteger() {
+ // tt is float32 or float64, and ft is also unsigned
+ if tt.Size() == 4 {
+ return s.uint64Tofloat32(n, v, ft, tt)
+ }
+ if tt.Size() == 8 {
+ return s.uint64Tofloat64(n, v, ft, tt)
+ }
+ s.Fatalf("weird unsigned integer to float conversion %v -> %v", ft, tt)
+ }
+ // ft is float32 or float64, and tt is unsigned integer
+ if ft.Size() == 4 {
+ return s.float32ToUint64(n, v, ft, tt)
+ }
+ if ft.Size() == 8 {
+ return s.float64ToUint64(n, v, ft, tt)
+ }
+ s.Fatalf("weird float to unsigned integer conversion %v -> %v", ft, tt)
+ return nil
+ }
+
+ s.Fatalf("unhandled OCONV %s -> %s", ft.Kind(), tt.Kind())
+ return nil
+}
+
+// expr converts the expression n to ssa, adds it to s and returns the ssa result.
+func (s *state) expr(n ir.Node) *ssa.Value {
+ return s.exprCheckPtr(n, true)
+}
+
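+// exprCheckPtr is like expr, but checkPtrOK controls whether checkptr
+// instrumentation may be emitted for unsafe.Pointer conversions.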
+func (s *state) exprCheckPtr(n ir.Node, checkPtrOK bool) *ssa.Value {
+ if ir.HasUniquePos(n) {
+ // ONAMEs and named OLITERALs have the line number
+ // of the decl, not the use. See issue 14742.
+ s.pushLine(n.Pos())
+ defer s.popLine()
+ }
+
+ s.stmtList(n.Init())
+ switch n.Op() {
+ case ir.OBYTES2STRTMP:
+ n := n.(*ir.ConvExpr)
+ slice := s.expr(n.X)
+ ptr := s.newValue1(ssa.OpSlicePtr, s.f.Config.Types.BytePtr, slice)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ return s.newValue2(ssa.OpStringMake, n.Type(), ptr, len)
+ case ir.OSTR2BYTESTMP:
+ n := n.(*ir.ConvExpr)
+ str := s.expr(n.X)
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, str)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], str)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), ptr, len, len)
+ case ir.OCFUNC:
+ n := n.(*ir.UnaryExpr)
+ aux := n.X.(*ir.Name).Linksym()
+ // OCFUNC is used to build function values, which must
+ // always reference ABIInternal entry points.
+ if aux.ABI() != obj.ABIInternal {
+ s.Fatalf("expected ABIInternal: %v", aux.ABI())
+ }
+ return s.entryNewValue1A(ssa.OpAddr, n.Type(), aux, s.sb)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC {
+ // "value" of a function is the address of the function's closure
+ sym := staticdata.FuncLinksym(n)
+ return s.entryNewValue1A(ssa.OpAddr, types.NewPtr(n.Type()), sym, s.sb)
+ }
+ if s.canSSA(n) {
+ return s.variable(n, n.Type())
+ }
+ return s.load(n.Type(), s.addr(n))
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ return s.load(n.Type(), s.addr(n))
+ case ir.ONIL:
+ n := n.(*ir.NilExpr)
+ t := n.Type()
+ switch {
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsInterface():
+ return s.constInterface(t)
+ default:
+ return s.constNil(t)
+ }
+ case ir.OLITERAL:
+ switch u := n.Val(); u.Kind() {
+ case constant.Int:
+ i := ir.IntVal(n.Type(), u)
+ switch n.Type().Size() {
+ case 1:
+ return s.constInt8(n.Type(), int8(i))
+ case 2:
+ return s.constInt16(n.Type(), int16(i))
+ case 4:
+ return s.constInt32(n.Type(), int32(i))
+ case 8:
+ return s.constInt64(n.Type(), i)
+ default:
+ s.Fatalf("bad integer size %d", n.Type().Size())
+ return nil
+ }
+ case constant.String:
+ i := constant.StringVal(u)
+ if i == "" {
+ return s.constEmptyString(n.Type())
+ }
+ return s.entryNewValue0A(ssa.OpConstString, n.Type(), ssa.StringToAux(i))
+ case constant.Bool:
+ return s.constBool(constant.BoolVal(u))
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch n.Type().Size() {
+ case 4:
+ return s.constFloat32(n.Type(), f)
+ case 8:
+ return s.constFloat64(n.Type(), f)
+ default:
+ s.Fatalf("bad float size %d", n.Type().Size())
+ return nil
+ }
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch n.Type().Size() {
+ case 8:
+ pt := types.Types[types.TFLOAT32]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat32(pt, re),
+ s.constFloat32(pt, im))
+ case 16:
+ pt := types.Types[types.TFLOAT64]
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.constFloat64(pt, re),
+ s.constFloat64(pt, im))
+ default:
+ s.Fatalf("bad complex size %d", n.Type().Size())
+ return nil
+ }
+ default:
+ s.Fatalf("unhandled OLITERAL %v", u.Kind())
+ return nil
+ }
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ to := n.Type()
+ from := n.X.Type()
+
+ // Assume everything will work out, so set up our return value.
+ // Anything interesting that happens from here is a fatal.
+ x := s.expr(n.X)
+ if to == from {
+ return x
+ }
+
+ // Special case for not confusing GC and liveness.
+ // We don't want pointers accidentally classified
+ // as not-pointers or vice-versa because of copy
+ // elision.
+ if to.IsPtrShaped() != from.IsPtrShaped() {
+ return s.newValue2(ssa.OpConvert, to, x, s.mem())
+ }
+
+ v := s.newValue1(ssa.OpCopy, to, x) // ensure that v has the right type
+
+ // CONVNOP closure
+ if to.Kind() == types.TFUNC && from.IsPtrShaped() {
+ return v
+ }
+
+ // named <--> unnamed type or typed <--> untyped const
+ if from.Kind() == to.Kind() {
+ return v
+ }
+
+ // unsafe.Pointer <--> *T
+ if to.IsUnsafePtr() && from.IsPtrShaped() || from.IsUnsafePtr() && to.IsPtrShaped() {
+ if s.checkPtrEnabled && checkPtrOK && to.IsPtr() && from.IsUnsafePtr() {
+ s.checkPtrAlignment(n, v, nil)
+ }
+ return v
+ }
+
+ // map <--> *hmap
+ if to.Kind() == types.TMAP && from.IsPtr() &&
+ to.MapType().Hmap == from.Elem() {
+ return v
+ }
+
+ types.CalcSize(from)
+ types.CalcSize(to)
+ if from.Size() != to.Size() {
+ s.Fatalf("CONVNOP width mismatch %v (%d) -> %v (%d)\n", from, from.Size(), to, to.Size())
+ return nil
+ }
+ if etypesign(from.Kind()) != etypesign(to.Kind()) {
+ s.Fatalf("CONVNOP sign mismatch %v (%s) -> %v (%s)\n", from, from.Kind(), to, to.Kind())
+ return nil
+ }
+
+ if base.Flag.Cfg.Instrumenting {
+ // These appear to be fine, but they fail the
+ // integer constraint below, so okay them here.
+ // Sample non-integer conversion: map[string]string -> *uint8
+ return v
+ }
+
+ if etypesign(from.Kind()) == 0 {
+ s.Fatalf("CONVNOP unrecognized non-integer %v -> %v\n", from, to)
+ return nil
+ }
+
+ // integer, same width, same sign
+ return v
+
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ x := s.expr(n.X)
+ return s.conv(n, x, n.X.Type(), n.Type())
+
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
+ res, _ := s.dottype(n, false)
+ return res
+
+ case ir.ODYNAMICDOTTYPE:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ res, _ := s.dynamicDottype(n, false)
+ return res
+
+ // binary ops
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.X.Type().IsComplex() {
+ pt := types.FloatForComplex(n.X.Type())
+ op := s.ssaOp(ir.OEQ, pt)
+ r := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b))
+ i := s.newValueOrSfCall2(op, types.Types[types.TBOOL], s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b))
+ c := s.newValue2(ssa.OpAndB, types.Types[types.TBOOL], r, i)
+ switch n.Op() {
+ case ir.OEQ:
+ return c
+ case ir.ONE:
+ return s.newValue1(ssa.OpNot, types.Types[types.TBOOL], c)
+ default:
+ s.Fatalf("ordered complex compare %v", n.Op())
+ }
+ }
+
+ // Convert OGE and OGT into OLE and OLT.
+ op := n.Op()
+ switch op {
+ case ir.OGE:
+ op, a, b = ir.OLE, b, a
+ case ir.OGT:
+ op, a, b = ir.OLT, b, a
+ }
+ if n.X.Type().IsFloat() {
+ // float comparison
+ return s.newValueOrSfCall2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+ }
+ // integer comparison
+ return s.newValue2(s.ssaOp(op, n.X.Type()), types.Types[types.TBOOL], a, b)
+ case ir.OMUL:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ xreal := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, bimag), s.newValueOrSfCall2(mulop, wt, aimag, breal))
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+ }
+
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+
+ case ir.ODIV:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ // TODO this is not executed because the front-end substitutes a runtime call.
+ // That probably ought to change; with modest optimization the widen/narrow
+ // conversions could all be elided in larger expression trees.
+ mulop := ssa.OpMul64F
+ addop := ssa.OpAdd64F
+ subop := ssa.OpSub64F
+ divop := ssa.OpDiv64F
+ pt := types.FloatForComplex(n.Type()) // Could be Float32 or Float64
+ wt := types.Types[types.TFLOAT64] // Compute in Float64 to minimize cancellation error
+
+ areal := s.newValue1(ssa.OpComplexReal, pt, a)
+ breal := s.newValue1(ssa.OpComplexReal, pt, b)
+ aimag := s.newValue1(ssa.OpComplexImag, pt, a)
+ bimag := s.newValue1(ssa.OpComplexImag, pt, b)
+
+ if pt != wt { // Widen for calculation
+ areal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, areal)
+ breal = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, breal)
+ aimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, aimag)
+ bimag = s.newValueOrSfCall1(ssa.OpCvt32Fto64F, wt, bimag)
+ }
+
+ denom := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, breal, breal), s.newValueOrSfCall2(mulop, wt, bimag, bimag))
+ xreal := s.newValueOrSfCall2(addop, wt, s.newValueOrSfCall2(mulop, wt, areal, breal), s.newValueOrSfCall2(mulop, wt, aimag, bimag))
+ ximag := s.newValueOrSfCall2(subop, wt, s.newValueOrSfCall2(mulop, wt, aimag, breal), s.newValueOrSfCall2(mulop, wt, areal, bimag))
+
+ // TODO not sure if this is best done in wide precision or narrow
+ // Double-rounding might be an issue.
+ // Note that the pre-SSA implementation does the entire calculation
+ // in wide format, so wide is compatible.
+ xreal = s.newValueOrSfCall2(divop, wt, xreal, denom)
+ ximag = s.newValueOrSfCall2(divop, wt, ximag, denom)
+
+ if pt != wt { // Narrow to store back
+ xreal = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, xreal)
+ ximag = s.newValueOrSfCall1(ssa.OpCvt64Fto32F, pt, ximag)
+ }
+ return s.newValue2(ssa.OpComplexMake, n.Type(), xreal, ximag)
+ }
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.intDivide(n, a, b)
+ case ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ return s.intDivide(n, a, b)
+ case ir.OADD, ir.OSUB:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ if n.Type().IsComplex() {
+ pt := types.FloatForComplex(n.Type())
+ op := s.ssaOp(n.Op(), pt)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexReal, pt, a), s.newValue1(ssa.OpComplexReal, pt, b)),
+ s.newValueOrSfCall2(op, pt, s.newValue1(ssa.OpComplexImag, pt, a), s.newValue1(ssa.OpComplexImag, pt, b)))
+ }
+ if n.Type().IsFloat() {
+ return s.newValueOrSfCall2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OAND, ir.OOR, ir.OXOR:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+ case ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ b = s.newValue1(s.ssaOp(ir.OBITNOT, b.Type), b.Type, b)
+ return s.newValue2(s.ssaOp(ir.OAND, n.Type()), a.Type, a, b)
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ a := s.expr(n.X)
+ b := s.expr(n.Y)
+ bt := b.Type
+ if bt.IsSigned() {
+ cmp := s.newValue2(s.ssaOp(ir.OLE, bt), types.Types[types.TBOOL], s.zeroVal(bt), b)
+ s.check(cmp, ir.Syms.Panicshift)
+ bt = bt.ToUnsigned()
+ }
+ return s.newValue2(s.ssaShiftOp(n.Op(), n.Type(), bt), a.Type, a, b)
+ case ir.OANDAND, ir.OOROR:
+ // To implement OANDAND (and OOROR), we introduce a
+ // new temporary variable to hold the result. The
+ // variable is associated with the OANDAND node in the
+ // s.vars table (normally variables are only
+ // associated with ONAME nodes). We convert
+ // A && B
+ // to
+ // var = A
+ // if var {
+ // var = B
+ // }
+ // Using var in the subsequent block introduces the
+ // necessary phi variable.
+ n := n.(*ir.LogicalExpr)
+ el := s.expr(n.X)
+ s.vars[n] = el
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(el)
+ // In theory, we should set b.Likely here based on context.
+ // However, gc only gives us likeliness hints
+ // in a single place, for plain OIF statements,
+		// and passing around context is finicky, so don't bother for now.
+
+ bRight := s.f.NewBlock(ssa.BlockPlain)
+ bResult := s.f.NewBlock(ssa.BlockPlain)
+ if n.Op() == ir.OANDAND {
+ b.AddEdgeTo(bRight)
+ b.AddEdgeTo(bResult)
+ } else if n.Op() == ir.OOROR {
+ b.AddEdgeTo(bResult)
+ b.AddEdgeTo(bRight)
+ }
+
+ s.startBlock(bRight)
+ er := s.expr(n.Y)
+ s.vars[n] = er
+
+ b = s.endBlock()
+ b.AddEdgeTo(bResult)
+
+ s.startBlock(bResult)
+ return s.variable(n, types.Types[types.TBOOL])
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ r := s.expr(n.X)
+ i := s.expr(n.Y)
+ return s.newValue2(ssa.OpComplexMake, n.Type(), r, i)
+
+ // unary ops
+ case ir.ONEG:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ if n.Type().IsComplex() {
+ tp := types.FloatForComplex(n.Type())
+ negop := s.ssaOp(n.Op(), tp)
+ return s.newValue2(ssa.OpComplexMake, n.Type(),
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexReal, tp, a)),
+ s.newValue1(negop, tp, s.newValue1(ssa.OpComplexImag, tp, a)))
+ }
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.ONOT, ir.OBITNOT:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(s.ssaOp(n.Op(), n.Type()), a.Type, a)
+ case ir.OIMAG, ir.OREAL:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(s.ssaOp(n.Op(), n.X.Type()), n.Type(), a)
+ case ir.OPLUS:
+ n := n.(*ir.UnaryExpr)
+ return s.expr(n.X)
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ return s.addr(n.X)
+
+ case ir.ORESULT:
+ n := n.(*ir.ResultExpr)
+ if s.prevCall == nil || s.prevCall.Op != ssa.OpStaticLECall && s.prevCall.Op != ssa.OpInterLECall && s.prevCall.Op != ssa.OpClosureLECall {
+ panic("Expected to see a previous call")
+ }
+ which := n.Index
+ if which == -1 {
+ panic(fmt.Errorf("ORESULT %v does not match call %s", n, s.prevCall))
+ }
+ return s.resultOfCall(s.prevCall, which, n.Type())
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ return s.load(n.Type(), p)
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if n.X.Op() == ir.OSTRUCTLIT {
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ if !ir.IsZero(n.X) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n.X)
+ }
+ return s.zeroVal(n.Type())
+ }
+ // If n is addressable and can't be represented in
+ // SSA, then load just the selected field. This
+ // prevents false memory dependencies in race/msan/asan
+ // instrumentation.
+ if ir.IsAddressable(n) && !s.canSSA(n) {
+ p := s.addr(n)
+ return s.load(n.Type(), p)
+ }
+ v := s.expr(n.X)
+ return s.newValue1I(ssa.OpStructSelect, n.Type(), int64(fieldIdx(n)), v)
+
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ p = s.newValue1I(ssa.OpOffPtr, types.NewPtr(n.Type()), n.Offset(), p)
+ return s.load(n.Type(), p)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ switch {
+ case n.X.Type().IsString():
+ if n.Bounded() && ir.IsConst(n.X, constant.String) && ir.IsConst(n.Index, constant.Int) {
+ // Replace "abc"[1] with 'b'.
+ // Delayed until now because "abc"[1] is not an ideal constant.
+ // See test/fixedbugs/issue11370.go.
+ return s.newValue0I(ssa.OpConst8, types.Types[types.TUINT8], int64(int8(ir.StringVal(n.X)[ir.Int64Val(n.Index)])))
+ }
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ ptrtyp := s.f.Config.Types.BytePtr
+ ptr := s.newValue1(ssa.OpStringPtr, ptrtyp, a)
+ if ir.IsConst(n.Index, constant.Int) {
+ ptr = s.newValue1I(ssa.OpOffPtr, ptrtyp, ir.Int64Val(n.Index), ptr)
+ } else {
+ ptr = s.newValue2(ssa.OpAddPtr, ptrtyp, ptr, i)
+ }
+ return s.load(types.Types[types.TUINT8], ptr)
+ case n.X.Type().IsSlice():
+ p := s.addr(n)
+ return s.load(n.X.Type().Elem(), p)
+ case n.X.Type().IsArray():
+ if TypeOK(n.X.Type()) {
+ // SSA can handle arrays of length at most 1.
+ bound := n.X.Type().NumElem()
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ if bound == 0 {
+ // Bounds check will never succeed. Might as well
+ // use constants for the bounds check.
+ z := s.constInt(types.Types[types.TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ // The return value won't be live, return junk.
+ // But not quite junk, in case bounds checks are turned off. See issue 48092.
+ return s.zeroVal(n.Type())
+ }
+ len := s.constInt(types.Types[types.TINT], bound)
+ s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded()) // checks i == 0
+ return s.newValue1I(ssa.OpArraySelect, n.Type(), 0, a)
+ }
+ p := s.addr(n)
+ return s.load(n.X.Type().Elem(), p)
+ default:
+ s.Fatalf("bad type for index %v", n.X.Type())
+ return nil
+ }
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ switch {
+ case n.X.Type().IsSlice():
+ op := ssa.OpSliceLen
+ if n.Op() == ir.OCAP {
+ op = ssa.OpSliceCap
+ }
+ return s.newValue1(op, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsString(): // string; not reachable for OCAP
+ return s.newValue1(ssa.OpStringLen, types.Types[types.TINT], s.expr(n.X))
+ case n.X.Type().IsMap(), n.X.Type().IsChan():
+ return s.referenceTypeBuiltin(n, s.expr(n.X))
+ default: // array
+ return s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+ }
+
+ case ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ if n.X.Type().IsSlice() {
+ return s.newValue1(ssa.OpSlicePtr, n.Type(), a)
+ } else {
+ return s.newValue1(ssa.OpStringPtr, n.Type(), a)
+ }
+
+ case ir.OITAB:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(ssa.OpITab, n.Type(), a)
+
+ case ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ a := s.expr(n.X)
+ return s.newValue1(ssa.OpIData, n.Type(), a)
+
+ case ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ tab := s.expr(n.X)
+ data := s.expr(n.Y)
+ return s.newValue2(ssa.OpIMake, n.Type(), tab, data)
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ p := s.expr(n.Ptr)
+ l := s.expr(n.Len)
+ c := s.expr(n.Cap)
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ check := s.checkPtrEnabled && n.Op() == ir.OSLICE3ARR && n.X.Op() == ir.OCONVNOP && n.X.(*ir.ConvExpr).X.Type().IsUnsafePtr()
+ v := s.exprCheckPtr(n.X, !check)
+ var i, j, k *ssa.Value
+ if n.Low != nil {
+ i = s.expr(n.Low)
+ }
+ if n.High != nil {
+ j = s.expr(n.High)
+ }
+ if n.Max != nil {
+ k = s.expr(n.Max)
+ }
+ p, l, c := s.slice(v, i, j, k, n.Bounded())
+ if check {
+ // Emit checkptr instrumentation after bound check to prevent false positive, see #46938.
+ s.checkPtrAlignment(n.X.(*ir.ConvExpr), v, s.conv(n.Max, k, k.Type, types.Types[types.TUINTPTR]))
+ }
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, l, c)
+
+ case ir.OSLICESTR:
+ n := n.(*ir.SliceExpr)
+ v := s.expr(n.X)
+ var i, j *ssa.Value
+ if n.Low != nil {
+ i = s.expr(n.Low)
+ }
+ if n.High != nil {
+ j = s.expr(n.High)
+ }
+ p, l, _ := s.slice(v, i, j, nil, n.Bounded())
+ return s.newValue2(ssa.OpStringMake, n.Type(), p, l)
+
+ case ir.OSLICE2ARRPTR:
+ // if arrlen > slice.len {
+ // panic(...)
+ // }
+ // slice.ptr
+ n := n.(*ir.ConvExpr)
+ v := s.expr(n.X)
+ arrlen := s.constInt(types.Types[types.TINT], n.Type().Elem().NumElem())
+ cap := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ s.boundsCheck(arrlen, cap, ssa.BoundsConvert, false)
+ return s.newValue1(ssa.OpSlicePtrUnchecked, n.Type(), v)
+
+ case ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ if ir.IsIntrinsicCall(n) {
+ return s.intrinsicCall(n)
+ }
+ fallthrough
+
+ case ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ return s.callResult(n, callNormal)
+
+ case ir.OGETG:
+ n := n.(*ir.CallExpr)
+ return s.newValue1(ssa.OpGetG, n.Type(), s.mem())
+
+ case ir.OGETCALLERPC:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerPC, n.Type())
+
+ case ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ return s.newValue0(ssa.OpGetCallerSP, n.Type())
+
+ case ir.OAPPEND:
+ return s.append(n.(*ir.CallExpr), false)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
+ // All literals with nonzero fields have already been
+ // rewritten during walk. Any that remain are just T{}
+ // or equivalents. Use the zero value.
+ n := n.(*ir.CompLitExpr)
+ if !ir.IsZero(n) {
+ s.Fatalf("literal with nonzero value in SSA: %v", n)
+ }
+ return s.zeroVal(n.Type())
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return s.newObject(n.Type().Elem())
+
+ case ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ ptr := s.expr(n.X)
+ len := s.expr(n.Y)
+
+ // Force len to uintptr to prevent misuse of garbage bits in the
+ // upper part of the register (#48536).
+ len = s.conv(n, len, len.Type, types.Types[types.TUINTPTR])
+
+ return s.newValue2(ssa.OpAddPtr, n.Type(), ptr, len)
+
+ default:
+ s.Fatalf("unhandled expr %v", n.Op())
+ return nil
+ }
+}
+
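+// resultOfCall returns the result of call c with index which, of type t.
+// A result that is neither in registers nor SSA-able is loaded from memory.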
+func (s *state) resultOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
+ aux := c.Aux.(*ssa.AuxCall)
+ pa := aux.ParamAssignmentForResult(which)
+ // TODO(register args) determine if in-memory TypeOK is better loaded early from SelectNAddr or later when SelectN is expanded.
+ // SelectN is better for pattern-matching and possible call-aware analysis we might want to do in the future.
+ if len(pa.Registers) == 0 && !TypeOK(t) {
+ addr := s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
+ return s.rawLoad(t, addr)
+ }
+ return s.newValue1I(ssa.OpSelectN, t, which, c)
+}
+
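+// resultAddrOfCall returns the address of result which of call c.
+// A register result is first stored to a stack temporary.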
+func (s *state) resultAddrOfCall(c *ssa.Value, which int64, t *types.Type) *ssa.Value {
+ aux := c.Aux.(*ssa.AuxCall)
+ pa := aux.ParamAssignmentForResult(which)
+ if len(pa.Registers) == 0 {
+ return s.newValue1I(ssa.OpSelectNAddr, types.NewPtr(t), which, c)
+ }
+ _, addr := s.temp(c.Pos, t)
+ rval := s.newValue1I(ssa.OpSelectN, t, which, c)
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, addr, rval, s.mem(), false)
+ return addr
+}
+
+// append converts an OAPPEND node to SSA.
+// If inplace is false, it converts the OAPPEND expression n to an ssa.Value,
+// adds it to s, and returns the Value.
+// If inplace is true, it writes the result of the OAPPEND expression n
+// back to the slice being appended to, and returns nil.
+// inplace MUST be set to false if the slice can be SSA'd.
+func (s *state) append(n *ir.CallExpr, inplace bool) *ssa.Value {
+ // If inplace is false, process as expression "append(s, e1, e2, e3)":
+ //
+ // ptr, len, cap := s
+ // newlen := len + 3
+ // if newlen > cap {
+ // ptr, len, cap = growslice(s, newlen)
+ // newlen = len + 3 // recalculate to avoid a spill
+ // }
+ // // with write barriers, if needed:
+ // *(ptr+len) = e1
+ // *(ptr+len+1) = e2
+ // *(ptr+len+2) = e3
+ // return makeslice(ptr, newlen, cap)
+ //
+ //
+ // If inplace is true, process as statement "s = append(s, e1, e2, e3)":
+ //
+ // a := &s
+ // ptr, len, cap := s
+ // newlen := len + 3
+ // if uint(newlen) > uint(cap) {
+ // newptr, len, newcap = growslice(ptr, len, cap, newlen)
+ // vardef(a) // if necessary, advise liveness we are writing a new a
+ // *a.cap = newcap // write before ptr to avoid a spill
+ // *a.ptr = newptr // with write barrier
+ // }
+ // newlen = len + 3 // recalculate to avoid a spill
+ // *a.len = newlen
+ // // with write barriers, if needed:
+ // *(ptr+len) = e1
+ // *(ptr+len+1) = e2
+ // *(ptr+len+2) = e3
+
+ et := n.Type().Elem()
+ pt := types.NewPtr(et)
+
+ // Evaluate slice
+ sn := n.Args[0] // the slice node is the first in the list
+
+ var slice, addr *ssa.Value
+ if inplace {
+ addr = s.addr(sn)
+ slice = s.load(n.Type(), addr)
+ } else {
+ slice = s.expr(sn)
+ }
+
+ // Allocate new blocks
+ grow := s.f.NewBlock(ssa.BlockPlain)
+ assign := s.f.NewBlock(ssa.BlockPlain)
+
+ // Decide if we need to grow
+ nargs := int64(len(n.Args) - 1)
+ p := s.newValue1(ssa.OpSlicePtr, pt, slice)
+ l := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], slice)
+ c := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], slice)
+ nl := s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
+
+ cmp := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT]), types.Types[types.TBOOL], c, nl)
+ s.vars[ptrVar] = p
+
+ if !inplace {
+ s.vars[newlenVar] = nl
+ s.vars[capVar] = c
+ } else {
+ s.vars[lenVar] = l
+ }
+
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.Likely = ssa.BranchUnlikely
+ b.SetControl(cmp)
+ b.AddEdgeTo(grow)
+ b.AddEdgeTo(assign)
+
+ // Call growslice
+ s.startBlock(grow)
+ taddr := s.expr(n.X)
+ r := s.rtcall(ir.Syms.Growslice, true, []*types.Type{pt, types.Types[types.TINT], types.Types[types.TINT]}, taddr, p, l, c, nl)
+
+ if inplace {
+ if sn.Op() == ir.ONAME {
+ sn := sn.(*ir.Name)
+ if sn.Class != ir.PEXTERN {
+ // Tell liveness we're about to build a new slice
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, sn, s.mem())
+ }
+ }
+ capaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceCapOffset, addr)
+ s.store(types.Types[types.TINT], capaddr, r[2])
+ s.store(pt, addr, r[0])
+ // load the value we just stored to avoid having to spill it
+ s.vars[ptrVar] = s.load(pt, addr)
+ s.vars[lenVar] = r[1] // avoid a spill in the fast path
+ } else {
+ s.vars[ptrVar] = r[0]
+ s.vars[newlenVar] = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], r[1], s.constInt(types.Types[types.TINT], nargs))
+ s.vars[capVar] = r[2]
+ }
+
+ b = s.endBlock()
+ b.AddEdgeTo(assign)
+
+ // assign new elements to slots
+ s.startBlock(assign)
+
+ if inplace {
+ l = s.variable(lenVar, types.Types[types.TINT]) // generates phi for len
+ nl = s.newValue2(s.ssaOp(ir.OADD, types.Types[types.TINT]), types.Types[types.TINT], l, s.constInt(types.Types[types.TINT], nargs))
+ lenaddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, types.SliceLenOffset, addr)
+ s.store(types.Types[types.TINT], lenaddr, nl)
+ }
+
+ // Evaluate args
+ type argRec struct {
+ // if store is true, we're appending the value v. If false, we're appending the
+ // value at *v.
+ v *ssa.Value
+ store bool
+ }
+ args := make([]argRec, 0, nargs)
+ for _, n := range n.Args[1:] {
+ if TypeOK(n.Type()) {
+ args = append(args, argRec{v: s.expr(n), store: true})
+ } else {
+ v := s.addr(n)
+ args = append(args, argRec{v: v})
+ }
+ }
+
+ p = s.variable(ptrVar, pt) // generates phi for ptr
+ if !inplace {
+ nl = s.variable(newlenVar, types.Types[types.TINT]) // generates phi for nl
+ c = s.variable(capVar, types.Types[types.TINT]) // generates phi for cap
+ }
+ p2 := s.newValue2(ssa.OpPtrIndex, pt, p, l)
+ for i, arg := range args {
+ addr := s.newValue2(ssa.OpPtrIndex, pt, p2, s.constInt(types.Types[types.TINT], int64(i)))
+ if arg.store {
+ s.storeType(et, addr, arg.v, 0, true)
+ } else {
+ s.move(et, addr, arg.v)
+ }
+ }
+
+ delete(s.vars, ptrVar)
+ if inplace {
+ delete(s.vars, lenVar)
+ return nil
+ }
+ delete(s.vars, newlenVar)
+ delete(s.vars, capVar)
+ // make result
+ return s.newValue3(ssa.OpSliceMake, n.Type(), p, nl, c)
+}
+
+// condBranch evaluates the boolean expression cond and branches to yes
+// if cond is true and no if cond is false.
+// This function is intended to handle && and || better than just calling
+// s.expr(cond) and branching on the result.
+func (s *state) condBranch(cond ir.Node, yes, no *ssa.Block, likely int8) {
+ switch cond.Op() {
+ case ir.OANDAND:
+ cond := cond.(*ir.LogicalExpr)
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, mid, no, max8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Y, yes, no, likely)
+ return
+ // Note: if likely==1, then both recursive calls pass 1.
+ // If likely==-1, then we don't have enough information to decide
+ // whether the first branch is likely or not. So we pass 0 for
+ // the likeliness of the first branch.
+ // TODO: have the frontend give us branch prediction hints for
+ // OANDAND and OOROR nodes (if it ever has such info).
+ case ir.OOROR:
+ cond := cond.(*ir.LogicalExpr)
+ mid := s.f.NewBlock(ssa.BlockPlain)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, yes, mid, min8(likely, 0))
+ s.startBlock(mid)
+ s.condBranch(cond.Y, yes, no, likely)
+ return
+ // Note: if likely==-1, then both recursive calls pass -1.
+ // If likely==1, then we don't have enough info to decide
+ // the likelihood of the first branch.
+ case ir.ONOT:
+ cond := cond.(*ir.UnaryExpr)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, no, yes, -likely)
+ return
+ case ir.OCONVNOP:
+ cond := cond.(*ir.ConvExpr)
+ s.stmtList(cond.Init())
+ s.condBranch(cond.X, yes, no, likely)
+ return
+ }
+ c := s.expr(cond)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(c)
+ b.Likely = ssa.BranchPrediction(likely) // gc and ssa both use -1/0/+1 for likeliness
+ b.AddEdgeTo(yes)
+ b.AddEdgeTo(no)
+}
+
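+// skipMask records which top-level parts of an assignment (ptr, len, cap)
+// can be skipped.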
+type skipMask uint8
+
+const (
+ skipPtr skipMask = 1 << iota
+ skipLen
+ skipCap
+)
+
+// assign does left = right.
+// Right has already been evaluated to ssa, left has not.
+// If deref is true, then we do left = *right instead (and right has already been nil-checked).
+// If deref is true and right == nil, just do left = 0.
+// skip indicates assignments (at the top level) that can be avoided.
+// mayOverlap indicates whether left&right might partially overlap in memory. Default is false.
+func (s *state) assign(left ir.Node, right *ssa.Value, deref bool, skip skipMask) {
+ s.assignWhichMayOverlap(left, right, deref, skip, false)
+}
+func (s *state) assignWhichMayOverlap(left ir.Node, right *ssa.Value, deref bool, skip skipMask, mayOverlap bool) {
+ if left.Op() == ir.ONAME && ir.IsBlank(left) {
+ return
+ }
+ t := left.Type()
+ types.CalcSize(t)
+ if s.canSSA(left) {
+ if deref {
+ s.Fatalf("can SSA LHS %v but not RHS %s", left, right)
+ }
+ if left.Op() == ir.ODOT {
+ // We're assigning to a field of an ssa-able value.
+ // We need to build a new structure with the new value for the
+ // field we're assigning and the old values for the other fields.
+ // For instance:
+ // type T struct {a, b, c int}
+			//   var x T
+ // x.b = 5
+ // For the x.b = 5 assignment we want to generate x = T{x.a, 5, x.c}
+
+ // Grab information about the structure type.
+ left := left.(*ir.SelectorExpr)
+ t := left.X.Type()
+ nf := t.NumFields()
+ idx := fieldIdx(left)
+
+ // Grab old value of structure.
+ old := s.expr(left.X)
+
+ // Make new structure.
+ new := s.newValue0(ssa.StructMakeOp(t.NumFields()), t)
+
+ // Add fields as args.
+ for i := 0; i < nf; i++ {
+ if i == idx {
+ new.AddArg(right)
+ } else {
+ new.AddArg(s.newValue1I(ssa.OpStructSelect, t.FieldType(i), int64(i), old))
+ }
+ }
+
+ // Recursively assign the new value we've made to the base of the dot op.
+ s.assign(left.X, new, false, 0)
+ // TODO: do we need to update named values here?
+ return
+ }
+ if left.Op() == ir.OINDEX && left.(*ir.IndexExpr).X.Type().IsArray() {
+ left := left.(*ir.IndexExpr)
+ s.pushLine(left.Pos())
+ defer s.popLine()
+ // We're assigning to an element of an ssa-able array.
+ // a[i] = v
+ t := left.X.Type()
+ n := t.NumElem()
+
+ i := s.expr(left.Index) // index
+ if n == 0 {
+ // The bounds check must fail. Might as well
+ // ignore the actual index and just use zeros.
+ z := s.constInt(types.Types[types.TINT], 0)
+ s.boundsCheck(z, z, ssa.BoundsIndex, false)
+ return
+ }
+ if n != 1 {
+ s.Fatalf("assigning to non-1-length array")
+ }
+ // Rewrite to a = [1]{v}
+ len := s.constInt(types.Types[types.TINT], 1)
+ s.boundsCheck(i, len, ssa.BoundsIndex, false) // checks i == 0
+ v := s.newValue1(ssa.OpArrayMake1, t, right)
+ s.assign(left.X, v, false, 0)
+ return
+ }
+ left := left.(*ir.Name)
+ // Update variable assignment.
+ s.vars[left] = right
+ s.addNamedValue(left, right)
+ return
+ }
+
+ // If this assignment clobbers an entire local variable, then emit
+ // OpVarDef so liveness analysis knows the variable is redefined.
+ if base, ok := clobberBase(left).(*ir.Name); ok && base.OnStack() && skip == 0 {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, base, s.mem(), !ir.IsAutoTmp(base))
+ }
+
+ // Left is not ssa-able. Compute its address.
+ addr := s.addr(left)
+ if ir.IsReflectHeaderDataField(left) {
+ // Package unsafe's documentation says storing pointers into
+ // reflect.SliceHeader and reflect.StringHeader's Data fields
+ // is valid, even though they have type uintptr (#19168).
+ // Mark it pointer type to signal the writebarrier pass to
+ // insert a write barrier.
+ t = types.Types[types.TUNSAFEPTR]
+ }
+ if deref {
+ // Treat as a mem->mem move.
+ if right == nil {
+ s.zero(t, addr)
+ } else {
+ s.moveWhichMayOverlap(t, addr, right, mayOverlap)
+ }
+ return
+ }
+ // Treat as a store.
+ s.storeType(t, addr, right, skip, !ir.IsAutoTmp(left))
+}
+
+// zeroVal returns the zero value for type t.
+func (s *state) zeroVal(t *types.Type) *ssa.Value {
+ switch {
+ case t.IsInteger():
+ switch t.Size() {
+ case 1:
+ return s.constInt8(t, 0)
+ case 2:
+ return s.constInt16(t, 0)
+ case 4:
+ return s.constInt32(t, 0)
+ case 8:
+ return s.constInt64(t, 0)
+ default:
+ s.Fatalf("bad sized integer type %v", t)
+ }
+ case t.IsFloat():
+ switch t.Size() {
+ case 4:
+ return s.constFloat32(t, 0)
+ case 8:
+ return s.constFloat64(t, 0)
+ default:
+ s.Fatalf("bad sized float type %v", t)
+ }
+ case t.IsComplex():
+ switch t.Size() {
+ case 8:
+ z := s.constFloat32(types.Types[types.TFLOAT32], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ case 16:
+ z := s.constFloat64(types.Types[types.TFLOAT64], 0)
+ return s.entryNewValue2(ssa.OpComplexMake, t, z, z)
+ default:
+ s.Fatalf("bad sized complex type %v", t)
+ }
+
+ case t.IsString():
+ return s.constEmptyString(t)
+ case t.IsPtrShaped():
+ return s.constNil(t)
+ case t.IsBoolean():
+ return s.constBool(false)
+ case t.IsInterface():
+ return s.constInterface(t)
+ case t.IsSlice():
+ return s.constSlice(t)
+ case t.IsStruct():
+ n := t.NumFields()
+ v := s.entryNewValue0(ssa.StructMakeOp(t.NumFields()), t)
+ for i := 0; i < n; i++ {
+ v.AddArg(s.zeroVal(t.FieldType(i)))
+ }
+ return v
+ case t.IsArray():
+ switch t.NumElem() {
+ case 0:
+ return s.entryNewValue0(ssa.OpArrayMake0, t)
+ case 1:
+ return s.entryNewValue1(ssa.OpArrayMake1, t, s.zeroVal(t.Elem()))
+ }
+ }
+ s.Fatalf("zero for type %v not implemented", t)
+ return nil
+}
+
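+// callKind distinguishes how a call is lowered: a normal call, a deferred call
+// (heap- or stack-allocated), a go statement, or a tail call.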
+type callKind int8
+
+const (
+ callNormal callKind = iota
+ callDefer
+ callDeferStack
+ callGo
+ callTail
+)
+
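+// sfRtCallDef describes the soft-float runtime routine that implements a
+// floating-point SSA op and the kind of its result.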
+type sfRtCallDef struct {
+ rtfn *obj.LSym
+ rtype types.Kind
+}
+
+var softFloatOps map[ssa.Op]sfRtCallDef
+
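+// softfloatInit populates softFloatOps, mapping floating-point SSA ops to the
+// runtime routines used when compiling with soft-float.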
+func softfloatInit() {
+ // Some of these operations get transformed by sfcall.
+ softFloatOps = map[ssa.Op]sfRtCallDef{
+ ssa.OpAdd32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpAdd64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpSub32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd32"), types.TFLOAT32},
+ ssa.OpSub64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fadd64"), types.TFLOAT64},
+ ssa.OpMul32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul32"), types.TFLOAT32},
+ ssa.OpMul64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fmul64"), types.TFLOAT64},
+ ssa.OpDiv32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv32"), types.TFLOAT32},
+ ssa.OpDiv64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fdiv64"), types.TFLOAT64},
+
+ ssa.OpEq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpEq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpNeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq64"), types.TBOOL},
+ ssa.OpNeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("feq32"), types.TBOOL},
+ ssa.OpLess64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt64"), types.TBOOL},
+ ssa.OpLess32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fgt32"), types.TBOOL},
+ ssa.OpLeq64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge64"), types.TBOOL},
+ ssa.OpLeq32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fge32"), types.TBOOL},
+
+ ssa.OpCvt32to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint32"), types.TINT32},
+ ssa.OpCvt64to32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f32toint64"), types.TINT64},
+ ssa.OpCvt64Uto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to32"), types.TFLOAT32},
+ ssa.OpCvt32Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f32touint64"), types.TUINT64},
+ ssa.OpCvt32to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint32"), types.TINT32},
+ ssa.OpCvt64to64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64: sfRtCallDef{typecheck.LookupRuntimeFunc("f64toint64"), types.TINT64},
+ ssa.OpCvt64Uto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("fuint64to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto64U: sfRtCallDef{typecheck.LookupRuntimeFunc("f64touint64"), types.TUINT64},
+ ssa.OpCvt32Fto64F: sfRtCallDef{typecheck.LookupRuntimeFunc("f32to64"), types.TFLOAT64},
+ ssa.OpCvt64Fto32F: sfRtCallDef{typecheck.LookupRuntimeFunc("f64to32"), types.TFLOAT32},
+ }
+}
+
+// TODO: do not emit sfcall if the operation can be optimized to a constant
+// in a later opt phase.
+func (s *state) sfcall(op ssa.Op, args ...*ssa.Value) (*ssa.Value, bool) {
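+ // f2i maps a float type to the unsigned integer type of the same size;
+ // the softfloat runtime routines take and return raw bit patterns.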
+ f2i := func(t *types.Type) *types.Type {
+ switch t.Kind() {
+ case types.TFLOAT32:
+ return types.Types[types.TUINT32]
+ case types.TFLOAT64:
+ return types.Types[types.TUINT64]
+ }
+ return t
+ }
+
+ if callDef, ok := softFloatOps[op]; ok {
+ switch op {
+ case ssa.OpLess32F,
+ ssa.OpLess64F,
+ ssa.OpLeq32F,
+ ssa.OpLeq64F:
+ args[0], args[1] = args[1], args[0]
+ case ssa.OpSub32F,
+ ssa.OpSub64F:
+ args[1] = s.newValue1(s.ssaOp(ir.ONEG, types.Types[callDef.rtype]), args[1].Type, args[1])
+ }
+
+ // The runtime functions take uints for floats and return uints.
+ // Convert to uints so we use the right calling convention.
+ for i, a := range args {
+ if a.Type.IsFloat() {
+ args[i] = s.newValue1(ssa.OpCopy, f2i(a.Type), a)
+ }
+ }
+
+ rt := types.Types[callDef.rtype]
+ result := s.rtcall(callDef.rtfn, true, []*types.Type{f2i(rt)}, args...)[0]
+ if rt.IsFloat() {
+ result = s.newValue1(ssa.OpCopy, rt, result)
+ }
+ if op == ssa.OpNeq32F || op == ssa.OpNeq64F {
+ result = s.newValue1(ssa.OpNot, result.Type, result)
+ }
+ return result, true
+ }
+ return nil, false
+}
+
+var intrinsics map[intrinsicKey]intrinsicBuilder
+
+// An intrinsicBuilder converts a call node n into an ssa value that
+// implements that call as an intrinsic. args is a list of arguments to the func.
+type intrinsicBuilder func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value
+
+type intrinsicKey struct {
+ arch *sys.Arch
+ pkg string
+ fn string
+}
+
+func InitTables() {
+ intrinsics = map[intrinsicKey]intrinsicBuilder{}
+
+ var all []*sys.Arch
+ var p4 []*sys.Arch
+ var p8 []*sys.Arch
+ var lwatomics []*sys.Arch
+ for _, a := range &sys.Archs {
+ all = append(all, a)
+ if a.PtrSize == 4 {
+ p4 = append(p4, a)
+ } else {
+ p8 = append(p8, a)
+ }
+ if a.Family != sys.PPC64 {
+ lwatomics = append(lwatomics, a)
+ }
+ }
+
+ // add adds the intrinsic b for pkg.fn for the given list of architectures.
+ add := func(pkg, fn string, b intrinsicBuilder, archs ...*sys.Arch) {
+ for _, a := range archs {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ // addF does the same as add but operates on architecture families.
+ addF := func(pkg, fn string, b intrinsicBuilder, archFamilies ...sys.ArchFamily) {
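+ // Build a bitmask of the requested architecture families so the
+ // loop below can test membership cheaply.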
+ m := 0
+ for _, f := range archFamilies {
+ if f >= 32 {
+ panic("too many architecture families")
+ }
+ m |= 1 << uint(f)
+ }
+ for _, a := range all {
+ if m>>uint(a.Family)&1 != 0 {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ }
+ }
+ }
+ // alias defines pkg.fn = pkg2.fn2 for all architectures in archs for which pkg2.fn2 exists.
+ alias := func(pkg, fn, pkg2, fn2 string, archs ...*sys.Arch) {
+ aliased := false
+ for _, a := range archs {
+ if b, ok := intrinsics[intrinsicKey{a, pkg2, fn2}]; ok {
+ intrinsics[intrinsicKey{a, pkg, fn}] = b
+ aliased = true
+ }
+ }
+ if !aliased {
+ panic(fmt.Sprintf("attempted to alias undefined intrinsic: %s.%s", pkg, fn))
+ }
+ }
+
+ /******** runtime ********/
+ if !base.Flag.Cfg.Instrumenting {
+ add("runtime", "slicebytetostringtmp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // Compiler frontend optimizations emit OBYTES2STRTMP nodes
+ // for the backend instead of slicebytetostringtmp calls
+ // when not instrumenting.
+ return s.newValue2(ssa.OpStringMake, n.Type(), args[0], args[1])
+ },
+ all...)
+ }
+ addF("runtime/internal/math", "MulUintptr",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue2(ssa.OpMul32uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+ }
+ return s.newValue2(ssa.OpMul64uover, types.NewTuple(types.Types[types.TUINT], types.Types[types.TUINT]), args[0], args[1])
+ },
+ sys.AMD64, sys.I386, sys.MIPS64, sys.RISCV64)
+ add("runtime", "KeepAlive",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
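+ // runtime.KeepAlive's argument is an interface value; keeping its
+ // data pointer alive is sufficient.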
+ data := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, args[0])
+ s.vars[memVar] = s.newValue2(ssa.OpKeepAlive, types.TypeMem, data, s.mem())
+ return nil
+ },
+ all...)
+ add("runtime", "getclosureptr",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetClosurePtr, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallerpc",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerPC, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ add("runtime", "getcallersp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue0(ssa.OpGetCallerSP, s.f.Config.Types.Uintptr)
+ },
+ all...)
+
+ addF("runtime", "publicationBarrier",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue1(ssa.OpPubBarrier, types.TypeMem, s.mem())
+ return nil
+ },
+ sys.ARM64)
+
+ /******** runtime/internal/sys ********/
+ addF("runtime/internal/sys", "Ctz32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+ addF("runtime/internal/sys", "Ctz64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64)
+ addF("runtime/internal/sys", "Bswap32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap32, types.Types[types.TUINT32], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+ addF("runtime/internal/sys", "Bswap64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBswap64, types.Types[types.TUINT64], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X)
+
+ /****** Prefetch ******/
+ makePrefetchFunc := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue2(op, types.TypeMem, args[0], s.mem())
+ return nil
+ }
+ }
+
+ // Make Prefetch intrinsics for supported platforms.
+ // On unsupported platforms the stub function will be eliminated.
+ addF("runtime/internal/sys", "Prefetch", makePrefetchFunc(ssa.OpPrefetchCache),
+ sys.AMD64, sys.ARM64, sys.PPC64)
+ addF("runtime/internal/sys", "PrefetchStreamed", makePrefetchFunc(ssa.OpPrefetchCacheStreamed),
+ sys.AMD64, sys.ARM64, sys.PPC64)
+
+ /******** runtime/internal/atomic ********/
+ addF("runtime/internal/atomic", "Load",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad8, types.NewTuple(types.Types[types.TUINT8], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT8], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Load64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoad64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "LoadAcq64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadAcq64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.PPC64)
+ addF("runtime/internal/atomic", "Loadp",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue2(ssa.OpAtomicLoadPtr, types.NewTuple(s.f.Config.Types.BytePtr, types.TypeMem), args[0], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, s.f.Config.Types.BytePtr, v)
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Store",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Store64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStore64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StorepNoWB",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStorePtrNoWB, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.MIPS64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64, sys.S390X)
+ addF("runtime/internal/atomic", "StoreRel64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicStoreRel64, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.PPC64)
+
+ addF("runtime/internal/atomic", "Xchg",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xchg64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicExchange64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ type atomicOpEmitter func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind)
+
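+ // makeAtomicGuardedIntrinsicARM64 builds an intrinsic that checks the
+ // ARM64HasATOMICS flag at run time and emits op1 (the LSE atomic variant)
+ // when it is set, falling back to op0 otherwise.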
+ makeAtomicGuardedIntrinsicARM64 := func(op0, op1 ssa.Op, typ, rtyp types.Kind, emit atomicOpEmitter) intrinsicBuilder {
+
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // Target atomics support is detected dynamically at run time.
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARM64HasATOMICS, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+ // We have atomic instructions - use them directly.
+ s.startBlock(bTrue)
+ emit(s, n, args, op1, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Use original instruction sequence.
+ s.startBlock(bFalse)
+ emit(s, n, args, op0, typ)
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
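+ // rtyp == TNIL signals that the intrinsic produces no result
+ // (e.g. the And/Or forms below).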
+ if rtyp == types.TNIL {
+ return nil
+ } else {
+ return s.variable(n, types.Types[rtyp])
+ }
+ }
+ }
+
+ atomicXchgXaddEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ v := s.newValue3(op, types.NewTuple(types.Types[typ], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+ addF("runtime/internal/atomic", "Xchg",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange32, ssa.OpAtomicExchange32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xchg64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicExchange64, ssa.OpAtomicExchange64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Xadd",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd32, types.NewTuple(types.Types[types.TUINT32], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT32], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Xadd64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue3(ssa.OpAtomicAdd64, types.NewTuple(types.Types[types.TUINT64], types.TypeMem), args[0], args[1], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TUINT64], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ addF("runtime/internal/atomic", "Xadd",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd32, ssa.OpAtomicAdd32Variant, types.TUINT32, types.TUINT32, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Xadd64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAdd64, ssa.OpAtomicAdd64Variant, types.TUINT64, types.TUINT64, atomicXchgXaddEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "Cas",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.AMD64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Cas64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap64, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.AMD64, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "CasRel",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ v := s.newValue4(ssa.OpAtomicCompareAndSwap32, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ return s.newValue1(ssa.OpSelect0, types.Types[types.TBOOL], v)
+ },
+ sys.PPC64)
+
+ atomicCasEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ v := s.newValue4(op, types.NewTuple(types.Types[types.TBOOL], types.TypeMem), args[0], args[1], args[2], s.mem())
+ s.vars[memVar] = s.newValue1(ssa.OpSelect1, types.TypeMem, v)
+ s.vars[n] = s.newValue1(ssa.OpSelect0, types.Types[typ], v)
+ }
+
+ addF("runtime/internal/atomic", "Cas",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap32, ssa.OpAtomicCompareAndSwap32Variant, types.TUINT32, types.TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Cas64",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicCompareAndSwap64, ssa.OpAtomicCompareAndSwap64Variant, types.TUINT64, types.TBOOL, atomicCasEmitterARM64),
+ sys.ARM64)
+
+ addF("runtime/internal/atomic", "And8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "And",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicAnd32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Or8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr8, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.ARM64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("runtime/internal/atomic", "Or",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ s.vars[memVar] = s.newValue3(ssa.OpAtomicOr32, types.TypeMem, args[0], args[1], s.mem())
+ return nil
+ },
+ sys.AMD64, sys.MIPS, sys.PPC64, sys.RISCV64, sys.S390X)
+
+ atomicAndOrEmitterARM64 := func(s *state, n *ir.CallExpr, args []*ssa.Value, op ssa.Op, typ types.Kind) {
+ s.vars[memVar] = s.newValue3(op, types.TypeMem, args[0], args[1], s.mem())
+ }
+
+ addF("runtime/internal/atomic", "And8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd8, ssa.OpAtomicAnd8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "And",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicAnd32, ssa.OpAtomicAnd32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or8",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr8, ssa.OpAtomicOr8Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+ addF("runtime/internal/atomic", "Or",
+ makeAtomicGuardedIntrinsicARM64(ssa.OpAtomicOr32, ssa.OpAtomicOr32Variant, types.TNIL, types.TNIL, atomicAndOrEmitterARM64),
+ sys.ARM64)
+
+ // Aliases for atomic load operations
+ alias("runtime/internal/atomic", "Loadint32", "runtime/internal/atomic", "Load", all...)
+ alias("runtime/internal/atomic", "Loadint64", "runtime/internal/atomic", "Load64", all...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduintptr", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load", p4...)
+ alias("runtime/internal/atomic", "Loaduint", "runtime/internal/atomic", "Load64", p8...)
+ alias("runtime/internal/atomic", "LoadAcq", "runtime/internal/atomic", "Load", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcq64", "runtime/internal/atomic", "Load64", lwatomics...)
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq", p4...) // linknamed
+ alias("runtime/internal/atomic", "LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...)
+ alias("sync", "runtime_LoadAcquintptr", "runtime/internal/atomic", "LoadAcq64", p8...) // linknamed
+
+ // Aliases for atomic store operations
+ alias("runtime/internal/atomic", "Storeint32", "runtime/internal/atomic", "Store", all...)
+ alias("runtime/internal/atomic", "Storeint64", "runtime/internal/atomic", "Store64", all...)
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("runtime/internal/atomic", "Storeuintptr", "runtime/internal/atomic", "Store64", p8...)
+ alias("runtime/internal/atomic", "StoreRel", "runtime/internal/atomic", "Store", lwatomics...)
+ alias("runtime/internal/atomic", "StoreRel64", "runtime/internal/atomic", "Store64", lwatomics...)
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel", p4...) // linknamed
+ alias("runtime/internal/atomic", "StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...)
+ alias("sync", "runtime_StoreReluintptr", "runtime/internal/atomic", "StoreRel64", p8...) // linknamed
+
+ // Aliases for atomic swap operations
+ alias("runtime/internal/atomic", "Xchgint32", "runtime/internal/atomic", "Xchg", all...)
+ alias("runtime/internal/atomic", "Xchgint64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("runtime/internal/atomic", "Xchguintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+ // Aliases for atomic add operations
+ alias("runtime/internal/atomic", "Xaddint32", "runtime/internal/atomic", "Xadd", all...)
+ alias("runtime/internal/atomic", "Xaddint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("runtime/internal/atomic", "Xadduintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+ // Aliases for atomic CAS operations
+ alias("runtime/internal/atomic", "Casint32", "runtime/internal/atomic", "Cas", all...)
+ alias("runtime/internal/atomic", "Casint64", "runtime/internal/atomic", "Cas64", all...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casuintptr", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas", p4...)
+ alias("runtime/internal/atomic", "Casp1", "runtime/internal/atomic", "Cas64", p8...)
+ alias("runtime/internal/atomic", "CasRel", "runtime/internal/atomic", "Cas", lwatomics...)
+
+ /******** math ********/
+ addF("math", "Sqrt",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpSqrt, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.I386, sys.AMD64, sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm)
+ addF("math", "Trunc",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpTrunc, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Ceil",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCeil, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Floor",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpFloor, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X, sys.Wasm)
+ addF("math", "Round",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRound, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.PPC64, sys.S390X)
+ addF("math", "RoundToEven",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpRoundToEven, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math", "Abs",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpAbs, types.Types[types.TFLOAT64], args[0])
+ },
+ sys.ARM64, sys.ARM, sys.PPC64, sys.RISCV64, sys.Wasm)
+ addF("math", "Copysign",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpCopysign, types.Types[types.TFLOAT64], args[0], args[1])
+ },
+ sys.PPC64, sys.RISCV64, sys.Wasm)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ },
+ sys.ARM64, sys.PPC64, sys.RISCV64, sys.S390X)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+
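+ // GOAMD64=v3 and above guarantee FMA3, so no run-time feature check is needed.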
+ if buildcfg.GOAMD64 >= 3 {
+ return s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasFMA)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // >= haswell cpus are common
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ },
+ sys.AMD64)
+ addF("math", "FMA",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if !s.config.UseFMA {
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+ addr := s.entryNewValue1A(ssa.OpAddr, types.Types[types.TBOOL].PtrTo(), ir.Syms.ARMHasVFPv4, s.sb)
+ v := s.load(types.Types[types.TBOOL], addr)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue3(ssa.OpFMA, types.Types[types.TFLOAT64], args[0], args[1], args[2])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ },
+ sys.ARM)
+
+ makeRoundAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
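+ // GOAMD64=v2 and above guarantee SSE4.1, so the rounding instruction
+ // can be emitted without a run-time feature check.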
+ if buildcfg.GOAMD64 >= 2 {
+ return s.newValue1(op, types.Types[types.TFLOAT64], args[0])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasSSE41)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // most machines have sse4.1 nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[types.TFLOAT64], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TFLOAT64]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TFLOAT64])
+ }
+ }
+ addF("math", "RoundToEven",
+ makeRoundAMD64(ssa.OpRoundToEven),
+ sys.AMD64)
+ addF("math", "Floor",
+ makeRoundAMD64(ssa.OpFloor),
+ sys.AMD64)
+ addF("math", "Ceil",
+ makeRoundAMD64(ssa.OpCeil),
+ sys.AMD64)
+ addF("math", "Trunc",
+ makeRoundAMD64(ssa.OpTrunc),
+ sys.AMD64)
+
+ /******** math/bits ********/
+ addF("math/bits", "TrailingZeros64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
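+ // MIPS has no 16-bit count-trailing-zeros; zero-extend to 32 bits and
+ // OR in a bit at position 16 so a zero input yields 16.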
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<16)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz16, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.I386, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<16)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+ },
+ sys.S390X, sys.PPC64)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
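+ // Same trick as TrailingZeros16: OR in a bit at position 8 so a zero
+ // input yields 8.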
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ c := s.constInt32(types.Types[types.TUINT32], 1<<8)
+ y := s.newValue2(ssa.OpOr32, types.Types[types.TUINT32], x, c)
+ return s.newValue1(ssa.OpCtz32, types.Types[types.TINT], y)
+ },
+ sys.MIPS)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpCtz8, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM, sys.ARM64, sys.Wasm)
+ addF("math/bits", "TrailingZeros8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ c := s.constInt64(types.Types[types.TUINT64], 1<<8)
+ y := s.newValue2(ssa.OpOr64, types.Types[types.TUINT64], x, c)
+ return s.newValue1(ssa.OpCtz64, types.Types[types.TINT], y)
+ },
+ sys.S390X)
+ alias("math/bits", "ReverseBytes64", "runtime/internal/sys", "Bswap64", all...)
+ alias("math/bits", "ReverseBytes32", "runtime/internal/sys", "Bswap32", all...)
+ // ReverseBytes inlines correctly, no need to intrinsify it.
+ // ReverseBytes16 lowers to a rotate, no need for anything special here.
+ addF("math/bits", "Len64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64)
+ addF("math/bits", "Len32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ }
+ x := s.newValue1(ssa.OpZeroExt32to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM, sys.S390X, sys.MIPS, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt16to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt16to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen16, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ x := s.newValue1(ssa.OpZeroExt8to32, types.Types[types.TUINT32], args[0])
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], x)
+ }
+ x := s.newValue1(ssa.OpZeroExt8to64, types.Types[types.TUINT64], args[0])
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], x)
+ },
+ sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ addF("math/bits", "Len8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitLen8, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64)
+ addF("math/bits", "Len",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitLen32, types.Types[types.TINT], args[0])
+ }
+ return s.newValue1(ssa.OpBitLen64, types.Types[types.TINT], args[0])
+ },
+ sys.AMD64, sys.ARM64, sys.ARM, sys.S390X, sys.MIPS, sys.PPC64, sys.Wasm)
+ // LeadingZeros is handled because it trivially calls Len.
+ addF("math/bits", "Reverse64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev16, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpBitRev8, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "Reverse",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ if s.config.PtrSize == 4 {
+ return s.newValue1(ssa.OpBitRev32, types.Types[types.TINT], args[0])
+ }
+ return s.newValue1(ssa.OpBitRev64, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64)
+ addF("math/bits", "RotateLeft8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft8, types.Types[types.TUINT8], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft16, types.Types[types.TUINT16], args[0], args[1])
+ },
+ sys.AMD64)
+ addF("math/bits", "RotateLeft32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft32, types.Types[types.TUINT32], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "RotateLeft64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpRotateLeft64, types.Types[types.TUINT64], args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ alias("math/bits", "RotateLeft", "math/bits", "RotateLeft64", p8...)
+
+ makeOnesCountAMD64 := func(op ssa.Op) func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
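+ // GOAMD64=v2 and above guarantee POPCNT, so no run-time feature check is needed.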
+ if buildcfg.GOAMD64 >= 2 {
+ return s.newValue1(op, types.Types[types.TINT], args[0])
+ }
+
+ v := s.entryNewValue0A(ssa.OpHasCPUFeature, types.Types[types.TBOOL], ir.Syms.X86HasPOPCNT)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(v)
+ bTrue := s.f.NewBlock(ssa.BlockPlain)
+ bFalse := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bTrue)
+ b.AddEdgeTo(bFalse)
+ b.Likely = ssa.BranchLikely // most machines have popcnt nowadays
+
+ // We have the intrinsic - use it directly.
+ s.startBlock(bTrue)
+ s.vars[n] = s.newValue1(op, types.Types[types.TINT], args[0])
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Call the pure Go version.
+ s.startBlock(bFalse)
+ s.vars[n] = s.callResult(n, callNormal) // types.Types[TINT]
+ s.endBlock().AddEdgeTo(bEnd)
+
+ // Merge results.
+ s.startBlock(bEnd)
+ return s.variable(n, types.Types[types.TINT])
+ }
+ }
+ addF("math/bits", "OnesCount64",
+ makeOnesCountAMD64(ssa.OpPopCount64),
+ sys.AMD64)
+ addF("math/bits", "OnesCount64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount64, types.Types[types.TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount32",
+ makeOnesCountAMD64(ssa.OpPopCount32),
+ sys.AMD64)
+ addF("math/bits", "OnesCount32",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount32, types.Types[types.TINT], args[0])
+ },
+ sys.PPC64, sys.ARM64, sys.S390X, sys.Wasm)
+ addF("math/bits", "OnesCount16",
+ makeOnesCountAMD64(ssa.OpPopCount16),
+ sys.AMD64)
+ addF("math/bits", "OnesCount16",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount16, types.Types[types.TINT], args[0])
+ },
+ sys.ARM64, sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount8",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue1(ssa.OpPopCount8, types.Types[types.TINT], args[0])
+ },
+ sys.S390X, sys.PPC64, sys.Wasm)
+ addF("math/bits", "OnesCount",
+ makeOnesCountAMD64(ssa.OpPopCount64),
+ sys.AMD64)
+ addF("math/bits", "Mul64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X, sys.MIPS64, sys.RISCV64)
+ alias("math/bits", "Mul", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
+ alias("runtime/internal/math", "Mul64", "math/bits", "Mul64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X, sys.ArchMIPS64, sys.ArchMIPS64LE, sys.ArchRISCV64)
+ addF("math/bits", "Add64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpAdd64carry, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.PPC64, sys.S390X)
+ alias("math/bits", "Add", "math/bits", "Add64", sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64, sys.ArchPPC64LE, sys.ArchS390X)
+ addF("math/bits", "Sub64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue3(ssa.OpSub64borrow, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64, sys.ARM64, sys.S390X)
+ alias("math/bits", "Sub", "math/bits", "Sub64", sys.ArchAMD64, sys.ArchARM64, sys.ArchS390X)
+ addF("math/bits", "Div64",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ // check for divide-by-zero/overflow and panic with appropriate message
+ cmpZero := s.newValue2(s.ssaOp(ir.ONE, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[2], s.zeroVal(types.Types[types.TUINT64]))
+ s.check(cmpZero, ir.Syms.Panicdivide)
+ cmpOverflow := s.newValue2(s.ssaOp(ir.OLT, types.Types[types.TUINT64]), types.Types[types.TBOOL], args[0], args[2])
+ s.check(cmpOverflow, ir.Syms.Panicoverflow)
+ return s.newValue3(ssa.OpDiv128u, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1], args[2])
+ },
+ sys.AMD64)
+ alias("math/bits", "Div", "math/bits", "Div64", sys.ArchAMD64)
+
+ alias("runtime/internal/sys", "Ctz8", "math/bits", "TrailingZeros8", all...)
+ alias("runtime/internal/sys", "TrailingZeros8", "math/bits", "TrailingZeros8", all...)
+ alias("runtime/internal/sys", "TrailingZeros64", "math/bits", "TrailingZeros64", all...)
+ alias("runtime/internal/sys", "Len8", "math/bits", "Len8", all...)
+ alias("runtime/internal/sys", "Len64", "math/bits", "Len64", all...)
+ alias("runtime/internal/sys", "OnesCount64", "math/bits", "OnesCount64", all...)
+
+ /******** sync/atomic ********/
+
+ // Note: these are disabled by flag_race in findIntrinsic below.
+ alias("sync/atomic", "LoadInt32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadInt64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadPointer", "runtime/internal/atomic", "Loadp", all...)
+ alias("sync/atomic", "LoadUint32", "runtime/internal/atomic", "Load", all...)
+ alias("sync/atomic", "LoadUint64", "runtime/internal/atomic", "Load64", all...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load", p4...)
+ alias("sync/atomic", "LoadUintptr", "runtime/internal/atomic", "Load64", p8...)
+
+ alias("sync/atomic", "StoreInt32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreInt64", "runtime/internal/atomic", "Store64", all...)
+ // Note: not StorePointer, that needs a write barrier. Same below for {CompareAnd}Swap.
+ alias("sync/atomic", "StoreUint32", "runtime/internal/atomic", "Store", all...)
+ alias("sync/atomic", "StoreUint64", "runtime/internal/atomic", "Store64", all...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store", p4...)
+ alias("sync/atomic", "StoreUintptr", "runtime/internal/atomic", "Store64", p8...)
+
+ alias("sync/atomic", "SwapInt32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapInt64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUint32", "runtime/internal/atomic", "Xchg", all...)
+ alias("sync/atomic", "SwapUint64", "runtime/internal/atomic", "Xchg64", all...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg", p4...)
+ alias("sync/atomic", "SwapUintptr", "runtime/internal/atomic", "Xchg64", p8...)
+
+ alias("sync/atomic", "CompareAndSwapInt32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapInt64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUint32", "runtime/internal/atomic", "Cas", all...)
+ alias("sync/atomic", "CompareAndSwapUint64", "runtime/internal/atomic", "Cas64", all...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas", p4...)
+ alias("sync/atomic", "CompareAndSwapUintptr", "runtime/internal/atomic", "Cas64", p8...)
+
+ alias("sync/atomic", "AddInt32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddInt64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUint32", "runtime/internal/atomic", "Xadd", all...)
+ alias("sync/atomic", "AddUint64", "runtime/internal/atomic", "Xadd64", all...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd", p4...)
+ alias("sync/atomic", "AddUintptr", "runtime/internal/atomic", "Xadd64", p8...)
+
+ /******** math/big ********/
+ add("math/big", "mulWW",
+ func(s *state, n *ir.CallExpr, args []*ssa.Value) *ssa.Value {
+ return s.newValue2(ssa.OpMul64uhilo, types.NewTuple(types.Types[types.TUINT64], types.Types[types.TUINT64]), args[0], args[1])
+ },
+ sys.ArchAMD64, sys.ArchARM64, sys.ArchPPC64LE, sys.ArchPPC64, sys.ArchS390X)
+}
+
+// findIntrinsic returns a function which builds the SSA equivalent of the
+// function identified by the symbol sym. If sym is not an intrinsic call, returns nil.
+func findIntrinsic(sym *types.Sym) intrinsicBuilder {
+ if sym == nil || sym.Pkg == nil {
+ return nil
+ }
+ pkg := sym.Pkg.Path
+ if sym.Pkg == types.LocalPkg {
+ pkg = base.Ctxt.Pkgpath
+ }
+ if sym.Pkg == ir.Pkgs.Runtime {
+ pkg = "runtime"
+ }
+ if base.Flag.Race && pkg == "sync/atomic" {
+ // The race detector needs to be able to intercept these calls.
+ // We can't intrinsify them.
+ return nil
+ }
+ // Skip intrinsifying math functions (which may contain hard-float
+ // instructions) when compiling with soft-float.
+ if Arch.SoftFloat && pkg == "math" {
+ return nil
+ }
+
+ fn := sym.Name
+ if ssa.IntrinsicsDisable {
+ if pkg == "runtime" && (fn == "getcallerpc" || fn == "getcallersp" || fn == "getclosureptr") {
+ // These runtime functions don't have definitions; they must be intrinsics.
+ } else {
+ return nil
+ }
+ }
+ return intrinsics[intrinsicKey{Arch.LinkArch.Arch, pkg, fn}]
+}
+
+func IsIntrinsicCall(n *ir.CallExpr) bool {
+ if n == nil {
+ return false
+ }
+ name, ok := n.X.(*ir.Name)
+ if !ok {
+ return false
+ }
+ return findIntrinsic(name.Sym()) != nil
+}
+
+// intrinsicCall converts a call to a recognized intrinsic function into the intrinsic SSA operation.
+func (s *state) intrinsicCall(n *ir.CallExpr) *ssa.Value {
+ v := findIntrinsic(n.X.Sym())(s, n, s.intrinsicArgs(n))
+ if ssa.IntrinsicsDebug > 0 {
+ x := v
+ if x == nil {
+ x = s.mem()
+ }
+ if x.Op == ssa.OpSelect0 || x.Op == ssa.OpSelect1 {
+ x = x.Args[0]
+ }
+ base.WarnfAt(n.Pos(), "intrinsic substitution for %v with %s", n.X.Sym().Name, x.LongString())
+ }
+ return v
+}
+
+// intrinsicArgs extracts args from n, evaluates them to SSA values, and returns them.
+func (s *state) intrinsicArgs(n *ir.CallExpr) []*ssa.Value {
+ args := make([]*ssa.Value, len(n.Args))
+ for i, n := range n.Args {
+ args[i] = s.expr(n)
+ }
+ return args
+}
+
+// openDeferRecord adds code to evaluate and store the function for an open-coded defer
+// call, and records info about the defer, so we can generate proper code on the
+// exit paths. n is the sub-node of the defer node that is the actual function
+// call. We will also record funcdata information on where the function is stored
+// (as well as the deferBits variable), and this will enable us to run the proper
+// defer calls during panics.
+func (s *state) openDeferRecord(n *ir.CallExpr) {
+ if len(n.Args) != 0 || n.Op() != ir.OCALLFUNC || n.X.Type().NumResults() != 0 {
+ s.Fatalf("defer call with arguments or results: %v", n)
+ }
+
+ opendefer := &openDeferInfo{
+ n: n,
+ }
+ fn := n.X
+ // We must always store the function value in a stack slot for the
+ // runtime panic code to use. But in the defer exit code, we will
+ // call the function directly if it is a static function.
+ closureVal := s.expr(fn)
+ closure := s.openDeferSave(fn.Type(), closureVal)
+ opendefer.closureNode = closure.Aux.(*ir.Name)
+ if !(fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC) {
+ opendefer.closure = closure
+ }
+ index := len(s.openDefers)
+ s.openDefers = append(s.openDefers, opendefer)
+
+ // Update deferBits only after the function value has been evaluated
+ // and stored to the stack.
+ bitvalue := s.constInt8(types.Types[types.TUINT8], 1<<uint(index))
+ newDeferBits := s.newValue2(ssa.OpOr8, types.Types[types.TUINT8], s.variable(deferBitsVar, types.Types[types.TUINT8]), bitvalue)
+ s.vars[deferBitsVar] = newDeferBits
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, newDeferBits)
+}
+
+// openDeferSave generates SSA nodes to store a value (with type t) for an
+// open-coded defer at an explicit autotmp location on the stack, so it can be
+// reloaded and used for the appropriate call on exit. Type t must be a function type
+// (therefore SSAable). val is the value to be stored. The function returns an SSA
+// value representing a pointer to the autotmp location.
+func (s *state) openDeferSave(t *types.Type, val *ssa.Value) *ssa.Value {
+ if !TypeOK(t) {
+ s.Fatalf("openDeferSave of non-SSA-able type %v val=%v", t, val)
+ }
+ if !t.HasPointers() {
+ s.Fatalf("openDeferSave of pointerless type %v val=%v", t, val)
+ }
+ pos := val.Pos
+ temp := typecheck.TempAt(pos.WithNotStmt(), s.curfn, t)
+ temp.SetOpenDeferSlot(true)
+ var addrTemp *ssa.Value
+ // Use OpVarLive to make sure the stack slot for the closure is not removed
+ // by dead-store elimination.
+ if s.curBlock.ID != s.f.Entry.ID {
+ // Force the tmp storing this defer function to be declared in the entry
+ // block, so that it will be live for the defer exit code (which will
+ // actually access it only if the associated defer call has been activated).
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarDef, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ s.defvars[s.f.Entry.ID][memVar] = s.f.Entry.NewValue1A(src.NoXPos, ssa.OpVarLive, types.TypeMem, temp, s.defvars[s.f.Entry.ID][memVar])
+ addrTemp = s.f.Entry.NewValue2A(src.NoXPos, ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.defvars[s.f.Entry.ID][memVar])
+ } else {
+ // Special case if we're still in the entry block. We can't use
+ // the above code, since s.defvars[s.f.Entry.ID] isn't defined
+ // until we end the entry block with s.endBlock().
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarDef, types.TypeMem, temp, s.mem(), false)
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, temp, s.mem(), false)
+ addrTemp = s.newValue2Apos(ssa.OpLocalAddr, types.NewPtr(temp.Type()), temp, s.sp, s.mem(), false)
+ }
+ // Since we may use this temp during exit depending on the
+ // deferBits, we must define it unconditionally on entry.
+ // Therefore, we must make sure it is zeroed out in the entry
+ // block if it contains pointers, else GC may wrongly follow an
+ // uninitialized pointer value.
+ temp.SetNeedzero(true)
+ // We are storing to the stack, hence we can avoid the full checks in
+ // storeType() (no write barrier) and do a simple store().
+ s.store(t, addrTemp, val)
+ return addrTemp
+}
+
+// openDeferExit generates SSA for processing all the open coded defers at exit.
+// The code involves loading deferBits, and checking each of the bits to see if
+// the corresponding defer statement was executed. For each bit that is turned
+// on, the associated defer call is made.
+func (s *state) openDeferExit() {
+ deferExit := s.f.NewBlock(ssa.BlockPlain)
+ s.endBlock().AddEdgeTo(deferExit)
+ s.startBlock(deferExit)
+ s.lastDeferExit = deferExit
+ s.lastDeferCount = len(s.openDefers)
+ zeroval := s.constInt8(types.Types[types.TUINT8], 0)
+ // Test for and run defers in reverse order
+ for i := len(s.openDefers) - 1; i >= 0; i-- {
+ r := s.openDefers[i]
+ bCond := s.f.NewBlock(ssa.BlockPlain)
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+
+ deferBits := s.variable(deferBitsVar, types.Types[types.TUINT8])
+ // Generate code to check if the bit associated with the current
+ // defer is set.
+ bitval := s.constInt8(types.Types[types.TUINT8], 1<<uint(i))
+ andval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, bitval)
+ eqVal := s.newValue2(ssa.OpEq8, types.Types[types.TBOOL], andval, zeroval)
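+ // eqVal is true when this defer's bit is clear; the true branch goes
+ // to bEnd, skipping the call below.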
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(eqVal)
+ b.AddEdgeTo(bEnd)
+ b.AddEdgeTo(bCond)
+ bCond.AddEdgeTo(bEnd)
+ s.startBlock(bCond)
+
+ // Clear this bit in deferBits and force store back to stack, so
+ // we will not try to re-run this defer call if this defer call panics.
+ nbitval := s.newValue1(ssa.OpCom8, types.Types[types.TUINT8], bitval)
+ maskedval := s.newValue2(ssa.OpAnd8, types.Types[types.TUINT8], deferBits, nbitval)
+ s.store(types.Types[types.TUINT8], s.deferBitsAddr, maskedval)
+ // Use this value for following tests, so we keep previous
+ // bits cleared.
+ s.vars[deferBitsVar] = maskedval
+
+ // Generate code to call the deferred function, using the closure
+ // that was stored in an argtmp at the point of the defer statement.
+ fn := r.n.X
+ stksize := fn.Type().ArgWidth()
+ var callArgs []*ssa.Value
+ var call *ssa.Value
+ if r.closure != nil {
+ v := s.load(r.closure.Type.Elem(), r.closure)
+ s.maybeNilCheckClosure(v, callDefer)
+ codeptr := s.rawLoad(types.Types[types.TUINTPTR], v)
+ aux := ssa.ClosureAuxCall(s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, v)
+ } else {
+ aux := ssa.StaticAuxCall(fn.(*ir.Name).Linksym(), s.f.ABIDefault.ABIAnalyzeTypes(nil, nil, nil))
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ }
+ callArgs = append(callArgs, s.mem())
+ call.AddArgs(callArgs...)
+ call.AuxInt = stksize
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, 0, call)
+ // Make sure that the stack slots with pointers are kept live
+ // through the call (which is a pre-emption point). Also, we will
+ // use the first call of the last defer exit to compute liveness
+ // for the deferreturn, so we want all stack slots to be live.
+ if r.closureNode != nil {
+ s.vars[memVar] = s.newValue1Apos(ssa.OpVarLive, types.TypeMem, r.closureNode, s.mem(), false)
+ }
+
+ s.endBlock()
+ s.startBlock(bEnd)
+ }
+}
+
+func (s *state) callResult(n *ir.CallExpr, k callKind) *ssa.Value {
+ return s.call(n, k, false)
+}
+
+func (s *state) callAddr(n *ir.CallExpr, k callKind) *ssa.Value {
+ return s.call(n, k, true)
+}
+
+// Calls the function n using the specified call type.
+// Returns the address of the return value (or nil if none).
+func (s *state) call(n *ir.CallExpr, k callKind, returnResultAddr bool) *ssa.Value {
+ s.prevCall = nil
+ var callee *ir.Name // target function (if static)
+ var closure *ssa.Value // ptr to closure to run (if dynamic)
+ var codeptr *ssa.Value // ptr to target code (if dynamic)
+ var rcvr *ssa.Value // receiver to set
+ fn := n.X
+ var ACArgs []*types.Type // AuxCall args
+ var ACResults []*types.Type // AuxCall results
+ var callArgs []*ssa.Value // For late-expansion, the args themselves (not stored, args to the call instead).
+
+ callABI := s.f.ABIDefault
+
+ if !buildcfg.Experiment.RegabiArgs {
+ var magicFnNameSym *types.Sym
+ if fn.Name() != nil {
+ magicFnNameSym = fn.Name().Sym()
+ ss := magicFnNameSym.Name
+ if strings.HasSuffix(ss, magicNameDotSuffix) {
+ callABI = s.f.ABI1
+ }
+ }
+ if magicFnNameSym == nil && n.Op() == ir.OCALLINTER {
+ magicFnNameSym = fn.(*ir.SelectorExpr).Sym()
+ ss := magicFnNameSym.Name
+ if strings.HasSuffix(ss, magicNameDotSuffix[1:]) {
+ callABI = s.f.ABI1
+ }
+ }
+ }
+
+ if k != callNormal && k != callTail && (len(n.Args) != 0 || n.Op() == ir.OCALLINTER || n.X.Type().NumResults() != 0) {
+ s.Fatalf("go/defer call with arguments: %v", n)
+ }
+
+ switch n.Op() {
+ case ir.OCALLFUNC:
+ if (k == callNormal || k == callTail) && fn.Op() == ir.ONAME && fn.(*ir.Name).Class == ir.PFUNC {
+ fn := fn.(*ir.Name)
+ callee = fn
+ if buildcfg.Experiment.RegabiArgs {
+ // This is a static call, so it may be
+ // a direct call to a non-ABIInternal
+ // function. fn.Func may be nil for
+ // some compiler-generated functions,
+ // but those are all ABIInternal.
+ if fn.Func != nil {
+ callABI = abiForFunc(fn.Func, s.f.ABI0, s.f.ABI1)
+ }
+ } else {
+ // TODO(register args) remove after register abi is working
+ inRegistersImported := fn.Pragma()&ir.RegisterParams != 0
+ inRegistersSamePackage := fn.Func != nil && fn.Func.Pragma&ir.RegisterParams != 0
+ if inRegistersImported || inRegistersSamePackage {
+ callABI = s.f.ABI1
+ }
+ }
+ break
+ }
+ closure = s.expr(fn)
+ if k != callDefer && k != callDeferStack {
+ // A deferred nil function needs to panic when the function is invoked,
+ // not at the point of the defer statement.
+ s.maybeNilCheckClosure(closure, k)
+ }
+ case ir.OCALLINTER:
+ if fn.Op() != ir.ODOTINTER {
+ s.Fatalf("OCALLINTER: n.Left not an ODOTINTER: %v", fn.Op())
+ }
+ fn := fn.(*ir.SelectorExpr)
+ var iclosure *ssa.Value
+ iclosure, rcvr = s.getClosureAndRcvr(fn)
+ if k == callNormal {
+ codeptr = s.load(types.Types[types.TUINTPTR], iclosure)
+ } else {
+ closure = iclosure
+ }
+ }
+
+ if !buildcfg.Experiment.RegabiArgs {
+ if regAbiForFuncType(n.X.Type().FuncType()) {
+ // Magic last type in input args to call
+ callABI = s.f.ABI1
+ }
+ }
+
+ params := callABI.ABIAnalyze(n.X.Type(), false /* Do not set (register) nNames from caller side -- can cause races. */)
+ types.CalcSize(fn.Type())
+ stksize := params.ArgWidth() // includes receiver, args, and results
+
+ res := n.X.Type().Results()
+ if k == callNormal || k == callTail {
+ for _, p := range params.OutParams() {
+ ACResults = append(ACResults, p.Type)
+ }
+ }
+
+ var call *ssa.Value
+ if k == callDeferStack {
+ // Make a defer struct d on the stack.
+ if stksize != 0 {
+ s.Fatalf("deferprocStack with non-zero stack size %d: %v", stksize, n)
+ }
+
+ t := deferstruct()
+ d := typecheck.TempAt(n.Pos(), s.curfn, t)
+
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, d, s.mem())
+ addr := s.addr(d)
+
+ // Must match deferstruct() below and src/runtime/runtime2.go:_defer.
+ // 0: started, set in deferprocStack
+ // 1: heap, set in deferprocStack
+ // 2: openDefer
+ // 3: sp, set in deferprocStack
+ // 4: pc, set in deferprocStack
+ // 5: fn
+ s.store(closure.Type,
+ s.newValue1I(ssa.OpOffPtr, closure.Type.PtrTo(), t.FieldOff(5), addr),
+ closure)
+ // 6: panic, set in deferprocStack
+ // 7: link, set in deferprocStack
+ // 8: fd
+ // 9: varp
+ // 10: framepc
+
+ // Call runtime.deferprocStack with pointer to _defer record.
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR])
+ aux := ssa.StaticAuxCall(ir.Syms.DeferprocStack, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ callArgs = append(callArgs, addr, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ call.AuxInt = int64(types.PtrSize) // deferprocStack takes a *_defer arg
+ } else {
+ // Store arguments to stack, including defer/go arguments and receiver for method calls.
+ // These are written in SP-offset order.
+ argStart := base.Ctxt.FixedFrameSize()
+ // Defer/go args.
+ if k != callNormal && k != callTail {
+ // Write closure (arg to newproc/deferproc).
+ ACArgs = append(ACArgs, types.Types[types.TUINTPTR]) // not argExtra
+ callArgs = append(callArgs, closure)
+ stksize += int64(types.PtrSize)
+ argStart += int64(types.PtrSize)
+ }
+
+ // Set receiver (for interface calls).
+ if rcvr != nil {
+ callArgs = append(callArgs, rcvr)
+ }
+
+ // Write args.
+ t := n.X.Type()
+ args := n.Args
+
+ for _, p := range params.InParams() { // includes receiver for interface calls
+ ACArgs = append(ACArgs, p.Type)
+ }
+
+ // Split the entry block if there are open defers, because later calls to
+ // openDeferSave may cause a mismatch between the mem for an OpDereference
+ // and the call site which uses it. See #49282.
+ if s.curBlock.ID == s.f.Entry.ID && s.hasOpenDefers {
+ b := s.endBlock()
+ b.Kind = ssa.BlockPlain
+ curb := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(curb)
+ s.startBlock(curb)
+ }
+
+ for i, n := range args {
+ callArgs = append(callArgs, s.putArg(n, t.Params().Field(i).Type))
+ }
+
+ callArgs = append(callArgs, s.mem())
+
+ // call target
+ switch {
+ case k == callDefer:
+ aux := ssa.StaticAuxCall(ir.Syms.Deferproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults)) // TODO paramResultInfo for DeferProc
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ case k == callGo:
+ aux := ssa.StaticAuxCall(ir.Syms.Newproc, s.f.ABIDefault.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux) // TODO paramResultInfo for NewProc
+ case closure != nil:
+ // rawLoad because loading the code pointer from a
+ // closure is always safe, but IsSanitizerSafeAddr
+ // can't always figure that out currently, and it's
+ // critical that we not clobber any arguments already
+ // stored onto the stack.
+ codeptr = s.rawLoad(types.Types[types.TUINTPTR], closure)
+ aux := ssa.ClosureAuxCall(callABI.ABIAnalyzeTypes(nil, ACArgs, ACResults))
+ call = s.newValue2A(ssa.OpClosureLECall, aux.LateExpansionResultType(), aux, codeptr, closure)
+ case codeptr != nil:
+ // Note that the "receiver" parameter is nil because the actual receiver is the first input parameter.
+ aux := ssa.InterfaceAuxCall(params)
+ call = s.newValue1A(ssa.OpInterLECall, aux.LateExpansionResultType(), aux, codeptr)
+ case callee != nil:
+ aux := ssa.StaticAuxCall(callTargetLSym(callee), params)
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ if k == callTail {
+ call.Op = ssa.OpTailLECall
+ stksize = 0 // Tail call does not use stack. We reuse caller's frame.
+ }
+ default:
+ s.Fatalf("bad call type %v %v", n.Op(), n)
+ }
+ call.AddArgs(callArgs...)
+ call.AuxInt = stksize // Call operations carry the argsize of the callee along with them
+ }
+ s.prevCall = call
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(ACResults)), call)
+ // Insert OVARLIVE nodes
+ for _, name := range n.KeepAlive {
+ s.stmt(ir.NewUnaryExpr(n.Pos(), ir.OVARLIVE, name))
+ }
+
+ // Finish block for defers
+ if k == callDefer || k == callDeferStack {
+ b := s.endBlock()
+ b.Kind = ssa.BlockDefer
+ b.SetControl(call)
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bNext)
+ // Add recover edge to exit code.
+ r := s.f.NewBlock(ssa.BlockPlain)
+ s.startBlock(r)
+ s.exit()
+ b.AddEdgeTo(r)
+ b.Likely = ssa.BranchLikely
+ s.startBlock(bNext)
+ }
+
+ if res.NumFields() == 0 || k != callNormal {
+ // call has no return value. Continue with the next statement.
+ return nil
+ }
+ fp := res.Field(0)
+ if returnResultAddr {
+ return s.resultAddrOfCall(call, 0, fp.Type)
+ }
+ return s.newValue1I(ssa.OpSelectN, fp.Type, 0, call)
+}
+
+// maybeNilCheckClosure checks if a nil check of a closure is needed in some
+// architecture-dependent situations and, if so, emits the nil check.
+func (s *state) maybeNilCheckClosure(closure *ssa.Value, k callKind) {
+ if Arch.LinkArch.Family == sys.Wasm || buildcfg.GOOS == "aix" && k != callGo {
+ // On AIX, the closure needs to be verified since fn can be nil, except for go calls; for those, the nil check must be left to the runtime so it can produce the "go of nil func value" error.
+ // TODO(neelance): On other architectures this should be eliminated by the optimization steps
+ s.nilCheck(closure)
+ }
+}
+
+// getClosureAndRcvr returns values for the appropriate closure and receiver of an
+// interface call
+func (s *state) getClosureAndRcvr(fn *ir.SelectorExpr) (*ssa.Value, *ssa.Value) {
+ i := s.expr(fn.X)
+ itab := s.newValue1(ssa.OpITab, types.Types[types.TUINTPTR], i)
+ s.nilCheck(itab)
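+ // In runtime.itab (src/runtime/runtime2.go), the fun array of code pointers
+ // starts after two pointer fields (inter, _type) plus an 8-byte hash+padding
+ // block, i.e. at offset 2*PtrSize+8; fn.Offset() selects this method's slot
+ // within fun.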
+ itabidx := fn.Offset() + 2*int64(types.PtrSize) + 8 // offset of fun field in runtime.itab
+ closure := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.UintptrPtr, itabidx, itab)
+ rcvr := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, i)
+ return closure, rcvr
+}
+
+// etypesign returns the signed-ness of e, for integer/pointer etypes.
+// -1 means signed, +1 means unsigned, 0 means non-integer/non-pointer.
+func etypesign(e types.Kind) int8 {
+ switch e {
+ case types.TINT8, types.TINT16, types.TINT32, types.TINT64, types.TINT:
+ return -1
+ case types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINT, types.TUINTPTR, types.TUNSAFEPTR:
+ return +1
+ }
+ return 0
+}
+
+// addr converts the address of the expression n to SSA, adds it to s and returns the SSA result.
+// The value that the returned Value represents is guaranteed to be non-nil.
+func (s *state) addr(n ir.Node) *ssa.Value {
+ if n.Op() != ir.ONAME {
+ s.pushLine(n.Pos())
+ defer s.popLine()
+ }
+
+ if s.canSSA(n) {
+ s.Fatalf("addr of canSSA expression: %+v", n)
+ }
+
+ t := types.NewPtr(n.Type())
+ linksymOffset := func(lsym *obj.LSym, offset int64) *ssa.Value {
+ v := s.entryNewValue1A(ssa.OpAddr, t, lsym, s.sb)
+ // TODO: Make OpAddr use AuxInt as well as Aux.
+ if offset != 0 {
+ v = s.entryNewValue1I(ssa.OpOffPtr, v.Type, offset, v)
+ }
+ return v
+ }
+ switch n.Op() {
+ case ir.OLINKSYMOFFSET:
+ no := n.(*ir.LinksymOffsetExpr)
+ return linksymOffset(no.Linksym, no.Offset_)
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Heapaddr != nil {
+ return s.expr(n.Heapaddr)
+ }
+ switch n.Class {
+ case ir.PEXTERN:
+ // global variable
+ return linksymOffset(n.Linksym(), 0)
+ case ir.PPARAM:
+ // parameter slot
+ v := s.decladdrs[n]
+ if v != nil {
+ return v
+ }
+ s.Fatalf("addr of undeclared ONAME %v. declared: %v", n, s.decladdrs)
+ return nil
+ case ir.PAUTO:
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), !ir.IsAutoTmp(n))
+
+ case ir.PPARAMOUT: // Same as PAUTO -- cannot generate LEA early.
+ // ensure that we reuse symbols for out parameters so
+ // that cse works on their addresses
+ return s.newValue2Apos(ssa.OpLocalAddr, t, n, s.sp, s.mem(), true)
+ default:
+ s.Fatalf("variable address class %v not implemented", n.Class)
+ return nil
+ }
+ case ir.ORESULT:
+ // load return from callee
+ n := n.(*ir.ResultExpr)
+ return s.resultAddrOfCall(s.prevCall, n.Index, n.Type())
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ a := s.expr(n.X)
+ i := s.expr(n.Index)
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], a)
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ p := s.newValue1(ssa.OpSlicePtr, t, a)
+ return s.newValue2(ssa.OpPtrIndex, t, p, i)
+ } else { // array
+ a := s.addr(n.X)
+ i := s.expr(n.Index)
+ len := s.constInt(types.Types[types.TINT], n.X.Type().NumElem())
+ i = s.boundsCheck(i, len, ssa.BoundsIndex, n.Bounded())
+ return s.newValue2(ssa.OpPtrIndex, types.NewPtr(n.X.Type().Elem()), a, i)
+ }
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return s.exprPtr(n.X, n.Bounded(), n.Pos())
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ p := s.addr(n.X)
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ p := s.exprPtr(n.X, n.Bounded(), n.Pos())
+ return s.newValue1I(ssa.OpOffPtr, t, n.Offset(), p)
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.Type() == n.X.Type() {
+ return s.addr(n.X)
+ }
+ addr := s.addr(n.X)
+ return s.newValue1(ssa.OpCopy, t, addr) // ensure that addr has the right type
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ return s.callAddr(n, callNormal)
+ case ir.ODOTTYPE, ir.ODYNAMICDOTTYPE:
+ var v *ssa.Value
+ if n.Op() == ir.ODOTTYPE {
+ v, _ = s.dottype(n.(*ir.TypeAssertExpr), false)
+ } else {
+ v, _ = s.dynamicDottype(n.(*ir.DynamicTypeAssertExpr), false)
+ }
+ if v.Op != ssa.OpLoad {
+ s.Fatalf("dottype of non-load")
+ }
+ if v.Args[1] != s.mem() {
+ s.Fatalf("memory no longer live from dottype load")
+ }
+ return v.Args[0]
+ default:
+ s.Fatalf("unhandled addr %v", n.Op())
+ return nil
+ }
+}
+
+// canSSA reports whether n is SSA-able.
+// n must be an ONAME (or an ODOT sequence with an ONAME base).
+func (s *state) canSSA(n ir.Node) bool {
+ if base.Flag.N != 0 {
+ return false
+ }
+ for {
+ nn := n
+ if nn.Op() == ir.ODOT {
+ nn := nn.(*ir.SelectorExpr)
+ n = nn.X
+ continue
+ }
+ if nn.Op() == ir.OINDEX {
+ nn := nn.(*ir.IndexExpr)
+ if nn.X.Type().IsArray() {
+ n = nn.X
+ continue
+ }
+ }
+ break
+ }
+ if n.Op() != ir.ONAME {
+ return false
+ }
+ return s.canSSAName(n.(*ir.Name)) && TypeOK(n.Type())
+}
+
+func (s *state) canSSAName(name *ir.Name) bool {
+ if name.Addrtaken() || !name.OnStack() {
+ return false
+ }
+ switch name.Class {
+ case ir.PPARAMOUT:
+ if s.hasdefer {
+ // TODO: handle this case? Named return values must be
+ // in memory so that the deferred function can see them.
+ // Maybe do: if !strings.HasPrefix(n.String(), "~") { return false }
+ // Or maybe not, see issue 18860. Even unnamed return values
+ // must be written back so if a defer recovers, the caller can see them.
+ return false
+ }
+ if s.cgoUnsafeArgs {
+ // Cgo effectively takes the address of all result args,
+ // but the compiler can't see that.
+ return false
+ }
+ }
+ return true
+ // TODO: try to make more variables SSAable?
+}
+
+// TypeOK reports whether variables of type t are SSA-able.
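+// For example (illustrative, 64-bit target): strings (2 words) and slices
+// (3 words) are small enough to be SSA-able, while a [4]int64 array (more
+// than one element) or a struct with more than ssa.MaxStruct fields is not.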
+func TypeOK(t *types.Type) bool {
+ types.CalcSize(t)
+ if t.Size() > int64(4*types.PtrSize) {
+ // 4*PtrSize is an arbitrary constant. We want it
+ // to be at least 3*PtrSize so slices can be registerized.
+ // Too big and we'll introduce too much register pressure.
+ return false
+ }
+ switch t.Kind() {
+ case types.TARRAY:
+ // We can't do larger arrays because dynamic indexing is
+ // not supported on SSA variables.
+ // TODO: allow if all indexes are constant.
+ if t.NumElem() <= 1 {
+ return TypeOK(t.Elem())
+ }
+ return false
+ case types.TSTRUCT:
+ if t.NumFields() > ssa.MaxStruct {
+ return false
+ }
+ for _, t1 := range t.Fields().Slice() {
+ if !TypeOK(t1.Type) {
+ return false
+ }
+ }
+ return true
+ default:
+ return true
+ }
+}
+
+// exprPtr evaluates n to a pointer and nil-checks it.
+func (s *state) exprPtr(n ir.Node, bounded bool, lineno src.XPos) *ssa.Value {
+ p := s.expr(n)
+ if bounded || n.NonNil() {
+ if s.f.Frontend().Debug_checknil() && lineno.Line() > 1 {
+ s.f.Warnl(lineno, "removed nil check")
+ }
+ return p
+ }
+ s.nilCheck(p)
+ return p
+}
+
+// nilCheck generates nil pointer checking code.
+// Used only for automatically inserted nil checks,
+// not for user code like 'x != nil'.
+func (s *state) nilCheck(ptr *ssa.Value) {
+ if base.Debug.DisableNil != 0 || s.curfn.NilCheckDisabled() {
+ return
+ }
+ s.newValue2(ssa.OpNilCheck, types.TypeVoid, ptr, s.mem())
+}
+
+// boundsCheck generates bounds checking code. Checks if 0 <= idx <[=] len, branches to exit if not.
+// Starts a new block on return.
+// On input, len must be converted to full int width and be nonnegative.
+// Returns idx converted to full int width.
+// If bounded is true then caller guarantees the index is not out of bounds
+// (but boundsCheck will still extend the index to full int width).
+func (s *state) boundsCheck(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ idx = s.extendIndex(idx, len, kind, bounded)
+
+ if bounded || base.Flag.B != 0 {
+ // If bounded or bounds checking is flag-disabled, then no check necessary,
+ // just return the extended index.
+ //
+ // Here, bounded == true if the compiler generated the index itself,
+ // such as in the expansion of a slice initializer. These indexes are
+ // compiler-generated, not Go program variables, so they cannot be
+ // attacker-controlled, so we can omit Spectre masking as well.
+ //
+ // Note that we do not want to omit Spectre masking in code like:
+ //
+ // if 0 <= i && i < len(x) {
+ // use(x[i])
+ // }
+ //
+ // Lucky for us, bounded==false for that code.
+ // In that case (handled below), we emit a bound check (and Spectre mask)
+ // and then the prove pass will remove the bounds check.
+ // In theory the prove pass could potentially remove certain
+ // Spectre masks, but it's very delicate and probably better
+ // to be conservative and leave them all in.
+ return idx
+ }
+
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+
+ var cmp *ssa.Value
+ if kind == ssa.BoundsIndex || kind == ssa.BoundsIndexU {
+ cmp = s.newValue2(ssa.OpIsInBounds, types.Types[types.TBOOL], idx, len)
+ } else {
+ cmp = s.newValue2(ssa.OpIsSliceInBounds, types.Types[types.TBOOL], idx, len)
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ if Arch.LinkArch.Family == sys.Wasm {
+ // TODO(khr): figure out how to do "register" based calling convention for bounds checks.
+ // Should be similar to gcWriteBarrier, but I can't make it work.
+ s.rtcall(BoundsCheckFunc[kind], false, nil, idx, len)
+ } else {
+ mem := s.newValue3I(ssa.OpPanicBounds, types.TypeMem, int64(kind), idx, len, s.mem())
+ s.endBlock().SetControl(mem)
+ }
+ s.startBlock(bNext)
+
+ // In Spectre index mode, apply an appropriate mask to avoid speculative out-of-bounds accesses.
+ if base.Flag.Cfg.SpectreIndex {
+ op := ssa.OpSpectreIndex
+ if kind != ssa.BoundsIndex && kind != ssa.BoundsIndexU {
+ op = ssa.OpSpectreSliceIndex
+ }
+ idx = s.newValue2(op, types.Types[types.TINT], idx, len)
+ }
+
+ return idx
+}
+
+// If cmp (a bool) is false, panic using the given function.
+func (s *state) check(cmp *ssa.Value, fn *obj.LSym) {
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ line := s.peekPos()
+ pos := base.Ctxt.PosTable.Pos(line)
+ fl := funcLine{f: fn, base: pos.Base(), line: pos.Line()}
+ bPanic := s.panics[fl]
+ if bPanic == nil {
+ bPanic = s.f.NewBlock(ssa.BlockPlain)
+ s.panics[fl] = bPanic
+ s.startBlock(bPanic)
+ // The panic call takes/returns memory to ensure that the right
+ // memory state is observed if the panic happens.
+ s.rtcall(fn, false, nil)
+ }
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+ s.startBlock(bNext)
+}
+
+func (s *state) intDivide(n ir.Node, a, b *ssa.Value) *ssa.Value {
+ needcheck := true
+ switch b.Op {
+ case ssa.OpConst8, ssa.OpConst16, ssa.OpConst32, ssa.OpConst64:
+ if b.AuxInt != 0 {
+ needcheck = false
+ }
+ }
+ if needcheck {
+ // do a size-appropriate check for zero
+ cmp := s.newValue2(s.ssaOp(ir.ONE, n.Type()), types.Types[types.TBOOL], b, s.zeroVal(n.Type()))
+ s.check(cmp, ir.Syms.Panicdivide)
+ }
+ return s.newValue2(s.ssaOp(n.Op(), n.Type()), a.Type, a, b)
+}
+
+// rtcall issues a call to the given runtime function fn with the listed args.
+// Returns a slice of results of the given result types.
+// The call is added to the end of the current block.
+// If returns is false, the block is marked as an exit block.
+func (s *state) rtcall(fn *obj.LSym, returns bool, results []*types.Type, args ...*ssa.Value) []*ssa.Value {
+ s.prevCall = nil
+ // Write args to the stack
+ off := base.Ctxt.FixedFrameSize()
+ var callArgs []*ssa.Value
+ var callArgTypes []*types.Type
+
+ for _, arg := range args {
+ t := arg.Type
+ off = types.Rnd(off, t.Alignment())
+ size := t.Size()
+ callArgs = append(callArgs, arg)
+ callArgTypes = append(callArgTypes, t)
+ off += size
+ }
+ off = types.Rnd(off, int64(types.RegSize))
+
+ // Accumulate result types and offsets
+ offR := off
+ for _, t := range results {
+ offR = types.Rnd(offR, t.Alignment())
+ offR += t.Size()
+ }
+
+ // Issue call
+ var call *ssa.Value
+ aux := ssa.StaticAuxCall(fn, s.f.ABIDefault.ABIAnalyzeTypes(nil, callArgTypes, results))
+ callArgs = append(callArgs, s.mem())
+ call = s.newValue0A(ssa.OpStaticLECall, aux.LateExpansionResultType(), aux)
+ call.AddArgs(callArgs...)
+ s.vars[memVar] = s.newValue1I(ssa.OpSelectN, types.TypeMem, int64(len(results)), call)
+
+ if !returns {
+ // Finish block
+ b := s.endBlock()
+ b.Kind = ssa.BlockExit
+ b.SetControl(call)
+ call.AuxInt = off - base.Ctxt.FixedFrameSize()
+ if len(results) > 0 {
+ s.Fatalf("panic call can't have results")
+ }
+ return nil
+ }
+
+ // Load results
+ res := make([]*ssa.Value, len(results))
+ for i, t := range results {
+ off = types.Rnd(off, t.Alignment())
+ res[i] = s.resultOfCall(call, int64(i), t)
+ off += t.Size()
+ }
+ off = types.Rnd(off, int64(types.PtrSize))
+
+ // Remember how much callee stack space we needed.
+ call.AuxInt = off
+
+ return res
+}
+
+// do *left = right for type t.
+func (s *state) storeType(t *types.Type, left, right *ssa.Value, skip skipMask, leftIsStmt bool) {
+ s.instrument(t, left, instrumentWrite)
+
+ if skip == 0 && (!t.HasPointers() || ssa.IsStackAddr(left)) {
+ // Known to not have write barrier. Store the whole type.
+ s.vars[memVar] = s.newValue3Apos(ssa.OpStore, types.TypeMem, t, left, right, s.mem(), leftIsStmt)
+ return
+ }
+
+ // store scalar fields first, so write barrier stores for
+ // pointer fields can be grouped together, and scalar values
+ // don't need to be live across the write barrier call.
+ // TODO: if the writebarrier pass knows how to reorder stores,
+ // we can do a single store here as long as skip==0.
+ s.storeTypeScalars(t, left, right, skip)
+ if skip&skipPtr == 0 && t.HasPointers() {
+ s.storeTypePtrs(t, left, right)
+ }
+}
+
+// do *left = right for all scalar (non-pointer) parts of t.
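+// For a slice, for example, this stores the len and cap words at offsets
+// PtrSize and 2*PtrSize from left; the data pointer itself is handled by
+// storeTypePtrs.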
+func (s *state) storeTypeScalars(t *types.Type, left, right *ssa.Value, skip skipMask) {
+ switch {
+ case t.IsBoolean() || t.IsInteger() || t.IsFloat() || t.IsComplex():
+ s.store(t, left, right)
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ s.store(t, left, right) // see issue 42032
+ }
+ // otherwise, no scalar fields.
+ case t.IsString():
+ if skip&skipLen != 0 {
+ return
+ }
+ len := s.newValue1(ssa.OpStringLen, types.Types[types.TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], lenAddr, len)
+ case t.IsSlice():
+ if skip&skipLen == 0 {
+ len := s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], right)
+ lenAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], lenAddr, len)
+ }
+ if skip&skipCap == 0 {
+ cap := s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], right)
+ capAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.IntPtr, 2*s.config.PtrSize, left)
+ s.store(types.Types[types.TINT], capAddr, cap)
+ }
+ case t.IsInterface():
+ // itab field doesn't need a write barrier (even though it is a pointer).
+ itab := s.newValue1(ssa.OpITab, s.f.Config.Types.BytePtr, right)
+ s.store(types.Types[types.TUINTPTR], left, itab)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypeScalars(ft, addr, val, 0)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypeScalars(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right), 0)
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// do *left = right for all pointer parts of t.
+func (s *state) storeTypePtrs(t *types.Type, left, right *ssa.Value) {
+ switch {
+ case t.IsPtrShaped():
+ if t.IsPtr() && t.Elem().NotInHeap() {
+ break // see issue 42032
+ }
+ s.store(t, left, right)
+ case t.IsString():
+ ptr := s.newValue1(ssa.OpStringPtr, s.f.Config.Types.BytePtr, right)
+ s.store(s.f.Config.Types.BytePtr, left, ptr)
+ case t.IsSlice():
+ elType := types.NewPtr(t.Elem())
+ ptr := s.newValue1(ssa.OpSlicePtr, elType, right)
+ s.store(elType, left, ptr)
+ case t.IsInterface():
+ // itab field is treated as a scalar.
+ idata := s.newValue1(ssa.OpIData, s.f.Config.Types.BytePtr, right)
+ idataAddr := s.newValue1I(ssa.OpOffPtr, s.f.Config.Types.BytePtrPtr, s.config.PtrSize, left)
+ s.store(s.f.Config.Types.BytePtr, idataAddr, idata)
+ case t.IsStruct():
+ n := t.NumFields()
+ for i := 0; i < n; i++ {
+ ft := t.FieldType(i)
+ if !ft.HasPointers() {
+ continue
+ }
+ addr := s.newValue1I(ssa.OpOffPtr, ft.PtrTo(), t.FieldOff(i), left)
+ val := s.newValue1I(ssa.OpStructSelect, ft, int64(i), right)
+ s.storeTypePtrs(ft, addr, val)
+ }
+ case t.IsArray() && t.NumElem() == 0:
+ // nothing
+ case t.IsArray() && t.NumElem() == 1:
+ s.storeTypePtrs(t.Elem(), left, s.newValue1I(ssa.OpArraySelect, t.Elem(), 0, right))
+ default:
+ s.Fatalf("bad write barrier type %v", t)
+ }
+}
+
+// putArg evaluates n for the purpose of passing it as an argument to a function and returns the value for the call.
+func (s *state) putArg(n ir.Node, t *types.Type) *ssa.Value {
+ var a *ssa.Value
+ if !TypeOK(t) {
+ a = s.newValue2(ssa.OpDereference, t, s.addr(n), s.mem())
+ } else {
+ a = s.expr(n)
+ }
+ return a
+}
+
+func (s *state) storeArgWithBase(n ir.Node, t *types.Type, base *ssa.Value, off int64) {
+ pt := types.NewPtr(t)
+ var addr *ssa.Value
+ if base == s.sp {
+ // Use special routine that avoids allocation on duplicate offsets.
+ addr = s.constOffPtrSP(pt, off)
+ } else {
+ addr = s.newValue1I(ssa.OpOffPtr, pt, off, base)
+ }
+
+ if !TypeOK(t) {
+ a := s.addr(n)
+ s.move(t, addr, a)
+ return
+ }
+
+ a := s.expr(n)
+ s.storeType(t, addr, a, 0, false)
+}
+
+// slice computes the slice v[i:j:k] and returns ptr, len, and cap of result.
+// i, j, k may be nil, in which case they are set to their default values.
+// v may be a slice, string, or pointer to an array.
+func (s *state) slice(v, i, j, k *ssa.Value, bounded bool) (p, l, c *ssa.Value) {
+ t := v.Type
+ var ptr, len, cap *ssa.Value
+ switch {
+ case t.IsSlice():
+ ptr = s.newValue1(ssa.OpSlicePtr, types.NewPtr(t.Elem()), v)
+ len = s.newValue1(ssa.OpSliceLen, types.Types[types.TINT], v)
+ cap = s.newValue1(ssa.OpSliceCap, types.Types[types.TINT], v)
+ case t.IsString():
+ ptr = s.newValue1(ssa.OpStringPtr, types.NewPtr(types.Types[types.TUINT8]), v)
+ len = s.newValue1(ssa.OpStringLen, types.Types[types.TINT], v)
+ cap = len
+ case t.IsPtr():
+ if !t.Elem().IsArray() {
+ s.Fatalf("bad ptr to array in slice %v\n", t)
+ }
+ s.nilCheck(v)
+ ptr = s.newValue1(ssa.OpCopy, types.NewPtr(t.Elem().Elem()), v)
+ len = s.constInt(types.Types[types.TINT], t.Elem().NumElem())
+ cap = len
+ default:
+ s.Fatalf("bad type in slice %v\n", t)
+ }
+
+ // Set default values
+ if i == nil {
+ i = s.constInt(types.Types[types.TINT], 0)
+ }
+ if j == nil {
+ j = len
+ }
+ three := true
+ if k == nil {
+ three = false
+ k = cap
+ }
+
+ // Panic if slice indices are not in bounds.
+ // Make sure we check these in reverse order so that we're always
+ // comparing against a value known to be nonnegative. See issue 28797.
+ if three {
+ if k != cap {
+ kind := ssa.BoundsSlice3Alen
+ if t.IsSlice() {
+ kind = ssa.BoundsSlice3Acap
+ }
+ k = s.boundsCheck(k, cap, kind, bounded)
+ }
+ if j != k {
+ j = s.boundsCheck(j, k, ssa.BoundsSlice3B, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSlice3C, bounded)
+ } else {
+ if j != k {
+ kind := ssa.BoundsSliceAlen
+ if t.IsSlice() {
+ kind = ssa.BoundsSliceAcap
+ }
+ j = s.boundsCheck(j, k, kind, bounded)
+ }
+ i = s.boundsCheck(i, j, ssa.BoundsSliceB, bounded)
+ }
+
+ // Word-sized integer operations.
+ subOp := s.ssaOp(ir.OSUB, types.Types[types.TINT])
+ mulOp := s.ssaOp(ir.OMUL, types.Types[types.TINT])
+ andOp := s.ssaOp(ir.OAND, types.Types[types.TINT])
+
+ // Calculate the length (rlen) and capacity (rcap) of the new slice.
+ // For strings the capacity of the result is unimportant. However,
+ // we use rcap to test whether we've generated a zero-length slice,
+ // so for strings we use the length for that purpose.
+ rlen := s.newValue2(subOp, types.Types[types.TINT], j, i)
+ rcap := rlen
+ if j != k && !t.IsString() {
+ rcap = s.newValue2(subOp, types.Types[types.TINT], k, i)
+ }
+
+ if (i.Op == ssa.OpConst64 || i.Op == ssa.OpConst32) && i.AuxInt == 0 {
+ // No pointer arithmetic necessary.
+ return ptr, rlen, rcap
+ }
+
+ // Calculate the base pointer (rptr) for the new slice.
+ //
+ // Generate the following code assuming that indexes are in bounds.
+ // The masking is to make sure that we don't generate a slice
+ // that points to the next object in memory. We cannot just set
+ // the pointer to nil because then we would create a nil slice or
+ // string.
+ //
+ // rcap = k - i
+ // rlen = j - i
+ // rptr = ptr + (mask(rcap) & (i * stride))
+ //
+ // Where mask(x) is 0 if x==0 and -1 if x>0 and stride is the width
+ // of the element type.
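+ // For example (illustrative): slicing s[5:5] of a slice with len == cap == 5
+ // gives rcap == 0, so mask(rcap) == 0, the delta is masked to zero, and rptr
+ // stays equal to ptr instead of pointing one past the end of the backing array.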
+ stride := s.constInt(types.Types[types.TINT], ptr.Type.Elem().Size())
+
+ // The delta is the number of bytes to offset ptr by.
+ delta := s.newValue2(mulOp, types.Types[types.TINT], i, stride)
+
+ // If we're slicing to the point where the capacity is zero,
+ // zero out the delta.
+ mask := s.newValue1(ssa.OpSlicemask, types.Types[types.TINT], rcap)
+ delta = s.newValue2(andOp, types.Types[types.TINT], delta, mask)
+
+ // Compute rptr = ptr + delta.
+ rptr := s.newValue2(ssa.OpAddPtr, ptr.Type, ptr, delta)
+
+ return rptr, rlen, rcap
+}
+
+type u642fcvtTab struct {
+ leq, cvt2F, and, rsh, or, add ssa.Op
+ one func(*state, *types.Type, int64) *ssa.Value
+}
+
+var u64_f64 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to64F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd64F,
+ one: (*state).constInt64,
+}
+
+var u64_f32 = u642fcvtTab{
+ leq: ssa.OpLeq64,
+ cvt2F: ssa.OpCvt64to32F,
+ and: ssa.OpAnd64,
+ rsh: ssa.OpRsh64Ux64,
+ or: ssa.OpOr64,
+ add: ssa.OpAdd32F,
+ one: (*state).constInt64,
+}
+
+func (s *state) uint64Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f64, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint64Tofloat(&u64_f32, n, x, ft, tt)
+}
+
+func (s *state) uint64Tofloat(cvttab *u642fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = (floatY) x
+ // } else {
+ // y = uintX(x) ; y = x & 1
+ // z = uintX(x) ; z = z >> 1
+ // z = z | y
+ // result = floatY(z)
+ // result = result + result
+ // }
+ //
+ // Code borrowed from old code generator.
+ // What's going on: a large 64-bit "unsigned" value looks like a
+ // negative number to the hardware's integer-to-float
+ // conversion. However, because the mantissa is only
+ // 63 bits, we don't need the LSB, so instead we do an
+ // unsigned right shift (divide by two), convert, and
+ // double. However, before we do that, we need to be
+ // sure that we do not lose a "1" if that made the
+ // difference in the resulting rounding. Therefore, we
+ // preserve it, and OR (not ADD) it back in. The case
+ // that matters is when the eleven discarded bits are
+ // equal to 10000000001; that rounds up, and the 1 cannot
+ // be lost else it would round down if the LSB of the
+ // candidate mantissa is 0.
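+ // Illustrative example: x = 1<<63 has its sign bit set, so we take the else
+ // branch: x>>1 = 1<<62 (the dropped low bit is 0, so the OR is a no-op),
+ // which converts exactly and doubles back to 2^63.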
+ cmp := s.newValue2(cvttab.leq, types.Types[types.TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ one := cvttab.one(s, ft, 1)
+ y := s.newValue2(cvttab.and, ft, x, one)
+ z := s.newValue2(cvttab.rsh, ft, x, one)
+ z = s.newValue2(cvttab.or, ft, z, y)
+ a := s.newValue1(cvttab.cvt2F, tt, z)
+ a1 := s.newValue2(cvttab.add, tt, a, a)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+type u322fcvtTab struct {
+ cvtI2F, cvtF2F ssa.Op
+}
+
+var u32_f64 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to64F,
+ cvtF2F: ssa.OpCopy,
+}
+
+var u32_f32 = u322fcvtTab{
+ cvtI2F: ssa.OpCvt32to32F,
+ cvtF2F: ssa.OpCvt64Fto32F,
+}
+
+func (s *state) uint32Tofloat64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f64, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.uint32Tofloat(&u32_f32, n, x, ft, tt)
+}
+
+func (s *state) uint32Tofloat(cvttab *u322fcvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // if x >= 0 {
+ // result = floatY(x)
+ // } else {
+ // result = floatY(float64(x) + (1<<32))
+ // }
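+ // Illustrative example: x = 0xFFFFFFFF is negative when viewed as a signed
+ // 32-bit integer, so we compute float64(-1) + (1<<32) = 4294967295, the
+ // correct unsigned value.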
+ cmp := s.newValue2(ssa.OpLeq32, types.Types[types.TBOOL], s.zeroVal(ft), x)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvtI2F, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ a1 := s.newValue1(ssa.OpCvt32to64F, types.Types[types.TFLOAT64], x)
+ twoToThe32 := s.constFloat64(types.Types[types.TFLOAT64], float64(1<<32))
+ a2 := s.newValue2(ssa.OpAdd64F, types.Types[types.TFLOAT64], a1, twoToThe32)
+ a3 := s.newValue1(cvttab.cvtF2F, tt, a2)
+
+ s.vars[n] = a3
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+// referenceTypeBuiltin generates code for the len/cap builtins for maps and channels.
+func (s *state) referenceTypeBuiltin(n *ir.UnaryExpr, x *ssa.Value) *ssa.Value {
+ if !n.X.Type().IsMap() && !n.X.Type().IsChan() {
+ s.Fatalf("node must be a map or a channel")
+ }
+ // if n == nil {
+ // return 0
+ // } else {
+ // // len
+ // return *((*int)n)
+ // // cap
+ // return *(((*int)n)+1)
+ // }
+ lenType := n.Type()
+ nilValue := s.constNil(types.Types[types.TUINTPTR])
+ cmp := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], x, nilValue)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchUnlikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ // length/capacity of a nil map/chan is zero
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ s.vars[n] = s.zeroVal(lenType)
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ switch n.Op() {
+ case ir.OLEN:
+ // length is stored in the first word for map/chan
+ s.vars[n] = s.load(lenType, x)
+ case ir.OCAP:
+ // capacity is stored in the second word for chan
+ sw := s.newValue1I(ssa.OpOffPtr, lenType.PtrTo(), lenType.Size(), x)
+ s.vars[n] = s.load(lenType, sw)
+ default:
+ s.Fatalf("op must be OLEN or OCAP")
+ }
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, lenType)
+}
+
+type f2uCvtTab struct {
+ ltf, cvt2U, subf, or ssa.Op
+ floatValue func(*state, *types.Type, float64) *ssa.Value
+ intValue func(*state, *types.Type, int64) *ssa.Value
+ cutoff uint64
+}
+
+var f32_u64 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto64,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat32,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f64_u64 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto64,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr64,
+ floatValue: (*state).constFloat64,
+ intValue: (*state).constInt64,
+ cutoff: 1 << 63,
+}
+
+var f32_u32 = f2uCvtTab{
+ ltf: ssa.OpLess32F,
+ cvt2U: ssa.OpCvt32Fto32,
+ subf: ssa.OpSub32F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat32,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+var f64_u32 = f2uCvtTab{
+ ltf: ssa.OpLess64F,
+ cvt2U: ssa.OpCvt64Fto32,
+ subf: ssa.OpSub64F,
+ or: ssa.OpOr32,
+ floatValue: (*state).constFloat64,
+ intValue: func(s *state, t *types.Type, v int64) *ssa.Value { return s.constInt32(t, int32(v)) },
+ cutoff: 1 << 31,
+}
+
+func (s *state) float32ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u64, n, x, ft, tt)
+}
+func (s *state) float64ToUint64(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u64, n, x, ft, tt)
+}
+
+func (s *state) float32ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f32_u32, n, x, ft, tt)
+}
+
+func (s *state) float64ToUint32(n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ return s.floatToUint(&f64_u32, n, x, ft, tt)
+}
+
+func (s *state) floatToUint(cvttab *f2uCvtTab, n ir.Node, x *ssa.Value, ft, tt *types.Type) *ssa.Value {
+ // cutoff:=1<<(intY_Size-1)
+ // if x < floatX(cutoff) {
+ // result = uintY(x)
+ // } else {
+ // y = x - floatX(cutoff)
+ // z = uintY(y)
+ // result = z | -(cutoff)
+ // }
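+ // Illustrative example (float64 to uint64): for x = 2^63 + 2048, x >= cutoff,
+ // so y = x - 2^63 = 2048.0, z = uint64(y) = 2048, and 2048 | -(cutoff)
+ // restores the high bit, giving 2^63 + 2048.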
+ cutoff := cvttab.floatValue(s, ft, float64(cvttab.cutoff))
+ cmp := s.newValue2(cvttab.ltf, types.Types[types.TBOOL], x, cutoff)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+
+ bThen := s.f.NewBlock(ssa.BlockPlain)
+ bElse := s.f.NewBlock(ssa.BlockPlain)
+ bAfter := s.f.NewBlock(ssa.BlockPlain)
+
+ b.AddEdgeTo(bThen)
+ s.startBlock(bThen)
+ a0 := s.newValue1(cvttab.cvt2U, tt, x)
+ s.vars[n] = a0
+ s.endBlock()
+ bThen.AddEdgeTo(bAfter)
+
+ b.AddEdgeTo(bElse)
+ s.startBlock(bElse)
+ y := s.newValue2(cvttab.subf, ft, x, cutoff)
+ y = s.newValue1(cvttab.cvt2U, tt, y)
+ z := cvttab.intValue(s, tt, int64(-cvttab.cutoff))
+ a1 := s.newValue2(cvttab.or, tt, y, z)
+ s.vars[n] = a1
+ s.endBlock()
+ bElse.AddEdgeTo(bAfter)
+
+ s.startBlock(bAfter)
+ return s.variable(n, n.Type())
+}
+
+// dottype generates SSA for a type assertion node.
+// commaok indicates whether to panic or return a bool.
+// If commaok is false, resok will be nil.
+func (s *state) dottype(n *ir.TypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.X) // input interface
+ target := s.reflectType(n.Type()) // target type
+ var targetItab *ssa.Value
+ if n.Itab != nil {
+ targetItab = s.expr(n.Itab)
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, targetItab, commaok)
+}
+
+func (s *state) dynamicDottype(n *ir.DynamicTypeAssertExpr, commaok bool) (res, resok *ssa.Value) {
+ iface := s.expr(n.X)
+ target := s.expr(n.T)
+ var itab *ssa.Value
+ if !n.X.Type().IsEmptyInterface() && !n.Type().IsInterface() {
+ byteptr := s.f.Config.Types.BytePtr
+ itab = target
+ target = s.load(byteptr, s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)) // itab.typ
+ }
+ return s.dottype1(n.Pos(), n.X.Type(), n.Type(), iface, target, itab, commaok)
+}
+
+// dottype1 implements a x.(T) operation. iface is the argument (x), dst is the type we're asserting to (T)
+// and src is the type we're asserting from.
+// target is the *runtime._type of dst.
+// If src is a nonempty interface and dst is not an interface, targetItab is an itab representing (dst, src). Otherwise it is nil.
+// commaok is true if the caller wants a boolean success value. Otherwise, the generated code panics if the conversion fails.
+func (s *state) dottype1(pos src.XPos, src, dst *types.Type, iface, target, targetItab *ssa.Value, commaok bool) (res, resok *ssa.Value) {
+ byteptr := s.f.Config.Types.BytePtr
+ if dst.IsInterface() {
+ if dst.IsEmptyInterface() {
+ // Converting to an empty interface.
+ // Input could be an empty or nonempty interface.
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion inlined")
+ }
+
+ // Get itab/type field from input.
+ itab := s.newValue1(ssa.OpITab, byteptr, iface)
+ // Conversion succeeds iff that field is not nil.
+ cond := s.newValue2(ssa.OpNeqPtr, types.Types[types.TBOOL], itab, s.constNil(byteptr))
+
+ if src.IsEmptyInterface() && commaok {
+ // Converting empty interface to empty interface with ,ok is just a nil check.
+ return iface, cond
+ }
+
+ // Branch on nilness.
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // On failure, panic by calling panicnildottype.
+ s.startBlock(bFail)
+ s.rtcall(ir.Syms.Panicnildottype, false, nil, target)
+
+ // On success, return (perhaps modified) input interface.
+ s.startBlock(bOk)
+ if src.IsEmptyInterface() {
+ res = iface // Use input interface unchanged.
+ return
+ }
+ // Load type out of itab, build interface with existing idata.
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+ typ := s.load(byteptr, off)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, dst, typ, idata)
+ return
+ }
+
+ s.startBlock(bOk)
+ // nonempty -> empty
+ // Need to load type from itab
+ off := s.newValue1I(ssa.OpOffPtr, byteptr, int64(types.PtrSize), itab)
+ s.vars[typVar] = s.load(byteptr, off)
+ s.endBlock()
+
+ // itab is nil, might as well use that as the nil result.
+ s.startBlock(bFail)
+ s.vars[typVar] = itab
+ s.endBlock()
+
+ // Merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ bOk.AddEdgeTo(bEnd)
+ bFail.AddEdgeTo(bEnd)
+ s.startBlock(bEnd)
+ idata := s.newValue1(ssa.OpIData, byteptr, iface)
+ res = s.newValue2(ssa.OpIMake, dst, s.variable(typVar, byteptr), idata)
+ resok = cond
+ delete(s.vars, typVar)
+ return
+ }
+ // converting to a nonempty interface needs a runtime call.
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion not inlined")
+ }
+ if !commaok {
+ fn := ir.Syms.AssertI2I
+ if src.IsEmptyInterface() {
+ fn = ir.Syms.AssertE2I
+ }
+ data := s.newValue1(ssa.OpIData, types.Types[types.TUNSAFEPTR], iface)
+ tab := s.newValue1(ssa.OpITab, byteptr, iface)
+ tab = s.rtcall(fn, true, []*types.Type{byteptr}, target, tab)[0]
+ return s.newValue2(ssa.OpIMake, dst, tab, data), nil
+ }
+ fn := ir.Syms.AssertI2I2
+ if src.IsEmptyInterface() {
+ fn = ir.Syms.AssertE2I2
+ }
+ res = s.rtcall(fn, true, []*types.Type{dst}, target, iface)[0]
+ resok = s.newValue2(ssa.OpNeqInter, types.Types[types.TBOOL], res, s.constInterface(dst))
+ return
+ }
+
+ if base.Debug.TypeAssert > 0 {
+ base.WarnfAt(pos, "type assertion inlined")
+ }
+
+ // Converting to a concrete type.
+ direct := types.IsDirectIface(dst)
+ itab := s.newValue1(ssa.OpITab, byteptr, iface) // type word of interface
+ var wantedFirstWord *ssa.Value
+ if src.IsEmptyInterface() {
+ // Looking for pointer to target type.
+ wantedFirstWord = target
+ } else {
+ // Looking for pointer to itab for target type and source interface.
+ wantedFirstWord = targetItab
+ }
+
+ var tmp ir.Node // temporary for use with large types
+ var addr *ssa.Value // address of tmp
+ if commaok && !TypeOK(dst) {
+ // unSSAable type, use temporary.
+ // TODO: get rid of some of these temporaries.
+ tmp, addr = s.temp(pos, dst)
+ }
+
+ cond := s.newValue2(ssa.OpEqPtr, types.Types[types.TBOOL], itab, wantedFirstWord)
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cond)
+ b.Likely = ssa.BranchLikely
+
+ bOk := s.f.NewBlock(ssa.BlockPlain)
+ bFail := s.f.NewBlock(ssa.BlockPlain)
+ b.AddEdgeTo(bOk)
+ b.AddEdgeTo(bFail)
+
+ if !commaok {
+ // on failure, panic by calling panicdottype
+ s.startBlock(bFail)
+ taddr := s.reflectType(src)
+ if src.IsEmptyInterface() {
+ s.rtcall(ir.Syms.PanicdottypeE, false, nil, itab, target, taddr)
+ } else {
+ s.rtcall(ir.Syms.PanicdottypeI, false, nil, itab, target, taddr)
+ }
+
+ // on success, return data from interface
+ s.startBlock(bOk)
+ if direct {
+ return s.newValue1(ssa.OpIData, dst, iface), nil
+ }
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ return s.load(dst, p), nil
+ }
+
+ // commaok is the more complicated case because we have
+ // a control flow merge point.
+ bEnd := s.f.NewBlock(ssa.BlockPlain)
+ // Note that we need a new valVar each time (unlike okVar where we can
+ // reuse the variable) because it might have a different type every time.
+ valVar := ssaMarker("val")
+
+ // type assertion succeeded
+ s.startBlock(bOk)
+ if tmp == nil {
+ if direct {
+ s.vars[valVar] = s.newValue1(ssa.OpIData, dst, iface)
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.vars[valVar] = s.load(dst, p)
+ }
+ } else {
+ p := s.newValue1(ssa.OpIData, types.NewPtr(dst), iface)
+ s.move(dst, addr, p)
+ }
+ s.vars[okVar] = s.constBool(true)
+ s.endBlock()
+ bOk.AddEdgeTo(bEnd)
+
+ // type assertion failed
+ s.startBlock(bFail)
+ if tmp == nil {
+ s.vars[valVar] = s.zeroVal(dst)
+ } else {
+ s.zero(dst, addr)
+ }
+ s.vars[okVar] = s.constBool(false)
+ s.endBlock()
+ bFail.AddEdgeTo(bEnd)
+
+ // merge point
+ s.startBlock(bEnd)
+ if tmp == nil {
+ res = s.variable(valVar, dst)
+ delete(s.vars, valVar)
+ } else {
+ res = s.load(dst, addr)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarKill, types.TypeMem, tmp.(*ir.Name), s.mem())
+ }
+ resok = s.variable(okVar, types.Types[types.TBOOL])
+ delete(s.vars, okVar)
+ return res, resok
+}
+
+// temp allocates a temp of type t at position pos
+func (s *state) temp(pos src.XPos, t *types.Type) (*ir.Name, *ssa.Value) {
+ tmp := typecheck.TempAt(pos, s.curfn, t)
+ s.vars[memVar] = s.newValue1A(ssa.OpVarDef, types.TypeMem, tmp, s.mem())
+ addr := s.addr(tmp)
+ return tmp, addr
+}
+
+// variable returns the value of a variable at the current location.
+func (s *state) variable(n ir.Node, t *types.Type) *ssa.Value {
+ v := s.vars[n]
+ if v != nil {
+ return v
+ }
+ v = s.fwdVars[n]
+ if v != nil {
+ return v
+ }
+
+ if s.curBlock == s.f.Entry {
+ // No variable should be live at entry.
+ s.Fatalf("Value live at entry. It shouldn't be. func %s, node %v, value %v", s.f.Name, n, v)
+ }
+ // Make a FwdRef, which records a value that's live on block input.
+ // We'll find the matching definition as part of insertPhis.
+ v = s.newValue0A(ssa.OpFwdRef, t, fwdRefAux{N: n})
+ s.fwdVars[n] = v
+ if n.Op() == ir.ONAME {
+ s.addNamedValue(n.(*ir.Name), v)
+ }
+ return v
+}
+
+func (s *state) mem() *ssa.Value {
+ return s.variable(memVar, types.TypeMem)
+}
+
+func (s *state) addNamedValue(n *ir.Name, v *ssa.Value) {
+ if n.Class == ir.Pxxx {
+ // Don't track our marker nodes (memVar etc.).
+ return
+ }
+ if ir.IsAutoTmp(n) {
+ // Don't track temporary variables.
+ return
+ }
+ if n.Class == ir.PPARAMOUT {
+ // Don't track named output values. This prevents return values
+ // from being assigned too early. See #14591 and #14762. TODO: allow this.
+ return
+ }
+ loc := ssa.LocalSlot{N: n, Type: n.Type(), Off: 0}
+ values, ok := s.f.NamedValues[loc]
+ if !ok {
+ s.f.Names = append(s.f.Names, &loc)
+ s.f.CanonicalLocalSlots[loc] = &loc
+ }
+ s.f.NamedValues[loc] = append(values, v)
+}
+
+// Branch is an unresolved branch.
+type Branch struct {
+ P *obj.Prog // branch instruction
+ B *ssa.Block // target
+}
+
+// State contains state needed during Prog generation.
+type State struct {
+ ABI obj.ABI
+
+ pp *objw.Progs
+
+ // Branches remembers all the branch instructions we've seen
+ // and where they would like to go.
+ Branches []Branch
+
+ // bstart remembers where each block starts (indexed by block ID)
+ bstart []*obj.Prog
+
+ maxarg int64 // largest frame size for arguments to calls made by the function
+
+ // Map from GC safe points to liveness index, generated by
+ // liveness analysis.
+ livenessMap liveness.Map
+
+ // partLiveArgs includes arguments that may be partially live, for which we
+ // need to generate instructions that spill the argument registers.
+ partLiveArgs map[*ir.Name]bool
+
+ // lineRunStart records the beginning of the current run of instructions
+ // within a single block sharing the same line number.
+ // Used to move statement marks to the beginning of such runs.
+ lineRunStart *obj.Prog
+
+ // wasm: The number of values on the WebAssembly stack. This is only used as a safeguard.
+ OnWasmStackSkipped int
+}
+
+func (s *State) FuncInfo() *obj.FuncInfo {
+ return s.pp.CurFunc.LSym.Func()
+}
+
+// Prog appends a new Prog.
+func (s *State) Prog(as obj.As) *obj.Prog {
+ p := s.pp.Prog(as)
+ if objw.LosesStmtMark(as) {
+ return p
+ }
+ // Float a statement start to the beginning of any same-line run.
+ // lineRunStart is reset at block boundaries, which appears to work well.
+ if s.lineRunStart == nil || s.lineRunStart.Pos.Line() != p.Pos.Line() {
+ s.lineRunStart = p
+ } else if p.Pos.IsStmt() == src.PosIsStmt {
+ s.lineRunStart.Pos = s.lineRunStart.Pos.WithIsStmt()
+ p.Pos = p.Pos.WithNotStmt()
+ }
+ return p
+}
+
+// Pc returns the current Prog.
+func (s *State) Pc() *obj.Prog {
+ return s.pp.Next
+}
+
+// SetPos sets the current source position.
+func (s *State) SetPos(pos src.XPos) {
+ s.pp.Pos = pos
+}
+
+// Br emits a single branch instruction and returns the instruction.
+// Not all architectures need the returned instruction, but otherwise
+// the boilerplate is common to all.
+func (s *State) Br(op obj.As, target *ssa.Block) *obj.Prog {
+ p := s.Prog(op)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, Branch{P: p, B: target})
+ return p
+}
+
+// DebugFriendlySetPosFrom adjusts Pos.IsStmt subject to heuristics
+// that reduce "jumpy" line number churn when debugging.
+// Spill/fill/copy instructions from the register allocator,
+// phi functions, and instructions with a no-pos position
+// are examples of instructions that can cause churn.
+func (s *State) DebugFriendlySetPosFrom(v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpPhi, ssa.OpCopy, ssa.OpLoadReg, ssa.OpStoreReg:
+ // These are not statements
+ s.SetPos(v.Pos.WithNotStmt())
+ default:
+ p := v.Pos
+ if p != src.NoXPos {
+ // If the position is defined, update the position.
+ // Also convert default IsStmt to NotStmt; only
+ // explicit statement boundaries should appear
+ // in the generated code.
+ if p.IsStmt() != src.PosIsStmt {
+ if s.pp.Pos.IsStmt() == src.PosIsStmt && s.pp.Pos.SameFileAndLine(p) {
+ // If s.pp.Pos already has a statement mark, then it was set here (below) for
+ // the previous value. If an actual instruction had been emitted for that
+ // value, then the statement mark would have been reset. Since the statement
+ // mark of s.pp.Pos was not reset, this position (file/line) still needs a
+ // statement mark on an instruction. If file and line for this value are
+ // the same as the previous value, then the first instruction for this
+ // value will work to take the statement mark. Return early to avoid
+ // resetting the statement mark.
+ //
+ // The reset of s.pp.Pos occurs in (*Progs).Prog() -- if it emits
+ // an instruction, and the instruction's statement mark was set,
+ // and it is not one of the LosesStmtMark instructions,
+ // then Prog() resets the statement mark on the (*Progs).Pos.
+ return
+ }
+ p = p.WithNotStmt()
+ // Calls use the pos attached to v, but copy the statement mark from State
+ }
+ s.SetPos(p)
+ } else {
+ s.SetPos(s.pp.Pos.WithNotStmt())
+ }
+ }
+}
+
+// emit argument info (locations on stack) for traceback.
+func emitArgInfo(e *ssafn, f *ssa.Func, pp *objw.Progs) {
+ ft := e.curfn.Type()
+ if ft.NumRecvs() == 0 && ft.NumParams() == 0 {
+ return
+ }
+
+ x := EmitArgInfo(e.curfn, f.OwnAux.ABIInfo())
+ x.Set(obj.AttrContentAddressable, true)
+ e.curfn.LSym.Func().ArgInfo = x
+
+ // Emit a funcdata pointing at the arg info data.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_ArgInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+}
+
+// emit argument info (locations on stack) of f for traceback.
+func EmitArgInfo(f *ir.Func, abiInfo *abi.ABIParamResultInfo) *obj.LSym {
+ x := base.Ctxt.Lookup(fmt.Sprintf("%s.arginfo%d", f.LSym.Name, f.ABI))
+ // NOTE: do not set ContentAddressable here. This may be referenced from
+ // assembly code by name (in this case f is a declaration).
+ // Instead, set it in emitArgInfo above.
+
+ PtrSize := int64(types.PtrSize)
+ uintptrTyp := types.Types[types.TUINTPTR]
+
+ isAggregate := func(t *types.Type) bool {
+ return t.IsStruct() || t.IsArray() || t.IsComplex() || t.IsInterface() || t.IsString() || t.IsSlice()
+ }
+
+ // Populate the data.
+ // The data is a stream of bytes, which contains the offsets and sizes of the
+ // non-aggregate arguments or non-aggregate fields/elements of aggregate-typed
+ // arguments, along with special "operators". Specifically,
+ // - for each non-aggregate arg/field/element, its offset from FP (1 byte) and
+ // size (1 byte)
+ // - special operators:
+ // - 0xff - end of sequence
+ // - 0xfe - print { (at the start of an aggregate-typed argument)
+ // - 0xfd - print } (at the end of an aggregate-typed argument)
+ // - 0xfc - print ... (more args/fields/elements)
+ // - 0xfb - print _ (offset too large)
+ // These constants need to be in sync with runtime.traceback.go:printArgs.
+ const (
+ _endSeq = 0xff
+ _startAgg = 0xfe
+ _endAgg = 0xfd
+ _dotdotdot = 0xfc
+ _offsetTooLarge = 0xfb
+ _special = 0xf0 // above this are operators, below this are ordinary offsets
+ )
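+ // Illustrative example (assuming 8-byte pointers and a frame offset of 16):
+ // a string argument encodes as _startAgg, 16, 8, 24, 8, _endAgg, i.e. its
+ // two uintptr words as (offset, size) pairs bracketed by the aggregate markers.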
+
+ const (
+ limit = 10 // print no more than 10 args/components
+ maxDepth = 5 // no more than 5 layers of nesting
+
+ // maxLen is a (conservative) upper bound of the byte stream length. For
+ // each arg/component, it has no more than 2 bytes of data (size, offset),
+ // and no more than one {, }, ... at each level (it cannot have both the
+ // data and ... unless it is the last one, just be conservative). Plus 1
+ // for _endSeq.
+ maxLen = (maxDepth*3+2)*limit + 1
+ )
+
+ wOff := 0
+ n := 0
+ writebyte := func(o uint8) { wOff = objw.Uint8(x, wOff, o) }
+
+ // Write one non-aggregate arg/field/element.
+ write1 := func(sz, offset int64) {
+ if offset >= _special {
+ writebyte(_offsetTooLarge)
+ } else {
+ writebyte(uint8(offset))
+ writebyte(uint8(sz))
+ }
+ n++
+ }
+
+ // Visit t recursively and write it out.
+ // Returns whether to continue visiting.
+ var visitType func(baseOffset int64, t *types.Type, depth int) bool
+ visitType = func(baseOffset int64, t *types.Type, depth int) bool {
+ if n >= limit {
+ writebyte(_dotdotdot)
+ return false
+ }
+ if !isAggregate(t) {
+ write1(t.Size(), baseOffset)
+ return true
+ }
+ writebyte(_startAgg)
+ depth++
+ if depth >= maxDepth {
+ writebyte(_dotdotdot)
+ writebyte(_endAgg)
+ n++
+ return true
+ }
+ switch {
+ case t.IsInterface(), t.IsString():
+ _ = visitType(baseOffset, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize, uintptrTyp, depth)
+ case t.IsSlice():
+ _ = visitType(baseOffset, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize, uintptrTyp, depth) &&
+ visitType(baseOffset+PtrSize*2, uintptrTyp, depth)
+ case t.IsComplex():
+ _ = visitType(baseOffset, types.FloatForComplex(t), depth) &&
+ visitType(baseOffset+t.Size()/2, types.FloatForComplex(t), depth)
+ case t.IsArray():
+ if t.NumElem() == 0 {
+ n++ // {} counts as a component
+ break
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ if !visitType(baseOffset, t.Elem(), depth) {
+ break
+ }
+ baseOffset += t.Elem().Size()
+ }
+ case t.IsStruct():
+ if t.NumFields() == 0 {
+ n++ // {} counts as a component
+ break
+ }
+ for _, field := range t.Fields().Slice() {
+ if !visitType(baseOffset+field.Offset, field.Type, depth) {
+ break
+ }
+ }
+ }
+ writebyte(_endAgg)
+ return true
+ }
+
+ start := 0
+ if strings.Contains(f.LSym.Name, "[") {
+ // Skip the dictionary argument - it is implicit and the user doesn't need to see it.
+ start = 1
+ }
+
+ for _, a := range abiInfo.InParams()[start:] {
+ if !visitType(a.FrameOffset(abiInfo), a.Type, 0) {
+ break
+ }
+ }
+ writebyte(_endSeq)
+ if wOff > maxLen {
+ base.Fatalf("ArgInfo too large")
+ }
+
+ return x
+}
+
+// for wrapper, emit info of wrapped function.
+func emitWrappedFuncInfo(e *ssafn, pp *objw.Progs) {
+ if base.Ctxt.Flag_linkshared {
+ // Relative reference (SymPtrOff) to another shared object doesn't work.
+ // Unfortunate.
+ return
+ }
+
+ wfn := e.curfn.WrappedFunc
+ if wfn == nil {
+ return
+ }
+
+ wsym := wfn.Linksym()
+ x := base.Ctxt.LookupInit(fmt.Sprintf("%s.wrapinfo", wsym.Name), func(x *obj.LSym) {
+ objw.SymPtrOff(x, 0, wsym)
+ x.Set(obj.AttrContentAddressable, true)
+ })
+ e.curfn.LSym.Func().WrapInfo = x
+
+ // Emit a funcdata pointing at the wrap info data.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_WrapInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = x
+}
+
+// genssa appends entries to pp for each instruction in f.
+func genssa(f *ssa.Func, pp *objw.Progs) {
+ var s State
+ s.ABI = f.OwnAux.Fn.ABI()
+
+ e := f.Frontend().(*ssafn)
+
+ s.livenessMap, s.partLiveArgs = liveness.Compute(e.curfn, f, e.stkptrsize, pp)
+ emitArgInfo(e, f, pp)
+ argLiveBlockMap, argLiveValueMap := liveness.ArgLiveness(e.curfn, f, pp)
+
+ openDeferInfo := e.curfn.LSym.Func().OpenCodedDeferInfo
+ if openDeferInfo != nil {
+ // This function uses open-coded defers -- write out the funcdata
+ // info that we computed at the end of genssa.
+ p := pp.Prog(obj.AFUNCDATA)
+ p.From.SetConst(objabi.FUNCDATA_OpenCodedDeferInfo)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = openDeferInfo
+ }
+
+ emitWrappedFuncInfo(e, pp)
+
+ // Remember where each block starts.
+ s.bstart = make([]*obj.Prog, f.NumBlocks())
+ s.pp = pp
+ var progToValue map[*obj.Prog]*ssa.Value
+ var progToBlock map[*obj.Prog]*ssa.Block
+ var valueToProgAfter []*obj.Prog // The first Prog following computation of a value v; v is visible at this point.
+ gatherPrintInfo := f.PrintOrHtmlSSA || ssa.GenssaDump[f.Name]
+ if gatherPrintInfo {
+ progToValue = make(map[*obj.Prog]*ssa.Value, f.NumValues())
+ progToBlock = make(map[*obj.Prog]*ssa.Block, f.NumBlocks())
+ f.Logf("genssa %s\n", f.Name)
+ progToBlock[s.pp.Next] = f.Blocks[0]
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ if cap(f.Cache.ValueToProgAfter) < f.NumValues() {
+ f.Cache.ValueToProgAfter = make([]*obj.Prog, f.NumValues())
+ }
+ valueToProgAfter = f.Cache.ValueToProgAfter[:f.NumValues()]
+ for i := range valueToProgAfter {
+ valueToProgAfter[i] = nil
+ }
+ }
+
+ // If the very first instruction is not tagged as a statement,
+ // debuggers may attribute it to the previous function in the program.
+ firstPos := src.NoXPos
+ for _, v := range f.Entry.Values {
+ if v.Pos.IsStmt() == src.PosIsStmt {
+ firstPos = v.Pos
+ v.Pos = firstPos.WithDefaultStmt()
+ break
+ }
+ }
+
+ // inlMarks has an entry for each Prog that implements an inline mark.
+ // It maps from that Prog to the global inlining id of the inlined body
+ // which should unwind to this Prog's location.
+ var inlMarks map[*obj.Prog]int32
+ var inlMarkList []*obj.Prog
+
+ // inlMarksByPos maps from a (column 1) source position to the set of
+ // Progs that are in the set above and have that source position.
+ var inlMarksByPos map[src.XPos][]*obj.Prog
+
+ var argLiveIdx int = -1 // argument liveness info index
+
+ // Emit basic blocks
+ for i, b := range f.Blocks {
+ s.bstart[b.ID] = s.pp.Next
+ s.lineRunStart = nil
+ s.SetPos(s.pp.Pos.WithNotStmt()) // It needs a non-empty Pos, but cannot be a statement boundary (yet).
+
+ // Attach a "default" liveness info. Normally this will be
+ // overwritten in the Values loop below for each Value. But
+ // for an empty block this will be used for its control
+ // instruction. We won't use the actual liveness map on a
+ // control instruction. Just mark it something that is
+ // preemptible, unless this function is "all unsafe".
+ s.pp.NextLive = objw.LivenessIndex{StackMapIndex: -1, IsUnsafePoint: liveness.IsUnsafe(f)}
+
+ if idx, ok := argLiveBlockMap[b.ID]; ok && idx != argLiveIdx {
+ argLiveIdx = idx
+ p := s.pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
+ p.To.SetConst(int64(idx))
+ }
+
+ // Emit values in block
+ Arch.SSAMarkMoves(&s, b)
+ for _, v := range b.Values {
+ x := s.pp.Next
+ s.DebugFriendlySetPosFrom(v)
+
+ if v.Op.ResultInArg0() && v.ResultReg() != v.Args[0].Reg() {
+ v.Fatalf("input[0] and output not in same register %s", v.LongString())
+ }
+
+ switch v.Op {
+ case ssa.OpInitMem:
+ // memory arg needs no code
+ case ssa.OpArg:
+ // input args need no code
+ case ssa.OpSP, ssa.OpSB:
+ // nothing to do
+ case ssa.OpSelect0, ssa.OpSelect1, ssa.OpSelectN, ssa.OpMakeResult:
+ // nothing to do
+ case ssa.OpGetG:
+ // nothing to do when there's a g register,
+ // and checkLower complains if there's not
+ case ssa.OpVarDef, ssa.OpVarLive, ssa.OpKeepAlive, ssa.OpVarKill:
+ // nothing to do; already used by liveness
+ case ssa.OpPhi:
+ CheckLoweredPhi(v)
+ case ssa.OpConvert:
+ // nothing to do; no-op conversion for liveness
+ if v.Args[0].Reg() != v.Reg() {
+ v.Fatalf("OpConvert should be a no-op: %s; %s", v.Args[0].LongString(), v.LongString())
+ }
+ case ssa.OpInlMark:
+ p := Arch.Ginsnop(s.pp)
+ if inlMarks == nil {
+ inlMarks = map[*obj.Prog]int32{}
+ inlMarksByPos = map[src.XPos][]*obj.Prog{}
+ }
+ inlMarks[p] = v.AuxInt32()
+ inlMarkList = append(inlMarkList, p)
+ pos := v.Pos.AtColumn1()
+ inlMarksByPos[pos] = append(inlMarksByPos[pos], p)
+
+ default:
+ // Special case for the first line in a function; attach it to the first instruction that is not a register-shuffling op.
+ if firstPos != src.NoXPos && v.Op != ssa.OpArgIntReg && v.Op != ssa.OpArgFloatReg && v.Op != ssa.OpLoadReg && v.Op != ssa.OpStoreReg {
+ s.SetPos(firstPos)
+ firstPos = src.NoXPos
+ }
+ // Attach this safe point to the next
+ // instruction.
+ s.pp.NextLive = s.livenessMap.Get(v)
+
+ // let the backend handle it
+ Arch.SSAGenValue(&s, v)
+ }
+
+ if idx, ok := argLiveValueMap[v.ID]; ok && idx != argLiveIdx {
+ argLiveIdx = idx
+ p := s.pp.Prog(obj.APCDATA)
+ p.From.SetConst(objabi.PCDATA_ArgLiveIndex)
+ p.To.SetConst(int64(idx))
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ valueToProgAfter[v.ID] = s.pp.Next
+ }
+
+ if gatherPrintInfo {
+ for ; x != s.pp.Next; x = x.Link {
+ progToValue[x] = v
+ }
+ }
+ }
+ // If this is an empty infinite loop, stick a hardware NOP in there so that debuggers are less confused.
+ if s.bstart[b.ID] == s.pp.Next && len(b.Succs) == 1 && b.Succs[0].Block() == b {
+ p := Arch.Ginsnop(s.pp)
+ p.Pos = p.Pos.WithIsStmt()
+ if b.Pos == src.NoXPos {
+ b.Pos = p.Pos // It needs a file, otherwise a no-file non-zero line causes confusion. See #35652.
+ if b.Pos == src.NoXPos {
+ b.Pos = pp.Text.Pos // Sometimes p.Pos is empty. See #35695.
+ }
+ }
+ b.Pos = b.Pos.WithBogusLine() // Debuggers are not good about infinite loops, force a change in line number
+ }
+ // Emit control flow instructions for block
+ var next *ssa.Block
+ if i < len(f.Blocks)-1 && base.Flag.N == 0 {
+ // If -N, leave next==nil so every block with successors
+ // ends in a JMP (except call blocks - plive doesn't like
+ // select{send,recv} followed by a JMP call). Helps keep
+ // line numbers for otherwise empty blocks.
+ next = f.Blocks[i+1]
+ }
+ x := s.pp.Next
+ s.SetPos(b.Pos)
+ Arch.SSAGenBlock(&s, b, next)
+ if gatherPrintInfo {
+ for ; x != s.pp.Next; x = x.Link {
+ progToBlock[x] = b
+ }
+ }
+ }
+ if f.Blocks[len(f.Blocks)-1].Kind == ssa.BlockExit {
+ // We need the return address of a panic call to
+ // still be inside the function in question. So if
+ // it ends in a call which doesn't return, add a
+ // nop (which will never execute) after the call.
+ Arch.Ginsnop(pp)
+ }
+ if openDeferInfo != nil {
+ // When doing open-coded defers, generate a disconnected call to
+ // deferreturn and a return. This will be used during panic
+ // recovery to unwind the stack and return to the runtime.
+ s.pp.NextLive = s.livenessMap.DeferReturn
+ p := pp.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ir.Syms.Deferreturn
+
+ // Load results into registers. So when a deferred function
+ // recovers a panic, it will return to the caller with the right results.
+ // The results are already in memory, because they are not SSA'd
+ // when the function has defers (see canSSAName).
+ for _, o := range f.OwnAux.ABIInfo().OutParams() {
+ n := o.Name.(*ir.Name)
+ rts, offs := o.RegisterTypesAndOffsets()
+ for i := range o.Registers {
+ Arch.LoadRegResult(&s, f, rts[i], ssa.ObjRegForAbiReg(o.Registers[i], f.Config), n, offs[i])
+ }
+ }
+
+ pp.Prog(obj.ARET)
+ }
+
+ if inlMarks != nil {
+ // We have some inline marks. Try to find other instructions we're
+ // going to emit anyway, and use those instructions instead of the
+ // inline marks.
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.As == obj.ANOP || p.As == obj.AFUNCDATA || p.As == obj.APCDATA || p.As == obj.ATEXT || p.As == obj.APCALIGN || Arch.LinkArch.Family == sys.Wasm {
+ // Don't use 0-sized instructions as inline marks, because we need
+ // to identify inline mark instructions by pc offset.
+ // (Some of these instructions are sometimes zero-sized, sometimes not.
+ // We must not use anything that even might be zero-sized.)
+ // TODO: are there others?
+ continue
+ }
+ if _, ok := inlMarks[p]; ok {
+ // Don't use inline marks themselves. We don't know
+ // whether they will be zero-sized or not yet.
+ continue
+ }
+ pos := p.Pos.AtColumn1()
+ s := inlMarksByPos[pos]
+ if len(s) == 0 {
+ continue
+ }
+ for _, m := range s {
+ // We found an instruction with the same source position as
+ // some of the inline marks.
+ // Use this instruction instead.
+ p.Pos = p.Pos.WithIsStmt() // promote position to a statement
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[m])
+ // Make the inline mark a real nop, so it doesn't generate any code.
+ m.As = obj.ANOP
+ m.Pos = src.NoXPos
+ m.From = obj.Addr{}
+ m.To = obj.Addr{}
+ }
+ delete(inlMarksByPos, pos)
+ }
+ // Any unmatched inline marks now need to be added to the inlining tree (and will generate a nop instruction).
+ for _, p := range inlMarkList {
+ if p.As != obj.ANOP {
+ pp.CurFunc.LSym.Func().AddInlMark(p, inlMarks[p])
+ }
+ }
+ }
+
+ if base.Ctxt.Flag_locationlists {
+ var debugInfo *ssa.FuncDebug
+ debugInfo = e.curfn.DebugInfo.(*ssa.FuncDebug)
+ if e.curfn.ABI == obj.ABIInternal && base.Flag.N != 0 {
+ ssa.BuildFuncDebugNoOptimized(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
+ } else {
+ ssa.BuildFuncDebug(base.Ctxt, f, base.Debug.LocationLists > 1, StackOffset, debugInfo)
+ }
+ bstart := s.bstart
+ idToIdx := make([]int, f.NumBlocks())
+ for i, b := range f.Blocks {
+ idToIdx[b.ID] = i
+ }
+ // Note that at this moment, Prog.Pc is a sequence number; it's
+ // not a real PC until after assembly, so this mapping has to
+ // be done later.
+ debugInfo.GetPC = func(b, v ssa.ID) int64 {
+ switch v {
+ case ssa.BlockStart.ID:
+ if b == f.Entry.ID {
+ return 0 // Start at the very beginning, at the assembler-generated prologue.
+ // this should only happen for function args (ssa.OpArg)
+ }
+ return bstart[b].Pc
+ case ssa.BlockEnd.ID:
+ blk := f.Blocks[idToIdx[b]]
+ nv := len(blk.Values)
+ return valueToProgAfter[blk.Values[nv-1].ID].Pc
+ case ssa.FuncEnd.ID:
+ return e.curfn.LSym.Size
+ default:
+ return valueToProgAfter[v].Pc
+ }
+ }
+ }
+
+ // Resolve branches, and relax DefaultStmt into NotStmt
+ for _, br := range s.Branches {
+ br.P.To.SetTarget(s.bstart[br.B.ID])
+ if br.P.Pos.IsStmt() != src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ } else if v0 := br.B.FirstPossibleStmtValue(); v0 != nil && v0.Pos.Line() == br.P.Pos.Line() && v0.Pos.IsStmt() == src.PosIsStmt {
+ br.P.Pos = br.P.Pos.WithNotStmt()
+ }
+
+ }
+
+ if e.log { // spew to stdout
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ f.Logf("# %s\n", filename)
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ f.Logf(" %-6s\t%.5d (%s)\t%s\n", s, p.Pc, p.InnermostLineNumber(), p.InstructionString())
+ }
+ }
+ if f.HTMLWriter != nil { // spew to ssa.html
+ var buf bytes.Buffer
+ buf.WriteString("<code>")
+ buf.WriteString("<dl class=\"ssa-gen\">")
+ filename := ""
+ for p := pp.Text; p != nil; p = p.Link {
+ // Don't spam every line with the file name, which is often huge.
+ // Only print changes, and "unknown" is not a change.
+ if p.Pos.IsKnown() && p.InnermostFilename() != filename {
+ filename = p.InnermostFilename()
+ buf.WriteString("<dt class=\"ssa-prog-src\"></dt><dd class=\"ssa-prog\">")
+ buf.WriteString(html.EscapeString("# " + filename))
+ buf.WriteString("</dd>")
+ }
+
+ buf.WriteString("<dt class=\"ssa-prog-src\">")
+ if v, ok := progToValue[p]; ok {
+ buf.WriteString(v.HTML())
+ } else if b, ok := progToBlock[p]; ok {
+ buf.WriteString("<b>" + b.HTML() + "</b>")
+ }
+ buf.WriteString("</dt>")
+ buf.WriteString("<dd class=\"ssa-prog\">")
+ buf.WriteString(fmt.Sprintf("%.5d <span class=\"l%v line-number\">(%s)</span> %s", p.Pc, p.InnermostLineNumber(), p.InnermostLineNumberHTML(), html.EscapeString(p.InstructionString())))
+ buf.WriteString("</dd>")
+ }
+ buf.WriteString("</dl>")
+ buf.WriteString("</code>")
+ f.HTMLWriter.WriteColumn("genssa", "genssa", "ssa-prog", buf.String())
+ }
+ if ssa.GenssaDump[f.Name] {
+ fi := f.DumpFileForPhase("genssa")
+ if fi != nil {
+
+ // inliningDiffers reports whether any filename changes or any line number other than the innermost (index 0) changes.
+ inliningDiffers := func(a, b []src.Pos) bool {
+ if len(a) != len(b) {
+ return true
+ }
+ for i := range a {
+ if a[i].Filename() != b[i].Filename() {
+ return true
+ }
+ if i > 0 && a[i].Line() != b[i].Line() {
+ return true
+ }
+ }
+ return false
+ }
+
+ var allPosOld []src.Pos
+ var allPos []src.Pos
+
+ for p := pp.Text; p != nil; p = p.Link {
+ if p.Pos.IsKnown() {
+ allPos = p.AllPos(allPos)
+ if inliningDiffers(allPos, allPosOld) {
+ for i := len(allPos) - 1; i >= 0; i-- {
+ pos := allPos[i]
+ fmt.Fprintf(fi, "# %s:%d\n", pos.Filename(), pos.Line())
+ }
+ allPos, allPosOld = allPosOld, allPos // swap, not copy, so that they do not share slice storage.
+ }
+ }
+
+ var s string
+ if v, ok := progToValue[p]; ok {
+ s = v.String()
+ } else if b, ok := progToBlock[p]; ok {
+ s = b.String()
+ } else {
+ s = " " // most value and branch strings are 2-3 characters long
+ }
+ fmt.Fprintf(fi, " %-6s\t%.5d %s\t%s\n", s, p.Pc, ssa.StmtString(p.Pos), p.InstructionString())
+ }
+ fi.Close()
+ }
+ }
+
+ defframe(&s, e, f)
+
+ f.HTMLWriter.Close()
+ f.HTMLWriter = nil
+}
+
+func defframe(s *State, e *ssafn, f *ssa.Func) {
+ pp := s.pp
+
+ frame := types.Rnd(s.maxarg+e.stksize, int64(types.RegSize))
+ if Arch.PadFrame != nil {
+ frame = Arch.PadFrame(frame)
+ }
+
+ // Fill in argument and frame size.
+ pp.Text.To.Type = obj.TYPE_TEXTSIZE
+ pp.Text.To.Val = int32(types.Rnd(f.OwnAux.ArgWidth(), int64(types.RegSize)))
+ pp.Text.To.Offset = frame
+
+ p := pp.Text
+
+ // Insert code to spill argument registers if the named slot may be partially
+ // live. That is, the named slot is considered live by liveness analysis,
+ // (because a part of it is live), but we may not spill all parts into the
+ // slot. This can only happen with aggregate-typed arguments that are SSA-able
+ // and not address-taken (for non-SSA-able or address-taken arguments we always
+ // spill upfront).
+ // Note: spilling is unnecessary in the -N/no-optimize case, since all values
+ // will be considered non-SSAable and spilled up front.
+ // TODO(register args) Make liveness more fine-grained so that partial spilling is okay.
+ if f.OwnAux.ABIInfo().InRegistersUsed() != 0 && base.Flag.N == 0 {
+ // First, see if it is already spilled before it may be live. Look for a spill
+ // in the entry block up to the first safepoint.
+ type nameOff struct {
+ n *ir.Name
+ off int64
+ }
+ partLiveArgsSpilled := make(map[nameOff]bool)
+ for _, v := range f.Entry.Values {
+ if v.Op.IsCall() {
+ break
+ }
+ if v.Op != ssa.OpStoreReg || v.Args[0].Op != ssa.OpArgIntReg {
+ continue
+ }
+ n, off := ssa.AutoVar(v)
+ if n.Class != ir.PPARAM || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] {
+ continue
+ }
+ partLiveArgsSpilled[nameOff{n, off}] = true
+ }
+
+ // Then, insert code to spill registers if not already.
+ for _, a := range f.OwnAux.ABIInfo().InParams() {
+ n, ok := a.Name.(*ir.Name)
+ if !ok || n.Addrtaken() || !TypeOK(n.Type()) || !s.partLiveArgs[n] || len(a.Registers) <= 1 {
+ continue
+ }
+ rts, offs := a.RegisterTypesAndOffsets()
+ for i := range a.Registers {
+ if !rts[i].HasPointers() {
+ continue
+ }
+ if partLiveArgsSpilled[nameOff{n, offs[i]}] {
+ continue // already spilled
+ }
+ reg := ssa.ObjRegForAbiReg(a.Registers[i], f.Config)
+ p = Arch.SpillArgReg(pp, p, f, rts[i], reg, n, offs[i])
+ }
+ }
+ }
+
+ // Insert code to zero ambiguously live variables so that the
+ // garbage collector only sees initialized values when it
+ // looks for pointers.
+ var lo, hi int64
+
+ // Opaque state for backend to use. Current backends use it to
+ // keep track of which helper registers have been zeroed.
+ var state uint32
+
+ // Iterate through declarations. Autos are sorted in decreasing
+ // frame offset order.
+ for _, n := range e.curfn.Dcl {
+ if !n.Needzero() {
+ continue
+ }
+ if n.Class != ir.PAUTO {
+ e.Fatalf(n.Pos(), "needzero class %d", n.Class)
+ }
+ if n.Type().Size()%int64(types.PtrSize) != 0 || n.FrameOffset()%int64(types.PtrSize) != 0 || n.Type().Size() == 0 {
+ e.Fatalf(n.Pos(), "var %L has size %d offset %d", n, n.Type().Size(), n.Offset_)
+ }
+
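+ // Merge this var into the pending range if its end is within 2*RegSize
+ // of the range's start; zeroing a small gap along with its neighbors is
+ // cheaper than emitting a separate zeroing sequence.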
+ if lo != hi && n.FrameOffset()+n.Type().Size() >= lo-int64(2*types.RegSize) {
+ // Merge with range we already have.
+ lo = n.FrameOffset()
+ continue
+ }
+
+ // Zero old range
+ p = Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+
+ // Set new range.
+ lo = n.FrameOffset()
+ hi = lo + n.Type().Size()
+ }
+
+ // Zero final range.
+ Arch.ZeroRange(pp, p, frame+lo, hi-lo, &state)
+}
+
+// An IndexJump is one of the consecutive jump instructions generated to model
+// a specific branching; Index selects the successor block the jump targets.
+type IndexJump struct {
+ Jump obj.As
+ Index int
+}
+
+func (s *State) oneJump(b *ssa.Block, jump *IndexJump) {
+ p := s.Br(jump.Jump, b.Succs[jump.Index].Block())
+ p.Pos = b.Pos
+}
+
+// CombJump generates combinational instructions (2 at present) for a block jump,
+// so that the behaviour of non-standard condition codes can be simulated.
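+// jumps[0] is used when the block that follows in layout order is Succs[0],
+// jumps[1] when it is Succs[1]. If neither successor follows, the jumps[1]
+// pair plus a JMP to Succs[1] is emitted unless the branch is marked
+// unlikely, in which case the jumps[0] pair plus a JMP to Succs[0] is used.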
+func (s *State) CombJump(b, next *ssa.Block, jumps *[2][2]IndexJump) {
+ switch next {
+ case b.Succs[0].Block():
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ case b.Succs[1].Block():
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ default:
+ var q *obj.Prog
+ if b.Likely != ssa.BranchUnlikely {
+ s.oneJump(b, &jumps[1][0])
+ s.oneJump(b, &jumps[1][1])
+ q = s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.oneJump(b, &jumps[0][0])
+ s.oneJump(b, &jumps[0][1])
+ q = s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ q.Pos = b.Pos
+ }
+}
+
+// AddAux adds the offset in the aux fields (AuxInt and Aux) of v to a.
+func AddAux(a *obj.Addr, v *ssa.Value) {
+ AddAux2(a, v, v.AuxInt)
+}
+func AddAux2(a *obj.Addr, v *ssa.Value, offset int64) {
+ if a.Type != obj.TYPE_MEM && a.Type != obj.TYPE_ADDR {
+ v.Fatalf("bad AddAux addr %v", a)
+ }
+ // add integer offset
+ a.Offset += offset
+
+ // If no additional symbol offset, we're done.
+ if v.Aux == nil {
+ return
+ }
+ // Add symbol's offset from its base register.
+ switch n := v.Aux.(type) {
+ case *ssa.AuxCall:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n.Fn
+ case *obj.LSym:
+ a.Name = obj.NAME_EXTERN
+ a.Sym = n
+ case *ir.Name:
+ if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
+ a.Name = obj.NAME_PARAM
+ a.Sym = ir.Orig(n).(*ir.Name).Linksym()
+ a.Offset += n.FrameOffset()
+ break
+ }
+ a.Name = obj.NAME_AUTO
+ if n.Class == ir.PPARAMOUT {
+ a.Sym = ir.Orig(n).(*ir.Name).Linksym()
+ } else {
+ a.Sym = n.Linksym()
+ }
+ a.Offset += n.FrameOffset()
+ default:
+ v.Fatalf("aux in %s not implemented %#v", v, v.Aux)
+ }
+}
+
+// extendIndex extends idx to a full int width.
+// It emits code that panics with the given kind if idx does not fit in an int (only on 32-bit archs).
+func (s *state) extendIndex(idx, len *ssa.Value, kind ssa.BoundsKind, bounded bool) *ssa.Value {
+ size := idx.Type.Size()
+ if size == s.config.PtrSize {
+ return idx
+ }
+ if size > s.config.PtrSize {
+ // truncate 64-bit indexes on 32-bit pointer archs. Test the
+ // high word and branch to out-of-bounds failure if it is not 0.
+ var lo *ssa.Value
+ if idx.Type.IsSigned() {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TINT], idx)
+ } else {
+ lo = s.newValue1(ssa.OpInt64Lo, types.Types[types.TUINT], idx)
+ }
+ if bounded || base.Flag.B != 0 {
+ return lo
+ }
+ bNext := s.f.NewBlock(ssa.BlockPlain)
+ bPanic := s.f.NewBlock(ssa.BlockExit)
+ hi := s.newValue1(ssa.OpInt64Hi, types.Types[types.TUINT32], idx)
+ cmp := s.newValue2(ssa.OpEq32, types.Types[types.TBOOL], hi, s.constInt32(types.Types[types.TUINT32], 0))
+ if !idx.Type.IsSigned() {
+ switch kind {
+ case ssa.BoundsIndex:
+ kind = ssa.BoundsIndexU
+ case ssa.BoundsSliceAlen:
+ kind = ssa.BoundsSliceAlenU
+ case ssa.BoundsSliceAcap:
+ kind = ssa.BoundsSliceAcapU
+ case ssa.BoundsSliceB:
+ kind = ssa.BoundsSliceBU
+ case ssa.BoundsSlice3Alen:
+ kind = ssa.BoundsSlice3AlenU
+ case ssa.BoundsSlice3Acap:
+ kind = ssa.BoundsSlice3AcapU
+ case ssa.BoundsSlice3B:
+ kind = ssa.BoundsSlice3BU
+ case ssa.BoundsSlice3C:
+ kind = ssa.BoundsSlice3CU
+ }
+ }
+ b := s.endBlock()
+ b.Kind = ssa.BlockIf
+ b.SetControl(cmp)
+ b.Likely = ssa.BranchLikely
+ b.AddEdgeTo(bNext)
+ b.AddEdgeTo(bPanic)
+
+ s.startBlock(bPanic)
+ mem := s.newValue4I(ssa.OpPanicExtend, types.TypeMem, int64(kind), hi, lo, len, s.mem())
+ s.endBlock().SetControl(mem)
+ s.startBlock(bNext)
+
+ return lo
+ }
+
+ // Extend value to the required size
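+ // The switch keys below pack the index size and the target int size
+ // into a single value as 10*size + PtrSize: for example, 14 means a
+ // 1-byte index extended to a 4-byte int, and 48 means a 4-byte index
+ // extended to an 8-byte int.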
+ var op ssa.Op
+ if idx.Type.IsSigned() {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpSignExt8to32
+ case 18:
+ op = ssa.OpSignExt8to64
+ case 24:
+ op = ssa.OpSignExt16to32
+ case 28:
+ op = ssa.OpSignExt16to64
+ case 48:
+ op = ssa.OpSignExt32to64
+ default:
+ s.Fatalf("bad signed index extension %s", idx.Type)
+ }
+ } else {
+ switch 10*size + s.config.PtrSize {
+ case 14:
+ op = ssa.OpZeroExt8to32
+ case 18:
+ op = ssa.OpZeroExt8to64
+ case 24:
+ op = ssa.OpZeroExt16to32
+ case 28:
+ op = ssa.OpZeroExt16to64
+ case 48:
+ op = ssa.OpZeroExt32to64
+ default:
+ s.Fatalf("bad unsigned index extension %s", idx.Type)
+ }
+ }
+ return s.newValue1(op, types.Types[types.TINT], idx)
+}
+
+// CheckLoweredPhi checks that regalloc and stackalloc correctly handled phi values.
+// Called during ssaGenValue.
+func CheckLoweredPhi(v *ssa.Value) {
+ if v.Op != ssa.OpPhi {
+ v.Fatalf("CheckLoweredPhi called with non-phi value: %v", v.LongString())
+ }
+ if v.Type.IsMemory() {
+ return
+ }
+ f := v.Block.Func
+ loc := f.RegAlloc[v.ID]
+ for _, a := range v.Args {
+ if aloc := f.RegAlloc[a.ID]; aloc != loc { // TODO: .Equal() instead?
+ v.Fatalf("phi arg at different location than phi: %v @ %s, but arg %v @ %s\n%s\n", v, loc, a, aloc, v.Block.Func)
+ }
+ }
+}
+
+// CheckLoweredGetClosurePtr checks that v is the first instruction in the function's entry block,
+// preceded only by incoming in-register argument values (OpArgIntReg/OpArgFloatReg).
+// The output of LoweredGetClosurePtr is generally hardwired to the correct register.
+// That register contains the closure pointer on closure entry.
+func CheckLoweredGetClosurePtr(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block {
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+ for _, w := range entry.Values {
+ if w == v {
+ break
+ }
+ switch w.Op {
+ case ssa.OpArgIntReg, ssa.OpArgFloatReg:
+ // okay
+ default:
+ base.Fatalf("in %s, badly placed LoweredGetClosurePtr: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+ }
+}
+
+// CheckArgReg ensures that v is in the function's entry block.
+func CheckArgReg(v *ssa.Value) {
+ entry := v.Block.Func.Entry
+ if entry != v.Block {
+ base.Fatalf("in %s, badly placed ArgIReg or ArgFReg: %v %v", v.Block.Func.Name, v.Block, v)
+ }
+}
+
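+// AddrAuto fills in a with the SP-relative address of the stack slot (auto or
+// parameter) that v lives in, as reported by ssa.AutoVar.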
+func AddrAuto(a *obj.Addr, v *ssa.Value) {
+ n, off := ssa.AutoVar(v)
+ a.Type = obj.TYPE_MEM
+ a.Sym = n.Linksym()
+ a.Reg = int16(Arch.REGSP)
+ a.Offset = n.FrameOffset() + off
+ if n.Class == ir.PPARAM || (n.Class == ir.PPARAMOUT && !n.IsOutputParamInRegisters()) {
+ a.Name = obj.NAME_PARAM
+ } else {
+ a.Name = obj.NAME_AUTO
+ }
+}
+
+// Call returns a new CALL instruction for the SSA value v.
+// It uses PrepareCall to prepare the call.
+func (s *State) Call(v *ssa.Value) *obj.Prog {
+ pPosIsStmt := s.pp.Pos.IsStmt() // The statement-ness of the call comes from ssaGenState
+ s.PrepareCall(v)
+
+ p := s.Prog(obj.ACALL)
+ if pPosIsStmt == src.PosIsStmt {
+ p.Pos = v.Pos.WithIsStmt()
+ } else {
+ p.Pos = v.Pos.WithNotStmt()
+ }
+ if sym, ok := v.Aux.(*ssa.AuxCall); ok && sym.Fn != nil {
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = sym.Fn
+ } else {
+ // TODO(mdempsky): Can these differences be eliminated?
+ switch Arch.LinkArch.Family {
+ case sys.AMD64, sys.I386, sys.PPC64, sys.RISCV64, sys.S390X, sys.Wasm:
+ p.To.Type = obj.TYPE_REG
+ case sys.ARM, sys.ARM64, sys.MIPS, sys.MIPS64:
+ p.To.Type = obj.TYPE_MEM
+ default:
+ base.Fatalf("unknown indirect call family")
+ }
+ p.To.Reg = v.Args[0].Reg()
+ }
+ return p
+}
+
+// TailCall returns a new tail call instruction for the SSA value v.
+// It is like Call, but for a tail call.
+func (s *State) TailCall(v *ssa.Value) *obj.Prog {
+ p := s.Call(v)
+ p.As = obj.ARET
+ return p
+}
+
+// PrepareCall prepares to emit a CALL instruction for v and does call-related bookkeeping.
+// It must be called immediately before emitting the actual CALL instruction,
+// since it emits PCDATA for the stack map at the call (calls are safe points).
+func (s *State) PrepareCall(v *ssa.Value) {
+ idx := s.livenessMap.Get(v)
+ if !idx.StackMapValid() {
+ // See Liveness.hasStackMap.
+ if sym, ok := v.Aux.(*ssa.AuxCall); !ok || !(sym.Fn == ir.Syms.Typedmemclr || sym.Fn == ir.Syms.Typedmemmove) {
+ base.Fatalf("missing stack map index for %v", v.LongString())
+ }
+ }
+
+ call, ok := v.Aux.(*ssa.AuxCall)
+
+ if ok {
+ // Record call graph information for nowritebarrierrec
+ // analysis.
+ if nowritebarrierrecCheck != nil {
+ nowritebarrierrecCheck.recordCall(s.pp.CurFunc, call.Fn, v.Pos)
+ }
+ }
+
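+ // v.AuxInt holds the stack space needed for this call's arguments; track
+ // the maximum so defframe can size the outgoing-args area of the frame.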
+ if s.maxarg < v.AuxInt {
+ s.maxarg = v.AuxInt
+ }
+}
+
+// UseArgs records the fact that an instruction needs a certain amount of
+// callee args space for its use.
+func (s *State) UseArgs(n int64) {
+ if s.maxarg < n {
+ s.maxarg = n
+ }
+}
+
+// fieldIdx finds the index of the field referred to by the ODOT node n.
+func fieldIdx(n *ir.SelectorExpr) int {
+ t := n.X.Type()
+ if !t.IsStruct() {
+ panic("ODOT's LHS is not a struct")
+ }
+
+ for i, f := range t.Fields().Slice() {
+ if f.Sym == n.Sel {
+ if f.Offset != n.Offset() {
+ panic("field offset doesn't match")
+ }
+ return i
+ }
+ }
+ panic(fmt.Sprintf("can't find field in expr %v\n", n))
+
+ // TODO: keep the result of this function somewhere in the ODOT Node
+ // so we don't have to recompute it each time we need it.
+}
+
+// ssafn holds frontend information about a function that the backend is processing.
+// It also exports a bunch of compiler services for the ssa backend.
+type ssafn struct {
+ curfn *ir.Func
+ strings map[string]*obj.LSym // map from constant string to data symbols
+ stksize int64 // stack size for current frame
+ stkptrsize int64 // prefix of stack containing pointers
+ log bool // print ssa debug to stdout
+}
+
+// StringData returns a symbol which
+// is the data component of a global string constant containing s.
+func (e *ssafn) StringData(s string) *obj.LSym {
+ if aux, ok := e.strings[s]; ok {
+ return aux
+ }
+ if e.strings == nil {
+ e.strings = make(map[string]*obj.LSym)
+ }
+ data := staticdata.StringSym(e.curfn.Pos(), s)
+ e.strings[s] = data
+ return data
+}
+
+func (e *ssafn) Auto(pos src.XPos, t *types.Type) *ir.Name {
+ return typecheck.TempAt(pos, e.curfn, t) // Note: adds new auto to e.curfn.Func.Dcl list
+}
+
+// SplitSlot returns a slot representing the data of parent starting at offset.
+func (e *ssafn) SplitSlot(parent *ssa.LocalSlot, suffix string, offset int64, t *types.Type) ssa.LocalSlot {
+ node := parent.N
+
+ if node.Class != ir.PAUTO || node.Addrtaken() {
+ // addressed things and non-autos retain their parents (i.e., cannot truly be split)
+ return ssa.LocalSlot{N: node, Type: t, Off: parent.Off + offset}
+ }
+
+ s := &types.Sym{Name: node.Sym().Name + suffix, Pkg: types.LocalPkg}
+ n := ir.NewNameAt(parent.N.Pos(), s)
+ s.Def = n
+ ir.AsNode(s.Def).Name().SetUsed(true)
+ n.SetType(t)
+ n.Class = ir.PAUTO
+ n.SetEsc(ir.EscNever)
+ n.Curfn = e.curfn
+ e.curfn.Dcl = append(e.curfn.Dcl, n)
+ types.CalcSize(t)
+ return ssa.LocalSlot{N: n, Type: t, Off: 0, SplitOf: parent, SplitOffset: offset}
+}
+
+func (e *ssafn) CanSSA(t *types.Type) bool {
+ return TypeOK(t)
+}
+
+func (e *ssafn) Line(pos src.XPos) string {
+ return base.FmtPos(pos)
+}
+
+// Logf logs a message from the compiler.
+func (e *ssafn) Logf(msg string, args ...interface{}) {
+ if e.log {
+ fmt.Printf(msg, args...)
+ }
+}
+
+func (e *ssafn) Log() bool {
+ return e.log
+}
+
+// Fatalf reports a compiler error and exits.
+func (e *ssafn) Fatalf(pos src.XPos, msg string, args ...interface{}) {
+ base.Pos = pos
+ nargs := append([]interface{}{ir.FuncName(e.curfn)}, args...)
+ base.Fatalf("'%s': "+msg, nargs...)
+}
+
+// Warnl reports a "warning", which is usually flag-triggered
+// logging output for the benefit of tests.
+func (e *ssafn) Warnl(pos src.XPos, fmt_ string, args ...interface{}) {
+ base.WarnfAt(pos, fmt_, args...)
+}
+
+func (e *ssafn) Debug_checknil() bool {
+ return base.Debug.Nil != 0
+}
+
+func (e *ssafn) UseWriteBarrier() bool {
+ return base.Flag.WB
+}
+
+func (e *ssafn) Syslook(name string) *obj.LSym {
+ switch name {
+ case "goschedguarded":
+ return ir.Syms.Goschedguarded
+ case "writeBarrier":
+ return ir.Syms.WriteBarrier
+ case "gcWriteBarrier":
+ return ir.Syms.GCWriteBarrier
+ case "typedmemmove":
+ return ir.Syms.Typedmemmove
+ case "typedmemclr":
+ return ir.Syms.Typedmemclr
+ }
+ e.Fatalf(src.NoXPos, "unknown Syslook func %v", name)
+ return nil
+}
+
+func (e *ssafn) SetWBPos(pos src.XPos) {
+ e.curfn.SetWBPos(pos)
+}
+
+func (e *ssafn) MyImportPath() string {
+ return base.Ctxt.Pkgpath
+}
+
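+// clobberBase returns n, or the innermost base expression reachable from n
+// through selections of single-field structs and indexes into one-element
+// arrays; such a base occupies exactly the same memory as n.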
+func clobberBase(n ir.Node) ir.Node {
+ if n.Op() == ir.ODOT {
+ n := n.(*ir.SelectorExpr)
+ if n.X.Type().NumFields() == 1 {
+ return clobberBase(n.X)
+ }
+ }
+ if n.Op() == ir.OINDEX {
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsArray() && n.X.Type().NumElem() == 1 {
+ return clobberBase(n.X)
+ }
+ }
+ return n
+}
+
+// callTargetLSym returns the correct LSym to call 'callee' using its ABI.
+func callTargetLSym(callee *ir.Name) *obj.LSym {
+ if callee.Func == nil {
+ // TODO(austin): This happens in a few cases of
+ // compiler-generated functions. These are all
+ // ABIInternal. It would be better if callee.Func was
+ // never nil and we didn't need this case.
+ return callee.Linksym()
+ }
+
+ return callee.LinksymABI(callee.Func.ABI)
+}
+
+func min8(a, b int8) int8 {
+ if a < b {
+ return a
+ }
+ return b
+}
+
+func max8(a, b int8) int8 {
+ if a > b {
+ return a
+ }
+ return b
+}
+
+// deferstruct makes a runtime._defer structure.
+func deferstruct() *types.Type {
+ makefield := func(name string, typ *types.Type) *types.Field {
+ // Unlike the global makefield function, this one needs to set Pkg
+ // because these types might be compared (in SSA CSE sorting).
+ // TODO: unify this makefield and the global one above.
+ sym := &types.Sym{Name: name, Pkg: types.LocalPkg}
+ return types.NewField(src.NoXPos, sym, typ)
+ }
+ // These fields must match the ones in runtime/runtime2.go:_defer and
+ // (*state).call above.
+ fields := []*types.Field{
+ makefield("started", types.Types[types.TBOOL]),
+ makefield("heap", types.Types[types.TBOOL]),
+ makefield("openDefer", types.Types[types.TBOOL]),
+ makefield("sp", types.Types[types.TUINTPTR]),
+ makefield("pc", types.Types[types.TUINTPTR]),
+ // Note: the types here don't really matter. Defer structures
+ // are always scanned explicitly during stack copying and GC,
+ // so we make them uintptr type even though they are real pointers.
+ makefield("fn", types.Types[types.TUINTPTR]),
+ makefield("_panic", types.Types[types.TUINTPTR]),
+ makefield("link", types.Types[types.TUINTPTR]),
+ makefield("fd", types.Types[types.TUINTPTR]),
+ makefield("varp", types.Types[types.TUINTPTR]),
+ makefield("framepc", types.Types[types.TUINTPTR]),
+ }
+
+ // build struct holding the above fields
+ s := types.NewStruct(types.NoPkg, fields)
+ s.SetNoalg(true)
+ types.CalcStructSize(s)
+ return s
+}
+
+// SpillSlotAddr uses LocalSlot information to initialize an obj.Addr.
+// The resulting addr is used in a non-standard context -- in the prologue
+// of a function, before the frame has been constructed, so the standard
+// addressing for the parameters will be wrong.
+func SpillSlotAddr(spill ssa.Spill, baseReg int16, extraOffset int64) obj.Addr {
+ return obj.Addr{
+ Name: obj.NAME_NONE,
+ Type: obj.TYPE_MEM,
+ Reg: baseReg,
+ Offset: spill.Offset + extraOffset,
+ }
+}
+
+var (
+ BoundsCheckFunc [ssa.BoundsKindCount]*obj.LSym
+ ExtendCheckFunc [ssa.BoundsKindCount]*obj.LSym
+)
+
+// GCWriteBarrierReg maps from registers to gcWriteBarrier implementation LSyms.
+var GCWriteBarrierReg map[int16]*obj.LSym
diff --git a/src/cmd/compile/internal/staticdata/data.go b/src/cmd/compile/internal/staticdata/data.go
new file mode 100644
index 0000000..57c15a3
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/data.go
@@ -0,0 +1,376 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+ "crypto/sha256"
+ "fmt"
+ "go/constant"
+ "internal/buildcfg"
+ "io"
+ "io/ioutil"
+ "os"
+ "sort"
+ "strconv"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// InitAddrOffset writes the address of lsym+off to n+noff; it does not modify n.
+// It's the caller's responsibility to make sure lsym is from an ONAME/PEXTERN node.
+func InitAddrOffset(n *ir.Name, noff int64, lsym *obj.LSym, off int64) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("InitAddr n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("InitAddr nil n sym")
+ }
+ s := n.Linksym()
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, off)
+}
+
+// InitAddr is InitAddrOffset, with offset fixed to 0.
+func InitAddr(n *ir.Name, noff int64, lsym *obj.LSym) {
+ InitAddrOffset(n, noff, lsym, 0)
+}
+
+// InitSlice writes a static slice header {lsym, lencap, lencap} to n+noff; it does not modify n.
+// It's the caller's responsibility to make sure lsym is from an ONAME node.
+func InitSlice(n *ir.Name, noff int64, lsym *obj.LSym, lencap int64) {
+ s := n.Linksym()
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, lsym, 0)
+ s.WriteInt(base.Ctxt, noff+types.SliceLenOffset, types.PtrSize, lencap)
+ s.WriteInt(base.Ctxt, noff+types.SliceCapOffset, types.PtrSize, lencap)
+}
+
+func InitSliceBytes(nam *ir.Name, off int64, s string) {
+ if nam.Op() != ir.ONAME {
+ base.Fatalf("InitSliceBytes %v", nam)
+ }
+ InitSlice(nam, off, slicedata(nam.Pos(), s).Linksym(), int64(len(s)))
+}
+
+const (
+ stringSymPrefix = "go.string."
+ stringSymPattern = ".gostring.%d.%x"
+)
+
+// StringSym returns a symbol containing the string s.
+// The symbol contains the string data, not a string header.
+func StringSym(pos src.XPos, s string) (data *obj.LSym) {
+ var symname string
+ if len(s) > 100 {
+ // Huge strings are hashed to avoid long names in object files.
+ // Indulge in some paranoia by writing the length of s, too,
+ // as protection against length extension attacks.
+ // The same naming pattern is used by fileStringSym below.
+ h := sha256.New()
+ io.WriteString(h, s)
+ symname = fmt.Sprintf(stringSymPattern, len(s), h.Sum(nil))
+ } else {
+ // Small strings get named directly by their contents.
+ symname = strconv.Quote(s)
+ }
+
+ symdata := base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ off := dstringdata(symdata, 0, s, pos, "string")
+ objw.Global(symdata, int32(off), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ symdata.Set(obj.AttrContentAddressable, true)
+ }
+
+ return symdata
+}
+
+// maxFileSize is the maximum file size permitted by the linker
+// (see issue #9862).
+const maxFileSize = int64(2e9)
+
+// fileStringSym returns a symbol for the contents and the size of file.
+// If readonly is true, the symbol shares storage with any literal string
+// or other file with the same content and is placed in a read-only section.
+// If readonly is false, the symbol is a read-write copy separate from any other,
+// for use as the backing store of a []byte.
+// The content hash of file is copied into hash. (If hash is nil, nothing is copied.)
+// The returned symbol contains the data itself, not a string header.
+func fileStringSym(pos src.XPos, file string, readonly bool, hash []byte) (*obj.LSym, int64, error) {
+ f, err := os.Open(file)
+ if err != nil {
+ return nil, 0, err
+ }
+ defer f.Close()
+ info, err := f.Stat()
+ if err != nil {
+ return nil, 0, err
+ }
+ if !info.Mode().IsRegular() {
+ return nil, 0, fmt.Errorf("not a regular file")
+ }
+ size := info.Size()
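+ // Small files (at most 1 KB) are read entirely into memory and emitted
+ // like ordinary string or slice data; larger files are referenced by a
+ // file-backed symbol below.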
+ if size <= 1*1024 {
+ data, err := ioutil.ReadAll(f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if int64(len(data)) != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ var sym *obj.LSym
+ if readonly {
+ sym = StringSym(pos, string(data))
+ } else {
+ sym = slicedata(pos, string(data)).Linksym()
+ }
+ if len(hash) > 0 {
+ sum := sha256.Sum256(data)
+ copy(hash, sum[:])
+ }
+ return sym, size, nil
+ }
+ if size > maxFileSize {
+ // ggloblsym takes an int32,
+ // and probably the rest of the toolchain
+ // can't handle such big symbols either.
+ // See golang.org/issue/9862.
+ return nil, 0, fmt.Errorf("file too large (%d bytes > %d bytes)", size, maxFileSize)
+ }
+
+ // File is too big to read and keep in memory.
+ // Compute hash if needed for read-only content hashing or if the caller wants it.
+ var sum []byte
+ if readonly || len(hash) > 0 {
+ h := sha256.New()
+ n, err := io.Copy(h, f)
+ if err != nil {
+ return nil, 0, err
+ }
+ if n != size {
+ return nil, 0, fmt.Errorf("file changed between reads")
+ }
+ sum = h.Sum(nil)
+ copy(hash, sum)
+ }
+
+ var symdata *obj.LSym
+ if readonly {
+ symname := fmt.Sprintf(stringSymPattern, size, sum)
+ symdata = base.Ctxt.Lookup(stringSymPrefix + symname)
+ if !symdata.OnList() {
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ objw.Global(symdata, int32(size), obj.DUPOK|obj.RODATA|obj.LOCAL)
+ // Note: AttrContentAddressable cannot be set here,
+ // because the content-addressable-handling code
+ // does not know about file symbols.
+ }
+ } else {
+ // Emit a zero-length data symbol
+ // and then fix up length and content to use file.
+ symdata = slicedata(pos, "").Linksym()
+ symdata.Size = size
+ symdata.Type = objabi.SNOPTRDATA
+ info := symdata.NewFileInfo()
+ info.Name = file
+ info.Size = size
+ }
+
+ return symdata, size, nil
+}
+
+var slicedataGen int
+
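+// slicedata creates a local, writable data symbol containing s and returns
+// the ONAME node that refers to it. It is used as the backing store of
+// byte slices initialized from string constants and files.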
+func slicedata(pos src.XPos, s string) *ir.Name {
+ slicedataGen++
+ symname := fmt.Sprintf(".gobytes.%d", slicedataGen)
+ sym := types.LocalPkg.Lookup(symname)
+ symnode := typecheck.NewName(sym)
+ sym.Def = symnode
+
+ lsym := symnode.Linksym()
+ off := dstringdata(lsym, 0, s, pos, "slice")
+ objw.Global(lsym, int32(off), obj.NOPTR|obj.LOCAL)
+
+ return symnode
+}
+
+func dstringdata(s *obj.LSym, off int, t string, pos src.XPos, what string) int {
+ // Objects that are too large will cause the data section to overflow right away,
+ // causing a cryptic error message by the linker. Check for oversize objects here
+ // and provide a useful error message instead.
+ if int64(len(t)) > 2e9 {
+ base.ErrorfAt(pos, "%v with length %v is too big", what, len(t))
+ return 0
+ }
+
+ s.WriteString(base.Ctxt, int64(off), len(t), t)
+ return off + len(t)
+}
+
+var (
+ funcsymsmu sync.Mutex // protects funcsyms and associated package lookups (see func funcsym)
+ funcsyms []*ir.Name // functions that need function value symbols
+)
+
+// FuncLinksym returns n·f, the function value symbol for n.
+func FuncLinksym(n *ir.Name) *obj.LSym {
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("expected func name: %v", n)
+ }
+ s := n.Sym()
+
+ // funcsymsmu here serves to protect not just mutations of funcsyms (below),
+ // but also the package lookup of the func sym name,
+ // since this function gets called concurrently from the backend.
+ // There are no other concurrent package lookups in the backend,
+ // except for the types package, which is protected separately.
+ // Reusing funcsymsmu to also cover this package lookup
+ // avoids a general, broader, expensive package lookup mutex.
+ // Note NeedFuncSym also does package look-up of func sym names,
+ // but it is only called serially, from the front end.
+ funcsymsmu.Lock()
+ sf, existed := s.Pkg.LookupOK(ir.FuncSymName(s))
+ // Don't export s·f when compiling for dynamic linking.
+ // When dynamically linking, the necessary function
+ // symbols will be created explicitly with NeedFuncSym.
+ // See the NeedFuncSym comment for details.
+ if !base.Ctxt.Flag_dynlink && !existed {
+ funcsyms = append(funcsyms, n)
+ }
+ funcsymsmu.Unlock()
+
+ return sf.Linksym()
+}
+
+func GlobalLinksym(n *ir.Name) *obj.LSym {
+ if n.Op() != ir.ONAME || n.Class != ir.PEXTERN {
+ base.Fatalf("expected global variable: %v", n)
+ }
+ return n.Linksym()
+}
+
+// NeedFuncSym ensures that fn·f is exported, if needed.
+// It is only used with -dynlink.
+// When not compiling for dynamic linking,
+// the funcsyms are created as needed by
+// the packages that use them.
+// Normally we emit the fn·f stubs as DUPOK syms,
+// but DUPOK doesn't work across shared library boundaries.
+// So instead, when dynamic linking, we only create
+// the fn·f stubs in fn's package.
+func NeedFuncSym(fn *ir.Func) {
+ if base.Ctxt.InParallel {
+ // The append below probably just needs to lock
+ // funcsymsmu, like in FuncSym.
+ base.Fatalf("NeedFuncSym must be called in serial")
+ }
+ if fn.ABI != obj.ABIInternal && buildcfg.Experiment.RegabiWrappers {
+ // Function values must always reference ABIInternal
+ // entry points, so it doesn't make sense to create a
+ // funcsym for other ABIs.
+ //
+ // (If we're not using ABI wrappers, it doesn't matter.)
+ base.Fatalf("expected ABIInternal: %v has %v", fn.Nname, fn.ABI)
+ }
+ if ir.IsBlank(fn.Nname) {
+ // Blank functions aren't unique, so we can't make a
+ // funcsym for them.
+ base.Fatalf("NeedFuncSym called for _")
+ }
+ if !base.Ctxt.Flag_dynlink {
+ return
+ }
+ s := fn.Nname.Sym()
+ if base.Flag.CompilingRuntime && (s.Name == "getg" || s.Name == "getclosureptr" || s.Name == "getcallerpc" || s.Name == "getcallersp") ||
+ (base.Ctxt.Pkgpath == "internal/abi" && (s.Name == "FuncPCABI0" || s.Name == "FuncPCABIInternal")) {
+ // runtime.getg(), getclosureptr(), getcallerpc(), getcallersp(),
+ // and internal/abi.FuncPCABIxxx() are not real functions and so
+ // do not get funcsyms.
+ return
+ }
+ funcsyms = append(funcsyms, fn.Nname)
+}
+
+func WriteFuncSyms() {
+ sort.Slice(funcsyms, func(i, j int) bool {
+ return funcsyms[i].Linksym().Name < funcsyms[j].Linksym().Name
+ })
+ for _, nam := range funcsyms {
+ s := nam.Sym()
+ sf := s.Pkg.Lookup(ir.FuncSymName(s)).Linksym()
+ // Function values must always reference ABIInternal
+ // entry points.
+ target := s.Linksym()
+ if target.ABI() != obj.ABIInternal {
+ base.Fatalf("expected ABIInternal: %v has %v", target, target.ABI())
+ }
+ objw.SymPtr(sf, 0, target, 0)
+ objw.Global(sf, int32(types.PtrSize), obj.DUPOK|obj.RODATA)
+ }
+}
+
+// InitConst writes the static literal c to n.
+// Neither n nor c is modified.
+func InitConst(n *ir.Name, noff int64, c ir.Node, wid int) {
+ if n.Op() != ir.ONAME {
+ base.Fatalf("InitConst n op %v", n.Op())
+ }
+ if n.Sym() == nil {
+ base.Fatalf("InitConst nil n sym")
+ }
+ if c.Op() == ir.ONIL {
+ return
+ }
+ if c.Op() != ir.OLITERAL {
+ base.Fatalf("InitConst c op %v", c.Op())
+ }
+ s := n.Linksym()
+ switch u := c.Val(); u.Kind() {
+ case constant.Bool:
+ i := int64(obj.Bool2int(constant.BoolVal(u)))
+ s.WriteInt(base.Ctxt, noff, wid, i)
+
+ case constant.Int:
+ s.WriteInt(base.Ctxt, noff, wid, ir.IntVal(c.Type(), u))
+
+ case constant.Float:
+ f, _ := constant.Float64Val(u)
+ switch c.Type().Kind() {
+ case types.TFLOAT32:
+ s.WriteFloat32(base.Ctxt, noff, float32(f))
+ case types.TFLOAT64:
+ s.WriteFloat64(base.Ctxt, noff, f)
+ }
+
+ case constant.Complex:
+ re, _ := constant.Float64Val(constant.Real(u))
+ im, _ := constant.Float64Val(constant.Imag(u))
+ switch c.Type().Kind() {
+ case types.TCOMPLEX64:
+ s.WriteFloat32(base.Ctxt, noff, float32(re))
+ s.WriteFloat32(base.Ctxt, noff+4, float32(im))
+ case types.TCOMPLEX128:
+ s.WriteFloat64(base.Ctxt, noff, re)
+ s.WriteFloat64(base.Ctxt, noff+8, im)
+ }
+
+ case constant.String:
+ i := constant.StringVal(u)
+ symdata := StringSym(n.Pos(), i)
+ s.WriteAddr(base.Ctxt, noff, types.PtrSize, symdata, 0)
+ s.WriteInt(base.Ctxt, noff+int64(types.PtrSize), types.PtrSize, int64(len(i)))
+
+ default:
+ base.Fatalf("InitConst unhandled OLITERAL %v", c)
+ }
+}
diff --git a/src/cmd/compile/internal/staticdata/embed.go b/src/cmd/compile/internal/staticdata/embed.go
new file mode 100644
index 0000000..627c98b
--- /dev/null
+++ b/src/cmd/compile/internal/staticdata/embed.go
@@ -0,0 +1,174 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticdata
+
+import (
+ "path"
+ "sort"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+const (
+ embedUnknown = iota
+ embedBytes
+ embedString
+ embedFiles
+)
+
+func embedFileList(v *ir.Name, kind int) []string {
+ // Build list of files to store.
+ have := make(map[string]bool)
+ var list []string
+ for _, e := range *v.Embed {
+ for _, pattern := range e.Patterns {
+ files, ok := base.Flag.Cfg.Embed.Patterns[pattern]
+ if !ok {
+ base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map pattern: %s", pattern)
+ }
+ for _, file := range files {
+ if base.Flag.Cfg.Embed.Files[file] == "" {
+ base.ErrorfAt(e.Pos, "invalid go:embed: build system did not map file: %s", file)
+ continue
+ }
+ if !have[file] {
+ have[file] = true
+ list = append(list, file)
+ }
+ if kind == embedFiles {
+ for dir := path.Dir(file); dir != "." && !have[dir]; dir = path.Dir(dir) {
+ have[dir] = true
+ list = append(list, dir+"/")
+ }
+ }
+ }
+ }
+ }
+ sort.Slice(list, func(i, j int) bool {
+ return embedFileLess(list[i], list[j])
+ })
+
+ if kind == embedString || kind == embedBytes {
+ if len(list) > 1 {
+ base.ErrorfAt(v.Pos(), "invalid go:embed: multiple files for type %v", v.Type())
+ return nil
+ }
+ }
+
+ return list
+}
+
+// embedKind determines the kind of embedding variable.
+func embedKind(typ *types.Type) int {
+ if typ.Sym() != nil && typ.Sym().Name == "FS" && (typ.Sym().Pkg.Path == "embed" || (typ.Sym().Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "embed")) {
+ return embedFiles
+ }
+ if typ.Kind() == types.TSTRING {
+ return embedString
+ }
+ if typ.IsSlice() && typ.Elem().Kind() == types.TUINT8 {
+ return embedBytes
+ }
+ return embedUnknown
+}
+
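+// embedFileNameSplit splits name into its directory and final element.
+// A trailing slash marks name as a directory; it is dropped before splitting.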
+func embedFileNameSplit(name string) (dir, elem string, isDir bool) {
+ if name[len(name)-1] == '/' {
+ isDir = true
+ name = name[:len(name)-1]
+ }
+ i := len(name) - 1
+ for i >= 0 && name[i] != '/' {
+ i--
+ }
+ if i < 0 {
+ return ".", name, isDir
+ }
+ return name[:i], name[i+1:], isDir
+}
+
+// embedFileLess implements the sort order for a list of embedded files.
+// See the comment inside ../../../../embed/embed.go's Files struct for rationale.
+func embedFileLess(x, y string) bool {
+ xdir, xelem, _ := embedFileNameSplit(x)
+ ydir, yelem, _ := embedFileNameSplit(y)
+ return xdir < ydir || xdir == ydir && xelem < yelem
+}
+
+// WriteEmbed emits the init data for a //go:embed variable,
+// which is either a string, a []byte, or an embed.FS.
+func WriteEmbed(v *ir.Name) {
+ // TODO(mdempsky): User errors should be reported by the frontend.
+
+ commentPos := (*v.Embed)[0].Pos
+ if base.Flag.Cfg.Embed.Patterns == nil {
+ base.ErrorfAt(commentPos, "invalid go:embed: build system did not supply embed configuration")
+ return
+ }
+ kind := embedKind(v.Type())
+ if kind == embedUnknown {
+ base.ErrorfAt(v.Pos(), "go:embed cannot apply to var of type %v", v.Type())
+ return
+ }
+
+ files := embedFileList(v, kind)
+ switch kind {
+ case embedString, embedBytes:
+ file := files[0]
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], kind == embedString, nil)
+ if err != nil {
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
+ }
+ sym := v.Linksym()
+ off := 0
+ off = objw.SymPtr(sym, off, fsym, 0) // data string
+ off = objw.Uintptr(sym, off, uint64(size)) // len
+ if kind == embedBytes {
+ objw.Uintptr(sym, off, uint64(size)) // cap for slice
+ }
+
+ case embedFiles:
+ slicedata := base.Ctxt.Lookup(`"".` + v.Sym().Name + `.files`)
+ off := 0
+ // []files pointed at by Files
+ off = objw.SymPtr(slicedata, off, slicedata, 3*types.PtrSize) // []file, pointing just past slice
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+ off = objw.Uintptr(slicedata, off, uint64(len(files)))
+
+ // embed/embed.go type file is:
+ // name string
+ // data string
+ // hash [16]byte
+ // Emit one of these per file in the set.
+ const hashSize = 16
+ hash := make([]byte, hashSize)
+ for _, file := range files {
+ off = objw.SymPtr(slicedata, off, StringSym(v.Pos(), file), 0) // file string
+ off = objw.Uintptr(slicedata, off, uint64(len(file)))
+ if strings.HasSuffix(file, "/") {
+ // entry for directory - no data
+ off = objw.Uintptr(slicedata, off, 0)
+ off = objw.Uintptr(slicedata, off, 0)
+ off += hashSize
+ } else {
+ fsym, size, err := fileStringSym(v.Pos(), base.Flag.Cfg.Embed.Files[file], true, hash)
+ if err != nil {
+ base.ErrorfAt(v.Pos(), "embed %s: %v", file, err)
+ }
+ off = objw.SymPtr(slicedata, off, fsym, 0) // data string
+ off = objw.Uintptr(slicedata, off, uint64(size))
+ off = int(slicedata.WriteBytes(base.Ctxt, int64(off), hash))
+ }
+ }
+ objw.Global(slicedata, int32(off), obj.RODATA|obj.LOCAL)
+ sym := v.Linksym()
+ objw.SymPtr(sym, 0, slicedata, 0)
+ }
+}
diff --git a/src/cmd/compile/internal/staticinit/sched.go b/src/cmd/compile/internal/staticinit/sched.go
new file mode 100644
index 0000000..636199d
--- /dev/null
+++ b/src/cmd/compile/internal/staticinit/sched.go
@@ -0,0 +1,609 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package staticinit
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+type Entry struct {
+ Xoffset int64 // struct, array only
+ Expr ir.Node // bytes of run-time computed expressions
+}
+
+type Plan struct {
+ E []Entry
+}
+
+// A Schedule is used to decompose assignment statements into
+// static and dynamic initialization parts. Static initializations are
+// handled by populating variables' linker symbol data, while dynamic
+// initializations are accumulated to be executed in order.
+type Schedule struct {
+ // Out is the ordered list of dynamic initialization
+ // statements.
+ Out []ir.Node
+
+ Plans map[ir.Node]*Plan
+ Temps map[ir.Node]*ir.Name
+}
+
+func (s *Schedule) append(n ir.Node) {
+ s.Out = append(s.Out, n)
+}
+
+// StaticInit adds an initialization statement n to the schedule.
+func (s *Schedule) StaticInit(n ir.Node) {
+ if !s.tryStaticInit(n) {
+ if base.Flag.Percent != 0 {
+ ir.Dump("nonstatic", n)
+ }
+ s.append(n)
+ }
+}
+
+// tryStaticInit attempts to statically execute an initialization
+// statement and reports whether it succeeded.
+func (s *Schedule) tryStaticInit(nn ir.Node) bool {
+ // Only worry about simple "l = r" assignments. Multiple
+ // variable/expression OAS2 assignments have already been
+ // replaced by multiple simple OAS assignments, and the other
+ // OAS2* assignments mostly necessitate dynamic execution
+ // anyway.
+ if nn.Op() != ir.OAS {
+ return false
+ }
+ n := nn.(*ir.AssignStmt)
+ if ir.IsBlank(n.X) && !AnySideEffects(n.Y) {
+ // Discard.
+ return true
+ }
+ lno := ir.SetPos(n)
+ defer func() { base.Pos = lno }()
+ nam := n.X.(*ir.Name)
+ return s.StaticAssign(nam, 0, n.Y, nam.Type())
+}
+
+// staticcopy is like StaticAssign, but copies an already-initialized
+// value from the existing global rn.
+func (s *Schedule) staticcopy(l *ir.Name, loff int64, rn *ir.Name, typ *types.Type) bool {
+ if rn.Class == ir.PFUNC {
+ // TODO if roff != 0 { panic }
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(rn))
+ return true
+ }
+ if rn.Class != ir.PEXTERN || rn.Sym().Pkg != types.LocalPkg {
+ return false
+ }
+ if rn.Defn.Op() != ir.OAS {
+ return false
+ }
+ if rn.Type().IsString() { // perhaps overwritten by cmd/link -X (#34675)
+ return false
+ }
+ if rn.Embed != nil {
+ return false
+ }
+ orig := rn
+ r := rn.Defn.(*ir.AssignStmt).Y
+ if r == nil {
+ // No explicit initialization value. Probably zeroed but perhaps
+ // supplied externally and of unknown value.
+ return false
+ }
+
+ for r.Op() == ir.OCONVNOP && !types.Identical(r.Type(), typ) {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ switch r.Op() {
+ case ir.OMETHEXPR:
+ r = r.(*ir.SelectorExpr).FuncName()
+ fallthrough
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if s.staticcopy(l, loff, r, typ) {
+ return true
+ }
+ // We may have skipped past one or more OCONVNOPs, so
+ // use conv to ensure r is assignable to l (#13263).
+ dst := ir.Node(l)
+ if loff != 0 || !types.Identical(typ, l.Type()) {
+ dst = ir.NewNameOffsetExpr(base.Pos, l, loff, typ)
+ }
+ s.append(ir.NewAssignStmt(base.Pos, dst, typecheck.Conv(r, typ)))
+ return true
+
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Size()))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if a, ok := r.X.(*ir.Name); ok && a.Op() == ir.ONAME {
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(a))
+ return true
+ }
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OSTRUCTLIT, ir.OMAPLIT:
+ // copy pointer
+ staticdata.InitAddr(l, loff, staticdata.GlobalLinksym(s.Temps[r]))
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ // copy slice
+ staticdata.InitSlice(l, loff, staticdata.GlobalLinksym(s.Temps[r]), r.Len)
+ return true
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ p := s.Plans[r]
+ for i := range p.E {
+ e := &p.E[i]
+ typ := e.Expr.Type()
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(typ.Size()))
+ continue
+ }
+ x := e.Expr
+ if x.Op() == ir.OMETHEXPR {
+ x = x.(*ir.SelectorExpr).FuncName()
+ }
+ if x.Op() == ir.ONAME && s.staticcopy(l, loff+e.Xoffset, x.(*ir.Name), typ) {
+ continue
+ }
+ // Requires computation, but we're
+ // copying someone else's computation.
+ ll := ir.NewNameOffsetExpr(base.Pos, l, loff+e.Xoffset, typ)
+ rr := ir.NewNameOffsetExpr(base.Pos, orig, e.Xoffset, typ)
+ ir.SetPos(rr)
+ s.append(ir.NewAssignStmt(base.Pos, ll, rr))
+ }
+
+ return true
+ }
+
+ return false
+}
+
+func (s *Schedule) StaticAssign(l *ir.Name, loff int64, r ir.Node, typ *types.Type) bool {
+ if r == nil {
+ // No explicit initialization value. Either zero or supplied
+ // externally.
+ return true
+ }
+ for r.Op() == ir.OCONVNOP {
+ r = r.(*ir.ConvExpr).X
+ }
+
+ assign := func(pos src.XPos, a *ir.Name, aoff int64, v ir.Node) {
+ if s.StaticAssign(a, aoff, v, v.Type()) {
+ return
+ }
+ var lhs ir.Node
+ if ir.IsBlank(a) {
+ // Don't use NameOffsetExpr with blank (#43677).
+ lhs = ir.BlankNode
+ } else {
+ lhs = ir.NewNameOffsetExpr(pos, a, aoff, v.Type())
+ }
+ s.append(ir.NewAssignStmt(pos, lhs, v))
+ }
+
+ switch r.Op() {
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ return s.staticcopy(l, loff, r, typ)
+
+ case ir.OMETHEXPR:
+ r := r.(*ir.SelectorExpr)
+ return s.staticcopy(l, loff, r.FuncName(), typ)
+
+ case ir.ONIL:
+ return true
+
+ case ir.OLITERAL:
+ if ir.IsZero(r) {
+ return true
+ }
+ staticdata.InitConst(l, loff, r, int(typ.Size()))
+ return true
+
+ case ir.OADDR:
+ r := r.(*ir.AddrExpr)
+ if name, offset, ok := StaticLoc(r.X); ok && name.Class == ir.PEXTERN {
+ staticdata.InitAddrOffset(l, loff, name.Linksym(), offset)
+ return true
+ }
+ fallthrough
+
+ case ir.OPTRLIT:
+ r := r.(*ir.AddrExpr)
+ switch r.X.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT:
+ // Init pointer.
+ a := StaticName(r.X.Type())
+
+ s.Temps[r] = a
+ staticdata.InitAddr(l, loff, a.Linksym())
+
+ // Init underlying literal.
+ assign(base.Pos, a, 0, r.X)
+ return true
+ }
+ //dump("not static ptrlit", r);
+
+ case ir.OSTR2BYTES:
+ r := r.(*ir.ConvExpr)
+ if l.Class == ir.PEXTERN && r.X.Op() == ir.OLITERAL {
+ sval := ir.StringVal(r.X)
+ staticdata.InitSliceBytes(l, loff, sval)
+ return true
+ }
+
+ case ir.OSLICELIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+ // Init slice.
+ ta := types.NewArray(r.Type().Elem(), r.Len)
+ ta.SetNoalg(true)
+ a := StaticName(ta)
+ s.Temps[r] = a
+ staticdata.InitSlice(l, loff, a.Linksym(), r.Len)
+ // Fall through to init underlying array.
+ l = a
+ loff = 0
+ fallthrough
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ r := r.(*ir.CompLitExpr)
+ s.initplan(r)
+
+ p := s.Plans[r]
+ for i := range p.E {
+ e := &p.E[i]
+ if e.Expr.Op() == ir.OLITERAL || e.Expr.Op() == ir.ONIL {
+ staticdata.InitConst(l, loff+e.Xoffset, e.Expr, int(e.Expr.Type().Size()))
+ continue
+ }
+ ir.SetPos(e.Expr)
+ assign(base.Pos, l, loff+e.Xoffset, e.Expr)
+ }
+
+ return true
+
+ case ir.OMAPLIT:
+ break
+
+ case ir.OCLOSURE:
+ r := r.(*ir.ClosureExpr)
+ if ir.IsTrivialClosure(r) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(r.Pos(), "closure converted to global")
+ }
+ // Closures with no captured variables are globals,
+ // so the assignment can be done at link time.
+ // TODO if roff != 0 { panic }
+ staticdata.InitAddr(l, loff, staticdata.FuncLinksym(r.Func.Nname))
+ return true
+ }
+ ir.ClosureDebugRuntimeCheck(r)
+
+ case ir.OCONVIFACE:
+ // This logic is mirrored in isStaticCompositeLiteral.
+ // If you change something here, change it there, and vice versa.
+
+ // Determine the underlying concrete type and value we are converting from.
+ r := r.(*ir.ConvExpr)
+ val := ir.Node(r)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).X
+ }
+
+ if val.Type().IsInterface() {
+ // val is an interface type.
+ // If val is nil, we can statically initialize l;
+			// both words are zero, so there is no work to do; report success.
+ // If val is non-nil, we have no concrete type to record,
+ // and we won't be able to statically initialize its value, so report failure.
+ return val.Op() == ir.ONIL
+ }
+
+ reflectdata.MarkTypeUsedInInterface(val.Type(), l.Linksym())
+
+ var itab *ir.AddrExpr
+ if typ.IsEmptyInterface() {
+ itab = reflectdata.TypePtr(val.Type())
+ } else {
+ itab = reflectdata.ITabAddr(val.Type(), typ)
+ }
+
+ // Create a copy of l to modify while we emit data.
+
+ // Emit itab, advance offset.
+ staticdata.InitAddr(l, loff, itab.X.(*ir.LinksymOffsetExpr).Linksym)
+
+ // Emit data.
+ if types.IsDirectIface(val.Type()) {
+ if val.Op() == ir.ONIL {
+ // Nil is zero, nothing to do.
+ return true
+ }
+ // Copy val directly into n.
+ ir.SetPos(val)
+ assign(base.Pos, l, loff+int64(types.PtrSize), val)
+ } else {
+ // Construct temp to hold val, write pointer to temp into n.
+ a := StaticName(val.Type())
+ s.Temps[val] = a
+ assign(base.Pos, a, 0, val)
+ staticdata.InitAddr(l, loff+int64(types.PtrSize), a.Linksym())
+ }
+
+ return true
+ }
+
+ //dump("not static", r);
+ return false
+}
+
+func (s *Schedule) initplan(n ir.Node) {
+ if s.Plans[n] != nil {
+ return
+ }
+ p := new(Plan)
+ s.Plans[n] = p
+ switch n.Op() {
+ default:
+ base.Fatalf("initplan")
+
+ case ir.OARRAYLIT, ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ var k int64
+ for _, a := range n.List {
+ if a.Op() == ir.OKEY {
+ kv := a.(*ir.KeyExpr)
+ k = typecheck.IndexConst(kv.Key)
+ if k < 0 {
+ base.Fatalf("initplan arraylit: invalid index %v", kv.Key)
+ }
+ a = kv.Value
+ }
+ s.addvalue(p, k*n.Type().Elem().Size(), a)
+ k++
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OSTRUCTKEY {
+ base.Fatalf("initplan structlit")
+ }
+ a := a.(*ir.StructKeyExpr)
+ if a.Sym().IsBlank() {
+ continue
+ }
+ s.addvalue(p, a.Field.Offset, a.Value)
+ }
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, a := range n.List {
+ if a.Op() != ir.OKEY {
+ base.Fatalf("initplan maplit")
+ }
+ a := a.(*ir.KeyExpr)
+ s.addvalue(p, -1, a.Value)
+ }
+ }
+}
+
+func (s *Schedule) addvalue(p *Plan, xoffset int64, n ir.Node) {
+ // special case: zero can be dropped entirely
+ if ir.IsZero(n) {
+ return
+ }
+
+ // special case: inline struct and array (not slice) literals
+ if isvaluelit(n) {
+ s.initplan(n)
+ q := s.Plans[n]
+ for _, qe := range q.E {
+ // qe is a copy; we are not modifying entries in q.E
+ qe.Xoffset += xoffset
+ p.E = append(p.E, qe)
+ }
+ return
+ }
+
+ // add to plan
+ p.E = append(p.E, Entry{Xoffset: xoffset, Expr: n})
+}
+
+// from here down is the walk analysis
+// of composite literals.
+// most of the work is to generate
+// data statements for the constant
+// part of the composite literal.
+
+var statuniqgen int // name generator for static temps
+
+// StaticName returns a name backed by a (writable) static data symbol.
+// Use readonlystaticname for a read-only node.
+func StaticName(t *types.Type) *ir.Name {
+ // Don't use LookupNum; it interns the resulting string, but these are all unique.
+ n := typecheck.NewName(typecheck.Lookup(fmt.Sprintf("%s%d", obj.StaticNamePref, statuniqgen)))
+ statuniqgen++
+ typecheck.Declare(n, ir.PEXTERN)
+ n.SetType(t)
+ return n
+}
+
+// StaticLoc returns the static address of n as a (name, offset) pair, if n has one; ok reports whether it does.
+func StaticLoc(n ir.Node) (name *ir.Name, offset int64, ok bool) {
+ if n == nil {
+ return nil, 0, false
+ }
+
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ return n, 0, true
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ return StaticLoc(n.FuncName())
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ offset += n.Offset()
+ return name, offset, true
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ if n.X.Type().IsSlice() {
+ break
+ }
+ if name, offset, ok = StaticLoc(n.X); !ok {
+ break
+ }
+ l := getlit(n.Index)
+ if l < 0 {
+ break
+ }
+
+ // Check for overflow.
+ if n.Type().Size() != 0 && types.MaxWidth/n.Type().Size() <= int64(l) {
+ break
+ }
+ offset += int64(l) * n.Type().Size()
+ return name, offset, true
+ }
+
+ return nil, 0, false
+}
+
+// AnySideEffects reports whether n contains any operations that could have observable side effects.
+func AnySideEffects(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ // Assume side effects unless we know otherwise.
+ default:
+ return true
+
+ // No side effects here (arguments are checked separately).
+ case ir.ONAME,
+ ir.ONONAME,
+ ir.OTYPE,
+ ir.OPACK,
+ ir.OLITERAL,
+ ir.ONIL,
+ ir.OADD,
+ ir.OSUB,
+ ir.OOR,
+ ir.OXOR,
+ ir.OADDSTR,
+ ir.OADDR,
+ ir.OANDAND,
+ ir.OBYTES2STR,
+ ir.ORUNES2STR,
+ ir.OSTR2BYTES,
+ ir.OSTR2RUNES,
+ ir.OCAP,
+ ir.OCOMPLIT,
+ ir.OMAPLIT,
+ ir.OSTRUCTLIT,
+ ir.OARRAYLIT,
+ ir.OSLICELIT,
+ ir.OPTRLIT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVNOP,
+ ir.ODOT,
+ ir.OEQ,
+ ir.ONE,
+ ir.OLT,
+ ir.OLE,
+ ir.OGT,
+ ir.OGE,
+ ir.OKEY,
+ ir.OSTRUCTKEY,
+ ir.OLEN,
+ ir.OMUL,
+ ir.OLSH,
+ ir.ORSH,
+ ir.OAND,
+ ir.OANDNOT,
+ ir.ONEW,
+ ir.ONOT,
+ ir.OBITNOT,
+ ir.OPLUS,
+ ir.ONEG,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.ORUNESTR,
+ ir.OREAL,
+ ir.OIMAG,
+ ir.OCOMPLEX:
+ return false
+
+ // Only possible side effect is division by zero.
+ case ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ if n.Y.Op() != ir.OLITERAL || constant.Sign(n.Y.Val()) == 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size,
+ // but many makechan and makemap use size zero, which is definitely OK.
+ case ir.OMAKECHAN, ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ if !ir.IsConst(n.Len, constant.Int) || constant.Sign(n.Len.Val()) != 0 {
+ return true
+ }
+
+ // Only possible side effect is panic on invalid size.
+ // TODO(rsc): Merge with previous case (probably breaks toolstash -cmp).
+ case ir.OMAKESLICE, ir.OMAKESLICECOPY:
+ return true
+ }
+ return false
+ })
+}
+
+func getlit(lit ir.Node) int {
+ if ir.IsSmallIntConst(lit) {
+ return int(ir.Int64Val(lit))
+ }
+ return -1
+}
+
+func isvaluelit(n ir.Node) bool {
+ return n.Op() == ir.OARRAYLIT || n.Op() == ir.OSTRUCTLIT
+}
diff --git a/src/cmd/compile/internal/syntax/branches.go b/src/cmd/compile/internal/syntax/branches.go
new file mode 100644
index 0000000..56e97c7
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/branches.go
@@ -0,0 +1,311 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// TODO(gri) consider making this part of the parser code
+
+// checkBranches checks correct use of labels and branch
+// statements (break, continue, goto) in a function body.
+// It catches:
+// - misplaced breaks and continues
+// - bad labeled breaks and continues
+// - invalid, unused, duplicate, and missing labels
+// - gotos jumping over variable declarations and into blocks
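+//
+// For example (illustrative only), the function body below is rejected
+// because the goto jumps over the declaration of x:
+//
+//	goto L // error: goto L jumps over declaration of x at ...
+//	x := 0
+//	L:
+//		_ = x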
+func checkBranches(body *BlockStmt, errh ErrorHandler) {
+ if body == nil {
+ return
+ }
+
+ // scope of all labels in this body
+ ls := &labelScope{errh: errh}
+ fwdGotos := ls.blockBranches(nil, targets{}, nil, body.Pos(), body.List)
+
+ // If there are any forward gotos left, no matching label was
+ // found for them. Either those labels were never defined, or
+ // they are inside blocks and not reachable from the gotos.
+ for _, fwd := range fwdGotos {
+ name := fwd.Label.Value
+ if l := ls.labels[name]; l != nil {
+ l.used = true // avoid "defined and not used" error
+ ls.err(fwd.Label.Pos(), "goto %s jumps into block starting at %s", name, l.parent.start)
+ } else {
+ ls.err(fwd.Label.Pos(), "label %s not defined", name)
+ }
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for _, l := range ls.labels {
+ if !l.used {
+ l := l.lstmt.Label
+ ls.err(l.Pos(), "label %s defined and not used", l.Value)
+ }
+ }
+}
+
+type labelScope struct {
+ errh ErrorHandler
+ labels map[string]*label // all label declarations inside the function; allocated lazily
+}
+
+type label struct {
+ parent *block // block containing this label declaration
+ lstmt *LabeledStmt // statement declaring the label
+ used bool // whether the label is used or not
+}
+
+type block struct {
+ parent *block // immediately enclosing block, or nil
+ start Pos // start of block
+ lstmt *LabeledStmt // labeled statement associated with this block, or nil
+}
+
+func (ls *labelScope) err(pos Pos, format string, args ...interface{}) {
+ ls.errh(Error{pos, fmt.Sprintf(format, args...)})
+}
+
+// declare declares the label introduced by s in block b and returns
+// the new label. If the label was already declared, declare reports
+// an error and returns the existing label instead.
+func (ls *labelScope) declare(b *block, s *LabeledStmt) *label {
+ name := s.Label.Value
+ labels := ls.labels
+ if labels == nil {
+ labels = make(map[string]*label)
+ ls.labels = labels
+ } else if alt := labels[name]; alt != nil {
+ ls.err(s.Label.Pos(), "label %s already defined at %s", name, alt.lstmt.Label.Pos().String())
+ return alt
+ }
+ l := &label{b, s, false}
+ labels[name] = l
+ return l
+}
+
+// gotoTarget returns the labeled statement matching the given name and
+// declared in block b or any of its enclosing blocks. The result is nil
+// if the label is not defined, or is not declared in b or an enclosing block.
+func (ls *labelScope) gotoTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target
+ for ; b != nil; b = b.parent {
+ if l.parent == b {
+ return l.lstmt
+ }
+ }
+ }
+ return nil
+}
+
+var invalid = new(LabeledStmt) // singleton to signal invalid enclosing target
+
+// enclosingTarget returns the innermost enclosing labeled statement matching
+// the given name. The result is nil if the label is not defined, and invalid
+// if the label is defined but does not label an enclosing statement.
+func (ls *labelScope) enclosingTarget(b *block, name string) *LabeledStmt {
+ if l := ls.labels[name]; l != nil {
+ l.used = true // even if it's not a valid target (see e.g., test/fixedbugs/bug136.go)
+ for ; b != nil; b = b.parent {
+ if l.lstmt == b.lstmt {
+ return l.lstmt
+ }
+ }
+ return invalid
+ }
+ return nil
+}
+
+// targets describes the target statements within which break
+// or continue statements are valid.
+type targets struct {
+ breaks Stmt // *ForStmt, *SwitchStmt, *SelectStmt, or nil
+ continues *ForStmt // or nil
+}
+
+// blockBranches processes a block's body starting at start and returns the
+// list of unresolved (forward) gotos. parent is the immediately enclosing
+// block (or nil), ctxt provides information about the enclosing statements,
+// and lstmt is the labeled statement associated with this block, or nil.
+func (ls *labelScope) blockBranches(parent *block, ctxt targets, lstmt *LabeledStmt, start Pos, body []Stmt) []*BranchStmt {
+ b := &block{parent: parent, start: start, lstmt: lstmt}
+
+ var varPos Pos
+ var varName Expr
+ var fwdGotos, badGotos []*BranchStmt
+
+ recordVarDecl := func(pos Pos, name Expr) {
+ varPos = pos
+ varName = name
+ // Any existing forward goto jumping over the variable
+ // declaration is invalid. The goto may still jump out
+ // of the block and be ok, but we don't know that yet.
+ // Remember all forward gotos as potential bad gotos.
+ badGotos = append(badGotos[:0], fwdGotos...)
+ }
+
+ jumpsOverVarDecl := func(fwd *BranchStmt) bool {
+ if varPos.IsKnown() {
+ for _, bad := range badGotos {
+ if fwd == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ innerBlock := func(ctxt targets, start Pos, body []Stmt) {
+ // Unresolved forward gotos from the inner block
+ // become forward gotos for the current block.
+ fwdGotos = append(fwdGotos, ls.blockBranches(b, ctxt, lstmt, start, body)...)
+ }
+
+ for _, stmt := range body {
+ lstmt = nil
+ L:
+ switch s := stmt.(type) {
+ case *DeclStmt:
+ for _, d := range s.DeclList {
+ if v, ok := d.(*VarDecl); ok {
+ recordVarDecl(v.Pos(), v.NameList[0])
+ break // the first VarDecl will do
+ }
+ }
+
+ case *LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Value; name != "_" {
+ l := ls.declare(b, s)
+ // resolve matching forward gotos
+ i := 0
+ for _, fwd := range fwdGotos {
+ if fwd.Label.Value == name {
+ fwd.Target = s
+ l.used = true
+ if jumpsOverVarDecl(fwd) {
+ ls.err(
+ fwd.Label.Pos(),
+ "goto %s jumps over declaration of %s at %s",
+ name, String(varName), varPos,
+ )
+ }
+ } else {
+ // no match - keep forward goto
+ fwdGotos[i] = fwd
+ i++
+ }
+ }
+ fwdGotos = fwdGotos[:i]
+ lstmt = s
+ }
+ // process labeled statement
+ stmt = s.Stmt
+ goto L
+
+ case *BranchStmt:
+ // unlabeled branch statement
+ if s.Label == nil {
+ switch s.Tok {
+ case _Break:
+ if t := ctxt.breaks; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "break is not in a loop, switch, or select")
+ }
+ case _Continue:
+ if t := ctxt.continues; t != nil {
+ s.Target = t
+ } else {
+ ls.err(s.Pos(), "continue is not in a loop")
+ }
+ case _Fallthrough:
+ // nothing to do
+ case _Goto:
+ fallthrough // should always have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+ break
+ }
+
+ // labeled branch statement
+ name := s.Label.Value
+ switch s.Tok {
+ case _Break:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ switch t := t.Stmt.(type) {
+ case *SwitchStmt, *SelectStmt, *ForStmt:
+ s.Target = t
+ default:
+ ls.err(s.Label.Pos(), "invalid break label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "break label not defined: %s", name)
+ }
+
+ case _Continue:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ if t := ls.enclosingTarget(b, name); t != nil {
+ if t, ok := t.Stmt.(*ForStmt); ok {
+ s.Target = t
+ } else {
+ ls.err(s.Label.Pos(), "invalid continue label %s", name)
+ }
+ } else {
+ ls.err(s.Label.Pos(), "continue label not defined: %s", name)
+ }
+
+ case _Goto:
+ if t := ls.gotoTarget(b, name); t != nil {
+ s.Target = t
+ } else {
+ // label may be declared later - add goto to forward gotos
+ fwdGotos = append(fwdGotos, s)
+ }
+
+ case _Fallthrough:
+ fallthrough // should never have a label
+ default:
+ panic("invalid BranchStmt")
+ }
+
+ case *AssignStmt:
+ if s.Op == Def {
+ recordVarDecl(s.Pos(), s.Lhs)
+ }
+
+ case *BlockStmt:
+ innerBlock(ctxt, s.Pos(), s.List)
+
+ case *IfStmt:
+ innerBlock(ctxt, s.Then.Pos(), s.Then.List)
+ if s.Else != nil {
+ innerBlock(ctxt, s.Else.Pos(), []Stmt{s.Else})
+ }
+
+ case *ForStmt:
+ innerBlock(targets{s, s}, s.Body.Pos(), s.Body.List)
+
+ case *SwitchStmt:
+ inner := targets{s, ctxt.continues}
+ for _, cc := range s.Body {
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+
+ case *SelectStmt:
+ inner := targets{s, ctxt.continues}
+ for _, cc := range s.Body {
+ innerBlock(inner, cc.Pos(), cc.Body)
+ }
+ }
+ }
+
+ return fwdGotos
+}
diff --git a/src/cmd/compile/internal/syntax/dumper.go b/src/cmd/compile/internal/syntax/dumper.go
new file mode 100644
index 0000000..d524788
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper.go
@@ -0,0 +1,212 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax tree structures.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "reflect"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Fdump dumps the structure of the syntax tree rooted at n to w.
+// It is intended for debugging purposes; no specific output format
+// is guaranteed.
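+//
+// A typical (illustrative) call is Fdump(os.Stdout, root), where root is a
+// node returned by Parse or ParseFile.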
+func Fdump(w io.Writer, n Node) (err error) {
+ p := dumper{
+ output: w,
+ ptrmap: make(map[Node]int),
+ last: '\n', // force printing of line number on first line
+ }
+
+ defer func() {
+ if e := recover(); e != nil {
+ err = e.(writeError).err // re-panics if it's not a writeError
+ }
+ }()
+
+ if n == nil {
+ p.printf("nil\n")
+ return
+ }
+ p.dump(reflect.ValueOf(n), n)
+ p.printf("\n")
+
+ return
+}
+
+type dumper struct {
+ output io.Writer
+ ptrmap map[Node]int // node -> dump line number
+ indent int // current indentation level
+ last byte // last byte processed by Write
+ line int // current line number
+}
+
+var indentBytes = []byte(". ")
+
+func (p *dumper) Write(data []byte) (n int, err error) {
+ var m int
+ for i, b := range data {
+ // invariant: data[0:n] has been written
+ if b == '\n' {
+ m, err = p.output.Write(data[n : i+1])
+ n += m
+ if err != nil {
+ return
+ }
+ } else if p.last == '\n' {
+ p.line++
+ _, err = fmt.Fprintf(p.output, "%6d ", p.line)
+ if err != nil {
+ return
+ }
+ for j := p.indent; j > 0; j-- {
+ _, err = p.output.Write(indentBytes)
+ if err != nil {
+ return
+ }
+ }
+ }
+ p.last = b
+ }
+ if len(data) > n {
+ m, err = p.output.Write(data[n:])
+ n += m
+ }
+ return
+}
+
+// writeError wraps locally caught write errors so we can distinguish
+// them from genuine panics which we don't want to return as errors.
+type writeError struct {
+ err error
+}
+
+// printf is a convenience wrapper that takes care of print errors.
+func (p *dumper) printf(format string, args ...interface{}) {
+ if _, err := fmt.Fprintf(p, format, args...); err != nil {
+ panic(writeError{err})
+ }
+}
+
+// dump prints the contents of x.
+// If x is the reflect.Value of a struct s, where &s
+// implements Node, then &s should be passed for n -
+// this permits printing of the unexported span and
+// comments fields of the embedded isNode field by
+// calling the Span() and Comment() methods instead
+// of using reflection.
+func (p *dumper) dump(x reflect.Value, n Node) {
+ switch x.Kind() {
+ case reflect.Interface:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.dump(x.Elem(), nil)
+
+ case reflect.Ptr:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+
+ // special cases for identifiers w/o attached comments (common case)
+ if x, ok := x.Interface().(*Name); ok {
+ p.printf("%s @ %v", x.Value, x.Pos())
+ return
+ }
+
+ p.printf("*")
+ // Fields may share type expressions, and declarations
+ // may share the same group - use ptrmap to keep track
+ // of nodes that have been printed already.
+ if ptr, ok := x.Interface().(Node); ok {
+ if line, exists := p.ptrmap[ptr]; exists {
+ p.printf("(Node @ %d)", line)
+ return
+ }
+ p.ptrmap[ptr] = p.line
+ n = ptr
+ }
+ p.dump(x.Elem(), n)
+
+ case reflect.Slice:
+ if x.IsNil() {
+ p.printf("nil")
+ return
+ }
+ p.printf("%s (%d entries) {", x.Type(), x.Len())
+ if x.Len() > 0 {
+ p.indent++
+ p.printf("\n")
+ for i, n := 0, x.Len(); i < n; i++ {
+ p.printf("%d: ", i)
+ p.dump(x.Index(i), nil)
+ p.printf("\n")
+ }
+ p.indent--
+ }
+ p.printf("}")
+
+ case reflect.Struct:
+ typ := x.Type()
+
+ // if span, ok := x.Interface().(lexical.Span); ok {
+ // p.printf("%s", &span)
+ // return
+ // }
+
+ p.printf("%s {", typ)
+ p.indent++
+
+ first := true
+ if n != nil {
+ p.printf("\n")
+ first = false
+ // p.printf("Span: %s\n", n.Span())
+ // if c := *n.Comments(); c != nil {
+ // p.printf("Comments: ")
+ // p.dump(reflect.ValueOf(c), nil) // a Comment is not a Node
+ // p.printf("\n")
+ // }
+ }
+
+ for i, n := 0, typ.NumField(); i < n; i++ {
+ // Exclude non-exported fields because their
+ // values cannot be accessed via reflection.
+ if name := typ.Field(i).Name; isExported(name) {
+ if first {
+ p.printf("\n")
+ first = false
+ }
+ p.printf("%s: ", name)
+ p.dump(x.Field(i), nil)
+ p.printf("\n")
+ }
+ }
+
+ p.indent--
+ p.printf("}")
+
+ default:
+ switch x := x.Interface().(type) {
+ case string:
+ // print strings in quotes
+ p.printf("%q", x)
+ default:
+ p.printf("%v", x)
+ }
+ }
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
diff --git a/src/cmd/compile/internal/syntax/dumper_test.go b/src/cmd/compile/internal/syntax/dumper_test.go
new file mode 100644
index 0000000..033283a
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/dumper_test.go
@@ -0,0 +1,21 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "testing"
+)
+
+func TestDump(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, CheckBranches|AllowGenerics)
+
+ if ast != nil {
+ Fdump(testOut(), ast)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/error_test.go b/src/cmd/compile/internal/syntax/error_test.go
new file mode 100644
index 0000000..d87e8ea
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/error_test.go
@@ -0,0 +1,195 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a regression test harness for syntax errors.
+// The files in the testdata directory are parsed and the reported
+// errors are compared against the errors declared in those files.
+//
+// Errors are declared in place in the form of "error comments",
+// just before (or on the same line as) the offending token.
+//
+// Error comments must be of the form // ERROR rx or /* ERROR rx */
+// where rx is a regular expression that matches the reported error
+// message. The rx text comprises the comment text after "ERROR ",
+// with any white space around it stripped.
+//
+// If the line comment form is used, the reported error's line must
+// match the line of the error comment.
+//
+// If the regular comment form is used, the reported error's position
+// must match the position of the token immediately following the
+// error comment. Thus, /* ERROR ... */ comments should appear
+// immediately before the position where the error is reported.
+//
+// Currently, the test harness only supports one error comment per
+// token. If multiple error comments appear before a token, only
+// the last one is considered.
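+//
+// Schematically (the offending code and rx texts below are hypothetical):
+//
+//	badToken ...        // ERROR rx      (line of the error must match)
+//	/* ERROR rx */ badToken ...          (error position is the following token)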
+
+package syntax
+
+import (
+ "flag"
+ "fmt"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+)
+
+const testdata = "testdata" // directory containing test files
+
+var print = flag.Bool("print", false, "only print errors")
+
+// A position represents a source position in the current file.
+type position struct {
+ line, col uint
+}
+
+func (pos position) String() string {
+ return fmt.Sprintf("%d:%d", pos.line, pos.col)
+}
+
+func sortedPositions(m map[position]string) []position {
+ list := make([]position, len(m))
+ i := 0
+ for pos := range m {
+ list[i] = pos
+ i++
+ }
+ sort.Slice(list, func(i, j int) bool {
+ a, b := list[i], list[j]
+ return a.line < b.line || a.line == b.line && a.col < b.col
+ })
+ return list
+}
+
+// declaredErrors returns a map of source positions to error
+// patterns, extracted from error comments in the given file.
+// Error comments in the form of line comments use col = 0
+// in their position.
+func declaredErrors(t *testing.T, filename string) map[position]string {
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ declared := make(map[position]string)
+
+ var s scanner
+ var pattern string
+ s.init(f, func(line, col uint, msg string) {
+ // errors never start with '/' so they are automatically excluded here
+ switch {
+ case strings.HasPrefix(msg, "// ERROR "):
+ // we can't have another comment on the same line - just add it
+ declared[position{s.line, 0}] = strings.TrimSpace(msg[9:])
+ case strings.HasPrefix(msg, "/* ERROR "):
+ // we may have more comments before the next token - collect them
+ pattern = strings.TrimSpace(msg[9 : len(msg)-2])
+ }
+ }, comments)
+
+ // consume file
+ for {
+ s.next()
+ if pattern != "" {
+ declared[position{s.line, s.col}] = pattern
+ pattern = ""
+ }
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ return declared
+}
+
+func testSyntaxErrors(t *testing.T, filename string) {
+ declared := declaredErrors(t, filename)
+ if *print {
+ fmt.Println("Declared errors:")
+ for _, pos := range sortedPositions(declared) {
+ fmt.Printf("%s:%s: %s\n", filename, pos, declared[pos])
+ }
+
+ fmt.Println()
+ fmt.Println("Reported errors:")
+ }
+
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer f.Close()
+
+ var mode Mode
+ if strings.HasSuffix(filename, ".go2") {
+ mode = AllowGenerics
+ }
+ ParseFile(filename, func(err error) {
+ e, ok := err.(Error)
+ if !ok {
+ return
+ }
+
+ if *print {
+ fmt.Println(err)
+ return
+ }
+
+ orig := position{e.Pos.Line(), e.Pos.Col()}
+ pos := orig
+ pattern, found := declared[pos]
+ if !found {
+ // try line comment (only line must match)
+ pos = position{e.Pos.Line(), 0}
+ pattern, found = declared[pos]
+ }
+ if found {
+ rx, err := regexp.Compile(pattern)
+ if err != nil {
+ t.Errorf("%s:%s: %v", filename, pos, err)
+ return
+ }
+ if match := rx.MatchString(e.Msg); !match {
+ t.Errorf("%s:%s: %q does not match %q", filename, pos, e.Msg, pattern)
+ return
+ }
+ // we have a match - eliminate this error
+ delete(declared, pos)
+ } else {
+ t.Errorf("%s:%s: unexpected error: %s", filename, orig, e.Msg)
+ }
+ }, nil, mode)
+
+ if *print {
+ fmt.Println()
+ return // we're done
+ }
+
+ // report expected but not reported errors
+ for pos, pattern := range declared {
+ t.Errorf("%s:%s: missing error: %s", filename, pos, pattern)
+ }
+}
+
+func TestSyntaxErrors(t *testing.T) {
+ testenv.MustHaveGoBuild(t) // we need access to source (testdata)
+
+ list, err := ioutil.ReadDir(testdata)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") {
+ testSyntaxErrors(t, filepath.Join(testdata, name))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/nodes.go b/src/cmd/compile/internal/syntax/nodes.go
new file mode 100644
index 0000000..2f9b43e
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes.go
@@ -0,0 +1,479 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+// ----------------------------------------------------------------------------
+// Nodes
+
+type Node interface {
+ // Pos() returns the position associated with the node as follows:
+ // 1) The position of a node representing a terminal syntax production
+ // (Name, BasicLit, etc.) is the position of the respective production
+ // in the source.
+ // 2) The position of a node representing a non-terminal production
+ // (IndexExpr, IfStmt, etc.) is the position of a token uniquely
+ // associated with that production; usually the left-most one
+ // ('[' for IndexExpr, 'if' for IfStmt, etc.)
+ Pos() Pos
+ aNode()
+}
+
+type node struct {
+ // commented out for now since not yet used
+ // doc *Comment // nil means no comment(s) attached
+ pos Pos
+}
+
+func (n *node) Pos() Pos { return n.pos }
+func (*node) aNode() {}
+
+// ----------------------------------------------------------------------------
+// Files
+
+// package PkgName; DeclList[0], DeclList[1], ...
+type File struct {
+ Pragma Pragma
+ PkgName *Name
+ DeclList []Decl
+ EOF Pos
+ node
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+type (
+ Decl interface {
+ Node
+ aDecl()
+ }
+
+ // Path
+ // LocalPkgName Path
+ ImportDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ LocalPkgName *Name // including "."; nil means no rename present
+ Path *BasicLit // Path.Bad || Path.Kind == StringLit; nil means no path
+ decl
+ }
+
+ // NameList
+ // NameList = Values
+ // NameList Type = Values
+ ConstDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // Name Type
+ TypeDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ Name *Name
+ TParamList []*Field // nil means no type parameters
+ Alias bool
+ Type Expr
+ decl
+ }
+
+ // NameList Type
+ // NameList Type = Values
+ // NameList = Values
+ VarDecl struct {
+ Group *Group // nil means not part of a group
+ Pragma Pragma
+ NameList []*Name
+ Type Expr // nil means no type
+ Values Expr // nil means no values
+ decl
+ }
+
+ // func Name Type { Body }
+ // func Name Type
+ // func Receiver Name Type { Body }
+ // func Receiver Name Type
+ FuncDecl struct {
+ Pragma Pragma
+ Recv *Field // nil means regular function
+ Name *Name
+ TParamList []*Field // nil means no type parameters
+ Type *FuncType
+ Body *BlockStmt // nil means no body (forward declaration)
+ decl
+ }
+)
+
+type decl struct{ node }
+
+func (*decl) aDecl() {}
+
+// All declarations belonging to the same group point to the same Group node.
+type Group struct {
+ _ int // not empty so we are guaranteed different Group instances
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func NewName(pos Pos, value string) *Name {
+ n := new(Name)
+ n.pos = pos
+ n.Value = value
+ return n
+}
+
+type (
+ Expr interface {
+ Node
+ aExpr()
+ }
+
+ // Placeholder for an expression that failed to parse
+ // correctly and where we can't provide a better node.
+ BadExpr struct {
+ expr
+ }
+
+ // Value
+ Name struct {
+ Value string
+ expr
+ }
+
+ // Value
+ BasicLit struct {
+ Value string
+ Kind LitKind
+ Bad bool // true means the literal Value has syntax errors
+ expr
+ }
+
+ // Type { ElemList[0], ElemList[1], ... }
+ CompositeLit struct {
+ Type Expr // nil means no literal type
+ ElemList []Expr
+ NKeys int // number of elements with keys
+ Rbrace Pos
+ expr
+ }
+
+ // Key: Value
+ KeyValueExpr struct {
+ Key, Value Expr
+ expr
+ }
+
+ // func Type { Body }
+ FuncLit struct {
+ Type *FuncType
+ Body *BlockStmt
+ expr
+ }
+
+ // (X)
+ ParenExpr struct {
+ X Expr
+ expr
+ }
+
+ // X.Sel
+ SelectorExpr struct {
+ X Expr
+ Sel *Name
+ expr
+ }
+
+ // X[Index]
+ // X[T1, T2, ...] (with Ti = Index.(*ListExpr).ElemList[i])
+ IndexExpr struct {
+ X Expr
+ Index Expr
+ expr
+ }
+
+ // X[Index[0] : Index[1] : Index[2]]
+ SliceExpr struct {
+ X Expr
+ Index [3]Expr
+ // Full indicates whether this is a simple or full slice expression.
+ // In a valid AST, this is equivalent to Index[2] != nil.
+ // TODO(mdempsky): This is only needed to report the "3-index
+ // slice of string" error when Index[2] is missing.
+ Full bool
+ expr
+ }
+
+ // X.(Type)
+ AssertExpr struct {
+ X Expr
+ Type Expr
+ expr
+ }
+
+ // X.(type)
+ // Lhs := X.(type)
+ TypeSwitchGuard struct {
+ Lhs *Name // nil means no Lhs :=
+ X Expr // X.(type)
+ expr
+ }
+
+ Operation struct {
+ Op Operator
+ X, Y Expr // Y == nil means unary expression
+ expr
+ }
+
+ // Fun(ArgList[0], ArgList[1], ...)
+ CallExpr struct {
+ Fun Expr
+ ArgList []Expr // nil means no arguments
+ HasDots bool // last argument is followed by ...
+ expr
+ }
+
+ // ElemList[0], ElemList[1], ...
+ ListExpr struct {
+ ElemList []Expr
+ expr
+ }
+
+ // [Len]Elem
+ ArrayType struct {
+ // TODO(gri) consider using Name{"..."} instead of nil (permits attaching of comments)
+ Len Expr // nil means Len is ...
+ Elem Expr
+ expr
+ }
+
+ // []Elem
+ SliceType struct {
+ Elem Expr
+ expr
+ }
+
+ // ...Elem
+ DotsType struct {
+ Elem Expr
+ expr
+ }
+
+ // struct { FieldList[0] TagList[0]; FieldList[1] TagList[1]; ... }
+ StructType struct {
+ FieldList []*Field
+ TagList []*BasicLit // i >= len(TagList) || TagList[i] == nil means no tag for field i
+ expr
+ }
+
+ // Name Type
+ // Type
+ Field struct {
+ Name *Name // nil means anonymous field/parameter (structs/parameters), or embedded element (interfaces)
+ Type Expr // field names declared in a list share the same Type (identical pointers)
+ node
+ }
+
+ // interface { MethodList[0]; MethodList[1]; ... }
+ InterfaceType struct {
+ MethodList []*Field
+ expr
+ }
+
+ FuncType struct {
+ ParamList []*Field
+ ResultList []*Field
+ expr
+ }
+
+ // map[Key]Value
+ MapType struct {
+ Key, Value Expr
+ expr
+ }
+
+ // chan Elem
+ // <-chan Elem
+ // chan<- Elem
+ ChanType struct {
+ Dir ChanDir // 0 means no direction
+ Elem Expr
+ expr
+ }
+)
+
+type expr struct{ node }
+
+func (*expr) aExpr() {}
+
+type ChanDir uint
+
+const (
+ _ ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// ----------------------------------------------------------------------------
+// Statements
+
+type (
+ Stmt interface {
+ Node
+ aStmt()
+ }
+
+ SimpleStmt interface {
+ Stmt
+ aSimpleStmt()
+ }
+
+ EmptyStmt struct {
+ simpleStmt
+ }
+
+ LabeledStmt struct {
+ Label *Name
+ Stmt Stmt
+ stmt
+ }
+
+ BlockStmt struct {
+ List []Stmt
+ Rbrace Pos
+ stmt
+ }
+
+ ExprStmt struct {
+ X Expr
+ simpleStmt
+ }
+
+ SendStmt struct {
+ Chan, Value Expr // Chan <- Value
+ simpleStmt
+ }
+
+ DeclStmt struct {
+ DeclList []Decl
+ stmt
+ }
+
+ AssignStmt struct {
+ Op Operator // 0 means no operation
+ Lhs, Rhs Expr // Rhs == nil means Lhs++ (Op == Add) or Lhs-- (Op == Sub)
+ simpleStmt
+ }
+
+ BranchStmt struct {
+ Tok token // Break, Continue, Fallthrough, or Goto
+ Label *Name
+ // Target is the continuation of the control flow after executing
+ // the branch; it is computed by the parser if CheckBranches is set.
+ // Target is a *LabeledStmt for gotos, and a *SwitchStmt, *SelectStmt,
+ // or *ForStmt for breaks and continues, depending on the context of
+ // the branch. Target is not set for fallthroughs.
+ Target Stmt
+ stmt
+ }
+
+ CallStmt struct {
+ Tok token // Go or Defer
+ Call *CallExpr
+ stmt
+ }
+
+ ReturnStmt struct {
+ Results Expr // nil means no explicit return values
+ stmt
+ }
+
+ IfStmt struct {
+ Init SimpleStmt
+ Cond Expr
+ Then *BlockStmt
+ Else Stmt // either nil, *IfStmt, or *BlockStmt
+ stmt
+ }
+
+ ForStmt struct {
+ Init SimpleStmt // incl. *RangeClause
+ Cond Expr
+ Post SimpleStmt
+ Body *BlockStmt
+ stmt
+ }
+
+ SwitchStmt struct {
+ Init SimpleStmt
+ Tag Expr // incl. *TypeSwitchGuard
+ Body []*CaseClause
+ Rbrace Pos
+ stmt
+ }
+
+ SelectStmt struct {
+ Body []*CommClause
+ Rbrace Pos
+ stmt
+ }
+)
+
+type (
+ RangeClause struct {
+ Lhs Expr // nil means no Lhs = or Lhs :=
+ Def bool // means :=
+ X Expr // range X
+ simpleStmt
+ }
+
+ CaseClause struct {
+ Cases Expr // nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+
+ CommClause struct {
+ Comm SimpleStmt // send or receive stmt; nil means default clause
+ Body []Stmt
+ Colon Pos
+ node
+ }
+)
+
+type stmt struct{ node }
+
+func (stmt) aStmt() {}
+
+type simpleStmt struct {
+ stmt
+}
+
+func (simpleStmt) aSimpleStmt() {}
+
+// ----------------------------------------------------------------------------
+// Comments
+
+// TODO(gri) Consider renaming to CommentPos, CommentPlacement, etc.
+// Kind = Above doesn't make much sense.
+type CommentKind uint
+
+const (
+ Above CommentKind = iota
+ Below
+ Left
+ Right
+)
+
+type Comment struct {
+ Kind CommentKind
+ Text string
+ Next *Comment
+}
diff --git a/src/cmd/compile/internal/syntax/nodes_test.go b/src/cmd/compile/internal/syntax/nodes_test.go
new file mode 100644
index 0000000..a39f08c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/nodes_test.go
@@ -0,0 +1,329 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+// A test is a source code snippet of a particular node type.
+// In the snippet, a '@' indicates the position recorded by
+// the parser when creating the respective node.
+type test struct {
+ nodetyp string
+ snippet string
+}
+
+var decls = []test{
+ // The position of declarations is always the
+ // position of the first token of an individual
+ // declaration, independent of grouping.
+ {"ImportDecl", `import @"math"`},
+ {"ImportDecl", `import @mymath "math"`},
+ {"ImportDecl", `import @. "math"`},
+ {"ImportDecl", `import (@"math")`},
+ {"ImportDecl", `import (@mymath "math")`},
+ {"ImportDecl", `import (@. "math")`},
+
+ {"ConstDecl", `const @x`},
+ {"ConstDecl", `const @x = 0`},
+ {"ConstDecl", `const @x, y, z = 0, 1, 2`},
+ {"ConstDecl", `const (@x)`},
+ {"ConstDecl", `const (@x = 0)`},
+ {"ConstDecl", `const (@x, y, z = 0, 1, 2)`},
+
+ {"TypeDecl", `type @T int`},
+ {"TypeDecl", `type @T = int`},
+ {"TypeDecl", `type (@T int)`},
+ {"TypeDecl", `type (@T = int)`},
+
+ {"VarDecl", `var @x int`},
+ {"VarDecl", `var @x, y, z int`},
+ {"VarDecl", `var @x int = 0`},
+ {"VarDecl", `var @x, y, z int = 1, 2, 3`},
+ {"VarDecl", `var @x = 0`},
+ {"VarDecl", `var @x, y, z = 1, 2, 3`},
+ {"VarDecl", `var (@x int)`},
+ {"VarDecl", `var (@x, y, z int)`},
+ {"VarDecl", `var (@x int = 0)`},
+ {"VarDecl", `var (@x, y, z int = 1, 2, 3)`},
+ {"VarDecl", `var (@x = 0)`},
+ {"VarDecl", `var (@x, y, z = 1, 2, 3)`},
+
+ {"FuncDecl", `func @f() {}`},
+ {"FuncDecl", `func @(T) f() {}`},
+ {"FuncDecl", `func @(x T) f() {}`},
+}
+
+var exprs = []test{
+ // The position of an expression is the position
+ // of the left-most token that identifies the
+ // kind of expression.
+ {"Name", `@x`},
+
+ {"BasicLit", `@0`},
+ {"BasicLit", `@0x123`},
+ {"BasicLit", `@3.1415`},
+ {"BasicLit", `@.2718`},
+ {"BasicLit", `@1i`},
+ {"BasicLit", `@'a'`},
+ {"BasicLit", `@"abc"`},
+ {"BasicLit", "@`abc`"},
+
+ {"CompositeLit", `@{}`},
+ {"CompositeLit", `T@{}`},
+ {"CompositeLit", `struct{x, y int}@{}`},
+
+ {"KeyValueExpr", `"foo"@: true`},
+ {"KeyValueExpr", `"a"@: b`},
+
+ {"FuncLit", `@func (){}`},
+ {"ParenExpr", `@(x)`},
+ {"SelectorExpr", `a@.b`},
+ {"IndexExpr", `a@[i]`},
+
+ {"SliceExpr", `a@[:]`},
+ {"SliceExpr", `a@[i:]`},
+ {"SliceExpr", `a@[:j]`},
+ {"SliceExpr", `a@[i:j]`},
+ {"SliceExpr", `a@[i:j:k]`},
+
+ {"AssertExpr", `x@.(T)`},
+
+ {"Operation", `@*b`},
+ {"Operation", `@+b`},
+ {"Operation", `@-b`},
+ {"Operation", `@!b`},
+ {"Operation", `@^b`},
+ {"Operation", `@&b`},
+ {"Operation", `@<-b`},
+
+ {"Operation", `a @|| b`},
+ {"Operation", `a @&& b`},
+ {"Operation", `a @== b`},
+ {"Operation", `a @+ b`},
+ {"Operation", `a @* b`},
+
+ {"CallExpr", `f@()`},
+ {"CallExpr", `f@(x, y, z)`},
+ {"CallExpr", `obj.f@(1, 2, 3)`},
+ {"CallExpr", `func(x int) int { return x + 1 }@(y)`},
+
+ // ListExpr: tested via multi-value const/var declarations
+}
+
+var types = []test{
+ {"Operation", `@*T`},
+ {"Operation", `@*struct{}`},
+
+ {"ArrayType", `@[10]T`},
+ {"ArrayType", `@[...]T`},
+
+ {"SliceType", `@[]T`},
+ {"DotsType", `@...T`},
+ {"StructType", `@struct{}`},
+ {"InterfaceType", `@interface{}`},
+ {"FuncType", `func@()`},
+ {"MapType", `@map[T]T`},
+
+ {"ChanType", `@chan T`},
+ {"ChanType", `@chan<- T`},
+ {"ChanType", `@<-chan T`},
+}
+
+var fields = []test{
+ {"Field", `@T`},
+ {"Field", `@(T)`},
+ {"Field", `@x T`},
+ {"Field", `@x *(T)`},
+ {"Field", `@x, y, z T`},
+ {"Field", `@x, y, z (*T)`},
+}
+
+var stmts = []test{
+ {"EmptyStmt", `@`},
+
+ {"LabeledStmt", `L@:`},
+ {"LabeledStmt", `L@: ;`},
+ {"LabeledStmt", `L@: f()`},
+
+ {"BlockStmt", `@{}`},
+
+ // The position of an ExprStmt is the position of the expression.
+ {"ExprStmt", `@<-ch`},
+ {"ExprStmt", `f@()`},
+ {"ExprStmt", `append@(s, 1, 2, 3)`},
+
+ {"SendStmt", `ch @<- x`},
+
+ {"DeclStmt", `@const x = 0`},
+ {"DeclStmt", `@const (x = 0)`},
+ {"DeclStmt", `@type T int`},
+ {"DeclStmt", `@type T = int`},
+ {"DeclStmt", `@type (T1 = int; T2 = float32)`},
+ {"DeclStmt", `@var x = 0`},
+ {"DeclStmt", `@var x, y, z int`},
+ {"DeclStmt", `@var (a, b = 1, 2)`},
+
+ {"AssignStmt", `x @= y`},
+ {"AssignStmt", `a, b, x @= 1, 2, 3`},
+ {"AssignStmt", `x @+= y`},
+ {"AssignStmt", `x @:= y`},
+ {"AssignStmt", `x, ok @:= f()`},
+ {"AssignStmt", `x@++`},
+ {"AssignStmt", `a[i]@--`},
+
+ {"BranchStmt", `@break`},
+ {"BranchStmt", `@break L`},
+ {"BranchStmt", `@continue`},
+ {"BranchStmt", `@continue L`},
+ {"BranchStmt", `@fallthrough`},
+ {"BranchStmt", `@goto L`},
+
+ {"CallStmt", `@defer f()`},
+ {"CallStmt", `@go f()`},
+
+ {"ReturnStmt", `@return`},
+ {"ReturnStmt", `@return x`},
+ {"ReturnStmt", `@return a, b, a + b*f(1, 2, 3)`},
+
+ {"IfStmt", `@if cond {}`},
+ {"IfStmt", `@if cond { f() } else {}`},
+ {"IfStmt", `@if cond { f() } else { g(); h() }`},
+ {"ForStmt", `@for {}`},
+ {"ForStmt", `@for { f() }`},
+ {"SwitchStmt", `@switch {}`},
+ {"SwitchStmt", `@switch { default: }`},
+ {"SwitchStmt", `@switch { default: x++ }`},
+ {"SelectStmt", `@select {}`},
+ {"SelectStmt", `@select { default: }`},
+ {"SelectStmt", `@select { default: ch <- false }`},
+}
+
+var ranges = []test{
+ {"RangeClause", `@range s`},
+ {"RangeClause", `i = @range s`},
+ {"RangeClause", `i := @range s`},
+ {"RangeClause", `_, x = @range s`},
+ {"RangeClause", `i, x = @range s`},
+ {"RangeClause", `_, x := @range s.f`},
+ {"RangeClause", `i, x := @range f(i)`},
+}
+
+var guards = []test{
+ {"TypeSwitchGuard", `x@.(type)`},
+ {"TypeSwitchGuard", `x := x@.(type)`},
+}
+
+var cases = []test{
+ {"CaseClause", `@case x:`},
+ {"CaseClause", `@case x, y, z:`},
+ {"CaseClause", `@case x == 1, y == 2:`},
+ {"CaseClause", `@default:`},
+}
+
+var comms = []test{
+ {"CommClause", `@case <-ch:`},
+ {"CommClause", `@case x <- ch:`},
+ {"CommClause", `@case x = <-ch:`},
+ {"CommClause", `@case x := <-ch:`},
+ {"CommClause", `@case x, ok = <-ch: f(1, 2, 3)`},
+ {"CommClause", `@case x, ok := <-ch: x++`},
+ {"CommClause", `@default:`},
+ {"CommClause", `@default: ch <- true`},
+}
+
+func TestPos(t *testing.T) {
+ // TODO(gri) Once we have a general tree walker, we can use that to find
+ // the first occurrence of the respective node and we don't need to hand-
+ // extract the node for each specific kind of construct.
+
+ testPos(t, decls, "package p; ", "",
+ func(f *File) Node { return f.DeclList[0] },
+ )
+
+ // embed expressions in a composite literal so we can test key:value and naked composite literals
+ testPos(t, exprs, "package p; var _ = T{ ", " }",
+ func(f *File) Node { return f.DeclList[0].(*VarDecl).Values.(*CompositeLit).ElemList[0] },
+ )
+
+ // embed types in a function signature so we can test ... types
+ testPos(t, types, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0].Type },
+ )
+
+ testPos(t, fields, "package p; func f(", ")",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Type.ParamList[0] },
+ )
+
+ testPos(t, stmts, "package p; func _() { ", "; }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0] },
+ )
+
+ testPos(t, ranges, "package p; func _() { for ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*ForStmt).Init.(*RangeClause) },
+ )
+
+ testPos(t, guards, "package p; func _() { switch ", " {} }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Tag.(*TypeSwitchGuard) },
+ )
+
+ testPos(t, cases, "package p; func _() { switch { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SwitchStmt).Body[0] },
+ )
+
+ testPos(t, comms, "package p; func _() { select { ", " } }",
+ func(f *File) Node { return f.DeclList[0].(*FuncDecl).Body.List[0].(*SelectStmt).Body[0] },
+ )
+}
+
+func testPos(t *testing.T, list []test, prefix, suffix string, extract func(*File) Node) {
+ for _, test := range list {
+ // complete source, compute @ position, and strip @ from source
+ src, index := stripAt(prefix + test.snippet + suffix)
+ if index < 0 {
+ t.Errorf("missing @: %s (%s)", src, test.nodetyp)
+ continue
+ }
+
+ // build syntax tree
+ file, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Errorf("parse error: %s: %v (%s)", src, err, test.nodetyp)
+ continue
+ }
+
+ // extract desired node
+ node := extract(file)
+ if typ := typeOf(node); typ != test.nodetyp {
+ t.Errorf("type error: %s: type = %s, want %s", src, typ, test.nodetyp)
+ continue
+ }
+
+ // verify node position with expected position as indicated by @
+ if pos := int(node.Pos().Col()); pos != index+colbase {
+ t.Errorf("pos error: %s: pos = %d, want %d (%s)", src, pos, index+colbase, test.nodetyp)
+ continue
+ }
+ }
+}
+
+func stripAt(s string) (string, int) {
+ if i := strings.Index(s, "@"); i >= 0 {
+ return s[:i] + s[i+1:], i
+ }
+ return s, -1
+}
+
+func typeOf(n Node) string {
+ const prefix = "*syntax."
+ k := fmt.Sprintf("%T", n)
+ if strings.HasPrefix(k, prefix) {
+ return k[len(prefix):]
+ }
+ return k
+}
diff --git a/src/cmd/compile/internal/syntax/operator_string.go b/src/cmd/compile/internal/syntax/operator_string.go
new file mode 100644
index 0000000..f045d8c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/operator_string.go
@@ -0,0 +1,46 @@
+// Code generated by "stringer -type Operator -linecomment tokens.go"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Def-1]
+ _ = x[Not-2]
+ _ = x[Recv-3]
+ _ = x[Tilde-4]
+ _ = x[OrOr-5]
+ _ = x[AndAnd-6]
+ _ = x[Eql-7]
+ _ = x[Neq-8]
+ _ = x[Lss-9]
+ _ = x[Leq-10]
+ _ = x[Gtr-11]
+ _ = x[Geq-12]
+ _ = x[Add-13]
+ _ = x[Sub-14]
+ _ = x[Or-15]
+ _ = x[Xor-16]
+ _ = x[Mul-17]
+ _ = x[Div-18]
+ _ = x[Rem-19]
+ _ = x[And-20]
+ _ = x[AndNot-21]
+ _ = x[Shl-22]
+ _ = x[Shr-23]
+}
+
+const _Operator_name = ":!<-~||&&==!=<<=>>=+-|^*/%&&^<<>>"
+
+var _Operator_index = [...]uint8{0, 1, 2, 4, 5, 7, 9, 11, 13, 14, 16, 17, 19, 20, 21, 22, 23, 24, 25, 26, 27, 29, 31, 33}
+
+func (i Operator) String() string {
+ i -= 1
+ if i >= Operator(len(_Operator_index)-1) {
+ return "Operator(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _Operator_name[_Operator_index[i]:_Operator_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/parser.go b/src/cmd/compile/internal/syntax/parser.go
new file mode 100644
index 0000000..a75a3b1
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser.go
@@ -0,0 +1,2862 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "strconv"
+ "strings"
+)
+
+const debug = false
+const trace = false
+
+type parser struct {
+ file *PosBase
+ errh ErrorHandler
+ mode Mode
+ pragh PragmaHandler
+ scanner
+
+ base *PosBase // current position base
+ first error // first error encountered
+ errcnt int // number of errors encountered
+ pragma Pragma // pragmas
+
+ fnest int // function nesting level (for error handling)
+ xnest int // expression nesting level (for complit ambiguity resolution)
+ indent []byte // tracing support
+}
+
+func (p *parser) init(file *PosBase, r io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) {
+ p.file = file
+ p.errh = errh
+ p.mode = mode
+ p.pragh = pragh
+ p.scanner.init(
+ r,
+ // Error and directive handler for scanner.
+ // Because the (line, col) positions passed to the
+		// handler are always at or after the current reading
+ // position, it is safe to use the most recent position
+ // base to compute the corresponding Pos value.
+ func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ p.errorAt(p.posAt(line, col), msg)
+ return
+ }
+
+ // otherwise it must be a comment containing a line or go: directive.
+ // //line directives must be at the start of the line (column colbase).
+ // /*line*/ directives can be anywhere in the line.
+ text := commentText(msg)
+ if (col == colbase || msg[1] == '*') && strings.HasPrefix(text, "line ") {
+ var pos Pos // position immediately following the comment
+ if msg[1] == '/' {
+ // line comment (newline is part of the comment)
+ pos = MakePos(p.file, line+1, colbase)
+ } else {
+ // regular comment
+ // (if the comment spans multiple lines it's not
+ // a valid line directive and will be discarded
+ // by updateBase)
+ pos = MakePos(p.file, line, col+uint(len(msg)))
+ }
+ p.updateBase(pos, line, col+2+5, text[5:]) // +2 to skip over // or /*
+ return
+ }
+
+ // go: directive (but be conservative and test)
+ if pragh != nil && strings.HasPrefix(text, "go:") {
+ p.pragma = pragh(p.posAt(line, col+2), p.scanner.blank, text, p.pragma) // +2 to skip over // or /*
+ }
+ },
+ directives,
+ )
+
+ p.base = file
+ p.first = nil
+ p.errcnt = 0
+ p.pragma = nil
+
+ p.fnest = 0
+ p.xnest = 0
+ p.indent = nil
+}
+
+func (p *parser) allowGenerics() bool { return p.mode&AllowGenerics != 0 }
+
+// takePragma returns the current parsed pragmas
+// and clears them from the parser state.
+func (p *parser) takePragma() Pragma {
+ prag := p.pragma
+ p.pragma = nil
+ return prag
+}
+
+// clearPragma is called at the end of a statement or
+// other Go form that does NOT accept a pragma.
+// It sends the pragma back to the pragma handler
+// to be reported as unused.
+func (p *parser) clearPragma() {
+ if p.pragma != nil {
+ p.pragh(p.pos(), p.scanner.blank, "", p.pragma)
+ p.pragma = nil
+ }
+}
+
+// updateBase sets the current position base to a new line base at pos.
+// The base's filename, line, and column values are extracted from text
+// which is positioned at (tline, tcol) (only needed for error messages).
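+//
+// After the caller strips the leading "line " directive prefix, text has one
+// of the forms handled below:
+//
+//	filename:line
+//	filename:line:col
+//
+// as written in //line and /*line*/ directives such as //line file.go:10:5.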
+func (p *parser) updateBase(pos Pos, tline, tcol uint, text string) {
+ i, n, ok := trailingDigits(text)
+ if i == 0 {
+ return // ignore (not a line directive)
+ }
+ // i > 0
+
+ if !ok {
+ // text has a suffix :xxx but xxx is not a number
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ var line, col uint
+ i2, n2, ok2 := trailingDigits(text[:i-1])
+ if ok2 {
+ //line filename:line:col
+ i, i2 = i2, i
+ line, col = n2, n
+ if col == 0 || col > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i2), "invalid column number: "+text[i2:])
+ return
+ }
+ text = text[:i2-1] // lop off ":col"
+ } else {
+ //line filename:line
+ line = n
+ }
+
+ if line == 0 || line > PosMax {
+ p.errorAt(p.posAt(tline, tcol+i), "invalid line number: "+text[i:])
+ return
+ }
+
+ // If we have a column (//line filename:line:col form),
+ // an empty filename means to use the previous filename.
+ filename := text[:i-1] // lop off ":line"
+ trimmed := false
+ if filename == "" && ok2 {
+ filename = p.base.Filename()
+ trimmed = p.base.Trimmed()
+ }
+
+ p.base = NewLineBase(pos, filename, trimmed, line, col)
+}
+
+func commentText(s string) string {
+ if s[:2] == "/*" {
+ return s[2 : len(s)-2] // lop off /* and */
+ }
+
+ // line comment (does not include newline)
+ // (on Windows, the line comment may end in \r\n)
+ i := len(s)
+ if s[i-1] == '\r' {
+ i--
+ }
+ return s[2:i] // lop off //, and \r at end, if any
+}
+
+func trailingDigits(text string) (uint, uint, bool) {
+ // Want to use LastIndexByte below but it's not defined in Go1.4 and bootstrap fails.
+ i := strings.LastIndex(text, ":") // look from right (Windows filenames may contain ':')
+ if i < 0 {
+ return 0, 0, false // no ":"
+ }
+ // i >= 0
+ n, err := strconv.ParseUint(text[i+1:], 10, 0)
+ return uint(i + 1), uint(n), err == nil
+}
+
+func (p *parser) got(tok token) bool {
+ if p.tok == tok {
+ p.next()
+ return true
+ }
+ return false
+}
+
+func (p *parser) want(tok token) {
+ if !p.got(tok) {
+ p.syntaxError("expecting " + tokstring(tok))
+ p.advance()
+ }
+}
+
+// gotAssign is like got(_Assign) but it also accepts ":="
+// (and reports an error) for better parser error recovery.
+func (p *parser) gotAssign() bool {
+ switch p.tok {
+ case _Define:
+ p.syntaxError("expecting =")
+ fallthrough
+ case _Assign:
+ p.next()
+ return true
+ }
+ return false
+}
+
+// ----------------------------------------------------------------------------
+// Error handling
+
+// posAt returns the Pos value for (line, col) and the current position base.
+func (p *parser) posAt(line, col uint) Pos {
+ return MakePos(p.base, line, col)
+}
+
+// error reports an error at the given position.
+func (p *parser) errorAt(pos Pos, msg string) {
+ err := Error{pos, msg}
+ if p.first == nil {
+ p.first = err
+ }
+ p.errcnt++
+ if p.errh == nil {
+ panic(p.first)
+ }
+ p.errh(err)
+}
+
+// syntaxErrorAt reports a syntax error at the given position.
+func (p *parser) syntaxErrorAt(pos Pos, msg string) {
+ if trace {
+ p.print("syntax error: " + msg)
+ }
+
+ if p.tok == _EOF && p.first != nil {
+ return // avoid meaningless follow-up errors
+ }
+
+ // add punctuation etc. as needed to msg
+ switch {
+ case msg == "":
+ // nothing to do
+ case strings.HasPrefix(msg, "in "), strings.HasPrefix(msg, "at "), strings.HasPrefix(msg, "after "):
+ msg = " " + msg
+ case strings.HasPrefix(msg, "expecting "):
+ msg = ", " + msg
+ default:
+ // plain error - we don't care about current token
+ p.errorAt(pos, "syntax error: "+msg)
+ return
+ }
+
+ // determine token string
+ var tok string
+ switch p.tok {
+ case _Name, _Semi:
+ tok = p.lit
+ case _Literal:
+ tok = "literal " + p.lit
+ case _Operator:
+ tok = p.op.String()
+ case _AssignOp:
+ tok = p.op.String() + "="
+ case _IncOp:
+ tok = p.op.String()
+ tok += tok
+ default:
+ tok = tokstring(p.tok)
+ }
+
+ p.errorAt(pos, "syntax error: unexpected "+tok+msg)
+}
+
+// tokstring returns the English word for selected punctuation tokens
+// for more readable error messages. Use tokstring (not tok.String())
+// for user-facing (error) messages; use tok.String() for debugging
+// output.
+func tokstring(tok token) string {
+ switch tok {
+ case _Comma:
+ return "comma"
+ case _Semi:
+ return "semicolon or newline"
+ }
+ return tok.String()
+}
+
+// Convenience methods using the current token position.
+func (p *parser) pos() Pos { return p.posAt(p.line, p.col) }
+func (p *parser) error(msg string) { p.errorAt(p.pos(), msg) }
+func (p *parser) syntaxError(msg string) { p.syntaxErrorAt(p.pos(), msg) }
+
+// The stopset contains keywords that start a statement.
+// They are good synchronization points in case of syntax
+// errors and (usually) shouldn't be skipped over.
+const stopset uint64 = 1<<_Break |
+ 1<<_Const |
+ 1<<_Continue |
+ 1<<_Defer |
+ 1<<_Fallthrough |
+ 1<<_For |
+ 1<<_Go |
+ 1<<_Goto |
+ 1<<_If |
+ 1<<_Return |
+ 1<<_Select |
+ 1<<_Switch |
+ 1<<_Type |
+ 1<<_Var
+
+// advance consumes tokens until it finds a token of the stopset or followlist.
+// The stopset is only considered if we are inside a function (p.fnest > 0).
+// The followlist is the list of valid tokens that can follow a production;
+// if it is empty, exactly one (non-EOF) token is consumed to ensure progress.
+func (p *parser) advance(followlist ...token) {
+ if trace {
+ p.print(fmt.Sprintf("advance %s", followlist))
+ }
+
+ // compute follow set
+ // (not speed critical, advance is only called in error situations)
+ var followset uint64 = 1 << _EOF // don't skip over EOF
+ if len(followlist) > 0 {
+ if p.fnest > 0 {
+ followset |= stopset
+ }
+ for _, tok := range followlist {
+ followset |= 1 << tok
+ }
+ }
+
+ for !contains(followset, p.tok) {
+ if trace {
+ p.print("skip " + p.tok.String())
+ }
+ p.next()
+ if len(followlist) == 0 {
+ break
+ }
+ }
+
+ if trace {
+ p.print("next " + p.tok.String())
+ }
+}
+
+// usage: defer p.trace(msg)()
+func (p *parser) trace(msg string) func() {
+ p.print(msg + " (")
+ const tab = ". "
+ p.indent = append(p.indent, tab...)
+ return func() {
+ p.indent = p.indent[:len(p.indent)-len(tab)]
+ if x := recover(); x != nil {
+ panic(x) // skip print_trace
+ }
+ p.print(")")
+ }
+}
+
+func (p *parser) print(msg string) {
+ fmt.Printf("%5d: %s%s\n", p.line, p.indent, msg)
+}
+
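Note: the "defer p.trace(msg)()" idiom above works because the call runs the entry action immediately and defers only the returned closure, which then runs at function exit. A self-contained sketch of the same pattern, with hypothetical names:

package main

import "fmt"

// trace prints the entry message right away and returns a closure
// that prints the exit message when the caller's function returns.
func trace(msg string) func() {
	fmt.Println("enter", msg)
	return func() { fmt.Println("leave", msg) }
}

func parseThing() {
	defer trace("parseThing")() // the trailing () defers only the returned closure
	fmt.Println("  ...work...")
}

func main() { parseThing() }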
+// ----------------------------------------------------------------------------
+// Package files
+//
+// Parse methods are annotated with matching Go productions as appropriate.
+// The annotations are intended as guidelines only since a single Go grammar
+// rule may be covered by multiple parse methods and vice versa.
+//
+// Excluding methods returning slices, parse methods named xOrNil may return
+// nil; all others are expected to return a valid non-nil node.
+
+// SourceFile = PackageClause ";" { ImportDecl ";" } { TopLevelDecl ";" } .
+func (p *parser) fileOrNil() *File {
+ if trace {
+ defer p.trace("file")()
+ }
+
+ f := new(File)
+ f.pos = p.pos()
+
+ // PackageClause
+ if !p.got(_Package) {
+ p.syntaxError("package statement must be first")
+ return nil
+ }
+ f.Pragma = p.takePragma()
+ f.PkgName = p.name()
+ p.want(_Semi)
+
+ // don't bother continuing if package clause has errors
+ if p.first != nil {
+ return nil
+ }
+
+ // { ImportDecl ";" }
+ for p.got(_Import) {
+ f.DeclList = p.appendGroup(f.DeclList, p.importDecl)
+ p.want(_Semi)
+ }
+
+ // { TopLevelDecl ";" }
+ for p.tok != _EOF {
+ switch p.tok {
+ case _Const:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.constDecl)
+
+ case _Type:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.typeDecl)
+
+ case _Var:
+ p.next()
+ f.DeclList = p.appendGroup(f.DeclList, p.varDecl)
+
+ case _Func:
+ p.next()
+ if d := p.funcDeclOrNil(); d != nil {
+ f.DeclList = append(f.DeclList, d)
+ }
+
+ default:
+ if p.tok == _Lbrace && len(f.DeclList) > 0 && isEmptyFuncDecl(f.DeclList[len(f.DeclList)-1]) {
+ // opening { of function declaration on next line
+ p.syntaxError("unexpected semicolon or newline before {")
+ } else {
+ p.syntaxError("non-declaration statement outside function body")
+ }
+ p.advance(_Const, _Type, _Var, _Func)
+ continue
+ }
+
+ // Reset p.pragma BEFORE advancing to the next token (consuming ';')
+ // since comments before may set pragmas for the next function decl.
+ p.clearPragma()
+
+ if p.tok != _EOF && !p.got(_Semi) {
+ p.syntaxError("after top level declaration")
+ p.advance(_Const, _Type, _Var, _Func)
+ }
+ }
+ // p.tok == _EOF
+
+ p.clearPragma()
+ f.EOF = p.pos()
+
+ return f
+}
+
+func isEmptyFuncDecl(dcl Decl) bool {
+ f, ok := dcl.(*FuncDecl)
+ return ok && f.Body == nil
+}
+
+// ----------------------------------------------------------------------------
+// Declarations
+
+// list parses a possibly empty, sep-separated list of elements, optionally
+// followed by sep, and closed by close (or EOF). sep must be one of _Comma
+// or _Semi, and close must be one of _Rparen, _Rbrace, or _Rbrack.
+//
+// For each list element, f is called. Specifically, unless we're at close
+// (or EOF), f is called at least once. After f returns true, no more list
+// elements are accepted. list returns the position of the closing token.
+//
+// list = [ f { sep f } [sep] ] close .
+//
+func (p *parser) list(sep, close token, f func() bool) Pos {
+ if debug && (sep != _Comma && sep != _Semi || close != _Rparen && close != _Rbrace && close != _Rbrack) {
+ panic("invalid sep or close argument for list")
+ }
+
+ done := false
+ for p.tok != _EOF && p.tok != close && !done {
+ done = f()
+ // sep is optional before close
+ if !p.got(sep) && p.tok != close {
+ p.syntaxError(fmt.Sprintf("expecting %s or %s", tokstring(sep), tokstring(close)))
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ if p.tok != close {
+ // position could be better but we had an error so we don't care
+ return p.pos()
+ }
+ }
+ }
+
+ pos := p.pos()
+ p.want(close)
+ return pos
+}
+
+// appendGroup(f) = f | "(" { f ";" } ")" . // ";" is optional before ")"
+func (p *parser) appendGroup(list []Decl, f func(*Group) Decl) []Decl {
+ if p.tok == _Lparen {
+ g := new(Group)
+ p.clearPragma()
+ p.next() // must consume "(" after calling clearPragma!
+ p.list(_Semi, _Rparen, func() bool {
+ if x := f(g); x != nil {
+ list = append(list, x)
+ }
+ return false
+ })
+ } else {
+ if x := f(nil); x != nil {
+ list = append(list, x)
+ }
+ }
+ return list
+}
+
+// ImportSpec = [ "." | PackageName ] ImportPath .
+// ImportPath = string_lit .
+func (p *parser) importDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("importDecl")()
+ }
+
+ d := new(ImportDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ switch p.tok {
+ case _Name:
+ d.LocalPkgName = p.name()
+ case _Dot:
+ d.LocalPkgName = NewName(p.pos(), ".")
+ p.next()
+ }
+ d.Path = p.oliteral()
+ if d.Path == nil {
+ p.syntaxError("missing import path")
+ p.advance(_Semi, _Rparen)
+ return d
+ }
+ if !d.Path.Bad && d.Path.Kind != StringLit {
+ p.syntaxError("import path must be a string")
+ d.Path.Bad = true
+ }
+ // d.Path.Bad || d.Path.Kind == StringLit
+
+ return d
+}
+
+// ConstSpec = IdentifierList [ [ Type ] "=" ExpressionList ] .
+func (p *parser) constDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("constDecl")()
+ }
+
+ d := new(ConstDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.tok != _EOF && p.tok != _Semi && p.tok != _Rparen {
+ d.Type = p.typeOrNil()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// TypeSpec = identifier [ TypeParams ] [ "=" ] Type .
+func (p *parser) typeDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("typeDecl")()
+ }
+
+ d := new(TypeDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.Name = p.name()
+ if p.allowGenerics() && p.tok == _Lbrack {
+ // d.Name "[" ...
+ // array/slice type or type parameter list
+ pos := p.pos()
+ p.next()
+ switch p.tok {
+ case _Name:
+ // We may have an array type or a type parameter list.
+ // In either case we expect an expression x (which may
+ // just be a name, or a more complex expression) which
+ // we can analyze further.
+ //
+ // A type parameter list may have a type bound starting
+ // with a "[" as in: P []E. In that case, simply parsing
+ // an expression would lead to an error: P[] is invalid.
+ // But since index or slice expressions are never constant
+ // and thus invalid array length expressions, if we see a
+ // "[" following a name it must be the start of an array
+ // or slice constraint. Only if we don't see a "[" do we
+ // need to parse a full expression.
+ var x Expr = p.name()
+ if p.tok != _Lbrack {
+ // To parse the expression starting with name, expand
+ // the call sequence we would get by passing in name
+ // to parser.expr, and pass in name to parser.pexpr.
+ p.xnest++
+ x = p.binaryExpr(p.pexpr(x, false), 0)
+ p.xnest--
+ }
+
+ // analyze the cases
+ var pname *Name // pname != nil means pname is the type parameter name
+ var ptype Expr // ptype != nil means ptype is the type parameter type; pname != nil in this case
+ switch t := x.(type) {
+ case *Name:
+ // Unless we see a "]", we are at the start of a type parameter list.
+ if p.tok != _Rbrack {
+ // d.Name "[" name ...
+ pname = t
+ // no ptype
+ }
+ case *Operation:
+ // If we have an expression of the form name*T, and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.X.(*Name); name != nil {
+ if t.Op == Mul && (isTypeLit(t.Y) || p.tok == _Comma) {
+ // d.Name "[" name "*" t.Y
+ // d.Name "[" name "*" t.Y ","
+ t.X, t.Y = t.Y, nil // convert t into unary *t.Y
+ pname = name
+ ptype = t
+ }
+ }
+ case *CallExpr:
+ // If we have an expression of the form name(T), and T is a (possibly
+ // parenthesized) type literal or the next token is a comma, we are
+ // at the start of a type parameter list.
+ if name, _ := t.Fun.(*Name); name != nil {
+ if len(t.ArgList) == 1 && !t.HasDots && (isTypeLit(t.ArgList[0]) || p.tok == _Comma) {
+ // d.Name "[" name "(" t.ArgList[0] ")"
+ // d.Name "[" name "(" t.ArgList[0] ")" ","
+ pname = name
+ ptype = t.ArgList[0]
+ }
+ }
+ }
+
+ if pname != nil {
+ // d.Name "[" pname ...
+ // d.Name "[" pname ptype ...
+ // d.Name "[" pname ptype "," ...
+ d.TParamList = p.paramList(pname, ptype, _Rbrack, true)
+ d.Alias = p.gotAssign()
+ d.Type = p.typeOrNil()
+ } else {
+ // d.Name "[" x ...
+ d.Type = p.arrayType(pos, x)
+ }
+ case _Rbrack:
+ // d.Name "[" "]" ...
+ p.next()
+ d.Type = p.sliceType(pos)
+ default:
+ // d.Name "[" ...
+ d.Type = p.arrayType(pos, nil)
+ }
+ } else {
+ d.Alias = p.gotAssign()
+ d.Type = p.typeOrNil()
+ }
+
+ if d.Type == nil {
+ d.Type = p.badExpr()
+ p.syntaxError("in type declaration")
+ p.advance(_Semi, _Rparen)
+ }
+
+ return d
+}
+
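Note: to make the disambiguation in typeDecl concrete, a few assumed example declarations and the branch they are expected to take (illustrative only, not exhaustive):

//	type A [10]int    // expression follows "[" → array type
//	type B []int      // "]" immediately follows "[" → slice type
//	type C [P any]int // name followed by another name → type parameter list
//	type D [N]E       // single name followed by "]" → array type of length N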
+// isTypeLit reports whether x is a (possibly parenthesized) type literal.
+func isTypeLit(x Expr) bool {
+ switch x := x.(type) {
+ case *ArrayType, *StructType, *FuncType, *InterfaceType, *SliceType, *MapType, *ChanType:
+ return true
+ case *Operation:
+	// *T may be a pointer dereference.
+ // Only consider *T as type literal if T is a type literal.
+ return x.Op == Mul && x.Y == nil && isTypeLit(x.X)
+ case *ParenExpr:
+ return isTypeLit(x.X)
+ }
+ return false
+}
+
+// VarSpec = IdentifierList ( Type [ "=" ExpressionList ] | "=" ExpressionList ) .
+func (p *parser) varDecl(group *Group) Decl {
+ if trace {
+ defer p.trace("varDecl")()
+ }
+
+ d := new(VarDecl)
+ d.pos = p.pos()
+ d.Group = group
+ d.Pragma = p.takePragma()
+
+ d.NameList = p.nameList(p.name())
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ } else {
+ d.Type = p.type_()
+ if p.gotAssign() {
+ d.Values = p.exprList()
+ }
+ }
+
+ return d
+}
+
+// FunctionDecl = "func" FunctionName [ TypeParams ] ( Function | Signature ) .
+// FunctionName = identifier .
+// Function = Signature FunctionBody .
+// MethodDecl = "func" Receiver MethodName ( Function | Signature ) .
+// Receiver = Parameters .
+func (p *parser) funcDeclOrNil() *FuncDecl {
+ if trace {
+ defer p.trace("funcDecl")()
+ }
+
+ f := new(FuncDecl)
+ f.pos = p.pos()
+ f.Pragma = p.takePragma()
+
+ if p.got(_Lparen) {
+ rcvr := p.paramList(nil, nil, _Rparen, false)
+ switch len(rcvr) {
+ case 0:
+ p.error("method has no receiver")
+ default:
+ p.error("method has multiple receivers")
+ fallthrough
+ case 1:
+ f.Recv = rcvr[0]
+ }
+ }
+
+ if p.tok != _Name {
+ p.syntaxError("expecting name or (")
+ p.advance(_Lbrace, _Semi)
+ return nil
+ }
+
+ f.Name = p.name()
+
+ context := ""
+ if f.Recv != nil && p.mode&AllowMethodTypeParams == 0 {
+ context = "method" // don't permit (method) type parameters in funcType
+ }
+ f.TParamList, f.Type = p.funcType(context)
+
+ if p.tok == _Lbrace {
+ f.Body = p.funcBody()
+ }
+
+ return f
+}
+
+func (p *parser) funcBody() *BlockStmt {
+ p.fnest++
+ errcnt := p.errcnt
+ body := p.blockStmt("")
+ p.fnest--
+
+ // Don't check branches if there were syntax errors in the function
+ // as it may lead to spurious errors (e.g., see test/switch2.go) or
+ // possibly crashes due to incomplete syntax trees.
+ if p.mode&CheckBranches != 0 && errcnt == p.errcnt {
+ checkBranches(body, p.errh)
+ }
+
+ return body
+}
+
+// ----------------------------------------------------------------------------
+// Expressions
+
+func (p *parser) expr() Expr {
+ if trace {
+ defer p.trace("expr")()
+ }
+
+ return p.binaryExpr(nil, 0)
+}
+
+// Expression = UnaryExpr | Expression binary_op Expression .
+func (p *parser) binaryExpr(x Expr, prec int) Expr {
+ // don't trace binaryExpr - only leads to overly nested trace output
+
+ if x == nil {
+ x = p.unaryExpr()
+ }
+ for (p.tok == _Operator || p.tok == _Star) && p.prec > prec {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = p.op
+ tprec := p.prec
+ p.next()
+ t.X = x
+ t.Y = p.binaryExpr(nil, tprec)
+ x = t
+ }
+ return x
+}
+
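Note: binaryExpr above is a precedence-climbing loop: it keeps consuming operators whose precedence is strictly greater than prec and recurses for the right operand, which makes equal-precedence operators associate to the left. A standalone sketch of the same scheme (assumed token layout, evaluating integers instead of building Operation nodes):

package main

import "fmt"

// tok is either a number (op == 0) or a left-associative binary operator.
type tok struct {
	op   byte // '+' or '*', 0 for a number
	val  int
	prec int
}

// parse evaluates toks starting at index i, consuming operators whose
// precedence is strictly greater than minPrec (mirroring p.prec > prec).
func parse(toks []tok, i, minPrec int) (int, int) {
	x := toks[i].val
	i++
	for i < len(toks) && toks[i].op != 0 && toks[i].prec > minPrec {
		op, prec := toks[i].op, toks[i].prec
		y, next := parse(toks, i+1, prec)
		if op == '+' {
			x += y
		} else {
			x *= y
		}
		i = next
	}
	return x, i
}

func main() {
	// 1 + 2 * 3
	toks := []tok{{val: 1}, {op: '+', prec: 1}, {val: 2}, {op: '*', prec: 2}, {val: 3}}
	v, _ := parse(toks, 0, 0)
	fmt.Println(v) // 7
}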
+// UnaryExpr = PrimaryExpr | unary_op UnaryExpr .
+func (p *parser) unaryExpr() Expr {
+ if trace {
+ defer p.trace("unaryExpr")()
+ }
+
+ switch p.tok {
+ case _Operator, _Star:
+ switch p.op {
+ case Mul, Add, Sub, Not, Xor:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = p.op
+ p.next()
+ x.X = p.unaryExpr()
+ return x
+
+ case And:
+ x := new(Operation)
+ x.pos = p.pos()
+ x.Op = And
+ p.next()
+ // unaryExpr may have returned a parenthesized composite literal
+ // (see comment in operand) - remove parentheses if any
+ x.X = unparen(p.unaryExpr())
+ return x
+ }
+
+ case _Arrow:
+ // receive op (<-x) or receive-only channel (<-chan E)
+ pos := p.pos()
+ p.next()
+
+ // If the next token is _Chan we still don't know if it is
+ // a channel (<-chan int) or a receive op (<-chan int(ch)).
+ // We only know once we have found the end of the unaryExpr.
+
+ x := p.unaryExpr()
+
+ // There are two cases:
+ //
+ // <-chan... => <-x is a channel type
+ // <-x => <-x is a receive operation
+ //
+ // In the first case, <- must be re-associated with
+ // the channel type parsed already:
+ //
+ // <-(chan E) => (<-chan E)
+ // <-(chan<-E) => (<-chan (<-E))
+
+ if _, ok := x.(*ChanType); ok {
+ // x is a channel type => re-associate <-
+ dir := SendOnly
+ t := x
+ for dir == SendOnly {
+ c, ok := t.(*ChanType)
+ if !ok {
+ break
+ }
+ dir = c.Dir
+ if dir == RecvOnly {
+ // t is type <-chan E but <-<-chan E is not permitted
+ // (report same error as for "type _ <-<-chan E")
+ p.syntaxError("unexpected <-, expecting chan")
+ // already progressed, no need to advance
+ }
+ c.Dir = RecvOnly
+ t = c.Elem
+ }
+ if dir == SendOnly {
+ // channel dir is <- but channel element E is not a channel
+ // (report same error as for "type _ <-chan<-E")
+ p.syntaxError(fmt.Sprintf("unexpected %s, expecting chan", String(t)))
+ // already progressed, no need to advance
+ }
+ return x
+ }
+
+ // x is not a channel type => we have a receive op
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Recv
+ o.X = x
+ return o
+ }
+
+ // TODO(mdempsky): We need parens here so we can report an
+ // error for "(x) := true". It should be possible to detect
+ // and reject that more efficiently though.
+ return p.pexpr(nil, true)
+}
+
+// callStmt parses call-like statements that can be preceded by 'defer' and 'go'.
+func (p *parser) callStmt() *CallStmt {
+ if trace {
+ defer p.trace("callStmt")()
+ }
+
+ s := new(CallStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok // _Defer or _Go
+ p.next()
+
+ x := p.pexpr(nil, p.tok == _Lparen) // keep_parens so we can report error below
+ if t := unparen(x); t != x {
+ p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must not be parenthesized", s.Tok))
+ // already progressed, no need to advance
+ x = t
+ }
+
+ cx, ok := x.(*CallExpr)
+ if !ok {
+ p.errorAt(x.Pos(), fmt.Sprintf("expression in %s must be function call", s.Tok))
+ // already progressed, no need to advance
+ cx = new(CallExpr)
+ cx.pos = x.Pos()
+ cx.Fun = x // assume common error of missing parentheses (function invocation)
+ }
+
+ s.Call = cx
+ return s
+}
+
+// Operand = Literal | OperandName | MethodExpr | "(" Expression ")" .
+// Literal = BasicLit | CompositeLit | FunctionLit .
+// BasicLit = int_lit | float_lit | imaginary_lit | rune_lit | string_lit .
+// OperandName = identifier | QualifiedIdent.
+func (p *parser) operand(keep_parens bool) Expr {
+ if trace {
+ defer p.trace("operand " + p.tok.String())()
+ }
+
+ switch p.tok {
+ case _Name:
+ return p.name()
+
+ case _Literal:
+ return p.oliteral()
+
+ case _Lparen:
+ pos := p.pos()
+ p.next()
+ p.xnest++
+ x := p.expr()
+ p.xnest--
+ p.want(_Rparen)
+
+ // Optimization: Record presence of ()'s only where needed
+ // for error reporting. Don't bother in other cases; it is
+ // just a waste of memory and time.
+ //
+ // Parentheses are not permitted around T in a composite
+ // literal T{}. If the next token is a {, assume x is a
+ // composite literal type T (it may not be, { could be
+ // the opening brace of a block, but we don't know yet).
+ if p.tok == _Lbrace {
+ keep_parens = true
+ }
+
+ // Parentheses are also not permitted around the expression
+ // in a go/defer statement. In that case, operand is called
+ // with keep_parens set.
+ if keep_parens {
+ px := new(ParenExpr)
+ px.pos = pos
+ px.X = x
+ x = px
+ }
+ return x
+
+ case _Func:
+ pos := p.pos()
+ p.next()
+ _, ftyp := p.funcType("function literal")
+ if p.tok == _Lbrace {
+ p.xnest++
+
+ f := new(FuncLit)
+ f.pos = pos
+ f.Type = ftyp
+ f.Body = p.funcBody()
+
+ p.xnest--
+ return f
+ }
+ return ftyp
+
+ case _Lbrack, _Chan, _Map, _Struct, _Interface:
+ return p.type_() // othertype
+
+ default:
+ x := p.badExpr()
+ p.syntaxError("expecting expression")
+ p.advance(_Rparen, _Rbrack, _Rbrace)
+ return x
+ }
+
+ // Syntactically, composite literals are operands. Because a complit
+ // type may be a qualified identifier which is handled by pexpr
+ // (together with selector expressions), complits are parsed there
+ // as well (operand is only called from pexpr).
+}
+
+// PrimaryExpr =
+// Operand |
+// Conversion |
+// PrimaryExpr Selector |
+// PrimaryExpr Index |
+// PrimaryExpr Slice |
+// PrimaryExpr TypeAssertion |
+// PrimaryExpr Arguments .
+//
+// Selector = "." identifier .
+// Index = "[" Expression "]" .
+// Slice = "[" ( [ Expression ] ":" [ Expression ] ) |
+// ( [ Expression ] ":" Expression ":" Expression )
+// "]" .
+// TypeAssertion = "." "(" Type ")" .
+// Arguments = "(" [ ( ExpressionList | Type [ "," ExpressionList ] ) [ "..." ] [ "," ] ] ")" .
+func (p *parser) pexpr(x Expr, keep_parens bool) Expr {
+ if trace {
+ defer p.trace("pexpr")()
+ }
+
+ if x == nil {
+ x = p.operand(keep_parens)
+ }
+
+loop:
+ for {
+ pos := p.pos()
+ switch p.tok {
+ case _Dot:
+ p.next()
+ switch p.tok {
+ case _Name:
+ // pexpr '.' sym
+ t := new(SelectorExpr)
+ t.pos = pos
+ t.X = x
+ t.Sel = p.name()
+ x = t
+
+ case _Lparen:
+ p.next()
+ if p.got(_Type) {
+ t := new(TypeSwitchGuard)
+ // t.Lhs is filled in by parser.simpleStmt
+ t.pos = pos
+ t.X = x
+ x = t
+ } else {
+ t := new(AssertExpr)
+ t.pos = pos
+ t.X = x
+ t.Type = p.type_()
+ x = t
+ }
+ p.want(_Rparen)
+
+ default:
+ p.syntaxError("expecting name or (")
+ p.advance(_Semi, _Rparen)
+ }
+
+ case _Lbrack:
+ p.next()
+
+ if p.tok == _Rbrack {
+ // invalid empty instance, slice or index expression; accept but complain
+ p.syntaxError("expecting operand")
+ p.next()
+ break
+ }
+
+ var i Expr
+ if p.tok != _Colon {
+ if p.mode&AllowGenerics == 0 {
+ p.xnest++
+ i = p.expr()
+ p.xnest--
+ if p.got(_Rbrack) {
+ // x[i]
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = x
+ t.Index = i
+ x = t
+ break
+ }
+ } else {
+ var comma bool
+ i, comma = p.typeList()
+ if comma || p.tok == _Rbrack {
+ p.want(_Rbrack)
+ // x[i,] or x[i, j, ...]
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = x
+ t.Index = i
+ x = t
+ break
+ }
+ }
+ }
+
+ // x[i:...
+ // For better error message, don't simply use p.want(_Colon) here (issue #47704).
+ if !p.got(_Colon) {
+ if p.mode&AllowGenerics == 0 {
+ p.syntaxError("expecting : or ]")
+ p.advance(_Colon, _Rbrack)
+ } else {
+ p.syntaxError("expecting comma, : or ]")
+ p.advance(_Comma, _Colon, _Rbrack)
+ }
+ }
+ p.xnest++
+ t := new(SliceExpr)
+ t.pos = pos
+ t.X = x
+ t.Index[0] = i
+ if p.tok != _Colon && p.tok != _Rbrack {
+ // x[i:j...
+ t.Index[1] = p.expr()
+ }
+ if p.tok == _Colon {
+ t.Full = true
+ // x[i:j:...]
+ if t.Index[1] == nil {
+ p.error("middle index required in 3-index slice")
+ t.Index[1] = p.badExpr()
+ }
+ p.next()
+ if p.tok != _Rbrack {
+ // x[i:j:k...
+ t.Index[2] = p.expr()
+ } else {
+ p.error("final index required in 3-index slice")
+ t.Index[2] = p.badExpr()
+ }
+ }
+ p.xnest--
+ p.want(_Rbrack)
+ x = t
+
+ case _Lparen:
+ t := new(CallExpr)
+ t.pos = pos
+ p.next()
+ t.Fun = x
+ t.ArgList, t.HasDots = p.argList()
+ x = t
+
+ case _Lbrace:
+ // operand may have returned a parenthesized complit
+ // type; accept it but complain if we have a complit
+ t := unparen(x)
+ // determine if '{' belongs to a composite literal or a block statement
+ complit_ok := false
+ switch t.(type) {
+ case *Name, *SelectorExpr:
+ if p.xnest >= 0 {
+ // x is possibly a composite literal type
+ complit_ok = true
+ }
+ case *IndexExpr:
+ if p.xnest >= 0 && !isValue(t) {
+ // x is possibly a composite literal type
+ complit_ok = true
+ }
+ case *ArrayType, *SliceType, *StructType, *MapType:
+ // x is a comptype
+ complit_ok = true
+ }
+ if !complit_ok {
+ break loop
+ }
+ if t != x {
+ p.syntaxError("cannot parenthesize type in composite literal")
+ // already progressed, no need to advance
+ }
+ n := p.complitexpr()
+ n.Type = x
+ x = n
+
+ default:
+ break loop
+ }
+ }
+
+ return x
+}
+
+// isValue reports whether x syntactically must be a value (and not a type) expression.
+func isValue(x Expr) bool {
+ switch x := x.(type) {
+ case *BasicLit, *CompositeLit, *FuncLit, *SliceExpr, *AssertExpr, *TypeSwitchGuard, *CallExpr:
+ return true
+ case *Operation:
+ return x.Op != Mul || x.Y != nil // *T may be a type
+ case *ParenExpr:
+ return isValue(x.X)
+ case *IndexExpr:
+ return isValue(x.X) || isValue(x.Index)
+ }
+ return false
+}
+
+// Element = Expression | LiteralValue .
+func (p *parser) bare_complitexpr() Expr {
+ if trace {
+ defer p.trace("bare_complitexpr")()
+ }
+
+ if p.tok == _Lbrace {
+ // '{' start_complit braced_keyval_list '}'
+ return p.complitexpr()
+ }
+
+ return p.expr()
+}
+
+// LiteralValue = "{" [ ElementList [ "," ] ] "}" .
+func (p *parser) complitexpr() *CompositeLit {
+ if trace {
+ defer p.trace("complitexpr")()
+ }
+
+ x := new(CompositeLit)
+ x.pos = p.pos()
+
+ p.xnest++
+ p.want(_Lbrace)
+ x.Rbrace = p.list(_Comma, _Rbrace, func() bool {
+ // value
+ e := p.bare_complitexpr()
+ if p.tok == _Colon {
+ // key ':' value
+ l := new(KeyValueExpr)
+ l.pos = p.pos()
+ p.next()
+ l.Key = e
+ l.Value = p.bare_complitexpr()
+ e = l
+ x.NKeys++
+ }
+ x.ElemList = append(x.ElemList, e)
+ return false
+ })
+ p.xnest--
+
+ return x
+}
+
+// ----------------------------------------------------------------------------
+// Types
+
+func (p *parser) type_() Expr {
+ if trace {
+ defer p.trace("type_")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("expecting type")
+ p.advance(_Comma, _Colon, _Semi, _Rparen, _Rbrack, _Rbrace)
+ }
+
+ return typ
+}
+
+func newIndirect(pos Pos, typ Expr) Expr {
+ o := new(Operation)
+ o.pos = pos
+ o.Op = Mul
+ o.X = typ
+ return o
+}
+
+// typeOrNil is like type_ but it returns nil if there was no type
+// instead of reporting an error.
+//
+// Type = TypeName | TypeLit | "(" Type ")" .
+// TypeName = identifier | QualifiedIdent .
+// TypeLit = ArrayType | StructType | PointerType | FunctionType | InterfaceType |
+// SliceType | MapType | Channel_Type .
+func (p *parser) typeOrNil() Expr {
+ if trace {
+ defer p.trace("typeOrNil")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Star:
+ // ptrtype
+ p.next()
+ return newIndirect(pos, p.type_())
+
+ case _Arrow:
+ // recvchantype
+ p.next()
+ p.want(_Chan)
+ t := new(ChanType)
+ t.pos = pos
+ t.Dir = RecvOnly
+ t.Elem = p.chanElem()
+ return t
+
+ case _Func:
+ // fntype
+ p.next()
+ _, t := p.funcType("function type")
+ return t
+
+ case _Lbrack:
+ // '[' oexpr ']' ntype
+ // '[' _DotDotDot ']' ntype
+ p.next()
+ if p.got(_Rbrack) {
+ return p.sliceType(pos)
+ }
+ return p.arrayType(pos, nil)
+
+ case _Chan:
+ // _Chan non_recvchantype
+ // _Chan _Comm ntype
+ p.next()
+ t := new(ChanType)
+ t.pos = pos
+ if p.got(_Arrow) {
+ t.Dir = SendOnly
+ }
+ t.Elem = p.chanElem()
+ return t
+
+ case _Map:
+ // _Map '[' ntype ']' ntype
+ p.next()
+ p.want(_Lbrack)
+ t := new(MapType)
+ t.pos = pos
+ t.Key = p.type_()
+ p.want(_Rbrack)
+ t.Value = p.type_()
+ return t
+
+ case _Struct:
+ return p.structType()
+
+ case _Interface:
+ return p.interfaceType()
+
+ case _Name:
+ return p.qualifiedName(nil)
+
+ case _Lparen:
+ p.next()
+ t := p.type_()
+ p.want(_Rparen)
+ return t
+ }
+
+ return nil
+}
+
+func (p *parser) typeInstance(typ Expr) Expr {
+ if trace {
+ defer p.trace("typeInstance")()
+ }
+
+ pos := p.pos()
+ p.want(_Lbrack)
+ x := new(IndexExpr)
+ x.pos = pos
+ x.X = typ
+ if p.tok == _Rbrack {
+ p.syntaxError("expecting type")
+ x.Index = p.badExpr()
+ } else {
+ x.Index, _ = p.typeList()
+ }
+ p.want(_Rbrack)
+ return x
+}
+
+// If context != "", type parameters are not permitted.
+func (p *parser) funcType(context string) ([]*Field, *FuncType) {
+ if trace {
+ defer p.trace("funcType")()
+ }
+
+ typ := new(FuncType)
+ typ.pos = p.pos()
+
+ var tparamList []*Field
+ if p.allowGenerics() && p.got(_Lbrack) {
+ if context != "" {
+ // accept but complain
+ p.syntaxErrorAt(typ.pos, context+" must have no type parameters")
+ }
+ if p.tok == _Rbrack {
+ p.syntaxError("empty type parameter list")
+ p.next()
+ } else {
+ tparamList = p.paramList(nil, nil, _Rbrack, true)
+ }
+ }
+
+ p.want(_Lparen)
+ typ.ParamList = p.paramList(nil, nil, _Rparen, false)
+ typ.ResultList = p.funcResult()
+
+ return tparamList, typ
+}
+
+// "[" has already been consumed, and pos is its position.
+// If len != nil it is the already consumed array length.
+func (p *parser) arrayType(pos Pos, len Expr) Expr {
+ if trace {
+ defer p.trace("arrayType")()
+ }
+
+ if len == nil && !p.got(_DotDotDot) {
+ p.xnest++
+ len = p.expr()
+ p.xnest--
+ }
+ if p.tok == _Comma {
+ // Trailing commas are accepted in type parameter
+ // lists but not in array type declarations.
+ // Accept for better error handling but complain.
+ p.syntaxError("unexpected comma; expecting ]")
+ p.next()
+ }
+ p.want(_Rbrack)
+ t := new(ArrayType)
+ t.pos = pos
+ t.Len = len
+ t.Elem = p.type_()
+ return t
+}
+
+// "[" and "]" have already been consumed, and pos is the position of "[".
+func (p *parser) sliceType(pos Pos) Expr {
+ t := new(SliceType)
+ t.pos = pos
+ t.Elem = p.type_()
+ return t
+}
+
+func (p *parser) chanElem() Expr {
+ if trace {
+ defer p.trace("chanElem")()
+ }
+
+ typ := p.typeOrNil()
+ if typ == nil {
+ typ = p.badExpr()
+ p.syntaxError("missing channel element type")
+ // assume element type is simply absent - don't advance
+ }
+
+ return typ
+}
+
+// StructType = "struct" "{" { FieldDecl ";" } "}" .
+func (p *parser) structType() *StructType {
+ if trace {
+ defer p.trace("structType")()
+ }
+
+ typ := new(StructType)
+ typ.pos = p.pos()
+
+ p.want(_Struct)
+ p.want(_Lbrace)
+ p.list(_Semi, _Rbrace, func() bool {
+ p.fieldDecl(typ)
+ return false
+ })
+
+ return typ
+}
+
+// InterfaceType = "interface" "{" { ( MethodDecl | EmbeddedElem | TypeList ) ";" } "}" .
+// TypeList = "type" Type { "," Type } .
+// TODO(gri) remove TypeList syntax if we accept #45346
+func (p *parser) interfaceType() *InterfaceType {
+ if trace {
+ defer p.trace("interfaceType")()
+ }
+
+ typ := new(InterfaceType)
+ typ.pos = p.pos()
+
+ p.want(_Interface)
+ p.want(_Lbrace)
+ p.list(_Semi, _Rbrace, func() bool {
+ switch p.tok {
+ case _Name:
+ f := p.methodDecl()
+ if f.Name == nil && p.allowGenerics() {
+ f = p.embeddedElem(f)
+ }
+ typ.MethodList = append(typ.MethodList, f)
+ return false
+
+ case _Lparen:
+ // TODO(gri) Need to decide how to adjust this restriction.
+ p.syntaxError("cannot parenthesize embedded type")
+ f := new(Field)
+ f.pos = p.pos()
+ p.next()
+ f.Type = p.qualifiedName(nil)
+ p.want(_Rparen)
+ typ.MethodList = append(typ.MethodList, f)
+ return false
+
+ case _Operator:
+ if p.op == Tilde && p.allowGenerics() {
+ typ.MethodList = append(typ.MethodList, p.embeddedElem(nil))
+ return false
+ }
+
+ default:
+ if p.allowGenerics() {
+ pos := p.pos()
+ if t := p.typeOrNil(); t != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = t
+ typ.MethodList = append(typ.MethodList, p.embeddedElem(f))
+ return false
+ }
+ }
+ }
+
+ if p.allowGenerics() {
+ p.syntaxError("expecting method or embedded element")
+ p.advance(_Semi, _Rbrace)
+ return false
+ }
+
+ p.syntaxError("expecting method or interface name")
+ p.advance(_Semi, _Rbrace)
+ return false
+ })
+
+ return typ
+}
+
+// Result = Parameters | Type .
+func (p *parser) funcResult() []*Field {
+ if trace {
+ defer p.trace("funcResult")()
+ }
+
+ if p.got(_Lparen) {
+ return p.paramList(nil, nil, _Rparen, false)
+ }
+
+ pos := p.pos()
+ if typ := p.typeOrNil(); typ != nil {
+ f := new(Field)
+ f.pos = pos
+ f.Type = typ
+ return []*Field{f}
+ }
+
+ return nil
+}
+
+func (p *parser) addField(styp *StructType, pos Pos, name *Name, typ Expr, tag *BasicLit) {
+ if tag != nil {
+ for i := len(styp.FieldList) - len(styp.TagList); i > 0; i-- {
+ styp.TagList = append(styp.TagList, nil)
+ }
+ styp.TagList = append(styp.TagList, tag)
+ }
+
+ f := new(Field)
+ f.pos = pos
+ f.Name = name
+ f.Type = typ
+ styp.FieldList = append(styp.FieldList, f)
+
+ if debug && tag != nil && len(styp.FieldList) != len(styp.TagList) {
+ panic("inconsistent struct field list")
+ }
+}
+
+// FieldDecl = (IdentifierList Type | AnonymousField) [ Tag ] .
+// AnonymousField = [ "*" ] TypeName .
+// Tag = string_lit .
+func (p *parser) fieldDecl(styp *StructType) {
+ if trace {
+ defer p.trace("fieldDecl")()
+ }
+
+ pos := p.pos()
+ switch p.tok {
+ case _Name:
+ name := p.name()
+ if p.tok == _Dot || p.tok == _Literal || p.tok == _Semi || p.tok == _Rbrace {
+ // embedded type
+ typ := p.qualifiedName(name)
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ break
+ }
+
+ // name1, name2, ... Type [ tag ]
+ names := p.nameList(name)
+ var typ Expr
+
+ // Careful dance: We don't know if we have an embedded instantiated
+ // type T[P1, P2, ...] or a field T of array/slice type [P]E or []E.
+ if p.allowGenerics() && len(names) == 1 && p.tok == _Lbrack {
+ typ = p.arrayOrTArgs()
+ if typ, ok := typ.(*IndexExpr); ok {
+ // embedded type T[P1, P2, ...]
+ typ.X = name // name == names[0]
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+ break
+ }
+ } else {
+ // T P
+ typ = p.type_()
+ }
+
+ tag := p.oliteral()
+
+ for _, name := range names {
+ p.addField(styp, name.Pos(), name, typ, tag)
+ }
+
+ case _Star:
+ p.next()
+ var typ Expr
+ if p.tok == _Lparen {
+ // *(T)
+ p.syntaxError("cannot parenthesize embedded type")
+ p.next()
+ typ = p.qualifiedName(nil)
+ p.got(_Rparen) // no need to complain if missing
+ } else {
+ // *T
+ typ = p.qualifiedName(nil)
+ }
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, newIndirect(pos, typ), tag)
+
+ case _Lparen:
+ p.syntaxError("cannot parenthesize embedded type")
+ p.next()
+ var typ Expr
+ if p.tok == _Star {
+ // (*T)
+ pos := p.pos()
+ p.next()
+ typ = newIndirect(pos, p.qualifiedName(nil))
+ } else {
+ // (T)
+ typ = p.qualifiedName(nil)
+ }
+ p.got(_Rparen) // no need to complain if missing
+ tag := p.oliteral()
+ p.addField(styp, pos, nil, typ, tag)
+
+ default:
+ p.syntaxError("expecting field name or embedded type")
+ p.advance(_Semi, _Rbrace)
+ }
+}
+
+func (p *parser) arrayOrTArgs() Expr {
+ if trace {
+ defer p.trace("arrayOrTArgs")()
+ }
+
+ pos := p.pos()
+ p.want(_Lbrack)
+ if p.got(_Rbrack) {
+ return p.sliceType(pos)
+ }
+
+ // x [n]E or x[n,], x[n1, n2], ...
+ n, comma := p.typeList()
+ p.want(_Rbrack)
+ if !comma {
+ if elem := p.typeOrNil(); elem != nil {
+ // x [n]E
+ t := new(ArrayType)
+ t.pos = pos
+ t.Len = n
+ t.Elem = elem
+ return t
+ }
+ }
+
+ // x[n,], x[n1, n2], ...
+ t := new(IndexExpr)
+ t.pos = pos
+ // t.X will be filled in by caller
+ t.Index = n
+ return t
+}
+
+func (p *parser) oliteral() *BasicLit {
+ if p.tok == _Literal {
+ b := new(BasicLit)
+ b.pos = p.pos()
+ b.Value = p.lit
+ b.Kind = p.kind
+ b.Bad = p.bad
+ p.next()
+ return b
+ }
+ return nil
+}
+
+// MethodSpec = MethodName Signature | InterfaceTypeName .
+// MethodName = identifier .
+// InterfaceTypeName = TypeName .
+func (p *parser) methodDecl() *Field {
+ if trace {
+ defer p.trace("methodDecl")()
+ }
+
+ f := new(Field)
+ f.pos = p.pos()
+ name := p.name()
+
+ // accept potential name list but complain
+ // TODO(gri) We probably don't need this special check anymore.
+ // Nobody writes this kind of code. It's from ancient
+ // Go beginnings.
+ hasNameList := false
+ for p.got(_Comma) {
+ p.name()
+ hasNameList = true
+ }
+ if hasNameList {
+ p.syntaxError("name list not allowed in interface type")
+ // already progressed, no need to advance
+ }
+
+ const context = "interface method"
+
+ switch p.tok {
+ case _Lparen:
+ // method
+ f.Name = name
+ _, f.Type = p.funcType(context)
+
+ case _Lbrack:
+ if p.allowGenerics() {
+ // Careful dance: We don't know if we have a generic method m[T C](x T)
+ // or an embedded instantiated type T[P1, P2] (we accept generic methods
+ // for generality and robustness of parsing).
+ pos := p.pos()
+ p.next()
+
+ // Empty type parameter or argument lists are not permitted.
+ // Treat as if [] were absent.
+ if p.tok == _Rbrack {
+ // name[]
+ pos := p.pos()
+ p.next()
+ if p.tok == _Lparen {
+ // name[](
+ p.errorAt(pos, "empty type parameter list")
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ } else {
+ p.errorAt(pos, "empty type argument list")
+ f.Type = name
+ }
+ break
+ }
+
+ // A type argument list looks like a parameter list with only
+ // types. Parse a parameter list and decide afterwards.
+ list := p.paramList(nil, nil, _Rbrack, false)
+ if len(list) == 0 {
+ // The type parameter list is not [] but we got nothing
+ // due to other errors (reported by paramList). Treat
+ // as if [] were absent.
+ if p.tok == _Lparen {
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ } else {
+ f.Type = name
+ }
+ break
+ }
+
+ // len(list) > 0
+ if list[0].Name != nil {
+ // generic method
+ f.Name = name
+ _, f.Type = p.funcType(context)
+ // TODO(gri) Record list as type parameter list with f.Type
+ // if we want to type-check the generic method.
+ // For now, report an error so this is not a silent event.
+ p.errorAt(pos, "interface method must have no type parameters")
+ break
+ }
+
+ // embedded instantiated type
+ t := new(IndexExpr)
+ t.pos = pos
+ t.X = name
+ if len(list) == 1 {
+ t.Index = list[0].Type
+ } else {
+ // len(list) > 1
+ l := new(ListExpr)
+ l.pos = list[0].Pos()
+ l.ElemList = make([]Expr, len(list))
+ for i := range list {
+ l.ElemList[i] = list[i].Type
+ }
+ t.Index = l
+ }
+ f.Type = t
+ break
+ }
+ fallthrough
+
+ default:
+ // embedded type
+ f.Type = p.qualifiedName(name)
+ }
+
+ return f
+}
+
+// EmbeddedElem = MethodSpec | EmbeddedTerm { "|" EmbeddedTerm } .
+func (p *parser) embeddedElem(f *Field) *Field {
+ if trace {
+ defer p.trace("embeddedElem")()
+ }
+
+ if f == nil {
+ f = new(Field)
+ f.pos = p.pos()
+ f.Type = p.embeddedTerm()
+ }
+
+ for p.tok == _Operator && p.op == Or {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = Or
+ p.next()
+ t.X = f.Type
+ t.Y = p.embeddedTerm()
+ f.Type = t
+ }
+
+ return f
+}
+
+// EmbeddedTerm = [ "~" ] Type .
+func (p *parser) embeddedTerm() Expr {
+ if trace {
+ defer p.trace("embeddedTerm")()
+ }
+
+ if p.tok == _Operator && p.op == Tilde {
+ t := new(Operation)
+ t.pos = p.pos()
+ t.Op = Tilde
+ p.next()
+ t.X = p.type_()
+ return t
+ }
+
+ t := p.typeOrNil()
+ if t == nil {
+ t = p.badExpr()
+ p.syntaxError("expecting ~ term or type")
+ p.advance(_Operator, _Semi, _Rparen, _Rbrack, _Rbrace)
+ }
+
+ return t
+}
+
+// ParameterDecl = [ IdentifierList ] [ "..." ] Type .
+func (p *parser) paramDeclOrNil(name *Name, follow token) *Field {
+ if trace {
+ defer p.trace("paramDeclOrNil")()
+ }
+
+ // type set notation is ok in type parameter lists
+ typeSetsOk := follow == _Rbrack
+
+ pos := p.pos()
+ if name != nil {
+ pos = name.pos
+ } else if typeSetsOk && p.tok == _Operator && p.op == Tilde {
+ // "~" ...
+ return p.embeddedElem(nil)
+ }
+
+ f := new(Field)
+ f.pos = pos
+
+ if p.tok == _Name || name != nil {
+ // name
+ if name == nil {
+ name = p.name()
+ }
+
+ if p.allowGenerics() && p.tok == _Lbrack {
+ // name "[" ...
+ f.Type = p.arrayOrTArgs()
+ if typ, ok := f.Type.(*IndexExpr); ok {
+ // name "[" ... "]"
+ typ.X = name
+ } else {
+ // name "[" n "]" E
+ f.Name = name
+ }
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "[" ... "]" "|" ...
+ // name "[" n "]" E "|" ...
+ f = p.embeddedElem(f)
+ }
+ return f
+ }
+
+ if p.tok == _Dot {
+ // name "." ...
+ f.Type = p.qualifiedName(name)
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "." name "|" ...
+ f = p.embeddedElem(f)
+ }
+ return f
+ }
+
+ if typeSetsOk && p.tok == _Operator && p.op == Or {
+ // name "|" ...
+ f.Type = name
+ return p.embeddedElem(f)
+ }
+
+ f.Name = name
+ }
+
+ if p.tok == _DotDotDot {
+ // [name] "..." ...
+ t := new(DotsType)
+ t.pos = p.pos()
+ p.next()
+ t.Elem = p.typeOrNil()
+ if t.Elem == nil {
+ t.Elem = p.badExpr()
+ p.syntaxError("... is missing type")
+ }
+ f.Type = t
+ return f
+ }
+
+ if typeSetsOk && p.tok == _Operator && p.op == Tilde {
+ // [name] "~" ...
+ f.Type = p.embeddedElem(nil).Type
+ return f
+ }
+
+ f.Type = p.typeOrNil()
+ if typeSetsOk && p.tok == _Operator && p.op == Or && f.Type != nil {
+ // [name] type "|"
+ f = p.embeddedElem(f)
+ }
+ if f.Name != nil || f.Type != nil {
+ return f
+ }
+
+ p.syntaxError("expecting " + tokstring(follow))
+ p.advance(_Comma, follow)
+ return nil
+}
+
+// Parameters = "(" [ ParameterList [ "," ] ] ")" .
+// ParameterList = ParameterDecl { "," ParameterDecl } .
+// "(" or "[" has already been consumed.
+// If name != nil, it is the first name after "(" or "[".
+// If typ != nil, name must be != nil, and (name, typ) is the first field in the list.
+// In the result list, either all fields have a name, or no field has a name.
+func (p *parser) paramList(name *Name, typ Expr, close token, requireNames bool) (list []*Field) {
+ if trace {
+ defer p.trace("paramList")()
+ }
+
+ // p.list won't invoke its function argument if we're at the end of the
+ // parameter list. If we have a complete field, handle this case here.
+ if name != nil && typ != nil && p.tok == close {
+ p.next()
+ par := new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ return []*Field{par}
+ }
+
+ var named int // number of parameters that have an explicit name and type
+ var typed int // number of parameters that have an explicit type
+ end := p.list(_Comma, close, func() bool {
+ var par *Field
+ if typ != nil {
+ if debug && name == nil {
+ panic("initial type provided without name")
+ }
+ par = new(Field)
+ par.pos = name.pos
+ par.Name = name
+ par.Type = typ
+ } else {
+ par = p.paramDeclOrNil(name, close)
+ }
+ name = nil // 1st name was consumed if present
+ typ = nil // 1st type was consumed if present
+ if par != nil {
+ if debug && par.Name == nil && par.Type == nil {
+ panic("parameter without name or type")
+ }
+ if par.Name != nil && par.Type != nil {
+ named++
+ }
+ if par.Type != nil {
+ typed++
+ }
+ list = append(list, par)
+ }
+ return false
+ })
+
+ if len(list) == 0 {
+ return
+ }
+
+ // distribute parameter types (len(list) > 0)
+ if named == 0 && !requireNames {
+ // all unnamed => found names are named types
+ for _, par := range list {
+ if typ := par.Name; typ != nil {
+ par.Type = typ
+ par.Name = nil
+ }
+ }
+ } else if named != len(list) {
+ // some named => all must have names and types
+ var pos Pos // left-most error position (or unknown)
+ var typ Expr // current type (from right to left)
+ for i := len(list) - 1; i >= 0; i-- {
+ par := list[i]
+ if par.Type != nil {
+ typ = par.Type
+ if par.Name == nil {
+ pos = StartPos(typ)
+ par.Name = NewName(pos, "_")
+ }
+ } else if typ != nil {
+ par.Type = typ
+ } else {
+ // par.Type == nil && typ == nil => we only have a par.Name
+ pos = par.Name.Pos()
+ t := p.badExpr()
+ t.pos = pos // correct position
+ par.Type = t
+ }
+ }
+ if pos.IsKnown() {
+ var msg string
+ if requireNames {
+ if named == typed {
+ pos = end // position error at closing ]
+ msg = "missing type constraint"
+ } else {
+ msg = "type parameters must be named"
+ }
+ } else {
+ msg = "mixed named and unnamed parameters"
+ }
+ p.syntaxErrorAt(pos, msg)
+ }
+ }
+
+ return
+}
+
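Note: for illustration, a few assumed signatures and how paramList's distribution step above treats them (the diagnostics quoted come from the surrounding error checks):

//	func f(a, b int)      // some named: "a" inherits the type of "b" (int)
//	func f(int, string)   // none named: the parsed "names" are re-read as types
//	func f(a int, string) // mixed → "mixed named and unnamed parameters"
//	type T[P any, Q] int  // requireNames, "Q" lacks a constraint → "missing type constraint"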
+func (p *parser) badExpr() *BadExpr {
+ b := new(BadExpr)
+ b.pos = p.pos()
+ return b
+}
+
+// ----------------------------------------------------------------------------
+// Statements
+
+// SimpleStmt = EmptyStmt | ExpressionStmt | SendStmt | IncDecStmt | Assignment | ShortVarDecl .
+func (p *parser) simpleStmt(lhs Expr, keyword token) SimpleStmt {
+ if trace {
+ defer p.trace("simpleStmt")()
+ }
+
+ if keyword == _For && p.tok == _Range {
+ // _Range expr
+ if debug && lhs != nil {
+ panic("invalid call of simpleStmt")
+ }
+ return p.newRangeClause(nil, false)
+ }
+
+ if lhs == nil {
+ lhs = p.exprList()
+ }
+
+ if _, ok := lhs.(*ListExpr); !ok && p.tok != _Assign && p.tok != _Define {
+ // expr
+ pos := p.pos()
+ switch p.tok {
+ case _AssignOp:
+ // lhs op= rhs
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, p.expr())
+
+ case _IncOp:
+ // lhs++ or lhs--
+ op := p.op
+ p.next()
+ return p.newAssignStmt(pos, op, lhs, nil)
+
+ case _Arrow:
+ // lhs <- rhs
+ s := new(SendStmt)
+ s.pos = pos
+ p.next()
+ s.Chan = lhs
+ s.Value = p.expr()
+ return s
+
+ default:
+ // expr
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+ }
+
+ // expr_list
+ switch p.tok {
+ case _Assign, _Define:
+ pos := p.pos()
+ var op Operator
+ if p.tok == _Define {
+ op = Def
+ }
+ p.next()
+
+ if keyword == _For && p.tok == _Range {
+ // expr_list op= _Range expr
+ return p.newRangeClause(lhs, op == Def)
+ }
+
+ // expr_list op= expr_list
+ rhs := p.exprList()
+
+ if x, ok := rhs.(*TypeSwitchGuard); ok && keyword == _Switch && op == Def {
+ if lhs, ok := lhs.(*Name); ok {
+ // switch … lhs := rhs.(type)
+ x.Lhs = lhs
+ s := new(ExprStmt)
+ s.pos = x.Pos()
+ s.X = x
+ return s
+ }
+ }
+
+ return p.newAssignStmt(pos, op, lhs, rhs)
+
+ default:
+ p.syntaxError("expecting := or = or comma")
+ p.advance(_Semi, _Rbrace)
+ // make the best of what we have
+ if x, ok := lhs.(*ListExpr); ok {
+ lhs = x.ElemList[0]
+ }
+ s := new(ExprStmt)
+ s.pos = lhs.Pos()
+ s.X = lhs
+ return s
+ }
+}
+
+func (p *parser) newRangeClause(lhs Expr, def bool) *RangeClause {
+ r := new(RangeClause)
+ r.pos = p.pos()
+ p.next() // consume _Range
+ r.Lhs = lhs
+ r.Def = def
+ r.X = p.expr()
+ return r
+}
+
+func (p *parser) newAssignStmt(pos Pos, op Operator, lhs, rhs Expr) *AssignStmt {
+ a := new(AssignStmt)
+ a.pos = pos
+ a.Op = op
+ a.Lhs = lhs
+ a.Rhs = rhs
+ return a
+}
+
+func (p *parser) labeledStmtOrNil(label *Name) Stmt {
+ if trace {
+ defer p.trace("labeledStmt")()
+ }
+
+ s := new(LabeledStmt)
+ s.pos = p.pos()
+ s.Label = label
+
+ p.want(_Colon)
+
+ if p.tok == _Rbrace {
+ // We expect a statement (incl. an empty statement), which must be
+ // terminated by a semicolon. Because semicolons may be omitted before
+ // an _Rbrace, seeing an _Rbrace implies an empty statement.
+ e := new(EmptyStmt)
+ e.pos = p.pos()
+ s.Stmt = e
+ return s
+ }
+
+ s.Stmt = p.stmtOrNil()
+ if s.Stmt != nil {
+ return s
+ }
+
+ // report error at line of ':' token
+ p.syntaxErrorAt(s.pos, "missing statement after label")
+ // we are already at the end of the labeled statement - no need to advance
+ return nil // avoids follow-on errors (see e.g., fixedbugs/bug274.go)
+}
+
+// context must be a non-empty string unless we know that p.tok == _Lbrace.
+func (p *parser) blockStmt(context string) *BlockStmt {
+ if trace {
+ defer p.trace("blockStmt")()
+ }
+
+ s := new(BlockStmt)
+ s.pos = p.pos()
+
+ // people coming from C may forget that braces are mandatory in Go
+ if !p.got(_Lbrace) {
+ p.syntaxError("expecting { after " + context)
+ p.advance(_Name, _Rbrace)
+ s.Rbrace = p.pos() // in case we found "}"
+ if p.got(_Rbrace) {
+ return s
+ }
+ }
+
+ s.List = p.stmtList()
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) declStmt(f func(*Group) Decl) *DeclStmt {
+ if trace {
+ defer p.trace("declStmt")()
+ }
+
+ s := new(DeclStmt)
+ s.pos = p.pos()
+
+ p.next() // _Const, _Type, or _Var
+ s.DeclList = p.appendGroup(nil, f)
+
+ return s
+}
+
+func (p *parser) forStmt() Stmt {
+ if trace {
+ defer p.trace("forStmt")()
+ }
+
+ s := new(ForStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, s.Post = p.header(_For)
+ s.Body = p.blockStmt("for clause")
+
+ return s
+}
+
+func (p *parser) header(keyword token) (init SimpleStmt, cond Expr, post SimpleStmt) {
+ p.want(keyword)
+
+ if p.tok == _Lbrace {
+ if keyword == _If {
+ p.syntaxError("missing condition in if statement")
+ cond = p.badExpr()
+ }
+ return
+ }
+ // p.tok != _Lbrace
+
+ outer := p.xnest
+ p.xnest = -1
+
+ if p.tok != _Semi {
+ // accept potential varDecl but complain
+ if p.got(_Var) {
+ p.syntaxError(fmt.Sprintf("var declaration not allowed in %s initializer", tokstring(keyword)))
+ }
+ init = p.simpleStmt(nil, keyword)
+ // If we have a range clause, we are done (can only happen for keyword == _For).
+ if _, ok := init.(*RangeClause); ok {
+ p.xnest = outer
+ return
+ }
+ }
+
+ var condStmt SimpleStmt
+ var semi struct {
+ pos Pos
+ lit string // valid if pos.IsKnown()
+ }
+ if p.tok != _Lbrace {
+ if p.tok == _Semi {
+ semi.pos = p.pos()
+ semi.lit = p.lit
+ p.next()
+ } else {
+ // asking for a '{' rather than a ';' here leads to a better error message
+ p.want(_Lbrace)
+ if p.tok != _Lbrace {
+ p.advance(_Lbrace, _Rbrace) // for better synchronization (e.g., issue #22581)
+ }
+ }
+ if keyword == _For {
+ if p.tok != _Semi {
+ if p.tok == _Lbrace {
+ p.syntaxError("expecting for loop condition")
+ goto done
+ }
+ condStmt = p.simpleStmt(nil, 0 /* range not permitted */)
+ }
+ p.want(_Semi)
+ if p.tok != _Lbrace {
+ post = p.simpleStmt(nil, 0 /* range not permitted */)
+ if a, _ := post.(*AssignStmt); a != nil && a.Op == Def {
+ p.syntaxErrorAt(a.Pos(), "cannot declare in post statement of for loop")
+ }
+ }
+ } else if p.tok != _Lbrace {
+ condStmt = p.simpleStmt(nil, keyword)
+ }
+ } else {
+ condStmt = init
+ init = nil
+ }
+
+done:
+ // unpack condStmt
+ switch s := condStmt.(type) {
+ case nil:
+ if keyword == _If && semi.pos.IsKnown() {
+ if semi.lit != "semicolon" {
+ p.syntaxErrorAt(semi.pos, fmt.Sprintf("unexpected %s, expecting { after if clause", semi.lit))
+ } else {
+ p.syntaxErrorAt(semi.pos, "missing condition in if statement")
+ }
+ b := new(BadExpr)
+ b.pos = semi.pos
+ cond = b
+ }
+ case *ExprStmt:
+ cond = s.X
+ default:
+ // A common syntax error is to write '=' instead of '==',
+ // which turns an expression into an assignment. Provide
+ // a more explicit error message in that case to prevent
+ // further confusion.
+ var str string
+ if as, ok := s.(*AssignStmt); ok && as.Op == 0 {
+ // Emphasize Lhs and Rhs of assignment with parentheses to highlight '='.
+ // Do it always - it's not worth going through the trouble of doing it
+ // only for "complex" left and right sides.
+ str = "assignment (" + String(as.Lhs) + ") = (" + String(as.Rhs) + ")"
+ } else {
+ str = String(s)
+ }
+ p.syntaxErrorAt(s.Pos(), fmt.Sprintf("cannot use %s as value", str))
+ }
+
+ p.xnest = outer
+ return
+}
+
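Note: a few assumed inputs and the behavior expected from header above (illustrative only):

//	for x := range c {} // init parses as a RangeClause → header returns immediately
//	if x = 1 {}         // assignment used as condition → "cannot use assignment (x) = (1) as value"
//	if ; {}             // leading semicolon, no condition → "missing condition in if statement"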
+func (p *parser) ifStmt() *IfStmt {
+ if trace {
+ defer p.trace("ifStmt")()
+ }
+
+ s := new(IfStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Cond, _ = p.header(_If)
+ s.Then = p.blockStmt("if clause")
+
+ if p.got(_Else) {
+ switch p.tok {
+ case _If:
+ s.Else = p.ifStmt()
+ case _Lbrace:
+ s.Else = p.blockStmt("")
+ default:
+ p.syntaxError("else must be followed by if or statement block")
+ p.advance(_Name, _Rbrace)
+ }
+ }
+
+ return s
+}
+
+func (p *parser) switchStmt() *SwitchStmt {
+ if trace {
+ defer p.trace("switchStmt")()
+ }
+
+ s := new(SwitchStmt)
+ s.pos = p.pos()
+
+ s.Init, s.Tag, _ = p.header(_Switch)
+
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after switch clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.caseClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) selectStmt() *SelectStmt {
+ if trace {
+ defer p.trace("selectStmt")()
+ }
+
+ s := new(SelectStmt)
+ s.pos = p.pos()
+
+ p.want(_Select)
+ if !p.got(_Lbrace) {
+ p.syntaxError("missing { after select clause")
+ p.advance(_Case, _Default, _Rbrace)
+ }
+ for p.tok != _EOF && p.tok != _Rbrace {
+ s.Body = append(s.Body, p.commClause())
+ }
+ s.Rbrace = p.pos()
+ p.want(_Rbrace)
+
+ return s
+}
+
+func (p *parser) caseClause() *CaseClause {
+ if trace {
+ defer p.trace("caseClause")()
+ }
+
+ c := new(CaseClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Cases = p.exprList()
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expecting case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+func (p *parser) commClause() *CommClause {
+ if trace {
+ defer p.trace("commClause")()
+ }
+
+ c := new(CommClause)
+ c.pos = p.pos()
+
+ switch p.tok {
+ case _Case:
+ p.next()
+ c.Comm = p.simpleStmt(nil, 0)
+
+ // The syntax restricts the possible simple statements here to:
+ //
+ // lhs <- x (send statement)
+ // <-x
+ // lhs = <-x
+ // lhs := <-x
+ //
+ // All these (and more) are recognized by simpleStmt and invalid
+ // syntax trees are flagged later, during type checking.
+ // TODO(gri) eventually may want to restrict valid syntax trees
+ // here.
+
+ case _Default:
+ p.next()
+
+ default:
+ p.syntaxError("expecting case or default or }")
+ p.advance(_Colon, _Case, _Default, _Rbrace)
+ }
+
+ c.Colon = p.pos()
+ p.want(_Colon)
+ c.Body = p.stmtList()
+
+ return c
+}
+
+// Statement =
+// Declaration | LabeledStmt | SimpleStmt |
+// GoStmt | ReturnStmt | BreakStmt | ContinueStmt | GotoStmt |
+// FallthroughStmt | Block | IfStmt | SwitchStmt | SelectStmt | ForStmt |
+// DeferStmt .
+func (p *parser) stmtOrNil() Stmt {
+ if trace {
+ defer p.trace("stmt " + p.tok.String())()
+ }
+
+ // Most statements (assignments) start with an identifier;
+ // look for it first before doing anything more expensive.
+ if p.tok == _Name {
+ p.clearPragma()
+ lhs := p.exprList()
+ if label, ok := lhs.(*Name); ok && p.tok == _Colon {
+ return p.labeledStmtOrNil(label)
+ }
+ return p.simpleStmt(lhs, 0)
+ }
+
+ switch p.tok {
+ case _Var:
+ return p.declStmt(p.varDecl)
+
+ case _Const:
+ return p.declStmt(p.constDecl)
+
+ case _Type:
+ return p.declStmt(p.typeDecl)
+ }
+
+ p.clearPragma()
+
+ switch p.tok {
+ case _Lbrace:
+ return p.blockStmt("")
+
+ case _Operator, _Star:
+ switch p.op {
+ case Add, Sub, Mul, And, Xor, Not:
+ return p.simpleStmt(nil, 0) // unary operators
+ }
+
+ case _Literal, _Func, _Lparen, // operands
+ _Lbrack, _Struct, _Map, _Chan, _Interface, // composite types
+ _Arrow: // receive operator
+ return p.simpleStmt(nil, 0)
+
+ case _For:
+ return p.forStmt()
+
+ case _Switch:
+ return p.switchStmt()
+
+ case _Select:
+ return p.selectStmt()
+
+ case _If:
+ return p.ifStmt()
+
+ case _Fallthrough:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ p.next()
+ s.Tok = _Fallthrough
+ return s
+
+ case _Break, _Continue:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = p.tok
+ p.next()
+ if p.tok == _Name {
+ s.Label = p.name()
+ }
+ return s
+
+ case _Go, _Defer:
+ return p.callStmt()
+
+ case _Goto:
+ s := new(BranchStmt)
+ s.pos = p.pos()
+ s.Tok = _Goto
+ p.next()
+ s.Label = p.name()
+ return s
+
+ case _Return:
+ s := new(ReturnStmt)
+ s.pos = p.pos()
+ p.next()
+ if p.tok != _Semi && p.tok != _Rbrace {
+ s.Results = p.exprList()
+ }
+ return s
+
+ case _Semi:
+ s := new(EmptyStmt)
+ s.pos = p.pos()
+ return s
+ }
+
+ return nil
+}
+
+// StatementList = { Statement ";" } .
+func (p *parser) stmtList() (l []Stmt) {
+ if trace {
+ defer p.trace("stmtList")()
+ }
+
+ for p.tok != _EOF && p.tok != _Rbrace && p.tok != _Case && p.tok != _Default {
+ s := p.stmtOrNil()
+ p.clearPragma()
+ if s == nil {
+ break
+ }
+ l = append(l, s)
+ // ";" is optional before "}"
+ if !p.got(_Semi) && p.tok != _Rbrace {
+ p.syntaxError("at end of statement")
+ p.advance(_Semi, _Rbrace, _Case, _Default)
+ p.got(_Semi) // avoid spurious empty statement
+ }
+ }
+ return
+}
+
+// argList parses a possibly empty, comma-separated list of arguments,
+// optionally followed by a comma (if not empty), and closed by ")".
+// The last argument may be followed by "...".
+//
+// argList = [ arg { "," arg } [ "..." ] [ "," ] ] ")" .
+func (p *parser) argList() (list []Expr, hasDots bool) {
+ if trace {
+ defer p.trace("argList")()
+ }
+
+ p.xnest++
+ p.list(_Comma, _Rparen, func() bool {
+ list = append(list, p.expr())
+ hasDots = p.got(_DotDotDot)
+ return hasDots
+ })
+ p.xnest--
+
+ return
+}
+
+// ----------------------------------------------------------------------------
+// Common productions
+
+func (p *parser) name() *Name {
+ // no tracing to avoid overly verbose output
+
+ if p.tok == _Name {
+ n := NewName(p.pos(), p.lit)
+ p.next()
+ return n
+ }
+
+ n := NewName(p.pos(), "_")
+ p.syntaxError("expecting name")
+ p.advance()
+ return n
+}
+
+// IdentifierList = identifier { "," identifier } .
+// The first name must be provided.
+func (p *parser) nameList(first *Name) []*Name {
+ if trace {
+ defer p.trace("nameList")()
+ }
+
+ if debug && first == nil {
+ panic("first name not provided")
+ }
+
+ l := []*Name{first}
+ for p.got(_Comma) {
+ l = append(l, p.name())
+ }
+
+ return l
+}
+
+// The first name may be provided, or nil.
+func (p *parser) qualifiedName(name *Name) Expr {
+ if trace {
+ defer p.trace("qualifiedName")()
+ }
+
+ var x Expr
+ switch {
+ case name != nil:
+ x = name
+ case p.tok == _Name:
+ x = p.name()
+ default:
+ x = NewName(p.pos(), "_")
+ p.syntaxError("expecting name")
+ p.advance(_Dot, _Semi, _Rbrace)
+ }
+
+ if p.tok == _Dot {
+ s := new(SelectorExpr)
+ s.pos = p.pos()
+ p.next()
+ s.X = x
+ s.Sel = p.name()
+ x = s
+ }
+
+ if p.allowGenerics() && p.tok == _Lbrack {
+ x = p.typeInstance(x)
+ }
+
+ return x
+}
+
+// ExpressionList = Expression { "," Expression } .
+func (p *parser) exprList() Expr {
+ if trace {
+ defer p.trace("exprList")()
+ }
+
+ x := p.expr()
+ if p.got(_Comma) {
+ list := []Expr{x, p.expr()}
+ for p.got(_Comma) {
+ list = append(list, p.expr())
+ }
+ t := new(ListExpr)
+ t.pos = x.Pos()
+ t.ElemList = list
+ x = t
+ }
+ return x
+}
+
+// typeList parses a non-empty, comma-separated list of expressions,
+// optionally followed by a comma. The first list element may be any
+// expression, all other list elements must be type expressions.
+// If there is more than one argument, the result is a *ListExpr.
+// The comma result indicates whether there was a (separating or
+// trailing) comma.
+//
+// typeList = arg { "," arg } [ "," ] .
+func (p *parser) typeList() (x Expr, comma bool) {
+ if trace {
+ defer p.trace("typeList")()
+ }
+
+ p.xnest++
+ x = p.expr()
+ if p.got(_Comma) {
+ comma = true
+ if t := p.typeOrNil(); t != nil {
+ list := []Expr{x, t}
+ for p.got(_Comma) {
+ if t = p.typeOrNil(); t == nil {
+ break
+ }
+ list = append(list, t)
+ }
+ l := new(ListExpr)
+ l.pos = x.Pos() // == list[0].Pos()
+ l.ElemList = list
+ x = l
+ }
+ }
+ p.xnest--
+ return
+}
+
+// unparen removes all parentheses around an expression.
+func unparen(x Expr) Expr {
+ for {
+ p, ok := x.(*ParenExpr)
+ if !ok {
+ break
+ }
+ x = p.X
+ }
+ return x
+}
diff --git a/src/cmd/compile/internal/syntax/parser_test.go b/src/cmd/compile/internal/syntax/parser_test.go
new file mode 100644
index 0000000..e258a17
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/parser_test.go
@@ -0,0 +1,374 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+ "regexp"
+ "runtime"
+ "strings"
+ "sync"
+ "testing"
+ "time"
+)
+
+var (
+ fast = flag.Bool("fast", false, "parse package files in parallel")
+ verify = flag.Bool("verify", false, "verify idempotent printing")
+ src_ = flag.String("src", "parser.go", "source file to parse")
+ skip = flag.String("skip", "", "files matching this regular expression are skipped by TestStdLib")
+)
+
+func TestParse(t *testing.T) {
+ ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics)
+}
+
+func TestVerify(t *testing.T) {
+ ast, err := ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics)
+ if err != nil {
+ return // error already reported
+ }
+ verifyPrint(t, *src_, ast)
+}
+
+func TestParseGo2(t *testing.T) {
+ dir := filepath.Join(testdata, "go2")
+ list, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, fi := range list {
+ name := fi.Name()
+ if !fi.IsDir() && !strings.HasPrefix(name, ".") {
+ ParseFile(filepath.Join(dir, name), func(err error) { t.Error(err) }, nil, AllowGenerics|AllowMethodTypeParams)
+ }
+ }
+}
+
+func TestStdLib(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ var skipRx *regexp.Regexp
+ if *skip != "" {
+ var err error
+ skipRx, err = regexp.Compile(*skip)
+ if err != nil {
+ t.Fatalf("invalid argument for -skip (%v)", err)
+ }
+ }
+
+ var m1 runtime.MemStats
+ runtime.ReadMemStats(&m1)
+ start := time.Now()
+
+ type parseResult struct {
+ filename string
+ lines uint
+ }
+
+ results := make(chan parseResult)
+ go func() {
+ defer close(results)
+ for _, dir := range []string{
+ runtime.GOROOT(),
+ } {
+ walkDirs(t, dir, func(filename string) {
+ if skipRx != nil && skipRx.MatchString(filename) {
+ // Always report skipped files since regexp
+ // typos can lead to surprising results.
+ fmt.Printf("skipping %s\n", filename)
+ return
+ }
+ if debug {
+ fmt.Printf("parsing %s\n", filename)
+ }
+ ast, err := ParseFile(filename, nil, nil, AllowGenerics)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+ if *verify {
+ verifyPrint(t, filename, ast)
+ }
+ results <- parseResult{filename, ast.EOF.Line()}
+ })
+ }
+ }()
+
+ var count, lines uint
+ for res := range results {
+ count++
+ lines += res.lines
+ if testing.Verbose() {
+ fmt.Printf("%5d %s (%d lines)\n", count, res.filename, res.lines)
+ }
+ }
+
+ dt := time.Since(start)
+ var m2 runtime.MemStats
+ runtime.ReadMemStats(&m2)
+ dm := float64(m2.TotalAlloc-m1.TotalAlloc) / 1e6
+
+ fmt.Printf("parsed %d lines (%d files) in %v (%d lines/s)\n", lines, count, dt, int64(float64(lines)/dt.Seconds()))
+ fmt.Printf("allocated %.3fMb (%.3fMb/s)\n", dm, dm/dt.Seconds())
+}
+
+func walkDirs(t *testing.T, dir string, action func(string)) {
+ fis, err := ioutil.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var files, dirs []string
+ for _, fi := range fis {
+ if fi.Mode().IsRegular() {
+ if strings.HasSuffix(fi.Name(), ".go") {
+ path := filepath.Join(dir, fi.Name())
+ files = append(files, path)
+ }
+ } else if fi.IsDir() && fi.Name() != "testdata" {
+ path := filepath.Join(dir, fi.Name())
+ if !strings.HasSuffix(path, string(filepath.Separator)+"test") {
+ dirs = append(dirs, path)
+ }
+ }
+ }
+
+ if *fast {
+ var wg sync.WaitGroup
+ wg.Add(len(files))
+ for _, filename := range files {
+ go func(filename string) {
+ defer wg.Done()
+ action(filename)
+ }(filename)
+ }
+ wg.Wait()
+ } else {
+ for _, filename := range files {
+ action(filename)
+ }
+ }
+
+ for _, dir := range dirs {
+ walkDirs(t, dir, action)
+ }
+}
+
+func verifyPrint(t *testing.T, filename string, ast1 *File) {
+ var buf1 bytes.Buffer
+ _, err := Fprint(&buf1, ast1, LineForm)
+ if err != nil {
+ panic(err)
+ }
+ bytes1 := buf1.Bytes()
+
+ ast2, err := Parse(NewFileBase(filename), &buf1, nil, nil, 0)
+ if err != nil {
+ panic(err)
+ }
+
+ var buf2 bytes.Buffer
+ _, err = Fprint(&buf2, ast2, LineForm)
+ if err != nil {
+ panic(err)
+ }
+ bytes2 := buf2.Bytes()
+
+	if !bytes.Equal(bytes1, bytes2) {
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", bytes1)
+ fmt.Println()
+
+ fmt.Printf("--- %s ---\n", filename)
+ fmt.Printf("%s\n", bytes2)
+ fmt.Println()
+
+ t.Error("printed syntax trees do not match")
+ }
+}
+
+func TestIssue17697(t *testing.T) {
+ _, err := Parse(nil, bytes.NewReader(nil), nil, nil, 0) // return with parser error, don't panic
+ if err == nil {
+ t.Errorf("no error reported")
+ }
+}
+
+func TestParseFile(t *testing.T) {
+ _, err := ParseFile("", nil, nil, 0)
+ if err == nil {
+ t.Error("missing io error")
+ }
+
+ var first error
+ _, err = ParseFile("", func(err error) {
+ if first == nil {
+ first = err
+ }
+ }, nil, 0)
+ if err == nil || first == nil {
+ t.Error("missing io error")
+ }
+ if err != first {
+ t.Errorf("got %v; want first error %v", err, first)
+ }
+}
+
+// Make sure (PosMax + 1) doesn't overflow when converted to default
+// type int (when passed as argument to fmt.Sprintf) on 32bit platforms
+// (see test cases below).
+var tooLarge int = PosMax + 1
+
+func TestLineDirectives(t *testing.T) {
+ // valid line directives lead to a syntax error after them
+ const valid = "syntax error: package statement must be first"
+ const filename = "directives.go"
+
+ for _, test := range []struct {
+ src, msg string
+ filename string
+ line, col uint // 1-based; 0 means unknown
+ }{
+ // ignored //line directives
+ {"//\n", valid, filename, 2, 1}, // no directive
+ {"//line\n", valid, filename, 2, 1}, // missing colon
+ {"//line foo\n", valid, filename, 2, 1}, // missing colon
+ {" //line foo:\n", valid, filename, 2, 1}, // not a line start
+ {"// line foo:\n", valid, filename, 2, 1}, // space between // and line
+
+ // invalid //line directives with one colon
+ {"//line :\n", "invalid line number: ", filename, 1, 9},
+ {"//line :x\n", "invalid line number: x", filename, 1, 9},
+ {"//line foo :\n", "invalid line number: ", filename, 1, 13},
+ {"//line foo:x\n", "invalid line number: x", filename, 1, 12},
+ {"//line foo:0\n", "invalid line number: 0", filename, 1, 12},
+ {"//line foo:1 \n", "invalid line number: 1 ", filename, 1, 12},
+ {"//line foo:-12\n", "invalid line number: -12", filename, 1, 12},
+ {"//line C:foo:0\n", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("//line foo:%d\n", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid //line directives with two colons
+ {"//line ::\n", "invalid line number: ", filename, 1, 10},
+ {"//line ::x\n", "invalid line number: x", filename, 1, 10},
+ {"//line foo::123abc\n", "invalid line number: 123abc", filename, 1, 13},
+ {"//line foo::0\n", "invalid line number: 0", filename, 1, 13},
+ {"//line foo:0:1\n", "invalid line number: 0", filename, 1, 12},
+
+ {"//line :123:0\n", "invalid column number: 0", filename, 1, 13},
+ {"//line foo:123:0\n", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("//line foo:10:%d\n", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid //line directives on lines
+ {"//line foo:123\n foo", valid, "foo", 123, 0},
+ {"//line foo:123\n foo", valid, " foo", 123, 0},
+ {"//line foo:123\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"//line C:foo:123\n", valid, "C:foo", 123, 0},
+ {"//line /src/a/a.go:123\n foo", valid, "/src/a/a.go", 123, 0},
+ {"//line :x:1\n", valid, ":x", 1, 0},
+ {"//line foo ::1\n", valid, "foo :", 1, 0},
+ {"//line foo:123abc:1\n", valid, "foo:123abc", 1, 0},
+ {"//line foo :123:1\n", valid, "foo ", 123, 1},
+ {"//line ::123\n", valid, ":", 123, 0},
+
+ // effect of valid //line directives on columns
+ {"//line :x:1:10\n", valid, ":x", 1, 10},
+ {"//line foo ::1:2\n", valid, "foo :", 1, 2},
+ {"//line foo:123abc:1:1000\n", valid, "foo:123abc", 1, 1000},
+ {"//line foo :123:1000\n\n", valid, "foo ", 124, 1},
+ {"//line ::123:1234\n", valid, ":", 123, 1234},
+
+ // //line directives with omitted filenames lead to empty filenames
+ {"//line :10\n", valid, "", 10, 0},
+ {"//line :10:20\n", valid, filename, 10, 20},
+ {"//line bar:1\n//line :10\n", valid, "", 10, 0},
+ {"//line bar:1\n//line :10:20\n", valid, "bar", 10, 20},
+
+ // ignored /*line directives
+ {"/**/", valid, filename, 1, 5}, // no directive
+ {"/*line*/", valid, filename, 1, 9}, // missing colon
+ {"/*line foo*/", valid, filename, 1, 13}, // missing colon
+ {" //line foo:*/", valid, filename, 1, 16}, // not a line start
+ {"/* line foo:*/", valid, filename, 1, 16}, // space between // and line
+
+ // invalid /*line directives with one colon
+ {"/*line :*/", "invalid line number: ", filename, 1, 9},
+ {"/*line :x*/", "invalid line number: x", filename, 1, 9},
+ {"/*line foo :*/", "invalid line number: ", filename, 1, 13},
+ {"/*line foo:x*/", "invalid line number: x", filename, 1, 12},
+ {"/*line foo:0*/", "invalid line number: 0", filename, 1, 12},
+ {"/*line foo:1 */", "invalid line number: 1 ", filename, 1, 12},
+ {"/*line C:foo:0*/", "invalid line number: 0", filename, 1, 14},
+ {fmt.Sprintf("/*line foo:%d*/", tooLarge), fmt.Sprintf("invalid line number: %d", tooLarge), filename, 1, 12},
+
+ // invalid /*line directives with two colons
+ {"/*line ::*/", "invalid line number: ", filename, 1, 10},
+ {"/*line ::x*/", "invalid line number: x", filename, 1, 10},
+ {"/*line foo::123abc*/", "invalid line number: 123abc", filename, 1, 13},
+ {"/*line foo::0*/", "invalid line number: 0", filename, 1, 13},
+ {"/*line foo:0:1*/", "invalid line number: 0", filename, 1, 12},
+
+ {"/*line :123:0*/", "invalid column number: 0", filename, 1, 13},
+ {"/*line foo:123:0*/", "invalid column number: 0", filename, 1, 16},
+ {fmt.Sprintf("/*line foo:10:%d*/", tooLarge), fmt.Sprintf("invalid column number: %d", tooLarge), filename, 1, 15},
+
+ // effect of valid /*line directives on lines
+ {"/*line foo:123*/ foo", valid, "foo", 123, 0},
+ {"/*line foo:123*/\n//line bar:345\nfoo", valid, "bar", 345, 0},
+ {"/*line C:foo:123*/", valid, "C:foo", 123, 0},
+ {"/*line /src/a/a.go:123*/ foo", valid, "/src/a/a.go", 123, 0},
+ {"/*line :x:1*/", valid, ":x", 1, 0},
+ {"/*line foo ::1*/", valid, "foo :", 1, 0},
+ {"/*line foo:123abc:1*/", valid, "foo:123abc", 1, 0},
+ {"/*line foo :123:10*/", valid, "foo ", 123, 10},
+ {"/*line ::123*/", valid, ":", 123, 0},
+
+ // effect of valid /*line directives on columns
+ {"/*line :x:1:10*/", valid, ":x", 1, 10},
+ {"/*line foo ::1:2*/", valid, "foo :", 1, 2},
+ {"/*line foo:123abc:1:1000*/", valid, "foo:123abc", 1, 1000},
+ {"/*line foo :123:1000*/\n", valid, "foo ", 124, 1},
+ {"/*line ::123:1234*/", valid, ":", 123, 1234},
+
+ // /*line directives with omitted filenames lead to the previously used filenames
+ {"/*line :10*/", valid, "", 10, 0},
+ {"/*line :10:20*/", valid, filename, 10, 20},
+ {"//line bar:1\n/*line :10*/", valid, "", 10, 0},
+ {"//line bar:1\n/*line :10:20*/", valid, "bar", 10, 20},
+ } {
+ base := NewFileBase(filename)
+ _, err := Parse(base, strings.NewReader(test.src), nil, nil, 0)
+ if err == nil {
+ t.Errorf("%s: no error reported", test.src)
+ continue
+ }
+ perr, ok := err.(Error)
+ if !ok {
+ t.Errorf("%s: got %v; want parser error", test.src, err)
+ continue
+ }
+ if msg := perr.Msg; msg != test.msg {
+ t.Errorf("%s: got msg = %q; want %q", test.src, msg, test.msg)
+ }
+
+ pos := perr.Pos
+ if filename := pos.RelFilename(); filename != test.filename {
+ t.Errorf("%s: got filename = %q; want %q", test.src, filename, test.filename)
+ }
+ if line := pos.RelLine(); line != test.line {
+ t.Errorf("%s: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col := pos.RelCol(); col != test.col {
+ t.Errorf("%s: got col = %d; want %d", test.src, col, test.col)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/pos.go b/src/cmd/compile/internal/syntax/pos.go
new file mode 100644
index 0000000..1494c09
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/pos.go
@@ -0,0 +1,209 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import "fmt"
+
+// PosMax is the largest line or column value that can be represented without loss.
+// Incoming values (arguments) larger than PosMax will be set to PosMax.
+const PosMax = 1 << 30
+
+// A Pos represents an absolute (line, col) source position
+// with a reference to position base for computing relative
+// (to a file, or line directive) position information.
+// Pos values are intentionally light-weight so that they
+// can be created without too much concern about space use.
+type Pos struct {
+ base *PosBase
+ line, col uint32
+}
+
+// MakePos returns a new Pos for the given PosBase, line and column.
+func MakePos(base *PosBase, line, col uint) Pos { return Pos{base, sat32(line), sat32(col)} }
+
+// TODO(gri) IsKnown makes an assumption about linebase < 1.
+// Maybe we should check for Base() != nil instead.
+
+func (pos Pos) Pos() Pos { return pos }
+func (pos Pos) IsKnown() bool { return pos.line > 0 }
+func (pos Pos) Base() *PosBase { return pos.base }
+func (pos Pos) Line() uint { return uint(pos.line) }
+func (pos Pos) Col() uint { return uint(pos.col) }
+
+func (pos Pos) RelFilename() string { return pos.base.Filename() }
+
+func (pos Pos) RelLine() uint {
+ b := pos.base
+ if b.Line() == 0 {
+ // base line is unknown => relative line is unknown
+ return 0
+ }
+ return b.Line() + (pos.Line() - b.Pos().Line())
+}
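+
+// For example, a //line foo.go:100 directive on (absolute) line 5 sets the
+// base position to the start of line 6, so a token on absolute line 6
+// reports RelLine 100, a token on line 7 reports 101, and so on.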
+
+func (pos Pos) RelCol() uint {
+ b := pos.base
+ if b.Col() == 0 {
+ // base column is unknown => relative column is unknown
+ // (the current specification for line directives requires
+ // this to apply until the next PosBase/line directive,
+	// not just until the next newline)
+ return 0
+ }
+ if pos.Line() == b.Pos().Line() {
+ // pos on same line as pos base => column is relative to pos base
+ return b.Col() + (pos.Col() - b.Pos().Col())
+ }
+ return pos.Col()
+}
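+
+// For example, after a /*line foo.go:1:10*/ directive the character
+// immediately following the comment reports RelCol 10, the next one 11,
+// and so on; positions on subsequent lines report their absolute column.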
+
+// Cmp compares the positions p and q and returns a result r as follows:
+//
+// r < 0: p is before q
+// r == 0: p and q are the same position (but may not be identical)
+// r > 0: p is after q
+//
+// If p and q are in different files, p is before q if the filename
+// of p sorts lexicographically before the filename of q.
+func (p Pos) Cmp(q Pos) int {
+ pname := p.RelFilename()
+ qname := q.RelFilename()
+ switch {
+ case pname < qname:
+ return -1
+ case pname > qname:
+ return +1
+ }
+
+ pline := p.Line()
+ qline := q.Line()
+ switch {
+ case pline < qline:
+ return -1
+ case pline > qline:
+ return +1
+ }
+
+ pcol := p.Col()
+ qcol := q.Col()
+ switch {
+ case pcol < qcol:
+ return -1
+ case pcol > qcol:
+ return +1
+ }
+
+ return 0
+}
+
+func (pos Pos) String() string {
+ rel := position_{pos.RelFilename(), pos.RelLine(), pos.RelCol()}
+ abs := position_{pos.Base().Pos().RelFilename(), pos.Line(), pos.Col()}
+ s := rel.String()
+ if rel != abs {
+ s += "[" + abs.String() + "]"
+ }
+ return s
+}
+
+// TODO(gri) cleanup: find better name, avoid conflict with position in error_test.go
+type position_ struct {
+ filename string
+ line, col uint
+}
+
+func (p position_) String() string {
+ if p.line == 0 {
+ if p.filename == "" {
+ return "<unknown position>"
+ }
+ return p.filename
+ }
+ if p.col == 0 {
+ return fmt.Sprintf("%s:%d", p.filename, p.line)
+ }
+ return fmt.Sprintf("%s:%d:%d", p.filename, p.line, p.col)
+}
+
+// A PosBase represents the base for relative position information:
+// At position pos, the relative position is filename:line:col.
+type PosBase struct {
+ pos Pos
+ filename string
+ line, col uint32
+ trimmed bool // whether -trimpath has been applied
+}
+
+// NewFileBase returns a new PosBase for the given filename.
+// A file PosBase's position is relative to itself, with the
+// position being filename:1:1.
+func NewFileBase(filename string) *PosBase {
+ return NewTrimmedFileBase(filename, false)
+}
+
+// NewTrimmedFileBase is like NewFileBase, but allows specifying Trimmed.
+func NewTrimmedFileBase(filename string, trimmed bool) *PosBase {
+ base := &PosBase{MakePos(nil, linebase, colbase), filename, linebase, colbase, trimmed}
+ base.pos.base = base
+ return base
+}
+
+// NewLineBase returns a new PosBase for a line directive "line filename:line:col"
+// relative to pos, which is the position of the character immediately following
+// the comment containing the line directive. For a directive in a line comment,
+// that position is the beginning of the next line (i.e., the newline character
+// belongs to the line comment).
+func NewLineBase(pos Pos, filename string, trimmed bool, line, col uint) *PosBase {
+ return &PosBase{pos, filename, sat32(line), sat32(col), trimmed}
+}
+
+func (base *PosBase) IsFileBase() bool {
+ if base == nil {
+ return false
+ }
+ return base.pos.base == base
+}
+
+func (base *PosBase) Pos() (_ Pos) {
+ if base == nil {
+ return
+ }
+ return base.pos
+}
+
+func (base *PosBase) Filename() string {
+ if base == nil {
+ return ""
+ }
+ return base.filename
+}
+
+func (base *PosBase) Line() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.line)
+}
+
+func (base *PosBase) Col() uint {
+ if base == nil {
+ return 0
+ }
+ return uint(base.col)
+}
+
+func (base *PosBase) Trimmed() bool {
+ if base == nil {
+ return false
+ }
+ return base.trimmed
+}
+
+func sat32(x uint) uint32 {
+ if x > PosMax {
+ return PosMax
+ }
+ return uint32(x)
+}
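+
+// sat32 saturates rather than truncates: sat32(PosMax+1) == PosMax, so
+// MakePos never stores a line or column larger than PosMax (see the PosMax
+// comment above).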
diff --git a/src/cmd/compile/internal/syntax/positions.go b/src/cmd/compile/internal/syntax/positions.go
new file mode 100644
index 0000000..9359655
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/positions.go
@@ -0,0 +1,364 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements helper functions for scope position computations.
+
+package syntax
+
+// StartPos returns the start position of n.
+func StartPos(n Node) Pos {
+ // Cases for nodes which don't need a correction are commented out.
+ for m := n; ; {
+ switch n := m.(type) {
+ case nil:
+ panic("nil node")
+
+ // packages
+ case *File:
+ // file block starts at the beginning of the file
+ return MakePos(n.Pos().Base(), 1, 1)
+
+ // declarations
+ // case *ImportDecl:
+ // case *ConstDecl:
+ // case *TypeDecl:
+ // case *VarDecl:
+ // case *FuncDecl:
+
+ // expressions
+ // case *BadExpr:
+ // case *Name:
+ // case *BasicLit:
+ case *CompositeLit:
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ return n.Pos()
+ // case *KeyValueExpr:
+ // case *FuncLit:
+ // case *ParenExpr:
+ case *SelectorExpr:
+ m = n.X
+ case *IndexExpr:
+ m = n.X
+ // case *SliceExpr:
+ case *AssertExpr:
+ m = n.X
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ m = n.Lhs
+ continue
+ }
+ m = n.X
+ case *Operation:
+ if n.Y != nil {
+ m = n.X
+ continue
+ }
+ return n.Pos()
+ case *CallExpr:
+ m = n.Fun
+ case *ListExpr:
+ if len(n.ElemList) > 0 {
+ m = n.ElemList[0]
+ continue
+ }
+ return n.Pos()
+ // types
+ // case *ArrayType:
+ // case *SliceType:
+ // case *DotsType:
+ // case *StructType:
+ // case *Field:
+ // case *InterfaceType:
+ // case *FuncType:
+ // case *MapType:
+ // case *ChanType:
+
+ // statements
+ // case *EmptyStmt:
+ // case *LabeledStmt:
+ // case *BlockStmt:
+ // case *ExprStmt:
+ case *SendStmt:
+ m = n.Chan
+ // case *DeclStmt:
+ case *AssignStmt:
+ m = n.Lhs
+ // case *BranchStmt:
+ // case *CallStmt:
+ // case *ReturnStmt:
+ // case *IfStmt:
+ // case *ForStmt:
+ // case *SwitchStmt:
+ // case *SelectStmt:
+
+ // helper nodes
+ case *RangeClause:
+ if n.Lhs != nil {
+ m = n.Lhs
+ continue
+ }
+ m = n.X
+ // case *CaseClause:
+ // case *CommClause:
+
+ default:
+ return n.Pos()
+ }
+ }
+}
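+
+// For example, for a binary operation a+b StartPos descends into the left
+// operand and returns the position of a; for a selector x.f it returns the
+// position of x.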
+
+// EndPos returns the approximate end position of n in the source.
+// For some nodes (*Name, *BasicLit) it returns the position immediately
+// following the node; for others (*BlockStmt, *SwitchStmt, etc.) it
+// returns the position of the closing '}'; and for some (*ParenExpr)
+// the returned position is the end position of the last enclosed
+// expression.
+// Thus, EndPos should not be used for exact demarcation of the
+// end of a node in the source; it is mostly useful to determine
+// scope ranges where there is some leeway.
+func EndPos(n Node) Pos {
+ for m := n; ; {
+ switch n := m.(type) {
+ case nil:
+ panic("nil node")
+
+ // packages
+ case *File:
+ return n.EOF
+
+ // declarations
+ case *ImportDecl:
+ m = n.Path
+ case *ConstDecl:
+ if n.Values != nil {
+ m = n.Values
+ continue
+ }
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ if l := len(n.NameList); l > 0 {
+ m = n.NameList[l-1]
+ continue
+ }
+ return n.Pos()
+ case *TypeDecl:
+ m = n.Type
+ case *VarDecl:
+ if n.Values != nil {
+ m = n.Values
+ continue
+ }
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ if l := len(n.NameList); l > 0 {
+ m = n.NameList[l-1]
+ continue
+ }
+ return n.Pos()
+ case *FuncDecl:
+ if n.Body != nil {
+ m = n.Body
+ continue
+ }
+ m = n.Type
+
+ // expressions
+ case *BadExpr:
+ return n.Pos()
+ case *Name:
+ p := n.Pos()
+ return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value)))
+ case *BasicLit:
+ p := n.Pos()
+ return MakePos(p.Base(), p.Line(), p.Col()+uint(len(n.Value)))
+ case *CompositeLit:
+ return n.Rbrace
+ case *KeyValueExpr:
+ m = n.Value
+ case *FuncLit:
+ m = n.Body
+ case *ParenExpr:
+ m = n.X
+ case *SelectorExpr:
+ m = n.Sel
+ case *IndexExpr:
+ m = n.Index
+ case *SliceExpr:
+		// Use the last non-nil index expression, if any;
+		// otherwise fall back to n.X.
+		m = n.X
+		for i := len(n.Index) - 1; i >= 0; i-- {
+			if x := n.Index[i]; x != nil {
+				m = x
+				break
+			}
+		}
+ case *AssertExpr:
+ m = n.Type
+ case *TypeSwitchGuard:
+ m = n.X
+ case *Operation:
+ if n.Y != nil {
+ m = n.Y
+ continue
+ }
+ m = n.X
+ case *CallExpr:
+ if l := lastExpr(n.ArgList); l != nil {
+ m = l
+ continue
+ }
+ m = n.Fun
+ case *ListExpr:
+ if l := lastExpr(n.ElemList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+
+ // types
+ case *ArrayType:
+ m = n.Elem
+ case *SliceType:
+ m = n.Elem
+ case *DotsType:
+ m = n.Elem
+ case *StructType:
+ if l := lastField(n.FieldList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ // TODO(gri) need to take TagList into account
+ case *Field:
+ if n.Type != nil {
+ m = n.Type
+ continue
+ }
+ m = n.Name
+ case *InterfaceType:
+ if l := lastField(n.MethodList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *FuncType:
+ if l := lastField(n.ResultList); l != nil {
+ m = l
+ continue
+ }
+ if l := lastField(n.ParamList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *MapType:
+ m = n.Value
+ case *ChanType:
+ m = n.Elem
+
+ // statements
+ case *EmptyStmt:
+ return n.Pos()
+ case *LabeledStmt:
+ m = n.Stmt
+ case *BlockStmt:
+ return n.Rbrace
+ case *ExprStmt:
+ m = n.X
+ case *SendStmt:
+ m = n.Value
+ case *DeclStmt:
+ if l := lastDecl(n.DeclList); l != nil {
+ m = l
+ continue
+ }
+ return n.Pos()
+ case *AssignStmt:
+ m = n.Rhs
+ if m == nil {
+ p := EndPos(n.Lhs)
+ return MakePos(p.Base(), p.Line(), p.Col()+2)
+ }
+ case *BranchStmt:
+ if n.Label != nil {
+ m = n.Label
+ continue
+ }
+ return n.Pos()
+ case *CallStmt:
+ m = n.Call
+ case *ReturnStmt:
+ if n.Results != nil {
+ m = n.Results
+ continue
+ }
+ return n.Pos()
+ case *IfStmt:
+ if n.Else != nil {
+ m = n.Else
+ continue
+ }
+ m = n.Then
+ case *ForStmt:
+ m = n.Body
+ case *SwitchStmt:
+ return n.Rbrace
+ case *SelectStmt:
+ return n.Rbrace
+
+ // helper nodes
+ case *RangeClause:
+ m = n.X
+ case *CaseClause:
+ if l := lastStmt(n.Body); l != nil {
+ m = l
+ continue
+ }
+ return n.Colon
+ case *CommClause:
+ if l := lastStmt(n.Body); l != nil {
+ m = l
+ continue
+ }
+ return n.Colon
+
+ default:
+ return n.Pos()
+ }
+ }
+}
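+
+// For example, EndPos of a *Name is the position just past its last
+// character, EndPos of a *BlockStmt is the position of its closing brace,
+// and EndPos of a call f(x, y) descends into the last argument y.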
+
+func lastDecl(list []Decl) Decl {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastExpr(list []Expr) Expr {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastStmt(list []Stmt) Stmt {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
+
+func lastField(list []*Field) *Field {
+ if l := len(list); l > 0 {
+ return list[l-1]
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/syntax/printer.go b/src/cmd/compile/internal/syntax/printer.go
new file mode 100644
index 0000000..0385227
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer.go
@@ -0,0 +1,996 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of syntax trees in source format.
+
+package syntax
+
+import (
+ "bytes"
+ "fmt"
+ "io"
+ "strings"
+)
+
+// Form controls print formatting.
+type Form uint
+
+const (
+ _ Form = iota // default
+ LineForm // use spaces instead of linebreaks where possible
+ ShortForm // like LineForm but print "…" for non-empty function or composite literal bodies
+)
+
+// Fprint prints node x to w in the specified form.
+// It returns the number of bytes written, and whether there was an error.
+func Fprint(w io.Writer, x Node, form Form) (n int, err error) {
+ p := printer{
+ output: w,
+ form: form,
+ linebreaks: form == 0,
+ }
+
+ defer func() {
+ n = p.written
+ if e := recover(); e != nil {
+ err = e.(writeError).err // re-panics if it's not a writeError
+ }
+ }()
+
+ p.print(x)
+ p.flush(_EOF)
+
+ return
+}
+
+// String is a convenience function that prints n in ShortForm
+// and returns the printed string.
+func String(n Node) string {
+ var buf bytes.Buffer
+ _, err := Fprint(&buf, n, ShortForm)
+ if err != nil {
+ fmt.Fprintf(&buf, "<<< ERROR: %s", err)
+ }
+ return buf.String()
+}
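+
+// For example, in ShortForm a composite literal such as []int{1, 2, 3}
+// prints as "[]int{…}" (see the test cases in printer_test.go).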
+
+type ctrlSymbol int
+
+const (
+ none ctrlSymbol = iota
+ semi
+ blank
+ newline
+ indent
+ outdent
+ // comment
+ // eolComment
+)
+
+type whitespace struct {
+ last token
+ kind ctrlSymbol
+ //text string // comment text (possibly ""); valid if kind == comment
+}
+
+type printer struct {
+ output io.Writer
+ written int // number of bytes written
+ form Form
+ linebreaks bool // print linebreaks instead of semis
+
+ indent int // current indentation level
+ nlcount int // number of consecutive newlines
+
+ pending []whitespace // pending whitespace
+ lastTok token // last token (after any pending semi) processed by print
+}
+
+// write is a thin wrapper around p.output.Write
+// that takes care of accounting and error handling.
+func (p *printer) write(data []byte) {
+ n, err := p.output.Write(data)
+ p.written += n
+ if err != nil {
+ panic(writeError{err})
+ }
+}
+
+var (
+ tabBytes = []byte("\t\t\t\t\t\t\t\t")
+ newlineByte = []byte("\n")
+ blankByte = []byte(" ")
+)
+
+func (p *printer) writeBytes(data []byte) {
+ if len(data) == 0 {
+ panic("expected non-empty []byte")
+ }
+ if p.nlcount > 0 && p.indent > 0 {
+ // write indentation
+ n := p.indent
+ for n > len(tabBytes) {
+ p.write(tabBytes)
+ n -= len(tabBytes)
+ }
+ p.write(tabBytes[:n])
+ }
+ p.write(data)
+ p.nlcount = 0
+}
+
+func (p *printer) writeString(s string) {
+ p.writeBytes([]byte(s))
+}
+
+// If impliesSemi returns true for a non-blank line's final token tok,
+// a semicolon is automatically inserted. Conversely, when printing, a
+// semicolon may be omitted in those cases.
+func impliesSemi(tok token) bool {
+ switch tok {
+ case _Name,
+ _Break, _Continue, _Fallthrough, _Return,
+ /*_Inc, _Dec,*/ _Rparen, _Rbrack, _Rbrace: // TODO(gri) fix this
+ return true
+ }
+ return false
+}
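+
+// For example, a line ending in an identifier, in one of the keywords
+// break, continue, fallthrough, or return, or in ')', ']', or '}' implies
+// a semicolon, whereas a line ending in an operator such as '+' does not.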
+
+// TODO(gri) provide table of []byte values for all tokens to avoid repeated string conversion
+
+func lineComment(text string) bool {
+ return strings.HasPrefix(text, "//")
+}
+
+func (p *printer) addWhitespace(kind ctrlSymbol, text string) {
+ p.pending = append(p.pending, whitespace{p.lastTok, kind /*text*/})
+ switch kind {
+ case semi:
+ p.lastTok = _Semi
+ case newline:
+ p.lastTok = 0
+ // TODO(gri) do we need to handle /*-style comments containing newlines here?
+ }
+}
+
+func (p *printer) flush(next token) {
+ // eliminate semis and redundant whitespace
+ sawNewline := next == _EOF
+ sawParen := next == _Rparen || next == _Rbrace
+ for i := len(p.pending) - 1; i >= 0; i-- {
+ switch p.pending[i].kind {
+ case semi:
+ k := semi
+ if sawParen {
+ sawParen = false
+ k = none // eliminate semi
+ } else if sawNewline && impliesSemi(p.pending[i].last) {
+ sawNewline = false
+ k = none // eliminate semi
+ }
+ p.pending[i].kind = k
+ case newline:
+ sawNewline = true
+ case blank, indent, outdent:
+ // nothing to do
+ // case comment:
+ // // A multi-line comment acts like a newline; and a ""
+ // // comment implies by definition at least one newline.
+ // if text := p.pending[i].text; strings.HasPrefix(text, "/*") && strings.ContainsRune(text, '\n') {
+ // sawNewline = true
+ // }
+ // case eolComment:
+ // // TODO(gri) act depending on sawNewline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ // print pending
+ prev := none
+ for i := range p.pending {
+ switch p.pending[i].kind {
+ case none:
+ // nothing to do
+ case semi:
+ p.writeString(";")
+ p.nlcount = 0
+ prev = semi
+ case blank:
+ if prev != blank {
+ // at most one blank
+ p.writeBytes(blankByte)
+ p.nlcount = 0
+ prev = blank
+ }
+ case newline:
+ const maxEmptyLines = 1
+ if p.nlcount <= maxEmptyLines {
+ p.write(newlineByte)
+ p.nlcount++
+ prev = newline
+ }
+ case indent:
+ p.indent++
+ case outdent:
+ p.indent--
+ if p.indent < 0 {
+ panic("negative indentation")
+ }
+ // case comment:
+ // if text := p.pending[i].text; text != "" {
+ // p.writeString(text)
+ // p.nlcount = 0
+ // prev = comment
+ // }
+ // // TODO(gri) should check that line comments are always followed by newline
+ default:
+ panic("unreachable")
+ }
+ }
+
+ p.pending = p.pending[:0] // re-use underlying array
+}
+
+func mayCombine(prev token, next byte) (b bool) {
+ return // for now
+ // switch prev {
+ // case lexical.Int:
+ // b = next == '.' // 1.
+ // case lexical.Add:
+ // b = next == '+' // ++
+ // case lexical.Sub:
+ // b = next == '-' // --
+ // case lexical.Quo:
+ // b = next == '*' // /*
+ // case lexical.Lss:
+ // b = next == '-' || next == '<' // <- or <<
+ // case lexical.And:
+ // b = next == '&' || next == '^' // && or &^
+ // }
+ // return
+}
+
+func (p *printer) print(args ...interface{}) {
+ for i := 0; i < len(args); i++ {
+ switch x := args[i].(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ case Node:
+ p.printNode(x)
+
+ case token:
+ // _Name implies an immediately following string
+ // argument which is the actual value to print.
+ var s string
+ if x == _Name {
+ i++
+ if i >= len(args) {
+ panic("missing string argument after _Name")
+ }
+ s = args[i].(string)
+ } else {
+ s = x.String()
+ }
+
+ // TODO(gri) This check seems at the wrong place since it doesn't
+ // take into account pending white space.
+ if mayCombine(p.lastTok, s[0]) {
+ panic("adjacent tokens combine without whitespace")
+ }
+
+ if x == _Semi {
+ // delay printing of semi
+ p.addWhitespace(semi, "")
+ } else {
+ p.flush(x)
+ p.writeString(s)
+ p.nlcount = 0
+ p.lastTok = x
+ }
+
+ case Operator:
+ if x != 0 {
+ p.flush(_Operator)
+ p.writeString(x.String())
+ }
+
+ case ctrlSymbol:
+ switch x {
+ case none, semi /*, comment*/ :
+ panic("unreachable")
+ case newline:
+ // TODO(gri) need to handle mandatory newlines after a //-style comment
+ if !p.linebreaks {
+ x = blank
+ }
+ }
+ p.addWhitespace(x, "")
+
+ // case *Comment: // comments are not Nodes
+ // p.addWhitespace(comment, x.Text)
+
+ default:
+ panic(fmt.Sprintf("unexpected argument %v (%T)", x, x))
+ }
+ }
+}
+
+func (p *printer) printNode(n Node) {
+ // ncom := *n.Comments()
+ // if ncom != nil {
+ // // TODO(gri) in general we cannot make assumptions about whether
+ // // a comment is a /*- or a //-style comment since the syntax
+ // // tree may have been manipulated. Need to make sure the correct
+ // // whitespace is emitted.
+ // for _, c := range ncom.Alone {
+ // p.print(c, newline)
+ // }
+ // for _, c := range ncom.Before {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style 'before' comment")
+ // }
+ // p.print(c, blank)
+ // }
+ // }
+
+ p.printRawNode(n)
+
+ // if ncom != nil && len(ncom.After) > 0 {
+ // for i, c := range ncom.After {
+ // if i+1 < len(ncom.After) {
+ // if c.Text == "" || lineComment(c.Text) {
+ // panic("unexpected empty line or //-style non-final 'after' comment")
+ // }
+ // }
+ // p.print(blank, c)
+ // }
+ // //p.print(newline)
+ // }
+}
+
+func (p *printer) printRawNode(n Node) {
+ switch n := n.(type) {
+ case nil:
+ // we should not reach here but don't crash
+
+ // expressions and types
+ case *BadExpr:
+ p.print(_Name, "<bad expr>")
+
+ case *Name:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *BasicLit:
+ p.print(_Name, n.Value) // _Name requires actual value following immediately
+
+ case *FuncLit:
+ p.print(n.Type, blank)
+ if n.Body != nil {
+ if p.form == ShortForm {
+ p.print(_Lbrace)
+ if len(n.Body.List) > 0 {
+ p.print(_Name, "…")
+ }
+ p.print(_Rbrace)
+ } else {
+ p.print(n.Body)
+ }
+ }
+
+ case *CompositeLit:
+ if n.Type != nil {
+ p.print(n.Type)
+ }
+ p.print(_Lbrace)
+ if p.form == ShortForm {
+ if len(n.ElemList) > 0 {
+ p.print(_Name, "…")
+ }
+ } else {
+ if n.NKeys > 0 && n.NKeys == len(n.ElemList) {
+ p.printExprLines(n.ElemList)
+ } else {
+ p.printExprList(n.ElemList)
+ }
+ }
+ p.print(_Rbrace)
+
+ case *ParenExpr:
+ p.print(_Lparen, n.X, _Rparen)
+
+ case *SelectorExpr:
+ p.print(n.X, _Dot, n.Sel)
+
+ case *IndexExpr:
+ p.print(n.X, _Lbrack, n.Index, _Rbrack)
+
+ case *SliceExpr:
+ p.print(n.X, _Lbrack)
+ if i := n.Index[0]; i != nil {
+ p.printNode(i)
+ }
+ p.print(_Colon)
+ if j := n.Index[1]; j != nil {
+ p.printNode(j)
+ }
+ if k := n.Index[2]; k != nil {
+ p.print(_Colon, k)
+ }
+ p.print(_Rbrack)
+
+ case *AssertExpr:
+ p.print(n.X, _Dot, _Lparen, n.Type, _Rparen)
+
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ p.print(n.Lhs, blank, _Define, blank)
+ }
+ p.print(n.X, _Dot, _Lparen, _Type, _Rparen)
+
+ case *CallExpr:
+ p.print(n.Fun, _Lparen)
+ p.printExprList(n.ArgList)
+ if n.HasDots {
+ p.print(_DotDotDot)
+ }
+ p.print(_Rparen)
+
+ case *Operation:
+ if n.Y == nil {
+ // unary expr
+ p.print(n.Op)
+ // if n.Op == lexical.Range {
+ // p.print(blank)
+ // }
+ p.print(n.X)
+ } else {
+ // binary expr
+ // TODO(gri) eventually take precedence into account
+ // to control possibly missing parentheses
+ p.print(n.X, blank, n.Op, blank, n.Y)
+ }
+
+ case *KeyValueExpr:
+ p.print(n.Key, _Colon, blank, n.Value)
+
+ case *ListExpr:
+ p.printExprList(n.ElemList)
+
+ case *ArrayType:
+ var len interface{} = _DotDotDot
+ if n.Len != nil {
+ len = n.Len
+ }
+ p.print(_Lbrack, len, _Rbrack, n.Elem)
+
+ case *SliceType:
+ p.print(_Lbrack, _Rbrack, n.Elem)
+
+ case *DotsType:
+ p.print(_DotDotDot, n.Elem)
+
+ case *StructType:
+ p.print(_Struct)
+ if len(n.FieldList) > 0 && p.linebreaks {
+ p.print(blank)
+ }
+ p.print(_Lbrace)
+ if len(n.FieldList) > 0 {
+ if p.linebreaks {
+ p.print(newline, indent)
+ p.printFieldList(n.FieldList, n.TagList, _Semi)
+ p.print(outdent, newline)
+ } else {
+ p.printFieldList(n.FieldList, n.TagList, _Semi)
+ }
+ }
+ p.print(_Rbrace)
+
+ case *FuncType:
+ p.print(_Func)
+ p.printSignature(n)
+
+ case *InterfaceType:
+ p.print(_Interface)
+ if p.linebreaks && len(n.MethodList) > 1 {
+ p.print(blank)
+ p.print(_Lbrace)
+ p.print(newline, indent)
+ p.printMethodList(n.MethodList)
+ p.print(outdent, newline)
+ } else {
+ p.print(_Lbrace)
+ p.printMethodList(n.MethodList)
+ }
+ p.print(_Rbrace)
+
+ case *MapType:
+ p.print(_Map, _Lbrack, n.Key, _Rbrack, n.Value)
+
+ case *ChanType:
+ if n.Dir == RecvOnly {
+ p.print(_Arrow)
+ }
+ p.print(_Chan)
+ if n.Dir == SendOnly {
+ p.print(_Arrow)
+ }
+ p.print(blank)
+ if e, _ := n.Elem.(*ChanType); n.Dir == 0 && e != nil && e.Dir == RecvOnly {
+ // don't print chan (<-chan T) as chan <-chan T
+ p.print(_Lparen)
+ p.print(n.Elem)
+ p.print(_Rparen)
+ } else {
+ p.print(n.Elem)
+ }
+
+ // statements
+ case *DeclStmt:
+ p.printDecl(n.DeclList)
+
+ case *EmptyStmt:
+ // nothing to print
+
+ case *LabeledStmt:
+ p.print(outdent, n.Label, _Colon, indent, newline, n.Stmt)
+
+ case *ExprStmt:
+ p.print(n.X)
+
+ case *SendStmt:
+ p.print(n.Chan, blank, _Arrow, blank, n.Value)
+
+ case *AssignStmt:
+ p.print(n.Lhs)
+ if n.Rhs == nil {
+ // TODO(gri) This is going to break the mayCombine
+ // check once we enable that again.
+ p.print(n.Op, n.Op) // ++ or --
+ } else {
+ p.print(blank, n.Op, _Assign, blank)
+ p.print(n.Rhs)
+ }
+
+ case *CallStmt:
+ p.print(n.Tok, blank, n.Call)
+
+ case *ReturnStmt:
+ p.print(_Return)
+ if n.Results != nil {
+ p.print(blank, n.Results)
+ }
+
+ case *BranchStmt:
+ p.print(n.Tok)
+ if n.Label != nil {
+ p.print(blank, n.Label)
+ }
+
+ case *BlockStmt:
+ p.print(_Lbrace)
+ if len(n.List) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(n.List, true)
+ p.print(outdent, newline)
+ }
+ p.print(_Rbrace)
+
+ case *IfStmt:
+ p.print(_If, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ p.print(n.Cond, blank, n.Then)
+ if n.Else != nil {
+ p.print(blank, _Else, blank, n.Else)
+ }
+
+ case *SwitchStmt:
+ p.print(_Switch, blank)
+ if n.Init != nil {
+ p.print(n.Init, _Semi, blank)
+ }
+ if n.Tag != nil {
+ p.print(n.Tag, blank)
+ }
+ p.printSwitchBody(n.Body)
+
+ case *SelectStmt:
+ p.print(_Select, blank) // for now
+ p.printSelectBody(n.Body)
+
+ case *RangeClause:
+ if n.Lhs != nil {
+ tok := _Assign
+ if n.Def {
+ tok = _Define
+ }
+ p.print(n.Lhs, blank, tok, blank)
+ }
+ p.print(_Range, blank, n.X)
+
+ case *ForStmt:
+ p.print(_For, blank)
+ if n.Init == nil && n.Post == nil {
+ if n.Cond != nil {
+ p.print(n.Cond, blank)
+ }
+ } else {
+ if n.Init != nil {
+ p.print(n.Init)
+ // TODO(gri) clean this up
+ if _, ok := n.Init.(*RangeClause); ok {
+ p.print(blank, n.Body)
+ break
+ }
+ }
+ p.print(_Semi, blank)
+ if n.Cond != nil {
+ p.print(n.Cond)
+ }
+ p.print(_Semi, blank)
+ if n.Post != nil {
+ p.print(n.Post, blank)
+ }
+ }
+ p.print(n.Body)
+
+ case *ImportDecl:
+ if n.Group == nil {
+ p.print(_Import, blank)
+ }
+ if n.LocalPkgName != nil {
+ p.print(n.LocalPkgName, blank)
+ }
+ p.print(n.Path)
+
+ case *ConstDecl:
+ if n.Group == nil {
+ p.print(_Const, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *TypeDecl:
+ if n.Group == nil {
+ p.print(_Type, blank)
+ }
+ p.print(n.Name)
+ if n.TParamList != nil {
+ p.printParameterList(n.TParamList, true)
+ }
+ p.print(blank)
+ if n.Alias {
+ p.print(_Assign, blank)
+ }
+ p.print(n.Type)
+
+ case *VarDecl:
+ if n.Group == nil {
+ p.print(_Var, blank)
+ }
+ p.printNameList(n.NameList)
+ if n.Type != nil {
+ p.print(blank, n.Type)
+ }
+ if n.Values != nil {
+ p.print(blank, _Assign, blank, n.Values)
+ }
+
+ case *FuncDecl:
+ p.print(_Func, blank)
+ if r := n.Recv; r != nil {
+ p.print(_Lparen)
+ if r.Name != nil {
+ p.print(r.Name, blank)
+ }
+ p.printNode(r.Type)
+ p.print(_Rparen, blank)
+ }
+ p.print(n.Name)
+ if n.TParamList != nil {
+ p.printParameterList(n.TParamList, true)
+ }
+ p.printSignature(n.Type)
+ if n.Body != nil {
+ p.print(blank, n.Body)
+ }
+
+ case *printGroup:
+ p.print(n.Tok, blank, _Lparen)
+ if len(n.Decls) > 0 {
+ p.print(newline, indent)
+ for _, d := range n.Decls {
+ p.printNode(d)
+ p.print(_Semi, newline)
+ }
+ p.print(outdent)
+ }
+ p.print(_Rparen)
+
+ // files
+ case *File:
+ p.print(_Package, blank, n.PkgName)
+ if len(n.DeclList) > 0 {
+ p.print(_Semi, newline, newline)
+ p.printDeclList(n.DeclList)
+ }
+
+ default:
+ panic(fmt.Sprintf("syntax.Iterate: unexpected node type %T", n))
+ }
+}
+
+func (p *printer) printFields(fields []*Field, tags []*BasicLit, i, j int) {
+ if i+1 == j && fields[i].Name == nil {
+ // anonymous field
+ p.printNode(fields[i].Type)
+ } else {
+ for k, f := range fields[i:j] {
+ if k > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(f.Name)
+ }
+ p.print(blank)
+ p.printNode(fields[i].Type)
+ }
+ if i < len(tags) && tags[i] != nil {
+ p.print(blank)
+ p.printNode(tags[i])
+ }
+}
+
+func (p *printer) printFieldList(fields []*Field, tags []*BasicLit, sep token) {
+ i0 := 0
+ var typ Expr
+ for i, f := range fields {
+ if f.Name == nil || f.Type != typ {
+ if i0 < i {
+ p.printFields(fields, tags, i0, i)
+ p.print(sep, newline)
+ i0 = i
+ }
+ typ = f.Type
+ }
+ }
+ p.printFields(fields, tags, i0, len(fields))
+}
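+
+// Note: consecutive named fields that share the same type expression node
+// (typically produced by the parser for declarations like "x, y int") are
+// printed as a single group ("x, y int") rather than as separate fields.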
+
+func (p *printer) printMethodList(methods []*Field) {
+ for i, m := range methods {
+ if i > 0 {
+ p.print(_Semi, newline)
+ }
+ if m.Name != nil {
+ p.printNode(m.Name)
+ p.printSignature(m.Type.(*FuncType))
+ } else {
+ p.printNode(m.Type)
+ }
+ }
+}
+
+func (p *printer) printNameList(list []*Name) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprList(list []Expr) {
+ for i, x := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ p.printNode(x)
+ }
+}
+
+func (p *printer) printExprLines(list []Expr) {
+ if len(list) > 0 {
+ p.print(newline, indent)
+ for _, x := range list {
+ p.print(x, _Comma, newline)
+ }
+ p.print(outdent)
+ }
+}
+
+func groupFor(d Decl) (token, *Group) {
+ switch d := d.(type) {
+ case *ImportDecl:
+ return _Import, d.Group
+ case *ConstDecl:
+ return _Const, d.Group
+ case *TypeDecl:
+ return _Type, d.Group
+ case *VarDecl:
+ return _Var, d.Group
+ case *FuncDecl:
+ return _Func, nil
+ default:
+ panic("unreachable")
+ }
+}
+
+type printGroup struct {
+ node
+ Tok token
+ Decls []Decl
+}
+
+func (p *printer) printDecl(list []Decl) {
+ tok, group := groupFor(list[0])
+
+ if group == nil {
+ if len(list) != 1 {
+ panic("unreachable")
+ }
+ p.printNode(list[0])
+ return
+ }
+
+ // if _, ok := list[0].(*EmptyDecl); ok {
+ // if len(list) != 1 {
+ // panic("unreachable")
+ // }
+ // // TODO(gri) if there are comments inside the empty
+ // // group, we may need to keep the list non-nil
+ // list = nil
+ // }
+
+ // printGroup is here for consistent comment handling
+ // (this is not yet used)
+ var pg printGroup
+ // *pg.Comments() = *group.Comments()
+ pg.Tok = tok
+ pg.Decls = list
+ p.printNode(&pg)
+}
+
+func (p *printer) printDeclList(list []Decl) {
+ i0 := 0
+ var tok token
+ var group *Group
+ for i, x := range list {
+ if s, g := groupFor(x); g == nil || g != group {
+ if i0 < i {
+ p.printDecl(list[i0:i])
+ p.print(_Semi, newline)
+ // print empty line between different declaration groups,
+ // different kinds of declarations, or between functions
+ if g != group || s != tok || s == _Func {
+ p.print(newline)
+ }
+ i0 = i
+ }
+ tok, group = s, g
+ }
+ }
+ p.printDecl(list[i0:])
+}
+
+func (p *printer) printSignature(sig *FuncType) {
+ p.printParameterList(sig.ParamList, false)
+ if list := sig.ResultList; list != nil {
+ p.print(blank)
+ if len(list) == 1 && list[0].Name == nil {
+ p.printNode(list[0].Type)
+ } else {
+ p.printParameterList(list, false)
+ }
+ }
+}
+
+func (p *printer) printParameterList(list []*Field, types bool) {
+ open, close := _Lparen, _Rparen
+ if types {
+ open, close = _Lbrack, _Rbrack
+ }
+ p.print(open)
+ for i, f := range list {
+ if i > 0 {
+ p.print(_Comma, blank)
+ }
+ if f.Name != nil {
+ p.printNode(f.Name)
+ if i+1 < len(list) {
+ f1 := list[i+1]
+ if f1.Name != nil && f1.Type == f.Type {
+ continue // no need to print type
+ }
+ }
+ p.print(blank)
+ }
+ p.printNode(unparen(f.Type)) // no need for (extra) parentheses around parameter types
+ }
+ // A type parameter list [P *T] where T is not a type literal requires a comma as in [P *T,]
+ // so that it's not parsed as [P*T].
+ if types && len(list) == 1 {
+ if t, _ := list[0].Type.(*Operation); t != nil && t.Op == Mul && t.Y == nil && !isTypeLit(t.X) {
+ p.print(_Comma)
+ }
+ }
+ p.print(close)
+}
+
+func (p *printer) printStmtList(list []Stmt, braces bool) {
+ for i, x := range list {
+ p.print(x, _Semi)
+ if i+1 < len(list) {
+ p.print(newline)
+ } else if braces {
+ // Print an extra semicolon if the last statement is
+ // an empty statement and we are in a braced block
+ // because one semicolon is automatically removed.
+ if _, ok := x.(*EmptyStmt); ok {
+ p.print(x, _Semi)
+ }
+ }
+ }
+}
+
+func (p *printer) printSwitchBody(list []*CaseClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCaseClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printSelectBody(list []*CommClause) {
+ p.print(_Lbrace)
+ if len(list) > 0 {
+ p.print(newline)
+ for i, c := range list {
+ p.printCommClause(c, i+1 == len(list))
+ p.print(newline)
+ }
+ }
+ p.print(_Rbrace)
+}
+
+func (p *printer) printCaseClause(c *CaseClause, braces bool) {
+ if c.Cases != nil {
+ p.print(_Case, blank, c.Cases)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
+
+func (p *printer) printCommClause(c *CommClause, braces bool) {
+ if c.Comm != nil {
+ p.print(_Case, blank)
+ p.print(c.Comm)
+ } else {
+ p.print(_Default)
+ }
+ p.print(_Colon)
+ if len(c.Body) > 0 {
+ p.print(newline, indent)
+ p.printStmtList(c.Body, braces)
+ p.print(outdent)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/printer_test.go b/src/cmd/compile/internal/syntax/printer_test.go
new file mode 100644
index 0000000..941af0a
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/printer_test.go
@@ -0,0 +1,250 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "strings"
+ "testing"
+)
+
+func TestPrint(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ ast, _ := ParseFile(*src_, func(err error) { t.Error(err) }, nil, AllowGenerics)
+
+ if ast != nil {
+ Fprint(testOut(), ast, LineForm)
+ fmt.Println()
+ }
+}
+
+type shortBuffer struct {
+ buf []byte
+}
+
+func (w *shortBuffer) Write(data []byte) (n int, err error) {
+ w.buf = append(w.buf, data...)
+ n = len(data)
+ if len(w.buf) > 10 {
+ err = io.ErrShortBuffer
+ }
+ return
+}
+
+func TestPrintError(t *testing.T) {
+ const src = "package p; var x int"
+ ast, err := Parse(nil, strings.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var buf shortBuffer
+ _, err = Fprint(&buf, ast, 0)
+ if err == nil || err != io.ErrShortBuffer {
+ t.Errorf("got err = %s, want %s", err, io.ErrShortBuffer)
+ }
+}
+
+var stringTests = [][2]string{
+ dup("package p"),
+ dup("package p; type _ int; type T1 = struct{}; type ( _ *struct{}; T2 = float32 )"),
+
+ // generic type declarations
+ dup("package p; type _[T any] struct{}"),
+ dup("package p; type _[A, B, C interface{m()}] struct{}"),
+ dup("package p; type _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}] struct{}"),
+
+ dup("package p; type _[P *T,] struct{}"),
+ dup("package p; type _[P *T, _ any] struct{}"),
+ {"package p; type _[P (*T),] struct{}", "package p; type _[P *T,] struct{}"},
+ {"package p; type _[P (*T), _ any] struct{}", "package p; type _[P *T, _ any] struct{}"},
+ {"package p; type _[P (T),] struct{}", "package p; type _[P T] struct{}"},
+ {"package p; type _[P (T), _ any] struct{}", "package p; type _[P T, _ any] struct{}"},
+
+ dup("package p; type _[P *struct{}] struct{}"),
+ {"package p; type _[P (*struct{})] struct{}", "package p; type _[P *struct{}] struct{}"},
+ {"package p; type _[P ([]int)] struct{}", "package p; type _[P []int] struct{}"},
+
+ dup("package p; type _ [P(T)]struct{}"),
+ dup("package p; type _ [P((T))]struct{}"),
+ dup("package p; type _ [P * *T]struct{}"),
+ dup("package p; type _ [P * T]struct{}"),
+ dup("package p; type _ [P(*T)]struct{}"),
+ dup("package p; type _ [P(**T)]struct{}"),
+ dup("package p; type _ [P * T - T]struct{}"),
+
+ // array type declarations
+ dup("package p; type _ [P * T]struct{}"),
+ dup("package p; type _ [P * T - T]struct{}"),
+
+ // generic function declarations
+ dup("package p; func _[T any]()"),
+ dup("package p; func _[A, B, C interface{m()}]()"),
+ dup("package p; func _[T any, A, B, C interface{m()}, X, Y, Z interface{~int}]()"),
+
+ // methods with generic receiver types
+ dup("package p; func (R[T]) _()"),
+ dup("package p; func (*R[A, B, C]) _()"),
+ dup("package p; func (_ *R[A, B, C]) _()"),
+
+ // type constraint literals with elided interfaces
+ dup("package p; func _[P ~int, Q int | string]() {}"),
+ dup("package p; func _[P struct{f int}, Q *P]() {}"),
+
+ // channels
+ dup("package p; type _ chan chan int"),
+ dup("package p; type _ chan (<-chan int)"),
+ dup("package p; type _ chan chan<- int"),
+
+ dup("package p; type _ <-chan chan int"),
+ dup("package p; type _ <-chan <-chan int"),
+ dup("package p; type _ <-chan chan<- int"),
+
+ dup("package p; type _ chan<- chan int"),
+ dup("package p; type _ chan<- <-chan int"),
+ dup("package p; type _ chan<- chan<- int"),
+
+ // TODO(gri) expand
+}
+
+func TestPrintString(t *testing.T) {
+ for _, test := range stringTests {
+ ast, err := Parse(nil, strings.NewReader(test[0]), nil, nil, AllowGenerics)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if got := String(ast); got != test[1] {
+ t.Errorf("%q: got %q", test[1], got)
+ }
+ }
+}
+
+func testOut() io.Writer {
+ if testing.Verbose() {
+ return os.Stdout
+ }
+ return ioutil.Discard
+}
+
+func dup(s string) [2]string { return [2]string{s, s} }
+
+var exprTests = [][2]string{
+ // basic type literals
+ dup("x"),
+ dup("true"),
+ dup("42"),
+ dup("3.1415"),
+ dup("2.71828i"),
+ dup(`'a'`),
+ dup(`"foo"`),
+ dup("`bar`"),
+
+ // func and composite literals
+ dup("func() {}"),
+ dup("[]int{}"),
+ {"func(x int) complex128 { return 0 }", "func(x int) complex128 {…}"},
+ {"[]int{1, 2, 3}", "[]int{…}"},
+
+ // type expressions
+ dup("[1 << 10]byte"),
+ dup("[]int"),
+ dup("*int"),
+ dup("struct{x int}"),
+ dup("func()"),
+ dup("func(int, float32) string"),
+ dup("interface{m()}"),
+ dup("interface{m() string; n(x int)}"),
+ dup("interface{~int}"),
+ dup("interface{~int | ~float64 | ~string}"),
+ dup("interface{~int; m()}"),
+ dup("interface{~int | ~float64 | ~string; m() string; n(x int)}"),
+ dup("map[string]int"),
+ dup("chan E"),
+ dup("<-chan E"),
+ dup("chan<- E"),
+
+ // new interfaces
+ dup("interface{int}"),
+ dup("interface{~int}"),
+ dup("interface{~int}"),
+ dup("interface{int | string}"),
+ dup("interface{~int | ~string; float64; m()}"),
+ dup("interface{~a | ~b | ~c; ~int | ~string; float64; m()}"),
+ dup("interface{~T[int, string] | string}"),
+
+ // non-type expressions
+ dup("(x)"),
+ dup("x.f"),
+ dup("a[i]"),
+
+ dup("s[:]"),
+ dup("s[i:]"),
+ dup("s[:j]"),
+ dup("s[i:j]"),
+ dup("s[:j:k]"),
+ dup("s[i:j:k]"),
+
+ dup("x.(T)"),
+
+ dup("x.([10]int)"),
+ dup("x.([...]int)"),
+
+ dup("x.(struct{})"),
+ dup("x.(struct{x int; y, z float32; E})"),
+
+ dup("x.(func())"),
+ dup("x.(func(x int))"),
+ dup("x.(func() int)"),
+ dup("x.(func(x, y int, z float32) (r int))"),
+ dup("x.(func(a, b, c int))"),
+ dup("x.(func(x ...T))"),
+
+ dup("x.(interface{})"),
+ dup("x.(interface{m(); n(x int); E})"),
+ dup("x.(interface{m(); n(x int) T; E; F})"),
+
+ dup("x.(map[K]V)"),
+
+ dup("x.(chan E)"),
+ dup("x.(<-chan E)"),
+ dup("x.(chan<- chan int)"),
+ dup("x.(chan<- <-chan int)"),
+ dup("x.(<-chan chan int)"),
+ dup("x.(chan (<-chan int))"),
+
+ dup("f()"),
+ dup("f(x)"),
+ dup("int(x)"),
+ dup("f(x, x + y)"),
+ dup("f(s...)"),
+ dup("f(a, s...)"),
+
+ dup("*x"),
+ dup("&x"),
+ dup("x + y"),
+ dup("x + y << (2 * s)"),
+}
+
+func TestShortString(t *testing.T) {
+ for _, test := range exprTests {
+ src := "package p; var _ = " + test[0]
+ ast, err := Parse(nil, strings.NewReader(src), nil, nil, AllowGenerics)
+ if err != nil {
+ t.Errorf("%s: %s", test[0], err)
+ continue
+ }
+ x := ast.DeclList[0].(*VarDecl).Values
+ if got := String(x); got != test[1] {
+ t.Errorf("%s: got %s, want %s", test[0], got, test[1])
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/scanner.go b/src/cmd/compile/internal/syntax/scanner.go
new file mode 100644
index 0000000..218bc24
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner.go
@@ -0,0 +1,881 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements scanner, a lexical tokenizer for
+// Go source. After initialization, consecutive calls of
+// next advance the scanner one token at a time.
+//
+// This file, source.go, tokens.go, and token_string.go are self-contained
+// (`go tool compile scanner.go source.go tokens.go token_string.go` compiles)
+// and thus could be made into their own package.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "unicode"
+ "unicode/utf8"
+)
+
+// The mode flags below control which comments are reported
+// by calling the error handler. If no flag is set, comments
+// are ignored.
+const (
+ comments uint = 1 << iota // call handler for all comments
+ directives // call handler for directives only
+)
+
+type scanner struct {
+ source
+ mode uint
+ nlsemi bool // if set '\n' and EOF translate to ';'
+
+ // current token, valid after calling next()
+ line, col uint
+ blank bool // line is blank up to col
+ tok token
+ lit string // valid if tok is _Name, _Literal, or _Semi ("semicolon", "newline", or "EOF"); may be malformed if bad is true
+ bad bool // valid if tok is _Literal, true if a syntax error occurred, lit may be malformed
+ kind LitKind // valid if tok is _Literal
+ op Operator // valid if tok is _Operator, _AssignOp, or _IncOp
+ prec int // valid if tok is _Operator, _AssignOp, or _IncOp
+}
+
+func (s *scanner) init(src io.Reader, errh func(line, col uint, msg string), mode uint) {
+ s.source.init(src, errh)
+ s.mode = mode
+ s.nlsemi = false
+}
+
+// errorf reports an error at the most recently read character position.
+func (s *scanner) errorf(format string, args ...interface{}) {
+ s.error(fmt.Sprintf(format, args...))
+}
+
+// errorAtf reports an error at a byte column offset relative to the current token start.
+func (s *scanner) errorAtf(offset int, format string, args ...interface{}) {
+ s.errh(s.line, s.col+uint(offset), fmt.Sprintf(format, args...))
+}
+
+// setLit sets the scanner state for a recognized _Literal token.
+func (s *scanner) setLit(kind LitKind, ok bool) {
+ s.nlsemi = true
+ s.tok = _Literal
+ s.lit = string(s.segment())
+ s.bad = !ok
+ s.kind = kind
+}
+
+// next advances the scanner by reading the next token.
+//
+// If a read, source encoding, or lexical error occurs, next calls
+// the installed error handler with the respective error position
+// and message. The error message is guaranteed to be non-empty and
+// never starts with a '/'. The error handler must exist.
+//
+// If the scanner mode includes the comments flag and a comment
+// (including comments containing directives) is encountered, the
+// error handler is also called with each comment position and text
+// (including opening /* or // and closing */, but without a newline
+// at the end of line comments). Comment text always starts with a /
+// which can be used to distinguish these handler calls from errors.
+//
+// If the scanner mode includes the directives (but not the comments)
+// flag, only comments containing a //line, /*line, or //go: directive
+// are reported, in the same way as regular comments.
+func (s *scanner) next() {
+ nlsemi := s.nlsemi
+ s.nlsemi = false
+
+redo:
+ // skip white space
+ s.stop()
+ startLine, startCol := s.pos()
+ for s.ch == ' ' || s.ch == '\t' || s.ch == '\n' && !nlsemi || s.ch == '\r' {
+ s.nextch()
+ }
+
+ // token start
+ s.line, s.col = s.pos()
+ s.blank = s.line > startLine || startCol == colbase
+ s.start()
+ if isLetter(s.ch) || s.ch >= utf8.RuneSelf && s.atIdentChar(true) {
+ s.nextch()
+ s.ident()
+ return
+ }
+
+ switch s.ch {
+ case -1:
+ if nlsemi {
+ s.lit = "EOF"
+ s.tok = _Semi
+ break
+ }
+ s.tok = _EOF
+
+ case '\n':
+ s.nextch()
+ s.lit = "newline"
+ s.tok = _Semi
+
+ case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
+ s.number(false)
+
+ case '"':
+ s.stdString()
+
+ case '`':
+ s.rawString()
+
+ case '\'':
+ s.rune()
+
+ case '(':
+ s.nextch()
+ s.tok = _Lparen
+
+ case '[':
+ s.nextch()
+ s.tok = _Lbrack
+
+ case '{':
+ s.nextch()
+ s.tok = _Lbrace
+
+ case ',':
+ s.nextch()
+ s.tok = _Comma
+
+ case ';':
+ s.nextch()
+ s.lit = "semicolon"
+ s.tok = _Semi
+
+ case ')':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rparen
+
+ case ']':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrack
+
+ case '}':
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _Rbrace
+
+ case ':':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _Define
+ break
+ }
+ s.tok = _Colon
+
+ case '.':
+ s.nextch()
+ if isDecimal(s.ch) {
+ s.number(true)
+ break
+ }
+ if s.ch == '.' {
+ s.nextch()
+ if s.ch == '.' {
+ s.nextch()
+ s.tok = _DotDotDot
+ break
+ }
+ s.rewind() // now s.ch holds 1st '.'
+ s.nextch() // consume 1st '.' again
+ }
+ s.tok = _Dot
+
+ case '+':
+ s.nextch()
+ s.op, s.prec = Add, precAdd
+ if s.ch != '+' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '-':
+ s.nextch()
+ s.op, s.prec = Sub, precAdd
+ if s.ch != '-' {
+ goto assignop
+ }
+ s.nextch()
+ s.nlsemi = true
+ s.tok = _IncOp
+
+ case '*':
+ s.nextch()
+ s.op, s.prec = Mul, precMul
+ // don't goto assignop - want _Star token
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ break
+ }
+ s.tok = _Star
+
+ case '/':
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ s.lineComment()
+ goto redo
+ }
+ if s.ch == '*' {
+ s.nextch()
+ s.fullComment()
+ if line, _ := s.pos(); line > s.line && nlsemi {
+ // A multi-line comment acts like a newline;
+ // it translates to a ';' if nlsemi is set.
+ s.lit = "newline"
+ s.tok = _Semi
+ break
+ }
+ goto redo
+ }
+ s.op, s.prec = Div, precMul
+ goto assignop
+
+ case '%':
+ s.nextch()
+ s.op, s.prec = Rem, precMul
+ goto assignop
+
+ case '&':
+ s.nextch()
+ if s.ch == '&' {
+ s.nextch()
+ s.op, s.prec = AndAnd, precAndAnd
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = And, precMul
+ if s.ch == '^' {
+ s.nextch()
+ s.op = AndNot
+ }
+ goto assignop
+
+ case '|':
+ s.nextch()
+ if s.ch == '|' {
+ s.nextch()
+ s.op, s.prec = OrOr, precOrOr
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Or, precAdd
+ goto assignop
+
+ case '^':
+ s.nextch()
+ s.op, s.prec = Xor, precAdd
+ goto assignop
+
+ case '<':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Leq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '<' {
+ s.nextch()
+ s.op, s.prec = Shl, precMul
+ goto assignop
+ }
+ if s.ch == '-' {
+ s.nextch()
+ s.tok = _Arrow
+ break
+ }
+ s.op, s.prec = Lss, precCmp
+ s.tok = _Operator
+
+ case '>':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Geq, precCmp
+ s.tok = _Operator
+ break
+ }
+ if s.ch == '>' {
+ s.nextch()
+ s.op, s.prec = Shr, precMul
+ goto assignop
+ }
+ s.op, s.prec = Gtr, precCmp
+ s.tok = _Operator
+
+ case '=':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Eql, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.tok = _Assign
+
+ case '!':
+ s.nextch()
+ if s.ch == '=' {
+ s.nextch()
+ s.op, s.prec = Neq, precCmp
+ s.tok = _Operator
+ break
+ }
+ s.op, s.prec = Not, 0
+ s.tok = _Operator
+
+ case '~':
+ s.nextch()
+ s.op, s.prec = Tilde, 0
+ s.tok = _Operator
+
+ default:
+ s.errorf("invalid character %#U", s.ch)
+ s.nextch()
+ goto redo
+ }
+
+ return
+
+assignop:
+ if s.ch == '=' {
+ s.nextch()
+ s.tok = _AssignOp
+ return
+ }
+ s.tok = _Operator
+}
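+
+// A minimal sketch of driving the scanner directly (the tests in
+// scanner_test.go use the same pattern); the input string and the error
+// handler below are only examples:
+//
+//	var s scanner
+//	s.init(strings.NewReader("x := 42\n"), func(line, col uint, msg string) {
+//		fmt.Printf("%d:%d: %s\n", line, col, msg)
+//	}, 0)
+//	for s.next(); s.tok != _EOF; s.next() {
+//		fmt.Println(s.tok, s.lit) // s.lit is set for names, literals, and synthetic semicolons
+//	}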
+
+func (s *scanner) ident() {
+ // accelerate common case (7bit ASCII)
+ for isLetter(s.ch) || isDecimal(s.ch) {
+ s.nextch()
+ }
+
+ // general case
+ if s.ch >= utf8.RuneSelf {
+ for s.atIdentChar(false) {
+ s.nextch()
+ }
+ }
+
+ // possibly a keyword
+ lit := s.segment()
+ if len(lit) >= 2 {
+ if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) {
+ s.nlsemi = contains(1<<_Break|1<<_Continue|1<<_Fallthrough|1<<_Return, tok)
+ s.tok = tok
+ return
+ }
+ }
+
+ s.nlsemi = true
+ s.lit = string(lit)
+ s.tok = _Name
+}
+
+// tokStrFast is a faster version of token.String, which assumes that tok
+// is one of the valid tokens - and can thus skip bounds checks.
+func tokStrFast(tok token) string {
+ return _token_name[_token_index[tok-1]:_token_index[tok]]
+}
+
+func (s *scanner) atIdentChar(first bool) bool {
+ switch {
+ case unicode.IsLetter(s.ch) || s.ch == '_':
+ // ok
+ case unicode.IsDigit(s.ch):
+ if first {
+ s.errorf("identifier cannot begin with digit %#U", s.ch)
+ }
+ case s.ch >= utf8.RuneSelf:
+ s.errorf("invalid character %#U in identifier", s.ch)
+ default:
+ return false
+ }
+ return true
+}
+
+// hash is a perfect hash function for keywords.
+// It assumes that s has at least length 2.
+func hash(s []byte) uint {
+ return (uint(s[0])<<4 ^ uint(s[1]) + uint(len(s))) & uint(len(keywordMap)-1)
+}
+
+var keywordMap [1 << 6]token // size must be power of two
+
+func init() {
+ // populate keywordMap
+ for tok := _Break; tok <= _Var; tok++ {
+ h := hash([]byte(tok.String()))
+ if keywordMap[h] != 0 {
+ panic("imperfect hash")
+ }
+ keywordMap[h] = tok
+ }
+}
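+
+// As an illustration, the keyword lookup in ident above is a single array
+// access plus one string comparison:
+//
+//	if tok := keywordMap[hash(lit)]; tok != 0 && tokStrFast(tok) == string(lit) { ... }
+//
+// The init function above verifies that no two keywords collide, so a slot
+// holds at most one keyword; the string comparison only filters out
+// non-keyword identifiers that happen to hash into an occupied slot.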
+
+func lower(ch rune) rune { return ('a' - 'A') | ch } // returns lower-case ch iff ch is ASCII letter
+func isLetter(ch rune) bool { return 'a' <= lower(ch) && lower(ch) <= 'z' || ch == '_' }
+func isDecimal(ch rune) bool { return '0' <= ch && ch <= '9' }
+func isHex(ch rune) bool { return '0' <= ch && ch <= '9' || 'a' <= lower(ch) && lower(ch) <= 'f' }
+
+// digits accepts the sequence { digit | '_' }.
+// If base <= 10, digits accepts any decimal digit but records
+// the index (relative to the literal start) of a digit >= base
+// in *invalid, if *invalid < 0.
+// digits returns a bitset describing whether the sequence contained
+// digits (bit 0 is set), or separators '_' (bit 1 is set).
+func (s *scanner) digits(base int, invalid *int) (digsep int) {
+ if base <= 10 {
+ max := rune('0' + base)
+ for isDecimal(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ } else if s.ch >= max && *invalid < 0 {
+ _, col := s.pos()
+ *invalid = int(col - s.col) // record invalid rune index
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ } else {
+ for isHex(s.ch) || s.ch == '_' {
+ ds := 1
+ if s.ch == '_' {
+ ds = 2
+ }
+ digsep |= ds
+ s.nextch()
+ }
+ }
+ return
+}
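+
+// For example, scanning the digits of "1_000" yields digsep == 3 (a digit was
+// seen, and a separator was seen), while "0x_" leaves bit 0 clear, which
+// number below reports as "hexadecimal literal has no digits".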
+
+func (s *scanner) number(seenPoint bool) {
+ ok := true
+ kind := IntLit
+ base := 10 // number base
+ prefix := rune(0) // one of 0 (decimal), '0' (0-octal), 'x', 'o', or 'b'
+ digsep := 0 // bit 0: digit present, bit 1: '_' present
+ invalid := -1 // index of invalid digit in literal, or < 0
+
+ // integer part
+ if !seenPoint {
+ if s.ch == '0' {
+ s.nextch()
+ switch lower(s.ch) {
+ case 'x':
+ s.nextch()
+ base, prefix = 16, 'x'
+ case 'o':
+ s.nextch()
+ base, prefix = 8, 'o'
+ case 'b':
+ s.nextch()
+ base, prefix = 2, 'b'
+ default:
+ base, prefix = 8, '0'
+ digsep = 1 // leading 0
+ }
+ }
+ digsep |= s.digits(base, &invalid)
+ if s.ch == '.' {
+ if prefix == 'o' || prefix == 'b' {
+ s.errorf("invalid radix point in %s literal", baseName(base))
+ ok = false
+ }
+ s.nextch()
+ seenPoint = true
+ }
+ }
+
+ // fractional part
+ if seenPoint {
+ kind = FloatLit
+ digsep |= s.digits(base, &invalid)
+ }
+
+ if digsep&1 == 0 && ok {
+ s.errorf("%s literal has no digits", baseName(base))
+ ok = false
+ }
+
+ // exponent
+ if e := lower(s.ch); e == 'e' || e == 'p' {
+ if ok {
+ switch {
+ case e == 'e' && prefix != 0 && prefix != '0':
+ s.errorf("%q exponent requires decimal mantissa", s.ch)
+ ok = false
+ case e == 'p' && prefix != 'x':
+ s.errorf("%q exponent requires hexadecimal mantissa", s.ch)
+ ok = false
+ }
+ }
+ s.nextch()
+ kind = FloatLit
+ if s.ch == '+' || s.ch == '-' {
+ s.nextch()
+ }
+ digsep = s.digits(10, nil) | digsep&2 // don't lose sep bit
+ if digsep&1 == 0 && ok {
+ s.errorf("exponent has no digits")
+ ok = false
+ }
+ } else if prefix == 'x' && kind == FloatLit && ok {
+ s.errorf("hexadecimal mantissa requires a 'p' exponent")
+ ok = false
+ }
+
+ // suffix 'i'
+ if s.ch == 'i' {
+ kind = ImagLit
+ s.nextch()
+ }
+
+ s.setLit(kind, ok) // do this now so we can use s.lit below
+
+ if kind == IntLit && invalid >= 0 && ok {
+ s.errorAtf(invalid, "invalid digit %q in %s literal", s.lit[invalid], baseName(base))
+ ok = false
+ }
+
+ if digsep&2 != 0 && ok {
+ if i := invalidSep(s.lit); i >= 0 {
+ s.errorAtf(i, "'_' must separate successive digits")
+ ok = false
+ }
+ }
+
+ s.bad = !ok // correct s.bad
+}
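+
+// For example, "0x1.fp-2" is accepted as a hexadecimal float, while "0x1.f"
+// reaches the exponent check above with prefix 'x' and kind FloatLit but no
+// exponent, and is reported as "hexadecimal mantissa requires a 'p' exponent".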
+
+func baseName(base int) string {
+ switch base {
+ case 2:
+ return "binary"
+ case 8:
+ return "octal"
+ case 10:
+ return "decimal"
+ case 16:
+ return "hexadecimal"
+ }
+ panic("invalid base")
+}
+
+// invalidSep returns the index of the first invalid separator in x, or -1.
+func invalidSep(x string) int {
+ x1 := ' ' // prefix char, we only care if it's 'x'
+ d := '.' // digit, one of '_', '0' (a digit), or '.' (anything else)
+ i := 0
+
+ // a prefix counts as a digit
+ if len(x) >= 2 && x[0] == '0' {
+ x1 = lower(rune(x[1]))
+ if x1 == 'x' || x1 == 'o' || x1 == 'b' {
+ d = '0'
+ i = 2
+ }
+ }
+
+ // mantissa and exponent
+ for ; i < len(x); i++ {
+ p := d // previous digit
+ d = rune(x[i])
+ switch {
+ case d == '_':
+ if p != '0' {
+ return i
+ }
+ case isDecimal(d) || x1 == 'x' && isHex(d):
+ d = '0'
+ default:
+ if p == '_' {
+ return i - 1
+ }
+ d = '.'
+ }
+ }
+ if d == '_' {
+ return len(x) - 1
+ }
+
+ return -1
+}
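+
+// For instance, invalidSep("0x__f00d") returns 3: the "0x" prefix counts as a
+// digit, so the first '_' is a valid separator, but the second '_' at index 3
+// follows another '_' and is flagged.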
+
+func (s *scanner) rune() {
+ ok := true
+ s.nextch()
+
+ n := 0
+ for ; ; n++ {
+ if s.ch == '\'' {
+ if ok {
+ if n == 0 {
+ s.errorf("empty rune literal or unescaped '")
+ ok = false
+ } else if n != 1 {
+ s.errorAtf(0, "more than one character in rune literal")
+ ok = false
+ }
+ }
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('\'') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ if ok {
+ s.errorf("newline in rune literal")
+ ok = false
+ }
+ break
+ }
+ if s.ch < 0 {
+ if ok {
+ s.errorAtf(0, "rune literal not terminated")
+ ok = false
+ }
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(RuneLit, ok)
+}
+
+func (s *scanner) stdString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '"' {
+ s.nextch()
+ break
+ }
+ if s.ch == '\\' {
+ s.nextch()
+ if !s.escape('"') {
+ ok = false
+ }
+ continue
+ }
+ if s.ch == '\n' {
+ s.errorf("newline in string")
+ ok = false
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) rawString() {
+ ok := true
+ s.nextch()
+
+ for {
+ if s.ch == '`' {
+ s.nextch()
+ break
+ }
+ if s.ch < 0 {
+ s.errorAtf(0, "string not terminated")
+ ok = false
+ break
+ }
+ s.nextch()
+ }
+ // We leave CRs in the string since they are part of the
+ // literal (even though they are not part of the literal
+ // value).
+
+ s.setLit(StringLit, ok)
+}
+
+func (s *scanner) comment(text string) {
+ s.errorAtf(0, "%s", text)
+}
+
+func (s *scanner) skipLine() {
+ // don't consume '\n' - needed for nlsemi logic
+ for s.ch >= 0 && s.ch != '\n' {
+ s.nextch()
+ }
+}
+
+func (s *scanner) lineComment() {
+ // opening has already been consumed
+
+ if s.mode&comments != 0 {
+ s.skipLine()
+ s.comment(string(s.segment()))
+ return
+ }
+
+ // are we saving directives? or is this definitely not a directive?
+ if s.mode&directives == 0 || (s.ch != 'g' && s.ch != 'l') {
+ s.stop()
+ s.skipLine()
+ return
+ }
+
+ // recognize go: or line directives
+ prefix := "go:"
+ if s.ch == 'l' {
+ prefix = "line "
+ }
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipLine()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ s.skipLine()
+ s.comment(string(s.segment()))
+}
+
+func (s *scanner) skipComment() bool {
+ for s.ch >= 0 {
+ for s.ch == '*' {
+ s.nextch()
+ if s.ch == '/' {
+ s.nextch()
+ return true
+ }
+ }
+ s.nextch()
+ }
+ s.errorAtf(0, "comment not terminated")
+ return false
+}
+
+func (s *scanner) fullComment() {
+ /* opening has already been consumed */
+
+ if s.mode&comments != 0 {
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+ return
+ }
+
+ if s.mode&directives == 0 || s.ch != 'l' {
+ s.stop()
+ s.skipComment()
+ return
+ }
+
+ // recognize line directive
+ const prefix = "line "
+ for _, m := range prefix {
+ if s.ch != m {
+ s.stop()
+ s.skipComment()
+ return
+ }
+ s.nextch()
+ }
+
+ // directive text
+ if s.skipComment() {
+ s.comment(string(s.segment()))
+ }
+}
+
+func (s *scanner) escape(quote rune) bool {
+ var n int
+ var base, max uint32
+
+ switch s.ch {
+ case quote, 'a', 'b', 'f', 'n', 'r', 't', 'v', '\\':
+ s.nextch()
+ return true
+ case '0', '1', '2', '3', '4', '5', '6', '7':
+ n, base, max = 3, 8, 255
+ case 'x':
+ s.nextch()
+ n, base, max = 2, 16, 255
+ case 'u':
+ s.nextch()
+ n, base, max = 4, 16, unicode.MaxRune
+ case 'U':
+ s.nextch()
+ n, base, max = 8, 16, unicode.MaxRune
+ default:
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ s.errorf("unknown escape")
+ return false
+ }
+
+ var x uint32
+ for i := n; i > 0; i-- {
+ if s.ch < 0 {
+ return true // complain in caller about EOF
+ }
+ d := base
+ if isDecimal(s.ch) {
+ d = uint32(s.ch) - '0'
+ } else if 'a' <= lower(s.ch) && lower(s.ch) <= 'f' {
+ d = uint32(lower(s.ch)) - 'a' + 10
+ }
+ if d >= base {
+ s.errorf("invalid character %q in %s escape", s.ch, baseName(int(base)))
+ return false
+ }
+ // d < base
+ x = x*base + d
+ s.nextch()
+ }
+
+ if x > max && base == 8 {
+ s.errorf("octal escape value %d > 255", x)
+ return false
+ }
+
+ if x > max || 0xD800 <= x && x < 0xE000 /* surrogate range */ {
+ s.errorf("escape is invalid Unicode code point %#U", x)
+ return false
+ }
+
+ return true
+}
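+
+// For instance, \x41 and \377 are valid single-byte escapes, \u00e9 is a valid
+// Unicode escape, \400 is rejected because its value 256 exceeds the octal
+// limit of 255, and \U00110000 is rejected as an invalid Unicode code point.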
diff --git a/src/cmd/compile/internal/syntax/scanner_test.go b/src/cmd/compile/internal/syntax/scanner_test.go
new file mode 100644
index 0000000..2deb3bb
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/scanner_test.go
@@ -0,0 +1,767 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "bytes"
+ "fmt"
+ "os"
+ "strings"
+ "testing"
+)
+
+// errh is a default error handler for basic tests.
+func errh(line, col uint, msg string) {
+ panic(fmt.Sprintf("%d:%d: %s", line, col, msg))
+}
+
+// Don't bother with other tests if TestSmoke doesn't pass.
+func TestSmoke(t *testing.T) {
+ const src = "if (+foo\t+=..123/***/0.9_0e-0i'a'`raw`\"string\"..f;//$"
+ tokens := []token{_If, _Lparen, _Operator, _Name, _AssignOp, _Dot, _Literal, _Literal, _Literal, _Literal, _Literal, _Dot, _Dot, _Name, _Semi, _EOF}
+
+ var got scanner
+ got.init(strings.NewReader(src), errh, 0)
+ for _, want := range tokens {
+ got.next()
+ if got.tok != want {
+ t.Errorf("%d:%d: got %s; want %s", got.line, got.col, got.tok, want)
+ continue
+ }
+ }
+}
+
+// Once TestSmoke passes, run TestTokens next.
+func TestTokens(t *testing.T) {
+ var got scanner
+ for _, want := range sampleTokens {
+ got.init(strings.NewReader(want.src), func(line, col uint, msg string) {
+ t.Errorf("%s:%d:%d: %s", want.src, line, col, msg)
+ }, 0)
+ got.next()
+ if got.tok != want.tok {
+ t.Errorf("%s: got %s; want %s", want.src, got.tok, want.tok)
+ continue
+ }
+ if (got.tok == _Name || got.tok == _Literal) && got.lit != want.src {
+ t.Errorf("%s: got %q; want %q", want.src, got.lit, want.src)
+ }
+ }
+}
+
+func TestScanner(t *testing.T) {
+ if testing.Short() {
+ t.Skip("skipping test in short mode")
+ }
+
+ filename := *src_ // can be changed via -src flag
+ src, err := os.Open(filename)
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer src.Close()
+
+ var s scanner
+ s.init(src, errh, 0)
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ if !testing.Verbose() {
+ continue
+ }
+ switch s.tok {
+ case _Name, _Literal:
+ fmt.Printf("%s:%d:%d: %s => %s\n", filename, s.line, s.col, s.tok, s.lit)
+ case _Operator:
+ fmt.Printf("%s:%d:%d: %s => %s (prec = %d)\n", filename, s.line, s.col, s.tok, s.op, s.prec)
+ default:
+ fmt.Printf("%s:%d:%d: %s\n", filename, s.line, s.col, s.tok)
+ }
+ }
+}
+
+func TestEmbeddedTokens(t *testing.T) {
+ // make source
+ var buf bytes.Buffer
+ for i, s := range sampleTokens {
+ buf.WriteString("\t\t\t\t"[:i&3]) // leading indentation
+ buf.WriteString(s.src) // token
+ buf.WriteString(" "[:i&7]) // trailing spaces
+ buf.WriteString(fmt.Sprintf("/*line foo:%d */ // bar\n", i)) // comments + newline (don't crash w/o directive handler)
+ }
+
+ // scan source
+ var got scanner
+ var src string
+ got.init(&buf, func(line, col uint, msg string) {
+ t.Fatalf("%s:%d:%d: %s", src, line, col, msg)
+ }, 0)
+ got.next()
+ for i, want := range sampleTokens {
+ src = want.src
+ nlsemi := false
+
+ if got.line-linebase != uint(i) {
+ t.Errorf("%s: got line %d; want %d", src, got.line-linebase, i)
+ }
+
+ if got.tok != want.tok {
+ t.Errorf("%s: got tok %s; want %s", src, got.tok, want.tok)
+ continue
+ }
+
+ switch want.tok {
+ case _Semi:
+ if got.lit != "semicolon" {
+ t.Errorf("%s: got %s; want semicolon", src, got.lit)
+ }
+
+ case _Name, _Literal:
+ if got.lit != want.src {
+ t.Errorf("%s: got lit %q; want %q", src, got.lit, want.src)
+ continue
+ }
+ nlsemi = true
+
+ case _Operator, _AssignOp, _IncOp:
+ if got.op != want.op {
+ t.Errorf("%s: got op %s; want %s", src, got.op, want.op)
+ continue
+ }
+ if got.prec != want.prec {
+ t.Errorf("%s: got prec %d; want %d", src, got.prec, want.prec)
+ continue
+ }
+ nlsemi = want.tok == _IncOp
+
+ case _Rparen, _Rbrack, _Rbrace, _Break, _Continue, _Fallthrough, _Return:
+ nlsemi = true
+ }
+
+ if nlsemi {
+ got.next()
+ if got.tok != _Semi {
+ t.Errorf("%s: got tok %s; want ;", src, got.tok)
+ continue
+ }
+ if got.lit != "newline" {
+ t.Errorf("%s: got %s; want newline", src, got.lit)
+ }
+ }
+
+ got.next()
+ }
+
+ if got.tok != _EOF {
+ t.Errorf("got %q; want _EOF", got.tok)
+ }
+}
+
+var sampleTokens = [...]struct {
+ tok token
+ src string
+ op Operator
+ prec int
+}{
+ // name samples
+ {_Name, "x", 0, 0},
+ {_Name, "X123", 0, 0},
+ {_Name, "foo", 0, 0},
+ {_Name, "Foo123", 0, 0},
+ {_Name, "foo_bar", 0, 0},
+ {_Name, "_", 0, 0},
+ {_Name, "_foobar", 0, 0},
+ {_Name, "a۰۱۸", 0, 0},
+ {_Name, "foo६४", 0, 0},
+ {_Name, "bar9876", 0, 0},
+ {_Name, "ŝ", 0, 0},
+ {_Name, "ŝfoo", 0, 0},
+
+ // literal samples
+ {_Literal, "0", 0, 0},
+ {_Literal, "1", 0, 0},
+ {_Literal, "12345", 0, 0},
+ {_Literal, "123456789012345678890123456789012345678890", 0, 0},
+ {_Literal, "01234567", 0, 0},
+ {_Literal, "0_1_234_567", 0, 0},
+ {_Literal, "0X0", 0, 0},
+ {_Literal, "0xcafebabe", 0, 0},
+ {_Literal, "0x_cafe_babe", 0, 0},
+ {_Literal, "0O0", 0, 0},
+ {_Literal, "0o000", 0, 0},
+ {_Literal, "0o_000", 0, 0},
+ {_Literal, "0B1", 0, 0},
+ {_Literal, "0b01100110", 0, 0},
+ {_Literal, "0b_0110_0110", 0, 0},
+ {_Literal, "0.", 0, 0},
+ {_Literal, "0.e0", 0, 0},
+ {_Literal, "0.e-1", 0, 0},
+ {_Literal, "0.e+123", 0, 0},
+ {_Literal, ".0", 0, 0},
+ {_Literal, ".0E00", 0, 0},
+ {_Literal, ".0E-0123", 0, 0},
+ {_Literal, ".0E+12345678901234567890", 0, 0},
+ {_Literal, ".45e1", 0, 0},
+ {_Literal, "3.14159265", 0, 0},
+ {_Literal, "1e0", 0, 0},
+ {_Literal, "1e+100", 0, 0},
+ {_Literal, "1e-100", 0, 0},
+ {_Literal, "2.71828e-1000", 0, 0},
+ {_Literal, "0i", 0, 0},
+ {_Literal, "1i", 0, 0},
+ {_Literal, "012345678901234567889i", 0, 0},
+ {_Literal, "123456789012345678890i", 0, 0},
+ {_Literal, "0.i", 0, 0},
+ {_Literal, ".0i", 0, 0},
+ {_Literal, "3.14159265i", 0, 0},
+ {_Literal, "1e0i", 0, 0},
+ {_Literal, "1e+100i", 0, 0},
+ {_Literal, "1e-100i", 0, 0},
+ {_Literal, "2.71828e-1000i", 0, 0},
+ {_Literal, "'a'", 0, 0},
+ {_Literal, "'\\000'", 0, 0},
+ {_Literal, "'\\xFF'", 0, 0},
+ {_Literal, "'\\uff16'", 0, 0},
+ {_Literal, "'\\U0000ff16'", 0, 0},
+ {_Literal, "`foobar`", 0, 0},
+ {_Literal, "`foo\tbar`", 0, 0},
+ {_Literal, "`\r`", 0, 0},
+
+ // operators
+ {_Operator, "!", Not, 0},
+ {_Operator, "~", Tilde, 0},
+
+ {_Operator, "||", OrOr, precOrOr},
+
+ {_Operator, "&&", AndAnd, precAndAnd},
+
+ {_Operator, "==", Eql, precCmp},
+ {_Operator, "!=", Neq, precCmp},
+ {_Operator, "<", Lss, precCmp},
+ {_Operator, "<=", Leq, precCmp},
+ {_Operator, ">", Gtr, precCmp},
+ {_Operator, ">=", Geq, precCmp},
+
+ {_Operator, "+", Add, precAdd},
+ {_Operator, "-", Sub, precAdd},
+ {_Operator, "|", Or, precAdd},
+ {_Operator, "^", Xor, precAdd},
+
+ {_Star, "*", Mul, precMul},
+ {_Operator, "/", Div, precMul},
+ {_Operator, "%", Rem, precMul},
+ {_Operator, "&", And, precMul},
+ {_Operator, "&^", AndNot, precMul},
+ {_Operator, "<<", Shl, precMul},
+ {_Operator, ">>", Shr, precMul},
+
+ // assignment operations
+ {_AssignOp, "+=", Add, precAdd},
+ {_AssignOp, "-=", Sub, precAdd},
+ {_AssignOp, "|=", Or, precAdd},
+ {_AssignOp, "^=", Xor, precAdd},
+
+ {_AssignOp, "*=", Mul, precMul},
+ {_AssignOp, "/=", Div, precMul},
+ {_AssignOp, "%=", Rem, precMul},
+ {_AssignOp, "&=", And, precMul},
+ {_AssignOp, "&^=", AndNot, precMul},
+ {_AssignOp, "<<=", Shl, precMul},
+ {_AssignOp, ">>=", Shr, precMul},
+
+ // other operations
+ {_IncOp, "++", Add, precAdd},
+ {_IncOp, "--", Sub, precAdd},
+ {_Assign, "=", 0, 0},
+ {_Define, ":=", 0, 0},
+ {_Arrow, "<-", 0, 0},
+
+ // delimiters
+ {_Lparen, "(", 0, 0},
+ {_Lbrack, "[", 0, 0},
+ {_Lbrace, "{", 0, 0},
+ {_Rparen, ")", 0, 0},
+ {_Rbrack, "]", 0, 0},
+ {_Rbrace, "}", 0, 0},
+ {_Comma, ",", 0, 0},
+ {_Semi, ";", 0, 0},
+ {_Colon, ":", 0, 0},
+ {_Dot, ".", 0, 0},
+ {_DotDotDot, "...", 0, 0},
+
+ // keywords
+ {_Break, "break", 0, 0},
+ {_Case, "case", 0, 0},
+ {_Chan, "chan", 0, 0},
+ {_Const, "const", 0, 0},
+ {_Continue, "continue", 0, 0},
+ {_Default, "default", 0, 0},
+ {_Defer, "defer", 0, 0},
+ {_Else, "else", 0, 0},
+ {_Fallthrough, "fallthrough", 0, 0},
+ {_For, "for", 0, 0},
+ {_Func, "func", 0, 0},
+ {_Go, "go", 0, 0},
+ {_Goto, "goto", 0, 0},
+ {_If, "if", 0, 0},
+ {_Import, "import", 0, 0},
+ {_Interface, "interface", 0, 0},
+ {_Map, "map", 0, 0},
+ {_Package, "package", 0, 0},
+ {_Range, "range", 0, 0},
+ {_Return, "return", 0, 0},
+ {_Select, "select", 0, 0},
+ {_Struct, "struct", 0, 0},
+ {_Switch, "switch", 0, 0},
+ {_Type, "type", 0, 0},
+ {_Var, "var", 0, 0},
+}
+
+func TestComments(t *testing.T) {
+ type comment struct {
+ line, col uint // 0-based
+ text string
+ }
+
+ for _, test := range []struct {
+ src string
+ want comment
+ }{
+ // no comments
+ {"no comment here", comment{0, 0, ""}},
+ {" /", comment{0, 0, ""}},
+ {"\n /*/", comment{0, 0, ""}},
+
+ //-style comments
+ {"// line comment\n", comment{0, 0, "// line comment"}},
+ {"package p // line comment\n", comment{0, 10, "// line comment"}},
+ {"//\n//\n\t// want this one\r\n", comment{2, 1, "// want this one\r"}},
+ {"\n\n//\n", comment{2, 0, "//"}},
+ {"//", comment{0, 0, "//"}},
+
+ /*-style comments */
+ {"123/* regular comment */", comment{0, 3, "/* regular comment */"}},
+ {"package p /* regular comment", comment{0, 0, ""}},
+ {"\n\n\n/*\n*//* want this one */", comment{4, 2, "/* want this one */"}},
+ {"\n\n/**/", comment{2, 0, "/**/"}},
+ {"/*", comment{0, 0, ""}},
+ } {
+ var s scanner
+ var got comment
+ s.init(strings.NewReader(test.src), func(line, col uint, msg string) {
+ if msg[0] != '/' {
+ // error
+ if msg != "comment not terminated" {
+ t.Errorf("%q: %s", test.src, msg)
+ }
+ return
+ }
+ got = comment{line - linebase, col - colbase, msg} // keep last one
+ }, comments)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ want := test.want
+ if got.line != want.line || got.col != want.col {
+ t.Errorf("%q: got position %d:%d; want %d:%d", test.src, got.line, got.col, want.line, want.col)
+ }
+ if got.text != want.text {
+ t.Errorf("%q: got %q; want %q", test.src, got.text, want.text)
+ }
+ }
+}
+
+func TestNumbers(t *testing.T) {
+ for _, test := range []struct {
+ kind LitKind
+ src, tokens, err string
+ }{
+ // binaries
+ {IntLit, "0b0", "0b0", ""},
+ {IntLit, "0b1010", "0b1010", ""},
+ {IntLit, "0B1110", "0B1110", ""},
+
+ {IntLit, "0b", "0b", "binary literal has no digits"},
+ {IntLit, "0b0190", "0b0190", "invalid digit '9' in binary literal"},
+ {IntLit, "0b01a0", "0b01 a0", ""}, // only accept 0-9
+
+ {FloatLit, "0b.", "0b.", "invalid radix point in binary literal"},
+ {FloatLit, "0b.1", "0b.1", "invalid radix point in binary literal"},
+ {FloatLit, "0b1.0", "0b1.0", "invalid radix point in binary literal"},
+ {FloatLit, "0b1e10", "0b1e10", "'e' exponent requires decimal mantissa"},
+ {FloatLit, "0b1P-1", "0b1P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0b10i", "0b10i", ""},
+ {ImagLit, "0b10.0i", "0b10.0i", "invalid radix point in binary literal"},
+
+ // octals
+ {IntLit, "0o0", "0o0", ""},
+ {IntLit, "0o1234", "0o1234", ""},
+ {IntLit, "0O1234", "0O1234", ""},
+
+ {IntLit, "0o", "0o", "octal literal has no digits"},
+ {IntLit, "0o8123", "0o8123", "invalid digit '8' in octal literal"},
+ {IntLit, "0o1293", "0o1293", "invalid digit '9' in octal literal"},
+ {IntLit, "0o12a3", "0o12 a3", ""}, // only accept 0-9
+
+ {FloatLit, "0o.", "0o.", "invalid radix point in octal literal"},
+ {FloatLit, "0o.2", "0o.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1.2", "0o1.2", "invalid radix point in octal literal"},
+ {FloatLit, "0o1E+2", "0o1E+2", "'E' exponent requires decimal mantissa"},
+ {FloatLit, "0o1p10", "0o1p10", "'p' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0o10i", "0o10i", ""},
+ {ImagLit, "0o10e0i", "0o10e0i", "'e' exponent requires decimal mantissa"},
+
+ // 0-octals
+ {IntLit, "0", "0", ""},
+ {IntLit, "0123", "0123", ""},
+
+ {IntLit, "08123", "08123", "invalid digit '8' in octal literal"},
+ {IntLit, "01293", "01293", "invalid digit '9' in octal literal"},
+ {IntLit, "0F.", "0 F .", ""}, // only accept 0-9
+ {IntLit, "0123F.", "0123 F .", ""},
+ {IntLit, "0123456x", "0123456 x", ""},
+
+ // decimals
+ {IntLit, "1", "1", ""},
+ {IntLit, "1234", "1234", ""},
+
+ {IntLit, "1f", "1 f", ""}, // only accept 0-9
+
+ {ImagLit, "0i", "0i", ""},
+ {ImagLit, "0678i", "0678i", ""},
+
+ // decimal floats
+ {FloatLit, "0.", "0.", ""},
+ {FloatLit, "123.", "123.", ""},
+ {FloatLit, "0123.", "0123.", ""},
+
+ {FloatLit, ".0", ".0", ""},
+ {FloatLit, ".123", ".123", ""},
+ {FloatLit, ".0123", ".0123", ""},
+
+ {FloatLit, "0.0", "0.0", ""},
+ {FloatLit, "123.123", "123.123", ""},
+ {FloatLit, "0123.0123", "0123.0123", ""},
+
+ {FloatLit, "0e0", "0e0", ""},
+ {FloatLit, "123e+0", "123e+0", ""},
+ {FloatLit, "0123E-1", "0123E-1", ""},
+
+ {FloatLit, "0.e+1", "0.e+1", ""},
+ {FloatLit, "123.E-10", "123.E-10", ""},
+ {FloatLit, "0123.e123", "0123.e123", ""},
+
+ {FloatLit, ".0e-1", ".0e-1", ""},
+ {FloatLit, ".123E+10", ".123E+10", ""},
+ {FloatLit, ".0123E123", ".0123E123", ""},
+
+ {FloatLit, "0.0e1", "0.0e1", ""},
+ {FloatLit, "123.123E-10", "123.123E-10", ""},
+ {FloatLit, "0123.0123e+456", "0123.0123e+456", ""},
+
+ {FloatLit, "0e", "0e", "exponent has no digits"},
+ {FloatLit, "0E+", "0E+", "exponent has no digits"},
+ {FloatLit, "1e+f", "1e+ f", "exponent has no digits"},
+ {FloatLit, "0p0", "0p0", "'p' exponent requires hexadecimal mantissa"},
+ {FloatLit, "1.0P-1", "1.0P-1", "'P' exponent requires hexadecimal mantissa"},
+
+ {ImagLit, "0.i", "0.i", ""},
+ {ImagLit, ".123i", ".123i", ""},
+ {ImagLit, "123.123i", "123.123i", ""},
+ {ImagLit, "123e+0i", "123e+0i", ""},
+ {ImagLit, "123.E-10i", "123.E-10i", ""},
+ {ImagLit, ".123E+10i", ".123E+10i", ""},
+
+ // hexadecimals
+ {IntLit, "0x0", "0x0", ""},
+ {IntLit, "0x1234", "0x1234", ""},
+ {IntLit, "0xcafef00d", "0xcafef00d", ""},
+ {IntLit, "0XCAFEF00D", "0XCAFEF00D", ""},
+
+ {IntLit, "0x", "0x", "hexadecimal literal has no digits"},
+ {IntLit, "0x1g", "0x1 g", ""},
+
+ {ImagLit, "0xf00i", "0xf00i", ""},
+
+ // hexadecimal floats
+ {FloatLit, "0x0p0", "0x0p0", ""},
+ {FloatLit, "0x12efp-123", "0x12efp-123", ""},
+ {FloatLit, "0xABCD.p+0", "0xABCD.p+0", ""},
+ {FloatLit, "0x.0189P-0", "0x.0189P-0", ""},
+ {FloatLit, "0x1.ffffp+1023", "0x1.ffffp+1023", ""},
+
+ {FloatLit, "0x.", "0x.", "hexadecimal literal has no digits"},
+ {FloatLit, "0x0.", "0x0.", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x.0", "0x.0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1", "0x1.1", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.1e0", "0x1.1e0", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x1.2gp1a", "0x1.2 gp1a", "hexadecimal mantissa requires a 'p' exponent"},
+ {FloatLit, "0x0p", "0x0p", "exponent has no digits"},
+ {FloatLit, "0xeP-", "0xeP-", "exponent has no digits"},
+ {FloatLit, "0x1234PAB", "0x1234P AB", "exponent has no digits"},
+ {FloatLit, "0x1.2p1a", "0x1.2p1 a", ""},
+
+ {ImagLit, "0xf00.bap+12i", "0xf00.bap+12i", ""},
+
+ // separators
+ {IntLit, "0b_1000_0001", "0b_1000_0001", ""},
+ {IntLit, "0o_600", "0o_600", ""},
+ {IntLit, "0_466", "0_466", ""},
+ {IntLit, "1_000", "1_000", ""},
+ {FloatLit, "1_000.000_1", "1_000.000_1", ""},
+ {ImagLit, "10e+1_2_3i", "10e+1_2_3i", ""},
+ {IntLit, "0x_f00d", "0x_f00d", ""},
+ {FloatLit, "0x_f00d.0p1_2", "0x_f00d.0p1_2", ""},
+
+ {IntLit, "0b__1000", "0b__1000", "'_' must separate successive digits"},
+ {IntLit, "0o60___0", "0o60___0", "'_' must separate successive digits"},
+ {IntLit, "0466_", "0466_", "'_' must separate successive digits"},
+ {FloatLit, "1_.", "1_.", "'_' must separate successive digits"},
+ {FloatLit, "0._1", "0._1", "'_' must separate successive digits"},
+ {FloatLit, "2.7_e0", "2.7_e0", "'_' must separate successive digits"},
+ {ImagLit, "10e+12_i", "10e+12_i", "'_' must separate successive digits"},
+ {IntLit, "0x___0", "0x___0", "'_' must separate successive digits"},
+ {FloatLit, "0x1.0_p0", "0x1.0_p0", "'_' must separate successive digits"},
+ } {
+ var s scanner
+ var err string
+ s.init(strings.NewReader(test.src), func(_, _ uint, msg string) {
+ if err == "" {
+ err = msg
+ }
+ }, 0)
+
+ for i, want := range strings.Split(test.tokens, " ") {
+ err = ""
+ s.next()
+
+ if err != "" && !s.bad {
+ t.Errorf("%q: got error but bad not set", test.src)
+ }
+
+ // compute lit where s.lit is not defined
+ var lit string
+ switch s.tok {
+ case _Name, _Literal:
+ lit = s.lit
+ case _Dot:
+ lit = "."
+ }
+
+ if i == 0 {
+ if s.tok != _Literal || s.kind != test.kind {
+ t.Errorf("%q: got token %s (kind = %d); want literal (kind = %d)", test.src, s.tok, s.kind, test.kind)
+ }
+ if err != test.err {
+ t.Errorf("%q: got error %q; want %q", test.src, err, test.err)
+ }
+ }
+
+ if lit != want {
+ t.Errorf("%q: got literal %q (%s); want %s", test.src, lit, s.tok, want)
+ }
+ }
+
+ // make sure we read all
+ s.next()
+ if s.tok == _Semi {
+ s.next()
+ }
+ if s.tok != _EOF {
+ t.Errorf("%q: got %s; want EOF", test.src, s.tok)
+ }
+ }
+}
+
+func TestScanErrors(t *testing.T) {
+ for _, test := range []struct {
+ src, err string
+ line, col uint // 0-based
+ }{
+ // Note: Positions for lexical errors are the earliest position
+ // where the error is apparent, not the beginning of the respective
+ // token.
+
+ // rune-level errors
+ {"fo\x00o", "invalid NUL character", 0, 2},
+ {"foo\n\ufeff bar", "invalid BOM in the middle of the file", 1, 0},
+ {"foo\n\n\xff ", "invalid UTF-8 encoding", 2, 0},
+
+ // token-level errors
+ {"\u00BD" /* ½ */, "invalid character U+00BD '½' in identifier", 0, 0},
+ {"\U0001d736\U0001d737\U0001d738_½" /* 𝜶𝜷𝜸_½ */, "invalid character U+00BD '½' in identifier", 0, 13 /* byte offset */},
+ {"\U0001d7d8" /* 𝟘 */, "identifier cannot begin with digit U+1D7D8 '𝟘'", 0, 0},
+ {"foo\U0001d7d8_½" /* foo𝟘_½ */, "invalid character U+00BD '½' in identifier", 0, 8 /* byte offset */},
+
+ {"x + #y", "invalid character U+0023 '#'", 0, 4},
+ {"foo$bar = 0", "invalid character U+0024 '$'", 0, 3},
+ {"0123456789", "invalid digit '8' in octal literal", 0, 8},
+ {"0123456789. /* foobar", "comment not terminated", 0, 12}, // valid float constant
+ {"0123456789e0 /*\nfoobar", "comment not terminated", 0, 13}, // valid float constant
+ {"var a, b = 09, 07\n", "invalid digit '9' in octal literal", 0, 12},
+
+ {`''`, "empty rune literal or unescaped '", 0, 1},
+ {"'\n", "newline in rune literal", 0, 1},
+ {`'\`, "rune literal not terminated", 0, 0},
+ {`'\'`, "rune literal not terminated", 0, 0},
+ {`'\x`, "rune literal not terminated", 0, 0},
+ {`'\x'`, "invalid character '\\'' in hexadecimal escape", 0, 3},
+ {`'\y'`, "unknown escape", 0, 2},
+ {`'\x0'`, "invalid character '\\'' in hexadecimal escape", 0, 4},
+ {`'\00'`, "invalid character '\\'' in octal escape", 0, 4},
+ {`'\377' /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`'\378`, "invalid character '8' in octal escape", 0, 4},
+ {`'\400'`, "octal escape value 256 > 255", 0, 5},
+ {`'xx`, "rune literal not terminated", 0, 0},
+ {`'xx'`, "more than one character in rune literal", 0, 0},
+
+ {"\n \"foo\n", "newline in string", 1, 7},
+ {`"`, "string not terminated", 0, 0},
+ {`"foo`, "string not terminated", 0, 0},
+ {"`", "string not terminated", 0, 0},
+ {"`foo", "string not terminated", 0, 0},
+ {"/*/", "comment not terminated", 0, 0},
+ {"/*\n\nfoo", "comment not terminated", 0, 0},
+ {`"\`, "string not terminated", 0, 0},
+ {`"\"`, "string not terminated", 0, 0},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`"\y"`, "unknown escape", 0, 2},
+ {`"\x0"`, "invalid character '\"' in hexadecimal escape", 0, 4},
+ {`"\00"`, "invalid character '\"' in octal escape", 0, 4},
+ {`"\377" /*`, "comment not terminated", 0, 7}, // valid octal escape
+ {`"\378"`, "invalid character '8' in octal escape", 0, 4},
+ {`"\400"`, "octal escape value 256 > 255", 0, 5},
+
+ {`s := "foo\z"`, "unknown escape", 0, 10},
+ {`s := "foo\z00\nbar"`, "unknown escape", 0, 10},
+ {`"\x`, "string not terminated", 0, 0},
+ {`"\x"`, "invalid character '\"' in hexadecimal escape", 0, 3},
+ {`var s string = "\x"`, "invalid character '\"' in hexadecimal escape", 0, 18},
+ {`return "\Uffffffff"`, "escape is invalid Unicode code point U+FFFFFFFF", 0, 18},
+
+ {"0b.0", "invalid radix point in binary literal", 0, 2},
+ {"0x.p0\n", "hexadecimal literal has no digits", 0, 3},
+
+ // former problem cases
+ {"package p\n\n\xef", "invalid UTF-8 encoding", 2, 0},
+ } {
+ var s scanner
+ var line, col uint
+ var err string
+ s.init(strings.NewReader(test.src), func(l, c uint, msg string) {
+ if err == "" {
+ line, col = l-linebase, c-colbase
+ err = msg
+ }
+ }, 0)
+
+ for {
+ s.next()
+ if s.tok == _EOF {
+ break
+ }
+ }
+
+ if err != "" {
+ if err != test.err {
+ t.Errorf("%q: got err = %q; want %q", test.src, err, test.err)
+ }
+ if line != test.line {
+ t.Errorf("%q: got line = %d; want %d", test.src, line, test.line)
+ }
+ if col != test.col {
+ t.Errorf("%q: got col = %d; want %d", test.src, col, test.col)
+ }
+ } else {
+ t.Errorf("%q: got no error; want %q", test.src, test.err)
+ }
+ }
+}
+
+func TestDirectives(t *testing.T) {
+ for _, src := range []string{
+ "line",
+ "// line",
+ "//line",
+ "//line foo",
+ "//line foo%bar",
+
+ "go",
+ "// go:",
+ "//go:",
+ "//go :foo",
+ "//go:foo",
+ "//go:foo%bar",
+ } {
+ got := ""
+ var s scanner
+ s.init(strings.NewReader(src), func(_, col uint, msg string) {
+ if col != colbase {
+ t.Errorf("%s: got col = %d; want %d", src, col, colbase)
+ }
+ if msg == "" {
+ t.Errorf("%s: handler called with empty msg", src)
+ }
+ got = msg
+ }, directives)
+
+ s.next()
+ if strings.HasPrefix(src, "//line ") || strings.HasPrefix(src, "//go:") {
+ // handler should have been called
+ if got != src {
+ t.Errorf("got %s; want %s", got, src)
+ }
+ } else {
+ // handler should not have been called
+ if got != "" {
+ t.Errorf("got %s for %s", got, src)
+ }
+ }
+ }
+}
+
+func TestIssue21938(t *testing.T) {
+ s := "/*" + strings.Repeat(" ", 4089) + "*/ .5"
+
+ var got scanner
+ got.init(strings.NewReader(s), errh, 0)
+ got.next()
+
+ if got.tok != _Literal || got.lit != ".5" {
+ t.Errorf("got %s %q; want %s %q", got.tok, got.lit, _Literal, ".5")
+ }
+}
+
+func TestIssue33961(t *testing.T) {
+ literals := `08__ 0b.p 0b_._p 0x.e 0x.p`
+ for _, lit := range strings.Split(literals, " ") {
+ n := 0
+ var got scanner
+ got.init(strings.NewReader(lit), func(_, _ uint, msg string) {
+ // fmt.Printf("%s: %s\n", lit, msg) // uncomment for debugging
+ n++
+ }, 0)
+ got.next()
+
+ if n != 1 {
+ t.Errorf("%q: got %d errors; want 1", lit, n)
+ continue
+ }
+
+ if !got.bad {
+ t.Errorf("%q: got error but bad not set", lit)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/source.go b/src/cmd/compile/internal/syntax/source.go
new file mode 100644
index 0000000..01b5921
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/source.go
@@ -0,0 +1,218 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements source, a buffered rune reader
+// specialized for scanning Go code: Reading
+// ASCII characters, maintaining current (line, col)
+// position information, and recording of the most
+// recently read source segment are highly optimized.
+// This file is self-contained (go tool compile source.go
+// compiles) and thus could be made into its own package.
+
+package syntax
+
+import (
+ "io"
+ "unicode/utf8"
+)
+
+// The source buffer is accessed using three indices b (begin),
+// r (read), and e (end):
+//
+// - If b >= 0, it points to the beginning of a segment of most
+// recently read characters (typically a Go literal).
+//
+// - r points to the byte immediately following the most recently
+// read character ch, which starts at r-chw.
+//
+// - e points to the byte immediately following the last byte that
+// was read into the buffer.
+//
+// The buffer content is terminated at buf[e] with the sentinel
+// character utf8.RuneSelf. This makes it possible to test for
+// the common case of ASCII characters with a single 'if' (see
+// nextch method).
+//
+// +------ content in use -------+
+// v v
+// buf [...read...|...segment...|ch|...unread...|s|...free...]
+// ^ ^ ^ ^
+// | | | |
+// b r-chw r e
+//
+// Invariant: -1 <= b < r <= e < len(buf) && buf[e] == sentinel
+
+type source struct {
+ in io.Reader
+ errh func(line, col uint, msg string)
+
+ buf []byte // source buffer
+ ioerr error // pending I/O error, or nil
+ b, r, e int // buffer indices (see comment above)
+ line, col uint // source position of ch (0-based)
+ ch rune // most recently read character
+ chw int // width of ch
+}
+
+const sentinel = utf8.RuneSelf
+
+func (s *source) init(in io.Reader, errh func(line, col uint, msg string)) {
+ s.in = in
+ s.errh = errh
+
+ if s.buf == nil {
+ s.buf = make([]byte, nextSize(0))
+ }
+ s.buf[0] = sentinel
+ s.ioerr = nil
+ s.b, s.r, s.e = -1, 0, 0
+ s.line, s.col = 0, 0
+ s.ch = ' '
+ s.chw = 0
+}
+
+// starting points for line and column numbers
+const linebase = 1
+const colbase = 1
+
+// pos returns the (line, col) source position of s.ch.
+func (s *source) pos() (line, col uint) {
+ return linebase + s.line, colbase + s.col
+}
+
+// error reports the error msg at source position s.pos().
+func (s *source) error(msg string) {
+ line, col := s.pos()
+ s.errh(line, col, msg)
+}
+
+// start starts a new active source segment (including s.ch).
+// As long as stop has not been called, the active segment's
+// bytes (excluding s.ch) may be retrieved by calling segment.
+func (s *source) start() { s.b = s.r - s.chw }
+func (s *source) stop() { s.b = -1 }
+func (s *source) segment() []byte { return s.buf[s.b : s.r-s.chw] }
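+
+// For example, the scanner calls start at the first character of a literal and
+// segment once it has read one character past its end, so segment returns the
+// literal's bytes without the lookahead character s.ch.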
+
+// rewind rewinds the scanner's read position and character s.ch
+// to the start of the currently active segment, which must not
+// contain any newlines (otherwise position information will be
+// incorrect). Currently, rewind is only needed for handling the
+// source sequence ".."; it must not be called outside an active
+// segment.
+func (s *source) rewind() {
+ // ok to verify precondition - rewind is rarely called
+ if s.b < 0 {
+ panic("no active segment")
+ }
+ s.col -= uint(s.r - s.b)
+ s.r = s.b
+ s.nextch()
+}
+
+func (s *source) nextch() {
+redo:
+ s.col += uint(s.chw)
+ if s.ch == '\n' {
+ s.line++
+ s.col = 0
+ }
+
+ // fast common case: at least one ASCII character
+ if s.ch = rune(s.buf[s.r]); s.ch < sentinel {
+ s.r++
+ s.chw = 1
+ if s.ch == 0 {
+ s.error("invalid NUL character")
+ goto redo
+ }
+ return
+ }
+
+ // slower general case: add more bytes to buffer if we don't have a full rune
+ for s.e-s.r < utf8.UTFMax && !utf8.FullRune(s.buf[s.r:s.e]) && s.ioerr == nil {
+ s.fill()
+ }
+
+ // EOF
+ if s.r == s.e {
+ if s.ioerr != io.EOF {
+ // ensure we never start with a '/' (e.g., rooted path) in the error message
+ s.error("I/O error: " + s.ioerr.Error())
+ s.ioerr = nil
+ }
+ s.ch = -1
+ s.chw = 0
+ return
+ }
+
+ s.ch, s.chw = utf8.DecodeRune(s.buf[s.r:s.e])
+ s.r += s.chw
+
+ if s.ch == utf8.RuneError && s.chw == 1 {
+ s.error("invalid UTF-8 encoding")
+ goto redo
+ }
+
+ // BOMs are only allowed as the first character in a file
+ const BOM = 0xfeff
+ if s.ch == BOM {
+ if s.line > 0 || s.col > 0 {
+ s.error("invalid BOM in the middle of the file")
+ }
+ goto redo
+ }
+}
+
+// fill reads more source bytes into s.buf.
+// It returns with at least one more byte in the buffer, or with s.ioerr != nil.
+func (s *source) fill() {
+ // determine content to preserve
+ b := s.r
+ if s.b >= 0 {
+ b = s.b
+ s.b = 0 // after buffer has grown or content has been moved down
+ }
+ content := s.buf[b:s.e]
+
+ // grow buffer or move content down
+ if len(content)*2 > len(s.buf) {
+ s.buf = make([]byte, nextSize(len(s.buf)))
+ copy(s.buf, content)
+ } else if b > 0 {
+ copy(s.buf, content)
+ }
+ s.r -= b
+ s.e -= b
+
+ // read more data: try a limited number of times
+ for i := 0; i < 10; i++ {
+ var n int
+ n, s.ioerr = s.in.Read(s.buf[s.e : len(s.buf)-1]) // -1 to leave space for sentinel
+ if n < 0 {
+ panic("negative read") // incorrect underlying io.Reader implementation
+ }
+ if n > 0 || s.ioerr != nil {
+ s.e += n
+ s.buf[s.e] = sentinel
+ return
+ }
+ // n == 0
+ }
+
+ s.buf[s.e] = sentinel
+ s.ioerr = io.ErrNoProgress
+}
+
+// nextSize returns the next bigger size for a buffer of a given size.
+func nextSize(size int) int {
+ const min = 4 << 10 // 4K: minimum buffer size
+ const max = 1 << 20 // 1M: maximum buffer size which is still doubled
+ if size < min {
+ return min
+ }
+ if size <= max {
+ return size << 1
+ }
+ return size + max
+}
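+
+// As a sketch of the growth sequence: nextSize(0) == 4K; successive calls on
+// the previous result double the size (8K, 16K, ..., 2M) as long as the input
+// is at most 1M; larger sizes grow by 1M per call.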
diff --git a/src/cmd/compile/internal/syntax/syntax.go b/src/cmd/compile/internal/syntax/syntax.go
new file mode 100644
index 0000000..25c8116
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/syntax.go
@@ -0,0 +1,97 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "io"
+ "os"
+)
+
+// Mode describes the parser mode.
+type Mode uint
+
+// Modes supported by the parser.
+const (
+ CheckBranches Mode = 1 << iota // check correct use of labels, break, continue, and goto statements
+ AllowGenerics
+ AllowMethodTypeParams // does not support interface methods yet; ignored if AllowGenerics is not set
+)
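+
+// Mode values are bit flags and may be combined with |, e.g.
+// CheckBranches|AllowGenerics enables both branch checking and generic syntax.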
+
+// Error describes a syntax error. Error implements the error interface.
+type Error struct {
+ Pos Pos
+ Msg string
+}
+
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Msg)
+}
+
+var _ error = Error{} // verify that Error implements error
+
+// An ErrorHandler is called for each error encountered reading a .go file.
+type ErrorHandler func(err error)
+
+// A Pragma value augments a package, import, const, func, type, or var declaration.
+// Its meaning is entirely up to the PragmaHandler,
+// except that nil is used to mean “no pragma seen.”
+type Pragma interface{}
+
+// A PragmaHandler is used to process //go: directives while scanning.
+// It is passed the current pragma value, which starts out being nil,
+// and it returns an updated pragma value.
+// The text is the directive, with the "//" prefix stripped.
+// The current pragma is saved at each package, import, const, func, type, or var
+// declaration, into the File, ImportDecl, ConstDecl, FuncDecl, TypeDecl, or VarDecl node.
+//
+// If text is the empty string, the pragma is being returned
+// to the handler unused, meaning it appeared before a non-declaration.
+// The handler may wish to report an error. In this case, pos is the
+// current parser position, not the position of the pragma itself.
+// Blank specifies whether the line is blank before the pragma.
+type PragmaHandler func(pos Pos, blank bool, text string, current Pragma) Pragma
+
+// Parse parses a single Go source file from src and returns the corresponding
+// syntax tree. If there are errors, Parse will return the first error found,
+// and a possibly partially constructed syntax tree, or nil.
+//
+// If errh != nil, it is called with each error encountered, and Parse will
+// process as much source as possible. In this case, the returned syntax tree
+// is only nil if no correct package clause was found.
+// If errh is nil, Parse will terminate immediately upon encountering the first
+// error, and the returned syntax tree is nil.
+//
+// If pragh != nil, it is called with each pragma encountered.
+//
+func Parse(base *PosBase, src io.Reader, errh ErrorHandler, pragh PragmaHandler, mode Mode) (_ *File, first error) {
+ defer func() {
+ if p := recover(); p != nil {
+ if err, ok := p.(Error); ok {
+ first = err
+ return
+ }
+ panic(p)
+ }
+ }()
+
+ var p parser
+ p.init(base, src, errh, pragh, mode)
+ p.next()
+ return p.fileOrNil(), p.first
+}
+
+// ParseFile behaves like Parse but it reads the source from the named file.
+func ParseFile(filename string, errh ErrorHandler, pragh PragmaHandler, mode Mode) (*File, error) {
+ f, err := os.Open(filename)
+ if err != nil {
+ if errh != nil {
+ errh(err)
+ }
+ return nil, err
+ }
+ defer f.Close()
+ return Parse(NewFileBase(filename), f, errh, pragh, mode)
+}
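+
+// A minimal usage sketch (the file name and the handler are placeholders):
+//
+//	errh := func(err error) { fmt.Fprintln(os.Stderr, err) }
+//	if f, err := ParseFile("example.go", errh, nil, CheckBranches); err == nil {
+//		_ = f // inspect the syntax tree
+//	}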
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/chans.go2 b/src/cmd/compile/internal/syntax/testdata/go2/chans.go2
new file mode 100644
index 0000000..fad2bce
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/chans.go2
@@ -0,0 +1,62 @@
+package chans
+
+import "runtime"
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[T any]() (*Sender[T], *Receiver[T]) {
+ c := make(chan T)
+ d := make(chan bool)
+ s := &Sender[T]{values: c, done: d}
+ r := &Receiver[T]{values: c, done: d}
+ runtime.SetFinalizer(r, r.finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+// Send sends a value to the receiver. It returns whether any more
+// values may be sent; if it returns false the value was not sent.
+func (s *Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[T]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+// Next returns the next value from the channel. The bool result
+// indicates whether the value is valid, or whether the Sender has
+// been closed and no more values will be received.
+func (r *Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[T]) finalize() {
+ close(r.done)
+}
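+
+// A sketch of typical use (the element type and the values are illustrative):
+//
+//	s, r := Ranger[int]()
+//	go func() {
+//		defer s.Close()
+//		for i := 0; i < 3; i++ {
+//			if !s.Send(i) {
+//				return
+//			}
+//		}
+//	}()
+//	for v, ok := r.Next(); ok; v, ok = r.Next() {
+//		println(v)
+//	}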
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2 b/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2
new file mode 100644
index 0000000..822d028
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/linalg.go2
@@ -0,0 +1,83 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linalg
+
+import "math"
+
+// Numeric is a type bound that matches any numeric type.
+// It would likely be in a constraints package in the standard library.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ float32 | ~float64 |
+ complex64 | ~complex128
+}
+
+func DotProduct[T Numeric](s1, s2 []T) T {
+ if len(s1) != len(s2) {
+ panic("DotProduct: slices of unequal length")
+ }
+ var r T
+ for i := range s1 {
+ r += s1[i] * s2[i]
+ }
+ return r
+}
+
+// NumericAbs matches numeric types with an Abs method.
+type NumericAbs[T any] interface {
+ Numeric
+
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func AbsDifference[T NumericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// OrderedNumeric is a type bound that matches numeric types that support the < operator.
+type OrderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ float32 | ~float64
+}
+
+// Complex is a type bound that matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// OrderedAbs is a helper type that defines an Abs method for
+// ordered numeric types.
+type OrderedAbs[T OrderedNumeric] T
+
+func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
+ if a < 0 {
+ return -a
+ }
+ return a
+}
+
+// ComplexAbs is a helper type that defines an Abs method for
+// complex types.
+type ComplexAbs[T Complex] T
+
+func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
+ r := float64(real(a))
+ i := float64(imag(a))
+ d := math.Sqrt(r * r + i * i)
+ return ComplexAbs[T](complex(d, 0))
+}
+
+func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
+ return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
+}
+
+func ComplexAbsDifference[T Complex](a, b T) T {
+ return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/map.go2 b/src/cmd/compile/internal/syntax/testdata/go2/map.go2
new file mode 100644
index 0000000..814d953
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/map.go2
@@ -0,0 +1,113 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// TODO(gri) fix imports for tests
+import "chans" // ERROR could not import
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans.Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans.Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/map2.go2 b/src/cmd/compile/internal/syntax/testdata/go2/map2.go2
new file mode 100644
index 0000000..2833445
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/map2.go2
@@ -0,0 +1,146 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is like map.go2, but instead of importing chans, it contains
+// the necessary functionality at the end of the file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans_Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans_Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// chans
+
+func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T])
+
+// A sender is used to send values to a Receiver.
+type chans_Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+func (s *chans_Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+func (s *chans_Sender[T]) Close() {
+ close(s.values)
+}
+
+type chans_Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+func (r *chans_Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
\ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/slices.go2 b/src/cmd/compile/internal/syntax/testdata/go2/slices.go2
new file mode 100644
index 0000000..2bacd1c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/slices.go2
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices implements various slice algorithms.
+package slices
+
+// Map turns a []T1 to a []T2 using a mapping function.
+func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
+ r := make([]T2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []T1 to a single value using a reduction function.
+func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[T any](s []T, f func(T) bool) []T {
+ var r []T
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// Example uses
+
+func limiter(x int) byte {
+ switch {
+ case x < 0:
+ return 0
+ default:
+ return byte(x)
+ case x > 255:
+ return 255
+ }
+}
+
+var input = []int{-4, 68954, 7, 44, 0, -555, 6945}
+var limited1 = Map[int, byte](input, limiter)
+var limited2 = Map(input, limiter) // using type inference
+
+func reducer(x float64, y int) float64 {
+ return x + float64(y)
+}
+
+var reduced1 = Reduce[int, float64](input, 0, reducer)
+var reduced2 = Reduce(input, 1i /* ERROR overflows */, reducer) // using type inference
+var reduced3 = Reduce(input, 1, reducer) // using type inference
+
+func filter(x int) bool {
+ return x&1 != 0
+}
+
+var filtered1 = Filter[int](input, filter)
+var filtered2 = Filter(input, filter) // using type inference
+
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2 b/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2
new file mode 100644
index 0000000..42efb42
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/smoketest.go2
@@ -0,0 +1,83 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains basic generic code snippets.
+
+package p
+
+// type parameter lists
+type B[P any] struct{}
+type _[P interface{}] struct{}
+type _[P B] struct{}
+type _[P B[P]] struct{}
+
+type _[A, B, C any] struct{}
+type _[A, B, C B] struct{}
+type _[A, B, C B[A, B, C]] struct{}
+type _[A1, A2 B1, A3 B2, A4, A5, A6 B3] struct{}
+
+type _[A interface{}] struct{}
+type _[A, B interface{ m() }] struct{}
+
+type _[A, B, C any] struct{}
+
+// in functions
+func _[P any]()
+func _[P interface{}]()
+func _[P B]()
+func _[P B[P]]()
+
+// in methods
+func (T) _[P any]()
+func (T) _[P interface{}]()
+func (T) _[P B]()
+func (T) _[P B[P]]()
+
+// type instantiations
+type _ T[int]
+
+// in expressions
+var _ = T[int]{}
+
+// in embedded types
+type _ struct{ T[int] }
+
+// interfaces
+type _ interface{
+ m()
+ ~int
+}
+
+type _ interface{
+ ~int | ~float | ~string
+ ~complex128
+ underlying(underlying underlying) underlying
+}
+
+type _ interface{
+ T
+ T[int]
+}
+
+// tricky cases
+func _(T[P], T[P1, P2])
+func _(a [N]T)
+
+type _ struct{
+ T[P]
+ T[P1, P2]
+ f [N]
+}
+type _ interface{
+ m()
+
+ // generic methods - disabled for now
+ // m[] /* ERROR empty type parameter list */ ()
+ // m[ /* ERROR cannot have type parameters */ P any](P)
+
+ // instantiated types
+ // T[] /* ERROR empty type argument list */
+ T[P]
+ T[P1, P2]
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/typeinst.go2 b/src/cmd/compile/internal/syntax/testdata/go2/typeinst.go2
new file mode 100644
index 0000000..a422d5e
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/typeinst.go2
@@ -0,0 +1,60 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myInt int
+
+// Parameterized type declarations
+
+type T1[P any] P
+
+type T2[P any] struct {
+ f P
+ g int // int should still be in scope chain
+}
+
+type List[P any] []P
+
+// Alias type declarations cannot have type parameters. Syntax error.
+// TODO(gri) Disabled for now as we don't check syntax error here.
+// type A1[P any] = /* ERROR cannot be alias */ P
+
+// But an alias may refer to a generic, uninstantiated type.
+type A2 = List
+var _ A2[int]
+var _ A2 /* ERROR without instantiation */
+
+type A3 = List[int]
+var _ A3
+
+// Parameterized type instantiations
+
+var x int
+type _ x /* ERROR not a type */ [int]
+
+type _ int /* ERROR not a generic type */ [int]
+type _ myInt /* ERROR not a generic type */ [int]
+
+// TODO(gri) better error messages
+type _ T1[int]
+type _ T1[x /* ERROR not a type */ ]
+type _ T1 /* ERROR got 2 arguments but 1 type parameters */ [int, float32]
+
+var _ T2[int] = T2[int]{}
+
+var _ List[int] = []int{1, 2, 3}
+var _ List[[]int] = [][]int{{1, 2, 3}}
+var _ List[List[List[int]]]
+
+// Parameterized types containing parameterized types
+
+type T3[P any] List[P]
+
+var _ T3[int] = T3[int](List[int]{1, 2, 3})
+
+// Self-recursive generic types are not permitted
+
+type self1[P any] self1 /* ERROR illegal cycle */ [P]
+type self2[P any] *self2[P] // this is ok
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2 b/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2
new file mode 100644
index 0000000..76b8d55
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/typeinst2.go2
@@ -0,0 +1,232 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type List[E any] []E
+var _ List[List[List[int]]]
+var _ List[List[List[int]]] = []List[List[int]]{}
+
+type (
+ T1[P1 any] struct {
+ f1 T2[P1, float32]
+ }
+
+ T2[P2, P3 any] struct {
+ f2 P2
+ f3 P3
+ }
+)
+
+func _() {
+ var x1 T1[int]
+ var x2 T2[int, float32]
+
+ x1.f1.f2 = 0
+ x1.f1 = x2
+}
+
+type T3[P any] T1[T2[P, P]]
+
+func _() {
+ var x1 T3[int]
+ var x2 T2[int, int]
+ x1.f1.f2 = x2
+}
+
+func f[P any] (x P) List[P] {
+ return List[P]{x}
+}
+
+var (
+ _ []int = f(0)
+ _ []float32 = f[float32](10)
+ _ List[complex128] = f(1i)
+ _ []List[int] = f(List[int]{})
+ _ List[List[int]] = []List[int]{}
+ _ = []List[int]{}
+)
+
+// Parameterized types with methods
+
+func (l List[E]) Head() (_ E, _ bool) {
+ if len(l) > 0 {
+ return l[0], true
+ }
+ return
+}
+
+// A test case for instantiating types with other types (extracted from map.go2)
+
+type Pair[K any] struct {
+ key K
+}
+
+type Receiver[T any] struct {
+ values T
+}
+
+type Iterator[K any] struct {
+ r Receiver[Pair[K]]
+}
+
+func Values [T any] (r Receiver[T]) T {
+ return r.values
+}
+
+func (it Iterator[K]) Next() K {
+ return Values[Pair[K]](it.r).key
+}
+
+// A more complex test case testing type bounds (extracted from linalg.go2 and reduced to essence)
+
+type NumericAbs[T any] interface {
+ Abs() T
+}
+
+func AbsDifference[T NumericAbs[T]](x T)
+
+type OrderedAbs[T any] T
+
+func (a OrderedAbs[T]) Abs() OrderedAbs[T]
+
+func OrderedAbsDifference[T any](x T) {
+ AbsDifference(OrderedAbs[T](x))
+}
+
+// same code, reduced to essence
+
+func g[P interface{ m() P }](x P)
+
+type T4[P any] P
+
+func (_ T4[P]) m() T4[P]
+
+func _[Q any](x Q) {
+ g(T4[Q](x))
+}
+
+// Another test case that caused problems in the past
+
+type T5[_ interface { a() }, _ interface{}] struct{}
+
+type A[P any] struct{ x P }
+
+func (_ A[P]) a() {}
+
+var _ T5[A[int], int]
+
+// Invoking methods with parameterized receiver types uses
+// type inference to determine the actual type arguments matching
+// the receiver type parameters from the actual receiver argument.
+// Go does implicit address-taking and dereferencing depending
+// on the actual receiver and the method's receiver type. To make
+// type inference work, the type-checker matches "pointer-ness"
+// of the actual receiver and the method's receiver type.
+// The following code tests this mechanism.
+
+type R1[A any] struct{}
+func (_ R1[A]) vm()
+func (_ *R1[A]) pm()
+
+func _[T any](r R1[T], p *R1[T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+type R2[A, B any] struct{}
+func (_ R2[A, B]) vm()
+func (_ *R2[A, B]) pm()
+
+func _[T any](r R2[T, int], p *R2[string, T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+// Interface type constraints can contain any type, incl. *Named types.
+// Verify that we use the underlying type to compute the operational type.
+type MyInt int
+func add1[T interface{ ~MyInt }](x T) T {
+ return x + 1
+}
+
+type MyString string
+func double[T interface{ ~MyInt | ~MyString }](x T) T {
+ return x + x
+}
+
+// Embedding of interfaces with type constraints leads to interfaces
+// with type constraints that are the intersection of the embedded
+// type constraints.
+
+type E0 interface {
+ ~int | ~bool | ~string
+}
+
+type E1 interface {
+ ~int | ~float64 | ~string
+}
+
+type E2 interface {
+ ~float64
+}
+
+type I0 interface {
+ E0
+}
+
+func f0[T I0]()
+var _ = f0[int]
+var _ = f0[bool]
+var _ = f0[string]
+var _ = f0[float64 /* ERROR does not satisfy I0 */ ]
+
+type I01 interface {
+ E0
+ E1
+}
+
+func f01[T I01]()
+var _ = f01[int]
+var _ = f01[bool /* ERROR does not satisfy I0 */ ]
+var _ = f01[string]
+var _ = f01[float64 /* ERROR does not satisfy I0 */ ]
+
+type I012 interface {
+ E0
+ E1
+ E2
+}
+
+func f012[T I012]()
+var _ = f012[int /* ERROR does not satisfy I012 */ ]
+var _ = f012[bool /* ERROR does not satisfy I012 */ ]
+var _ = f012[string /* ERROR does not satisfy I012 */ ]
+var _ = f012[float64 /* ERROR does not satisfy I012 */ ]
+
+type I12 interface {
+ E1
+ E2
+}
+
+func f12[T I12]()
+var _ = f12[int /* ERROR does not satisfy I12 */ ]
+var _ = f12[bool /* ERROR does not satisfy I12 */ ]
+var _ = f12[string /* ERROR does not satisfy I12 */ ]
+var _ = f12[float64]
+
+type I0_ interface {
+ E0
+ ~int
+}
+
+func f0_[T I0_]()
+var _ = f0_[int]
+var _ = f0_[bool /* ERROR does not satisfy I0_ */ ]
+var _ = f0_[string /* ERROR does not satisfy I0_ */ ]
+var _ = f0_[float64 /* ERROR does not satisfy I0_ */ ]
diff --git a/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2 b/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2
new file mode 100644
index 0000000..111f7c1
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/go2/typeparams.go2
@@ -0,0 +1,451 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// import "io" // for type assertion tests
+
+// The predeclared identifier "any" is only visible as a constraint
+// in a type parameter list.
+var _ any // ERROR undeclared
+func _[_ any /* ok here */ , _ interface{any /* ERROR undeclared */ }](any /* ERROR undeclared */ ) {
+ var _ any /* ERROR undeclared */
+}
+
+func identity[T any](x T) T { return x }
+
+func _[_ any](x int) int
+func _[T any](T /* ERROR redeclared */ T)()
+func _[T, T /* ERROR redeclared */ any]()
+
+func reverse[T any](list []T) []T {
+ rlist := make([]T, len(list))
+ i := len(list)
+ for _, x := range list {
+ i--
+ rlist[i] = x
+ }
+ return rlist
+}
+
+var _ = reverse /* ERROR cannot use generic function reverse */
+var _ = reverse[int, float32 /* ERROR got 2 type arguments */ ] ([]int{1, 2, 3})
+var _ = reverse[int]([ /* ERROR cannot use */ ]float32{1, 2, 3})
+var f = reverse[chan int]
+var _ = f(0 /* ERROR cannot convert 0 .* to \[\]chan int */ )
+
+func swap[A, B any](a A, b B) (B, A) { return b, a }
+
+var _ = swap /* ERROR single value is expected */ [int, float32](1, 2)
+var f32, i = swap[int, float32](swap(float32, int)(1, 2))
+var _ float32 = f32
+var _ int = i
+
+func swapswap[A, B any](a A, b B) (A, B) {
+ return swap[B, A](b, a)
+}
+
+type F[A, B any] func(A, B) (B, A)
+
+func min[T interface{ ~int }](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func _[T interface{ ~int | ~float32 }](x, y T) bool { return x < y }
+func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{ ~int | ~float32 | ~bool }](x, y T) bool { return x /* ERROR cannot compare */ < y }
+
+func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T C2[T]](x, y T) bool { return x < y }
+
+type C1[T any] interface{}
+type C2[T any] interface{ ~int | ~float32 }
+
+func new[T any]() *T {
+ var x T
+ return &x
+}
+
+var _ = new /* ERROR cannot use generic function new */
+var _ *int = new[int]()
+
+func _[T any](map[T /* ERROR invalid map key type T \(missing comparable constraint\) */]int) // w/o constraint we don't know if T is comparable
+
+func f1[T1 any](struct{T1}) int
+var _ = f1(int)(struct{T1}{})
+type T1 = int
+
+func f2[t1 any](struct{t1; x float32}) int
+var _ = f2(t1)(struct{t1; x float32}{})
+type t1 = int
+
+
+func f3[A, B, C any](A, struct{x B}, func(A, struct{x B}, *C)) int
+
+var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
+
+// indexing
+
+func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[string]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+
+// slicing
+// TODO(gri) implement this
+
+func _[T interface{ ~string }] (x T, i, j, k int) { _ = x /* ERROR invalid operation */ [i:j:k] }
+
+// len/cap built-ins
+
+func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
+
+func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
+
+// range iteration
+
+func _[T interface{}](x T) {
+ for range x /* ERROR cannot range */ {}
+}
+
+func _[T interface{ ~string | ~[]string }](x T) {
+ for range x {}
+ for i := range x { _ = i }
+ for i, _ := range x { _ = i }
+ for i, e := range x /* ERROR must have the same element type */ { _ = i }
+ for _, e := range x /* ERROR must have the same element type */ {}
+ var e rune
+ _ = e
+ for _, (e) = range x /* ERROR must have the same element type */ {}
+}
+
+
+func _[T interface{ ~string | ~[]rune | ~map[int]rune }](x T) {
+ for _, e := range x { _ = e }
+ for i, e := range x { _ = i; _ = e }
+}
+
+func _[T interface{ ~string | ~[]rune | ~map[string]rune }](x T) {
+ for _, e := range x { _ = e }
+ for i, e := range x /* ERROR must have the same key type */ { _ = e }
+}
+
+func _[T interface{ ~string | ~chan int }](x T) {
+ for range x {}
+ for i := range x { _ = i }
+ for i, _ := range x { _ = i } // TODO(gri) should get an error here: channels only return one value
+}
+
+func _[T interface{ ~string | ~chan<-int }](x T) {
+ for i := range x /* ERROR send-only channel */ { _ = i }
+}
+
+// type inference checks
+
+var _ = new() /* ERROR cannot infer T */
+
+func f4[A, B, C any](A, B) C
+
+var _ = f4(1, 2) /* ERROR cannot infer C */
+var _ = f4[int, float32, complex128](1, 2)
+
+func f5[A, B, C any](A, []*B, struct{f []C}) int
+
+var _ = f5[int, float32, complex128](0, nil, struct{f []complex128}{})
+var _ = f5(0, nil, struct{f []complex128}{}) // ERROR cannot infer
+var _ = f5(0, []*float32{new[float32]()}, struct{f []complex128}{})
+
+func f6[A any](A, []A) int
+
+var _ = f6(0, nil)
+
+func f6nil[A any](A) int
+
+var _ = f6nil(nil) // ERROR cannot infer
+
+// type inference with variadic functions
+
+func f7[T any](...T) T
+
+var _ int = f7() /* ERROR cannot infer T */
+var _ int = f7(1)
+var _ int = f7(1, 2)
+var _ int = f7([]int{}...)
+var _ int = f7 /* ERROR cannot use */ ([]float64{}...)
+var _ float64 = f7([]float64{}...)
+var _ = f7[float64](1, 2.3)
+var _ = f7(float64(1), 2.3)
+var _ = f7(1, 2.3 /* ERROR does not match */ )
+var _ = f7(1.2, 3 /* ERROR does not match */ )
+
+func f8[A, B any](A, B, ...B) int
+
+var _ = f8(1) /* ERROR not enough arguments */
+var _ = f8(1, 2.3)
+var _ = f8(1, 2.3, 3.4, 4.5)
+var _ = f8(1, 2.3, 3.4, 4 /* ERROR does not match */ )
+var _ = f8(int, float64)(1, 2.3, 3.4, 4)
+
+var _ = f8(int, float64)(0, 0, nil...) // test case for #18268
+
+// init functions cannot have type parameters
+
+func init() {}
+func init[/* ERROR func init must have no type parameters */ _ any]() {}
+func init[/* ERROR func init must have no type parameters */ P any]() {}
+
+type T struct {}
+
+func (T) m1() {}
+// The type checker accepts method type parameters if configured accordingly.
+func (T) m2[_ any]() {}
+func (T) m3[P any]() {}
+
+// type inference across parameterized types
+
+type S1[P any] struct { f P }
+
+func f9[P any](x S1[P])
+
+func _() {
+ f9[int](S1[int]{42})
+ f9(S1[int]{42})
+}
+
+type S2[A, B, C any] struct{}
+
+func f10[X, Y, Z any](a S2[X, int, Z], b S2[X, Y, bool])
+
+func _[P any]() {
+ f10[int, float32, string](S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[P, int, P]{}, S2[P, float32, bool]{})
+}
+
+// corner case for type inference
+// (was bug: after instantiating f11, the type-checker didn't mark f11 as non-generic)
+
+func f11[T any]()
+
+func _() {
+ f11[int]()
+}
+
+// the previous example was extracted from
+
+func f12[T interface{m() T}]()
+
+type A[T any] T
+
+func (a A[T]) m() A[T]
+
+func _[T any]() {
+ f12(A[T])()
+}
+
+// method expressions
+
+func (_ S1[P]) m()
+
+func _() {
+ m := S1[int].m
+ m(struct { f int }{42})
+}
+
+func _[T any] (x T) {
+ m := S1[T].m
+ m(S1[T]{x})
+}
+
+// type parameters in methods (generalization)
+
+type R0 struct{}
+
+func (R0) _[T any](x T)
+func (R0 /* ERROR invalid receiver */ ) _[R0 any]() // scope of type parameters starts at "func"
+
+type R1[A, B any] struct{}
+
+func (_ R1[A, B]) m0(A, B)
+func (_ R1[A, B]) m1[T any](A, B, T) T
+func (_ R1 /* ERROR not a generic type */ [R1, _]) _()
+func (_ R1[A, B]) _[A /* ERROR redeclared */ any](B)
+
+func _() {
+ var r R1[int, string]
+ r.m1[rune](42, "foo", 'a')
+ r.m1[rune](42, "foo", 1.2 /* ERROR truncated to rune */)
+ r.m1(42, "foo", 1.2) // using type inference
+ var _ float64 = r.m1(42, "foo", 1.2)
+}
+
+type I1[A any] interface {
+ m1(A)
+}
+
+var _ I1[int] = r1[int]{}
+
+type r1[T any] struct{}
+
+func (_ r1[T]) m1(T)
+
+type I2[A, B any] interface {
+ m1(A)
+ m2(A) B
+}
+
+var _ I2[int, float32] = R2[int, float32]{}
+
+type R2[P, Q any] struct{}
+
+func (_ R2[X, Y]) m1(X)
+func (_ R2[X, Y]) m2(X) Y
+
+// type assertions and type switches over generic types
+// NOTE: These are currently disabled because it's unclear what the correct
+// approach is, and one can always work around by assigning the variable to
+// an interface first.
+
+// // ReadByte1 corresponds to the ReadByte example in the draft design.
+// func ReadByte1[T io.Reader](r T) (byte, error) {
+// if br, ok := r.(io.ByteReader); ok {
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // ReadBytes2 is like ReadByte1 but uses a type switch instead.
+// func ReadByte2[T io.Reader](r T) (byte, error) {
+// switch br := r.(type) {
+// case io.ByteReader:
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // type assertions and type switches over generic types are strict
+// type I3 interface {
+// m(int)
+// }
+//
+// type I4 interface {
+// m() int // different signature from I3.m
+// }
+//
+// func _[T I3](x I3, p T) {
+// // type assertions and type switches over interfaces are not strict
+// _ = x.(I4)
+// switch x.(type) {
+// case I4:
+// }
+//
+// // type assertions and type switches over generic types are strict
+// _ = p /* ERROR cannot have dynamic type I4 */.(I4)
+// switch p.(type) {
+// case I4 /* ERROR cannot have dynamic type I4 */ :
+// }
+// }
+
+// type assertions and type switches over generic types lead to errors for now
+
+func _[T any](x T) {
+ _ = x /* ERROR not an interface */ .(int)
+ switch x /* ERROR not an interface */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+func _[T interface{ ~int }](x T) {
+ _ = x /* ERROR not an interface */ .(int)
+ switch x /* ERROR not an interface */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+// error messages related to type bounds mention those bounds
+type C[P any] interface{}
+
+func _[P C[P]] (x P) {
+ x.m /* ERROR x.m undefined */ ()
+}
+
+type I interface {}
+
+func _[P I] (x P) {
+ x.m /* ERROR interface I has no method m */ ()
+}
+
+func _[P interface{}] (x P) {
+ x.m /* ERROR type bound for P has no method m */ ()
+}
+
+func _[P any] (x P) {
+ x.m /* ERROR type bound for P has no method m */ ()
+}
+
+// automatic distinguishing between array and generic types
+// NOTE: Disabled when using unified parameter list syntax.
+/*
+const P = 10
+type A1 [P]byte
+func _(a A1) {
+ assert(len(a) == 10)
+}
+
+type A2 [P]struct{
+ f [P]byte
+}
+func _(a A2) {
+ assert(len(a) == 10)
+ assert(len(a[0].f) == 10)
+}
+
+type A3 [P]func(x [P]A3)
+func _(a A3) {
+ assert(len(a) == 10)
+}
+
+type T2[P] struct{ P }
+var _ T2[int]
+
+type T3[P] func(P)
+var _ T3[int]
+*/
\ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/interface.go2 b/src/cmd/compile/internal/syntax/testdata/interface.go2
new file mode 100644
index 0000000..dbc4187
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/interface.go2
@@ -0,0 +1,74 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for interfaces containing
+// constraint elements.
+
+package p
+
+type _ interface {
+ m()
+ E
+}
+
+type _ interface {
+ m()
+ ~int
+ int | string
+ int | ~string
+ ~int | ~string
+}
+
+type _ interface {
+ m()
+ ~int
+ T[int, string] | string
+ int | ~T[string, struct{}]
+ ~int | ~string
+}
+
+type _ interface {
+ int
+ []byte
+ [10]int
+ struct{}
+ *int
+ func()
+ interface{}
+ map[string]int
+ chan T
+ chan<- T
+ <-chan T
+ T[int]
+}
+
+type _ interface {
+ int | string
+ []byte | string
+ [10]int | string
+ struct{} | string
+ *int | string
+ func() | string
+ interface{} | string
+ map[string]int | string
+ chan T | string
+ chan<- T | string
+ <-chan T | string
+ T[int] | string
+}
+
+type _ interface {
+ ~int | string
+ ~[]byte | string
+ ~[10]int | string
+ ~struct{} | string
+ ~*int | string
+ ~func() | string
+ ~interface{} | string
+ ~map[string]int | string
+ ~chan T | string
+ ~chan<- T | string
+ ~<-chan T | string
+ ~T[int] | string
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue20789.src b/src/cmd/compile/internal/syntax/testdata/issue20789.src
new file mode 100644
index 0000000..5f150db
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue20789.src
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Make sure this doesn't crash the compiler.
+// Line 9 must end in EOF for this test (no newline).
+
+package e
+func([<-chan<-[func /* ERROR unexpected u */ u){go /* ERROR must be function call */
\ No newline at end of file
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23385.src b/src/cmd/compile/internal/syntax/testdata/issue23385.src
new file mode 100644
index 0000000..2459a73
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23385.src
@@ -0,0 +1,17 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check error message for use of = instead of == .
+
+package p
+
+func _() {
+ if true || 0 /* ERROR cannot use assignment .* as value */ = 1 {
+ }
+}
+
+func _(a, b string) {
+ if a == "a" && b /* ERROR cannot use assignment .* as value */ = "b" {
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue23434.src b/src/cmd/compile/internal/syntax/testdata/issue23434.src
new file mode 100644
index 0000000..5a72a7f
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue23434.src
@@ -0,0 +1,31 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 23434: Better synchronization of
+// parser after missing type. There should be exactly
+// one error each time, with no follow-on errors.
+
+package p
+
+type T /* ERROR unexpected newline */
+
+type Map map[int] /* ERROR unexpected newline */
+
+// Examples from #23434:
+
+func g() {
+ m := make(map[string] /* ERROR unexpected ! */ !)
+ for {
+ x := 1
+ print(x)
+ }
+}
+
+func f() {
+ m := make(map[string] /* ERROR unexpected \) */ )
+ for {
+ x := 1
+ print(x)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue31092.src b/src/cmd/compile/internal/syntax/testdata/issue31092.src
new file mode 100644
index 0000000..b1839b8
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue31092.src
@@ -0,0 +1,16 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test cases for issue 31092: Better synchronization of
+// parser after seeing an := rather than an = in a const,
+// type, or variable declaration.
+
+package p
+
+const _ /* ERROR unexpected := */ := 0
+type _ /* ERROR unexpected := */ := int
+var _ /* ERROR unexpected := */ := 0
+
+const _ int /* ERROR unexpected := */ := 0
+var _ int /* ERROR unexpected := */ := 0
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43527.go2 b/src/cmd/compile/internal/syntax/testdata/issue43527.go2
new file mode 100644
index 0000000..dd2c9b1
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue43527.go2
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // 0 and 1-element []-lists are syntactically valid
+ _[A, B /* ERROR missing type constraint */ ] int
+ _[A, /* ERROR type parameters must be named */ interface{}] int
+ _[A, B, C /* ERROR missing type constraint */ ] int
+ _[A B, C /* ERROR missing type constraint */ ] int
+ _[A B, /* ERROR type parameters must be named */ interface{}] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C D] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C, D] int
+ _[A B, /* ERROR type parameters must be named */ interface{}, C, interface{}] int
+ _[A B, C interface{}, D, /* ERROR type parameters must be named */ interface{}] int
+)
+
+// function type parameters use the same parsing routine - just have a couple of tests
+
+func _[A, B /* ERROR missing type constraint */ ]() {}
+func _[A, /* ERROR type parameters must be named */ interface{}]() {}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue43674.src b/src/cmd/compile/internal/syntax/testdata/issue43674.src
new file mode 100644
index 0000000..51c692a
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue43674.src
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(... /* ERROR [.][.][.] is missing type */ )
+func _(... /* ERROR [.][.][.] is missing type */ , int)
+
+func _(a, b ... /* ERROR [.][.][.] is missing type */ )
+func _(a, b ... /* ERROR [.][.][.] is missing type */ , x int)
+
+func _()(... /* ERROR [.][.][.] is missing type */ )
diff --git a/src/cmd/compile/internal/syntax/testdata/issue46558.src b/src/cmd/compile/internal/syntax/testdata/issue46558.src
new file mode 100644
index 0000000..a22b600
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue46558.src
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func F(s string) {
+ switch s[0] {
+ case 'a':
+ case s[2] { // ERROR unexpected {
+ case 'b':
+ }
+ }
+} // ERROR non-declaration statement
diff --git a/src/cmd/compile/internal/syntax/testdata/issue47704.go2 b/src/cmd/compile/internal/syntax/testdata/issue47704.go2
new file mode 100644
index 0000000..4e65857
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue47704.go2
@@ -0,0 +1,18 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// error messages for parser in generic mode
+func _() {
+ _ = m[] // ERROR expecting operand
+ _ = m[x,]
+ _ = m[x /* ERROR unexpected a */ a b c d]
+}
+
+// test case from the issue
+func f(m map[int]int) int {
+ return m[0 // ERROR expecting comma, \: or \]
+ ]
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue47704.src b/src/cmd/compile/internal/syntax/testdata/issue47704.src
new file mode 100644
index 0000000..0156af7
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue47704.src
@@ -0,0 +1,18 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// error messages for parser in non-generic mode
+func _() {
+ _ = m[] // ERROR expecting operand
+ _ = m[x,] // ERROR unexpected comma, expecting \: or \]
+ _ = m[x /* ERROR unexpected a */ a b c d]
+}
+
+// test case from the issue
+func f(m map[int]int) int {
+ return m[0 // ERROR expecting \: or \]
+ ]
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue48382.go2 b/src/cmd/compile/internal/syntax/testdata/issue48382.go2
new file mode 100644
index 0000000..c00fee6
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue48382.go2
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type _ func /* ERROR function type must have no type parameters */ [ /* ERROR empty type parameter list */ ]()
+type _ func /* ERROR function type must have no type parameters */ [ x /* ERROR missing type constraint */ ]()
+type _ func /* ERROR function type must have no type parameters */ [P any]()
+
+var _ = func /* ERROR function literal must have no type parameters */ [P any]() {}
+
+type _ interface{
+ m /* ERROR interface method must have no type parameters */ [P any]()
+}
diff --git a/src/cmd/compile/internal/syntax/testdata/issue49482.go2 b/src/cmd/compile/internal/syntax/testdata/issue49482.go2
new file mode 100644
index 0000000..1fc303d
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/issue49482.go2
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ // these need a comma to disambiguate
+ _[P *T,] struct{}
+ _[P *T, _ any] struct{}
+ _[P (*T),] struct{}
+ _[P (*T), _ any] struct{}
+ _[P (T),] struct{}
+ _[P (T), _ any] struct{}
+
+ // these parse as name followed by type
+ _[P *struct{}] struct{}
+ _[P (*struct{})] struct{}
+ _[P ([]int)] struct{}
+
+ // array declarations
+ _ [P(T)]struct{}
+ _ [P((T))]struct{}
+ _ [P * *T] struct{} // this could be a name followed by a type but it makes the rules more complicated
+ _ [P * T]struct{}
+ _ [P(*T)]struct{}
+ _ [P(**T)]struct{}
+ _ [P * T - T]struct{}
+ _ [P*T-T /* ERROR unexpected comma */ ,]struct{}
+ _ [10 /* ERROR unexpected comma */ ,]struct{}
+)
diff --git a/src/cmd/compile/internal/syntax/testdata/sample.src b/src/cmd/compile/internal/syntax/testdata/sample.src
new file mode 100644
index 0000000..5a2b4bf
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/sample.src
@@ -0,0 +1,33 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a sample test file illustrating the use
+// of error comments with the error test harness.
+
+package p
+
+// The following are invalid error comments; they are
+// silently ignored. The prefix must be exactly one of
+// "/* ERROR " or "// ERROR ".
+//
+/*ERROR*/
+/*ERROR foo*/
+/* ERRORfoo */
+/* ERROR foo */
+//ERROR
+// ERROR
+// ERRORfoo
+// ERROR foo
+
+// This is a valid error comment; it applies to the
+// immediately following token.
+import "math" /* ERROR unexpected comma */ ,
+
+// If there are multiple /*-style error comments before
+// the next token, only the last one is considered.
+type x = /* ERROR ignored */ /* ERROR literal 0 in type declaration */ 0
+
+// A //-style error comment matches any error position
+// on the same line.
+func () foo() // ERROR method has no receiver
diff --git a/src/cmd/compile/internal/syntax/testdata/tparams.go2 b/src/cmd/compile/internal/syntax/testdata/tparams.go2
new file mode 100644
index 0000000..a9bd72c
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/tparams.go2
@@ -0,0 +1,24 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type t[a, b /* ERROR missing type constraint */ ] struct{}
+type t[a t, b t, c /* ERROR missing type constraint */ ] struct{}
+type t struct {
+ t [n]byte
+ t[a]
+ t[a, b]
+}
+type t interface {
+ t[a]
+ m /* ERROR method must have no type parameters */ [_ _, /* ERROR mixed */ _]()
+ t[a, b]
+}
+
+func f[ /* ERROR empty type parameter list */ ]()
+func f[a, b /* ERROR missing type constraint */ ]()
+func f[a t, b t, c /* ERROR missing type constraint */ ]()
+
+func f[a b, /* ERROR expecting ] */ 0] ()
diff --git a/src/cmd/compile/internal/syntax/testdata/typeset.go2 b/src/cmd/compile/internal/syntax/testdata/typeset.go2
new file mode 100644
index 0000000..19b74f2
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testdata/typeset.go2
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for typeset-only constraint elements.
+
+package p
+
+type (
+ _[_ t] t
+ _[_ ~t] t
+ _[_ t|t] t
+ _[_ ~t|t] t
+ _[_ t|~t] t
+ _[_ ~t|~t] t
+
+ _[_ t, _, _ t|t] t
+ _[_ t, _, _ ~t|t] t
+ _[_ t, _, _ t|~t] t
+ _[_ t, _, _ ~t|~t] t
+
+ _[_ t.t] t
+ _[_ ~t.t] t
+ _[_ t.t|t.t] t
+ _[_ ~t.t|t.t] t
+ _[_ t.t|~t.t] t
+ _[_ ~t.t|~t.t] t
+
+ _[_ t, _, _ t.t|t.t] t
+ _[_ t, _, _ ~t.t|t.t] t
+ _[_ t, _, _ t.t|~t.t] t
+ _[_ t, _, _ ~t.t|~t.t] t
+
+ _[_ struct{}] t
+ _[_ ~struct{}] t
+
+ _[_ struct{}|t] t
+ _[_ ~struct{}|t] t
+ _[_ struct{}|~t] t
+ _[_ ~struct{}|~t] t
+
+ _[_ t|struct{}] t
+ _[_ ~t|struct{}] t
+ _[_ t|~struct{}] t
+ _[_ ~t|~struct{}] t
+
+ // test cases for issue #49175
+ _[_ []t]t
+ _[_ [1]t]t
+ _[_ ~[]t]t
+ _[_ ~[1]t]t
+ t [ /* ERROR type parameters must be named */ t[0]]t
+)
+
+// test cases for issue #49174
+func _[_ t]() {}
+func _[_ []t]() {}
+func _[_ [1]t]() {}
+func _[_ []t | t]() {}
+func _[_ [1]t | t]() {}
+func _[_ t | []t]() {}
+func _[_ []t | []t]() {}
+func _[_ [1]t | [1]t]() {}
+func _[_ t[t] | t[t]]() {}
+
+// Single-expression type parameter lists and those that don't start
+// with a (type parameter) name are considered array sizes.
+// The term must be a valid expression (it could be a type - and then
+// a type-checker will complain - but we don't allow ~ in the expr).
+type (
+ _[t] t
+ _[/* ERROR unexpected ~ */ ~t] t
+ _[t|t] t
+ _[/* ERROR unexpected ~ */ ~t|t] t
+ _[t| /* ERROR unexpected ~ */ ~t] t
+ _[/* ERROR unexpected ~ */ ~t|~t] t
+)
+
+type (
+ _[_ t, t /* ERROR missing type constraint */ ] t
+ _[_ ~t, t /* ERROR missing type constraint */ ] t
+ _[_ t, /* ERROR type parameters must be named */ ~t] t
+ _[_ ~t, /* ERROR type parameters must be named */ ~t] t
+
+ _[_ t|t, /* ERROR type parameters must be named */ t|t] t
+ _[_ ~t|t, /* ERROR type parameters must be named */ t|t] t
+ _[_ t|t, /* ERROR type parameters must be named */ ~t|t] t
+ _[_ ~t|t, /* ERROR type parameters must be named */ ~t|t] t
+)
diff --git a/src/cmd/compile/internal/syntax/testing.go b/src/cmd/compile/internal/syntax/testing.go
new file mode 100644
index 0000000..6a97dc0
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testing.go
@@ -0,0 +1,72 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements testing support.
+
+package syntax
+
+import (
+ "io"
+ "regexp"
+ "strings"
+)
+
+// CommentsDo parses the given source and calls the provided handler for each
+// comment or error. If the text provided to handler starts with a '/' it is
+// the comment text; otherwise it is the error message.
+func CommentsDo(src io.Reader, handler func(line, col uint, text string)) {
+ var s scanner
+ s.init(src, handler, comments)
+ for s.tok != _EOF {
+ s.next()
+ }
+}
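+
+// For example, to print the comments in a source stream (a sketch; src is
+// any io.Reader holding Go source):
+//
+//	CommentsDo(src, func(line, col uint, text string) {
+//		if text[0] == '/' {
+//			println(line, col, text) // text includes the // or /* prefix
+//		}
+//	})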
+
+// ERROR comments must start with text `ERROR "msg"` or `ERROR msg`.
+// Space around "msg" or msg is ignored.
+var errRx = regexp.MustCompile(`^ *ERROR *"?([^"]*)"?`)
+
+// ErrorMap collects all comments with comment text of the form
+// `ERROR "msg"` or `ERROR msg` from the given src and returns them
+// as []Error lists in a map indexed by line number. The position
+// for each Error is the position of the token immediately preceding
+// the comment, the Error message is the message msg extracted from
+// the comment, with all errors that are on the same line collected
+// in a slice, in source order. If there is no preceding token (the
+// `ERROR` comment appears in the beginning of the file), then the
+// recorded position is unknown (line, col = 0, 0). If there are no
+// ERROR comments, the result is nil.
+func ErrorMap(src io.Reader) (errmap map[uint][]Error) {
+ // position of previous token
+ var base *PosBase
+ var prev struct{ line, col uint }
+
+ var s scanner
+ s.init(src, func(_, _ uint, text string) {
+ if text[0] != '/' {
+ return // error, ignore
+ }
+ if text[1] == '*' {
+ text = text[:len(text)-2] // strip trailing */
+ }
+ if s := errRx.FindStringSubmatch(text[2:]); len(s) == 2 {
+ pos := MakePos(base, prev.line, prev.col)
+ err := Error{pos, strings.TrimSpace(s[1])}
+ if errmap == nil {
+ errmap = make(map[uint][]Error)
+ }
+ errmap[prev.line] = append(errmap[prev.line], err)
+ }
+ }, comments)
+
+ for s.tok != _EOF {
+ s.next()
+ if s.tok == _Semi && s.lit != "semicolon" {
+ continue // ignore automatically inserted semicolons
+ }
+ prev.line, prev.col = s.line, s.col
+ }
+
+ return
+}
diff --git a/src/cmd/compile/internal/syntax/testing_test.go b/src/cmd/compile/internal/syntax/testing_test.go
new file mode 100644
index 0000000..d34e5ea
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/testing_test.go
@@ -0,0 +1,45 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+import (
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func TestErrorMap(t *testing.T) {
+ const src = `/* ERROR 0:0 */ /* ERROR "0:0" */ // ERROR 0:0
+// ERROR "0:0"
+x /* ERROR 3:1 */ // ignore automatically inserted semicolon here
+/* ERROR 3:1 */ // position of x on previous line
+ x /* ERROR 5:4 */ ; // do not ignore this semicolon
+/* ERROR 5:22 */ // position of ; on previous line
+ package /* ERROR 7:2 */ // indented with tab
+ import /* ERROR 8:9 */ // indented with blanks
+`
+ m := ErrorMap(strings.NewReader(src))
+ got := 0 // number of errors found
+ for line, errlist := range m {
+ for _, err := range errlist {
+ if err.Pos.Line() != line {
+ t.Errorf("%v: got map line %d; want %d", err, err.Pos.Line(), line)
+ continue
+ }
+ // err.Pos.Line() == line
+ msg := fmt.Sprintf("%d:%d", line, err.Pos.Col())
+ if err.Msg != msg {
+ t.Errorf("%v: got msg %q; want %q", err, err.Msg, msg)
+ continue
+ }
+ }
+ got += len(errlist)
+ }
+
+ want := strings.Count(src, "ERROR")
+ if got != want {
+ t.Errorf("ErrorMap got %d errors; want %d", got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/syntax/token_string.go b/src/cmd/compile/internal/syntax/token_string.go
new file mode 100644
index 0000000..ef295eb
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/token_string.go
@@ -0,0 +1,70 @@
+// Code generated by "stringer -type token -linecomment tokens.go"; DO NOT EDIT.
+
+package syntax
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[_EOF-1]
+ _ = x[_Name-2]
+ _ = x[_Literal-3]
+ _ = x[_Operator-4]
+ _ = x[_AssignOp-5]
+ _ = x[_IncOp-6]
+ _ = x[_Assign-7]
+ _ = x[_Define-8]
+ _ = x[_Arrow-9]
+ _ = x[_Star-10]
+ _ = x[_Lparen-11]
+ _ = x[_Lbrack-12]
+ _ = x[_Lbrace-13]
+ _ = x[_Rparen-14]
+ _ = x[_Rbrack-15]
+ _ = x[_Rbrace-16]
+ _ = x[_Comma-17]
+ _ = x[_Semi-18]
+ _ = x[_Colon-19]
+ _ = x[_Dot-20]
+ _ = x[_DotDotDot-21]
+ _ = x[_Break-22]
+ _ = x[_Case-23]
+ _ = x[_Chan-24]
+ _ = x[_Const-25]
+ _ = x[_Continue-26]
+ _ = x[_Default-27]
+ _ = x[_Defer-28]
+ _ = x[_Else-29]
+ _ = x[_Fallthrough-30]
+ _ = x[_For-31]
+ _ = x[_Func-32]
+ _ = x[_Go-33]
+ _ = x[_Goto-34]
+ _ = x[_If-35]
+ _ = x[_Import-36]
+ _ = x[_Interface-37]
+ _ = x[_Map-38]
+ _ = x[_Package-39]
+ _ = x[_Range-40]
+ _ = x[_Return-41]
+ _ = x[_Select-42]
+ _ = x[_Struct-43]
+ _ = x[_Switch-44]
+ _ = x[_Type-45]
+ _ = x[_Var-46]
+ _ = x[tokenCount-47]
+}
+
+const _token_name = "EOFnameliteralopop=opop=:=<-*([{)]},;:....breakcasechanconstcontinuedefaultdeferelsefallthroughforfuncgogotoifimportinterfacemappackagerangereturnselectstructswitchtypevar"
+
+var _token_index = [...]uint8{0, 3, 7, 14, 16, 19, 23, 24, 26, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 42, 47, 51, 55, 60, 68, 75, 80, 84, 95, 98, 102, 104, 108, 110, 116, 125, 128, 135, 140, 146, 152, 158, 164, 168, 171, 171}
+
+func (i token) String() string {
+ i -= 1
+ if i >= token(len(_token_index)-1) {
+ return "token(" + strconv.FormatInt(int64(i+1), 10) + ")"
+ }
+ return _token_name[_token_index[i]:_token_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/syntax/tokens.go b/src/cmd/compile/internal/syntax/tokens.go
new file mode 100644
index 0000000..60eae36
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/tokens.go
@@ -0,0 +1,157 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package syntax
+
+type token uint
+
+//go:generate stringer -type token -linecomment tokens.go
+
+const (
+ _ token = iota
+ _EOF // EOF
+
+ // names and literals
+ _Name // name
+ _Literal // literal
+
+ // operators and operations
+ // _Operator is excluding '*' (_Star)
+ _Operator // op
+ _AssignOp // op=
+ _IncOp // opop
+ _Assign // =
+ _Define // :=
+ _Arrow // <-
+ _Star // *
+
+ // delimiters
+ _Lparen // (
+ _Lbrack // [
+ _Lbrace // {
+ _Rparen // )
+ _Rbrack // ]
+ _Rbrace // }
+ _Comma // ,
+ _Semi // ;
+ _Colon // :
+ _Dot // .
+ _DotDotDot // ...
+
+ // keywords
+ _Break // break
+ _Case // case
+ _Chan // chan
+ _Const // const
+ _Continue // continue
+ _Default // default
+ _Defer // defer
+ _Else // else
+ _Fallthrough // fallthrough
+ _For // for
+ _Func // func
+ _Go // go
+ _Goto // goto
+ _If // if
+ _Import // import
+ _Interface // interface
+ _Map // map
+ _Package // package
+ _Range // range
+ _Return // return
+ _Select // select
+ _Struct // struct
+ _Switch // switch
+ _Type // type
+ _Var // var
+
+ // empty line comment to exclude it from .String
+ tokenCount //
+)
+
+const (
+ // for BranchStmt
+ Break = _Break
+ Continue = _Continue
+ Fallthrough = _Fallthrough
+ Goto = _Goto
+
+ // for CallStmt
+ Go = _Go
+ Defer = _Defer
+)
+
+// Make sure we have at most 64 tokens so we can use them in a set.
+const _ uint64 = 1 << (tokenCount - 1)
+
+// contains reports whether tok is in tokset.
+func contains(tokset uint64, tok token) bool {
+ return tokset&(1<<tok) != 0
+}
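+
+// For illustration, a token set is built by or-ing single-bit masks; the
+// constant name below is chosen for this sketch and is not defined in
+// this package:
+//
+//	const declStart uint64 = 1<<_Const | 1<<_Type | 1<<_Var | 1<<_Func
+//
+//	contains(declStart, _Type) // true
+//	contains(declStart, _If)   // false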
+
+type LitKind uint8
+
+// TODO(gri) With the 'i' (imaginary) suffix now permitted on integer
+// and floating-point numbers, having a single ImagLit does
+// not represent the literal kind well anymore. Remove it?
+const (
+ IntLit LitKind = iota
+ FloatLit
+ ImagLit
+ RuneLit
+ StringLit
+)
+
+type Operator uint
+
+//go:generate stringer -type Operator -linecomment tokens.go
+
+const (
+ _ Operator = iota
+
+ // Def is the : in :=
+ Def // :
+ Not // !
+ Recv // <-
+ Tilde // ~
+
+ // precOrOr
+ OrOr // ||
+
+ // precAndAnd
+ AndAnd // &&
+
+ // precCmp
+ Eql // ==
+ Neq // !=
+ Lss // <
+ Leq // <=
+ Gtr // >
+ Geq // >=
+
+ // precAdd
+ Add // +
+ Sub // -
+ Or // |
+ Xor // ^
+
+ // precMul
+ Mul // *
+ Div // /
+ Rem // %
+ And // &
+ AndNot // &^
+ Shl // <<
+ Shr // >>
+)
+
+// Operator precedences
+const (
+ _ = iota
+ precOrOr
+ precAndAnd
+ precCmp
+ precAdd
+ precMul
+)
diff --git a/src/cmd/compile/internal/syntax/walk.go b/src/cmd/compile/internal/syntax/walk.go
new file mode 100644
index 0000000..b025844
--- /dev/null
+++ b/src/cmd/compile/internal/syntax/walk.go
@@ -0,0 +1,362 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements syntax tree walking.
+
+package syntax
+
+import "fmt"
+
+// Inspect traverses an AST in pre-order: It starts by calling
+// f(node); node must not be nil. If f returns true, Inspect invokes f
+// recursively for each of the non-nil children of node, followed by a
+// call of f(nil).
+//
+// See Walk for caveats about shared nodes.
+func Inspect(root Node, f func(Node) bool) {
+ Walk(root, inspector(f))
+}
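+
+// A typical use of Inspect, shown as a sketch (f is assumed to be a *File
+// returned by Parse):
+//
+//	Inspect(f, func(n Node) bool {
+//		if call, ok := n.(*CallExpr); ok {
+//			_ = call // handle each call expression
+//		}
+//		return true // keep descending into children
+//	})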
+
+type inspector func(Node) bool
+
+func (v inspector) Visit(node Node) Visitor {
+ if v(node) {
+ return v
+ }
+ return nil
+}
+
+// Crawl traverses a syntax in pre-order: It starts by calling f(root);
+// root must not be nil. If f returns false (== "continue"), Crawl calls
+// f recursively for each of the non-nil children of that node; if f
+// returns true (== "stop"), Crawl does not traverse the respective node's
+// children.
+//
+// See Walk for caveats about shared nodes.
+//
+// Deprecated: Use Inspect instead.
+func Crawl(root Node, f func(Node) bool) {
+ Inspect(root, func(node Node) bool {
+ return node != nil && !f(node)
+ })
+}
+
+// Walk traverses an AST in pre-order: It starts by calling
+// v.Visit(node); node must not be nil. If the visitor w returned by
+// v.Visit(node) is not nil, Walk is invoked recursively with visitor
+// w for each of the non-nil children of node, followed by a call of
+// w.Visit(nil).
+//
+// Some nodes may be shared among multiple parent nodes (e.g., types in
+// field lists such as type T in "a, b, c T"). Such shared nodes are
+// walked multiple times.
+// TODO(gri) Revisit this design. It may make sense to walk those nodes
+// only once. A place where this matters is types2.TestResolveIdents.
+func Walk(root Node, v Visitor) {
+ walker{v}.node(root)
+}
+
+// A Visitor's Visit method is invoked for each node encountered by Walk.
+// If the result visitor w is not nil, Walk visits each of the children
+// of node with the visitor w, followed by a call of w.Visit(nil).
+type Visitor interface {
+ Visit(node Node) (w Visitor)
+}
+
+type walker struct {
+ v Visitor
+}
+
+func (w walker) node(n Node) {
+ if n == nil {
+ panic("nil node")
+ }
+
+ w.v = w.v.Visit(n)
+ if w.v == nil {
+ return
+ }
+
+ switch n := n.(type) {
+ // packages
+ case *File:
+ w.node(n.PkgName)
+ w.declList(n.DeclList)
+
+ // declarations
+ case *ImportDecl:
+ if n.LocalPkgName != nil {
+ w.node(n.LocalPkgName)
+ }
+ w.node(n.Path)
+
+ case *ConstDecl:
+ w.nameList(n.NameList)
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ if n.Values != nil {
+ w.node(n.Values)
+ }
+
+ case *TypeDecl:
+ w.node(n.Name)
+ w.fieldList(n.TParamList)
+ w.node(n.Type)
+
+ case *VarDecl:
+ w.nameList(n.NameList)
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ if n.Values != nil {
+ w.node(n.Values)
+ }
+
+ case *FuncDecl:
+ if n.Recv != nil {
+ w.node(n.Recv)
+ }
+ w.node(n.Name)
+ w.fieldList(n.TParamList)
+ w.node(n.Type)
+ if n.Body != nil {
+ w.node(n.Body)
+ }
+
+ // expressions
+ case *BadExpr: // nothing to do
+ case *Name: // nothing to do
+ case *BasicLit: // nothing to do
+
+ case *CompositeLit:
+ if n.Type != nil {
+ w.node(n.Type)
+ }
+ w.exprList(n.ElemList)
+
+ case *KeyValueExpr:
+ w.node(n.Key)
+ w.node(n.Value)
+
+ case *FuncLit:
+ w.node(n.Type)
+ w.node(n.Body)
+
+ case *ParenExpr:
+ w.node(n.X)
+
+ case *SelectorExpr:
+ w.node(n.X)
+ w.node(n.Sel)
+
+ case *IndexExpr:
+ w.node(n.X)
+ w.node(n.Index)
+
+ case *SliceExpr:
+ w.node(n.X)
+ for _, x := range n.Index {
+ if x != nil {
+ w.node(x)
+ }
+ }
+
+ case *AssertExpr:
+ w.node(n.X)
+ w.node(n.Type)
+
+ case *TypeSwitchGuard:
+ if n.Lhs != nil {
+ w.node(n.Lhs)
+ }
+ w.node(n.X)
+
+ case *Operation:
+ w.node(n.X)
+ if n.Y != nil {
+ w.node(n.Y)
+ }
+
+ case *CallExpr:
+ w.node(n.Fun)
+ w.exprList(n.ArgList)
+
+ case *ListExpr:
+ w.exprList(n.ElemList)
+
+ // types
+ case *ArrayType:
+ if n.Len != nil {
+ w.node(n.Len)
+ }
+ w.node(n.Elem)
+
+ case *SliceType:
+ w.node(n.Elem)
+
+ case *DotsType:
+ w.node(n.Elem)
+
+ case *StructType:
+ w.fieldList(n.FieldList)
+ for _, t := range n.TagList {
+ if t != nil {
+ w.node(t)
+ }
+ }
+
+ case *Field:
+ if n.Name != nil {
+ w.node(n.Name)
+ }
+ w.node(n.Type)
+
+ case *InterfaceType:
+ w.fieldList(n.MethodList)
+
+ case *FuncType:
+ w.fieldList(n.ParamList)
+ w.fieldList(n.ResultList)
+
+ case *MapType:
+ w.node(n.Key)
+ w.node(n.Value)
+
+ case *ChanType:
+ w.node(n.Elem)
+
+ // statements
+ case *EmptyStmt: // nothing to do
+
+ case *LabeledStmt:
+ w.node(n.Label)
+ w.node(n.Stmt)
+
+ case *BlockStmt:
+ w.stmtList(n.List)
+
+ case *ExprStmt:
+ w.node(n.X)
+
+ case *SendStmt:
+ w.node(n.Chan)
+ w.node(n.Value)
+
+ case *DeclStmt:
+ w.declList(n.DeclList)
+
+ case *AssignStmt:
+ w.node(n.Lhs)
+ if n.Rhs != nil {
+ w.node(n.Rhs)
+ }
+
+ case *BranchStmt:
+ if n.Label != nil {
+ w.node(n.Label)
+ }
+ // Target points to nodes elsewhere in the syntax tree
+
+ case *CallStmt:
+ w.node(n.Call)
+
+ case *ReturnStmt:
+ if n.Results != nil {
+ w.node(n.Results)
+ }
+
+ case *IfStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ w.node(n.Cond)
+ w.node(n.Then)
+ if n.Else != nil {
+ w.node(n.Else)
+ }
+
+ case *ForStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ if n.Cond != nil {
+ w.node(n.Cond)
+ }
+ if n.Post != nil {
+ w.node(n.Post)
+ }
+ w.node(n.Body)
+
+ case *SwitchStmt:
+ if n.Init != nil {
+ w.node(n.Init)
+ }
+ if n.Tag != nil {
+ w.node(n.Tag)
+ }
+ for _, s := range n.Body {
+ w.node(s)
+ }
+
+ case *SelectStmt:
+ for _, s := range n.Body {
+ w.node(s)
+ }
+
+ // helper nodes
+ case *RangeClause:
+ if n.Lhs != nil {
+ w.node(n.Lhs)
+ }
+ w.node(n.X)
+
+ case *CaseClause:
+ if n.Cases != nil {
+ w.node(n.Cases)
+ }
+ w.stmtList(n.Body)
+
+ case *CommClause:
+ if n.Comm != nil {
+ w.node(n.Comm)
+ }
+ w.stmtList(n.Body)
+
+ default:
+ panic(fmt.Sprintf("internal error: unknown node type %T", n))
+ }
+
+ w.v.Visit(nil)
+}
+
+func (w walker) declList(list []Decl) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) exprList(list []Expr) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) stmtList(list []Stmt) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) nameList(list []*Name) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
+
+func (w walker) fieldList(list []*Field) {
+ for _, n := range list {
+ w.node(n)
+ }
+}
diff --git a/src/cmd/compile/internal/test/README b/src/cmd/compile/internal/test/README
new file mode 100644
index 0000000..242ff79
--- /dev/null
+++ b/src/cmd/compile/internal/test/README
@@ -0,0 +1,4 @@
+This directory holds small tests and benchmarks of code
+generated by the compiler. This code is not for importing,
+and the tests are intended to verify that specific optimizations
+are applied and correct.
diff --git a/src/cmd/compile/internal/test/abiutils_test.go b/src/cmd/compile/internal/test/abiutils_test.go
new file mode 100644
index 0000000..12b4a0c
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutils_test.go
@@ -0,0 +1,399 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+ "cmd/internal/src"
+ "fmt"
+ "os"
+ "testing"
+)
+
+// AMD64 registers available:
+// - integer: RAX, RBX, RCX, RDI, RSI, R8, R9, R10, R11
+// - floating point: X0 - X14
+var configAMD64 = abi.NewABIConfig(9, 15, 0)
+
+func TestMain(m *testing.M) {
+ ssagen.Arch.LinkArch = &x86.Linkamd64
+ ssagen.Arch.REGSP = x86.REGSP
+ ssagen.Arch.MAXWIDTH = 1 << 50
+ types.MaxWidth = ssagen.Arch.MAXWIDTH
+ base.Ctxt = obj.Linknew(ssagen.Arch.LinkArch)
+ base.Ctxt.DiagFunc = base.Errorf
+ base.Ctxt.DiagFlush = base.FlushErrors
+ base.Ctxt.Bso = bufio.NewWriter(os.Stdout)
+ types.LocalPkg = types.NewPkg("", "local")
+ types.LocalPkg.Prefix = `""`
+ types.PtrSize = ssagen.Arch.LinkArch.PtrSize
+ types.RegSize = ssagen.Arch.LinkArch.RegSize
+ typecheck.InitUniverse()
+ os.Exit(m.Run())
+}
+
+func TestABIUtilsBasic1(t *testing.T) {
+
+ // func(x int32) int32
+ i32 := types.Types[types.TINT32]
+ ft := mkFuncType(nil, []*types.Type{i32}, []*types.Type{i32})
+
+ // expected results
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int32
+ OUT 0: R{ I0 } spilloffset: -1 typ: int32
+ offsetToSpillArea: 0 spillAreaSize: 8
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsBasic2(t *testing.T) {
+ // func(p1 int8, p2 int16, p3 int32, p4 int64,
+ // p5 float32, p6 float32, p7 float64, p8 float64,
+ // p9 int8, p10 int16, p11 int32, p12 int64,
+ // p13 float32, p14 float32, p15 float64, p16 float64,
+ // p17 complex128, p18 complex128, p19 complex128, p20 complex128,
+ // p21 complex64, p22 int8, p23 int16, p24 int32, p25 int64,
+ // p26 int8, p27 int16, p28 int32, p29 int64)
+ // (r1 int32, r2 float64, r3 float64) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ f32 := types.Types[types.TFLOAT32]
+ f64 := types.Types[types.TFLOAT64]
+ c64 := types.Types[types.TCOMPLEX64]
+ c128 := types.Types[types.TCOMPLEX128]
+ ft := mkFuncType(nil,
+ []*types.Type{
+ i8, i16, i32, i64,
+ f32, f32, f64, f64,
+ i8, i16, i32, i64,
+ f32, f32, f64, f64,
+ c128, c128, c128, c128, c64,
+ i8, i16, i32, i64,
+ i8, i16, i32, i64},
+ []*types.Type{i32, f64, f64})
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int8
+ IN 1: R{ I1 } spilloffset: 2 typ: int16
+ IN 2: R{ I2 } spilloffset: 4 typ: int32
+ IN 3: R{ I3 } spilloffset: 8 typ: int64
+ IN 4: R{ F0 } spilloffset: 16 typ: float32
+ IN 5: R{ F1 } spilloffset: 20 typ: float32
+ IN 6: R{ F2 } spilloffset: 24 typ: float64
+ IN 7: R{ F3 } spilloffset: 32 typ: float64
+ IN 8: R{ I4 } spilloffset: 40 typ: int8
+ IN 9: R{ I5 } spilloffset: 42 typ: int16
+ IN 10: R{ I6 } spilloffset: 44 typ: int32
+ IN 11: R{ I7 } spilloffset: 48 typ: int64
+ IN 12: R{ F4 } spilloffset: 56 typ: float32
+ IN 13: R{ F5 } spilloffset: 60 typ: float32
+ IN 14: R{ F6 } spilloffset: 64 typ: float64
+ IN 15: R{ F7 } spilloffset: 72 typ: float64
+ IN 16: R{ F8 F9 } spilloffset: 80 typ: complex128
+ IN 17: R{ F10 F11 } spilloffset: 96 typ: complex128
+ IN 18: R{ F12 F13 } spilloffset: 112 typ: complex128
+ IN 19: R{ } offset: 0 typ: complex128
+ IN 20: R{ } offset: 16 typ: complex64
+ IN 21: R{ I8 } spilloffset: 128 typ: int8
+ IN 22: R{ } offset: 24 typ: int16
+ IN 23: R{ } offset: 28 typ: int32
+ IN 24: R{ } offset: 32 typ: int64
+ IN 25: R{ } offset: 40 typ: int8
+ IN 26: R{ } offset: 42 typ: int16
+ IN 27: R{ } offset: 44 typ: int32
+ IN 28: R{ } offset: 48 typ: int64
+ OUT 0: R{ I0 } spilloffset: -1 typ: int32
+ OUT 1: R{ F0 } spilloffset: -1 typ: float64
+ OUT 2: R{ F1 } spilloffset: -1 typ: float64
+ offsetToSpillArea: 56 spillAreaSize: 136
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsArrays(t *testing.T) {
+ // func(p1 [1]int32, p2 [0]int32, p3 [1][1]int32, p4 [2]int32)
+ // (r1 [2]int32, r2 [1]int32, r3 [0]int32, r4 [1][1]int32) {
+ i32 := types.Types[types.TINT32]
+ ae := types.NewArray(i32, 0)
+ a1 := types.NewArray(i32, 1)
+ a2 := types.NewArray(i32, 2)
+ aa1 := types.NewArray(a1, 1)
+ ft := mkFuncType(nil, []*types.Type{a1, ae, aa1, a2},
+ []*types.Type{a2, a1, ae, aa1})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: [1]int32
+ IN 1: R{ } offset: 0 typ: [0]int32
+ IN 2: R{ I1 } spilloffset: 4 typ: [1][1]int32
+ IN 3: R{ } offset: 0 typ: [2]int32
+ OUT 0: R{ } offset: 8 typ: [2]int32
+ OUT 1: R{ I0 } spilloffset: -1 typ: [1]int32
+ OUT 2: R{ } offset: 16 typ: [0]int32
+ OUT 3: R{ I1 } spilloffset: -1 typ: [1][1]int32
+ offsetToSpillArea: 16 spillAreaSize: 8
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct1(t *testing.T) {
+	// type s struct { f1 int8; f2 int8; f3 struct {}; f4 int8; f5 int16 }
+	// func(p1 int8, p2 s, p3 int64)
+ // (r1 s, r2 int8, r3 int32) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+ ft := mkFuncType(nil, []*types.Type{i8, s, i64},
+ []*types.Type{s, i8, i32})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int8
+ IN 1: R{ I1 I2 I3 I4 } spilloffset: 2 typ: struct { int8; int8; struct {}; int8; int16 }
+ IN 2: R{ I5 } spilloffset: 8 typ: int64
+ OUT 0: R{ I0 I1 I2 I3 } spilloffset: -1 typ: struct { int8; int8; struct {}; int8; int16 }
+ OUT 1: R{ I4 } spilloffset: -1 typ: int8
+ OUT 2: R{ I5 } spilloffset: -1 typ: int32
+ offsetToSpillArea: 0 spillAreaSize: 16
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsStruct2(t *testing.T) {
+ // type s struct { f1 int64; f2 struct { } }
+ // type fs struct { f1 float64; f2 s; f3 struct { } }
+ // func(p1 s, p2 s, p3 fs)
+ // (r1 fs, r2 fs)
+ f64 := types.Types[types.TFLOAT64]
+ i64 := types.Types[types.TINT64]
+ s := mkstruct([]*types.Type{i64, mkstruct([]*types.Type{})})
+ fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+ ft := mkFuncType(nil, []*types.Type{s, s, fs},
+ []*types.Type{fs, fs})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: struct { int64; struct {} }
+ IN 1: R{ I1 } spilloffset: 16 typ: struct { int64; struct {} }
+ IN 2: R{ F0 I2 } spilloffset: 32 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ OUT 0: R{ F0 I0 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ OUT 1: R{ F1 I1 } spilloffset: -1 typ: struct { float64; struct { int64; struct {} }; struct {} }
+ offsetToSpillArea: 0 spillAreaSize: 64
+`)
+
+ abitest(t, ft, exp)
+}
+
+// TestABIUtilsEmptyFieldAtEndOfStruct is testing to make sure
+// the abi code is doing the right thing for struct types that have
+// a trailing zero-sized field (where we need to add padding).
+func TestABIUtilsEmptyFieldAtEndOfStruct(t *testing.T) {
+ // type s struct { f1 [2]int64; f2 struct { } }
+ // type s2 struct { f1 [3]int16; f2 struct { } }
+	// type fs struct { f1 float64; f2 s; f3 struct { } }
+	// func(p1 s, p2 [2]bool, p3 s2, p4 fs, p5 fs)
+	// (r1 fs, r2 [2]bool, r3 fs)
+ f64 := types.Types[types.TFLOAT64]
+ i64 := types.Types[types.TINT64]
+ i16 := types.Types[types.TINT16]
+ tb := types.Types[types.TBOOL]
+ ab2 := types.NewArray(tb, 2)
+ a2 := types.NewArray(i64, 2)
+ a3 := types.NewArray(i16, 3)
+ s := mkstruct([]*types.Type{a2, mkstruct([]*types.Type{})})
+ s2 := mkstruct([]*types.Type{a3, mkstruct([]*types.Type{})})
+ fs := mkstruct([]*types.Type{f64, s, mkstruct([]*types.Type{})})
+ ft := mkFuncType(nil, []*types.Type{s, ab2, s2, fs, fs},
+ []*types.Type{fs, ab2, fs})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ } offset: 0 typ: struct { [2]int64; struct {} }
+ IN 1: R{ } offset: 24 typ: [2]bool
+ IN 2: R{ } offset: 26 typ: struct { [3]int16; struct {} }
+ IN 3: R{ } offset: 40 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ IN 4: R{ } offset: 80 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ OUT 0: R{ } offset: 120 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ OUT 1: R{ } offset: 160 typ: [2]bool
+ OUT 2: R{ } offset: 168 typ: struct { float64; struct { [2]int64; struct {} }; struct {} }
+ offsetToSpillArea: 208 spillAreaSize: 0
+`)
+
+ abitest(t, ft, exp)
+
+	// Check to make sure that NumParamRegs yields 2 and not 3
+	// for struct "s" (i.e., that it handles the padding properly);
+	// see the layout sketch after this test.
+ nps := configAMD64.NumParamRegs(s)
+ if nps != 2 {
+		t.Errorf("NumParamRegs(%v) returned %d expected %d\n",
+ s, nps, 2)
+ }
+}
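+
+// A minimal sketch (assuming a 64-bit target) of the layout the checks
+// above depend on: the trailing zero-sized field forces padding so that
+// its address cannot point past the object, which is why "s" occupies
+// 24 bytes in the expected dump (offsets 0 and 24) while only its two
+// int64 words are register candidates:
+//
+//	package main
+//
+//	import (
+//		"fmt"
+//		"unsafe"
+//	)
+//
+//	type s struct {
+//		f1 [2]int64
+//		f2 struct{}
+//	}
+//
+//	func main() {
+//		fmt.Println(unsafe.Sizeof(s{})) // 24, not 16
+//	}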
+
+func TestABIUtilsSliceString(t *testing.T) {
+	// func(p1 []int32, p2 int8, p3 []int32, p4 int8, p5 string,
+	//	p6 int8, p7 int64, p8 []int32) (r1 string, r2 int64, r3 string, r4 []int32)
+ i32 := types.Types[types.TINT32]
+ sli32 := types.NewSlice(i32)
+ str := types.Types[types.TSTRING]
+ i8 := types.Types[types.TINT8]
+ i64 := types.Types[types.TINT64]
+ ft := mkFuncType(nil, []*types.Type{sli32, i8, sli32, i8, str, i8, i64, sli32},
+ []*types.Type{str, i64, str, sli32})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: []int32
+ IN 1: R{ I3 } spilloffset: 24 typ: int8
+ IN 2: R{ I4 I5 I6 } spilloffset: 32 typ: []int32
+ IN 3: R{ I7 } spilloffset: 56 typ: int8
+ IN 4: R{ } offset: 0 typ: string
+ IN 5: R{ I8 } spilloffset: 57 typ: int8
+ IN 6: R{ } offset: 16 typ: int64
+ IN 7: R{ } offset: 24 typ: []int32
+ OUT 0: R{ I0 I1 } spilloffset: -1 typ: string
+ OUT 1: R{ I2 } spilloffset: -1 typ: int64
+ OUT 2: R{ I3 I4 } spilloffset: -1 typ: string
+ OUT 3: R{ I5 I6 I7 } spilloffset: -1 typ: []int32
+ offsetToSpillArea: 48 spillAreaSize: 64
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsMethod(t *testing.T) {
+ // type s1 struct { f1 int16; f2 int16; f3 int16 }
+ // func(p1 *s1, p2 [7]*s1, p3 float64, p4 int16, p5 int16, p6 int16)
+ // (r1 [7]*s1, r2 float64, r3 int64)
+ i16 := types.Types[types.TINT16]
+ i64 := types.Types[types.TINT64]
+ f64 := types.Types[types.TFLOAT64]
+ s1 := mkstruct([]*types.Type{i16, i16, i16})
+ ps1 := types.NewPtr(s1)
+ a7 := types.NewArray(ps1, 7)
+ ft := mkFuncType(s1, []*types.Type{ps1, a7, f64, i16, i16, i16},
+ []*types.Type{a7, f64, i64})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; int16 }
+ IN 1: R{ I3 } spilloffset: 8 typ: *struct { int16; int16; int16 }
+ IN 2: R{ } offset: 0 typ: [7]*struct { int16; int16; int16 }
+ IN 3: R{ F0 } spilloffset: 16 typ: float64
+ IN 4: R{ I4 } spilloffset: 24 typ: int16
+ IN 5: R{ I5 } spilloffset: 26 typ: int16
+ IN 6: R{ I6 } spilloffset: 28 typ: int16
+ OUT 0: R{ } offset: 56 typ: [7]*struct { int16; int16; int16 }
+ OUT 1: R{ F0 } spilloffset: -1 typ: float64
+ OUT 2: R{ I0 } spilloffset: -1 typ: int64
+ offsetToSpillArea: 112 spillAreaSize: 32
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABIUtilsInterfaces(t *testing.T) {
+	// type s1 struct { f1 int16; f2 int16; f3 bool }
+	// type nei interface { F() string }
+	// func(p1 s1, p2 interface{}, p3 interface{}, p4 nei,
+	//	p5 *interface{}, p6 nei, p7 int16)
+	//	(r1 interface{}, r2 nei, r3 *interface{})
+ ei := types.Types[types.TINTER] // interface{}
+ pei := types.NewPtr(ei) // *interface{}
+ fldt := mkFuncType(types.FakeRecvType(), []*types.Type{},
+ []*types.Type{types.Types[types.TSTRING]})
+ field := types.NewField(src.NoXPos, typecheck.Lookup("F"), fldt)
+ nei := types.NewInterface(types.LocalPkg, []*types.Field{field}, false)
+ i16 := types.Types[types.TINT16]
+ tb := types.Types[types.TBOOL]
+ s1 := mkstruct([]*types.Type{i16, i16, tb})
+ ft := mkFuncType(nil, []*types.Type{s1, ei, ei, nei, pei, nei, i16},
+ []*types.Type{ei, nei, pei})
+
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 I1 I2 } spilloffset: 0 typ: struct { int16; int16; bool }
+ IN 1: R{ I3 I4 } spilloffset: 8 typ: interface {}
+ IN 2: R{ I5 I6 } spilloffset: 24 typ: interface {}
+ IN 3: R{ I7 I8 } spilloffset: 40 typ: interface { F() string }
+ IN 4: R{ } offset: 0 typ: *interface {}
+ IN 5: R{ } offset: 8 typ: interface { F() string }
+ IN 6: R{ } offset: 24 typ: int16
+ OUT 0: R{ I0 I1 } spilloffset: -1 typ: interface {}
+ OUT 1: R{ I2 I3 } spilloffset: -1 typ: interface { F() string }
+ OUT 2: R{ I4 } spilloffset: -1 typ: *interface {}
+ offsetToSpillArea: 32 spillAreaSize: 56
+`)
+
+ abitest(t, ft, exp)
+}
+
+func TestABINumParamRegs(t *testing.T) {
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ f32 := types.Types[types.TFLOAT32]
+ f64 := types.Types[types.TFLOAT64]
+ c64 := types.Types[types.TCOMPLEX64]
+ c128 := types.Types[types.TCOMPLEX128]
+
+ s := mkstruct([]*types.Type{i8, i8, mkstruct([]*types.Type{}), i8, i16})
+ a := types.NewArray(s, 3)
+
+ nrtest(t, i8, 1)
+ nrtest(t, i16, 1)
+ nrtest(t, i32, 1)
+ nrtest(t, i64, 1)
+ nrtest(t, f32, 1)
+ nrtest(t, f64, 1)
+ nrtest(t, c64, 2)
+ nrtest(t, c128, 2)
+ nrtest(t, s, 4)
+ nrtest(t, a, 12)
+
+}
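+
+// The expected counts above follow from how types flatten into register
+// words: each scalar takes one register, complex values take two
+// floating-point registers, the struct "s" takes one register per
+// non-zero-sized field (four here), and the array [3]s repeats that,
+// 3 * 4 = 12. As a hypothetical additional case under the same harness,
+// a slice header would flatten to three registers (pointer, length,
+// capacity), matching the R{ I0 I1 I2 } assignments seen in the slice
+// tests above:
+//
+//	nrtest(t, types.NewSlice(i32), 3)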
+
+func TestABIUtilsComputePadding(t *testing.T) {
+	// type s1 struct { f1 int8; f2 int16; f3 struct{}; f4 int32; f5 int64 }
+ i8 := types.Types[types.TINT8]
+ i16 := types.Types[types.TINT16]
+ i32 := types.Types[types.TINT32]
+ i64 := types.Types[types.TINT64]
+ emptys := mkstruct([]*types.Type{})
+ s1 := mkstruct([]*types.Type{i8, i16, emptys, i32, i64})
+ // func (p1 int32, p2 s1, p3 emptys, p4 [1]int32)
+ a1 := types.NewArray(i32, 1)
+ ft := mkFuncType(nil, []*types.Type{i32, s1, emptys, a1}, []*types.Type{})
+
+	// Run abitest() just to document what we expect to see.
+ exp := makeExpectedDump(`
+ IN 0: R{ I0 } spilloffset: 0 typ: int32
+ IN 1: R{ I1 I2 I3 I4 } spilloffset: 8 typ: struct { int8; int16; struct {}; int32; int64 }
+ IN 2: R{ } offset: 0 typ: struct {}
+ IN 3: R{ I5 } spilloffset: 24 typ: [1]int32
+ offsetToSpillArea: 0 spillAreaSize: 32
+`)
+ abitest(t, ft, exp)
+
+ // Analyze with full set of registers, then call ComputePadding
+ // on the second param, verifying the results.
+ regRes := configAMD64.ABIAnalyze(ft, false)
+ padding := make([]uint64, 32)
+ parm := regRes.InParams()[1]
+ padding = parm.ComputePadding(padding)
+ want := "[1 1 1 0]"
+ got := fmt.Sprintf("%+v", padding)
+ if got != want {
+		t.Errorf("padding mismatch: wanted %q got %q\n", want, got)
+ }
+}
diff --git a/src/cmd/compile/internal/test/abiutilsaux_test.go b/src/cmd/compile/internal/test/abiutilsaux_test.go
new file mode 100644
index 0000000..b945633
--- /dev/null
+++ b/src/cmd/compile/internal/test/abiutilsaux_test.go
@@ -0,0 +1,132 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+// This file contains utility routines and harness infrastructure used
+// by the ABI tests in "abiutils_test.go".
+
+import (
+ "cmd/compile/internal/abi"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+ "testing"
+ "text/scanner"
+)
+
+func mkParamResultField(t *types.Type, s *types.Sym, which ir.Class) *types.Field {
+ field := types.NewField(src.NoXPos, s, t)
+ n := typecheck.NewName(s)
+ n.Class = which
+ field.Nname = n
+ n.SetType(t)
+ return field
+}
+
+// mkstruct is a helper routine to create a struct type with fields
+// of the types specified in 'fieldtypes'.
+func mkstruct(fieldtypes []*types.Type) *types.Type {
+ fields := make([]*types.Field, len(fieldtypes))
+ for k, t := range fieldtypes {
+ if t == nil {
+ panic("bad -- field has no type")
+ }
+ f := types.NewField(src.NoXPos, nil, t)
+ fields[k] = f
+ }
+ s := types.NewStruct(types.LocalPkg, fields)
+ return s
+}
+
+func mkFuncType(rcvr *types.Type, ins []*types.Type, outs []*types.Type) *types.Type {
+ q := typecheck.Lookup("?")
+ inf := []*types.Field{}
+ for _, it := range ins {
+ inf = append(inf, mkParamResultField(it, q, ir.PPARAM))
+ }
+ outf := []*types.Field{}
+ for _, ot := range outs {
+ outf = append(outf, mkParamResultField(ot, q, ir.PPARAMOUT))
+ }
+ var rf *types.Field
+ if rcvr != nil {
+ rf = mkParamResultField(rcvr, q, ir.PPARAM)
+ }
+ return types.NewSignature(types.LocalPkg, rf, nil, inf, outf)
+}
+
+type expectedDump struct {
+ dump string
+ file string
+ line int
+}
+
+func tokenize(src string) []string {
+ var s scanner.Scanner
+ s.Init(strings.NewReader(src))
+ res := []string{}
+ for tok := s.Scan(); tok != scanner.EOF; tok = s.Scan() {
+ res = append(res, s.TokenText())
+ }
+ return res
+}
+
+func verifyParamResultOffset(t *testing.T, f *types.Field, r abi.ABIParamAssignment, which string, idx int) int {
+ n := ir.AsNode(f.Nname).(*ir.Name)
+ if n.FrameOffset() != int64(r.Offset()) {
+ t.Errorf("%s %d: got offset %d wanted %d t=%v",
+ which, idx, r.Offset(), n.Offset_, f.Type)
+ return 1
+ }
+ return 0
+}
+
+func makeExpectedDump(e string) expectedDump {
+ return expectedDump{dump: e}
+}
+
+func difftokens(atoks []string, etoks []string) string {
+ if len(atoks) != len(etoks) {
+ return fmt.Sprintf("expected %d tokens got %d",
+ len(etoks), len(atoks))
+ }
+ for i := 0; i < len(etoks); i++ {
+ if etoks[i] == atoks[i] {
+ continue
+ }
+
+ return fmt.Sprintf("diff at token %d: expected %q got %q",
+ i, etoks[i], atoks[i])
+ }
+ return ""
+}
+
+func nrtest(t *testing.T, ft *types.Type, expected int) {
+ types.CalcSize(ft)
+ got := configAMD64.NumParamRegs(ft)
+ if got != expected {
+		t.Errorf("\nexpected num regs = %d, got %d, type %v", expected, got, ft)
+ }
+}
+
+func abitest(t *testing.T, ft *types.Type, exp expectedDump) {
+
+ types.CalcSize(ft)
+
+ // Analyze with full set of registers.
+ regRes := configAMD64.ABIAnalyze(ft, false)
+ regResString := strings.TrimSpace(regRes.String())
+
+ // Check results.
+ reason := difftokens(tokenize(regResString), tokenize(exp.dump))
+ if reason != "" {
+ t.Errorf("\nexpected:\n%s\ngot:\n%s\nreason: %s",
+ strings.TrimSpace(exp.dump), regResString, reason)
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/align_test.go b/src/cmd/compile/internal/test/align_test.go
new file mode 100644
index 0000000..32afc92
--- /dev/null
+++ b/src/cmd/compile/internal/test/align_test.go
@@ -0,0 +1,96 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test to make sure that equality functions (and hash
+// functions) don't do unaligned reads on architectures
+// that can't do unaligned reads. See issue 46283.
+
+package test
+
+import "testing"
+
+type T1 struct {
+ x float32
+ a, b, c, d int16 // memequal64
+}
+type T2 struct {
+ x float32
+ a, b, c, d int32 // memequal128
+}
+
+type A2 [2]byte // eq uses a 2-byte load
+type A4 [4]byte // eq uses a 4-byte load
+type A8 [8]byte // eq uses an 8-byte load
+
+//go:noinline
+func cmpT1(p, q *T1) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpT2(p, q *T2) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA2(p, q *A2) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA4(p, q *A4) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+//go:noinline
+func cmpA8(p, q *A8) {
+ if *p != *q {
+ panic("comparison test wrong")
+ }
+}
+
+func TestAlignEqual(t *testing.T) {
+ cmpT1(&T1{}, &T1{})
+ cmpT2(&T2{}, &T2{})
+
+ m1 := map[T1]bool{}
+ m1[T1{}] = true
+ m1[T1{}] = false
+ if len(m1) != 1 {
+ t.Fatalf("len(m1)=%d, want 1", len(m1))
+ }
+ m2 := map[T2]bool{}
+ m2[T2{}] = true
+ m2[T2{}] = false
+ if len(m2) != 1 {
+ t.Fatalf("len(m2)=%d, want 1", len(m2))
+ }
+
+ type X2 struct {
+ y byte
+ z A2
+ }
+ var x2 X2
+ cmpA2(&x2.z, &A2{})
+ type X4 struct {
+ y byte
+ z A4
+ }
+ var x4 X4
+ cmpA4(&x4.z, &A4{})
+ type X8 struct {
+ y byte
+ z A8
+ }
+ var x8 X8
+ cmpA8(&x8.z, &A8{})
+}
diff --git a/src/cmd/compile/internal/test/bench_test.go b/src/cmd/compile/internal/test/bench_test.go
new file mode 100644
index 0000000..4724600
--- /dev/null
+++ b/src/cmd/compile/internal/test/bench_test.go
@@ -0,0 +1,124 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+var globl int64
+var globl32 int32
+
+func BenchmarkLoadAdd(b *testing.B) {
+ x := make([]int64, 1024)
+ y := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= x[i] + y[i]
+ }
+ globl = s
+ }
+}
+
+// Added for ppc64 extswsli on power9
+func BenchmarkExtShift(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int64
+ for i := range x {
+ s ^= int64(x[i]+32) * 8
+ }
+ globl = s
+ }
+}
+
+func BenchmarkModify(b *testing.B) {
+ a := make([]int64, 1024)
+ v := globl
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += v
+ }
+ }
+}
+
+func BenchmarkMullImm(b *testing.B) {
+ x := make([]int32, 1024)
+ for i := 0; i < b.N; i++ {
+ var s int32
+ for i := range x {
+ s += x[i] * 100
+ }
+ globl32 = s
+ }
+}
+
+func BenchmarkConstModify(b *testing.B) {
+ a := make([]int64, 1024)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] += 3
+ }
+ }
+}
+
+func BenchmarkBitSet(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] |= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitClear(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] &^= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitToggle(b *testing.B) {
+ const N = 64 * 8
+ a := make([]uint64, N/64)
+ for i := 0; i < b.N; i++ {
+ for j := uint64(0); j < N; j++ {
+ a[j/64] ^= 1 << (j % 64)
+ }
+ }
+}
+
+func BenchmarkBitSetConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] |= 1 << 37
+ }
+ }
+}
+
+func BenchmarkBitClearConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] &^= 1 << 37
+ }
+ }
+}
+
+func BenchmarkBitToggleConst(b *testing.B) {
+ const N = 64
+ a := make([]uint64, N)
+ for i := 0; i < b.N; i++ {
+ for j := range a {
+ a[j] ^= 1 << 37
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/clobberdead_test.go b/src/cmd/compile/internal/test/clobberdead_test.go
new file mode 100644
index 0000000..88b7d34
--- /dev/null
+++ b/src/cmd/compile/internal/test/clobberdead_test.go
@@ -0,0 +1,55 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+const helloSrc = `
+package main
+import "fmt"
+func main() { fmt.Println("hello") }
+`
+
+func TestClobberDead(t *testing.T) {
+	// Test that clobberdead mode generates a correct program.
+ runHello(t, "-clobberdead")
+}
+
+func TestClobberDeadReg(t *testing.T) {
+	// Test that clobberdeadreg mode generates a correct program.
+ runHello(t, "-clobberdeadreg")
+}
+
+func runHello(t *testing.T, flag string) {
+ if testing.Short() {
+ // This test rebuilds the runtime with a special flag, which
+ // takes a while.
+ t.Skip("skip in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ tmpdir := t.TempDir()
+ src := filepath.Join(tmpdir, "x.go")
+ err := ioutil.WriteFile(src, []byte(helloSrc), 0644)
+ if err != nil {
+ t.Fatalf("write file failed: %v", err)
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), "run", "-gcflags=all="+flag, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go run failed: %v\n%s", err, out)
+ }
+ if string(out) != "hello\n" {
+ t.Errorf("wrong output: got %q, want %q", out, "hello\n")
+ }
+}
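+
+// The same check can be reproduced by hand with a Go toolchain on PATH;
+// the flag is applied to every package (including the runtime, which is
+// why the rebuild takes a while and the test skips short mode), e.g.:
+//
+//	go run -gcflags=all=-clobberdead x.go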
diff --git a/src/cmd/compile/internal/test/constFold_test.go b/src/cmd/compile/internal/test/constFold_test.go
new file mode 100644
index 0000000..7159f0e
--- /dev/null
+++ b/src/cmd/compile/internal/test/constFold_test.go
@@ -0,0 +1,18111 @@
+// run
+// Code generated by gen/constFoldGen.go. DO NOT EDIT.
+
+package test
+
+import "testing"
+
+func TestConstFolduint64add(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 18446744073709551615", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "+", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967295", "+", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x + y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 18446744073709551615
+ r = x + y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 18446744073709551614", "+", r)
+ }
+}
+func TestConstFolduint64sub(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("0 %s 1 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584320 {
+ t.Errorf("0 %s 4294967296 = %d, want 18446744069414584320", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584321 {
+ t.Errorf("1 %s 4294967296 = %d, want 18446744069414584321", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 2", "-", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967297", "-", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x - y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 18446744069414584319 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584319", "-", r)
+ }
+ y = 18446744073709551615
+ r = x - y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint64div(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "/", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x / y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "/", r)
+ }
+ y = 18446744073709551615
+ r = x / y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint64mul(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 18446744073709551615", "*", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 18446744069414584320", "*", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551615", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 18446744069414584320 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 18446744069414584320", "*", r)
+ }
+ y = 18446744073709551615
+ r = x * y
+ if r != 1 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint64mod(t *testing.T) {
+ var x, y, r uint64
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 4294967296", "%", r)
+ }
+ x = 18446744073709551615
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 18446744073709551615
+ r = x % y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "%", r)
+ }
+}
+func TestConstFoldint64add(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -9223372032559808512", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "+", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 2", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775806", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -9223372032559808511", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "+", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808513", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want -8589934592", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967297", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967295", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808510", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808511", "+", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967297 {
+ t.Errorf("-1 %s -4294967296 = %d, want -4294967297", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("-1 %s 4294967296 = %d, want 4294967295", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 9223372036854775806", "+", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want -4294967296", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want 4294967296", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 9223372036854775807", "+", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775806", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != -4294967295 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967295", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967297", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775808", "+", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x + y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808511", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967295", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967297", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 8589934592", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808513", "+", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -1", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 9223372032559808510", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775805", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -9223372032559808514", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want -4", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -3", "+", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x + y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "+", r)
+ }
+ y = -9223372036854775807
+ r = x + y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "+", r)
+ }
+ y = -4294967296
+ r = x + y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 9223372032559808511", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 9223372036854775806", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -9223372036854775808", "+", r)
+ }
+ y = 4294967296
+ r = x + y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -9223372032559808513", "+", r)
+ }
+ y = 9223372036854775806
+ r = x + y
+ if r != -3 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -3", "+", r)
+ }
+ y = 9223372036854775807
+ r = x + y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint64sub(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 9223372032559808512", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 2", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want 1", "-", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -9223372032559808511", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 9223372032559808513", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 3 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 3", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 2", "-", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372032559808512 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 9223372032559808511", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("-4294967296 %s -1 = %d, want -4294967295", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967297", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -8589934592", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 9223372032559808514 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 9223372032559808514", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 9223372032559808513 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 9223372032559808513", "-", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775806", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967295", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967297 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967297", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775807", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 9223372036854775807", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("0 %s -4294967296 = %d, want 4294967296", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967296 {
+ t.Errorf("0 %s 4294967296 = %d, want -4294967296", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want -9223372036854775806", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want -9223372036854775807", "-", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775807", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775808", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("1 %s -4294967296 = %d, want 4294967297", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != -4294967295 {
+ t.Errorf("1 %s 4294967296 = %d, want -4294967295", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372036854775805 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want -9223372036854775805", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want -9223372036854775806", "-", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x - y
+ if r != -9223372032559808512 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want -9223372032559808512", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 8589934592", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 4294967297 {
+ t.Errorf("4294967296 %s -1 = %d, want 4294967297", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != -9223372032559808510 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -9223372032559808510", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -9223372032559808511 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -9223372032559808511", "-", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want -2", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -3 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want -3", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808514 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -9223372032559808514", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775805 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775805", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808510 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 9223372032559808510", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -1", "-", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x - y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -1", "-", r)
+ }
+ y = -9223372036854775807
+ r = x - y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -2", "-", r)
+ }
+ y = -4294967296
+ r = x - y
+ if r != -9223372032559808513 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -9223372032559808513", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775808", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775806", "-", r)
+ }
+ y = 4294967296
+ r = x - y
+ if r != 9223372032559808511 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 9223372032559808511", "-", r)
+ }
+ y = 9223372036854775806
+ r = x - y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "-", r)
+ }
+ y = 9223372036854775807
+ r = x - y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint64div(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 1", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483648 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 2147483648", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -2147483648", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "/", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -4294967296 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -1 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 0", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 0", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 0", "/", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x / y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 0", "/", r)
+ }
+ y = -9223372036854775807
+ r = x / y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "/", r)
+ }
+ y = -4294967296
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want -2147483647", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "/", r)
+ }
+ y = 4294967296
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 2147483647", "/", r)
+ }
+ y = 9223372036854775806
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "/", r)
+ }
+ y = 9223372036854775807
+ r = x / y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "/", r)
+ }
+}
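+// TestConstFoldint64mul checks constant-folded int64 multiplication; the
+// expected values wrap modulo 2^64 (two's complement), matching runtime behavior.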
+func TestConstFoldint64mul(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -9223372036854775808", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -9223372036854775808", "*", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want -1", "*", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s -1 = %d, want 4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 1 = %d, want -4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want 8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("-1 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("-1 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want -9223372036854775807", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("1 %s -4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("1 %s 4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 9223372036854775807", "*", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s -1 = %d, want -4294967296", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 1 = %d, want 4294967296", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want -4294967296", "*", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 0", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 8589934592 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 8589934592", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 9223372036854775806", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -8589934592 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want -8589934592", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != 4 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 4", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want -9223372036854775806", "*", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x * y
+ if r != -9223372036854775808 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775808", "*", r)
+ }
+ y = -9223372036854775807
+ r = x * y
+ if r != -1 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want -1", "*", r)
+ }
+ y = -4294967296
+ r = x * y
+ if r != 4294967296 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967296", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want -9223372036854775807", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 9223372036854775807", "*", r)
+ }
+ y = 4294967296
+ r = x * y
+ if r != -4294967296 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want -4294967296", "*", r)
+ }
+ y = 9223372036854775806
+ r = x * y
+ if r != -9223372036854775806 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want -9223372036854775806", "*", r)
+ }
+ y = 9223372036854775807
+ r = x * y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 1", "*", r)
+ }
+}
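+// TestConstFoldint64mod checks constant-folded int64 remainder; the result
+// takes the sign of the dividend (e.g. MinInt64 % MaxInt64 == -1).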
+func TestConstFoldint64mod(t *testing.T) {
+ var x, y, r int64
+ x = -9223372036854775808
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -2 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775806 = %d, want -2", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = -9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775808 = %d, want -9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s -4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -4294967295 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = -4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775808 = %d, want -4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s -9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775806 = %d, want -4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 9223372036854775807 = %d, want -4294967296", "%", r)
+ }
+ x = -1
+ y = -9223372036854775808
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775808 = %d, want -1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -9223372036854775807 = %d, want -1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -4294967296 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775806 = %d, want -1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 9223372036854775807 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -9223372036854775808
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775808 = %d, want 0", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -9223372036854775808
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775808 = %d, want 1", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -9223372036854775807 = %d, want 1", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -4294967296 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967296 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 9223372036854775807 = %d, want 1", "%", r)
+ }
+ x = 4294967296
+ y = -9223372036854775808
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775808 = %d, want 4294967296", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s -9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -4294967296 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775806 = %d, want 4294967296", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 9223372036854775807 = %d, want 4294967296", "%", r)
+ }
+ x = 9223372036854775806
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775808 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s -9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s -4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967294 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 4294967294", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 9223372036854775806 = %d, want 0", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 9223372036854775807 = %d, want 9223372036854775806", "%", r)
+ }
+ x = 9223372036854775807
+ y = -9223372036854775808
+ r = x % y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s -9223372036854775808 = %d, want 9223372036854775807", "%", r)
+ }
+ y = -9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -9223372036854775807 = %d, want 0", "%", r)
+ }
+ y = -4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s -4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967296
+ r = x % y
+ if r != 4294967295 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 4294967295", "%", r)
+ }
+ y = 9223372036854775806
+ r = x % y
+ if r != 1 {
+ t.Errorf("9223372036854775807 %s 9223372036854775806 = %d, want 1", "%", r)
+ }
+ y = 9223372036854775807
+ r = x % y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 9223372036854775807 = %d, want 0", "%", r)
+ }
+}
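+// TestConstFolduint32add and the uint32 tests below exercise unsigned
+// arithmetic, which wraps modulo 2^32 (e.g. 1 + 4294967295 == 0).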
+func TestConstFolduint32add(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("0 %s 4294967295 = %d, want 4294967295", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "+", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x + y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "+", r)
+ }
+ y = 4294967295
+ r = x + y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 4294967294", "+", r)
+ }
+}
+func TestConstFolduint32sub(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("0 %s 1 = %d, want 4294967295", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 4294967295 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 4294967295 = %d, want 2", "-", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x - y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "-", r)
+ }
+ y = 4294967295
+ r = x - y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint32div(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "/", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x / y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "/", r)
+ }
+ y = 4294967295
+ r = x / y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint32mul(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("1 %s 4294967295 = %d, want 4294967295", "*", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("4294967295 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967295", "*", r)
+ }
+ y = 4294967295
+ r = x * y
+ if r != 1 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint32mod(t *testing.T) {
+ var x, y, r uint32
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 4294967295 = %d, want 1", "%", r)
+ }
+ x = 4294967295
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 1 = %d, want 0", "%", r)
+ }
+ y = 4294967295
+ r = x % y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "%", r)
+ }
+}
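+// TestConstFoldint32add and the int32 tests below check two's-complement
+// wraparound on overflow (e.g. 2147483647 + 1 == -2147483648).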
+func TestConstFoldint32add(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want 2147483647", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483647", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "+", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x + y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 2 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483648", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483646", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483647 = %d, want -2147483648", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("-1 %s 2147483647 = %d, want 2147483646", "+", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want -2147483647", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want 2147483647", "+", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x + y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != -2147483646 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483646", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483648", "+", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x + y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "+", r)
+ }
+ y = -2147483647
+ r = x + y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s -1 = %d, want 2147483646", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s 1 = %d, want -2147483648", "+", r)
+ }
+ y = 2147483647
+ r = x + y
+ if r != -2 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint32sub(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483647", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-2147483648 %s 1 = %d, want 2147483647", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want 1", "-", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x - y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("-2147483647 %s -1 = %d, want -2147483646", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483648", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483648 = %d, want 2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483646", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483648", "-", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("0 %s -2147483648 = %d, want -2147483648", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("0 %s -2147483647 = %d, want 2147483647", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("0 %s 2147483647 = %d, want -2147483647", "-", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x - y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483647", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483648", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != -2147483646 {
+ t.Errorf("1 %s 2147483647 = %d, want -2147483646", "-", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x - y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -1", "-", r)
+ }
+ y = -2147483647
+ r = x - y
+ if r != -2 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483648", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 2147483646 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483646", "-", r)
+ }
+ y = 2147483647
+ r = x - y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "-", r)
+ }
+}
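+// TestConstFoldint32div checks truncated (toward zero) division, including the
+// overflow case -2147483648 / -1, which wraps back to -2147483648.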
+func TestConstFoldint32div(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 1", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -2147483647 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 2147483647 = %d, want 0", "/", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x / y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 0", "/", r)
+ }
+ y = -2147483647
+ r = x / y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "/", r)
+ }
+ y = 2147483647
+ r = x / y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint32mul(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -2147483648", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s -1 = %d, want -2147483648", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 1 = %d, want -2147483648", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -2147483648", "*", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-2147483647 %s -1 = %d, want 2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 1 = %d, want -2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("-1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("-1 %s -2147483647 = %d, want 2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("-1 %s 2147483647 = %d, want -2147483647", "*", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("1 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("1 %s -2147483647 = %d, want -2147483647", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("1 %s 2147483647 = %d, want 2147483647", "*", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x * y
+ if r != -2147483648 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want -2147483648", "*", r)
+ }
+ y = -2147483647
+ r = x * y
+ if r != -1 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -2147483647 {
+ t.Errorf("2147483647 %s -1 = %d, want -2147483647", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("2147483647 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 1 = %d, want 2147483647", "*", r)
+ }
+ y = 2147483647
+ r = x * y
+ if r != 1 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint32mod(t *testing.T) {
+ var x, y, r int32
+ x = -2147483648
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = -2147483647
+ y = -2147483648
+ r = x % y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s -2147483648 = %d, want -2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -2147483648
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483648 = %d, want -1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -2147483647 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 2147483647 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -2147483648
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483648 = %d, want 0", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 2147483647 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -2147483648
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483648 = %d, want 1", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -2147483647 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 2147483647 = %d, want 1", "%", r)
+ }
+ x = 2147483647
+ y = -2147483648
+ r = x % y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s -2147483648 = %d, want 2147483647", "%", r)
+ }
+ y = -2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -2147483647 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 1 = %d, want 0", "%", r)
+ }
+ y = 2147483647
+ r = x % y
+ if r != 0 {
+ t.Errorf("2147483647 %s 2147483647 = %d, want 0", "%", r)
+ }
+}
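+// TestConstFolduint16add and the uint16 tests below wrap modulo 2^16
+// (e.g. 65535 + 1 == 0).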
+func TestConstFolduint16add(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65535 {
+ t.Errorf("0 %s 65535 = %d, want 65535", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "+", r)
+ }
+ x = 65535
+ y = 0
+ r = x + y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "+", r)
+ }
+ y = 65535
+ r = x + y
+ if r != 65534 {
+ t.Errorf("65535 %s 65535 = %d, want 65534", "+", r)
+ }
+}
+func TestConstFolduint16sub(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65535 {
+ t.Errorf("0 %s 1 = %d, want 65535", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 65535 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 65535 = %d, want 2", "-", r)
+ }
+ x = 65535
+ y = 0
+ r = x - y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "-", r)
+ }
+ y = 65535
+ r = x - y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint16div(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "/", r)
+ }
+ x = 65535
+ y = 1
+ r = x / y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "/", r)
+ }
+ y = 65535
+ r = x / y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint16mul(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 65535 {
+ t.Errorf("1 %s 65535 = %d, want 65535", "*", r)
+ }
+ x = 65535
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("65535 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 65535 {
+ t.Errorf("65535 %s 1 = %d, want 65535", "*", r)
+ }
+ y = 65535
+ r = x * y
+ if r != 1 {
+ t.Errorf("65535 %s 65535 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint16mod(t *testing.T) {
+ var x, y, r uint16
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 65535 = %d, want 1", "%", r)
+ }
+ x = 65535
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 1 = %d, want 0", "%", r)
+ }
+ y = 65535
+ r = x % y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "%", r)
+ }
+}
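+// TestConstFoldint16add and the int16 tests below check two's-complement
+// wraparound at the 16-bit boundaries (e.g. 32767 + 1 == -32768).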
+func TestConstFoldint16add(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-32768 %s -1 = %d, want 32767", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32768 %s 1 = %d, want -32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "+", r)
+ }
+ x = -32767
+ y = -32768
+ r = x + y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 2 {
+ t.Errorf("-32767 %s -32767 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-32767 %s -1 = %d, want -32768", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32766 {
+ t.Errorf("-32767 %s 1 = %d, want -32766", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -32768
+ r = x + y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("-1 %s -32767 = %d, want -32768", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32765 {
+ t.Errorf("-1 %s 32766 = %d, want 32765", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32766 {
+ t.Errorf("-1 %s 32767 = %d, want 32766", "+", r)
+ }
+ x = 0
+ y = -32768
+ r = x + y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32767 {
+ t.Errorf("0 %s -32767 = %d, want -32767", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32766 {
+ t.Errorf("0 %s 32766 = %d, want 32766", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != 32767 {
+ t.Errorf("0 %s 32767 = %d, want 32767", "+", r)
+ }
+ x = 1
+ y = -32768
+ r = x + y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -32766 {
+ t.Errorf("1 %s -32767 = %d, want -32766", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != 32767 {
+ t.Errorf("1 %s 32766 = %d, want 32767", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -32768 {
+ t.Errorf("1 %s 32767 = %d, want -32768", "+", r)
+ }
+ x = 32766
+ y = -32768
+ r = x + y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != -1 {
+ t.Errorf("32766 %s -32767 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32765 {
+ t.Errorf("32766 %s -1 = %d, want 32765", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32766 %s 1 = %d, want 32767", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -4 {
+ t.Errorf("32766 %s 32766 = %d, want -4", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -3 {
+ t.Errorf("32766 %s 32767 = %d, want -3", "+", r)
+ }
+ x = 32767
+ y = -32768
+ r = x + y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "+", r)
+ }
+ y = -32767
+ r = x + y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 32766 {
+ t.Errorf("32767 %s -1 = %d, want 32766", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -32768 {
+ t.Errorf("32767 %s 1 = %d, want -32768", "+", r)
+ }
+ y = 32766
+ r = x + y
+ if r != -3 {
+ t.Errorf("32767 %s 32766 = %d, want -3", "+", r)
+ }
+ y = 32767
+ r = x + y
+ if r != -2 {
+ t.Errorf("32767 %s 32767 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint16sub(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32768 %s -1 = %d, want -32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-32768 %s 1 = %d, want 32767", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32768 %s 32766 = %d, want 2", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32768 %s 32767 = %d, want 1", "-", r)
+ }
+ x = -32767
+ y = -32768
+ r = x - y
+ if r != 1 {
+ t.Errorf("-32767 %s -32768 = %d, want 1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32766 {
+ t.Errorf("-32767 %s -1 = %d, want -32766", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-32767 %s 1 = %d, want -32768", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 3 {
+ t.Errorf("-32767 %s 32766 = %d, want 3", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 2 {
+ t.Errorf("-32767 %s 32767 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -32768
+ r = x - y
+ if r != 32767 {
+ t.Errorf("-1 %s -32768 = %d, want 32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32766 {
+ t.Errorf("-1 %s -32767 = %d, want 32766", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32767 {
+ t.Errorf("-1 %s 32766 = %d, want -32767", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("-1 %s 32767 = %d, want -32768", "-", r)
+ }
+ x = 0
+ y = -32768
+ r = x - y
+ if r != -32768 {
+ t.Errorf("0 %s -32768 = %d, want -32768", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != 32767 {
+ t.Errorf("0 %s -32767 = %d, want 32767", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32766 {
+ t.Errorf("0 %s 32766 = %d, want -32766", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32767 {
+ t.Errorf("0 %s 32767 = %d, want -32767", "-", r)
+ }
+ x = 1
+ y = -32768
+ r = x - y
+ if r != -32767 {
+ t.Errorf("1 %s -32768 = %d, want -32767", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -32768 {
+ t.Errorf("1 %s -32767 = %d, want -32768", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != -32765 {
+ t.Errorf("1 %s 32766 = %d, want -32765", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -32766 {
+ t.Errorf("1 %s 32767 = %d, want -32766", "-", r)
+ }
+ x = 32766
+ y = -32768
+ r = x - y
+ if r != -2 {
+ t.Errorf("32766 %s -32768 = %d, want -2", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -3 {
+ t.Errorf("32766 %s -32767 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32766 %s -1 = %d, want 32767", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32765 {
+ t.Errorf("32766 %s 1 = %d, want 32765", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != -1 {
+ t.Errorf("32766 %s 32767 = %d, want -1", "-", r)
+ }
+ x = 32767
+ y = -32768
+ r = x - y
+ if r != -1 {
+ t.Errorf("32767 %s -32768 = %d, want -1", "-", r)
+ }
+ y = -32767
+ r = x - y
+ if r != -2 {
+ t.Errorf("32767 %s -32767 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -32768 {
+ t.Errorf("32767 %s -1 = %d, want -32768", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 32766 {
+ t.Errorf("32767 %s 1 = %d, want 32766", "-", r)
+ }
+ y = 32766
+ r = x - y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "-", r)
+ }
+ y = 32767
+ r = x - y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "-", r)
+ }
+}
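+// Note: the int16 division cases below include -32768 / -1, which cannot be
+// represented as +32768 and therefore wraps back to -32768 under
+// two's-complement overflow, exactly as the expected values assert.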
+func TestConstFoldint16div(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32768 = %d, want 1", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32768 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32766 = %d, want 0", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32766
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s -32767 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32766 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 0 {
+ t.Errorf("32766 %s 32767 = %d, want 0", "/", r)
+ }
+ x = 32767
+ y = -32768
+ r = x / y
+ if r != 0 {
+ t.Errorf("32767 %s -32768 = %d, want 0", "/", r)
+ }
+ y = -32767
+ r = x / y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "/", r)
+ }
+ y = 32766
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "/", r)
+ }
+ y = 32767
+ r = x / y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint16mul(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -32767 = %d, want -32768", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s -1 = %d, want -32768", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 1 = %d, want -32768", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32768 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32768 %s 32767 = %d, want -32768", "*", r)
+ }
+ x = -32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("-32767 %s -32767 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-32767 %s -1 = %d, want 32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-32767 %s 1 = %d, want -32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("-32767 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("-32767 %s 32767 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("-1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("-1 %s -32767 = %d, want 32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("-1 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("-1 %s 32767 = %d, want -32767", "*", r)
+ }
+ x = 0
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("1 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -32767 {
+ t.Errorf("1 %s -32767 = %d, want -32767", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 32766 {
+ t.Errorf("1 %s 32766 = %d, want 32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 32767 {
+ t.Errorf("1 %s 32767 = %d, want 32767", "*", r)
+ }
+ x = 32766
+ y = -32768
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s -32768 = %d, want 0", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s -1 = %d, want -32766", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32766 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32766 {
+ t.Errorf("32766 %s 1 = %d, want 32766", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != 4 {
+ t.Errorf("32766 %s 32766 = %d, want 4", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32766 %s 32767 = %d, want -32766", "*", r)
+ }
+ x = 32767
+ y = -32768
+ r = x * y
+ if r != -32768 {
+ t.Errorf("32767 %s -32768 = %d, want -32768", "*", r)
+ }
+ y = -32767
+ r = x * y
+ if r != -1 {
+ t.Errorf("32767 %s -32767 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -32767 {
+ t.Errorf("32767 %s -1 = %d, want -32767", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("32767 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 32767 {
+ t.Errorf("32767 %s 1 = %d, want 32767", "*", r)
+ }
+ y = 32766
+ r = x * y
+ if r != -32766 {
+ t.Errorf("32767 %s 32766 = %d, want -32766", "*", r)
+ }
+ y = 32767
+ r = x * y
+ if r != 1 {
+ t.Errorf("32767 %s 32767 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint16mod(t *testing.T) {
+ var x, y, r int16
+ x = -32768
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -2 {
+ t.Errorf("-32768 %s 32766 = %d, want -2", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32768 %s 32767 = %d, want -1", "%", r)
+ }
+ x = -32767
+ y = -32768
+ r = x % y
+ if r != -32767 {
+ t.Errorf("-32767 %s -32768 = %d, want -32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-32767 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("-32767 %s 32767 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -32768
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32768 = %d, want -1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -32767 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32766 = %d, want -1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 32767 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -32768
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32768 = %d, want 0", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 32767 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -32768
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32768 = %d, want 1", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -32767 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 32767 = %d, want 1", "%", r)
+ }
+ x = 32766
+ y = -32768
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32768 = %d, want 32766", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s -32767 = %d, want 32766", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 0 {
+ t.Errorf("32766 %s 32766 = %d, want 0", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 32766 {
+ t.Errorf("32766 %s 32767 = %d, want 32766", "%", r)
+ }
+ x = 32767
+ y = -32768
+ r = x % y
+ if r != 32767 {
+ t.Errorf("32767 %s -32768 = %d, want 32767", "%", r)
+ }
+ y = -32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -32767 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 1 = %d, want 0", "%", r)
+ }
+ y = 32766
+ r = x % y
+ if r != 1 {
+ t.Errorf("32767 %s 32766 = %d, want 1", "%", r)
+ }
+ y = 32767
+ r = x % y
+ if r != 0 {
+ t.Errorf("32767 %s 32767 = %d, want 0", "%", r)
+ }
+}
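+// The uint8 cases below exercise unsigned wraparound modulo 256
+// (for example 255+1 == 0, 0-1 == 255, and 255*255 == 1).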
+func TestConstFolduint8add(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 255 {
+ t.Errorf("0 %s 255 = %d, want 255", "+", r)
+ }
+ x = 1
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "+", r)
+ }
+ x = 255
+ y = 0
+ r = x + y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "+", r)
+ }
+ y = 255
+ r = x + y
+ if r != 254 {
+ t.Errorf("255 %s 255 = %d, want 254", "+", r)
+ }
+}
+func TestConstFolduint8sub(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 255 {
+ t.Errorf("0 %s 1 = %d, want 255", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s 255 = %d, want 1", "-", r)
+ }
+ x = 1
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s 255 = %d, want 2", "-", r)
+ }
+ x = 255
+ y = 0
+ r = x - y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "-", r)
+ }
+ y = 255
+ r = x - y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "-", r)
+ }
+}
+func TestConstFolduint8div(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "/", r)
+ }
+ x = 255
+ y = 1
+ r = x / y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "/", r)
+ }
+ y = 255
+ r = x / y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "/", r)
+ }
+}
+func TestConstFolduint8mul(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 255 {
+ t.Errorf("1 %s 255 = %d, want 255", "*", r)
+ }
+ x = 255
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("255 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 255 {
+ t.Errorf("255 %s 1 = %d, want 255", "*", r)
+ }
+ y = 255
+ r = x * y
+ if r != 1 {
+ t.Errorf("255 %s 255 = %d, want 1", "*", r)
+ }
+}
+func TestConstFolduint8mod(t *testing.T) {
+ var x, y, r uint8
+ x = 0
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 255 = %d, want 1", "%", r)
+ }
+ x = 255
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 1 = %d, want 0", "%", r)
+ }
+ y = 255
+ r = x % y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "%", r)
+ }
+}
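+// The int8 cases below repeat the same patterns at the int8 boundaries
+// (-128, -127, 126, 127), including -128 / -1 wrapping back to -128.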
+func TestConstFoldint8add(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x + y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 127 {
+ t.Errorf("-128 %s -1 = %d, want 127", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -127 {
+ t.Errorf("-128 %s 1 = %d, want -127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "+", r)
+ }
+ x = -127
+ y = -128
+ r = x + y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 2 {
+ t.Errorf("-127 %s -127 = %d, want 2", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -128 {
+ t.Errorf("-127 %s -1 = %d, want -128", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -126 {
+ t.Errorf("-127 %s 1 = %d, want -126", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "+", r)
+ }
+ x = -1
+ y = -128
+ r = x + y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -128 {
+ t.Errorf("-1 %s -127 = %d, want -128", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -2 {
+ t.Errorf("-1 %s -1 = %d, want -2", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 125 {
+ t.Errorf("-1 %s 126 = %d, want 125", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 126 {
+ t.Errorf("-1 %s 127 = %d, want 126", "+", r)
+ }
+ x = 0
+ y = -128
+ r = x + y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -127 {
+ t.Errorf("0 %s -127 = %d, want -127", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != -1 {
+ t.Errorf("0 %s -1 = %d, want -1", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 1 {
+ t.Errorf("0 %s 1 = %d, want 1", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 126 {
+ t.Errorf("0 %s 126 = %d, want 126", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != 127 {
+ t.Errorf("0 %s 127 = %d, want 127", "+", r)
+ }
+ x = 1
+ y = -128
+ r = x + y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -126 {
+ t.Errorf("1 %s -127 = %d, want -126", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != 127 {
+ t.Errorf("1 %s 126 = %d, want 127", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -128 {
+ t.Errorf("1 %s 127 = %d, want -128", "+", r)
+ }
+ x = 126
+ y = -128
+ r = x + y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != -1 {
+ t.Errorf("126 %s -127 = %d, want -1", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 125 {
+ t.Errorf("126 %s -1 = %d, want 125", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != 127 {
+ t.Errorf("126 %s 1 = %d, want 127", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -4 {
+ t.Errorf("126 %s 126 = %d, want -4", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -3 {
+ t.Errorf("126 %s 127 = %d, want -3", "+", r)
+ }
+ x = 127
+ y = -128
+ r = x + y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "+", r)
+ }
+ y = -127
+ r = x + y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "+", r)
+ }
+ y = -1
+ r = x + y
+ if r != 126 {
+ t.Errorf("127 %s -1 = %d, want 126", "+", r)
+ }
+ y = 0
+ r = x + y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "+", r)
+ }
+ y = 1
+ r = x + y
+ if r != -128 {
+ t.Errorf("127 %s 1 = %d, want -128", "+", r)
+ }
+ y = 126
+ r = x + y
+ if r != -3 {
+ t.Errorf("127 %s 126 = %d, want -3", "+", r)
+ }
+ y = 127
+ r = x + y
+ if r != -2 {
+ t.Errorf("127 %s 127 = %d, want -2", "+", r)
+ }
+}
+func TestConstFoldint8sub(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x - y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -127 {
+ t.Errorf("-128 %s -1 = %d, want -127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 127 {
+ t.Errorf("-128 %s 1 = %d, want 127", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 2 {
+ t.Errorf("-128 %s 126 = %d, want 2", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 1 {
+ t.Errorf("-128 %s 127 = %d, want 1", "-", r)
+ }
+ x = -127
+ y = -128
+ r = x - y
+ if r != 1 {
+ t.Errorf("-127 %s -128 = %d, want 1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -126 {
+ t.Errorf("-127 %s -1 = %d, want -126", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -128 {
+ t.Errorf("-127 %s 1 = %d, want -128", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 3 {
+ t.Errorf("-127 %s 126 = %d, want 3", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 2 {
+ t.Errorf("-127 %s 127 = %d, want 2", "-", r)
+ }
+ x = -1
+ y = -128
+ r = x - y
+ if r != 127 {
+ t.Errorf("-1 %s -128 = %d, want 127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 126 {
+ t.Errorf("-1 %s -127 = %d, want 126", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -127 {
+ t.Errorf("-1 %s 126 = %d, want -127", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -128 {
+ t.Errorf("-1 %s 127 = %d, want -128", "-", r)
+ }
+ x = 0
+ y = -128
+ r = x - y
+ if r != -128 {
+ t.Errorf("0 %s -128 = %d, want -128", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != 127 {
+ t.Errorf("0 %s -127 = %d, want 127", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 1 {
+ t.Errorf("0 %s -1 = %d, want 1", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != -1 {
+ t.Errorf("0 %s 1 = %d, want -1", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -126 {
+ t.Errorf("0 %s 126 = %d, want -126", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -127 {
+ t.Errorf("0 %s 127 = %d, want -127", "-", r)
+ }
+ x = 1
+ y = -128
+ r = x - y
+ if r != -127 {
+ t.Errorf("1 %s -128 = %d, want -127", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -128 {
+ t.Errorf("1 %s -127 = %d, want -128", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 2 {
+ t.Errorf("1 %s -1 = %d, want 2", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != -125 {
+ t.Errorf("1 %s 126 = %d, want -125", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -126 {
+ t.Errorf("1 %s 127 = %d, want -126", "-", r)
+ }
+ x = 126
+ y = -128
+ r = x - y
+ if r != -2 {
+ t.Errorf("126 %s -128 = %d, want -2", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -3 {
+ t.Errorf("126 %s -127 = %d, want -3", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != 127 {
+ t.Errorf("126 %s -1 = %d, want 127", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 125 {
+ t.Errorf("126 %s 1 = %d, want 125", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != -1 {
+ t.Errorf("126 %s 127 = %d, want -1", "-", r)
+ }
+ x = 127
+ y = -128
+ r = x - y
+ if r != -1 {
+ t.Errorf("127 %s -128 = %d, want -1", "-", r)
+ }
+ y = -127
+ r = x - y
+ if r != -2 {
+ t.Errorf("127 %s -127 = %d, want -2", "-", r)
+ }
+ y = -1
+ r = x - y
+ if r != -128 {
+ t.Errorf("127 %s -1 = %d, want -128", "-", r)
+ }
+ y = 0
+ r = x - y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "-", r)
+ }
+ y = 1
+ r = x - y
+ if r != 126 {
+ t.Errorf("127 %s 1 = %d, want 126", "-", r)
+ }
+ y = 126
+ r = x - y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "-", r)
+ }
+ y = 127
+ r = x - y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "-", r)
+ }
+}
+func TestConstFoldint8div(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -128 = %d, want 1", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-128 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "/", r)
+ }
+ x = -127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "/", r)
+ }
+ x = -1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("-1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 0
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "/", r)
+ }
+ x = 1
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 126 = %d, want 0", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("1 %s 127 = %d, want 0", "/", r)
+ }
+ x = 126
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s -127 = %d, want 0", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("126 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 0 {
+ t.Errorf("126 %s 127 = %d, want 0", "/", r)
+ }
+ x = 127
+ y = -128
+ r = x / y
+ if r != 0 {
+ t.Errorf("127 %s -128 = %d, want 0", "/", r)
+ }
+ y = -127
+ r = x / y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "/", r)
+ }
+ y = -1
+ r = x / y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "/", r)
+ }
+ y = 1
+ r = x / y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "/", r)
+ }
+ y = 126
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "/", r)
+ }
+ y = 127
+ r = x / y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "/", r)
+ }
+}
+func TestConstFoldint8mul(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -127 = %d, want -128", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s -1 = %d, want -128", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 1 = %d, want -128", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("-128 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -128 {
+ t.Errorf("-128 %s 127 = %d, want -128", "*", r)
+ }
+ x = -127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 1 {
+ t.Errorf("-127 %s -127 = %d, want 1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 127 {
+ t.Errorf("-127 %s -1 = %d, want 127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -127 {
+ t.Errorf("-127 %s 1 = %d, want -127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("-127 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -1 {
+ t.Errorf("-127 %s 127 = %d, want -1", "*", r)
+ }
+ x = -1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("-1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 127 {
+ t.Errorf("-1 %s -127 = %d, want 127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 1 {
+ t.Errorf("-1 %s -1 = %d, want 1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("-1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("-1 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -127 {
+ t.Errorf("-1 %s 127 = %d, want -127", "*", r)
+ }
+ x = 0
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "*", r)
+ }
+ x = 1
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("1 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -127 {
+ t.Errorf("1 %s -127 = %d, want -127", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -1 {
+ t.Errorf("1 %s -1 = %d, want -1", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("1 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 1 {
+ t.Errorf("1 %s 1 = %d, want 1", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 126 {
+ t.Errorf("1 %s 126 = %d, want 126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 127 {
+ t.Errorf("1 %s 127 = %d, want 127", "*", r)
+ }
+ x = 126
+ y = -128
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s -128 = %d, want 0", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s -1 = %d, want -126", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("126 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 126 {
+ t.Errorf("126 %s 1 = %d, want 126", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != 4 {
+ t.Errorf("126 %s 126 = %d, want 4", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != -126 {
+ t.Errorf("126 %s 127 = %d, want -126", "*", r)
+ }
+ x = 127
+ y = -128
+ r = x * y
+ if r != -128 {
+ t.Errorf("127 %s -128 = %d, want -128", "*", r)
+ }
+ y = -127
+ r = x * y
+ if r != -1 {
+ t.Errorf("127 %s -127 = %d, want -1", "*", r)
+ }
+ y = -1
+ r = x * y
+ if r != -127 {
+ t.Errorf("127 %s -1 = %d, want -127", "*", r)
+ }
+ y = 0
+ r = x * y
+ if r != 0 {
+ t.Errorf("127 %s 0 = %d, want 0", "*", r)
+ }
+ y = 1
+ r = x * y
+ if r != 127 {
+ t.Errorf("127 %s 1 = %d, want 127", "*", r)
+ }
+ y = 126
+ r = x * y
+ if r != -126 {
+ t.Errorf("127 %s 126 = %d, want -126", "*", r)
+ }
+ y = 127
+ r = x * y
+ if r != 1 {
+ t.Errorf("127 %s 127 = %d, want 1", "*", r)
+ }
+}
+func TestConstFoldint8mod(t *testing.T) {
+ var x, y, r int8
+ x = -128
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -2 {
+ t.Errorf("-128 %s 126 = %d, want -2", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-128 %s 127 = %d, want -1", "%", r)
+ }
+ x = -127
+ y = -128
+ r = x % y
+ if r != -127 {
+ t.Errorf("-127 %s -128 = %d, want -127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-127 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("-127 %s 127 = %d, want 0", "%", r)
+ }
+ x = -1
+ y = -128
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -128 = %d, want -1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s -127 = %d, want -1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("-1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 126 = %d, want -1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != -1 {
+ t.Errorf("-1 %s 127 = %d, want -1", "%", r)
+ }
+ x = 0
+ y = -128
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -128 = %d, want 0", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("0 %s 127 = %d, want 0", "%", r)
+ }
+ x = 1
+ y = -128
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -128 = %d, want 1", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s -127 = %d, want 1", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 1 {
+ t.Errorf("1 %s 127 = %d, want 1", "%", r)
+ }
+ x = 126
+ y = -128
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -128 = %d, want 126", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s -127 = %d, want 126", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 0 {
+ t.Errorf("126 %s 126 = %d, want 0", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 126 {
+ t.Errorf("126 %s 127 = %d, want 126", "%", r)
+ }
+ x = 127
+ y = -128
+ r = x % y
+ if r != 127 {
+ t.Errorf("127 %s -128 = %d, want 127", "%", r)
+ }
+ y = -127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -127 = %d, want 0", "%", r)
+ }
+ y = -1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s -1 = %d, want 0", "%", r)
+ }
+ y = 1
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 1 = %d, want 0", "%", r)
+ }
+ y = 126
+ r = x % y
+ if r != 1 {
+ t.Errorf("127 %s 126 = %d, want 1", "%", r)
+ }
+ y = 127
+ r = x % y
+ if r != 0 {
+ t.Errorf("127 %s 127 = %d, want 0", "%", r)
+ }
+}
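+// The unsigned shift cases below check that shifting a uint64 by a count of
+// 64 or more always yields 0, regardless of the shift-count type.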
+func TestConstFolduint64uint64lsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint64rsh(t *testing.T) {
+ var x, r uint64
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint32lsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint32rsh(t *testing.T) {
+ var x, r uint64
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint16lsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint16rsh(t *testing.T) {
+ var x, r uint64
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint64uint8lsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x << y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 18446744073709551614 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 18446744073709551614", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint64uint8rsh(t *testing.T) {
+ var x, r uint64
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 18446744073709551615
+ y = 0
+ r = x >> y
+ if r != 18446744073709551615 {
+ t.Errorf("18446744073709551615 %s 0 = %d, want 18446744073709551615", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("18446744073709551615 %s 1 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("18446744073709551615 %s 255 = %d, want 0", ">>", r)
+ }
+}
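+// For signed int64 operands, left shifts by 64 or more yield 0, while
+// arithmetic right shifts by 64 or more yield -1 for negative values and 0
+// for non-negative values, as the expected results below confirm.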
+func TestConstFoldint64uint64lsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint64rsh(t *testing.T) {
+ var x, r int64
+ var y uint64
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint32lsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint32rsh(t *testing.T) {
+ var x, r int64
+ var y uint32
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint16lsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint16rsh(t *testing.T) {
+ var x, r int64
+ var y uint16
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint64uint8lsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x << y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x << y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x << y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -8589934592 {
+ t.Errorf("-4294967296 %s 1 = %d, want -8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x << y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 8589934592 {
+ t.Errorf("4294967296 %s 1 = %d, want 8589934592", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x << y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x << y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint64uint8rsh(t *testing.T) {
+ var x, r int64
+ var y uint8
+ x = -9223372036854775808
+ y = 0
+ r = x >> y
+ if r != -9223372036854775808 {
+ t.Errorf("-9223372036854775808 %s 0 = %d, want -9223372036854775808", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775808 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775808 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -9223372036854775807
+ y = 0
+ r = x >> y
+ if r != -9223372036854775807 {
+ t.Errorf("-9223372036854775807 %s 0 = %d, want -9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -4611686018427387904 {
+ t.Errorf("-9223372036854775807 %s 1 = %d, want -4611686018427387904", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-9223372036854775807 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -4294967296
+ y = 0
+ r = x >> y
+ if r != -4294967296 {
+ t.Errorf("-4294967296 %s 0 = %d, want -4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-4294967296 %s 1 = %d, want -2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-4294967296 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967296
+ y = 0
+ r = x >> y
+ if r != 4294967296 {
+ t.Errorf("4294967296 %s 0 = %d, want 4294967296", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483648 {
+ t.Errorf("4294967296 %s 1 = %d, want 2147483648", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967296 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775806
+ y = 0
+ r = x >> y
+ if r != 9223372036854775806 {
+ t.Errorf("9223372036854775806 %s 0 = %d, want 9223372036854775806", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775806 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775806 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 9223372036854775807
+ y = 0
+ r = x >> y
+ if r != 9223372036854775807 {
+ t.Errorf("9223372036854775807 %s 0 = %d, want 9223372036854775807", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 4611686018427387903 {
+ t.Errorf("9223372036854775807 %s 1 = %d, want 4611686018427387903", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("9223372036854775807 %s 255 = %d, want 0", ">>", r)
+ }
+}
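+// For unsigned operands, a shift count of at least the operand width folds
+// to 0 for both left and right shifts.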
+func TestConstFolduint32uint64lsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint64rsh(t *testing.T) {
+ var x, r uint32
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint32lsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint32rsh(t *testing.T) {
+ var x, r uint32
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint16lsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint16rsh(t *testing.T) {
+ var x, r uint32
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint32uint8lsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x << y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 4294967294 {
+ t.Errorf("4294967295 %s 1 = %d, want 4294967294", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint32uint8rsh(t *testing.T) {
+ var x, r uint32
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 4294967295
+ y = 0
+ r = x >> y
+ if r != 4294967295 {
+ t.Errorf("4294967295 %s 0 = %d, want 4294967295", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("4294967295 %s 1 = %d, want 2147483647", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("4294967295 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint64lsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint64rsh(t *testing.T) {
+ var x, r int32
+ var y uint64
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint32lsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint32rsh(t *testing.T) {
+ var x, r int32
+ var y uint32
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint16lsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint16rsh(t *testing.T) {
+ var x, r int32
+ var y uint16
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint32uint8lsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x << y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483648 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x << y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-2147483647 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x << y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("2147483647 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint32uint8rsh(t *testing.T) {
+ var x, r int32
+ var y uint8
+ x = -2147483648
+ y = 0
+ r = x >> y
+ if r != -2147483648 {
+ t.Errorf("-2147483648 %s 0 = %d, want -2147483648", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483648 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483648 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -2147483647
+ y = 0
+ r = x >> y
+ if r != -2147483647 {
+ t.Errorf("-2147483647 %s 0 = %d, want -2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1073741824 {
+ t.Errorf("-2147483647 %s 1 = %d, want -1073741824", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-2147483647 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 2147483647
+ y = 0
+ r = x >> y
+ if r != 2147483647 {
+ t.Errorf("2147483647 %s 0 = %d, want 2147483647", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 1073741823 {
+ t.Errorf("2147483647 %s 1 = %d, want 1073741823", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("2147483647 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint64lsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint64rsh(t *testing.T) {
+ var x, r uint16
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint32lsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint32rsh(t *testing.T) {
+ var x, r uint16
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint16lsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint16rsh(t *testing.T) {
+ var x, r uint16
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint16uint8lsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 65535
+ y = 0
+ r = x << y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 65534 {
+ t.Errorf("65535 %s 1 = %d, want 65534", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint16uint8rsh(t *testing.T) {
+ var x, r uint16
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 65535
+ y = 0
+ r = x >> y
+ if r != 65535 {
+ t.Errorf("65535 %s 0 = %d, want 65535", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("65535 %s 1 = %d, want 32767", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("65535 %s 255 = %d, want 0", ">>", r)
+ }
+}
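+// The int16 cases below repeat the same pattern: shift counts of 0, 1, and
+// the maximum of the shift-count type, checking the folded result of each.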
+func TestConstFoldint16uint64lsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint64rsh(t *testing.T) {
+ var x, r int16
+ var y uint64
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint32lsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint32rsh(t *testing.T) {
+ var x, r int16
+ var y uint32
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint16lsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint16rsh(t *testing.T) {
+ var x, r int16
+ var y uint16
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint16uint8lsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x << y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32768 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -32767
+ y = 0
+ r = x << y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-32767 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-32767 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32766
+ y = 0
+ r = x << y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("32766 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 32767
+ y = 0
+ r = x << y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("32767 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint16uint8rsh(t *testing.T) {
+ var x, r int16
+ var y uint8
+ x = -32768
+ y = 0
+ r = x >> y
+ if r != -32768 {
+ t.Errorf("-32768 %s 0 = %d, want -32768", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32768 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32768 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -32767
+ y = 0
+ r = x >> y
+ if r != -32767 {
+ t.Errorf("-32767 %s 0 = %d, want -32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -16384 {
+ t.Errorf("-32767 %s 1 = %d, want -16384", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-32767 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32766
+ y = 0
+ r = x >> y
+ if r != 32766 {
+ t.Errorf("32766 %s 0 = %d, want 32766", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32766 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32766 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 32767
+ y = 0
+ r = x >> y
+ if r != 32767 {
+ t.Errorf("32767 %s 0 = %d, want 32767", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 16383 {
+ t.Errorf("32767 %s 1 = %d, want 16383", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("32767 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint64lsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint64rsh(t *testing.T) {
+ var x, r uint8
+ var y uint64
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint32lsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint32rsh(t *testing.T) {
+ var x, r uint8
+ var y uint32
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint16lsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint16rsh(t *testing.T) {
+ var x, r uint8
+ var y uint16
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFolduint8uint8lsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 255
+ y = 0
+ r = x << y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 254 {
+ t.Errorf("255 %s 1 = %d, want 254", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFolduint8uint8rsh(t *testing.T) {
+ var x, r uint8
+ var y uint8
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 255
+ y = 0
+ r = x >> y
+ if r != 255 {
+ t.Errorf("255 %s 0 = %d, want 255", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 127 {
+ t.Errorf("255 %s 1 = %d, want 127", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("255 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint64lsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967296
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", "<<", r)
+ }
+ y = 18446744073709551615
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint64rsh(t *testing.T) {
+ var x, r int8
+ var y uint64
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967296 = %d, want -1", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 18446744073709551615 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967296
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967296 = %d, want 0", ">>", r)
+ }
+ y = 18446744073709551615
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 18446744073709551615 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint32lsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 4294967295
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint32rsh(t *testing.T) {
+ var x, r int8
+ var y uint32
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 4294967295 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 4294967295 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 4294967295
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 4294967295 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint16lsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 65535
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint16rsh(t *testing.T) {
+ var x, r int8
+ var y uint16
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 65535 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 65535 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 65535
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 65535 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldint8uint8lsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x << y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-128 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -127
+ y = 0
+ r = x << y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("-127 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-127 %s 255 = %d, want 0", "<<", r)
+ }
+ x = -1
+ y = 0
+ r = x << y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("-1 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("-1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 0
+ y = 0
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 1
+ y = 0
+ r = x << y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != 2 {
+ t.Errorf("1 %s 1 = %d, want 2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 126
+ y = 0
+ r = x << y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -4 {
+ t.Errorf("126 %s 1 = %d, want -4", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", "<<", r)
+ }
+ x = 127
+ y = 0
+ r = x << y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", "<<", r)
+ }
+ y = 1
+ r = x << y
+ if r != -2 {
+ t.Errorf("127 %s 1 = %d, want -2", "<<", r)
+ }
+ y = 255
+ r = x << y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", "<<", r)
+ }
+}
+func TestConstFoldint8uint8rsh(t *testing.T) {
+ var x, r int8
+ var y uint8
+ x = -128
+ y = 0
+ r = x >> y
+ if r != -128 {
+ t.Errorf("-128 %s 0 = %d, want -128", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-128 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-128 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -127
+ y = 0
+ r = x >> y
+ if r != -127 {
+ t.Errorf("-127 %s 0 = %d, want -127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -64 {
+ t.Errorf("-127 %s 1 = %d, want -64", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-127 %s 255 = %d, want -1", ">>", r)
+ }
+ x = -1
+ y = 0
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 0 = %d, want -1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 1 = %d, want -1", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != -1 {
+ t.Errorf("-1 %s 255 = %d, want -1", ">>", r)
+ }
+ x = 0
+ y = 0
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 0 = %d, want 0", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("0 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 1
+ y = 0
+ r = x >> y
+ if r != 1 {
+ t.Errorf("1 %s 0 = %d, want 1", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 1 = %d, want 0", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("1 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 126
+ y = 0
+ r = x >> y
+ if r != 126 {
+ t.Errorf("126 %s 0 = %d, want 126", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("126 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("126 %s 255 = %d, want 0", ">>", r)
+ }
+ x = 127
+ y = 0
+ r = x >> y
+ if r != 127 {
+ t.Errorf("127 %s 0 = %d, want 127", ">>", r)
+ }
+ y = 1
+ r = x >> y
+ if r != 63 {
+ t.Errorf("127 %s 1 = %d, want 63", ">>", r)
+ }
+ y = 255
+ r = x >> y
+ if r != 0 {
+ t.Errorf("127 %s 255 = %d, want 0", ">>", r)
+ }
+}
+func TestConstFoldCompareuint64(t *testing.T) {
+ {
+ var x uint64 = 0
+ var y uint64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 0
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 1
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 4294967296
+ var y uint64 = 18446744073709551615
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint64 = 18446744073709551615
+ var y uint64 = 18446744073709551615
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint64(t *testing.T) {
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775808
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775808
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -9223372036854775807
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = -1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 0
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 1
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 4294967296
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 4294967296
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775806
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775806
+ var y int64 = 9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775808
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -9223372036854775807
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 4294967296
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775806
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int64 = 9223372036854775807
+ var y int64 = 9223372036854775807
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint32(t *testing.T) {
+ {
+ var x uint32 = 0
+ var y uint32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 0
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 1
+ var y uint32 = 4294967295
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint32 = 4294967295
+ var y uint32 = 4294967295
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint32(t *testing.T) {
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483648
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483648
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -2147483647
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = -1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 0
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 1
+ var y int32 = 2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483648
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -2147483647
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int32 = 2147483647
+ var y int32 = 2147483647
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint16(t *testing.T) {
+ {
+ var x uint16 = 0
+ var y uint16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 0
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 1
+ var y uint16 = 65535
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint16 = 65535
+ var y uint16 = 65535
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint16(t *testing.T) {
+ {
+ var x int16 = -32768
+ var y int16 = -32768
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32768
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -32767
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = -1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 0
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 1
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32766
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32766
+ var y int16 = 32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32768
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -32767
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32766
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int16 = 32767
+ var y int16 = 32767
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareuint8(t *testing.T) {
+ {
+ var x uint8 = 0
+ var y uint8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 0
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 1
+ var y uint8 = 255
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x uint8 = 255
+ var y uint8 = 255
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
+func TestConstFoldCompareint8(t *testing.T) {
+ {
+ var x int8 = -128
+ var y int8 = -128
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -128
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -127
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = -1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = -1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 0
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 0
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 1
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 1
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 126
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 126
+ var y int8 = 127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if !(x < y) {
+ t.Errorf("!(%d < %d)", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if x >= y {
+ t.Errorf("%d >= %d", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -128
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -127
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = -1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 0
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 1
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 126
+ if x == y {
+ t.Errorf("%d == %d", x, y)
+ }
+ if !(x != y) {
+ t.Errorf("!(%d != %d)", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if !(x > y) {
+ t.Errorf("!(%d > %d)", x, y)
+ }
+ if x <= y {
+ t.Errorf("%d <= %d", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+ {
+ var x int8 = 127
+ var y int8 = 127
+ if !(x == y) {
+ t.Errorf("!(%d == %d)", x, y)
+ }
+ if x != y {
+ t.Errorf("%d != %d", x, y)
+ }
+ if x < y {
+ t.Errorf("%d < %d", x, y)
+ }
+ if x > y {
+ t.Errorf("%d > %d", x, y)
+ }
+ if !(x <= y) {
+ t.Errorf("!(%d <= %d)", x, y)
+ }
+ if !(x >= y) {
+ t.Errorf("!(%d >= %d)", x, y)
+ }
+ }
+}
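
The exhaustive tables above are clearly machine generated: every ordered pair of boundary values for the type is checked with all six comparison operators, emitted in the expected-true form (if !(x OP y) { t.Errorf(...) }) when the relation holds and in the expected-false form otherwise. A minimal sketch of how such a generator could be written follows; it is an illustration only, not the generator used in the Go tree, and its names and value lists are invented for the example.

package main

import (
	"fmt"
	"strings"
)

// genCompare emits one TestConstFoldCompare<name> function that checks every
// ordered pair of values with all six comparison operators. The expected
// result is computed here in the generator, so the emitted test contains only
// comparisons of typed constants for the compiler to fold.
func genCompare(name string, vals []int64) string {
	ops := []struct {
		sym  string
		eval func(x, y int64) bool
	}{
		{"==", func(x, y int64) bool { return x == y }},
		{"!=", func(x, y int64) bool { return x != y }},
		{"<", func(x, y int64) bool { return x < y }},
		{">", func(x, y int64) bool { return x > y }},
		{"<=", func(x, y int64) bool { return x <= y }},
		{">=", func(x, y int64) bool { return x >= y }},
	}
	var b strings.Builder
	fmt.Fprintf(&b, "func TestConstFoldCompare%s(t *testing.T) {\n", name)
	for _, x := range vals {
		for _, y := range vals {
			fmt.Fprintf(&b, "\t{\n\t\tvar x %s = %d\n\t\tvar y %s = %d\n", name, x, name, y)
			for _, op := range ops {
				if op.eval(x, y) {
					fmt.Fprintf(&b, "\t\tif !(x %s y) {\n\t\t\tt.Errorf(\"!(%%d %s %%d)\", x, y)\n\t\t}\n", op.sym, op.sym)
				} else {
					fmt.Fprintf(&b, "\t\tif x %s y {\n\t\t\tt.Errorf(\"%%d %s %%d\", x, y)\n\t\t}\n", op.sym, op.sym)
				}
			}
			b.WriteString("\t}\n")
		}
	}
	b.WriteString("}\n")
	return b.String()
}

func main() {
	fmt.Print(genCompare("int16", []int64{-32768, -32767, -1, 0, 1, 32766, 32767}))
}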
diff --git a/src/cmd/compile/internal/test/dep_test.go b/src/cmd/compile/internal/test/dep_test.go
new file mode 100644
index 0000000..698a848
--- /dev/null
+++ b/src/cmd/compile/internal/test/dep_test.go
@@ -0,0 +1,30 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "os/exec"
+ "strings"
+ "testing"
+)
+
+func TestDeps(t *testing.T) {
+ out, err := exec.Command(testenv.GoToolPath(t), "list", "-f", "{{.Deps}}", "cmd/compile/internal/gc").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, dep := range strings.Fields(strings.Trim(string(out), "[]")) {
+ switch dep {
+ case "go/build", "go/scanner":
+ // cmd/compile/internal/importer introduces a dependency
+ // on go/build and go/token; cmd/compile/internal/ uses
+ // go/constant which uses go/token in its API. Once we
+ // got rid of those dependencies, enable this check again.
+			// get rid of those dependencies, enable this check again.
+ // t.Errorf("undesired dependency on %q", dep)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/divconst_test.go b/src/cmd/compile/internal/test/divconst_test.go
new file mode 100644
index 0000000..9358a60
--- /dev/null
+++ b/src/cmd/compile/internal/test/divconst_test.go
@@ -0,0 +1,325 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var boolres bool
+
+var i64res int64
+
+func BenchmarkDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ }
+}
+
+func BenchmarkModconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%16 == 0
+ }
+}
+func BenchmarkDivisibleconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i64res = int64(i) / 7
+ boolres = int64(i)%7 == 0
+ }
+}
+
+var u64res uint64
+
+func TestDivmodConstU64(t *testing.T) {
+ // Test division by c. Function f must be func(n) { return n/c, n%c }
+ testdiv := func(c uint64, f func(uint64) (uint64, uint64)) func(*testing.T) {
+ return func(t *testing.T) {
+ x := uint64(12345)
+ for i := 0; i < 10000; i++ {
+ x += x << 2
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ max := uint64(1<<64-1) / c * c
+ xs := []uint64{0, 1, c - 1, c, c + 1, 2*c - 1, 2 * c, 2*c + 1,
+ c*c - 1, c * c, c*c + 1, max - 1, max, max + 1, 1<<64 - 1}
+ for _, x := range xs {
+ q, r := f(x)
+ if r < 0 || r >= c || q*c+r != x {
+ t.Errorf("divmod(%d, %d) returned incorrect (%d, %d)", x, c, q, r)
+ }
+ }
+ }
+ }
+ t.Run("2", testdiv(2, func(n uint64) (uint64, uint64) { return n / 2, n % 2 }))
+ t.Run("3", testdiv(3, func(n uint64) (uint64, uint64) { return n / 3, n % 3 }))
+ t.Run("4", testdiv(4, func(n uint64) (uint64, uint64) { return n / 4, n % 4 }))
+ t.Run("5", testdiv(5, func(n uint64) (uint64, uint64) { return n / 5, n % 5 }))
+ t.Run("6", testdiv(6, func(n uint64) (uint64, uint64) { return n / 6, n % 6 }))
+ t.Run("7", testdiv(7, func(n uint64) (uint64, uint64) { return n / 7, n % 7 }))
+ t.Run("8", testdiv(8, func(n uint64) (uint64, uint64) { return n / 8, n % 8 }))
+ t.Run("9", testdiv(9, func(n uint64) (uint64, uint64) { return n / 9, n % 9 }))
+ t.Run("10", testdiv(10, func(n uint64) (uint64, uint64) { return n / 10, n % 10 }))
+ t.Run("11", testdiv(11, func(n uint64) (uint64, uint64) { return n / 11, n % 11 }))
+ t.Run("12", testdiv(12, func(n uint64) (uint64, uint64) { return n / 12, n % 12 }))
+ t.Run("13", testdiv(13, func(n uint64) (uint64, uint64) { return n / 13, n % 13 }))
+ t.Run("14", testdiv(14, func(n uint64) (uint64, uint64) { return n / 14, n % 14 }))
+ t.Run("15", testdiv(15, func(n uint64) (uint64, uint64) { return n / 15, n % 15 }))
+ t.Run("16", testdiv(16, func(n uint64) (uint64, uint64) { return n / 16, n % 16 }))
+ t.Run("17", testdiv(17, func(n uint64) (uint64, uint64) { return n / 17, n % 17 }))
+ t.Run("255", testdiv(255, func(n uint64) (uint64, uint64) { return n / 255, n % 255 }))
+ t.Run("256", testdiv(256, func(n uint64) (uint64, uint64) { return n / 256, n % 256 }))
+ t.Run("257", testdiv(257, func(n uint64) (uint64, uint64) { return n / 257, n % 257 }))
+ t.Run("65535", testdiv(65535, func(n uint64) (uint64, uint64) { return n / 65535, n % 65535 }))
+ t.Run("65536", testdiv(65536, func(n uint64) (uint64, uint64) { return n / 65536, n % 65536 }))
+ t.Run("65537", testdiv(65537, func(n uint64) (uint64, uint64) { return n / 65537, n % 65537 }))
+ t.Run("1<<32-1", testdiv(1<<32-1, func(n uint64) (uint64, uint64) { return n / (1<<32 - 1), n % (1<<32 - 1) }))
+ t.Run("1<<32+1", testdiv(1<<32+1, func(n uint64) (uint64, uint64) { return n / (1<<32 + 1), n % (1<<32 + 1) }))
+ t.Run("1<<64-1", testdiv(1<<64-1, func(n uint64) (uint64, uint64) { return n / (1<<64 - 1), n % (1<<64 - 1) }))
+}
+
+func BenchmarkDivconstU64(b *testing.B) {
+ b.Run("3", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 3
+ }
+ })
+ b.Run("5", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 5
+ }
+ })
+ b.Run("37", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 37
+ }
+ })
+ b.Run("1234567", func(b *testing.B) {
+ x := uint64(123456789123456789)
+ for i := 0; i < b.N; i++ {
+ x += x << 4
+ u64res = uint64(x) / 1234567
+ }
+ })
+}
+
+func BenchmarkModconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU64(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u64res = uint64(i) / 7
+ boolres = uint64(i)%7 == 0
+ }
+}
+
+var i32res int32
+
+func BenchmarkDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ }
+}
+
+func BenchmarkModconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i32res = int32(i) / 7
+ boolres = int32(i)%7 == 0
+ }
+}
+
+var u32res uint32
+
+func BenchmarkDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ }
+}
+
+func BenchmarkModconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU32(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u32res = uint32(i) / 7
+ boolres = uint32(i)%7 == 0
+ }
+}
+
+var i16res int16
+
+func BenchmarkDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ }
+}
+
+func BenchmarkModconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i16res = int16(i) / 7
+ boolres = int16(i)%7 == 0
+ }
+}
+
+var u16res uint16
+
+func BenchmarkDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ }
+}
+
+func BenchmarkModconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU16(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u16res = uint16(i) / 7
+ boolres = uint16(i)%7 == 0
+ }
+}
+
+var i8res int8
+
+func BenchmarkDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ }
+}
+
+func BenchmarkModconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) % 7
+ }
+}
+
+func BenchmarkDivisiblePow2constI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%16 == 0
+ }
+}
+
+func BenchmarkDivisibleconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = int8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstI8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ i8res = int8(i) / 7
+ boolres = int8(i)%7 == 0
+ }
+}
+
+var u8res uint8
+
+func BenchmarkDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ }
+}
+
+func BenchmarkModconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) % 7
+ }
+}
+
+func BenchmarkDivisibleconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ boolres = uint8(i)%7 == 0
+ }
+}
+
+func BenchmarkDivisibleWDivconstU8(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ u8res = uint8(i) / 7
+ boolres = uint8(i)%7 == 0
+ }
+}
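
The DivisibleconstU64 and similar benchmarks above time the pattern n % c == 0 for a constant c. For odd constants, one standard way to answer that question without dividing is to multiply by the constant's multiplicative inverse modulo 2^64 and compare against floor((2^64-1)/c). The sketch below illustrates that trick and brute-force checks it against the plain % operator; it shows the general technique, not the exact rewrite rules the compiler uses, and all of its names are invented for the example.

package main

import (
	"fmt"
	"math"
)

// modInverse64 returns the multiplicative inverse of d modulo 2^64.
// d must be odd. Newton's iteration doubles the number of correct
// low-order bits each round, so five rounds cover all 64 bits.
func modInverse64(d uint64) uint64 {
	x := d // already correct to 3 bits for odd d
	for i := 0; i < 5; i++ {
		x *= 2 - d*x
	}
	return x
}

// divisibleBy reports whether n is divisible by the odd constant d using one
// multiplication and one comparison: n % d == 0 exactly when
// n * inverse(d) (mod 2^64) <= floor((2^64-1) / d).
func divisibleBy(n, d uint64) bool {
	return n*modInverse64(d) <= math.MaxUint64/d
}

func main() {
	const d = 7
	for n := uint64(0); n < 1_000_000; n++ {
		if got, want := divisibleBy(n, d), n%d == 0; got != want {
			fmt.Printf("mismatch at n=%d: got %v, want %v\n", n, got, want)
			return
		}
	}
	fmt.Println("multiply-based divisibility test agrees with % for d =", d)
}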
diff --git a/src/cmd/compile/internal/test/fixedbugs_test.go b/src/cmd/compile/internal/test/fixedbugs_test.go
new file mode 100644
index 0000000..376b45e
--- /dev/null
+++ b/src/cmd/compile/internal/test/fixedbugs_test.go
@@ -0,0 +1,92 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+type T struct {
+ x [2]int64 // field that will be clobbered. Also makes type not SSAable.
+ p *byte // has a pointer
+}
+
+//go:noinline
+func makeT() T {
+ return T{}
+}
+
+var g T
+
+var sink interface{}
+
+func TestIssue15854(t *testing.T) {
+ for i := 0; i < 10000; i++ {
+ if g.x[0] != 0 {
+ t.Fatalf("g.x[0] clobbered with %x\n", g.x[0])
+ }
+ // The bug was in the following assignment. The return
+ // value of makeT() is not copied out of the args area of
+		// value of makeT() is not copied out of the args area of the
+		// stack frame in a timely fashion. So when write barriers
+ // barrier call clobbers the result of makeT() before it is
+ // read by the write barrier code.
+ g = makeT()
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+}
+func TestIssue15854b(t *testing.T) {
+ const N = 10000
+ a := make([]T, N)
+ for i := 0; i < N; i++ {
+ a = append(a, makeT())
+ sink = make([]byte, 1000) // force write barriers to eventually happen
+ }
+ for i, v := range a {
+ if v.x[0] != 0 {
+ t.Fatalf("a[%d].x[0] clobbered with %x\n", i, v.x[0])
+ }
+ }
+}
+
+// Test that the generated assembly has line numbers (Issue #16214).
+func TestIssue16214(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ dir, err := ioutil.TempDir("", "TestLineNumber")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "x.go")
+ err = ioutil.WriteFile(src, []byte(issue16214src), 0644)
+ if err != nil {
+ t.Fatalf("could not write file: %v", err)
+ }
+
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-S", "-o", filepath.Join(dir, "out.o"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("go tool compile: %v\n%s", err, out)
+ }
+
+ if strings.Contains(string(out), "unknown line number") {
+ t.Errorf("line number missing in assembly:\n%s", out)
+ }
+}
+
+var issue16214src = `
+package main
+
+func Mod32(x uint32) uint32 {
+ return x % 3 // frontend rewrites it as HMUL with 2863311531, the LITERAL node has unknown Pos
+}
+`
diff --git a/src/cmd/compile/internal/test/float_test.go b/src/cmd/compile/internal/test/float_test.go
new file mode 100644
index 0000000..884a983
--- /dev/null
+++ b/src/cmd/compile/internal/test/float_test.go
@@ -0,0 +1,544 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "math"
+ "testing"
+)
+
+//go:noinline
+func compare1(a, b float64) bool {
+ return a < b
+}
+
+//go:noinline
+func compare2(a, b float32) bool {
+ return a < b
+}
+
+func TestFloatCompare(t *testing.T) {
+ if !compare1(3, 5) {
+ t.Errorf("compare1 returned false")
+ }
+ if !compare2(3, 5) {
+ t.Errorf("compare2 returned false")
+ }
+}
+
+func TestFloatCompareFolded(t *testing.T) {
+ // float64 comparisons
+ d1, d3, d5, d9 := float64(1), float64(3), float64(5), float64(9)
+ if d3 == d5 {
+ t.Errorf("d3 == d5 returned true")
+ }
+ if d3 != d3 {
+ t.Errorf("d3 != d3 returned true")
+ }
+ if d3 > d5 {
+ t.Errorf("d3 > d5 returned true")
+ }
+ if d3 >= d9 {
+ t.Errorf("d3 >= d9 returned true")
+ }
+ if d5 < d1 {
+ t.Errorf("d5 < d1 returned true")
+ }
+ if d9 <= d1 {
+ t.Errorf("d9 <= d1 returned true")
+ }
+ if math.NaN() == math.NaN() {
+ t.Errorf("math.NaN() == math.NaN() returned true")
+ }
+ if math.NaN() >= math.NaN() {
+ t.Errorf("math.NaN() >= math.NaN() returned true")
+ }
+ if math.NaN() <= math.NaN() {
+ t.Errorf("math.NaN() <= math.NaN() returned true")
+ }
+ if math.Copysign(math.NaN(), -1) < math.NaN() {
+ t.Errorf("math.Copysign(math.NaN(), -1) < math.NaN() returned true")
+ }
+ if math.Inf(1) != math.Inf(1) {
+ t.Errorf("math.Inf(1) != math.Inf(1) returned true")
+ }
+ if math.Inf(-1) != math.Inf(-1) {
+ t.Errorf("math.Inf(-1) != math.Inf(-1) returned true")
+ }
+ if math.Copysign(0, -1) != 0 {
+ t.Errorf("math.Copysign(0, -1) != 0 returned true")
+ }
+ if math.Copysign(0, -1) < 0 {
+ t.Errorf("math.Copysign(0, -1) < 0 returned true")
+ }
+ if 0 > math.Copysign(0, -1) {
+ t.Errorf("0 > math.Copysign(0, -1) returned true")
+ }
+
+ // float32 comparisons
+ s1, s3, s5, s9 := float32(1), float32(3), float32(5), float32(9)
+ if s3 == s5 {
+ t.Errorf("s3 == s5 returned true")
+ }
+ if s3 != s3 {
+ t.Errorf("s3 != s3 returned true")
+ }
+ if s3 > s5 {
+ t.Errorf("s3 > s5 returned true")
+ }
+ if s3 >= s9 {
+ t.Errorf("s3 >= s9 returned true")
+ }
+ if s5 < s1 {
+ t.Errorf("s5 < s1 returned true")
+ }
+ if s9 <= s1 {
+ t.Errorf("s9 <= s1 returned true")
+ }
+ sPosNaN, sNegNaN := float32(math.NaN()), float32(math.Copysign(math.NaN(), -1))
+ if sPosNaN == sPosNaN {
+ t.Errorf("sPosNaN == sPosNaN returned true")
+ }
+ if sPosNaN >= sPosNaN {
+ t.Errorf("sPosNaN >= sPosNaN returned true")
+ }
+ if sPosNaN <= sPosNaN {
+ t.Errorf("sPosNaN <= sPosNaN returned true")
+ }
+ if sNegNaN < sPosNaN {
+ t.Errorf("sNegNaN < sPosNaN returned true")
+ }
+ sPosInf, sNegInf := float32(math.Inf(1)), float32(math.Inf(-1))
+ if sPosInf != sPosInf {
+ t.Errorf("sPosInf != sPosInf returned true")
+ }
+ if sNegInf != sNegInf {
+ t.Errorf("sNegInf != sNegInf returned true")
+ }
+ sNegZero := float32(math.Copysign(0, -1))
+ if sNegZero != 0 {
+ t.Errorf("sNegZero != 0 returned true")
+ }
+ if sNegZero < 0 {
+ t.Errorf("sNegZero < 0 returned true")
+ }
+ if 0 > sNegZero {
+ t.Errorf("0 > sNegZero returned true")
+ }
+}
+
+//go:noinline
+func cvt1(a float64) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt2(a float64) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt3(a float32) uint64 {
+ return uint64(a)
+}
+
+//go:noinline
+func cvt4(a float32) uint32 {
+ return uint32(a)
+}
+
+//go:noinline
+func cvt5(a float64) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt6(a float64) int32 {
+ return int32(a)
+}
+
+//go:noinline
+func cvt7(a float32) int64 {
+ return int64(a)
+}
+
+//go:noinline
+func cvt8(a float32) int32 {
+ return int32(a)
+}
+
+// make sure to cover int, uint cases (issue #16738)
+//go:noinline
+func cvt9(a float64) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt10(a float64) uint {
+ return uint(a)
+}
+
+//go:noinline
+func cvt11(a float32) int {
+ return int(a)
+}
+
+//go:noinline
+func cvt12(a float32) uint {
+ return uint(a)
+}
+
+//go:noinline
+func f2i64p(v float64) *int64 {
+ return ip64(int64(v / 0.1))
+}
+
+//go:noinline
+func ip64(v int64) *int64 {
+ return &v
+}
+
+func TestFloatConvert(t *testing.T) {
+ if got := cvt1(3.5); got != 3 {
+ t.Errorf("cvt1 got %d, wanted 3", got)
+ }
+ if got := cvt2(3.5); got != 3 {
+ t.Errorf("cvt2 got %d, wanted 3", got)
+ }
+ if got := cvt3(3.5); got != 3 {
+ t.Errorf("cvt3 got %d, wanted 3", got)
+ }
+ if got := cvt4(3.5); got != 3 {
+ t.Errorf("cvt4 got %d, wanted 3", got)
+ }
+ if got := cvt5(3.5); got != 3 {
+ t.Errorf("cvt5 got %d, wanted 3", got)
+ }
+ if got := cvt6(3.5); got != 3 {
+ t.Errorf("cvt6 got %d, wanted 3", got)
+ }
+ if got := cvt7(3.5); got != 3 {
+ t.Errorf("cvt7 got %d, wanted 3", got)
+ }
+ if got := cvt8(3.5); got != 3 {
+ t.Errorf("cvt8 got %d, wanted 3", got)
+ }
+ if got := cvt9(3.5); got != 3 {
+ t.Errorf("cvt9 got %d, wanted 3", got)
+ }
+ if got := cvt10(3.5); got != 3 {
+ t.Errorf("cvt10 got %d, wanted 3", got)
+ }
+ if got := cvt11(3.5); got != 3 {
+ t.Errorf("cvt11 got %d, wanted 3", got)
+ }
+ if got := cvt12(3.5); got != 3 {
+ t.Errorf("cvt12 got %d, wanted 3", got)
+ }
+ if got := *f2i64p(10); got != 100 {
+ t.Errorf("f2i64p got %d, wanted 100", got)
+ }
+}
+
+func TestFloatConvertFolded(t *testing.T) {
+ // Assign constants to variables so that they are (hopefully) constant folded
+ // by the SSA backend rather than the frontend.
+ u64, u32, u16, u8 := uint64(1<<63), uint32(1<<31), uint16(1<<15), uint8(1<<7)
+ i64, i32, i16, i8 := int64(-1<<63), int32(-1<<31), int16(-1<<15), int8(-1<<7)
+ du64, du32, du16, du8 := float64(1<<63), float64(1<<31), float64(1<<15), float64(1<<7)
+ di64, di32, di16, di8 := float64(-1<<63), float64(-1<<31), float64(-1<<15), float64(-1<<7)
+ su64, su32, su16, su8 := float32(1<<63), float32(1<<31), float32(1<<15), float32(1<<7)
+ si64, si32, si16, si8 := float32(-1<<63), float32(-1<<31), float32(-1<<15), float32(-1<<7)
+
+ // integer to float
+ if float64(u64) != du64 {
+ t.Errorf("float64(u64) != du64")
+ }
+ if float64(u32) != du32 {
+ t.Errorf("float64(u32) != du32")
+ }
+ if float64(u16) != du16 {
+ t.Errorf("float64(u16) != du16")
+ }
+ if float64(u8) != du8 {
+ t.Errorf("float64(u8) != du8")
+ }
+ if float64(i64) != di64 {
+ t.Errorf("float64(i64) != di64")
+ }
+ if float64(i32) != di32 {
+ t.Errorf("float64(i32) != di32")
+ }
+ if float64(i16) != di16 {
+ t.Errorf("float64(i16) != di16")
+ }
+ if float64(i8) != di8 {
+ t.Errorf("float64(i8) != di8")
+ }
+ if float32(u64) != su64 {
+ t.Errorf("float32(u64) != su64")
+ }
+ if float32(u32) != su32 {
+ t.Errorf("float32(u32) != su32")
+ }
+ if float32(u16) != su16 {
+ t.Errorf("float32(u16) != su16")
+ }
+ if float32(u8) != su8 {
+ t.Errorf("float32(u8) != su8")
+ }
+ if float32(i64) != si64 {
+ t.Errorf("float32(i64) != si64")
+ }
+ if float32(i32) != si32 {
+ t.Errorf("float32(i32) != si32")
+ }
+ if float32(i16) != si16 {
+ t.Errorf("float32(i16) != si16")
+ }
+ if float32(i8) != si8 {
+ t.Errorf("float32(i8) != si8")
+ }
+
+ // float to integer
+ if uint64(du64) != u64 {
+ t.Errorf("uint64(du64) != u64")
+ }
+ if uint32(du32) != u32 {
+ t.Errorf("uint32(du32) != u32")
+ }
+ if uint16(du16) != u16 {
+ t.Errorf("uint16(du16) != u16")
+ }
+ if uint8(du8) != u8 {
+ t.Errorf("uint8(du8) != u8")
+ }
+ if int64(di64) != i64 {
+ t.Errorf("int64(di64) != i64")
+ }
+ if int32(di32) != i32 {
+ t.Errorf("int32(di32) != i32")
+ }
+ if int16(di16) != i16 {
+ t.Errorf("int16(di16) != i16")
+ }
+ if int8(di8) != i8 {
+ t.Errorf("int8(di8) != i8")
+ }
+ if uint64(su64) != u64 {
+ t.Errorf("uint64(su64) != u64")
+ }
+ if uint32(su32) != u32 {
+ t.Errorf("uint32(su32) != u32")
+ }
+ if uint16(su16) != u16 {
+ t.Errorf("uint16(su16) != u16")
+ }
+ if uint8(su8) != u8 {
+ t.Errorf("uint8(su8) != u8")
+ }
+ if int64(si64) != i64 {
+ t.Errorf("int64(si64) != i64")
+ }
+ if int32(si32) != i32 {
+ t.Errorf("int32(si32) != i32")
+ }
+ if int16(si16) != i16 {
+ t.Errorf("int16(si16) != i16")
+ }
+ if int8(si8) != i8 {
+ t.Errorf("int8(si8) != i8")
+ }
+}
+
+func TestFloat32StoreToLoadConstantFold(t *testing.T) {
+ // Test that math.Float32{,from}bits constant fold correctly.
+ // In particular we need to be careful that signaling NaN (sNaN) values
+ // are not converted to quiet NaN (qNaN) values during compilation.
+ // See issue #27193 for more information.
+
+ // signaling NaNs
+ {
+ const nan = uint32(0x7f800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xff800001) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0xffbfffff) // sNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // quiet NaNs
+ {
+ const nan = uint32(0x7fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x7fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fc00000) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+ {
+ const nan = uint32(0x8fffffff) // qNaN
+ if x := math.Float32bits(math.Float32frombits(nan)); x != nan {
+ t.Errorf("got %#x, want %#x", x, nan)
+ }
+ }
+
+ // infinities
+ {
+ const inf = uint32(0x7f800000) // +∞
+ if x := math.Float32bits(math.Float32frombits(inf)); x != inf {
+ t.Errorf("got %#x, want %#x", x, inf)
+ }
+ }
+ {
+ const negInf = uint32(0xff800000) // -∞
+ if x := math.Float32bits(math.Float32frombits(negInf)); x != negInf {
+ t.Errorf("got %#x, want %#x", x, negInf)
+ }
+ }
+
+ // numbers
+ {
+ const zero = uint32(0) // +0.0
+ if x := math.Float32bits(math.Float32frombits(zero)); x != zero {
+ t.Errorf("got %#x, want %#x", x, zero)
+ }
+ }
+ {
+ const negZero = uint32(1 << 31) // -0.0
+ if x := math.Float32bits(math.Float32frombits(negZero)); x != negZero {
+ t.Errorf("got %#x, want %#x", x, negZero)
+ }
+ }
+ {
+ const one = uint32(0x3f800000) // 1.0
+ if x := math.Float32bits(math.Float32frombits(one)); x != one {
+ t.Errorf("got %#x, want %#x", x, one)
+ }
+ }
+ {
+ const negOne = uint32(0xbf800000) // -1.0
+ if x := math.Float32bits(math.Float32frombits(negOne)); x != negOne {
+ t.Errorf("got %#x, want %#x", x, negOne)
+ }
+ }
+ {
+ const frac = uint32(0x3fc00000) // +1.5
+ if x := math.Float32bits(math.Float32frombits(frac)); x != frac {
+ t.Errorf("got %#x, want %#x", x, frac)
+ }
+ }
+ {
+ const negFrac = uint32(0xbfc00000) // -1.5
+ if x := math.Float32bits(math.Float32frombits(negFrac)); x != negFrac {
+ t.Errorf("got %#x, want %#x", x, negFrac)
+ }
+ }
+}
+
+// Signaling NaN values as constants.
+const (
+ snan32bits uint32 = 0x7f800001
+ snan64bits uint64 = 0x7ff0000000000001
+)
+
+// Signaling NaNs as variables.
+var snan32bitsVar uint32 = snan32bits
+var snan64bitsVar uint64 = snan64bits
+
+func TestFloatSignalingNaN(t *testing.T) {
+ // Make sure we generate a signaling NaN from a constant properly.
+ // See issue 36400.
+ f32 := math.Float32frombits(snan32bits)
+ g32 := math.Float32frombits(snan32bitsVar)
+ x32 := math.Float32bits(f32)
+ y32 := math.Float32bits(g32)
+ if x32 != y32 {
+ t.Errorf("got %x, want %x (diff=%x)", x32, y32, x32^y32)
+ }
+
+ f64 := math.Float64frombits(snan64bits)
+ g64 := math.Float64frombits(snan64bitsVar)
+ x64 := math.Float64bits(f64)
+ y64 := math.Float64bits(g64)
+ if x64 != y64 {
+ t.Errorf("got %x, want %x (diff=%x)", x64, y64, x64^y64)
+ }
+}
+
+func TestFloatSignalingNaNConversion(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, we get a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399.
+ s32 := math.Float32frombits(snan32bitsVar)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bitsVar)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+func TestFloatSignalingNaNConversionConst(t *testing.T) {
+ // Test to make sure when we convert a signaling NaN, it converts to a NaN.
+ // (Ideally we want a quiet NaN, but some platforms don't agree.)
+ // See issue 36399 and 36400.
+ s32 := math.Float32frombits(snan32bits)
+ if s32 == s32 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+ s64 := math.Float64frombits(snan64bits)
+ if s64 == s64 {
+ t.Errorf("converting a NaN did not result in a NaN")
+ }
+}
+
+var sinkFloat float64
+
+func BenchmarkMul2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= 2
+ }
+ sinkFloat = m
+ }
+}
+func BenchmarkMulNeg2(b *testing.B) {
+ for i := 0; i < b.N; i++ {
+ var m float64 = 1
+ for j := 0; j < 500; j++ {
+ m *= -2
+ }
+ sinkFloat = m
+ }
+}
diff --git a/src/cmd/compile/internal/test/global_test.go b/src/cmd/compile/internal/test/global_test.go
new file mode 100644
index 0000000..93de894
--- /dev/null
+++ b/src/cmd/compile/internal/test/global_test.go
@@ -0,0 +1,116 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "strings"
+ "testing"
+)
+
+// Make sure "hello world" does not link in all the
+// fmt.scanf routines. See issue 6853.
+func TestScanfRemoval(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir, err := ioutil.TempDir("", "issue6853a-")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Name of destination.
+ dst := filepath.Join(dir, "test")
+
+ // Compile source.
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-o", dst, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v\n%s", err, out)
+ }
+
+ // Check destination to see if scanf code was included.
+ cmd = exec.Command(testenv.GoToolPath(t), "tool", "nm", dst)
+ out, err = cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not read target: %v", err)
+ }
+ if bytes.Contains(out, []byte("scanInt")) {
+ t.Fatalf("scanf code not removed from helloworld")
+ }
+}
+
+// Make sure -S prints assembly code. See issue 14515.
+func TestDashS(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Make a directory to work in.
+ dir, err := ioutil.TempDir("", "issue14515-")
+ if err != nil {
+ t.Fatalf("could not create directory: %v", err)
+ }
+ defer os.RemoveAll(dir)
+
+ // Create source.
+ src := filepath.Join(dir, "test.go")
+ f, err := os.Create(src)
+ if err != nil {
+ t.Fatalf("could not create source file: %v", err)
+ }
+ f.Write([]byte(`
+package main
+import "fmt"
+func main() {
+ fmt.Println("hello world")
+}
+`))
+ f.Close()
+
+ // Compile source.
+ cmd := exec.Command(testenv.GoToolPath(t), "build", "-gcflags", "-S", "-o", filepath.Join(dir, "test"), src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("could not build target: %v\n%s", err, out)
+ }
+
+ patterns := []string{
+ // It is hard to look for actual instructions in an
+ // arch-independent way. So we'll just look for
+ // pseudo-ops that are arch-independent.
+ "\tTEXT\t",
+ "\tFUNCDATA\t",
+ "\tPCDATA\t",
+ }
+ outstr := string(out)
+ for _, p := range patterns {
+ if !strings.Contains(outstr, p) {
+ println(outstr)
+ panic("can't find pattern " + p)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/iface_test.go b/src/cmd/compile/internal/test/iface_test.go
new file mode 100644
index 0000000..ebc4f89
--- /dev/null
+++ b/src/cmd/compile/internal/test/iface_test.go
@@ -0,0 +1,126 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Test to make sure we make copies of the values we
+// put in interfaces.
+
+var x int
+
+func TestEfaceConv1(t *testing.T) {
+ a := 5
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv2(t *testing.T) {
+ a := 5
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+func TestEfaceConv3(t *testing.T) {
+ x = 5
+ if got := e2int3(x); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int3(i interface{}) int {
+ x = 7
+ return i.(int)
+}
+
+func TestEfaceConv4(t *testing.T) {
+ a := 5
+ if got := e2int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", got)
+ }
+}
+
+//go:noinline
+func e2int4(i interface{}, p *int) int {
+ *p = 7
+ return i.(int)
+}
+
+type Int int
+
+var y Int
+
+type I interface {
+ foo()
+}
+
+func (i Int) foo() {
+}
+
+func TestIfaceConv1(t *testing.T) {
+ a := Int(5)
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv2(t *testing.T) {
+ a := Int(5)
+ sink = &a
+ i := interface{}(a)
+ a += 2
+ if got := i.(Int); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+func TestIfaceConv3(t *testing.T) {
+ y = 5
+ if got := i2Int3(y); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int3(i I) Int {
+ y = 7
+ return i.(Int)
+}
+
+func TestIfaceConv4(t *testing.T) {
+ a := Int(5)
+ if got := i2Int4(a, &a); got != 5 {
+ t.Errorf("wanted 5, got %d\n", int(got))
+ }
+}
+
+//go:noinline
+func i2Int4(i I, p *Int) Int {
+ *p = 7
+ return i.(Int)
+}
+
+func BenchmarkEfaceInteger(b *testing.B) {
+ sum := 0
+ for i := 0; i < b.N; i++ {
+ sum += i2int(i)
+ }
+ sink = sum
+}
+
+//go:noinline
+func i2int(i interface{}) int {
+ return i.(int)
+}
diff --git a/src/cmd/compile/internal/test/inl_test.go b/src/cmd/compile/internal/test/inl_test.go
new file mode 100644
index 0000000..b10d37a
--- /dev/null
+++ b/src/cmd/compile/internal/test/inl_test.go
@@ -0,0 +1,275 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bufio"
+ "internal/testenv"
+ "io"
+ "math/bits"
+ "os/exec"
+ "regexp"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// TestIntendedInlining tests that specific functions are inlined.
+// This allows refactoring for code clarity and re-use without fear that
+// changes to the compiler will cause silent performance regressions.
+func TestIntendedInlining(t *testing.T) {
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ // want is the list of function names (by package) that should
+ // be inlinable. If they have no callers in their packages, they
+ // might not actually be inlined anywhere.
+ want := map[string][]string{
+ "runtime": {
+ "add",
+ "acquirem",
+ "add1",
+ "addb",
+ "adjustpanics",
+ "adjustpointer",
+ "alignDown",
+ "alignUp",
+ "bucketMask",
+ "bucketShift",
+ "chanbuf",
+ "evacuated",
+ "fastlog2",
+ "fastrand",
+ "float64bits",
+ "funcspdelta",
+ "getArgInfoFast",
+ "getm",
+ "getMCache",
+ "isDirectIface",
+ "itabHashFunc",
+ "noescape",
+ "pcvalueCacheKey",
+ "readUnaligned32",
+ "readUnaligned64",
+ "releasem",
+ "roundupsize",
+ "stackmapdata",
+ "stringStructOf",
+ "subtract1",
+ "subtractb",
+ "tophash",
+ "(*bmap).keys",
+ "(*bmap).overflow",
+ "(*waitq).enqueue",
+ "funcInfo.entry",
+
+ // GC-related ones
+ "cgoInRange",
+ "gclinkptr.ptr",
+ "guintptr.ptr",
+ "heapBits.bits",
+ "heapBits.isPointer",
+ "heapBits.morePointers",
+ "heapBits.next",
+ "heapBitsForAddr",
+ "markBits.isMarked",
+ "muintptr.ptr",
+ "puintptr.ptr",
+ "spanOf",
+ "spanOfUnchecked",
+ "(*gcWork).putFast",
+ "(*gcWork).tryGetFast",
+ "(*guintptr).set",
+ "(*markBits).advance",
+ "(*mspan).allocBitsForIndex",
+ "(*mspan).base",
+ "(*mspan).markBitsForBase",
+ "(*mspan).markBitsForIndex",
+ "(*muintptr).set",
+ "(*puintptr).set",
+ },
+ "runtime/internal/sys": {},
+ "runtime/internal/math": {
+ "MulUintptr",
+ },
+ "bytes": {
+ "(*Buffer).Bytes",
+ "(*Buffer).Cap",
+ "(*Buffer).Len",
+ "(*Buffer).Grow",
+ "(*Buffer).Next",
+ "(*Buffer).Read",
+ "(*Buffer).ReadByte",
+ "(*Buffer).Reset",
+ "(*Buffer).String",
+ "(*Buffer).UnreadByte",
+ "(*Buffer).tryGrowByReslice",
+ },
+ "compress/flate": {
+ "byLiteral.Len",
+ "byLiteral.Less",
+ "byLiteral.Swap",
+ "(*dictDecoder).tryWriteCopy",
+ },
+ "encoding/base64": {
+ "assemble32",
+ "assemble64",
+ },
+ "unicode/utf8": {
+ "FullRune",
+ "FullRuneInString",
+ "RuneLen",
+ "AppendRune",
+ "ValidRune",
+ },
+ "reflect": {
+ "Value.CanInt",
+ "Value.CanUint",
+ "Value.CanFloat",
+ "Value.CanComplex",
+ "Value.CanAddr",
+ "Value.CanSet",
+ "Value.CanInterface",
+ "Value.IsValid",
+ "Value.pointer",
+ "add",
+ "align",
+ "flag.mustBe",
+ "flag.mustBeAssignable",
+ "flag.mustBeExported",
+ "flag.kind",
+ "flag.ro",
+ },
+ "regexp": {
+ "(*bitState).push",
+ },
+ "math/big": {
+ "bigEndianWord",
+ // The following functions require the math_big_pure_go build tag.
+ "addVW",
+ "subVW",
+ },
+ "math/rand": {
+ "(*rngSource).Int63",
+ "(*rngSource).Uint64",
+ },
+ "net": {
+ "(*UDPConn).ReadFromUDP",
+ },
+ }
+
+ if runtime.GOARCH != "386" && runtime.GOARCH != "mips64" && runtime.GOARCH != "mips64le" && runtime.GOARCH != "riscv64" {
+ // nextFreeFast calls sys.Ctz64, which on 386 is implemented in asm and is not inlinable.
+ // We currently don't have midstack inlining so nextFreeFast is also not inlinable on 386.
+		// On mips64x and riscv64, Ctz64 is not intrinsified and makes nextFreeFast too expensive
+ // to inline (Issue 22239).
+ want["runtime"] = append(want["runtime"], "nextFreeFast")
+ }
+ if runtime.GOARCH != "386" {
+ // As explained above, Ctz64 and Ctz32 are not Go code on 386.
+ // The same applies to Bswap32.
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz64")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Ctz32")
+ want["runtime/internal/sys"] = append(want["runtime/internal/sys"], "Bswap32")
+ }
+ if bits.UintSize == 64 {
+ // mix is only defined on 64-bit architectures
+ want["runtime"] = append(want["runtime"], "mix")
+ }
+
+ switch runtime.GOARCH {
+ case "386", "wasm", "arm":
+ default:
+ // TODO(mvdan): As explained in /test/inline_sync.go, some
+ // architectures don't have atomic intrinsics, so these go over
+ // the inlining budget. Move back to the main table once that
+ // problem is solved.
+ want["sync"] = []string{
+ "(*Mutex).Lock",
+ "(*Mutex).Unlock",
+ "(*RWMutex).RLock",
+ "(*RWMutex).RUnlock",
+ "(*Once).Do",
+ }
+ }
+
+ // Functions that must actually be inlined; they must have actual callers.
+ must := map[string]bool{
+ "compress/flate.byLiteral.Len": true,
+ "compress/flate.byLiteral.Less": true,
+ "compress/flate.byLiteral.Swap": true,
+ }
+
+ notInlinedReason := make(map[string]string)
+ pkgs := make([]string, 0, len(want))
+ for pname, fnames := range want {
+ pkgs = append(pkgs, pname)
+ for _, fname := range fnames {
+ fullName := pname + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ t.Errorf("duplicate func: %s", fullName)
+ }
+ notInlinedReason[fullName] = "unknown reason"
+ }
+ }
+
+ args := append([]string{"build", "-a", "-gcflags=all=-m -m", "-tags=math_big_pure_go"}, pkgs...)
+ cmd := testenv.CleanCmdEnv(exec.Command(testenv.GoToolPath(t), args...))
+ pr, pw := io.Pipe()
+ cmd.Stdout = pw
+ cmd.Stderr = pw
+ cmdErr := make(chan error, 1)
+ go func() {
+ cmdErr <- cmd.Run()
+ pw.Close()
+ }()
+ scanner := bufio.NewScanner(pr)
+ curPkg := ""
+ canInline := regexp.MustCompile(`: can inline ([^ ]*)`)
+ haveInlined := regexp.MustCompile(`: inlining call to ([^ ]*)`)
+ cannotInline := regexp.MustCompile(`: cannot inline ([^ ]*): (.*)`)
+ for scanner.Scan() {
+ line := scanner.Text()
+ if strings.HasPrefix(line, "# ") {
+ curPkg = line[2:]
+ continue
+ }
+ if m := haveInlined.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ delete(notInlinedReason, curPkg+"."+fname)
+ continue
+ }
+ if m := canInline.FindStringSubmatch(line); m != nil {
+ fname := m[1]
+ fullname := curPkg + "." + fname
+			// If the function must be inlined somewhere, being inlinable is not enough.
+ if _, ok := must[fullname]; !ok {
+ delete(notInlinedReason, fullname)
+ continue
+ }
+ }
+ if m := cannotInline.FindStringSubmatch(line); m != nil {
+ fname, reason := m[1], m[2]
+ fullName := curPkg + "." + fname
+ if _, ok := notInlinedReason[fullName]; ok {
+ // cmd/compile gave us a reason why
+ notInlinedReason[fullName] = reason
+ }
+ continue
+ }
+ }
+ if err := <-cmdErr; err != nil {
+ t.Fatal(err)
+ }
+ if err := scanner.Err(); err != nil {
+ t.Fatal(err)
+ }
+ for fullName, reason := range notInlinedReason {
+ t.Errorf("%s was not inlined: %s", fullName, reason)
+ }
+}
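
For reference, the canInline, haveInlined, and cannotInline patterns above match the diagnostics that go build -gcflags=-m prints, of the form ": can inline NAME", ": inlining call to NAME", and ": cannot inline NAME: REASON". A hypothetical toy package like the one below, built with -gcflags=-m, should produce lines of those shapes; it exists only to make the matched formats concrete and is not part of the test.

// A hypothetical example package, not part of the Go tree. Building it with
//	go build -gcflags=-m
// should report that add can be inlined and that the calls in Sum3 are
// inlined, in the ": can inline" / ": inlining call to" formats matched above.
package tiny

// add is small enough to fall well under the inlining budget.
func add(a, b int) int { return a + b }

// Sum3 calls add twice; with -gcflags=-m the compiler is expected to report
// each call as "inlining call to add" (assuming default optimization settings).
func Sum3(a, b, c int) int {
	return add(add(a, b), c)
}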
diff --git a/src/cmd/compile/internal/test/inst_test.go b/src/cmd/compile/internal/test/inst_test.go
new file mode 100644
index 0000000..951f6a0
--- /dev/null
+++ b/src/cmd/compile/internal/test/inst_test.go
@@ -0,0 +1,73 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/goexperiment"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "regexp"
+ "testing"
+)
+
+// TestInst tests that only one instantiation of Sort is created, even though generic
+// Sort is used for multiple pointer types across two packages.
+func TestInst(t *testing.T) {
+ if goexperiment.Unified {
+ t.Skip("unified currently does stenciling, not dictionaries")
+ }
+ testenv.MustHaveGoBuild(t)
+ testenv.MustHaveGoRun(t)
+
+ var tmpdir string
+ var err error
+ tmpdir, err = ioutil.TempDir("", "TestDict")
+ if err != nil {
+ t.Fatalf("Failed to create temporary directory: %v", err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // Build ptrsort.go, which uses package mysort.
+ var output []byte
+ filename := "ptrsort.go"
+ exename := "ptrsort"
+ outname := "ptrsort.out"
+ gotool := testenv.GoToolPath(t)
+ dest := filepath.Join(tmpdir, exename)
+ cmd := exec.Command(gotool, "build", "-o", dest, filepath.Join("testdata", filename))
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOutput: %s\n", err, output)
+ }
+
+ // Test that there is exactly one shape-based instantiation of Sort in
+ // the executable.
+ cmd = exec.Command(gotool, "tool", "nm", dest)
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\n", err, output)
+ }
+	// Look for the shape-based instantiation of Sort, but ignore any extra
+	// wrappers ending in "-tramp" (which are created on riscv).
+ re := regexp.MustCompile(`\bSort\[.*shape.*\][^-]`)
+ r := re.FindAllIndex(output, -1)
+ if len(r) != 1 {
+		t.Fatalf("Wanted 1 instantiation of the Sort function, got %d\n", len(r))
+ }
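+	// For reference, the single match is expected to be the shape-based
+	// instantiation symbol; a hypothetical nm line could look like
+	//	  4fa2c0 T mysort.Sort[go.shape.*uint8_0]
+	// (the address, symbol type letter and exact shape name vary by
+	// platform and compiler version).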
+
+ // Actually run the test and make sure output is correct.
+ cmd = exec.Command(gotool, "run", filepath.Join("testdata", filename))
+ if output, err = cmd.CombinedOutput(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\n", err, output)
+ }
+ out, err := ioutil.ReadFile(filepath.Join("testdata", outname))
+ if err != nil {
+ t.Fatalf("Could not find %s\n", outname)
+ }
+ if string(out) != string(output) {
+ t.Fatalf("Wanted output %v, got %v\n", string(out), string(output))
+ }
+}
diff --git a/src/cmd/compile/internal/test/issue50182_test.go b/src/cmd/compile/internal/test/issue50182_test.go
new file mode 100644
index 0000000..cd277fa
--- /dev/null
+++ b/src/cmd/compile/internal/test/issue50182_test.go
@@ -0,0 +1,62 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "fmt"
+ "sort"
+ "testing"
+)
+
+// Test that calling methods on generic types doesn't cause allocations.
+func genericSorted[T sort.Interface](data T) bool {
+ n := data.Len()
+ for i := n - 1; i > 0; i-- {
+ if data.Less(i, i-1) {
+ return false
+ }
+ }
+ return true
+}
+func TestGenericSorted(t *testing.T) {
+ var data = sort.IntSlice{-10, -5, 0, 1, 2, 3, 5, 7, 11, 100, 100, 100, 1000, 10000}
+ f := func() {
+ genericSorted(data)
+ }
+ if n := testing.AllocsPerRun(10, f); n > 0 {
+ t.Errorf("got %f allocs, want 0", n)
+ }
+}
+
+// Test that escape analysis correctly tracks escaping inside of methods
+// called on generic types.
+type fooer interface {
+ foo()
+}
+type P struct {
+ p *int
+ q int
+}
+
+var esc []*int
+
+func (p P) foo() {
+ esc = append(esc, p.p) // foo escapes the pointer from inside of p
+}
+func f[T fooer](t T) {
+ t.foo()
+}
+func TestGenericEscape(t *testing.T) {
+ for i := 0; i < 4; i++ {
+ var x int = 77 + i
+ var p P = P{p: &x}
+ f(p)
+ }
+ for i, p := range esc {
+ if got, want := *p, 77+i; got != want {
+ panic(fmt.Sprintf("entry %d: got %d, want %d", i, got, want))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/lang_test.go b/src/cmd/compile/internal/test/lang_test.go
new file mode 100644
index 0000000..67c1551
--- /dev/null
+++ b/src/cmd/compile/internal/test/lang_test.go
@@ -0,0 +1,64 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+const aliasSrc = `
+package x
+
+type T = int
+`
+
+func TestInvalidLang(t *testing.T) {
+ t.Parallel()
+
+ testenv.MustHaveGoBuild(t)
+
+ dir, err := ioutil.TempDir("", "TestInvalidLang")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(dir)
+
+ src := filepath.Join(dir, "alias.go")
+ if err := ioutil.WriteFile(src, []byte(aliasSrc), 0644); err != nil {
+ t.Fatal(err)
+ }
+
+ outfile := filepath.Join(dir, "alias.o")
+
+ if testLang(t, "go9.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go9.99 succeeded unexpectedly")
+ }
+
+ // This test will have to be adjusted if we ever reach 1.99 or 2.0.
+ if testLang(t, "go1.99", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.99 succeeded unexpectedly")
+ }
+
+ if testLang(t, "go1.8", src, outfile) == nil {
+ t.Error("compilation with -lang=go1.8 succeeded unexpectedly")
+ }
+
+ if err := testLang(t, "go1.9", src, outfile); err != nil {
+ t.Errorf("compilation with -lang=go1.9 failed unexpectedly: %v", err)
+ }
+}
+
+func testLang(t *testing.T, lang, src, outfile string) error {
+ run := []string{testenv.GoToolPath(t), "tool", "compile", "-lang", lang, "-o", outfile, src}
+ t.Log(run)
+ out, err := exec.Command(run[0], run[1:]...).CombinedOutput()
+ t.Logf("%s", out)
+ return err
+}
diff --git a/src/cmd/compile/internal/test/logic_test.go b/src/cmd/compile/internal/test/logic_test.go
new file mode 100644
index 0000000..1d7043f
--- /dev/null
+++ b/src/cmd/compile/internal/test/logic_test.go
@@ -0,0 +1,289 @@
+package test
+
+import "testing"
+
+// Tests to make sure logic simplification rules are correct.
+
+func TestLogic64(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int64{-1 << 63, 1<<63 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int64) int64 { return 0 }
+ id := func(x int64) int64 { return x }
+ or := func(x, y int64) int64 { return x | y }
+ and := func(x, y int64) int64 { return x & y }
+ y := func(x, y int64) int64 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int64) int64
+ golden func(int64) int64
+ }{
+ {"x|x", func(x int64) int64 { return x | x }, id},
+ {"x|0", func(x int64) int64 { return x | 0 }, id},
+ {"x|-1", func(x int64) int64 { return x | -1 }, func(x int64) int64 { return -1 }},
+ {"x&x", func(x int64) int64 { return x & x }, id},
+ {"x&0", func(x int64) int64 { return x & 0 }, zero},
+ {"x&-1", func(x int64) int64 { return x & -1 }, id},
+ {"x^x", func(x int64) int64 { return x ^ x }, zero},
+ {"x^0", func(x int64) int64 { return x ^ 0 }, id},
+ {"x^-1", func(x int64) int64 { return x ^ -1 }, func(x int64) int64 { return ^x }},
+ {"x+0", func(x int64) int64 { return x + 0 }, id},
+ {"x-x", func(x int64) int64 { return x - x }, zero},
+ {"x*0", func(x int64) int64 { return x * 0 }, zero},
+ {"^^x", func(x int64) int64 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int64, int64) int64
+ golden func(int64, int64) int64
+ }{
+ {"x|(x|y)", func(x, y int64) int64 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int64) int64 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int64) int64 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int64) int64 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int64) int64 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int64) int64 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int64) int64 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int64) int64 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int64) int64 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int64) int64 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int64) int64 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int64) int64 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int64) int64 { return -(y - x) }, func(x, y int64) int64 { return x - y }},
+ {"(x+y)-x", func(x, y int64) int64 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int64) int64 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic32(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int32{-1 << 31, 1<<31 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int32) int32 { return 0 }
+ id := func(x int32) int32 { return x }
+ or := func(x, y int32) int32 { return x | y }
+ and := func(x, y int32) int32 { return x & y }
+ y := func(x, y int32) int32 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int32) int32
+ golden func(int32) int32
+ }{
+ {"x|x", func(x int32) int32 { return x | x }, id},
+ {"x|0", func(x int32) int32 { return x | 0 }, id},
+ {"x|-1", func(x int32) int32 { return x | -1 }, func(x int32) int32 { return -1 }},
+ {"x&x", func(x int32) int32 { return x & x }, id},
+ {"x&0", func(x int32) int32 { return x & 0 }, zero},
+ {"x&-1", func(x int32) int32 { return x & -1 }, id},
+ {"x^x", func(x int32) int32 { return x ^ x }, zero},
+ {"x^0", func(x int32) int32 { return x ^ 0 }, id},
+ {"x^-1", func(x int32) int32 { return x ^ -1 }, func(x int32) int32 { return ^x }},
+ {"x+0", func(x int32) int32 { return x + 0 }, id},
+ {"x-x", func(x int32) int32 { return x - x }, zero},
+ {"x*0", func(x int32) int32 { return x * 0 }, zero},
+ {"^^x", func(x int32) int32 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int32, int32) int32
+ golden func(int32, int32) int32
+ }{
+ {"x|(x|y)", func(x, y int32) int32 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int32) int32 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int32) int32 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int32) int32 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int32) int32 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int32) int32 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int32) int32 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int32) int32 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int32) int32 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int32) int32 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int32) int32 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int32) int32 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int32) int32 { return -(y - x) }, func(x, y int32) int32 { return x - y }},
+ {"(x+y)-x", func(x, y int32) int32 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int32) int32 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic16(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int16{-1 << 15, 1<<15 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int16) int16 { return 0 }
+ id := func(x int16) int16 { return x }
+ or := func(x, y int16) int16 { return x | y }
+ and := func(x, y int16) int16 { return x & y }
+ y := func(x, y int16) int16 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int16) int16
+ golden func(int16) int16
+ }{
+ {"x|x", func(x int16) int16 { return x | x }, id},
+ {"x|0", func(x int16) int16 { return x | 0 }, id},
+ {"x|-1", func(x int16) int16 { return x | -1 }, func(x int16) int16 { return -1 }},
+ {"x&x", func(x int16) int16 { return x & x }, id},
+ {"x&0", func(x int16) int16 { return x & 0 }, zero},
+ {"x&-1", func(x int16) int16 { return x & -1 }, id},
+ {"x^x", func(x int16) int16 { return x ^ x }, zero},
+ {"x^0", func(x int16) int16 { return x ^ 0 }, id},
+ {"x^-1", func(x int16) int16 { return x ^ -1 }, func(x int16) int16 { return ^x }},
+ {"x+0", func(x int16) int16 { return x + 0 }, id},
+ {"x-x", func(x int16) int16 { return x - x }, zero},
+ {"x*0", func(x int16) int16 { return x * 0 }, zero},
+ {"^^x", func(x int16) int16 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int16, int16) int16
+ golden func(int16, int16) int16
+ }{
+ {"x|(x|y)", func(x, y int16) int16 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int16) int16 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int16) int16 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int16) int16 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int16) int16 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int16) int16 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int16) int16 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int16) int16 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int16) int16 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int16) int16 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int16) int16 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int16) int16 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int16) int16 { return -(y - x) }, func(x, y int16) int16 { return x - y }},
+ {"(x+y)-x", func(x, y int16) int16 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int16) int16 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
+
+func TestLogic8(t *testing.T) {
+ // test values to determine function equality
+ values := [...]int8{-1 << 7, 1<<7 - 1, -4, -3, -2, -1, 0, 1, 2, 3, 4}
+
+ // golden functions we use repeatedly
+ zero := func(x int8) int8 { return 0 }
+ id := func(x int8) int8 { return x }
+ or := func(x, y int8) int8 { return x | y }
+ and := func(x, y int8) int8 { return x & y }
+ y := func(x, y int8) int8 { return y }
+
+ for _, test := range [...]struct {
+ name string
+ f func(int8) int8
+ golden func(int8) int8
+ }{
+ {"x|x", func(x int8) int8 { return x | x }, id},
+ {"x|0", func(x int8) int8 { return x | 0 }, id},
+ {"x|-1", func(x int8) int8 { return x | -1 }, func(x int8) int8 { return -1 }},
+ {"x&x", func(x int8) int8 { return x & x }, id},
+ {"x&0", func(x int8) int8 { return x & 0 }, zero},
+ {"x&-1", func(x int8) int8 { return x & -1 }, id},
+ {"x^x", func(x int8) int8 { return x ^ x }, zero},
+ {"x^0", func(x int8) int8 { return x ^ 0 }, id},
+ {"x^-1", func(x int8) int8 { return x ^ -1 }, func(x int8) int8 { return ^x }},
+ {"x+0", func(x int8) int8 { return x + 0 }, id},
+ {"x-x", func(x int8) int8 { return x - x }, zero},
+ {"x*0", func(x int8) int8 { return x * 0 }, zero},
+ {"^^x", func(x int8) int8 { return ^^x }, id},
+ } {
+ for _, v := range values {
+ got := test.f(v)
+ want := test.golden(v)
+ if want != got {
+ t.Errorf("[%s](%d)=%d, want %d", test.name, v, got, want)
+ }
+ }
+ }
+ for _, test := range [...]struct {
+ name string
+ f func(int8, int8) int8
+ golden func(int8, int8) int8
+ }{
+ {"x|(x|y)", func(x, y int8) int8 { return x | (x | y) }, or},
+ {"x|(y|x)", func(x, y int8) int8 { return x | (y | x) }, or},
+ {"(x|y)|x", func(x, y int8) int8 { return (x | y) | x }, or},
+ {"(y|x)|x", func(x, y int8) int8 { return (y | x) | x }, or},
+ {"x&(x&y)", func(x, y int8) int8 { return x & (x & y) }, and},
+ {"x&(y&x)", func(x, y int8) int8 { return x & (y & x) }, and},
+ {"(x&y)&x", func(x, y int8) int8 { return (x & y) & x }, and},
+ {"(y&x)&x", func(x, y int8) int8 { return (y & x) & x }, and},
+ {"x^(x^y)", func(x, y int8) int8 { return x ^ (x ^ y) }, y},
+ {"x^(y^x)", func(x, y int8) int8 { return x ^ (y ^ x) }, y},
+ {"(x^y)^x", func(x, y int8) int8 { return (x ^ y) ^ x }, y},
+ {"(y^x)^x", func(x, y int8) int8 { return (y ^ x) ^ x }, y},
+ {"-(y-x)", func(x, y int8) int8 { return -(y - x) }, func(x, y int8) int8 { return x - y }},
+ {"(x+y)-x", func(x, y int8) int8 { return (x + y) - x }, y},
+ {"(y+x)-x", func(x, y int8) int8 { return (y + x) - x }, y},
+ } {
+ for _, v := range values {
+ for _, w := range values {
+ got := test.f(v, w)
+ want := test.golden(v, w)
+ if want != got {
+ t.Errorf("[%s](%d,%d)=%d, want %d", test.name, v, w, got, want)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/mulconst_test.go b/src/cmd/compile/internal/test/mulconst_test.go
new file mode 100644
index 0000000..314cab3
--- /dev/null
+++ b/src/cmd/compile/internal/test/mulconst_test.go
@@ -0,0 +1,242 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+// Benchmark multiplication of an integer by various constants.
+//
+// The comment above each sub-benchmark provides an example of how the
+// target multiplication operation might be implemented using shift
+// (multiplication by a power of 2), addition and subtraction
+// operations. It is platform-dependent whether these transformations
+// are actually applied.
+
+var (
+ mulSinkI32 int32
+ mulSinkI64 int64
+ mulSinkU32 uint32
+ mulSinkU64 uint64
+)
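+
+// mulBy120Sketch is an illustrative sketch (not used by the benchmarks
+// below) of the kind of shift-and-subtract sequence a compiler may emit
+// for x*120 on platforms where the strength reduction applies:
+// 120x = 128x - 8x = (x<<7) - (x<<3). The function name is made up for
+// this example.
+func mulBy120Sketch(x int64) int64 {
+	return x<<7 - x<<3
+}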
+
+func BenchmarkMulconstI32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI32 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI32 = x
+ })
+}
+
+func BenchmarkMulconstI64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkI64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkI64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkI64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkI64 = x
+ })
+ // -120x = 8x - 120x
+ b.Run("-120", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= -120
+ }
+ mulSinkI64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkI64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := int64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkI64 = x
+ })
+}
+
+func BenchmarkMulconstU32(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU32 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU32 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU32 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU32 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU32 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint32(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU32 = x
+ })
+}
+
+func BenchmarkMulconstU64(b *testing.B) {
+ // 3x = 2x + x
+ b.Run("3", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 3
+ }
+ mulSinkU64 = x
+ })
+ // 5x = 4x + x
+ b.Run("5", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 5
+ }
+ mulSinkU64 = x
+ })
+ // 12x = 8x + 4x
+ b.Run("12", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 12
+ }
+ mulSinkU64 = x
+ })
+ // 120x = 128x - 8x
+ b.Run("120", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 120
+ }
+ mulSinkU64 = x
+ })
+ // 65537x = 65536x + x
+ b.Run("65537", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65537
+ }
+ mulSinkU64 = x
+ })
+ // 65538x = 65536x + 2x
+ b.Run("65538", func(b *testing.B) {
+ x := uint64(1)
+ for i := 0; i < b.N; i++ {
+ x *= 65538
+ }
+ mulSinkU64 = x
+ })
+}
diff --git a/src/cmd/compile/internal/test/race.go b/src/cmd/compile/internal/test/race.go
new file mode 100644
index 0000000..4cc4d53
--- /dev/null
+++ b/src/cmd/compile/internal/test/race.go
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build !compiler_bootstrap
+// +build !compiler_bootstrap
+
+package test
+
+// The racecompile builder only builds packages, but does not build
+// or run tests. This is a non-test file to hold cases that (used
+// to) trigger compiler data races, so they will be exercised on
+// the racecompile builder.
+//
+// This package is not imported so functions here are not included
+// in the actual compiler.
+
+// Issue 55357: data race when building multiple instantiations of
+// generic closures with _ parameters.
+func Issue55357() {
+ type U struct {
+ A int
+ B string
+ C string
+ }
+ var q T55357[U]
+ q.Count()
+ q.List()
+
+ type M struct {
+ A int64
+ B uint32
+ C uint32
+ }
+ var q2 T55357[M]
+ q2.Count()
+ q2.List()
+}
+
+type T55357[T any] struct{}
+
+//go:noinline
+func (q *T55357[T]) do(w, v bool, fn func(bk []byte, v T) error) error {
+ return nil
+}
+
+func (q *T55357[T]) Count() (n int, rerr error) {
+ err := q.do(false, false, func(kb []byte, _ T) error {
+ n++
+ return nil
+ })
+ return n, err
+}
+
+func (q *T55357[T]) List() (list []T, rerr error) {
+ var l []T
+ err := q.do(false, true, func(_ []byte, v T) error {
+ l = append(l, v)
+ return nil
+ })
+ if err != nil {
+ return nil, err
+ }
+ return l, nil
+}
diff --git a/src/cmd/compile/internal/test/reproduciblebuilds_test.go b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
new file mode 100644
index 0000000..4d84f9c
--- /dev/null
+++ b/src/cmd/compile/internal/test/reproduciblebuilds_test.go
@@ -0,0 +1,112 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "testing"
+)
+
+func TestReproducibleBuilds(t *testing.T) {
+ tests := []string{
+ "issue20272.go",
+ "issue27013.go",
+ "issue30202.go",
+ }
+
+ testenv.MustHaveGoBuild(t)
+ iters := 10
+ if testing.Short() {
+ iters = 4
+ }
+ t.Parallel()
+ for _, test := range tests {
+ test := test
+ t.Run(test, func(t *testing.T) {
+ t.Parallel()
+ var want []byte
+ tmp, err := ioutil.TempFile("", "")
+ if err != nil {
+ t.Fatalf("temp file creation failed: %v", err)
+ }
+ defer os.Remove(tmp.Name())
+ defer tmp.Close()
+ for i := 0; i < iters; i++ {
+ // Note: use -c 2 to expose any nondeterminism which is the result
+ // of the runtime scheduler.
+ out, err := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-c", "2", "-o", tmp.Name(), filepath.Join("testdata", "reproducible", test)).CombinedOutput()
+ if err != nil {
+ t.Fatalf("failed to compile: %v\n%s", err, out)
+ }
+ obj, err := ioutil.ReadFile(tmp.Name())
+ if err != nil {
+ t.Fatalf("failed to read object file: %v", err)
+ }
+ if i == 0 {
+ want = obj
+ } else {
+ if !bytes.Equal(want, obj) {
+ t.Fatalf("builds produced different output after %d iters (%d bytes vs %d bytes)", i, len(want), len(obj))
+ }
+ }
+ }
+ })
+ }
+}
+
+func TestIssue38068(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ t.Parallel()
+
+ // Compile a small package with and without the concurrent
+ // backend, then check to make sure that the resulting archives
+ // are identical. Note: this uses "go tool compile" instead of
+	// "go build" since the latter will generate different build IDs
+ // if it sees different command line flags.
+ scenarios := []struct {
+ tag string
+ args string
+ libpath string
+ }{
+ {tag: "serial", args: "-c=1"},
+ {tag: "concurrent", args: "-c=2"}}
+
+ tmpdir, err := ioutil.TempDir("", "TestIssue38068")
+ if err != nil {
+ t.Fatal(err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ src := filepath.Join("testdata", "reproducible", "issue38068.go")
+ for i := range scenarios {
+ s := &scenarios[i]
+ s.libpath = filepath.Join(tmpdir, s.tag+".a")
+		// Note: use of "-p" is required in order for DWARF to be generated.
+ cmd := exec.Command(testenv.GoToolPath(t), "tool", "compile", "-trimpath", "-p=issue38068", "-buildid=", s.args, "-o", s.libpath, src)
+ out, err := cmd.CombinedOutput()
+ if err != nil {
+ t.Fatalf("%v: %v:\n%s", cmd.Args, err, out)
+ }
+ }
+
+ readBytes := func(fn string) []byte {
+ payload, err := ioutil.ReadFile(fn)
+ if err != nil {
+ t.Fatalf("failed to read executable '%s': %v", fn, err)
+ }
+ return payload
+ }
+
+ b1 := readBytes(scenarios[0].libpath)
+ b2 := readBytes(scenarios[1].libpath)
+ if !bytes.Equal(b1, b2) {
+ t.Fatalf("concurrent and serial builds produced different output")
+ }
+}
diff --git a/src/cmd/compile/internal/test/shift_test.go b/src/cmd/compile/internal/test/shift_test.go
new file mode 100644
index 0000000..ea88f0a
--- /dev/null
+++ b/src/cmd/compile/internal/test/shift_test.go
@@ -0,0 +1,1031 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Tests shifts of zero.
+
+//go:noinline
+func ofz64l64(n uint64) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l32(n uint32) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l16(n uint16) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64l8(n uint8) int64 {
+ var x int64
+ return x << n
+}
+
+//go:noinline
+func ofz64r64(n uint64) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r32(n uint32) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r16(n uint16) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64r8(n uint8) int64 {
+ var x int64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur64(n uint64) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur32(n uint32) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur16(n uint16) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz64ur8(n uint8) uint64 {
+ var x uint64
+ return x >> n
+}
+
+//go:noinline
+func ofz32l64(n uint64) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l32(n uint32) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l16(n uint16) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32l8(n uint8) int32 {
+ var x int32
+ return x << n
+}
+
+//go:noinline
+func ofz32r64(n uint64) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r32(n uint32) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r16(n uint16) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32r8(n uint8) int32 {
+ var x int32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur64(n uint64) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur32(n uint32) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur16(n uint16) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz32ur8(n uint8) uint32 {
+ var x uint32
+ return x >> n
+}
+
+//go:noinline
+func ofz16l64(n uint64) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l32(n uint32) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l16(n uint16) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16l8(n uint8) int16 {
+ var x int16
+ return x << n
+}
+
+//go:noinline
+func ofz16r64(n uint64) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r32(n uint32) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r16(n uint16) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16r8(n uint8) int16 {
+ var x int16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur64(n uint64) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur32(n uint32) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur16(n uint16) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz16ur8(n uint8) uint16 {
+ var x uint16
+ return x >> n
+}
+
+//go:noinline
+func ofz8l64(n uint64) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l32(n uint32) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l16(n uint16) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8l8(n uint8) int8 {
+ var x int8
+ return x << n
+}
+
+//go:noinline
+func ofz8r64(n uint64) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r32(n uint32) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r16(n uint16) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8r8(n uint8) int8 {
+ var x int8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur64(n uint64) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur32(n uint32) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur16(n uint16) uint8 {
+ var x uint8
+ return x >> n
+}
+
+//go:noinline
+func ofz8ur8(n uint8) uint8 {
+ var x uint8
+ return x >> n
+}
+
+func TestShiftOfZero(t *testing.T) {
+ if got := ofz64l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz64r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz64ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz32l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz32r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz32ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz16l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz16r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz16ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+
+ if got := ofz8l64(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l32(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l16(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8l8(5); got != 0 {
+ t.Errorf("0<<5 == %d, want 0", got)
+ }
+ if got := ofz8r64(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r32(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r16(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8r8(5); got != 0 {
+ t.Errorf("0>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur64(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur32(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur16(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+ if got := ofz8ur8(5); got != 0 {
+ t.Errorf("0>>>5 == %d, want 0", got)
+ }
+}
+
+//go:noinline
+func byz64l(n int64) int64 {
+ return n << 0
+}
+
+//go:noinline
+func byz64r(n int64) int64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz64ur(n uint64) uint64 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32l(n int32) int32 {
+ return n << 0
+}
+
+//go:noinline
+func byz32r(n int32) int32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz32ur(n uint32) uint32 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16l(n int16) int16 {
+ return n << 0
+}
+
+//go:noinline
+func byz16r(n int16) int16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz16ur(n uint16) uint16 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8l(n int8) int8 {
+ return n << 0
+}
+
+//go:noinline
+func byz8r(n int8) int8 {
+ return n >> 0
+}
+
+//go:noinline
+func byz8ur(n uint8) uint8 {
+ return n >> 0
+}
+
+func TestShiftByZero(t *testing.T) {
+ {
+ var n int64 = 0x5555555555555555
+ if got := byz64l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz64r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint64 = 0xaaaaaaaaaaaaaaaa
+ if got := byz64ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int32 = 0x55555555
+ if got := byz32l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz32r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint32 = 0xaaaaaaaa
+ if got := byz32ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int16 = 0x5555
+ if got := byz16l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz16r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint16 = 0xaaaa
+ if got := byz16ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+
+ {
+ var n int8 = 0x55
+ if got := byz8l(n); got != n {
+ t.Errorf("%x<<0 == %x, want %x", n, got, n)
+ }
+ if got := byz8r(n); got != n {
+ t.Errorf("%x>>0 == %x, want %x", n, got, n)
+ }
+ }
+ {
+ var n uint8 = 0x55
+ if got := byz8ur(n); got != n {
+ t.Errorf("%x>>>0 == %x, want %x", n, got, n)
+ }
+ }
+}
+
+//go:noinline
+func two64l(x int64) int64 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two64r(x int64) int64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two64ur(x uint64) uint64 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32l(x int32) int32 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two32r(x int32) int32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two32ur(x uint32) uint32 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16l(x int16) int16 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two16r(x int16) int16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two16ur(x uint16) uint16 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8l(x int8) int8 {
+ return x << 1 << 1
+}
+
+//go:noinline
+func two8r(x int8) int8 {
+ return x >> 1 >> 1
+}
+
+//go:noinline
+func two8ur(x uint8) uint8 {
+ return x >> 1 >> 1
+}
+
+func TestShiftCombine(t *testing.T) {
+ if got, want := two64l(4), int64(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two64r(64), int64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two64ur(64), uint64(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32l(4), int32(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two32r(64), int32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two32ur(64), uint32(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16l(4), int16(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two16r(64), int16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two16ur(64), uint16(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8l(4), int8(16); want != got {
+ t.Errorf("4<<1<<1 == %d, want %d", got, want)
+ }
+ if got, want := two8r(64), int8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+ if got, want := two8ur(64), uint8(16); want != got {
+ t.Errorf("64>>1>>1 == %d, want %d", got, want)
+ }
+}
+
+//go:noinline
+func three64l(x int64) int64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64ul(x uint64) uint64 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three64r(x int64) int64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three64ur(x uint64) uint64 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32l(x int32) int32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32ul(x uint32) uint32 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three32r(x int32) int32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three32ur(x uint32) uint32 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16l(x int16) int16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16ul(x uint16) uint16 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three16r(x int16) int16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three16ur(x uint16) uint16 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8l(x int8) int8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8ul(x uint8) uint8 {
+ return x << 3 >> 1 << 2
+}
+
+//go:noinline
+func three8r(x int8) int8 {
+ return x >> 3 << 1 >> 2
+}
+
+//go:noinline
+func three8ur(x uint8) uint8 {
+ return x >> 3 << 1 >> 2
+}
+
+func TestShiftCombine3(t *testing.T) {
+	if got, want := three64l(4), int64(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three64ul(4), uint64(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three64r(64), int64(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three64ur(64), uint64(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three32l(4), int32(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three32ul(4), uint32(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three32r(64), int32(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three32ur(64), uint32(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three16l(4), int16(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three16ul(4), uint16(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three16r(64), int16(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three16ur(64), uint16(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three8l(4), int8(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three8ul(4), uint8(64); want != got {
+		t.Errorf("4<<3>>1<<2 == %d, want %d", got, want)
+	}
+	if got, want := three8r(64), int8(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+	if got, want := three8ur(64), uint8(4); want != got {
+		t.Errorf("64>>3<<1>>2 == %d, want %d", got, want)
+	}
+}
+
+var (
+ one64 int64 = 1
+ one64u uint64 = 1
+ one32 int32 = 1
+ one32u uint32 = 1
+ one16 int16 = 1
+ one16u uint16 = 1
+ one8 int8 = 1
+ one8u uint8 = 1
+)
+
+func TestShiftLargeCombine(t *testing.T) {
+ var N uint64 = 0x8000000000000000
+ if one64<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftLargeCombine3(t *testing.T) {
+ var N uint64 = 0x8000000000000001
+ if one64<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one64u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one32u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one16u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u<<N>>2<<N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+ if one8u>>N<<2>>N == 1 {
+ t.Errorf("shift overflow mishandled")
+ }
+}
+
+func TestShiftGeneric(t *testing.T) {
+ for _, test := range [...]struct {
+ valueWidth int
+ signed bool
+ shiftWidth int
+ left bool
+ f interface{}
+ }{
+ {64, true, 64, true, func(n int64, s uint64) int64 { return n << s }},
+ {64, true, 64, false, func(n int64, s uint64) int64 { return n >> s }},
+ {64, false, 64, false, func(n uint64, s uint64) uint64 { return n >> s }},
+ {64, true, 32, true, func(n int64, s uint32) int64 { return n << s }},
+ {64, true, 32, false, func(n int64, s uint32) int64 { return n >> s }},
+ {64, false, 32, false, func(n uint64, s uint32) uint64 { return n >> s }},
+ {64, true, 16, true, func(n int64, s uint16) int64 { return n << s }},
+ {64, true, 16, false, func(n int64, s uint16) int64 { return n >> s }},
+ {64, false, 16, false, func(n uint64, s uint16) uint64 { return n >> s }},
+ {64, true, 8, true, func(n int64, s uint8) int64 { return n << s }},
+ {64, true, 8, false, func(n int64, s uint8) int64 { return n >> s }},
+ {64, false, 8, false, func(n uint64, s uint8) uint64 { return n >> s }},
+
+ {32, true, 64, true, func(n int32, s uint64) int32 { return n << s }},
+ {32, true, 64, false, func(n int32, s uint64) int32 { return n >> s }},
+ {32, false, 64, false, func(n uint32, s uint64) uint32 { return n >> s }},
+ {32, true, 32, true, func(n int32, s uint32) int32 { return n << s }},
+ {32, true, 32, false, func(n int32, s uint32) int32 { return n >> s }},
+ {32, false, 32, false, func(n uint32, s uint32) uint32 { return n >> s }},
+ {32, true, 16, true, func(n int32, s uint16) int32 { return n << s }},
+ {32, true, 16, false, func(n int32, s uint16) int32 { return n >> s }},
+ {32, false, 16, false, func(n uint32, s uint16) uint32 { return n >> s }},
+ {32, true, 8, true, func(n int32, s uint8) int32 { return n << s }},
+ {32, true, 8, false, func(n int32, s uint8) int32 { return n >> s }},
+ {32, false, 8, false, func(n uint32, s uint8) uint32 { return n >> s }},
+
+ {16, true, 64, true, func(n int16, s uint64) int16 { return n << s }},
+ {16, true, 64, false, func(n int16, s uint64) int16 { return n >> s }},
+ {16, false, 64, false, func(n uint16, s uint64) uint16 { return n >> s }},
+ {16, true, 32, true, func(n int16, s uint32) int16 { return n << s }},
+ {16, true, 32, false, func(n int16, s uint32) int16 { return n >> s }},
+ {16, false, 32, false, func(n uint16, s uint32) uint16 { return n >> s }},
+ {16, true, 16, true, func(n int16, s uint16) int16 { return n << s }},
+ {16, true, 16, false, func(n int16, s uint16) int16 { return n >> s }},
+ {16, false, 16, false, func(n uint16, s uint16) uint16 { return n >> s }},
+ {16, true, 8, true, func(n int16, s uint8) int16 { return n << s }},
+ {16, true, 8, false, func(n int16, s uint8) int16 { return n >> s }},
+ {16, false, 8, false, func(n uint16, s uint8) uint16 { return n >> s }},
+
+ {8, true, 64, true, func(n int8, s uint64) int8 { return n << s }},
+ {8, true, 64, false, func(n int8, s uint64) int8 { return n >> s }},
+ {8, false, 64, false, func(n uint8, s uint64) uint8 { return n >> s }},
+ {8, true, 32, true, func(n int8, s uint32) int8 { return n << s }},
+ {8, true, 32, false, func(n int8, s uint32) int8 { return n >> s }},
+ {8, false, 32, false, func(n uint8, s uint32) uint8 { return n >> s }},
+ {8, true, 16, true, func(n int8, s uint16) int8 { return n << s }},
+ {8, true, 16, false, func(n int8, s uint16) int8 { return n >> s }},
+ {8, false, 16, false, func(n uint8, s uint16) uint8 { return n >> s }},
+ {8, true, 8, true, func(n int8, s uint8) int8 { return n << s }},
+ {8, true, 8, false, func(n int8, s uint8) int8 { return n >> s }},
+ {8, false, 8, false, func(n uint8, s uint8) uint8 { return n >> s }},
+ } {
+ fv := reflect.ValueOf(test.f)
+ var args [2]reflect.Value
+ for i := 0; i < test.valueWidth; i++ {
+ // Build value to be shifted.
+ var n int64 = 1
+ for j := 0; j < i; j++ {
+ n <<= 1
+ }
+ args[0] = reflect.ValueOf(n).Convert(fv.Type().In(0))
+ for s := 0; s <= test.shiftWidth; s++ {
+ args[1] = reflect.ValueOf(s).Convert(fv.Type().In(1))
+
+ // Compute desired result. We're testing variable shifts
+ // assuming constant shifts are correct.
+ r := n
+ var op string
+ switch {
+ case test.left:
+ op = "<<"
+ for j := 0; j < s; j++ {
+ r <<= 1
+ }
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ case test.signed:
+ op = ">>"
+ switch test.valueWidth {
+ case 32:
+ r = int64(int32(r))
+ case 16:
+ r = int64(int16(r))
+ case 8:
+ r = int64(int8(r))
+ }
+ for j := 0; j < s; j++ {
+ r >>= 1
+ }
+ default:
+ op = ">>>"
+ for j := 0; j < s; j++ {
+ r = int64(uint64(r) >> 1)
+ }
+ }
+
+ // Call function.
+ res := fv.Call(args[:])[0].Convert(reflect.ValueOf(r).Type())
+
+ if res.Int() != r {
+ t.Errorf("%s%dx%d(%x,%x)=%x, want %x", op, test.valueWidth, test.shiftWidth, n, s, res.Int(), r)
+ }
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/ssa_test.go b/src/cmd/compile/internal/test/ssa_test.go
new file mode 100644
index 0000000..af7d962
--- /dev/null
+++ b/src/cmd/compile/internal/test/ssa_test.go
@@ -0,0 +1,191 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "bytes"
+ "fmt"
+ "go/ast"
+ "go/parser"
+ "go/token"
+ "internal/testenv"
+ "io/ioutil"
+ "os"
+ "os/exec"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+)
+
+// runGenTest runs a test generator, then runs the generated test.
+// The generated test can fail either in compilation or in execution.
+// The environment variable parameters ev are passed to the run
+// of the generated test.
+func runGenTest(t *testing.T, filename, tmpname string, ev ...string) {
+ testenv.MustHaveGoRun(t)
+ gotool := testenv.GoToolPath(t)
+ var stdout, stderr bytes.Buffer
+ cmd := exec.Command(gotool, "run", filepath.Join("testdata", filename))
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ if err := cmd.Run(); err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ // Write stdout into a temporary file
+ tmpdir, ok := ioutil.TempDir("", tmpname)
+ if ok != nil {
+ t.Fatalf("Failed to create temporary directory")
+ }
+ defer os.RemoveAll(tmpdir)
+
+ rungo := filepath.Join(tmpdir, "run.go")
+ ok = ioutil.WriteFile(rungo, stdout.Bytes(), 0600)
+ if ok != nil {
+		t.Fatalf("Failed to create temporary file %s", rungo)
+ }
+
+ stdout.Reset()
+ stderr.Reset()
+ cmd = exec.Command(gotool, "run", "-gcflags=-d=ssa/check/on", rungo)
+ cmd.Stdout = &stdout
+ cmd.Stderr = &stderr
+ cmd.Env = append(cmd.Env, ev...)
+ err := cmd.Run()
+ if err != nil {
+ t.Fatalf("Failed: %v:\nOut: %s\nStderr: %s\n", err, &stdout, &stderr)
+ }
+ if s := stderr.String(); s != "" {
+ t.Errorf("Stderr = %s\nWant empty", s)
+ }
+ if s := stdout.String(); s != "" {
+ t.Errorf("Stdout = %s\nWant empty", s)
+ }
+}
+
+func TestGenFlowGraph(t *testing.T) {
+ if testing.Short() {
+ t.Skip("not run in short mode.")
+ }
+ runGenTest(t, "flowgraph_generator1.go", "ssa_fg_tmp1")
+}
+
+// TestCode runs all the tests in the testdata directory as subtests.
+// These tests are special because we want to run them with different
+// compiler flags set (and thus they can't just be _test.go files in
+// this directory).
+func TestCode(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+ gotool := testenv.GoToolPath(t)
+
+ // Make a temporary directory to work in.
+ tmpdir, err := ioutil.TempDir("", "TestCode")
+ if err != nil {
+ t.Fatalf("Failed to create temporary directory: %v", err)
+ }
+ defer os.RemoveAll(tmpdir)
+
+ // Find all the test functions (and the files containing them).
+ var srcs []string // files containing Test functions
+ type test struct {
+ name string // TestFoo
+ usesFloat bool // might use float operations
+ }
+ var tests []test
+ files, err := ioutil.ReadDir("testdata")
+ if err != nil {
+ t.Fatalf("can't read testdata directory: %v", err)
+ }
+ for _, f := range files {
+ if !strings.HasSuffix(f.Name(), "_test.go") {
+ continue
+ }
+ text, err := ioutil.ReadFile(filepath.Join("testdata", f.Name()))
+ if err != nil {
+ t.Fatalf("can't read testdata/%s: %v", f.Name(), err)
+ }
+ fset := token.NewFileSet()
+ code, err := parser.ParseFile(fset, f.Name(), text, 0)
+ if err != nil {
+ t.Fatalf("can't parse testdata/%s: %v", f.Name(), err)
+ }
+ srcs = append(srcs, filepath.Join("testdata", f.Name()))
+ foundTest := false
+ for _, d := range code.Decls {
+ fd, ok := d.(*ast.FuncDecl)
+ if !ok {
+ continue
+ }
+ if !strings.HasPrefix(fd.Name.Name, "Test") {
+ continue
+ }
+ if fd.Recv != nil {
+ continue
+ }
+ if fd.Type.Results != nil {
+ continue
+ }
+ if len(fd.Type.Params.List) != 1 {
+ continue
+ }
+ p := fd.Type.Params.List[0]
+ if len(p.Names) != 1 {
+ continue
+ }
+ s, ok := p.Type.(*ast.StarExpr)
+ if !ok {
+ continue
+ }
+ sel, ok := s.X.(*ast.SelectorExpr)
+ if !ok {
+ continue
+ }
+ base, ok := sel.X.(*ast.Ident)
+ if !ok {
+ continue
+ }
+ if base.Name != "testing" {
+ continue
+ }
+ if sel.Sel.Name != "T" {
+ continue
+ }
+ // Found a testing function.
+ tests = append(tests, test{name: fd.Name.Name, usesFloat: bytes.Contains(text, []byte("float"))})
+ foundTest = true
+ }
+ if !foundTest {
+ t.Fatalf("test file testdata/%s has no tests in it", f.Name())
+ }
+ }
+
+ flags := []string{""}
+ if runtime.GOARCH == "arm" || runtime.GOARCH == "mips" || runtime.GOARCH == "mips64" || runtime.GOARCH == "386" {
+ flags = append(flags, ",softfloat")
+ }
+ for _, flag := range flags {
+ args := []string{"test", "-c", "-gcflags=-d=ssa/check/on" + flag, "-o", filepath.Join(tmpdir, "code.test")}
+ args = append(args, srcs...)
+ out, err := exec.Command(gotool, args...).CombinedOutput()
+ if err != nil || len(out) != 0 {
+ t.Fatalf("Build failed: %v\n%s\n", err, out)
+ }
+
+ // Now we have a test binary. Run it with all the tests as subtests of this one.
+ for _, test := range tests {
+ test := test
+ if flag == ",softfloat" && !test.usesFloat {
+ // No point in running the soft float version if the test doesn't use floats.
+ continue
+ }
+ t.Run(fmt.Sprintf("%s%s", test.name[4:], flag), func(t *testing.T) {
+ out, err := exec.Command(filepath.Join(tmpdir, "code.test"), "-test.run="+test.name).CombinedOutput()
+ if err != nil || string(out) != "PASS\n" {
+ t.Errorf("Failed:\n%s\n", out)
+ }
+ })
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/test.go b/src/cmd/compile/internal/test/test.go
new file mode 100644
index 0000000..56e5404
--- /dev/null
+++ b/src/cmd/compile/internal/test/test.go
@@ -0,0 +1 @@
+package test
diff --git a/src/cmd/compile/internal/test/testdata/addressed_test.go b/src/cmd/compile/internal/test/testdata/addressed_test.go
new file mode 100644
index 0000000..cdabf97
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/addressed_test.go
@@ -0,0 +1,210 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+var output string
+
+func mypanic(t *testing.T, s string) {
+	t.Fatalf("%s\n%s", s, output)
+}
+
+func assertEqual(t *testing.T, x, y int) {
+ if x != y {
+ mypanic(t, fmt.Sprintf("assertEqual failed got %d, want %d", x, y))
+ }
+}
+
+func TestAddressed(t *testing.T) {
+ x := f1_ssa(2, 3)
+ output += fmt.Sprintln("*x is", *x)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*x is", *x)
+ assertEqual(t, *x, 9)
+
+ w := f3a_ssa(6)
+ output += fmt.Sprintln("*w is", *w)
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*w is", *w)
+ assertEqual(t, *w, 6)
+
+ y := f3b_ssa(12)
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*y.(*int) is", *y.(*int))
+ assertEqual(t, *y.(*int), 12)
+
+ z := f3c_ssa(8)
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ output += fmt.Sprintln("Gratuitously use some stack")
+ output += fmt.Sprintln("*z.(*int) is", *z.(*int))
+ assertEqual(t, *z.(*int), 8)
+
+ args(t)
+ test_autos(t)
+}
+
+//go:noinline
+func f1_ssa(x, y int) *int {
+ x = x*y + y
+ return &x
+}
+
+//go:noinline
+func f3a_ssa(x int) *int {
+ return &x
+}
+
+//go:noinline
+func f3b_ssa(x int) interface{} { // ./foo.go:15: internal error: f3b_ssa ~r1 (type interface {}) recorded as live on entry
+ return &x
+}
+
+//go:noinline
+func f3c_ssa(y int) interface{} {
+ x := y
+ return &x
+}
+
+type V struct {
+ p *V
+ w, x int64
+}
+
+func args(t *testing.T) {
+ v := V{p: nil, w: 1, x: 1}
+ a := V{p: &v, w: 2, x: 2}
+ b := V{p: &v, w: 0, x: 0}
+ i := v.args_ssa(a, b)
+ output += fmt.Sprintln("i=", i)
+ assertEqual(t, int(i), 2)
+}
+
+//go:noinline
+func (v V) args_ssa(a, b V) int64 {
+ if v.w == 0 {
+ return v.x
+ }
+ if v.w == 1 {
+ return a.x
+ }
+ if v.w == 2 {
+ return b.x
+ }
+ b.p.p = &a // v.p in caller = &a
+
+ return -1
+}
+
+func test_autos(t *testing.T) {
+ test(t, 11)
+ test(t, 12)
+ test(t, 13)
+ test(t, 21)
+ test(t, 22)
+ test(t, 23)
+ test(t, 31)
+ test(t, 32)
+}
+
+func test(t *testing.T, which int64) {
+ output += fmt.Sprintln("test", which)
+ v1 := V{w: 30, x: 3, p: nil}
+ v2, v3 := v1.autos_ssa(which, 10, 1, 20, 2)
+ if which != v2.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v2.val()=", v2.val())
+ mypanic(t, "Failure of expected V value")
+ }
+ if v2.p.val() != v3.val() {
+ output += fmt.Sprintln("Expected v2.p.val()=", v2.p.val(), "got v3.val()=", v3.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+ if which != v3.p.p.p.p.p.p.p.val() {
+ output += fmt.Sprintln("Expected which=", which, "got v3.p.p.p.p.p.p.p.val()=", v3.p.p.p.p.p.p.p.val())
+ mypanic(t, "Failure of expected V.p value")
+ }
+}
+
+func (v V) val() int64 {
+ return v.w + v.x
+}
+
+// autos_ssa uses contents of v and parameters w1, w2, x1, x2
+// to initialize a bunch of locals, all of which have their
+// address taken to force heap allocation, and then based on
+// the value of which a pair of those locals are copied in
+// various ways to the two results y and z, which are also
+// addressed. The parameter which is expected to be one of 11-13, 21-23, 31, or 32,
+// and y.val() should be equal to which and y.p.val() should
+// be equal to z.val(). Also, x(.p)**8 == x; that is, the
+// autos are all linked into a ring.
+//go:noinline
+func (v V) autos_ssa(which, w1, x1, w2, x2 int64) (y, z V) {
+ fill_ssa(v.w, v.x, &v, v.p) // gratuitous no-op to force addressing
+ var a, b, c, d, e, f, g, h V
+ fill_ssa(w1, x1, &a, &b)
+ fill_ssa(w1, x2, &b, &c)
+ fill_ssa(w1, v.x, &c, &d)
+ fill_ssa(w2, x1, &d, &e)
+ fill_ssa(w2, x2, &e, &f)
+ fill_ssa(w2, v.x, &f, &g)
+ fill_ssa(v.w, x1, &g, &h)
+ fill_ssa(v.w, x2, &h, &a)
+ switch which {
+ case 11:
+ y = a
+ z.getsI(&b)
+ case 12:
+ y.gets(&b)
+ z = c
+ case 13:
+ y.gets(&c)
+ z = d
+ case 21:
+ y.getsI(&d)
+ z.gets(&e)
+ case 22:
+ y = e
+ z = f
+ case 23:
+ y.gets(&f)
+ z.getsI(&g)
+ case 31:
+ y = g
+ z.gets(&h)
+ case 32:
+ y.getsI(&h)
+ z = a
+ default:
+
+ panic("")
+ }
+ return
+}
+
+// gets is an address-mentioning way of implementing
+// structure assignment.
+//go:noinline
+func (to *V) gets(from *V) {
+ *to = *from
+}
+
+// getsI is an address-and-interface-mentioning way of
+// implementing structure assignment.
+//go:noinline
+func (to *V) getsI(from interface{}) {
+ *to = *from.(*V)
+}
+
+// fill_ssa initializes r with V{w:w, x:x, p:p}
+//go:noinline
+func fill_ssa(w, x int64, r, p *V) {
+ *r = V{w: w, x: x, p: p}
+}
diff --git a/src/cmd/compile/internal/test/testdata/append_test.go b/src/cmd/compile/internal/test/testdata/append_test.go
new file mode 100644
index 0000000..6663ce7
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/append_test.go
@@ -0,0 +1,61 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// append_test.go tests append operations.
+package main
+
+import "testing"
+
+//go:noinline
+func appendOne_ssa(a []int, x int) []int {
+ return append(a, x)
+}
+
+//go:noinline
+func appendThree_ssa(a []int, x, y, z int) []int {
+ return append(a, x, y, z)
+}
+
+func eqBytes(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i := range a {
+ if a[i] != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+func expect(t *testing.T, got, want []int) {
+ if eqBytes(got, want) {
+ return
+ }
+ t.Errorf("expected %v, got %v\n", want, got)
+}
+
+func testAppend(t *testing.T) {
+ var store [7]int
+ a := store[:0]
+
+ a = appendOne_ssa(a, 1)
+ expect(t, a, []int{1})
+ a = appendThree_ssa(a, 2, 3, 4)
+ expect(t, a, []int{1, 2, 3, 4})
+ a = appendThree_ssa(a, 5, 6, 7)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7})
+ if &a[0] != &store[0] {
+ t.Errorf("unnecessary grow")
+ }
+ a = appendOne_ssa(a, 8)
+ expect(t, a, []int{1, 2, 3, 4, 5, 6, 7, 8})
+ if &a[0] == &store[0] {
+ t.Errorf("didn't grow")
+ }
+}
+
+func TestAppend(t *testing.T) {
+ testAppend(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/arithBoundary_test.go b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go
new file mode 100644
index 0000000..777b7cd
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arithBoundary_test.go
@@ -0,0 +1,694 @@
+// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type utd64 struct {
+ a, b uint64
+ add, sub, mul, div, mod uint64
+}
+type itd64 struct {
+ a, b int64
+ add, sub, mul, div, mod int64
+}
+type utd32 struct {
+ a, b uint32
+ add, sub, mul, div, mod uint32
+}
+type itd32 struct {
+ a, b int32
+ add, sub, mul, div, mod int32
+}
+type utd16 struct {
+ a, b uint16
+ add, sub, mul, div, mod uint16
+}
+type itd16 struct {
+ a, b int16
+ add, sub, mul, div, mod int16
+}
+type utd8 struct {
+ a, b uint8
+ add, sub, mul, div, mod uint8
+}
+type itd8 struct {
+ a, b int8
+ add, sub, mul, div, mod int8
+}
+
+//go:noinline
+func add_uint64_ssa(a, b uint64) uint64 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint64_ssa(a, b uint64) uint64 {
+ return a - b
+}
+
+//go:noinline
+func div_uint64_ssa(a, b uint64) uint64 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint64_ssa(a, b uint64) uint64 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint64_ssa(a, b uint64) uint64 {
+ return a * b
+}
+
+//go:noinline
+func add_int64_ssa(a, b int64) int64 {
+ return a + b
+}
+
+//go:noinline
+func sub_int64_ssa(a, b int64) int64 {
+ return a - b
+}
+
+//go:noinline
+func div_int64_ssa(a, b int64) int64 {
+ return a / b
+}
+
+//go:noinline
+func mod_int64_ssa(a, b int64) int64 {
+ return a % b
+}
+
+//go:noinline
+func mul_int64_ssa(a, b int64) int64 {
+ return a * b
+}
+
+//go:noinline
+func add_uint32_ssa(a, b uint32) uint32 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint32_ssa(a, b uint32) uint32 {
+ return a - b
+}
+
+//go:noinline
+func div_uint32_ssa(a, b uint32) uint32 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint32_ssa(a, b uint32) uint32 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint32_ssa(a, b uint32) uint32 {
+ return a * b
+}
+
+//go:noinline
+func add_int32_ssa(a, b int32) int32 {
+ return a + b
+}
+
+//go:noinline
+func sub_int32_ssa(a, b int32) int32 {
+ return a - b
+}
+
+//go:noinline
+func div_int32_ssa(a, b int32) int32 {
+ return a / b
+}
+
+//go:noinline
+func mod_int32_ssa(a, b int32) int32 {
+ return a % b
+}
+
+//go:noinline
+func mul_int32_ssa(a, b int32) int32 {
+ return a * b
+}
+
+//go:noinline
+func add_uint16_ssa(a, b uint16) uint16 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint16_ssa(a, b uint16) uint16 {
+ return a - b
+}
+
+//go:noinline
+func div_uint16_ssa(a, b uint16) uint16 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint16_ssa(a, b uint16) uint16 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint16_ssa(a, b uint16) uint16 {
+ return a * b
+}
+
+//go:noinline
+func add_int16_ssa(a, b int16) int16 {
+ return a + b
+}
+
+//go:noinline
+func sub_int16_ssa(a, b int16) int16 {
+ return a - b
+}
+
+//go:noinline
+func div_int16_ssa(a, b int16) int16 {
+ return a / b
+}
+
+//go:noinline
+func mod_int16_ssa(a, b int16) int16 {
+ return a % b
+}
+
+//go:noinline
+func mul_int16_ssa(a, b int16) int16 {
+ return a * b
+}
+
+//go:noinline
+func add_uint8_ssa(a, b uint8) uint8 {
+ return a + b
+}
+
+//go:noinline
+func sub_uint8_ssa(a, b uint8) uint8 {
+ return a - b
+}
+
+//go:noinline
+func div_uint8_ssa(a, b uint8) uint8 {
+ return a / b
+}
+
+//go:noinline
+func mod_uint8_ssa(a, b uint8) uint8 {
+ return a % b
+}
+
+//go:noinline
+func mul_uint8_ssa(a, b uint8) uint8 {
+ return a * b
+}
+
+//go:noinline
+func add_int8_ssa(a, b int8) int8 {
+ return a + b
+}
+
+//go:noinline
+func sub_int8_ssa(a, b int8) int8 {
+ return a - b
+}
+
+//go:noinline
+func div_int8_ssa(a, b int8) int8 {
+ return a / b
+}
+
+//go:noinline
+func mod_int8_ssa(a, b int8) int8 {
+ return a % b
+}
+
+//go:noinline
+func mul_int8_ssa(a, b int8) int8 {
+ return a * b
+}
+
+var uint64_data []utd64 = []utd64{utd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd64{a: 0, b: 1, add: 1, sub: 18446744073709551615, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 4294967296, add: 4294967296, sub: 18446744069414584320, mul: 0, div: 0, mod: 0},
+ utd64{a: 0, b: 18446744073709551615, add: 18446744073709551615, sub: 1, mul: 0, div: 0, mod: 0},
+ utd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd64{a: 1, b: 4294967296, add: 4294967297, sub: 18446744069414584321, mul: 4294967296, div: 0, mod: 1},
+ utd64{a: 1, b: 18446744073709551615, add: 0, sub: 2, mul: 18446744073709551615, div: 0, mod: 1},
+ utd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ utd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ utd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ utd64{a: 4294967296, b: 18446744073709551615, add: 4294967295, sub: 4294967297, mul: 18446744069414584320, div: 0, mod: 4294967296},
+ utd64{a: 18446744073709551615, b: 0, add: 18446744073709551615, sub: 18446744073709551615, mul: 0},
+ utd64{a: 18446744073709551615, b: 1, add: 0, sub: 18446744073709551614, mul: 18446744073709551615, div: 18446744073709551615, mod: 0},
+ utd64{a: 18446744073709551615, b: 4294967296, add: 4294967295, sub: 18446744069414584319, mul: 18446744069414584320, div: 4294967295, mod: 4294967295},
+ utd64{a: 18446744073709551615, b: 18446744073709551615, add: 18446744073709551614, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int64_data []itd64 = []itd64{itd64{a: -9223372036854775808, b: -9223372036854775808, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -9223372036854775808, b: -9223372036854775807, add: 1, sub: -1, mul: -9223372036854775808, div: 1, mod: -1},
+ itd64{a: -9223372036854775808, b: -4294967296, add: 9223372032559808512, sub: -9223372032559808512, mul: 0, div: 2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: -1, add: 9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 0, add: -9223372036854775808, sub: -9223372036854775808, mul: 0},
+ itd64{a: -9223372036854775808, b: 1, add: -9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: -9223372036854775808, mod: 0},
+ itd64{a: -9223372036854775808, b: 4294967296, add: -9223372032559808512, sub: 9223372032559808512, mul: 0, div: -2147483648, mod: 0},
+ itd64{a: -9223372036854775808, b: 9223372036854775806, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd64{a: -9223372036854775808, b: 9223372036854775807, add: -1, sub: 1, mul: -9223372036854775808, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: -9223372036854775808, add: 1, sub: 1, mul: -9223372036854775808, div: 0, mod: -9223372036854775807},
+ itd64{a: -9223372036854775807, b: -9223372036854775807, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -9223372036854775807, b: -4294967296, add: 9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: -1, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 0, add: -9223372036854775807, sub: -9223372036854775807, mul: 0},
+ itd64{a: -9223372036854775807, b: 1, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: -9223372036854775807, b: 4294967296, add: -9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: -2147483647, mod: -4294967295},
+ itd64{a: -9223372036854775807, b: 9223372036854775806, add: -1, sub: 3, mul: 9223372036854775806, div: -1, mod: -1},
+ itd64{a: -9223372036854775807, b: 9223372036854775807, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: -4294967296, b: -9223372036854775808, add: 9223372032559808512, sub: 9223372032559808512, mul: 0, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -9223372036854775807, add: 9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: -4294967296, add: -8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: -4294967296, b: -1, add: -4294967297, sub: -4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: -4294967296, b: 0, add: -4294967296, sub: -4294967296, mul: 0},
+ itd64{a: -4294967296, b: 1, add: -4294967295, sub: -4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: -4294967296, b: 4294967296, add: 0, sub: -8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: -4294967296, b: 9223372036854775806, add: 9223372032559808510, sub: 9223372032559808514, mul: 8589934592, div: 0, mod: -4294967296},
+ itd64{a: -4294967296, b: 9223372036854775807, add: 9223372032559808511, sub: 9223372032559808513, mul: 4294967296, div: 0, mod: -4294967296},
+ itd64{a: -1, b: -9223372036854775808, add: 9223372036854775807, sub: 9223372036854775807, mul: -9223372036854775808, div: 0, mod: -1},
+ itd64{a: -1, b: -9223372036854775807, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 0, mod: -1},
+ itd64{a: -1, b: -4294967296, add: -4294967297, sub: 4294967295, mul: 4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd64{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: -1, b: 4294967296, add: 4294967295, sub: -4294967297, mul: -4294967296, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775806, add: 9223372036854775805, sub: -9223372036854775807, mul: -9223372036854775806, div: 0, mod: -1},
+ itd64{a: -1, b: 9223372036854775807, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: -1},
+ itd64{a: 0, b: -9223372036854775808, add: -9223372036854775808, sub: -9223372036854775808, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -9223372036854775807, add: -9223372036854775807, sub: 9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -4294967296, add: -4294967296, sub: 4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd64{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 4294967296, add: 4294967296, sub: -4294967296, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775806, add: 9223372036854775806, sub: -9223372036854775806, mul: 0, div: 0, mod: 0},
+ itd64{a: 0, b: 9223372036854775807, add: 9223372036854775807, sub: -9223372036854775807, mul: 0, div: 0, mod: 0},
+ itd64{a: 1, b: -9223372036854775808, add: -9223372036854775807, sub: -9223372036854775807, mul: -9223372036854775808, div: 0, mod: 1},
+ itd64{a: 1, b: -9223372036854775807, add: -9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: 0, mod: 1},
+ itd64{a: 1, b: -4294967296, add: -4294967295, sub: 4294967297, mul: -4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd64{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd64{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd64{a: 1, b: 4294967296, add: 4294967297, sub: -4294967295, mul: 4294967296, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775806, add: 9223372036854775807, sub: -9223372036854775805, mul: 9223372036854775806, div: 0, mod: 1},
+ itd64{a: 1, b: 9223372036854775807, add: -9223372036854775808, sub: -9223372036854775806, mul: 9223372036854775807, div: 0, mod: 1},
+ itd64{a: 4294967296, b: -9223372036854775808, add: -9223372032559808512, sub: -9223372032559808512, mul: 0, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -9223372036854775807, add: -9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: -4294967296, add: 0, sub: 8589934592, mul: 0, div: -1, mod: 0},
+ itd64{a: 4294967296, b: -1, add: 4294967295, sub: 4294967297, mul: -4294967296, div: -4294967296, mod: 0},
+ itd64{a: 4294967296, b: 0, add: 4294967296, sub: 4294967296, mul: 0},
+ itd64{a: 4294967296, b: 1, add: 4294967297, sub: 4294967295, mul: 4294967296, div: 4294967296, mod: 0},
+ itd64{a: 4294967296, b: 4294967296, add: 8589934592, sub: 0, mul: 0, div: 1, mod: 0},
+ itd64{a: 4294967296, b: 9223372036854775806, add: -9223372032559808514, sub: -9223372032559808510, mul: -8589934592, div: 0, mod: 4294967296},
+ itd64{a: 4294967296, b: 9223372036854775807, add: -9223372032559808513, sub: -9223372032559808511, mul: -4294967296, div: 0, mod: 4294967296},
+ itd64{a: 9223372036854775806, b: -9223372036854775808, add: -2, sub: -2, mul: 0, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -9223372036854775807, add: -1, sub: -3, mul: 9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775806, b: -4294967296, add: 9223372032559808510, sub: -9223372032559808514, mul: 8589934592, div: -2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: -1, add: 9223372036854775805, sub: 9223372036854775807, mul: -9223372036854775806, div: -9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 0, add: 9223372036854775806, sub: 9223372036854775806, mul: 0},
+ itd64{a: 9223372036854775806, b: 1, add: 9223372036854775807, sub: 9223372036854775805, mul: 9223372036854775806, div: 9223372036854775806, mod: 0},
+ itd64{a: 9223372036854775806, b: 4294967296, add: -9223372032559808514, sub: 9223372032559808510, mul: -8589934592, div: 2147483647, mod: 4294967294},
+ itd64{a: 9223372036854775806, b: 9223372036854775806, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd64{a: 9223372036854775806, b: 9223372036854775807, add: -3, sub: -1, mul: -9223372036854775806, div: 0, mod: 9223372036854775806},
+ itd64{a: 9223372036854775807, b: -9223372036854775808, add: -1, sub: -1, mul: -9223372036854775808, div: 0, mod: 9223372036854775807},
+ itd64{a: 9223372036854775807, b: -9223372036854775807, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd64{a: 9223372036854775807, b: -4294967296, add: 9223372032559808511, sub: -9223372032559808513, mul: 4294967296, div: -2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: -1, add: 9223372036854775806, sub: -9223372036854775808, mul: -9223372036854775807, div: -9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 0, add: 9223372036854775807, sub: 9223372036854775807, mul: 0},
+ itd64{a: 9223372036854775807, b: 1, add: -9223372036854775808, sub: 9223372036854775806, mul: 9223372036854775807, div: 9223372036854775807, mod: 0},
+ itd64{a: 9223372036854775807, b: 4294967296, add: -9223372032559808513, sub: 9223372032559808511, mul: -4294967296, div: 2147483647, mod: 4294967295},
+ itd64{a: 9223372036854775807, b: 9223372036854775806, add: -3, sub: 1, mul: -9223372036854775806, div: 1, mod: 1},
+ itd64{a: 9223372036854775807, b: 9223372036854775807, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint32_data []utd32 = []utd32{utd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd32{a: 0, b: 1, add: 1, sub: 4294967295, mul: 0, div: 0, mod: 0},
+ utd32{a: 0, b: 4294967295, add: 4294967295, sub: 1, mul: 0, div: 0, mod: 0},
+ utd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd32{a: 1, b: 4294967295, add: 0, sub: 2, mul: 4294967295, div: 0, mod: 1},
+ utd32{a: 4294967295, b: 0, add: 4294967295, sub: 4294967295, mul: 0},
+ utd32{a: 4294967295, b: 1, add: 0, sub: 4294967294, mul: 4294967295, div: 4294967295, mod: 0},
+ utd32{a: 4294967295, b: 4294967295, add: 4294967294, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int32_data []itd32 = []itd32{itd32{a: -2147483648, b: -2147483648, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd32{a: -2147483648, b: -2147483647, add: 1, sub: -1, mul: -2147483648, div: 1, mod: -1},
+ itd32{a: -2147483648, b: -1, add: 2147483647, sub: -2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 0, add: -2147483648, sub: -2147483648, mul: 0},
+ itd32{a: -2147483648, b: 1, add: -2147483647, sub: 2147483647, mul: -2147483648, div: -2147483648, mod: 0},
+ itd32{a: -2147483648, b: 2147483647, add: -1, sub: 1, mul: -2147483648, div: -1, mod: -1},
+ itd32{a: -2147483647, b: -2147483648, add: 1, sub: 1, mul: -2147483648, div: 0, mod: -2147483647},
+ itd32{a: -2147483647, b: -2147483647, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -2147483647, b: -1, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: -2147483647, b: 0, add: -2147483647, sub: -2147483647, mul: 0},
+ itd32{a: -2147483647, b: 1, add: -2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: -2147483647, b: 2147483647, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: -2147483648, add: 2147483647, sub: 2147483647, mul: -2147483648, div: 0, mod: -1},
+ itd32{a: -1, b: -2147483647, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 0, mod: -1},
+ itd32{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd32{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: -1, b: 2147483647, add: 2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: -1},
+ itd32{a: 0, b: -2147483648, add: -2147483648, sub: -2147483648, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -2147483647, add: -2147483647, sub: 2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd32{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd32{a: 0, b: 2147483647, add: 2147483647, sub: -2147483647, mul: 0, div: 0, mod: 0},
+ itd32{a: 1, b: -2147483648, add: -2147483647, sub: -2147483647, mul: -2147483648, div: 0, mod: 1},
+ itd32{a: 1, b: -2147483647, add: -2147483646, sub: -2147483648, mul: -2147483647, div: 0, mod: 1},
+ itd32{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd32{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd32{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd32{a: 1, b: 2147483647, add: -2147483648, sub: -2147483646, mul: 2147483647, div: 0, mod: 1},
+ itd32{a: 2147483647, b: -2147483648, add: -1, sub: -1, mul: -2147483648, div: 0, mod: 2147483647},
+ itd32{a: 2147483647, b: -2147483647, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd32{a: 2147483647, b: -1, add: 2147483646, sub: -2147483648, mul: -2147483647, div: -2147483647, mod: 0},
+ itd32{a: 2147483647, b: 0, add: 2147483647, sub: 2147483647, mul: 0},
+ itd32{a: 2147483647, b: 1, add: -2147483648, sub: 2147483646, mul: 2147483647, div: 2147483647, mod: 0},
+ itd32{a: 2147483647, b: 2147483647, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint16_data []utd16 = []utd16{utd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd16{a: 0, b: 1, add: 1, sub: 65535, mul: 0, div: 0, mod: 0},
+ utd16{a: 0, b: 65535, add: 65535, sub: 1, mul: 0, div: 0, mod: 0},
+ utd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd16{a: 1, b: 65535, add: 0, sub: 2, mul: 65535, div: 0, mod: 1},
+ utd16{a: 65535, b: 0, add: 65535, sub: 65535, mul: 0},
+ utd16{a: 65535, b: 1, add: 0, sub: 65534, mul: 65535, div: 65535, mod: 0},
+ utd16{a: 65535, b: 65535, add: 65534, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int16_data []itd16 = []itd16{itd16{a: -32768, b: -32768, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd16{a: -32768, b: -32767, add: 1, sub: -1, mul: -32768, div: 1, mod: -1},
+ itd16{a: -32768, b: -1, add: 32767, sub: -32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 0, add: -32768, sub: -32768, mul: 0},
+ itd16{a: -32768, b: 1, add: -32767, sub: 32767, mul: -32768, div: -32768, mod: 0},
+ itd16{a: -32768, b: 32766, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd16{a: -32768, b: 32767, add: -1, sub: 1, mul: -32768, div: -1, mod: -1},
+ itd16{a: -32767, b: -32768, add: 1, sub: 1, mul: -32768, div: 0, mod: -32767},
+ itd16{a: -32767, b: -32767, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -32767, b: -1, add: -32768, sub: -32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: -32767, b: 0, add: -32767, sub: -32767, mul: 0},
+ itd16{a: -32767, b: 1, add: -32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: -32767, b: 32766, add: -1, sub: 3, mul: 32766, div: -1, mod: -1},
+ itd16{a: -32767, b: 32767, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: -32768, add: 32767, sub: 32767, mul: -32768, div: 0, mod: -1},
+ itd16{a: -1, b: -32767, add: -32768, sub: 32766, mul: 32767, div: 0, mod: -1},
+ itd16{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd16{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: -1, b: 32766, add: 32765, sub: -32767, mul: -32766, div: 0, mod: -1},
+ itd16{a: -1, b: 32767, add: 32766, sub: -32768, mul: -32767, div: 0, mod: -1},
+ itd16{a: 0, b: -32768, add: -32768, sub: -32768, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -32767, add: -32767, sub: 32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd16{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32766, add: 32766, sub: -32766, mul: 0, div: 0, mod: 0},
+ itd16{a: 0, b: 32767, add: 32767, sub: -32767, mul: 0, div: 0, mod: 0},
+ itd16{a: 1, b: -32768, add: -32767, sub: -32767, mul: -32768, div: 0, mod: 1},
+ itd16{a: 1, b: -32767, add: -32766, sub: -32768, mul: -32767, div: 0, mod: 1},
+ itd16{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd16{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd16{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd16{a: 1, b: 32766, add: 32767, sub: -32765, mul: 32766, div: 0, mod: 1},
+ itd16{a: 1, b: 32767, add: -32768, sub: -32766, mul: 32767, div: 0, mod: 1},
+ itd16{a: 32766, b: -32768, add: -2, sub: -2, mul: 0, div: 0, mod: 32766},
+ itd16{a: 32766, b: -32767, add: -1, sub: -3, mul: 32766, div: 0, mod: 32766},
+ itd16{a: 32766, b: -1, add: 32765, sub: 32767, mul: -32766, div: -32766, mod: 0},
+ itd16{a: 32766, b: 0, add: 32766, sub: 32766, mul: 0},
+ itd16{a: 32766, b: 1, add: 32767, sub: 32765, mul: 32766, div: 32766, mod: 0},
+ itd16{a: 32766, b: 32766, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd16{a: 32766, b: 32767, add: -3, sub: -1, mul: -32766, div: 0, mod: 32766},
+ itd16{a: 32767, b: -32768, add: -1, sub: -1, mul: -32768, div: 0, mod: 32767},
+ itd16{a: 32767, b: -32767, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd16{a: 32767, b: -1, add: 32766, sub: -32768, mul: -32767, div: -32767, mod: 0},
+ itd16{a: 32767, b: 0, add: 32767, sub: 32767, mul: 0},
+ itd16{a: 32767, b: 1, add: -32768, sub: 32766, mul: 32767, div: 32767, mod: 0},
+ itd16{a: 32767, b: 32766, add: -3, sub: 1, mul: -32766, div: 1, mod: 1},
+ itd16{a: 32767, b: 32767, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var uint8_data []utd8 = []utd8{utd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ utd8{a: 0, b: 1, add: 1, sub: 255, mul: 0, div: 0, mod: 0},
+ utd8{a: 0, b: 255, add: 255, sub: 1, mul: 0, div: 0, mod: 0},
+ utd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ utd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ utd8{a: 1, b: 255, add: 0, sub: 2, mul: 255, div: 0, mod: 1},
+ utd8{a: 255, b: 0, add: 255, sub: 255, mul: 0},
+ utd8{a: 255, b: 1, add: 0, sub: 254, mul: 255, div: 255, mod: 0},
+ utd8{a: 255, b: 255, add: 254, sub: 0, mul: 1, div: 1, mod: 0},
+}
+var int8_data []itd8 = []itd8{itd8{a: -128, b: -128, add: 0, sub: 0, mul: 0, div: 1, mod: 0},
+ itd8{a: -128, b: -127, add: 1, sub: -1, mul: -128, div: 1, mod: -1},
+ itd8{a: -128, b: -1, add: 127, sub: -127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 0, add: -128, sub: -128, mul: 0},
+ itd8{a: -128, b: 1, add: -127, sub: 127, mul: -128, div: -128, mod: 0},
+ itd8{a: -128, b: 126, add: -2, sub: 2, mul: 0, div: -1, mod: -2},
+ itd8{a: -128, b: 127, add: -1, sub: 1, mul: -128, div: -1, mod: -1},
+ itd8{a: -127, b: -128, add: 1, sub: 1, mul: -128, div: 0, mod: -127},
+ itd8{a: -127, b: -127, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -127, b: -1, add: -128, sub: -126, mul: 127, div: 127, mod: 0},
+ itd8{a: -127, b: 0, add: -127, sub: -127, mul: 0},
+ itd8{a: -127, b: 1, add: -126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: -127, b: 126, add: -1, sub: 3, mul: 126, div: -1, mod: -1},
+ itd8{a: -127, b: 127, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: -128, add: 127, sub: 127, mul: -128, div: 0, mod: -1},
+ itd8{a: -1, b: -127, add: -128, sub: 126, mul: 127, div: 0, mod: -1},
+ itd8{a: -1, b: -1, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: -1, b: 0, add: -1, sub: -1, mul: 0},
+ itd8{a: -1, b: 1, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: -1, b: 126, add: 125, sub: -127, mul: -126, div: 0, mod: -1},
+ itd8{a: -1, b: 127, add: 126, sub: -128, mul: -127, div: 0, mod: -1},
+ itd8{a: 0, b: -128, add: -128, sub: -128, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -127, add: -127, sub: 127, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: -1, add: -1, sub: 1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 0, add: 0, sub: 0, mul: 0},
+ itd8{a: 0, b: 1, add: 1, sub: -1, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 126, add: 126, sub: -126, mul: 0, div: 0, mod: 0},
+ itd8{a: 0, b: 127, add: 127, sub: -127, mul: 0, div: 0, mod: 0},
+ itd8{a: 1, b: -128, add: -127, sub: -127, mul: -128, div: 0, mod: 1},
+ itd8{a: 1, b: -127, add: -126, sub: -128, mul: -127, div: 0, mod: 1},
+ itd8{a: 1, b: -1, add: 0, sub: 2, mul: -1, div: -1, mod: 0},
+ itd8{a: 1, b: 0, add: 1, sub: 1, mul: 0},
+ itd8{a: 1, b: 1, add: 2, sub: 0, mul: 1, div: 1, mod: 0},
+ itd8{a: 1, b: 126, add: 127, sub: -125, mul: 126, div: 0, mod: 1},
+ itd8{a: 1, b: 127, add: -128, sub: -126, mul: 127, div: 0, mod: 1},
+ itd8{a: 126, b: -128, add: -2, sub: -2, mul: 0, div: 0, mod: 126},
+ itd8{a: 126, b: -127, add: -1, sub: -3, mul: 126, div: 0, mod: 126},
+ itd8{a: 126, b: -1, add: 125, sub: 127, mul: -126, div: -126, mod: 0},
+ itd8{a: 126, b: 0, add: 126, sub: 126, mul: 0},
+ itd8{a: 126, b: 1, add: 127, sub: 125, mul: 126, div: 126, mod: 0},
+ itd8{a: 126, b: 126, add: -4, sub: 0, mul: 4, div: 1, mod: 0},
+ itd8{a: 126, b: 127, add: -3, sub: -1, mul: -126, div: 0, mod: 126},
+ itd8{a: 127, b: -128, add: -1, sub: -1, mul: -128, div: 0, mod: 127},
+ itd8{a: 127, b: -127, add: 0, sub: -2, mul: -1, div: -1, mod: 0},
+ itd8{a: 127, b: -1, add: 126, sub: -128, mul: -127, div: -127, mod: 0},
+ itd8{a: 127, b: 0, add: 127, sub: 127, mul: 0},
+ itd8{a: 127, b: 1, add: -128, sub: 126, mul: 127, div: 127, mod: 0},
+ itd8{a: 127, b: 126, add: -3, sub: 1, mul: -126, div: 1, mod: 1},
+ itd8{a: 127, b: 127, add: -2, sub: 0, mul: 1, div: 1, mod: 0},
+}
+
+// TestArithmeticBoundary tests boundary results for arithmetic operations.
+func TestArithmeticBoundary(t *testing.T) {
+
+ for _, v := range uint64_data {
+ if got := add_uint64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int64_data {
+ if got := add_int64_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int64 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int64_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int64 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int64_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int64 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int64_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int64 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int64_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int64 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint32_data {
+ if got := add_uint32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int32_data {
+ if got := add_int32_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int32 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int32_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int32 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int32_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int32 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int32_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int32 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int32_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int32 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint16_data {
+ if got := add_uint16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int16_data {
+ if got := add_int16_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int16 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int16_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int16 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int16_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int16 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int16_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int16 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int16_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int16 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range uint8_data {
+ if got := add_uint8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_uint8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_uint8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_uint8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_uint8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_uint8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_uint8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_uint8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_uint8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+ for _, v := range int8_data {
+ if got := add_int8_ssa(v.a, v.b); got != v.add {
+ t.Errorf("add_int8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+ }
+ if got := sub_int8_ssa(v.a, v.b); got != v.sub {
+ t.Errorf("sub_int8 %d-%d = %d, wanted %d\n", v.a, v.b, got, v.sub)
+ }
+ if v.b != 0 {
+ if got := div_int8_ssa(v.a, v.b); got != v.div {
+ t.Errorf("div_int8 %d/%d = %d, wanted %d\n", v.a, v.b, got, v.div)
+ }
+
+ }
+ if v.b != 0 {
+ if got := mod_int8_ssa(v.a, v.b); got != v.mod {
+ t.Errorf("mod_int8 %d%%%d = %d, wanted %d\n", v.a, v.b, got, v.mod)
+ }
+
+ }
+ if got := mul_int8_ssa(v.a, v.b); got != v.mul {
+ t.Errorf("mul_int8 %d*%d = %d, wanted %d\n", v.a, v.b, got, v.mul)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/arithConst_test.go b/src/cmd/compile/internal/test/testdata/arithConst_test.go
new file mode 100644
index 0000000..9f5ac61
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arithConst_test.go
@@ -0,0 +1,9570 @@
+// Code generated by gen/arithConstGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+//go:noinline
+func add_uint64_0(a uint64) uint64 { return a + 0 }
+
+//go:noinline
+func add_0_uint64(a uint64) uint64 { return 0 + a }
+
+//go:noinline
+func add_uint64_1(a uint64) uint64 { return a + 1 }
+
+//go:noinline
+func add_1_uint64(a uint64) uint64 { return 1 + a }
+
+//go:noinline
+func add_uint64_4294967296(a uint64) uint64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_uint64(a uint64) uint64 { return 4294967296 + a }
+
+//go:noinline
+func add_uint64_9223372036854775808(a uint64) uint64 { return a + 9223372036854775808 }
+
+//go:noinline
+func add_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 + a }
+
+//go:noinline
+func add_uint64_18446744073709551615(a uint64) uint64 { return a + 18446744073709551615 }
+
+//go:noinline
+func add_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 + a }
+
+//go:noinline
+func sub_uint64_0(a uint64) uint64 { return a - 0 }
+
+//go:noinline
+func sub_0_uint64(a uint64) uint64 { return 0 - a }
+
+//go:noinline
+func sub_uint64_1(a uint64) uint64 { return a - 1 }
+
+//go:noinline
+func sub_1_uint64(a uint64) uint64 { return 1 - a }
+
+//go:noinline
+func sub_uint64_4294967296(a uint64) uint64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_uint64(a uint64) uint64 { return 4294967296 - a }
+
+//go:noinline
+func sub_uint64_9223372036854775808(a uint64) uint64 { return a - 9223372036854775808 }
+
+//go:noinline
+func sub_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 - a }
+
+//go:noinline
+func sub_uint64_18446744073709551615(a uint64) uint64 { return a - 18446744073709551615 }
+
+//go:noinline
+func sub_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 - a }
+
+//go:noinline
+func div_0_uint64(a uint64) uint64 { return 0 / a }
+
+//go:noinline
+func div_uint64_1(a uint64) uint64 { return a / 1 }
+
+//go:noinline
+func div_1_uint64(a uint64) uint64 { return 1 / a }
+
+//go:noinline
+func div_uint64_4294967296(a uint64) uint64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_uint64(a uint64) uint64 { return 4294967296 / a }
+
+//go:noinline
+func div_uint64_9223372036854775808(a uint64) uint64 { return a / 9223372036854775808 }
+
+//go:noinline
+func div_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 / a }
+
+//go:noinline
+func div_uint64_18446744073709551615(a uint64) uint64 { return a / 18446744073709551615 }
+
+//go:noinline
+func div_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 / a }
+
+//go:noinline
+func mul_uint64_0(a uint64) uint64 { return a * 0 }
+
+//go:noinline
+func mul_0_uint64(a uint64) uint64 { return 0 * a }
+
+//go:noinline
+func mul_uint64_1(a uint64) uint64 { return a * 1 }
+
+//go:noinline
+func mul_1_uint64(a uint64) uint64 { return 1 * a }
+
+//go:noinline
+func mul_uint64_4294967296(a uint64) uint64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_uint64(a uint64) uint64 { return 4294967296 * a }
+
+//go:noinline
+func mul_uint64_9223372036854775808(a uint64) uint64 { return a * 9223372036854775808 }
+
+//go:noinline
+func mul_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 * a }
+
+//go:noinline
+func mul_uint64_18446744073709551615(a uint64) uint64 { return a * 18446744073709551615 }
+
+//go:noinline
+func mul_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 * a }
+
+//go:noinline
+func lsh_uint64_0(a uint64) uint64 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint64(a uint64) uint64 { return 0 << a }
+
+//go:noinline
+func lsh_uint64_1(a uint64) uint64 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint64(a uint64) uint64 { return 1 << a }
+
+//go:noinline
+func lsh_uint64_4294967296(a uint64) uint64 { return a << uint64(4294967296) }
+
+//go:noinline
+func lsh_4294967296_uint64(a uint64) uint64 { return 4294967296 << a }
+
+//go:noinline
+func lsh_uint64_9223372036854775808(a uint64) uint64 { return a << uint64(9223372036854775808) }
+
+//go:noinline
+func lsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 << a }
+
+//go:noinline
+func lsh_uint64_18446744073709551615(a uint64) uint64 { return a << uint64(18446744073709551615) }
+
+//go:noinline
+func lsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 << a }
+
+//go:noinline
+func rsh_uint64_0(a uint64) uint64 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint64(a uint64) uint64 { return 0 >> a }
+
+//go:noinline
+func rsh_uint64_1(a uint64) uint64 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint64(a uint64) uint64 { return 1 >> a }
+
+//go:noinline
+func rsh_uint64_4294967296(a uint64) uint64 { return a >> uint64(4294967296) }
+
+//go:noinline
+func rsh_4294967296_uint64(a uint64) uint64 { return 4294967296 >> a }
+
+//go:noinline
+func rsh_uint64_9223372036854775808(a uint64) uint64 { return a >> uint64(9223372036854775808) }
+
+//go:noinline
+func rsh_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 >> a }
+
+//go:noinline
+func rsh_uint64_18446744073709551615(a uint64) uint64 { return a >> uint64(18446744073709551615) }
+
+//go:noinline
+func rsh_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 >> a }
+
+//go:noinline
+func mod_0_uint64(a uint64) uint64 { return 0 % a }
+
+//go:noinline
+func mod_uint64_1(a uint64) uint64 { return a % 1 }
+
+//go:noinline
+func mod_1_uint64(a uint64) uint64 { return 1 % a }
+
+//go:noinline
+func mod_uint64_4294967296(a uint64) uint64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_uint64(a uint64) uint64 { return 4294967296 % a }
+
+//go:noinline
+func mod_uint64_9223372036854775808(a uint64) uint64 { return a % 9223372036854775808 }
+
+//go:noinline
+func mod_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 % a }
+
+//go:noinline
+func mod_uint64_18446744073709551615(a uint64) uint64 { return a % 18446744073709551615 }
+
+//go:noinline
+func mod_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 % a }
+
+//go:noinline
+func and_uint64_0(a uint64) uint64 { return a & 0 }
+
+//go:noinline
+func and_0_uint64(a uint64) uint64 { return 0 & a }
+
+//go:noinline
+func and_uint64_1(a uint64) uint64 { return a & 1 }
+
+//go:noinline
+func and_1_uint64(a uint64) uint64 { return 1 & a }
+
+//go:noinline
+func and_uint64_4294967296(a uint64) uint64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_uint64(a uint64) uint64 { return 4294967296 & a }
+
+//go:noinline
+func and_uint64_9223372036854775808(a uint64) uint64 { return a & 9223372036854775808 }
+
+//go:noinline
+func and_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 & a }
+
+//go:noinline
+func and_uint64_18446744073709551615(a uint64) uint64 { return a & 18446744073709551615 }
+
+//go:noinline
+func and_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 & a }
+
+//go:noinline
+func or_uint64_0(a uint64) uint64 { return a | 0 }
+
+//go:noinline
+func or_0_uint64(a uint64) uint64 { return 0 | a }
+
+//go:noinline
+func or_uint64_1(a uint64) uint64 { return a | 1 }
+
+//go:noinline
+func or_1_uint64(a uint64) uint64 { return 1 | a }
+
+//go:noinline
+func or_uint64_4294967296(a uint64) uint64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_uint64(a uint64) uint64 { return 4294967296 | a }
+
+//go:noinline
+func or_uint64_9223372036854775808(a uint64) uint64 { return a | 9223372036854775808 }
+
+//go:noinline
+func or_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 | a }
+
+//go:noinline
+func or_uint64_18446744073709551615(a uint64) uint64 { return a | 18446744073709551615 }
+
+//go:noinline
+func or_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 | a }
+
+//go:noinline
+func xor_uint64_0(a uint64) uint64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint64(a uint64) uint64 { return 0 ^ a }
+
+//go:noinline
+func xor_uint64_1(a uint64) uint64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint64(a uint64) uint64 { return 1 ^ a }
+
+//go:noinline
+func xor_uint64_4294967296(a uint64) uint64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_uint64(a uint64) uint64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_uint64_9223372036854775808(a uint64) uint64 { return a ^ 9223372036854775808 }
+
+//go:noinline
+func xor_9223372036854775808_uint64(a uint64) uint64 { return 9223372036854775808 ^ a }
+
+//go:noinline
+func xor_uint64_18446744073709551615(a uint64) uint64 { return a ^ 18446744073709551615 }
+
+//go:noinline
+func xor_18446744073709551615_uint64(a uint64) uint64 { return 18446744073709551615 ^ a }
+
+//go:noinline
+func mul_uint64_3(a uint64) uint64 { return a * 3 }
+
+//go:noinline
+func mul_3_uint64(a uint64) uint64 { return 3 * a }
+
+//go:noinline
+func mul_uint64_5(a uint64) uint64 { return a * 5 }
+
+//go:noinline
+func mul_5_uint64(a uint64) uint64 { return 5 * a }
+
+//go:noinline
+func mul_uint64_7(a uint64) uint64 { return a * 7 }
+
+//go:noinline
+func mul_7_uint64(a uint64) uint64 { return 7 * a }
+
+//go:noinline
+func mul_uint64_9(a uint64) uint64 { return a * 9 }
+
+//go:noinline
+func mul_9_uint64(a uint64) uint64 { return 9 * a }
+
+//go:noinline
+func mul_uint64_10(a uint64) uint64 { return a * 10 }
+
+//go:noinline
+func mul_10_uint64(a uint64) uint64 { return 10 * a }
+
+//go:noinline
+func mul_uint64_11(a uint64) uint64 { return a * 11 }
+
+//go:noinline
+func mul_11_uint64(a uint64) uint64 { return 11 * a }
+
+//go:noinline
+func mul_uint64_13(a uint64) uint64 { return a * 13 }
+
+//go:noinline
+func mul_13_uint64(a uint64) uint64 { return 13 * a }
+
+//go:noinline
+func mul_uint64_19(a uint64) uint64 { return a * 19 }
+
+//go:noinline
+func mul_19_uint64(a uint64) uint64 { return 19 * a }
+
+//go:noinline
+func mul_uint64_21(a uint64) uint64 { return a * 21 }
+
+//go:noinline
+func mul_21_uint64(a uint64) uint64 { return 21 * a }
+
+//go:noinline
+func mul_uint64_25(a uint64) uint64 { return a * 25 }
+
+//go:noinline
+func mul_25_uint64(a uint64) uint64 { return 25 * a }
+
+//go:noinline
+func mul_uint64_27(a uint64) uint64 { return a * 27 }
+
+//go:noinline
+func mul_27_uint64(a uint64) uint64 { return 27 * a }
+
+//go:noinline
+func mul_uint64_37(a uint64) uint64 { return a * 37 }
+
+//go:noinline
+func mul_37_uint64(a uint64) uint64 { return 37 * a }
+
+//go:noinline
+func mul_uint64_41(a uint64) uint64 { return a * 41 }
+
+//go:noinline
+func mul_41_uint64(a uint64) uint64 { return 41 * a }
+
+//go:noinline
+func mul_uint64_45(a uint64) uint64 { return a * 45 }
+
+//go:noinline
+func mul_45_uint64(a uint64) uint64 { return 45 * a }
+
+//go:noinline
+func mul_uint64_73(a uint64) uint64 { return a * 73 }
+
+//go:noinline
+func mul_73_uint64(a uint64) uint64 { return 73 * a }
+
+//go:noinline
+func mul_uint64_81(a uint64) uint64 { return a * 81 }
+
+//go:noinline
+func mul_81_uint64(a uint64) uint64 { return 81 * a }
+
+//go:noinline
+func add_int64_Neg9223372036854775808(a int64) int64 { return a + -9223372036854775808 }
+
+//go:noinline
+func add_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 + a }
+
+//go:noinline
+func add_int64_Neg9223372036854775807(a int64) int64 { return a + -9223372036854775807 }
+
+//go:noinline
+func add_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 + a }
+
+//go:noinline
+func add_int64_Neg4294967296(a int64) int64 { return a + -4294967296 }
+
+//go:noinline
+func add_Neg4294967296_int64(a int64) int64 { return -4294967296 + a }
+
+//go:noinline
+func add_int64_Neg1(a int64) int64 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int64(a int64) int64 { return -1 + a }
+
+//go:noinline
+func add_int64_0(a int64) int64 { return a + 0 }
+
+//go:noinline
+func add_0_int64(a int64) int64 { return 0 + a }
+
+//go:noinline
+func add_int64_1(a int64) int64 { return a + 1 }
+
+//go:noinline
+func add_1_int64(a int64) int64 { return 1 + a }
+
+//go:noinline
+func add_int64_4294967296(a int64) int64 { return a + 4294967296 }
+
+//go:noinline
+func add_4294967296_int64(a int64) int64 { return 4294967296 + a }
+
+//go:noinline
+func add_int64_9223372036854775806(a int64) int64 { return a + 9223372036854775806 }
+
+//go:noinline
+func add_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 + a }
+
+//go:noinline
+func add_int64_9223372036854775807(a int64) int64 { return a + 9223372036854775807 }
+
+//go:noinline
+func add_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 + a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775808(a int64) int64 { return a - -9223372036854775808 }
+
+//go:noinline
+func sub_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 - a }
+
+//go:noinline
+func sub_int64_Neg9223372036854775807(a int64) int64 { return a - -9223372036854775807 }
+
+//go:noinline
+func sub_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 - a }
+
+//go:noinline
+func sub_int64_Neg4294967296(a int64) int64 { return a - -4294967296 }
+
+//go:noinline
+func sub_Neg4294967296_int64(a int64) int64 { return -4294967296 - a }
+
+//go:noinline
+func sub_int64_Neg1(a int64) int64 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int64(a int64) int64 { return -1 - a }
+
+//go:noinline
+func sub_int64_0(a int64) int64 { return a - 0 }
+
+//go:noinline
+func sub_0_int64(a int64) int64 { return 0 - a }
+
+//go:noinline
+func sub_int64_1(a int64) int64 { return a - 1 }
+
+//go:noinline
+func sub_1_int64(a int64) int64 { return 1 - a }
+
+//go:noinline
+func sub_int64_4294967296(a int64) int64 { return a - 4294967296 }
+
+//go:noinline
+func sub_4294967296_int64(a int64) int64 { return 4294967296 - a }
+
+//go:noinline
+func sub_int64_9223372036854775806(a int64) int64 { return a - 9223372036854775806 }
+
+//go:noinline
+func sub_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 - a }
+
+//go:noinline
+func sub_int64_9223372036854775807(a int64) int64 { return a - 9223372036854775807 }
+
+//go:noinline
+func sub_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 - a }
+
+//go:noinline
+func div_int64_Neg9223372036854775808(a int64) int64 { return a / -9223372036854775808 }
+
+//go:noinline
+func div_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 / a }
+
+//go:noinline
+func div_int64_Neg9223372036854775807(a int64) int64 { return a / -9223372036854775807 }
+
+//go:noinline
+func div_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 / a }
+
+//go:noinline
+func div_int64_Neg4294967296(a int64) int64 { return a / -4294967296 }
+
+//go:noinline
+func div_Neg4294967296_int64(a int64) int64 { return -4294967296 / a }
+
+//go:noinline
+func div_int64_Neg1(a int64) int64 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int64(a int64) int64 { return -1 / a }
+
+//go:noinline
+func div_0_int64(a int64) int64 { return 0 / a }
+
+//go:noinline
+func div_int64_1(a int64) int64 { return a / 1 }
+
+//go:noinline
+func div_1_int64(a int64) int64 { return 1 / a }
+
+//go:noinline
+func div_int64_4294967296(a int64) int64 { return a / 4294967296 }
+
+//go:noinline
+func div_4294967296_int64(a int64) int64 { return 4294967296 / a }
+
+//go:noinline
+func div_int64_9223372036854775806(a int64) int64 { return a / 9223372036854775806 }
+
+//go:noinline
+func div_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 / a }
+
+//go:noinline
+func div_int64_9223372036854775807(a int64) int64 { return a / 9223372036854775807 }
+
+//go:noinline
+func div_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 / a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775808(a int64) int64 { return a * -9223372036854775808 }
+
+//go:noinline
+func mul_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 * a }
+
+//go:noinline
+func mul_int64_Neg9223372036854775807(a int64) int64 { return a * -9223372036854775807 }
+
+//go:noinline
+func mul_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 * a }
+
+//go:noinline
+func mul_int64_Neg4294967296(a int64) int64 { return a * -4294967296 }
+
+//go:noinline
+func mul_Neg4294967296_int64(a int64) int64 { return -4294967296 * a }
+
+//go:noinline
+func mul_int64_Neg1(a int64) int64 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int64(a int64) int64 { return -1 * a }
+
+//go:noinline
+func mul_int64_0(a int64) int64 { return a * 0 }
+
+//go:noinline
+func mul_0_int64(a int64) int64 { return 0 * a }
+
+//go:noinline
+func mul_int64_1(a int64) int64 { return a * 1 }
+
+//go:noinline
+func mul_1_int64(a int64) int64 { return 1 * a }
+
+//go:noinline
+func mul_int64_4294967296(a int64) int64 { return a * 4294967296 }
+
+//go:noinline
+func mul_4294967296_int64(a int64) int64 { return 4294967296 * a }
+
+//go:noinline
+func mul_int64_9223372036854775806(a int64) int64 { return a * 9223372036854775806 }
+
+//go:noinline
+func mul_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 * a }
+
+//go:noinline
+func mul_int64_9223372036854775807(a int64) int64 { return a * 9223372036854775807 }
+
+//go:noinline
+func mul_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 * a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775808(a int64) int64 { return a % -9223372036854775808 }
+
+//go:noinline
+func mod_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 % a }
+
+//go:noinline
+func mod_int64_Neg9223372036854775807(a int64) int64 { return a % -9223372036854775807 }
+
+//go:noinline
+func mod_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 % a }
+
+//go:noinline
+func mod_int64_Neg4294967296(a int64) int64 { return a % -4294967296 }
+
+//go:noinline
+func mod_Neg4294967296_int64(a int64) int64 { return -4294967296 % a }
+
+//go:noinline
+func mod_int64_Neg1(a int64) int64 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int64(a int64) int64 { return -1 % a }
+
+//go:noinline
+func mod_0_int64(a int64) int64 { return 0 % a }
+
+//go:noinline
+func mod_int64_1(a int64) int64 { return a % 1 }
+
+//go:noinline
+func mod_1_int64(a int64) int64 { return 1 % a }
+
+//go:noinline
+func mod_int64_4294967296(a int64) int64 { return a % 4294967296 }
+
+//go:noinline
+func mod_4294967296_int64(a int64) int64 { return 4294967296 % a }
+
+//go:noinline
+func mod_int64_9223372036854775806(a int64) int64 { return a % 9223372036854775806 }
+
+//go:noinline
+func mod_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 % a }
+
+//go:noinline
+func mod_int64_9223372036854775807(a int64) int64 { return a % 9223372036854775807 }
+
+//go:noinline
+func mod_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 % a }
+
+//go:noinline
+func and_int64_Neg9223372036854775808(a int64) int64 { return a & -9223372036854775808 }
+
+//go:noinline
+func and_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 & a }
+
+//go:noinline
+func and_int64_Neg9223372036854775807(a int64) int64 { return a & -9223372036854775807 }
+
+//go:noinline
+func and_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 & a }
+
+//go:noinline
+func and_int64_Neg4294967296(a int64) int64 { return a & -4294967296 }
+
+//go:noinline
+func and_Neg4294967296_int64(a int64) int64 { return -4294967296 & a }
+
+//go:noinline
+func and_int64_Neg1(a int64) int64 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int64(a int64) int64 { return -1 & a }
+
+//go:noinline
+func and_int64_0(a int64) int64 { return a & 0 }
+
+//go:noinline
+func and_0_int64(a int64) int64 { return 0 & a }
+
+//go:noinline
+func and_int64_1(a int64) int64 { return a & 1 }
+
+//go:noinline
+func and_1_int64(a int64) int64 { return 1 & a }
+
+//go:noinline
+func and_int64_4294967296(a int64) int64 { return a & 4294967296 }
+
+//go:noinline
+func and_4294967296_int64(a int64) int64 { return 4294967296 & a }
+
+//go:noinline
+func and_int64_9223372036854775806(a int64) int64 { return a & 9223372036854775806 }
+
+//go:noinline
+func and_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 & a }
+
+//go:noinline
+func and_int64_9223372036854775807(a int64) int64 { return a & 9223372036854775807 }
+
+//go:noinline
+func and_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 & a }
+
+//go:noinline
+func or_int64_Neg9223372036854775808(a int64) int64 { return a | -9223372036854775808 }
+
+//go:noinline
+func or_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 | a }
+
+//go:noinline
+func or_int64_Neg9223372036854775807(a int64) int64 { return a | -9223372036854775807 }
+
+//go:noinline
+func or_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 | a }
+
+//go:noinline
+func or_int64_Neg4294967296(a int64) int64 { return a | -4294967296 }
+
+//go:noinline
+func or_Neg4294967296_int64(a int64) int64 { return -4294967296 | a }
+
+//go:noinline
+func or_int64_Neg1(a int64) int64 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int64(a int64) int64 { return -1 | a }
+
+//go:noinline
+func or_int64_0(a int64) int64 { return a | 0 }
+
+//go:noinline
+func or_0_int64(a int64) int64 { return 0 | a }
+
+//go:noinline
+func or_int64_1(a int64) int64 { return a | 1 }
+
+//go:noinline
+func or_1_int64(a int64) int64 { return 1 | a }
+
+//go:noinline
+func or_int64_4294967296(a int64) int64 { return a | 4294967296 }
+
+//go:noinline
+func or_4294967296_int64(a int64) int64 { return 4294967296 | a }
+
+//go:noinline
+func or_int64_9223372036854775806(a int64) int64 { return a | 9223372036854775806 }
+
+//go:noinline
+func or_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 | a }
+
+//go:noinline
+func or_int64_9223372036854775807(a int64) int64 { return a | 9223372036854775807 }
+
+//go:noinline
+func or_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 | a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775808(a int64) int64 { return a ^ -9223372036854775808 }
+
+//go:noinline
+func xor_Neg9223372036854775808_int64(a int64) int64 { return -9223372036854775808 ^ a }
+
+//go:noinline
+func xor_int64_Neg9223372036854775807(a int64) int64 { return a ^ -9223372036854775807 }
+
+//go:noinline
+func xor_Neg9223372036854775807_int64(a int64) int64 { return -9223372036854775807 ^ a }
+
+//go:noinline
+func xor_int64_Neg4294967296(a int64) int64 { return a ^ -4294967296 }
+
+//go:noinline
+func xor_Neg4294967296_int64(a int64) int64 { return -4294967296 ^ a }
+
+//go:noinline
+func xor_int64_Neg1(a int64) int64 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int64(a int64) int64 { return -1 ^ a }
+
+//go:noinline
+func xor_int64_0(a int64) int64 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int64(a int64) int64 { return 0 ^ a }
+
+//go:noinline
+func xor_int64_1(a int64) int64 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int64(a int64) int64 { return 1 ^ a }
+
+//go:noinline
+func xor_int64_4294967296(a int64) int64 { return a ^ 4294967296 }
+
+//go:noinline
+func xor_4294967296_int64(a int64) int64 { return 4294967296 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775806(a int64) int64 { return a ^ 9223372036854775806 }
+
+//go:noinline
+func xor_9223372036854775806_int64(a int64) int64 { return 9223372036854775806 ^ a }
+
+//go:noinline
+func xor_int64_9223372036854775807(a int64) int64 { return a ^ 9223372036854775807 }
+
+//go:noinline
+func xor_9223372036854775807_int64(a int64) int64 { return 9223372036854775807 ^ a }
+
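+// The extra multipliers below (-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21,
+// 25, 27, 37, 41, 45, 73, 81) are small constants that the compiler may lower
+// to shift-and-add sequences rather than a general multiply, so they are
+// exercised separately from the boundary values above.
+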
+//go:noinline
+func mul_int64_Neg9(a int64) int64 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int64(a int64) int64 { return -9 * a }
+
+//go:noinline
+func mul_int64_Neg5(a int64) int64 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int64(a int64) int64 { return -5 * a }
+
+//go:noinline
+func mul_int64_Neg3(a int64) int64 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int64(a int64) int64 { return -3 * a }
+
+//go:noinline
+func mul_int64_3(a int64) int64 { return a * 3 }
+
+//go:noinline
+func mul_3_int64(a int64) int64 { return 3 * a }
+
+//go:noinline
+func mul_int64_5(a int64) int64 { return a * 5 }
+
+//go:noinline
+func mul_5_int64(a int64) int64 { return 5 * a }
+
+//go:noinline
+func mul_int64_7(a int64) int64 { return a * 7 }
+
+//go:noinline
+func mul_7_int64(a int64) int64 { return 7 * a }
+
+//go:noinline
+func mul_int64_9(a int64) int64 { return a * 9 }
+
+//go:noinline
+func mul_9_int64(a int64) int64 { return 9 * a }
+
+//go:noinline
+func mul_int64_10(a int64) int64 { return a * 10 }
+
+//go:noinline
+func mul_10_int64(a int64) int64 { return 10 * a }
+
+//go:noinline
+func mul_int64_11(a int64) int64 { return a * 11 }
+
+//go:noinline
+func mul_11_int64(a int64) int64 { return 11 * a }
+
+//go:noinline
+func mul_int64_13(a int64) int64 { return a * 13 }
+
+//go:noinline
+func mul_13_int64(a int64) int64 { return 13 * a }
+
+//go:noinline
+func mul_int64_19(a int64) int64 { return a * 19 }
+
+//go:noinline
+func mul_19_int64(a int64) int64 { return 19 * a }
+
+//go:noinline
+func mul_int64_21(a int64) int64 { return a * 21 }
+
+//go:noinline
+func mul_21_int64(a int64) int64 { return 21 * a }
+
+//go:noinline
+func mul_int64_25(a int64) int64 { return a * 25 }
+
+//go:noinline
+func mul_25_int64(a int64) int64 { return 25 * a }
+
+//go:noinline
+func mul_int64_27(a int64) int64 { return a * 27 }
+
+//go:noinline
+func mul_27_int64(a int64) int64 { return 27 * a }
+
+//go:noinline
+func mul_int64_37(a int64) int64 { return a * 37 }
+
+//go:noinline
+func mul_37_int64(a int64) int64 { return 37 * a }
+
+//go:noinline
+func mul_int64_41(a int64) int64 { return a * 41 }
+
+//go:noinline
+func mul_41_int64(a int64) int64 { return 41 * a }
+
+//go:noinline
+func mul_int64_45(a int64) int64 { return a * 45 }
+
+//go:noinline
+func mul_45_int64(a int64) int64 { return 45 * a }
+
+//go:noinline
+func mul_int64_73(a int64) int64 { return a * 73 }
+
+//go:noinline
+func mul_73_int64(a int64) int64 { return 73 * a }
+
+//go:noinline
+func mul_int64_81(a int64) int64 { return a * 81 }
+
+//go:noinline
+func mul_81_int64(a int64) int64 { return 81 * a }
+
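+// The same set of wrappers is repeated below for the remaining operand types:
+// uint32, int32, uint16, int16, uint8, and int8.
+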
+//go:noinline
+func add_uint32_0(a uint32) uint32 { return a + 0 }
+
+//go:noinline
+func add_0_uint32(a uint32) uint32 { return 0 + a }
+
+//go:noinline
+func add_uint32_1(a uint32) uint32 { return a + 1 }
+
+//go:noinline
+func add_1_uint32(a uint32) uint32 { return 1 + a }
+
+//go:noinline
+func add_uint32_4294967295(a uint32) uint32 { return a + 4294967295 }
+
+//go:noinline
+func add_4294967295_uint32(a uint32) uint32 { return 4294967295 + a }
+
+//go:noinline
+func sub_uint32_0(a uint32) uint32 { return a - 0 }
+
+//go:noinline
+func sub_0_uint32(a uint32) uint32 { return 0 - a }
+
+//go:noinline
+func sub_uint32_1(a uint32) uint32 { return a - 1 }
+
+//go:noinline
+func sub_1_uint32(a uint32) uint32 { return 1 - a }
+
+//go:noinline
+func sub_uint32_4294967295(a uint32) uint32 { return a - 4294967295 }
+
+//go:noinline
+func sub_4294967295_uint32(a uint32) uint32 { return 4294967295 - a }
+
+//go:noinline
+func div_0_uint32(a uint32) uint32 { return 0 / a }
+
+//go:noinline
+func div_uint32_1(a uint32) uint32 { return a / 1 }
+
+//go:noinline
+func div_1_uint32(a uint32) uint32 { return 1 / a }
+
+//go:noinline
+func div_uint32_4294967295(a uint32) uint32 { return a / 4294967295 }
+
+//go:noinline
+func div_4294967295_uint32(a uint32) uint32 { return 4294967295 / a }
+
+//go:noinline
+func mul_uint32_0(a uint32) uint32 { return a * 0 }
+
+//go:noinline
+func mul_0_uint32(a uint32) uint32 { return 0 * a }
+
+//go:noinline
+func mul_uint32_1(a uint32) uint32 { return a * 1 }
+
+//go:noinline
+func mul_1_uint32(a uint32) uint32 { return 1 * a }
+
+//go:noinline
+func mul_uint32_4294967295(a uint32) uint32 { return a * 4294967295 }
+
+//go:noinline
+func mul_4294967295_uint32(a uint32) uint32 { return 4294967295 * a }
+
+//go:noinline
+func lsh_uint32_0(a uint32) uint32 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint32(a uint32) uint32 { return 0 << a }
+
+//go:noinline
+func lsh_uint32_1(a uint32) uint32 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint32(a uint32) uint32 { return 1 << a }
+
+//go:noinline
+func lsh_uint32_4294967295(a uint32) uint32 { return a << 4294967295 }
+
+//go:noinline
+func lsh_4294967295_uint32(a uint32) uint32 { return 4294967295 << a }
+
+//go:noinline
+func rsh_uint32_0(a uint32) uint32 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint32(a uint32) uint32 { return 0 >> a }
+
+//go:noinline
+func rsh_uint32_1(a uint32) uint32 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint32(a uint32) uint32 { return 1 >> a }
+
+//go:noinline
+func rsh_uint32_4294967295(a uint32) uint32 { return a >> 4294967295 }
+
+//go:noinline
+func rsh_4294967295_uint32(a uint32) uint32 { return 4294967295 >> a }
+
+//go:noinline
+func mod_0_uint32(a uint32) uint32 { return 0 % a }
+
+//go:noinline
+func mod_uint32_1(a uint32) uint32 { return a % 1 }
+
+//go:noinline
+func mod_1_uint32(a uint32) uint32 { return 1 % a }
+
+//go:noinline
+func mod_uint32_4294967295(a uint32) uint32 { return a % 4294967295 }
+
+//go:noinline
+func mod_4294967295_uint32(a uint32) uint32 { return 4294967295 % a }
+
+//go:noinline
+func and_uint32_0(a uint32) uint32 { return a & 0 }
+
+//go:noinline
+func and_0_uint32(a uint32) uint32 { return 0 & a }
+
+//go:noinline
+func and_uint32_1(a uint32) uint32 { return a & 1 }
+
+//go:noinline
+func and_1_uint32(a uint32) uint32 { return 1 & a }
+
+//go:noinline
+func and_uint32_4294967295(a uint32) uint32 { return a & 4294967295 }
+
+//go:noinline
+func and_4294967295_uint32(a uint32) uint32 { return 4294967295 & a }
+
+//go:noinline
+func or_uint32_0(a uint32) uint32 { return a | 0 }
+
+//go:noinline
+func or_0_uint32(a uint32) uint32 { return 0 | a }
+
+//go:noinline
+func or_uint32_1(a uint32) uint32 { return a | 1 }
+
+//go:noinline
+func or_1_uint32(a uint32) uint32 { return 1 | a }
+
+//go:noinline
+func or_uint32_4294967295(a uint32) uint32 { return a | 4294967295 }
+
+//go:noinline
+func or_4294967295_uint32(a uint32) uint32 { return 4294967295 | a }
+
+//go:noinline
+func xor_uint32_0(a uint32) uint32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint32(a uint32) uint32 { return 0 ^ a }
+
+//go:noinline
+func xor_uint32_1(a uint32) uint32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint32(a uint32) uint32 { return 1 ^ a }
+
+//go:noinline
+func xor_uint32_4294967295(a uint32) uint32 { return a ^ 4294967295 }
+
+//go:noinline
+func xor_4294967295_uint32(a uint32) uint32 { return 4294967295 ^ a }
+
+//go:noinline
+func mul_uint32_3(a uint32) uint32 { return a * 3 }
+
+//go:noinline
+func mul_3_uint32(a uint32) uint32 { return 3 * a }
+
+//go:noinline
+func mul_uint32_5(a uint32) uint32 { return a * 5 }
+
+//go:noinline
+func mul_5_uint32(a uint32) uint32 { return 5 * a }
+
+//go:noinline
+func mul_uint32_7(a uint32) uint32 { return a * 7 }
+
+//go:noinline
+func mul_7_uint32(a uint32) uint32 { return 7 * a }
+
+//go:noinline
+func mul_uint32_9(a uint32) uint32 { return a * 9 }
+
+//go:noinline
+func mul_9_uint32(a uint32) uint32 { return 9 * a }
+
+//go:noinline
+func mul_uint32_10(a uint32) uint32 { return a * 10 }
+
+//go:noinline
+func mul_10_uint32(a uint32) uint32 { return 10 * a }
+
+//go:noinline
+func mul_uint32_11(a uint32) uint32 { return a * 11 }
+
+//go:noinline
+func mul_11_uint32(a uint32) uint32 { return 11 * a }
+
+//go:noinline
+func mul_uint32_13(a uint32) uint32 { return a * 13 }
+
+//go:noinline
+func mul_13_uint32(a uint32) uint32 { return 13 * a }
+
+//go:noinline
+func mul_uint32_19(a uint32) uint32 { return a * 19 }
+
+//go:noinline
+func mul_19_uint32(a uint32) uint32 { return 19 * a }
+
+//go:noinline
+func mul_uint32_21(a uint32) uint32 { return a * 21 }
+
+//go:noinline
+func mul_21_uint32(a uint32) uint32 { return 21 * a }
+
+//go:noinline
+func mul_uint32_25(a uint32) uint32 { return a * 25 }
+
+//go:noinline
+func mul_25_uint32(a uint32) uint32 { return 25 * a }
+
+//go:noinline
+func mul_uint32_27(a uint32) uint32 { return a * 27 }
+
+//go:noinline
+func mul_27_uint32(a uint32) uint32 { return 27 * a }
+
+//go:noinline
+func mul_uint32_37(a uint32) uint32 { return a * 37 }
+
+//go:noinline
+func mul_37_uint32(a uint32) uint32 { return 37 * a }
+
+//go:noinline
+func mul_uint32_41(a uint32) uint32 { return a * 41 }
+
+//go:noinline
+func mul_41_uint32(a uint32) uint32 { return 41 * a }
+
+//go:noinline
+func mul_uint32_45(a uint32) uint32 { return a * 45 }
+
+//go:noinline
+func mul_45_uint32(a uint32) uint32 { return 45 * a }
+
+//go:noinline
+func mul_uint32_73(a uint32) uint32 { return a * 73 }
+
+//go:noinline
+func mul_73_uint32(a uint32) uint32 { return 73 * a }
+
+//go:noinline
+func mul_uint32_81(a uint32) uint32 { return a * 81 }
+
+//go:noinline
+func mul_81_uint32(a uint32) uint32 { return 81 * a }
+
+//go:noinline
+func add_int32_Neg2147483648(a int32) int32 { return a + -2147483648 }
+
+//go:noinline
+func add_Neg2147483648_int32(a int32) int32 { return -2147483648 + a }
+
+//go:noinline
+func add_int32_Neg2147483647(a int32) int32 { return a + -2147483647 }
+
+//go:noinline
+func add_Neg2147483647_int32(a int32) int32 { return -2147483647 + a }
+
+//go:noinline
+func add_int32_Neg1(a int32) int32 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int32(a int32) int32 { return -1 + a }
+
+//go:noinline
+func add_int32_0(a int32) int32 { return a + 0 }
+
+//go:noinline
+func add_0_int32(a int32) int32 { return 0 + a }
+
+//go:noinline
+func add_int32_1(a int32) int32 { return a + 1 }
+
+//go:noinline
+func add_1_int32(a int32) int32 { return 1 + a }
+
+//go:noinline
+func add_int32_2147483647(a int32) int32 { return a + 2147483647 }
+
+//go:noinline
+func add_2147483647_int32(a int32) int32 { return 2147483647 + a }
+
+//go:noinline
+func sub_int32_Neg2147483648(a int32) int32 { return a - -2147483648 }
+
+//go:noinline
+func sub_Neg2147483648_int32(a int32) int32 { return -2147483648 - a }
+
+//go:noinline
+func sub_int32_Neg2147483647(a int32) int32 { return a - -2147483647 }
+
+//go:noinline
+func sub_Neg2147483647_int32(a int32) int32 { return -2147483647 - a }
+
+//go:noinline
+func sub_int32_Neg1(a int32) int32 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int32(a int32) int32 { return -1 - a }
+
+//go:noinline
+func sub_int32_0(a int32) int32 { return a - 0 }
+
+//go:noinline
+func sub_0_int32(a int32) int32 { return 0 - a }
+
+//go:noinline
+func sub_int32_1(a int32) int32 { return a - 1 }
+
+//go:noinline
+func sub_1_int32(a int32) int32 { return 1 - a }
+
+//go:noinline
+func sub_int32_2147483647(a int32) int32 { return a - 2147483647 }
+
+//go:noinline
+func sub_2147483647_int32(a int32) int32 { return 2147483647 - a }
+
+//go:noinline
+func div_int32_Neg2147483648(a int32) int32 { return a / -2147483648 }
+
+//go:noinline
+func div_Neg2147483648_int32(a int32) int32 { return -2147483648 / a }
+
+//go:noinline
+func div_int32_Neg2147483647(a int32) int32 { return a / -2147483647 }
+
+//go:noinline
+func div_Neg2147483647_int32(a int32) int32 { return -2147483647 / a }
+
+//go:noinline
+func div_int32_Neg1(a int32) int32 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int32(a int32) int32 { return -1 / a }
+
+//go:noinline
+func div_0_int32(a int32) int32 { return 0 / a }
+
+//go:noinline
+func div_int32_1(a int32) int32 { return a / 1 }
+
+//go:noinline
+func div_1_int32(a int32) int32 { return 1 / a }
+
+//go:noinline
+func div_int32_2147483647(a int32) int32 { return a / 2147483647 }
+
+//go:noinline
+func div_2147483647_int32(a int32) int32 { return 2147483647 / a }
+
+//go:noinline
+func mul_int32_Neg2147483648(a int32) int32 { return a * -2147483648 }
+
+//go:noinline
+func mul_Neg2147483648_int32(a int32) int32 { return -2147483648 * a }
+
+//go:noinline
+func mul_int32_Neg2147483647(a int32) int32 { return a * -2147483647 }
+
+//go:noinline
+func mul_Neg2147483647_int32(a int32) int32 { return -2147483647 * a }
+
+//go:noinline
+func mul_int32_Neg1(a int32) int32 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int32(a int32) int32 { return -1 * a }
+
+//go:noinline
+func mul_int32_0(a int32) int32 { return a * 0 }
+
+//go:noinline
+func mul_0_int32(a int32) int32 { return 0 * a }
+
+//go:noinline
+func mul_int32_1(a int32) int32 { return a * 1 }
+
+//go:noinline
+func mul_1_int32(a int32) int32 { return 1 * a }
+
+//go:noinline
+func mul_int32_2147483647(a int32) int32 { return a * 2147483647 }
+
+//go:noinline
+func mul_2147483647_int32(a int32) int32 { return 2147483647 * a }
+
+//go:noinline
+func mod_int32_Neg2147483648(a int32) int32 { return a % -2147483648 }
+
+//go:noinline
+func mod_Neg2147483648_int32(a int32) int32 { return -2147483648 % a }
+
+//go:noinline
+func mod_int32_Neg2147483647(a int32) int32 { return a % -2147483647 }
+
+//go:noinline
+func mod_Neg2147483647_int32(a int32) int32 { return -2147483647 % a }
+
+//go:noinline
+func mod_int32_Neg1(a int32) int32 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int32(a int32) int32 { return -1 % a }
+
+//go:noinline
+func mod_0_int32(a int32) int32 { return 0 % a }
+
+//go:noinline
+func mod_int32_1(a int32) int32 { return a % 1 }
+
+//go:noinline
+func mod_1_int32(a int32) int32 { return 1 % a }
+
+//go:noinline
+func mod_int32_2147483647(a int32) int32 { return a % 2147483647 }
+
+//go:noinline
+func mod_2147483647_int32(a int32) int32 { return 2147483647 % a }
+
+//go:noinline
+func and_int32_Neg2147483648(a int32) int32 { return a & -2147483648 }
+
+//go:noinline
+func and_Neg2147483648_int32(a int32) int32 { return -2147483648 & a }
+
+//go:noinline
+func and_int32_Neg2147483647(a int32) int32 { return a & -2147483647 }
+
+//go:noinline
+func and_Neg2147483647_int32(a int32) int32 { return -2147483647 & a }
+
+//go:noinline
+func and_int32_Neg1(a int32) int32 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int32(a int32) int32 { return -1 & a }
+
+//go:noinline
+func and_int32_0(a int32) int32 { return a & 0 }
+
+//go:noinline
+func and_0_int32(a int32) int32 { return 0 & a }
+
+//go:noinline
+func and_int32_1(a int32) int32 { return a & 1 }
+
+//go:noinline
+func and_1_int32(a int32) int32 { return 1 & a }
+
+//go:noinline
+func and_int32_2147483647(a int32) int32 { return a & 2147483647 }
+
+//go:noinline
+func and_2147483647_int32(a int32) int32 { return 2147483647 & a }
+
+//go:noinline
+func or_int32_Neg2147483648(a int32) int32 { return a | -2147483648 }
+
+//go:noinline
+func or_Neg2147483648_int32(a int32) int32 { return -2147483648 | a }
+
+//go:noinline
+func or_int32_Neg2147483647(a int32) int32 { return a | -2147483647 }
+
+//go:noinline
+func or_Neg2147483647_int32(a int32) int32 { return -2147483647 | a }
+
+//go:noinline
+func or_int32_Neg1(a int32) int32 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int32(a int32) int32 { return -1 | a }
+
+//go:noinline
+func or_int32_0(a int32) int32 { return a | 0 }
+
+//go:noinline
+func or_0_int32(a int32) int32 { return 0 | a }
+
+//go:noinline
+func or_int32_1(a int32) int32 { return a | 1 }
+
+//go:noinline
+func or_1_int32(a int32) int32 { return 1 | a }
+
+//go:noinline
+func or_int32_2147483647(a int32) int32 { return a | 2147483647 }
+
+//go:noinline
+func or_2147483647_int32(a int32) int32 { return 2147483647 | a }
+
+//go:noinline
+func xor_int32_Neg2147483648(a int32) int32 { return a ^ -2147483648 }
+
+//go:noinline
+func xor_Neg2147483648_int32(a int32) int32 { return -2147483648 ^ a }
+
+//go:noinline
+func xor_int32_Neg2147483647(a int32) int32 { return a ^ -2147483647 }
+
+//go:noinline
+func xor_Neg2147483647_int32(a int32) int32 { return -2147483647 ^ a }
+
+//go:noinline
+func xor_int32_Neg1(a int32) int32 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int32(a int32) int32 { return -1 ^ a }
+
+//go:noinline
+func xor_int32_0(a int32) int32 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int32(a int32) int32 { return 0 ^ a }
+
+//go:noinline
+func xor_int32_1(a int32) int32 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int32(a int32) int32 { return 1 ^ a }
+
+//go:noinline
+func xor_int32_2147483647(a int32) int32 { return a ^ 2147483647 }
+
+//go:noinline
+func xor_2147483647_int32(a int32) int32 { return 2147483647 ^ a }
+
+//go:noinline
+func mul_int32_Neg9(a int32) int32 { return a * -9 }
+
+//go:noinline
+func mul_Neg9_int32(a int32) int32 { return -9 * a }
+
+//go:noinline
+func mul_int32_Neg5(a int32) int32 { return a * -5 }
+
+//go:noinline
+func mul_Neg5_int32(a int32) int32 { return -5 * a }
+
+//go:noinline
+func mul_int32_Neg3(a int32) int32 { return a * -3 }
+
+//go:noinline
+func mul_Neg3_int32(a int32) int32 { return -3 * a }
+
+//go:noinline
+func mul_int32_3(a int32) int32 { return a * 3 }
+
+//go:noinline
+func mul_3_int32(a int32) int32 { return 3 * a }
+
+//go:noinline
+func mul_int32_5(a int32) int32 { return a * 5 }
+
+//go:noinline
+func mul_5_int32(a int32) int32 { return 5 * a }
+
+//go:noinline
+func mul_int32_7(a int32) int32 { return a * 7 }
+
+//go:noinline
+func mul_7_int32(a int32) int32 { return 7 * a }
+
+//go:noinline
+func mul_int32_9(a int32) int32 { return a * 9 }
+
+//go:noinline
+func mul_9_int32(a int32) int32 { return 9 * a }
+
+//go:noinline
+func mul_int32_10(a int32) int32 { return a * 10 }
+
+//go:noinline
+func mul_10_int32(a int32) int32 { return 10 * a }
+
+//go:noinline
+func mul_int32_11(a int32) int32 { return a * 11 }
+
+//go:noinline
+func mul_11_int32(a int32) int32 { return 11 * a }
+
+//go:noinline
+func mul_int32_13(a int32) int32 { return a * 13 }
+
+//go:noinline
+func mul_13_int32(a int32) int32 { return 13 * a }
+
+//go:noinline
+func mul_int32_19(a int32) int32 { return a * 19 }
+
+//go:noinline
+func mul_19_int32(a int32) int32 { return 19 * a }
+
+//go:noinline
+func mul_int32_21(a int32) int32 { return a * 21 }
+
+//go:noinline
+func mul_21_int32(a int32) int32 { return 21 * a }
+
+//go:noinline
+func mul_int32_25(a int32) int32 { return a * 25 }
+
+//go:noinline
+func mul_25_int32(a int32) int32 { return 25 * a }
+
+//go:noinline
+func mul_int32_27(a int32) int32 { return a * 27 }
+
+//go:noinline
+func mul_27_int32(a int32) int32 { return 27 * a }
+
+//go:noinline
+func mul_int32_37(a int32) int32 { return a * 37 }
+
+//go:noinline
+func mul_37_int32(a int32) int32 { return 37 * a }
+
+//go:noinline
+func mul_int32_41(a int32) int32 { return a * 41 }
+
+//go:noinline
+func mul_41_int32(a int32) int32 { return 41 * a }
+
+//go:noinline
+func mul_int32_45(a int32) int32 { return a * 45 }
+
+//go:noinline
+func mul_45_int32(a int32) int32 { return 45 * a }
+
+//go:noinline
+func mul_int32_73(a int32) int32 { return a * 73 }
+
+//go:noinline
+func mul_73_int32(a int32) int32 { return 73 * a }
+
+//go:noinline
+func mul_int32_81(a int32) int32 { return a * 81 }
+
+//go:noinline
+func mul_81_int32(a int32) int32 { return 81 * a }
+
+//go:noinline
+func add_uint16_0(a uint16) uint16 { return a + 0 }
+
+//go:noinline
+func add_0_uint16(a uint16) uint16 { return 0 + a }
+
+//go:noinline
+func add_uint16_1(a uint16) uint16 { return a + 1 }
+
+//go:noinline
+func add_1_uint16(a uint16) uint16 { return 1 + a }
+
+//go:noinline
+func add_uint16_65535(a uint16) uint16 { return a + 65535 }
+
+//go:noinline
+func add_65535_uint16(a uint16) uint16 { return 65535 + a }
+
+//go:noinline
+func sub_uint16_0(a uint16) uint16 { return a - 0 }
+
+//go:noinline
+func sub_0_uint16(a uint16) uint16 { return 0 - a }
+
+//go:noinline
+func sub_uint16_1(a uint16) uint16 { return a - 1 }
+
+//go:noinline
+func sub_1_uint16(a uint16) uint16 { return 1 - a }
+
+//go:noinline
+func sub_uint16_65535(a uint16) uint16 { return a - 65535 }
+
+//go:noinline
+func sub_65535_uint16(a uint16) uint16 { return 65535 - a }
+
+//go:noinline
+func div_0_uint16(a uint16) uint16 { return 0 / a }
+
+//go:noinline
+func div_uint16_1(a uint16) uint16 { return a / 1 }
+
+//go:noinline
+func div_1_uint16(a uint16) uint16 { return 1 / a }
+
+//go:noinline
+func div_uint16_65535(a uint16) uint16 { return a / 65535 }
+
+//go:noinline
+func div_65535_uint16(a uint16) uint16 { return 65535 / a }
+
+//go:noinline
+func mul_uint16_0(a uint16) uint16 { return a * 0 }
+
+//go:noinline
+func mul_0_uint16(a uint16) uint16 { return 0 * a }
+
+//go:noinline
+func mul_uint16_1(a uint16) uint16 { return a * 1 }
+
+//go:noinline
+func mul_1_uint16(a uint16) uint16 { return 1 * a }
+
+//go:noinline
+func mul_uint16_65535(a uint16) uint16 { return a * 65535 }
+
+//go:noinline
+func mul_65535_uint16(a uint16) uint16 { return 65535 * a }
+
+//go:noinline
+func lsh_uint16_0(a uint16) uint16 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint16(a uint16) uint16 { return 0 << a }
+
+//go:noinline
+func lsh_uint16_1(a uint16) uint16 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint16(a uint16) uint16 { return 1 << a }
+
+//go:noinline
+func lsh_uint16_65535(a uint16) uint16 { return a << 65535 }
+
+//go:noinline
+func lsh_65535_uint16(a uint16) uint16 { return 65535 << a }
+
+//go:noinline
+func rsh_uint16_0(a uint16) uint16 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint16(a uint16) uint16 { return 0 >> a }
+
+//go:noinline
+func rsh_uint16_1(a uint16) uint16 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint16(a uint16) uint16 { return 1 >> a }
+
+//go:noinline
+func rsh_uint16_65535(a uint16) uint16 { return a >> 65535 }
+
+//go:noinline
+func rsh_65535_uint16(a uint16) uint16 { return 65535 >> a }
+
+//go:noinline
+func mod_0_uint16(a uint16) uint16 { return 0 % a }
+
+//go:noinline
+func mod_uint16_1(a uint16) uint16 { return a % 1 }
+
+//go:noinline
+func mod_1_uint16(a uint16) uint16 { return 1 % a }
+
+//go:noinline
+func mod_uint16_65535(a uint16) uint16 { return a % 65535 }
+
+//go:noinline
+func mod_65535_uint16(a uint16) uint16 { return 65535 % a }
+
+//go:noinline
+func and_uint16_0(a uint16) uint16 { return a & 0 }
+
+//go:noinline
+func and_0_uint16(a uint16) uint16 { return 0 & a }
+
+//go:noinline
+func and_uint16_1(a uint16) uint16 { return a & 1 }
+
+//go:noinline
+func and_1_uint16(a uint16) uint16 { return 1 & a }
+
+//go:noinline
+func and_uint16_65535(a uint16) uint16 { return a & 65535 }
+
+//go:noinline
+func and_65535_uint16(a uint16) uint16 { return 65535 & a }
+
+//go:noinline
+func or_uint16_0(a uint16) uint16 { return a | 0 }
+
+//go:noinline
+func or_0_uint16(a uint16) uint16 { return 0 | a }
+
+//go:noinline
+func or_uint16_1(a uint16) uint16 { return a | 1 }
+
+//go:noinline
+func or_1_uint16(a uint16) uint16 { return 1 | a }
+
+//go:noinline
+func or_uint16_65535(a uint16) uint16 { return a | 65535 }
+
+//go:noinline
+func or_65535_uint16(a uint16) uint16 { return 65535 | a }
+
+//go:noinline
+func xor_uint16_0(a uint16) uint16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint16(a uint16) uint16 { return 0 ^ a }
+
+//go:noinline
+func xor_uint16_1(a uint16) uint16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint16(a uint16) uint16 { return 1 ^ a }
+
+//go:noinline
+func xor_uint16_65535(a uint16) uint16 { return a ^ 65535 }
+
+//go:noinline
+func xor_65535_uint16(a uint16) uint16 { return 65535 ^ a }
+
+//go:noinline
+func add_int16_Neg32768(a int16) int16 { return a + -32768 }
+
+//go:noinline
+func add_Neg32768_int16(a int16) int16 { return -32768 + a }
+
+//go:noinline
+func add_int16_Neg32767(a int16) int16 { return a + -32767 }
+
+//go:noinline
+func add_Neg32767_int16(a int16) int16 { return -32767 + a }
+
+//go:noinline
+func add_int16_Neg1(a int16) int16 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int16(a int16) int16 { return -1 + a }
+
+//go:noinline
+func add_int16_0(a int16) int16 { return a + 0 }
+
+//go:noinline
+func add_0_int16(a int16) int16 { return 0 + a }
+
+//go:noinline
+func add_int16_1(a int16) int16 { return a + 1 }
+
+//go:noinline
+func add_1_int16(a int16) int16 { return 1 + a }
+
+//go:noinline
+func add_int16_32766(a int16) int16 { return a + 32766 }
+
+//go:noinline
+func add_32766_int16(a int16) int16 { return 32766 + a }
+
+//go:noinline
+func add_int16_32767(a int16) int16 { return a + 32767 }
+
+//go:noinline
+func add_32767_int16(a int16) int16 { return 32767 + a }
+
+//go:noinline
+func sub_int16_Neg32768(a int16) int16 { return a - -32768 }
+
+//go:noinline
+func sub_Neg32768_int16(a int16) int16 { return -32768 - a }
+
+//go:noinline
+func sub_int16_Neg32767(a int16) int16 { return a - -32767 }
+
+//go:noinline
+func sub_Neg32767_int16(a int16) int16 { return -32767 - a }
+
+//go:noinline
+func sub_int16_Neg1(a int16) int16 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int16(a int16) int16 { return -1 - a }
+
+//go:noinline
+func sub_int16_0(a int16) int16 { return a - 0 }
+
+//go:noinline
+func sub_0_int16(a int16) int16 { return 0 - a }
+
+//go:noinline
+func sub_int16_1(a int16) int16 { return a - 1 }
+
+//go:noinline
+func sub_1_int16(a int16) int16 { return 1 - a }
+
+//go:noinline
+func sub_int16_32766(a int16) int16 { return a - 32766 }
+
+//go:noinline
+func sub_32766_int16(a int16) int16 { return 32766 - a }
+
+//go:noinline
+func sub_int16_32767(a int16) int16 { return a - 32767 }
+
+//go:noinline
+func sub_32767_int16(a int16) int16 { return 32767 - a }
+
+//go:noinline
+func div_int16_Neg32768(a int16) int16 { return a / -32768 }
+
+//go:noinline
+func div_Neg32768_int16(a int16) int16 { return -32768 / a }
+
+//go:noinline
+func div_int16_Neg32767(a int16) int16 { return a / -32767 }
+
+//go:noinline
+func div_Neg32767_int16(a int16) int16 { return -32767 / a }
+
+//go:noinline
+func div_int16_Neg1(a int16) int16 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int16(a int16) int16 { return -1 / a }
+
+//go:noinline
+func div_0_int16(a int16) int16 { return 0 / a }
+
+//go:noinline
+func div_int16_1(a int16) int16 { return a / 1 }
+
+//go:noinline
+func div_1_int16(a int16) int16 { return 1 / a }
+
+//go:noinline
+func div_int16_32766(a int16) int16 { return a / 32766 }
+
+//go:noinline
+func div_32766_int16(a int16) int16 { return 32766 / a }
+
+//go:noinline
+func div_int16_32767(a int16) int16 { return a / 32767 }
+
+//go:noinline
+func div_32767_int16(a int16) int16 { return 32767 / a }
+
+//go:noinline
+func mul_int16_Neg32768(a int16) int16 { return a * -32768 }
+
+//go:noinline
+func mul_Neg32768_int16(a int16) int16 { return -32768 * a }
+
+//go:noinline
+func mul_int16_Neg32767(a int16) int16 { return a * -32767 }
+
+//go:noinline
+func mul_Neg32767_int16(a int16) int16 { return -32767 * a }
+
+//go:noinline
+func mul_int16_Neg1(a int16) int16 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int16(a int16) int16 { return -1 * a }
+
+//go:noinline
+func mul_int16_0(a int16) int16 { return a * 0 }
+
+//go:noinline
+func mul_0_int16(a int16) int16 { return 0 * a }
+
+//go:noinline
+func mul_int16_1(a int16) int16 { return a * 1 }
+
+//go:noinline
+func mul_1_int16(a int16) int16 { return 1 * a }
+
+//go:noinline
+func mul_int16_32766(a int16) int16 { return a * 32766 }
+
+//go:noinline
+func mul_32766_int16(a int16) int16 { return 32766 * a }
+
+//go:noinline
+func mul_int16_32767(a int16) int16 { return a * 32767 }
+
+//go:noinline
+func mul_32767_int16(a int16) int16 { return 32767 * a }
+
+//go:noinline
+func mod_int16_Neg32768(a int16) int16 { return a % -32768 }
+
+//go:noinline
+func mod_Neg32768_int16(a int16) int16 { return -32768 % a }
+
+//go:noinline
+func mod_int16_Neg32767(a int16) int16 { return a % -32767 }
+
+//go:noinline
+func mod_Neg32767_int16(a int16) int16 { return -32767 % a }
+
+//go:noinline
+func mod_int16_Neg1(a int16) int16 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int16(a int16) int16 { return -1 % a }
+
+//go:noinline
+func mod_0_int16(a int16) int16 { return 0 % a }
+
+//go:noinline
+func mod_int16_1(a int16) int16 { return a % 1 }
+
+//go:noinline
+func mod_1_int16(a int16) int16 { return 1 % a }
+
+//go:noinline
+func mod_int16_32766(a int16) int16 { return a % 32766 }
+
+//go:noinline
+func mod_32766_int16(a int16) int16 { return 32766 % a }
+
+//go:noinline
+func mod_int16_32767(a int16) int16 { return a % 32767 }
+
+//go:noinline
+func mod_32767_int16(a int16) int16 { return 32767 % a }
+
+//go:noinline
+func and_int16_Neg32768(a int16) int16 { return a & -32768 }
+
+//go:noinline
+func and_Neg32768_int16(a int16) int16 { return -32768 & a }
+
+//go:noinline
+func and_int16_Neg32767(a int16) int16 { return a & -32767 }
+
+//go:noinline
+func and_Neg32767_int16(a int16) int16 { return -32767 & a }
+
+//go:noinline
+func and_int16_Neg1(a int16) int16 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int16(a int16) int16 { return -1 & a }
+
+//go:noinline
+func and_int16_0(a int16) int16 { return a & 0 }
+
+//go:noinline
+func and_0_int16(a int16) int16 { return 0 & a }
+
+//go:noinline
+func and_int16_1(a int16) int16 { return a & 1 }
+
+//go:noinline
+func and_1_int16(a int16) int16 { return 1 & a }
+
+//go:noinline
+func and_int16_32766(a int16) int16 { return a & 32766 }
+
+//go:noinline
+func and_32766_int16(a int16) int16 { return 32766 & a }
+
+//go:noinline
+func and_int16_32767(a int16) int16 { return a & 32767 }
+
+//go:noinline
+func and_32767_int16(a int16) int16 { return 32767 & a }
+
+//go:noinline
+func or_int16_Neg32768(a int16) int16 { return a | -32768 }
+
+//go:noinline
+func or_Neg32768_int16(a int16) int16 { return -32768 | a }
+
+//go:noinline
+func or_int16_Neg32767(a int16) int16 { return a | -32767 }
+
+//go:noinline
+func or_Neg32767_int16(a int16) int16 { return -32767 | a }
+
+//go:noinline
+func or_int16_Neg1(a int16) int16 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int16(a int16) int16 { return -1 | a }
+
+//go:noinline
+func or_int16_0(a int16) int16 { return a | 0 }
+
+//go:noinline
+func or_0_int16(a int16) int16 { return 0 | a }
+
+//go:noinline
+func or_int16_1(a int16) int16 { return a | 1 }
+
+//go:noinline
+func or_1_int16(a int16) int16 { return 1 | a }
+
+//go:noinline
+func or_int16_32766(a int16) int16 { return a | 32766 }
+
+//go:noinline
+func or_32766_int16(a int16) int16 { return 32766 | a }
+
+//go:noinline
+func or_int16_32767(a int16) int16 { return a | 32767 }
+
+//go:noinline
+func or_32767_int16(a int16) int16 { return 32767 | a }
+
+//go:noinline
+func xor_int16_Neg32768(a int16) int16 { return a ^ -32768 }
+
+//go:noinline
+func xor_Neg32768_int16(a int16) int16 { return -32768 ^ a }
+
+//go:noinline
+func xor_int16_Neg32767(a int16) int16 { return a ^ -32767 }
+
+//go:noinline
+func xor_Neg32767_int16(a int16) int16 { return -32767 ^ a }
+
+//go:noinline
+func xor_int16_Neg1(a int16) int16 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int16(a int16) int16 { return -1 ^ a }
+
+//go:noinline
+func xor_int16_0(a int16) int16 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int16(a int16) int16 { return 0 ^ a }
+
+//go:noinline
+func xor_int16_1(a int16) int16 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int16(a int16) int16 { return 1 ^ a }
+
+//go:noinline
+func xor_int16_32766(a int16) int16 { return a ^ 32766 }
+
+//go:noinline
+func xor_32766_int16(a int16) int16 { return 32766 ^ a }
+
+//go:noinline
+func xor_int16_32767(a int16) int16 { return a ^ 32767 }
+
+//go:noinline
+func xor_32767_int16(a int16) int16 { return 32767 ^ a }
+
+//go:noinline
+func add_uint8_0(a uint8) uint8 { return a + 0 }
+
+//go:noinline
+func add_0_uint8(a uint8) uint8 { return 0 + a }
+
+//go:noinline
+func add_uint8_1(a uint8) uint8 { return a + 1 }
+
+//go:noinline
+func add_1_uint8(a uint8) uint8 { return 1 + a }
+
+//go:noinline
+func add_uint8_255(a uint8) uint8 { return a + 255 }
+
+//go:noinline
+func add_255_uint8(a uint8) uint8 { return 255 + a }
+
+//go:noinline
+func sub_uint8_0(a uint8) uint8 { return a - 0 }
+
+//go:noinline
+func sub_0_uint8(a uint8) uint8 { return 0 - a }
+
+//go:noinline
+func sub_uint8_1(a uint8) uint8 { return a - 1 }
+
+//go:noinline
+func sub_1_uint8(a uint8) uint8 { return 1 - a }
+
+//go:noinline
+func sub_uint8_255(a uint8) uint8 { return a - 255 }
+
+//go:noinline
+func sub_255_uint8(a uint8) uint8 { return 255 - a }
+
+//go:noinline
+func div_0_uint8(a uint8) uint8 { return 0 / a }
+
+//go:noinline
+func div_uint8_1(a uint8) uint8 { return a / 1 }
+
+//go:noinline
+func div_1_uint8(a uint8) uint8 { return 1 / a }
+
+//go:noinline
+func div_uint8_255(a uint8) uint8 { return a / 255 }
+
+//go:noinline
+func div_255_uint8(a uint8) uint8 { return 255 / a }
+
+//go:noinline
+func mul_uint8_0(a uint8) uint8 { return a * 0 }
+
+//go:noinline
+func mul_0_uint8(a uint8) uint8 { return 0 * a }
+
+//go:noinline
+func mul_uint8_1(a uint8) uint8 { return a * 1 }
+
+//go:noinline
+func mul_1_uint8(a uint8) uint8 { return 1 * a }
+
+//go:noinline
+func mul_uint8_255(a uint8) uint8 { return a * 255 }
+
+//go:noinline
+func mul_255_uint8(a uint8) uint8 { return 255 * a }
+
+//go:noinline
+func lsh_uint8_0(a uint8) uint8 { return a << 0 }
+
+//go:noinline
+func lsh_0_uint8(a uint8) uint8 { return 0 << a }
+
+//go:noinline
+func lsh_uint8_1(a uint8) uint8 { return a << 1 }
+
+//go:noinline
+func lsh_1_uint8(a uint8) uint8 { return 1 << a }
+
+//go:noinline
+func lsh_uint8_255(a uint8) uint8 { return a << 255 }
+
+//go:noinline
+func lsh_255_uint8(a uint8) uint8 { return 255 << a }
+
+//go:noinline
+func rsh_uint8_0(a uint8) uint8 { return a >> 0 }
+
+//go:noinline
+func rsh_0_uint8(a uint8) uint8 { return 0 >> a }
+
+//go:noinline
+func rsh_uint8_1(a uint8) uint8 { return a >> 1 }
+
+//go:noinline
+func rsh_1_uint8(a uint8) uint8 { return 1 >> a }
+
+//go:noinline
+func rsh_uint8_255(a uint8) uint8 { return a >> 255 }
+
+//go:noinline
+func rsh_255_uint8(a uint8) uint8 { return 255 >> a }
+
+//go:noinline
+func mod_0_uint8(a uint8) uint8 { return 0 % a }
+
+//go:noinline
+func mod_uint8_1(a uint8) uint8 { return a % 1 }
+
+//go:noinline
+func mod_1_uint8(a uint8) uint8 { return 1 % a }
+
+//go:noinline
+func mod_uint8_255(a uint8) uint8 { return a % 255 }
+
+//go:noinline
+func mod_255_uint8(a uint8) uint8 { return 255 % a }
+
+//go:noinline
+func and_uint8_0(a uint8) uint8 { return a & 0 }
+
+//go:noinline
+func and_0_uint8(a uint8) uint8 { return 0 & a }
+
+//go:noinline
+func and_uint8_1(a uint8) uint8 { return a & 1 }
+
+//go:noinline
+func and_1_uint8(a uint8) uint8 { return 1 & a }
+
+//go:noinline
+func and_uint8_255(a uint8) uint8 { return a & 255 }
+
+//go:noinline
+func and_255_uint8(a uint8) uint8 { return 255 & a }
+
+//go:noinline
+func or_uint8_0(a uint8) uint8 { return a | 0 }
+
+//go:noinline
+func or_0_uint8(a uint8) uint8 { return 0 | a }
+
+//go:noinline
+func or_uint8_1(a uint8) uint8 { return a | 1 }
+
+//go:noinline
+func or_1_uint8(a uint8) uint8 { return 1 | a }
+
+//go:noinline
+func or_uint8_255(a uint8) uint8 { return a | 255 }
+
+//go:noinline
+func or_255_uint8(a uint8) uint8 { return 255 | a }
+
+//go:noinline
+func xor_uint8_0(a uint8) uint8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_uint8(a uint8) uint8 { return 0 ^ a }
+
+//go:noinline
+func xor_uint8_1(a uint8) uint8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_uint8(a uint8) uint8 { return 1 ^ a }
+
+//go:noinline
+func xor_uint8_255(a uint8) uint8 { return a ^ 255 }
+
+//go:noinline
+func xor_255_uint8(a uint8) uint8 { return 255 ^ a }
+
+//go:noinline
+func add_int8_Neg128(a int8) int8 { return a + -128 }
+
+//go:noinline
+func add_Neg128_int8(a int8) int8 { return -128 + a }
+
+//go:noinline
+func add_int8_Neg127(a int8) int8 { return a + -127 }
+
+//go:noinline
+func add_Neg127_int8(a int8) int8 { return -127 + a }
+
+//go:noinline
+func add_int8_Neg1(a int8) int8 { return a + -1 }
+
+//go:noinline
+func add_Neg1_int8(a int8) int8 { return -1 + a }
+
+//go:noinline
+func add_int8_0(a int8) int8 { return a + 0 }
+
+//go:noinline
+func add_0_int8(a int8) int8 { return 0 + a }
+
+//go:noinline
+func add_int8_1(a int8) int8 { return a + 1 }
+
+//go:noinline
+func add_1_int8(a int8) int8 { return 1 + a }
+
+//go:noinline
+func add_int8_126(a int8) int8 { return a + 126 }
+
+//go:noinline
+func add_126_int8(a int8) int8 { return 126 + a }
+
+//go:noinline
+func add_int8_127(a int8) int8 { return a + 127 }
+
+//go:noinline
+func add_127_int8(a int8) int8 { return 127 + a }
+
+//go:noinline
+func sub_int8_Neg128(a int8) int8 { return a - -128 }
+
+//go:noinline
+func sub_Neg128_int8(a int8) int8 { return -128 - a }
+
+//go:noinline
+func sub_int8_Neg127(a int8) int8 { return a - -127 }
+
+//go:noinline
+func sub_Neg127_int8(a int8) int8 { return -127 - a }
+
+//go:noinline
+func sub_int8_Neg1(a int8) int8 { return a - -1 }
+
+//go:noinline
+func sub_Neg1_int8(a int8) int8 { return -1 - a }
+
+//go:noinline
+func sub_int8_0(a int8) int8 { return a - 0 }
+
+//go:noinline
+func sub_0_int8(a int8) int8 { return 0 - a }
+
+//go:noinline
+func sub_int8_1(a int8) int8 { return a - 1 }
+
+//go:noinline
+func sub_1_int8(a int8) int8 { return 1 - a }
+
+//go:noinline
+func sub_int8_126(a int8) int8 { return a - 126 }
+
+//go:noinline
+func sub_126_int8(a int8) int8 { return 126 - a }
+
+//go:noinline
+func sub_int8_127(a int8) int8 { return a - 127 }
+
+//go:noinline
+func sub_127_int8(a int8) int8 { return 127 - a }
+
+//go:noinline
+func div_int8_Neg128(a int8) int8 { return a / -128 }
+
+//go:noinline
+func div_Neg128_int8(a int8) int8 { return -128 / a }
+
+//go:noinline
+func div_int8_Neg127(a int8) int8 { return a / -127 }
+
+//go:noinline
+func div_Neg127_int8(a int8) int8 { return -127 / a }
+
+//go:noinline
+func div_int8_Neg1(a int8) int8 { return a / -1 }
+
+//go:noinline
+func div_Neg1_int8(a int8) int8 { return -1 / a }
+
+//go:noinline
+func div_0_int8(a int8) int8 { return 0 / a }
+
+//go:noinline
+func div_int8_1(a int8) int8 { return a / 1 }
+
+//go:noinline
+func div_1_int8(a int8) int8 { return 1 / a }
+
+//go:noinline
+func div_int8_126(a int8) int8 { return a / 126 }
+
+//go:noinline
+func div_126_int8(a int8) int8 { return 126 / a }
+
+//go:noinline
+func div_int8_127(a int8) int8 { return a / 127 }
+
+//go:noinline
+func div_127_int8(a int8) int8 { return 127 / a }
+
+//go:noinline
+func mul_int8_Neg128(a int8) int8 { return a * -128 }
+
+//go:noinline
+func mul_Neg128_int8(a int8) int8 { return -128 * a }
+
+//go:noinline
+func mul_int8_Neg127(a int8) int8 { return a * -127 }
+
+//go:noinline
+func mul_Neg127_int8(a int8) int8 { return -127 * a }
+
+//go:noinline
+func mul_int8_Neg1(a int8) int8 { return a * -1 }
+
+//go:noinline
+func mul_Neg1_int8(a int8) int8 { return -1 * a }
+
+//go:noinline
+func mul_int8_0(a int8) int8 { return a * 0 }
+
+//go:noinline
+func mul_0_int8(a int8) int8 { return 0 * a }
+
+//go:noinline
+func mul_int8_1(a int8) int8 { return a * 1 }
+
+//go:noinline
+func mul_1_int8(a int8) int8 { return 1 * a }
+
+//go:noinline
+func mul_int8_126(a int8) int8 { return a * 126 }
+
+//go:noinline
+func mul_126_int8(a int8) int8 { return 126 * a }
+
+//go:noinline
+func mul_int8_127(a int8) int8 { return a * 127 }
+
+//go:noinline
+func mul_127_int8(a int8) int8 { return 127 * a }
+
+//go:noinline
+func mod_int8_Neg128(a int8) int8 { return a % -128 }
+
+//go:noinline
+func mod_Neg128_int8(a int8) int8 { return -128 % a }
+
+//go:noinline
+func mod_int8_Neg127(a int8) int8 { return a % -127 }
+
+//go:noinline
+func mod_Neg127_int8(a int8) int8 { return -127 % a }
+
+//go:noinline
+func mod_int8_Neg1(a int8) int8 { return a % -1 }
+
+//go:noinline
+func mod_Neg1_int8(a int8) int8 { return -1 % a }
+
+//go:noinline
+func mod_0_int8(a int8) int8 { return 0 % a }
+
+//go:noinline
+func mod_int8_1(a int8) int8 { return a % 1 }
+
+//go:noinline
+func mod_1_int8(a int8) int8 { return 1 % a }
+
+//go:noinline
+func mod_int8_126(a int8) int8 { return a % 126 }
+
+//go:noinline
+func mod_126_int8(a int8) int8 { return 126 % a }
+
+//go:noinline
+func mod_int8_127(a int8) int8 { return a % 127 }
+
+//go:noinline
+func mod_127_int8(a int8) int8 { return 127 % a }
+
+//go:noinline
+func and_int8_Neg128(a int8) int8 { return a & -128 }
+
+//go:noinline
+func and_Neg128_int8(a int8) int8 { return -128 & a }
+
+//go:noinline
+func and_int8_Neg127(a int8) int8 { return a & -127 }
+
+//go:noinline
+func and_Neg127_int8(a int8) int8 { return -127 & a }
+
+//go:noinline
+func and_int8_Neg1(a int8) int8 { return a & -1 }
+
+//go:noinline
+func and_Neg1_int8(a int8) int8 { return -1 & a }
+
+//go:noinline
+func and_int8_0(a int8) int8 { return a & 0 }
+
+//go:noinline
+func and_0_int8(a int8) int8 { return 0 & a }
+
+//go:noinline
+func and_int8_1(a int8) int8 { return a & 1 }
+
+//go:noinline
+func and_1_int8(a int8) int8 { return 1 & a }
+
+//go:noinline
+func and_int8_126(a int8) int8 { return a & 126 }
+
+//go:noinline
+func and_126_int8(a int8) int8 { return 126 & a }
+
+//go:noinline
+func and_int8_127(a int8) int8 { return a & 127 }
+
+//go:noinline
+func and_127_int8(a int8) int8 { return 127 & a }
+
+//go:noinline
+func or_int8_Neg128(a int8) int8 { return a | -128 }
+
+//go:noinline
+func or_Neg128_int8(a int8) int8 { return -128 | a }
+
+//go:noinline
+func or_int8_Neg127(a int8) int8 { return a | -127 }
+
+//go:noinline
+func or_Neg127_int8(a int8) int8 { return -127 | a }
+
+//go:noinline
+func or_int8_Neg1(a int8) int8 { return a | -1 }
+
+//go:noinline
+func or_Neg1_int8(a int8) int8 { return -1 | a }
+
+//go:noinline
+func or_int8_0(a int8) int8 { return a | 0 }
+
+//go:noinline
+func or_0_int8(a int8) int8 { return 0 | a }
+
+//go:noinline
+func or_int8_1(a int8) int8 { return a | 1 }
+
+//go:noinline
+func or_1_int8(a int8) int8 { return 1 | a }
+
+//go:noinline
+func or_int8_126(a int8) int8 { return a | 126 }
+
+//go:noinline
+func or_126_int8(a int8) int8 { return 126 | a }
+
+//go:noinline
+func or_int8_127(a int8) int8 { return a | 127 }
+
+//go:noinline
+func or_127_int8(a int8) int8 { return 127 | a }
+
+//go:noinline
+func xor_int8_Neg128(a int8) int8 { return a ^ -128 }
+
+//go:noinline
+func xor_Neg128_int8(a int8) int8 { return -128 ^ a }
+
+//go:noinline
+func xor_int8_Neg127(a int8) int8 { return a ^ -127 }
+
+//go:noinline
+func xor_Neg127_int8(a int8) int8 { return -127 ^ a }
+
+//go:noinline
+func xor_int8_Neg1(a int8) int8 { return a ^ -1 }
+
+//go:noinline
+func xor_Neg1_int8(a int8) int8 { return -1 ^ a }
+
+//go:noinline
+func xor_int8_0(a int8) int8 { return a ^ 0 }
+
+//go:noinline
+func xor_0_int8(a int8) int8 { return 0 ^ a }
+
+//go:noinline
+func xor_int8_1(a int8) int8 { return a ^ 1 }
+
+//go:noinline
+func xor_1_int8(a int8) int8 { return 1 ^ a }
+
+//go:noinline
+func xor_int8_126(a int8) int8 { return a ^ 126 }
+
+//go:noinline
+func xor_126_int8(a int8) int8 { return 126 ^ a }
+
+//go:noinline
+func xor_int8_127(a int8) int8 { return a ^ 127 }
+
+//go:noinline
+func xor_127_int8(a int8) int8 { return 127 ^ a }
+
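+// test_uint64 is a single table-driven case for the uint64 wrappers above:
+// fn is the //go:noinline wrapper to call, fnname is its name, in is the
+// runtime operand passed to fn, and want is the expected result.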
+type test_uint64 struct {
+ fn func(uint64) uint64
+ fnname string
+ in uint64
+ want uint64
+}
+
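+// tests_uint64 pairs each uint64 wrapper with an input value and the result
+// of evaluating the same operation with ordinary uint64 arithmetic
+// (including wrap-around on overflow).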
+var tests_uint64 = []test_uint64{
+
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 0, want: 0},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 0, want: 0},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 1, want: 1},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 1, want: 1},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: add_0_uint64, fnname: "add_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_0, fnname: "add_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 0, want: 1},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 0, want: 1},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 1, want: 2},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 1, want: 2},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: add_1_uint64, fnname: "add_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_uint64_1, fnname: "add_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 4294967296, want: 8589934592},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: add_4294967296_uint64, fnname: "add_4294967296_uint64", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_uint64_4294967296, fnname: "add_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: add_9223372036854775808_uint64, fnname: "add_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_9223372036854775808, fnname: "add_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 4294967296, want: 4294967295},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: add_18446744073709551615_uint64, fnname: "add_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: add_uint64_18446744073709551615, fnname: "add_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 0, want: 0},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 0, want: 0},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 1, want: 1},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: sub_0_uint64, fnname: "sub_0_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: sub_uint64_0, fnname: "sub_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 0, want: 1},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 1, want: 0},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 1, want: 0},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 4294967296, want: 18446744069414584321},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 4294967296, want: 4294967295},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_1_uint64, fnname: "sub_1_uint64", in: 18446744073709551615, want: 2},
+ test_uint64{fn: sub_uint64_1, fnname: "sub_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 0, want: 18446744069414584320},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 1, want: 4294967295},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 1, want: 18446744069414584321},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 9223372036854775808, want: 9223372032559808512},
+ test_uint64{fn: sub_4294967296_uint64, fnname: "sub_4294967296_uint64", in: 18446744073709551615, want: 4294967297},
+ test_uint64{fn: sub_uint64_4294967296, fnname: "sub_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 4294967296, want: 9223372032559808512},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: sub_9223372036854775808_uint64, fnname: "sub_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775809},
+ test_uint64{fn: sub_uint64_9223372036854775808, fnname: "sub_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 0, want: 1},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 1, want: 2},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 4294967296, want: 4294967297},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: sub_18446744073709551615_uint64, fnname: "sub_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: sub_uint64_18446744073709551615, fnname: "sub_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 1, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_0_uint64, fnname: "div_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 0, want: 0},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 1, want: 1},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 1, want: 1},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: div_1_uint64, fnname: "div_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_1, fnname: "div_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 4294967296, want: 1},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 9223372036854775808, want: 2147483648},
+ test_uint64{fn: div_4294967296_uint64, fnname: "div_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_4294967296, fnname: "div_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 4294967296, want: 2147483648},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_9223372036854775808_uint64, fnname: "div_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: div_uint64_9223372036854775808, fnname: "div_uint64_9223372036854775808", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: div_18446744073709551615_uint64, fnname: "div_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: div_uint64_18446744073709551615, fnname: "div_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 0, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 1, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_0_uint64, fnname: "mul_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_uint64_0, fnname: "mul_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 1, want: 1},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 1, want: 1},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_1_uint64, fnname: "mul_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_1, fnname: "mul_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 1, want: 4294967296},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 1, want: 4294967296},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_4294967296_uint64, fnname: "mul_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_4294967296, fnname: "mul_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584320},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 1, want: 9223372036854775808},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mul_9223372036854775808_uint64, fnname: "mul_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_9223372036854775808, fnname: "mul_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584320},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mul_18446744073709551615_uint64, fnname: "mul_18446744073709551615_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mul_uint64_18446744073709551615, fnname: "mul_uint64_18446744073709551615", in: 18446744073709551615, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: lsh_0_uint64, fnname: "lsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_0, fnname: "lsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 1, want: 2},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 1, want: 2},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 4294967296, want: 8589934592},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_1_uint64, fnname: "lsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_1, fnname: "lsh_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 1, want: 8589934592},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_4294967296_uint64, fnname: "lsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_4294967296, fnname: "lsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_9223372036854775808_uint64, fnname: "lsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_9223372036854775808, fnname: "lsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: lsh_18446744073709551615_uint64, fnname: "lsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: lsh_uint64_18446744073709551615, fnname: "lsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 0, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 0, want: 0},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 1, want: 1},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: rsh_0_uint64, fnname: "rsh_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_0, fnname: "rsh_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 0, want: 1},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 0, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 1, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 1, want: 0},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 4294967296, want: 2147483648},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 9223372036854775808, want: 4611686018427387904},
+ test_uint64{fn: rsh_1_uint64, fnname: "rsh_1_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_1, fnname: "rsh_uint64_1", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 1, want: 2147483648},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_4294967296_uint64, fnname: "rsh_4294967296_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_4294967296, fnname: "rsh_uint64_4294967296", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 1, want: 4611686018427387904},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_9223372036854775808_uint64, fnname: "rsh_9223372036854775808_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_9223372036854775808, fnname: "rsh_uint64_9223372036854775808", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 1, want: 9223372036854775807},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 1, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 4294967296, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 9223372036854775808, want: 0},
+ test_uint64{fn: rsh_18446744073709551615_uint64, fnname: "rsh_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: rsh_uint64_18446744073709551615, fnname: "rsh_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_0_uint64, fnname: "mod_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 0, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 1, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 4294967296, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 9223372036854775808, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_1_uint64, fnname: "mod_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: mod_uint64_1, fnname: "mod_uint64_1", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 1, want: 1},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 9223372036854775808, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_4294967296_uint64, fnname: "mod_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: mod_uint64_4294967296, fnname: "mod_uint64_4294967296", in: 18446744073709551615, want: 4294967295},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 1, want: 1},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: mod_9223372036854775808_uint64, fnname: "mod_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: mod_uint64_9223372036854775808, fnname: "mod_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 1, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 4294967296, want: 4294967295},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: mod_18446744073709551615_uint64, fnname: "mod_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: mod_uint64_18446744073709551615, fnname: "mod_uint64_18446744073709551615", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 0, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 1, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 4294967296, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_0_uint64, fnname: "and_0_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_uint64_0, fnname: "and_uint64_0", in: 18446744073709551615, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 0, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 1, want: 1},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 4294967296, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_1_uint64, fnname: "and_1_uint64", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_uint64_1, fnname: "and_uint64_1", in: 18446744073709551615, want: 1},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 0, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 1, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 9223372036854775808, want: 0},
+ test_uint64{fn: and_4294967296_uint64, fnname: "and_4294967296_uint64", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_uint64_4294967296, fnname: "and_uint64_4294967296", in: 18446744073709551615, want: 4294967296},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 0, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 1, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 1, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 4294967296, want: 0},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_9223372036854775808_uint64, fnname: "and_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_9223372036854775808, fnname: "and_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 0, want: 0},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 0, want: 0},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 1, want: 1},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 1, want: 1},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 4294967296, want: 4294967296},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: and_18446744073709551615_uint64, fnname: "and_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: and_uint64_18446744073709551615, fnname: "and_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 0, want: 0},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 0, want: 0},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 1, want: 1},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_0_uint64, fnname: "or_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_0, fnname: "or_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 0, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 0, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 1, want: 1},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 1, want: 1},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: or_1_uint64, fnname: "or_1_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_1, fnname: "or_uint64_1", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 4294967296, want: 4294967296},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: or_4294967296_uint64, fnname: "or_4294967296_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_4294967296, fnname: "or_uint64_4294967296", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: or_9223372036854775808_uint64, fnname: "or_9223372036854775808_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_9223372036854775808, fnname: "or_uint64_9223372036854775808", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 1, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 4294967296, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 9223372036854775808, want: 18446744073709551615},
+ test_uint64{fn: or_18446744073709551615_uint64, fnname: "or_18446744073709551615_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: or_uint64_18446744073709551615, fnname: "or_uint64_18446744073709551615", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 0, want: 0},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 0, want: 0},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 1, want: 1},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 1, want: 1},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 4294967296, want: 4294967296},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 9223372036854775808, want: 9223372036854775808},
+ test_uint64{fn: xor_0_uint64, fnname: "xor_0_uint64", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_0, fnname: "xor_uint64_0", in: 18446744073709551615, want: 18446744073709551615},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 0, want: 1},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 0, want: 1},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 1, want: 0},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 1, want: 0},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 4294967296, want: 4294967297},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 9223372036854775808, want: 9223372036854775809},
+ test_uint64{fn: xor_1_uint64, fnname: "xor_1_uint64", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_1, fnname: "xor_uint64_1", in: 18446744073709551615, want: 18446744073709551614},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 0, want: 4294967296},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 0, want: 4294967296},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 1, want: 4294967297},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 1, want: 4294967297},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 4294967296, want: 0},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 4294967296, want: 0},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 9223372036854775808, want: 9223372041149743104},
+ test_uint64{fn: xor_4294967296_uint64, fnname: "xor_4294967296_uint64", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_4294967296, fnname: "xor_uint64_4294967296", in: 18446744073709551615, want: 18446744069414584319},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 0, want: 9223372036854775808},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 1, want: 9223372036854775809},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 4294967296, want: 9223372041149743104},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 9223372036854775808, want: 0},
+ test_uint64{fn: xor_9223372036854775808_uint64, fnname: "xor_9223372036854775808_uint64", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_9223372036854775808, fnname: "xor_uint64_9223372036854775808", in: 18446744073709551615, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 0, want: 18446744073709551615},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 1, want: 18446744073709551614},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 4294967296, want: 18446744069414584319},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 9223372036854775808, want: 9223372036854775807},
+ test_uint64{fn: xor_18446744073709551615_uint64, fnname: "xor_18446744073709551615_uint64", in: 18446744073709551615, want: 0},
+ test_uint64{fn: xor_uint64_18446744073709551615, fnname: "xor_uint64_18446744073709551615", in: 18446744073709551615, want: 0}}
+
+type test_uint64mul struct {
+ fn     func(uint64) uint64
+ fnname string
+ in     uint64
+ want   uint64
+}
+
+var tests_uint64mul = []test_uint64{
+
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 3, want: 9},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 3, want: 9},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 5, want: 15},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 5, want: 15},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 7, want: 21},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 7, want: 21},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 9, want: 27},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 9, want: 27},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 10, want: 30},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 10, want: 30},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 11, want: 33},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 11, want: 33},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 13, want: 39},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 13, want: 39},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 19, want: 57},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 19, want: 57},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 21, want: 63},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 21, want: 63},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 25, want: 75},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 25, want: 75},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 27, want: 81},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 27, want: 81},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 37, want: 111},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 37, want: 111},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 41, want: 123},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 41, want: 123},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 45, want: 135},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 45, want: 135},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 73, want: 219},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 73, want: 219},
+ test_uint64{fn: mul_3_uint64, fnname: "mul_3_uint64", in: 81, want: 243},
+ test_uint64{fn: mul_uint64_3, fnname: "mul_uint64_3", in: 81, want: 243},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 3, want: 15},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 3, want: 15},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 5, want: 25},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 5, want: 25},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 7, want: 35},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 7, want: 35},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 9, want: 45},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 9, want: 45},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 10, want: 50},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 10, want: 50},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 11, want: 55},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 11, want: 55},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 13, want: 65},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 13, want: 65},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 19, want: 95},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 19, want: 95},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 21, want: 105},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 21, want: 105},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 25, want: 125},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 25, want: 125},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 27, want: 135},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 27, want: 135},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 37, want: 185},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 37, want: 185},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 41, want: 205},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 41, want: 205},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 45, want: 225},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 45, want: 225},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 73, want: 365},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 73, want: 365},
+ test_uint64{fn: mul_5_uint64, fnname: "mul_5_uint64", in: 81, want: 405},
+ test_uint64{fn: mul_uint64_5, fnname: "mul_uint64_5", in: 81, want: 405},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 3, want: 21},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 3, want: 21},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 5, want: 35},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 5, want: 35},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 7, want: 49},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 7, want: 49},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 9, want: 63},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 9, want: 63},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 10, want: 70},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 10, want: 70},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 11, want: 77},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 11, want: 77},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 13, want: 91},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 13, want: 91},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 19, want: 133},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 19, want: 133},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 21, want: 147},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 21, want: 147},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 25, want: 175},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 25, want: 175},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 27, want: 189},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 27, want: 189},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 37, want: 259},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 37, want: 259},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 41, want: 287},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 41, want: 287},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 45, want: 315},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 45, want: 315},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 73, want: 511},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 73, want: 511},
+ test_uint64{fn: mul_7_uint64, fnname: "mul_7_uint64", in: 81, want: 567},
+ test_uint64{fn: mul_uint64_7, fnname: "mul_uint64_7", in: 81, want: 567},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 3, want: 27},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 3, want: 27},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 5, want: 45},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 5, want: 45},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 7, want: 63},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 7, want: 63},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 9, want: 81},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 9, want: 81},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 10, want: 90},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 10, want: 90},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 11, want: 99},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 11, want: 99},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 13, want: 117},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 13, want: 117},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 19, want: 171},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 19, want: 171},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 21, want: 189},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 21, want: 189},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 25, want: 225},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 25, want: 225},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 27, want: 243},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 27, want: 243},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 37, want: 333},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 37, want: 333},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 41, want: 369},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 41, want: 369},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 45, want: 405},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 45, want: 405},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 73, want: 657},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 73, want: 657},
+ test_uint64{fn: mul_9_uint64, fnname: "mul_9_uint64", in: 81, want: 729},
+ test_uint64{fn: mul_uint64_9, fnname: "mul_uint64_9", in: 81, want: 729},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 3, want: 30},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 3, want: 30},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 5, want: 50},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 5, want: 50},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 7, want: 70},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 7, want: 70},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 9, want: 90},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 9, want: 90},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 10, want: 100},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 10, want: 100},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 11, want: 110},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 11, want: 110},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 13, want: 130},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 13, want: 130},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 19, want: 190},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 19, want: 190},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 21, want: 210},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 21, want: 210},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 25, want: 250},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 25, want: 250},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 27, want: 270},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 27, want: 270},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 37, want: 370},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 37, want: 370},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 41, want: 410},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 41, want: 410},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 45, want: 450},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 45, want: 450},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 73, want: 730},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 73, want: 730},
+ test_uint64{fn: mul_10_uint64, fnname: "mul_10_uint64", in: 81, want: 810},
+ test_uint64{fn: mul_uint64_10, fnname: "mul_uint64_10", in: 81, want: 810},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 3, want: 33},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 3, want: 33},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 5, want: 55},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 5, want: 55},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 7, want: 77},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 7, want: 77},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 9, want: 99},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 9, want: 99},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 10, want: 110},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 10, want: 110},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 11, want: 121},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 11, want: 121},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 13, want: 143},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 13, want: 143},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 19, want: 209},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 19, want: 209},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 21, want: 231},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 21, want: 231},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 25, want: 275},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 25, want: 275},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 27, want: 297},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 27, want: 297},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 37, want: 407},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 37, want: 407},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 41, want: 451},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 41, want: 451},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 45, want: 495},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 45, want: 495},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 73, want: 803},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 73, want: 803},
+ test_uint64{fn: mul_11_uint64, fnname: "mul_11_uint64", in: 81, want: 891},
+ test_uint64{fn: mul_uint64_11, fnname: "mul_uint64_11", in: 81, want: 891},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 3, want: 39},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 3, want: 39},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 5, want: 65},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 5, want: 65},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 7, want: 91},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 7, want: 91},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 9, want: 117},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 9, want: 117},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 10, want: 130},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 10, want: 130},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 11, want: 143},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 11, want: 143},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 13, want: 169},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 13, want: 169},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 19, want: 247},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 19, want: 247},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 21, want: 273},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 21, want: 273},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 25, want: 325},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 25, want: 325},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 27, want: 351},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 27, want: 351},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 37, want: 481},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 37, want: 481},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 41, want: 533},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 41, want: 533},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 45, want: 585},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 45, want: 585},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 73, want: 949},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 73, want: 949},
+ test_uint64{fn: mul_13_uint64, fnname: "mul_13_uint64", in: 81, want: 1053},
+ test_uint64{fn: mul_uint64_13, fnname: "mul_uint64_13", in: 81, want: 1053},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 3, want: 57},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 3, want: 57},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 5, want: 95},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 5, want: 95},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 7, want: 133},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 7, want: 133},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 9, want: 171},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 9, want: 171},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 10, want: 190},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 10, want: 190},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 11, want: 209},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 11, want: 209},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 13, want: 247},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 13, want: 247},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 19, want: 361},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 19, want: 361},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 21, want: 399},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 21, want: 399},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 25, want: 475},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 25, want: 475},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 27, want: 513},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 27, want: 513},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 37, want: 703},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 37, want: 703},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 41, want: 779},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 41, want: 779},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 45, want: 855},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 45, want: 855},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 73, want: 1387},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 73, want: 1387},
+ test_uint64{fn: mul_19_uint64, fnname: "mul_19_uint64", in: 81, want: 1539},
+ test_uint64{fn: mul_uint64_19, fnname: "mul_uint64_19", in: 81, want: 1539},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 3, want: 63},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 3, want: 63},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 5, want: 105},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 5, want: 105},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 7, want: 147},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 7, want: 147},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 9, want: 189},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 9, want: 189},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 10, want: 210},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 10, want: 210},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 11, want: 231},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 11, want: 231},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 13, want: 273},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 13, want: 273},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 19, want: 399},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 19, want: 399},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 21, want: 441},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 21, want: 441},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 25, want: 525},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 25, want: 525},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 27, want: 567},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 27, want: 567},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 37, want: 777},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 37, want: 777},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 41, want: 861},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 41, want: 861},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 45, want: 945},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 45, want: 945},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 73, want: 1533},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 73, want: 1533},
+ test_uint64{fn: mul_21_uint64, fnname: "mul_21_uint64", in: 81, want: 1701},
+ test_uint64{fn: mul_uint64_21, fnname: "mul_uint64_21", in: 81, want: 1701},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 3, want: 75},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 3, want: 75},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 5, want: 125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 5, want: 125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 7, want: 175},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 7, want: 175},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 9, want: 225},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 9, want: 225},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 10, want: 250},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 10, want: 250},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 11, want: 275},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 11, want: 275},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 13, want: 325},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 13, want: 325},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 19, want: 475},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 19, want: 475},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 21, want: 525},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 21, want: 525},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 25, want: 625},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 25, want: 625},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 27, want: 675},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 27, want: 675},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 37, want: 925},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 37, want: 925},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 41, want: 1025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 41, want: 1025},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 45, want: 1125},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 45, want: 1125},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 73, want: 1825},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 73, want: 1825},
+ test_uint64{fn: mul_25_uint64, fnname: "mul_25_uint64", in: 81, want: 2025},
+ test_uint64{fn: mul_uint64_25, fnname: "mul_uint64_25", in: 81, want: 2025},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 3, want: 81},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 3, want: 81},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 5, want: 135},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 5, want: 135},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 7, want: 189},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 7, want: 189},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 9, want: 243},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 9, want: 243},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 10, want: 270},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 10, want: 270},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 11, want: 297},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 11, want: 297},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 13, want: 351},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 13, want: 351},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 19, want: 513},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 19, want: 513},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 21, want: 567},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 21, want: 567},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 25, want: 675},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 25, want: 675},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 27, want: 729},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 27, want: 729},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 37, want: 999},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 37, want: 999},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 41, want: 1107},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 41, want: 1107},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 45, want: 1215},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 45, want: 1215},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 73, want: 1971},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 73, want: 1971},
+ test_uint64{fn: mul_27_uint64, fnname: "mul_27_uint64", in: 81, want: 2187},
+ test_uint64{fn: mul_uint64_27, fnname: "mul_uint64_27", in: 81, want: 2187},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 3, want: 111},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 3, want: 111},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 5, want: 185},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 5, want: 185},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 7, want: 259},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 7, want: 259},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 9, want: 333},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 9, want: 333},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 10, want: 370},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 10, want: 370},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 11, want: 407},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 11, want: 407},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 13, want: 481},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 13, want: 481},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 19, want: 703},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 19, want: 703},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 21, want: 777},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 21, want: 777},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 25, want: 925},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 25, want: 925},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 27, want: 999},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 27, want: 999},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 37, want: 1369},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 37, want: 1369},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 41, want: 1517},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 41, want: 1517},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 45, want: 1665},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 45, want: 1665},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 73, want: 2701},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 73, want: 2701},
+ test_uint64{fn: mul_37_uint64, fnname: "mul_37_uint64", in: 81, want: 2997},
+ test_uint64{fn: mul_uint64_37, fnname: "mul_uint64_37", in: 81, want: 2997},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 3, want: 123},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 3, want: 123},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 5, want: 205},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 5, want: 205},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 7, want: 287},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 7, want: 287},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 9, want: 369},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 9, want: 369},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 10, want: 410},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 10, want: 410},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 11, want: 451},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 11, want: 451},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 13, want: 533},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 13, want: 533},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 19, want: 779},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 19, want: 779},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 21, want: 861},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 21, want: 861},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 25, want: 1025},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 25, want: 1025},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 27, want: 1107},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 27, want: 1107},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 37, want: 1517},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 37, want: 1517},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 41, want: 1681},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 41, want: 1681},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 45, want: 1845},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 45, want: 1845},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 73, want: 2993},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 73, want: 2993},
+ test_uint64{fn: mul_41_uint64, fnname: "mul_41_uint64", in: 81, want: 3321},
+ test_uint64{fn: mul_uint64_41, fnname: "mul_uint64_41", in: 81, want: 3321},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 3, want: 135},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 3, want: 135},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 5, want: 225},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 5, want: 225},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 7, want: 315},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 7, want: 315},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 9, want: 405},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 9, want: 405},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 10, want: 450},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 10, want: 450},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 11, want: 495},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 11, want: 495},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 13, want: 585},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 13, want: 585},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 19, want: 855},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 19, want: 855},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 21, want: 945},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 21, want: 945},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 25, want: 1125},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 25, want: 1125},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 27, want: 1215},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 27, want: 1215},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 37, want: 1665},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 37, want: 1665},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 41, want: 1845},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 41, want: 1845},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 45, want: 2025},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 45, want: 2025},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 73, want: 3285},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 73, want: 3285},
+ test_uint64{fn: mul_45_uint64, fnname: "mul_45_uint64", in: 81, want: 3645},
+ test_uint64{fn: mul_uint64_45, fnname: "mul_uint64_45", in: 81, want: 3645},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 3, want: 219},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 3, want: 219},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 5, want: 365},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 5, want: 365},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 7, want: 511},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 7, want: 511},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 9, want: 657},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 9, want: 657},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 10, want: 730},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 10, want: 730},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 11, want: 803},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 11, want: 803},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 13, want: 949},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 13, want: 949},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 19, want: 1387},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 19, want: 1387},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 21, want: 1533},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 21, want: 1533},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 25, want: 1825},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 25, want: 1825},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 27, want: 1971},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 27, want: 1971},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 37, want: 2701},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 37, want: 2701},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 41, want: 2993},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 41, want: 2993},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 45, want: 3285},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 45, want: 3285},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 73, want: 5329},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 73, want: 5329},
+ test_uint64{fn: mul_73_uint64, fnname: "mul_73_uint64", in: 81, want: 5913},
+ test_uint64{fn: mul_uint64_73, fnname: "mul_uint64_73", in: 81, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 3, want: 243},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 3, want: 243},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 5, want: 405},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 5, want: 405},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 7, want: 567},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 7, want: 567},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 9, want: 729},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 9, want: 729},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 10, want: 810},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 10, want: 810},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 11, want: 891},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 11, want: 891},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 13, want: 1053},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 13, want: 1053},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 19, want: 1539},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 19, want: 1539},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 21, want: 1701},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 21, want: 1701},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 25, want: 2025},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 25, want: 2025},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 27, want: 2187},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 27, want: 2187},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 37, want: 2997},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 37, want: 2997},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 41, want: 3321},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 41, want: 3321},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 45, want: 3645},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 45, want: 3645},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 73, want: 5913},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 73, want: 5913},
+ test_uint64{fn: mul_81_uint64, fnname: "mul_81_uint64", in: 81, want: 6561},
+ test_uint64{fn: mul_uint64_81, fnname: "mul_uint64_81", in: 81, want: 6561}}
+
+type test_int64 struct {
+ fn func(int64) int64
+ fnname string
+ in int64
+ want int64
+}
+
+var tests_int64 = []test_int64{
+
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: add_Neg9223372036854775808_int64, fnname: "add_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775808, fnname: "add_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: add_Neg9223372036854775807_int64, fnname: "add_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_int64_Neg9223372036854775807, fnname: "add_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: -1, want: -4294967297},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: add_Neg4294967296_int64, fnname: "add_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_int64_Neg4294967296, fnname: "add_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -4294967296, want: -4294967297},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: -1, want: -2},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: -1, want: -2},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 4294967296, want: 4294967295},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: add_Neg1_int64, fnname: "add_Neg1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_int64_Neg1, fnname: "add_int64_Neg1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: -1, want: -1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: -1, want: -1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 0, want: 0},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 0, want: 0},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 1, want: 1},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 1, want: 1},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: add_0_int64, fnname: "add_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_int64_0, fnname: "add_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: -1, want: 0},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: -1, want: 0},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 0, want: 1},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 0, want: 1},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 1, want: 2},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 1, want: 2},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: add_1_int64, fnname: "add_1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_int64_1, fnname: "add_int64_1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: -1, want: 4294967295},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: add_4294967296_int64, fnname: "add_4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_int64_4294967296, fnname: "add_int64_4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -4294967296, want: 9223372032559808510},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: -1, want: 9223372036854775805},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 4294967296, want: -9223372032559808514},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775806, want: -4},
+ test_int64{fn: add_9223372036854775806_int64, fnname: "add_9223372036854775806_int64", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_int64_9223372036854775806, fnname: "add_int64_9223372036854775806", in: 9223372036854775807, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: add_9223372036854775807_int64, fnname: "add_9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: add_int64_9223372036854775807, fnname: "add_int64_9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 4294967296, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775806, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: sub_Neg9223372036854775808_int64, fnname: "sub_Neg9223372036854775808_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775808, fnname: "sub_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775806, want: 3},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775806, want: -3},
+ test_int64{fn: sub_Neg9223372036854775807_int64, fnname: "sub_Neg9223372036854775807_int64", in: 9223372036854775807, want: 2},
+ test_int64{fn: sub_int64_Neg9223372036854775807, fnname: "sub_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: -1, want: -4294967295},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 0, want: 4294967296},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 1, want: -4294967297},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 1, want: 4294967297},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 4294967296, want: 8589934592},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808514},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: sub_Neg4294967296_int64, fnname: "sub_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_int64_Neg4294967296, fnname: "sub_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -4294967296, want: -4294967295},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 0, want: 1},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 1, want: 2},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 4294967296, want: 4294967297},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: sub_Neg1_int64, fnname: "sub_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_Neg1, fnname: "sub_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: -1, want: 1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: -1, want: -1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 0, want: 0},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 0, want: 0},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 1, want: -1},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 1, want: 1},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: sub_0_int64, fnname: "sub_0_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: sub_int64_0, fnname: "sub_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -4294967296, want: 4294967297},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -4294967296, want: -4294967297},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: -1, want: 2},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: -1, want: -2},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 0, want: 1},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 0, want: -1},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 1, want: 0},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 1, want: 0},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 4294967296, want: 4294967295},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775806, want: -9223372036854775805},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775806, want: 9223372036854775805},
+ test_int64{fn: sub_1_int64, fnname: "sub_1_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: sub_int64_1, fnname: "sub_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: -1, want: 4294967297},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 0, want: -4294967296},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 1, want: 4294967295},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 1, want: -4294967295},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775806, want: -9223372032559808510},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: sub_4294967296_int64, fnname: "sub_4294967296_int64", in: 9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: sub_int64_4294967296, fnname: "sub_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775808, want: 2},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -9223372036854775807, want: -3},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -9223372036854775807, want: 3},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -4294967296, want: 9223372032559808514},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 0, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 1, want: 9223372036854775805},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 1, want: -9223372036854775805},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 4294967296, want: -9223372032559808510},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: sub_9223372036854775806_int64, fnname: "sub_9223372036854775806_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: sub_int64_9223372036854775806, fnname: "sub_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -9223372036854775807, want: 2},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 1, want: -9223372036854775806},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: sub_9223372036854775807_int64, fnname: "sub_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: sub_int64_9223372036854775807, fnname: "sub_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -4294967296, want: 2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 4294967296, want: -2147483648},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775808_int64, fnname: "div_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775808, fnname: "div_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -4294967296, want: 2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 4294967296, want: -2147483647},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_Neg9223372036854775807_int64, fnname: "div_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_int64_Neg9223372036854775807, fnname: "div_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775808, want: 2147483648},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -9223372036854775807, want: 2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -4294967296, want: 1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -4294967296, want: 1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 4294967296, want: -1},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 4294967296, want: -1},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775806, want: -2147483647},
+ test_int64{fn: div_Neg4294967296_int64, fnname: "div_Neg4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg4294967296, fnname: "div_int64_Neg4294967296", in: 9223372036854775807, want: -2147483647},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: div_Neg1_int64, fnname: "div_Neg1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_Neg1, fnname: "div_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: -1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 1, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_0_int64, fnname: "div_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: -1, want: -1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 0, want: 0},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 1, want: 1},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 1, want: 1},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: div_1_int64, fnname: "div_1_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_1, fnname: "div_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775808, want: -2147483648},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -9223372036854775807, want: -2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -4294967296, want: -1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -4294967296, want: -1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: -1, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 4294967296, want: 1},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 4294967296, want: 1},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775806, want: 2147483647},
+ test_int64{fn: div_4294967296_int64, fnname: "div_4294967296_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_4294967296, fnname: "div_int64_4294967296", in: 9223372036854775807, want: 2147483647},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_9223372036854775806_int64, fnname: "div_9223372036854775806_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: div_int64_9223372036854775806, fnname: "div_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -4294967296, want: -2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: -1, want: 0},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 1, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 4294967296, want: 2147483647},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: div_9223372036854775807_int64, fnname: "div_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: div_int64_9223372036854775807, fnname: "div_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 1, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_Neg9223372036854775808_int64, fnname: "mul_Neg9223372036854775808_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775808, fnname: "mul_int64_Neg9223372036854775808", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_Neg9223372036854775807_int64, fnname: "mul_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_Neg9223372036854775807, fnname: "mul_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: -1, want: 4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 1, want: -4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 1, want: -4294967296},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775806, want: 8589934592},
+ test_int64{fn: mul_Neg4294967296_int64, fnname: "mul_Neg4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_Neg4294967296, fnname: "mul_int64_Neg4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: -1, want: 1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: -1, want: 1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_Neg1_int64, fnname: "mul_Neg1_int64", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_Neg1, fnname: "mul_int64_Neg1", in: 9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: -1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: -1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 0, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 1, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 1, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: mul_0_int64, fnname: "mul_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_int64_0, fnname: "mul_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -4294967296, want: -4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: -1, want: -1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: -1, want: -1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 0, want: 0},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 1, want: 1},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 1, want: 1},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 4294967296, want: 4294967296},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mul_1_int64, fnname: "mul_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_int64_1, fnname: "mul_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: -1, want: -4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 1, want: 4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 1, want: 4294967296},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775806, want: -8589934592},
+ test_int64{fn: mul_4294967296_int64, fnname: "mul_4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_int64_4294967296, fnname: "mul_int64_4294967296", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -4294967296, want: 8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: -1, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 1, want: 9223372036854775806},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 4294967296, want: -8589934592},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775806, want: 4},
+ test_int64{fn: mul_9223372036854775806_int64, fnname: "mul_9223372036854775806_int64", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775806, fnname: "mul_int64_9223372036854775806", in: 9223372036854775807, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -4294967296, want: 4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 4294967296, want: -4294967296},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775806, want: -9223372036854775806},
+ test_int64{fn: mul_9223372036854775807_int64, fnname: "mul_9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mul_int64_9223372036854775807, fnname: "mul_int64_9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775808_int64, fnname: "mod_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775808, fnname: "mod_int64_Neg9223372036854775808", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 4294967296, want: -4294967295},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_Neg9223372036854775807_int64, fnname: "mod_Neg9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_Neg9223372036854775807, fnname: "mod_int64_Neg9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 1, want: 1},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775806, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_Neg4294967296_int64, fnname: "mod_Neg4294967296_int64", in: 9223372036854775807, want: -4294967296},
+ test_int64{fn: mod_int64_Neg4294967296, fnname: "mod_int64_Neg4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 1, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 4294967296, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_Neg1_int64, fnname: "mod_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: mod_int64_Neg1, fnname: "mod_int64_Neg1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: -1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 1, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_0_int64, fnname: "mod_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: -1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 0, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 1, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 4294967296, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_1_int64, fnname: "mod_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_int64_1, fnname: "mod_int64_1", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775808, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 1, want: 1},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775806, want: 4294967294},
+ test_int64{fn: mod_4294967296_int64, fnname: "mod_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: mod_int64_4294967296, fnname: "mod_int64_4294967296", in: 9223372036854775807, want: 4294967295},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775808, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 4294967296, want: 4294967294},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: mod_9223372036854775806_int64, fnname: "mod_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: mod_int64_9223372036854775806, fnname: "mod_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -4294967296, want: -4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: -1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 1, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 4294967296, want: 4294967295},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: mod_9223372036854775807_int64, fnname: "mod_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: mod_int64_9223372036854775807, fnname: "mod_int64_9223372036854775807", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: -1, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 1, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775808_int64, fnname: "and_Neg9223372036854775808_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775808, fnname: "and_int64_Neg9223372036854775808", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -4294967296, want: -9223372036854775808},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: -1, want: -9223372036854775807},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 4294967296, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_Neg9223372036854775807_int64, fnname: "and_Neg9223372036854775807_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_Neg9223372036854775807, fnname: "and_int64_Neg9223372036854775807", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: -1, want: -4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: -1, want: -4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 0, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 1, want: 0},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775806, want: 9223372032559808512},
+ test_int64{fn: and_Neg4294967296_int64, fnname: "and_Neg4294967296_int64", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_int64_Neg4294967296, fnname: "and_int64_Neg4294967296", in: 9223372036854775807, want: 9223372032559808512},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -4294967296, want: -4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 0, want: 0},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 1, want: 1},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_Neg1_int64, fnname: "and_Neg1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_Neg1, fnname: "and_int64_Neg1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: -1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: -1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 0, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 1, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 4294967296, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_0_int64, fnname: "and_0_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_int64_0, fnname: "and_int64_0", in: 9223372036854775807, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: -1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: -1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 0, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 1, want: 1},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 4294967296, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 4294967296, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775806, want: 0},
+ test_int64{fn: and_1_int64, fnname: "and_1_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_int64_1, fnname: "and_int64_1", in: 9223372036854775807, want: 1},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: -1, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: -1, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 0, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 1, want: 0},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775806, want: 4294967296},
+ test_int64{fn: and_4294967296_int64, fnname: "and_4294967296_int64", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_int64_4294967296, fnname: "and_int64_4294967296", in: 9223372036854775807, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -9223372036854775807, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: -1, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 1, want: 0},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 1, want: 0},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775806_int64, fnname: "and_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775806, fnname: "and_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775808, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -9223372036854775807, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: -1, want: 9223372036854775807},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 0, want: 0},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 0, want: 0},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 1, want: 1},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 1, want: 1},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 4294967296, want: 4294967296},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: and_9223372036854775807_int64, fnname: "and_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: and_int64_9223372036854775807, fnname: "and_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg9223372036854775808_int64, fnname: "or_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775808, fnname: "or_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 1, want: -9223372036854775807},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg9223372036854775807_int64, fnname: "or_Neg9223372036854775807_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg9223372036854775807, fnname: "or_int64_Neg9223372036854775807", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775808, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -9223372036854775807, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: -1, want: -1},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 4294967296, want: -4294967296},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775806, want: -2},
+ test_int64{fn: or_Neg4294967296_int64, fnname: "or_Neg4294967296_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg4294967296, fnname: "or_int64_Neg4294967296", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: -1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 1, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 1, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 4294967296, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 4294967296, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775806, want: -1},
+ test_int64{fn: or_Neg1_int64, fnname: "or_Neg1_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_int64_Neg1, fnname: "or_int64_Neg1", in: 9223372036854775807, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: -1, want: -1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 0, want: 0},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 0, want: 0},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 1, want: 1},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_0_int64, fnname: "or_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_0, fnname: "or_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: -1, want: -1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 0, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 0, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 1, want: 1},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 1, want: 1},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_1_int64, fnname: "or_1_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_1, fnname: "or_int64_1", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -4294967296, want: -4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: -1, want: -1},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 4294967296, want: 4294967296},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_4294967296_int64, fnname: "or_4294967296_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_4294967296, fnname: "or_int64_4294967296", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -4294967296, want: -2},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -4294967296, want: -2},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 4294967296, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: or_9223372036854775806_int64, fnname: "or_9223372036854775806_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775806, fnname: "or_int64_9223372036854775806", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -9223372036854775807, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -4294967296, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -4294967296, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: -1, want: -1},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: -1, want: -1},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 1, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 4294967296, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: or_9223372036854775807_int64, fnname: "or_9223372036854775807_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: or_int64_9223372036854775807, fnname: "or_int64_9223372036854775807", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775808, want: 0},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -9223372036854775807, want: 1},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -4294967296, want: 9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: -1, want: 9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 0, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 1, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 4294967296, want: -9223372032559808512},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775806, want: -2},
+ test_int64{fn: xor_Neg9223372036854775808_int64, fnname: "xor_Neg9223372036854775808_int64", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775808, fnname: "xor_int64_Neg9223372036854775808", in: 9223372036854775807, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775808, want: 1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -9223372036854775807, want: 0},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -4294967296, want: 9223372032559808513},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: -1, want: 9223372036854775806},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 0, want: -9223372036854775807},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 1, want: -9223372036854775808},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 4294967296, want: -9223372032559808511},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775806, want: -1},
+ test_int64{fn: xor_Neg9223372036854775807_int64, fnname: "xor_Neg9223372036854775807_int64", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_Neg9223372036854775807, fnname: "xor_int64_Neg9223372036854775807", in: 9223372036854775807, want: -2},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775808, want: 9223372032559808512},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -9223372036854775807, want: 9223372032559808513},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -4294967296, want: 0},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -4294967296, want: 0},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: -1, want: 4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: -1, want: 4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 0, want: -4294967296},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 0, want: -4294967296},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 1, want: -4294967295},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 1, want: -4294967295},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 4294967296, want: -8589934592},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775806, want: -9223372032559808514},
+ test_int64{fn: xor_Neg4294967296_int64, fnname: "xor_Neg4294967296_int64", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_int64_Neg4294967296, fnname: "xor_int64_Neg4294967296", in: 9223372036854775807, want: -9223372032559808513},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775808, want: 9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -4294967296, want: 4294967295},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: -1, want: 0},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: -1, want: 0},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 0, want: -1},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 0, want: -1},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 1, want: -2},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 1, want: -2},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 4294967296, want: -4294967297},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775806, want: -9223372036854775807},
+ test_int64{fn: xor_Neg1_int64, fnname: "xor_Neg1_int64", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_Neg1, fnname: "xor_int64_Neg1", in: 9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775808, want: -9223372036854775808},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -9223372036854775807, want: -9223372036854775807},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -4294967296, want: -4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: -1, want: -1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: -1, want: -1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 0, want: 0},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 0, want: 0},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 1, want: 1},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 1, want: 1},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 4294967296, want: 4294967296},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775806, want: 9223372036854775806},
+ test_int64{fn: xor_0_int64, fnname: "xor_0_int64", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_int64_0, fnname: "xor_int64_0", in: 9223372036854775807, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775808, want: -9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -9223372036854775807, want: -9223372036854775808},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -4294967296, want: -4294967295},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: -1, want: -2},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: -1, want: -2},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 0, want: 1},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 0, want: 1},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 1, want: 0},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 1, want: 0},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 4294967296, want: 4294967297},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775806, want: 9223372036854775807},
+ test_int64{fn: xor_1_int64, fnname: "xor_1_int64", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_int64_1, fnname: "xor_int64_1", in: 9223372036854775807, want: 9223372036854775806},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775808, want: -9223372032559808512},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -9223372036854775807, want: -9223372032559808511},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -4294967296, want: -8589934592},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: -1, want: -4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: -1, want: -4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 0, want: 4294967296},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 0, want: 4294967296},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 1, want: 4294967297},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 1, want: 4294967297},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 4294967296, want: 0},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 4294967296, want: 0},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775806, want: 9223372032559808510},
+ test_int64{fn: xor_4294967296_int64, fnname: "xor_4294967296_int64", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_int64_4294967296, fnname: "xor_int64_4294967296", in: 9223372036854775807, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775808, want: -2},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -9223372036854775807, want: -1},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -4294967296, want: -9223372032559808514},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: -1, want: -9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 0, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 1, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 4294967296, want: 9223372032559808510},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775806, want: 0},
+ test_int64{fn: xor_9223372036854775806_int64, fnname: "xor_9223372036854775806_int64", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_int64_9223372036854775806, fnname: "xor_int64_9223372036854775806", in: 9223372036854775807, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775808, want: -1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -9223372036854775807, want: -2},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -4294967296, want: -9223372032559808513},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: -1, want: -9223372036854775808},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 0, want: 9223372036854775807},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 1, want: 9223372036854775806},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 4294967296, want: 9223372032559808511},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775806, want: 1},
+ test_int64{fn: xor_9223372036854775807_int64, fnname: "xor_9223372036854775807_int64", in: 9223372036854775807, want: 0},
+ test_int64{fn: xor_int64_9223372036854775807, fnname: "xor_int64_9223372036854775807", in: 9223372036854775807, want: 0}}
+
+type test_int64mul struct {
+ fn func(int64) int64
+ fnname string
+ in int64
+ want int64
+}
+
+var tests_int64mul = []test_int64{
+
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -9, want: 81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -9, want: 81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -5, want: 45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -5, want: 45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: -3, want: 27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: -3, want: 27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 3, want: -27},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 3, want: -27},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 5, want: -45},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 5, want: -45},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 7, want: -63},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 7, want: -63},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 9, want: -81},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 9, want: -81},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 10, want: -90},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 10, want: -90},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 11, want: -99},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 11, want: -99},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 13, want: -117},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 13, want: -117},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 19, want: -171},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 19, want: -171},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 21, want: -189},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 21, want: -189},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 25, want: -225},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 25, want: -225},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 27, want: -243},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 27, want: -243},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 37, want: -333},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 37, want: -333},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 41, want: -369},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 41, want: -369},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 45, want: -405},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 45, want: -405},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 73, want: -657},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 73, want: -657},
+ test_int64{fn: mul_Neg9_int64, fnname: "mul_Neg9_int64", in: 81, want: -729},
+ test_int64{fn: mul_int64_Neg9, fnname: "mul_int64_Neg9", in: 81, want: -729},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -9, want: 45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -9, want: 45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -5, want: 25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -5, want: 25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: -3, want: 15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: -3, want: 15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 3, want: -15},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 3, want: -15},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 5, want: -25},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 5, want: -25},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 7, want: -35},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 7, want: -35},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 9, want: -45},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 9, want: -45},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 10, want: -50},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 10, want: -50},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 11, want: -55},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 11, want: -55},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 13, want: -65},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 13, want: -65},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 19, want: -95},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 19, want: -95},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 21, want: -105},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 21, want: -105},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 25, want: -125},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 25, want: -125},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 27, want: -135},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 27, want: -135},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 37, want: -185},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 37, want: -185},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 41, want: -205},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 41, want: -205},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 45, want: -225},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 45, want: -225},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 73, want: -365},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 73, want: -365},
+ test_int64{fn: mul_Neg5_int64, fnname: "mul_Neg5_int64", in: 81, want: -405},
+ test_int64{fn: mul_int64_Neg5, fnname: "mul_int64_Neg5", in: 81, want: -405},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -9, want: 27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -9, want: 27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -5, want: 15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -5, want: 15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: -3, want: 9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: -3, want: 9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 3, want: -9},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 3, want: -9},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 5, want: -15},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 5, want: -15},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 7, want: -21},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 7, want: -21},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 9, want: -27},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 9, want: -27},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 10, want: -30},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 10, want: -30},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 11, want: -33},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 11, want: -33},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 13, want: -39},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 13, want: -39},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 19, want: -57},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 19, want: -57},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 21, want: -63},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 21, want: -63},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 25, want: -75},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 25, want: -75},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 27, want: -81},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 27, want: -81},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 37, want: -111},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 37, want: -111},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 41, want: -123},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 41, want: -123},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 45, want: -135},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 45, want: -135},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 73, want: -219},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 73, want: -219},
+ test_int64{fn: mul_Neg3_int64, fnname: "mul_Neg3_int64", in: 81, want: -243},
+ test_int64{fn: mul_int64_Neg3, fnname: "mul_int64_Neg3", in: 81, want: -243},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -9, want: -27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -9, want: -27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -5, want: -15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -5, want: -15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: -3, want: -9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: -3, want: -9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 3, want: 9},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 3, want: 9},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 5, want: 15},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 5, want: 15},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 7, want: 21},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 7, want: 21},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 9, want: 27},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 9, want: 27},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 10, want: 30},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 10, want: 30},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 11, want: 33},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 11, want: 33},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 13, want: 39},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 13, want: 39},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 19, want: 57},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 19, want: 57},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 21, want: 63},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 21, want: 63},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 25, want: 75},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 25, want: 75},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 27, want: 81},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 27, want: 81},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 37, want: 111},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 37, want: 111},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 41, want: 123},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 41, want: 123},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 45, want: 135},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 45, want: 135},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 73, want: 219},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 73, want: 219},
+ test_int64{fn: mul_3_int64, fnname: "mul_3_int64", in: 81, want: 243},
+ test_int64{fn: mul_int64_3, fnname: "mul_int64_3", in: 81, want: 243},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -9, want: -45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -9, want: -45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -5, want: -25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -5, want: -25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: -3, want: -15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: -3, want: -15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 3, want: 15},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 3, want: 15},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 5, want: 25},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 5, want: 25},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 7, want: 35},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 7, want: 35},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 9, want: 45},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 9, want: 45},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 10, want: 50},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 10, want: 50},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 11, want: 55},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 11, want: 55},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 13, want: 65},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 13, want: 65},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 19, want: 95},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 19, want: 95},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 21, want: 105},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 21, want: 105},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 25, want: 125},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 25, want: 125},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 27, want: 135},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 27, want: 135},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 37, want: 185},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 37, want: 185},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 41, want: 205},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 41, want: 205},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 45, want: 225},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 45, want: 225},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 73, want: 365},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 73, want: 365},
+ test_int64{fn: mul_5_int64, fnname: "mul_5_int64", in: 81, want: 405},
+ test_int64{fn: mul_int64_5, fnname: "mul_int64_5", in: 81, want: 405},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -9, want: -63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -9, want: -63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -5, want: -35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -5, want: -35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: -3, want: -21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: -3, want: -21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 3, want: 21},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 3, want: 21},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 5, want: 35},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 5, want: 35},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 7, want: 49},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 7, want: 49},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 9, want: 63},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 9, want: 63},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 10, want: 70},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 10, want: 70},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 11, want: 77},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 11, want: 77},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 13, want: 91},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 13, want: 91},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 19, want: 133},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 19, want: 133},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 21, want: 147},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 21, want: 147},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 25, want: 175},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 25, want: 175},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 27, want: 189},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 27, want: 189},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 37, want: 259},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 37, want: 259},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 41, want: 287},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 41, want: 287},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 45, want: 315},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 45, want: 315},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 73, want: 511},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 73, want: 511},
+ test_int64{fn: mul_7_int64, fnname: "mul_7_int64", in: 81, want: 567},
+ test_int64{fn: mul_int64_7, fnname: "mul_int64_7", in: 81, want: 567},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -9, want: -81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -9, want: -81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -5, want: -45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -5, want: -45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: -3, want: -27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: -3, want: -27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 3, want: 27},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 3, want: 27},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 5, want: 45},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 5, want: 45},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 7, want: 63},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 7, want: 63},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 9, want: 81},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 9, want: 81},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 10, want: 90},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 10, want: 90},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 11, want: 99},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 11, want: 99},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 13, want: 117},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 13, want: 117},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 19, want: 171},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 19, want: 171},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 21, want: 189},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 21, want: 189},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 25, want: 225},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 25, want: 225},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 27, want: 243},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 27, want: 243},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 37, want: 333},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 37, want: 333},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 41, want: 369},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 41, want: 369},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 45, want: 405},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 45, want: 405},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 73, want: 657},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 73, want: 657},
+ test_int64{fn: mul_9_int64, fnname: "mul_9_int64", in: 81, want: 729},
+ test_int64{fn: mul_int64_9, fnname: "mul_int64_9", in: 81, want: 729},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -9, want: -90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -9, want: -90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -5, want: -50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -5, want: -50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: -3, want: -30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: -3, want: -30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 3, want: 30},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 3, want: 30},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 5, want: 50},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 5, want: 50},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 7, want: 70},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 7, want: 70},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 9, want: 90},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 9, want: 90},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 10, want: 100},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 10, want: 100},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 11, want: 110},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 11, want: 110},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 13, want: 130},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 13, want: 130},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 19, want: 190},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 19, want: 190},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 21, want: 210},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 21, want: 210},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 25, want: 250},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 25, want: 250},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 27, want: 270},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 27, want: 270},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 37, want: 370},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 37, want: 370},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 41, want: 410},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 41, want: 410},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 45, want: 450},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 45, want: 450},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 73, want: 730},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 73, want: 730},
+ test_int64{fn: mul_10_int64, fnname: "mul_10_int64", in: 81, want: 810},
+ test_int64{fn: mul_int64_10, fnname: "mul_int64_10", in: 81, want: 810},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -9, want: -99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -9, want: -99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -5, want: -55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -5, want: -55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: -3, want: -33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: -3, want: -33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 3, want: 33},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 3, want: 33},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 5, want: 55},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 5, want: 55},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 7, want: 77},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 7, want: 77},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 9, want: 99},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 9, want: 99},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 10, want: 110},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 10, want: 110},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 11, want: 121},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 11, want: 121},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 13, want: 143},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 13, want: 143},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 19, want: 209},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 19, want: 209},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 21, want: 231},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 21, want: 231},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 25, want: 275},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 25, want: 275},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 27, want: 297},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 27, want: 297},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 37, want: 407},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 37, want: 407},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 41, want: 451},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 41, want: 451},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 45, want: 495},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 45, want: 495},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 73, want: 803},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 73, want: 803},
+ test_int64{fn: mul_11_int64, fnname: "mul_11_int64", in: 81, want: 891},
+ test_int64{fn: mul_int64_11, fnname: "mul_int64_11", in: 81, want: 891},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -9, want: -117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -9, want: -117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -5, want: -65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -5, want: -65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: -3, want: -39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: -3, want: -39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 3, want: 39},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 3, want: 39},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 5, want: 65},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 5, want: 65},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 7, want: 91},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 7, want: 91},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 9, want: 117},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 9, want: 117},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 10, want: 130},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 10, want: 130},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 11, want: 143},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 11, want: 143},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 13, want: 169},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 13, want: 169},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 19, want: 247},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 19, want: 247},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 21, want: 273},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 21, want: 273},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 25, want: 325},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 25, want: 325},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 27, want: 351},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 27, want: 351},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 37, want: 481},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 37, want: 481},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 41, want: 533},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 41, want: 533},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 45, want: 585},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 45, want: 585},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 73, want: 949},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 73, want: 949},
+ test_int64{fn: mul_13_int64, fnname: "mul_13_int64", in: 81, want: 1053},
+ test_int64{fn: mul_int64_13, fnname: "mul_int64_13", in: 81, want: 1053},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -9, want: -171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -9, want: -171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -5, want: -95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -5, want: -95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: -3, want: -57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: -3, want: -57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 3, want: 57},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 3, want: 57},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 5, want: 95},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 5, want: 95},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 7, want: 133},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 7, want: 133},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 9, want: 171},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 9, want: 171},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 10, want: 190},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 10, want: 190},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 11, want: 209},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 11, want: 209},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 13, want: 247},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 13, want: 247},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 19, want: 361},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 19, want: 361},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 21, want: 399},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 21, want: 399},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 25, want: 475},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 25, want: 475},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 27, want: 513},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 27, want: 513},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 37, want: 703},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 37, want: 703},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 41, want: 779},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 41, want: 779},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 45, want: 855},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 45, want: 855},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 73, want: 1387},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 73, want: 1387},
+ test_int64{fn: mul_19_int64, fnname: "mul_19_int64", in: 81, want: 1539},
+ test_int64{fn: mul_int64_19, fnname: "mul_int64_19", in: 81, want: 1539},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -9, want: -189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -9, want: -189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -5, want: -105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -5, want: -105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: -3, want: -63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: -3, want: -63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 3, want: 63},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 3, want: 63},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 5, want: 105},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 5, want: 105},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 7, want: 147},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 7, want: 147},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 9, want: 189},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 9, want: 189},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 10, want: 210},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 10, want: 210},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 11, want: 231},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 11, want: 231},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 13, want: 273},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 13, want: 273},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 19, want: 399},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 19, want: 399},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 21, want: 441},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 21, want: 441},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 25, want: 525},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 25, want: 525},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 27, want: 567},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 27, want: 567},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 37, want: 777},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 37, want: 777},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 41, want: 861},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 41, want: 861},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 45, want: 945},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 45, want: 945},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 73, want: 1533},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 73, want: 1533},
+ test_int64{fn: mul_21_int64, fnname: "mul_21_int64", in: 81, want: 1701},
+ test_int64{fn: mul_int64_21, fnname: "mul_int64_21", in: 81, want: 1701},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -9, want: -225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -9, want: -225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -5, want: -125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -5, want: -125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: -3, want: -75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: -3, want: -75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 3, want: 75},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 3, want: 75},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 5, want: 125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 5, want: 125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 7, want: 175},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 7, want: 175},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 9, want: 225},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 9, want: 225},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 10, want: 250},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 10, want: 250},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 11, want: 275},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 11, want: 275},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 13, want: 325},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 13, want: 325},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 19, want: 475},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 19, want: 475},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 21, want: 525},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 21, want: 525},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 25, want: 625},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 25, want: 625},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 27, want: 675},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 27, want: 675},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 37, want: 925},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 37, want: 925},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 41, want: 1025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 41, want: 1025},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 45, want: 1125},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 45, want: 1125},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 73, want: 1825},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 73, want: 1825},
+ test_int64{fn: mul_25_int64, fnname: "mul_25_int64", in: 81, want: 2025},
+ test_int64{fn: mul_int64_25, fnname: "mul_int64_25", in: 81, want: 2025},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -9, want: -243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -9, want: -243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -5, want: -135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -5, want: -135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: -3, want: -81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: -3, want: -81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 3, want: 81},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 3, want: 81},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 5, want: 135},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 5, want: 135},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 7, want: 189},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 7, want: 189},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 9, want: 243},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 9, want: 243},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 10, want: 270},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 10, want: 270},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 11, want: 297},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 11, want: 297},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 13, want: 351},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 13, want: 351},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 19, want: 513},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 19, want: 513},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 21, want: 567},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 21, want: 567},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 25, want: 675},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 25, want: 675},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 27, want: 729},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 27, want: 729},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 37, want: 999},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 37, want: 999},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 41, want: 1107},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 41, want: 1107},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 45, want: 1215},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 45, want: 1215},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 73, want: 1971},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 73, want: 1971},
+ test_int64{fn: mul_27_int64, fnname: "mul_27_int64", in: 81, want: 2187},
+ test_int64{fn: mul_int64_27, fnname: "mul_int64_27", in: 81, want: 2187},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -9, want: -333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -9, want: -333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -5, want: -185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -5, want: -185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: -3, want: -111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: -3, want: -111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 3, want: 111},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 3, want: 111},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 5, want: 185},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 5, want: 185},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 7, want: 259},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 7, want: 259},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 9, want: 333},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 9, want: 333},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 10, want: 370},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 10, want: 370},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 11, want: 407},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 11, want: 407},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 13, want: 481},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 13, want: 481},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 19, want: 703},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 19, want: 703},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 21, want: 777},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 21, want: 777},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 25, want: 925},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 25, want: 925},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 27, want: 999},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 27, want: 999},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 37, want: 1369},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 37, want: 1369},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 41, want: 1517},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 41, want: 1517},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 45, want: 1665},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 45, want: 1665},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 73, want: 2701},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 73, want: 2701},
+ test_int64{fn: mul_37_int64, fnname: "mul_37_int64", in: 81, want: 2997},
+ test_int64{fn: mul_int64_37, fnname: "mul_int64_37", in: 81, want: 2997},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -9, want: -369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -9, want: -369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -5, want: -205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -5, want: -205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: -3, want: -123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: -3, want: -123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 3, want: 123},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 3, want: 123},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 5, want: 205},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 5, want: 205},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 7, want: 287},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 7, want: 287},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 9, want: 369},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 9, want: 369},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 10, want: 410},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 10, want: 410},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 11, want: 451},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 11, want: 451},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 13, want: 533},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 13, want: 533},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 19, want: 779},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 19, want: 779},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 21, want: 861},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 21, want: 861},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 25, want: 1025},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 25, want: 1025},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 27, want: 1107},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 27, want: 1107},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 37, want: 1517},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 37, want: 1517},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 41, want: 1681},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 41, want: 1681},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 45, want: 1845},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 45, want: 1845},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 73, want: 2993},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 73, want: 2993},
+ test_int64{fn: mul_41_int64, fnname: "mul_41_int64", in: 81, want: 3321},
+ test_int64{fn: mul_int64_41, fnname: "mul_int64_41", in: 81, want: 3321},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -9, want: -405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -9, want: -405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -5, want: -225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -5, want: -225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: -3, want: -135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: -3, want: -135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 3, want: 135},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 3, want: 135},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 5, want: 225},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 5, want: 225},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 7, want: 315},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 7, want: 315},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 9, want: 405},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 9, want: 405},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 10, want: 450},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 10, want: 450},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 11, want: 495},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 11, want: 495},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 13, want: 585},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 13, want: 585},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 19, want: 855},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 19, want: 855},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 21, want: 945},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 21, want: 945},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 25, want: 1125},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 25, want: 1125},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 27, want: 1215},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 27, want: 1215},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 37, want: 1665},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 37, want: 1665},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 41, want: 1845},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 41, want: 1845},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 45, want: 2025},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 45, want: 2025},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 73, want: 3285},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 73, want: 3285},
+ test_int64{fn: mul_45_int64, fnname: "mul_45_int64", in: 81, want: 3645},
+ test_int64{fn: mul_int64_45, fnname: "mul_int64_45", in: 81, want: 3645},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -9, want: -657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -9, want: -657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -5, want: -365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -5, want: -365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: -3, want: -219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: -3, want: -219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 3, want: 219},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 3, want: 219},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 5, want: 365},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 5, want: 365},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 7, want: 511},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 7, want: 511},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 9, want: 657},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 9, want: 657},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 10, want: 730},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 10, want: 730},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 11, want: 803},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 11, want: 803},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 13, want: 949},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 13, want: 949},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 19, want: 1387},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 19, want: 1387},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 21, want: 1533},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 21, want: 1533},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 25, want: 1825},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 25, want: 1825},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 27, want: 1971},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 27, want: 1971},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 37, want: 2701},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 37, want: 2701},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 41, want: 2993},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 41, want: 2993},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 45, want: 3285},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 45, want: 3285},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 73, want: 5329},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 73, want: 5329},
+ test_int64{fn: mul_73_int64, fnname: "mul_73_int64", in: 81, want: 5913},
+ test_int64{fn: mul_int64_73, fnname: "mul_int64_73", in: 81, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -9, want: -729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -9, want: -729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -5, want: -405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -5, want: -405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: -3, want: -243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: -3, want: -243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 3, want: 243},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 3, want: 243},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 5, want: 405},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 5, want: 405},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 7, want: 567},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 7, want: 567},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 9, want: 729},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 9, want: 729},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 10, want: 810},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 10, want: 810},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 11, want: 891},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 11, want: 891},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 13, want: 1053},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 13, want: 1053},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 19, want: 1539},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 19, want: 1539},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 21, want: 1701},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 21, want: 1701},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 25, want: 2025},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 25, want: 2025},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 27, want: 2187},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 27, want: 2187},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 37, want: 2997},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 37, want: 2997},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 41, want: 3321},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 41, want: 3321},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 45, want: 3645},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 45, want: 3645},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 73, want: 5913},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 73, want: 5913},
+ test_int64{fn: mul_81_int64, fnname: "mul_81_int64", in: 81, want: 6561},
+ test_int64{fn: mul_int64_81, fnname: "mul_int64_81", in: 81, want: 6561}}
+
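The int64 table above closes out the same table-driven pattern that the uint32 declarations below repeat: each generated wrapper fixes one operand to a constant, and each entry records an input and the precomputed expected result. Both argument orders appear (constant on the left and on the right), presumably so the constant-operand code paths are exercised from both sides. As a rough sketch of how such tables are typically consumed — the runner name and error format here are illustrative assumptions, not the upstream harness verbatim:

    // runConstTables walks the generated tables and checks each wrapper
    // against its precomputed result. Hypothetical helper for illustration;
    // assumes only the standard "testing" package and the tests_int64 /
    // tests_uint32 slices declared in this file.
    func runConstTables(t *testing.T) {
    	for _, tc := range tests_int64 {
    		if got := tc.fn(tc.in); got != tc.want {
    			t.Errorf("%s(%d) = %d, want %d", tc.fnname, tc.in, got, tc.want)
    		}
    	}
    	for _, tc := range tests_uint32 {
    		if got := tc.fn(tc.in); got != tc.want {
    			t.Errorf("%s(%d) = %d, want %d", tc.fnname, tc.in, got, tc.want)
    		}
    	}
    }
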
+type test_uint32 struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32 = []test_uint32{
+
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 0, want: 0},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 0, want: 0},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 1, want: 1},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 1, want: 1},
+ test_uint32{fn: add_0_uint32, fnname: "add_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_uint32_0, fnname: "add_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 0, want: 1},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 0, want: 1},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 1, want: 2},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 1, want: 2},
+ test_uint32{fn: add_1_uint32, fnname: "add_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: add_uint32_1, fnname: "add_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: add_4294967295_uint32, fnname: "add_4294967295_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: add_uint32_4294967295, fnname: "add_uint32_4294967295", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 0, want: 0},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 0, want: 0},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 1, want: 1},
+ test_uint32{fn: sub_0_uint32, fnname: "sub_0_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: sub_uint32_0, fnname: "sub_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 0, want: 1},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 0, want: 4294967295},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 1, want: 0},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 1, want: 0},
+ test_uint32{fn: sub_1_uint32, fnname: "sub_1_uint32", in: 4294967295, want: 2},
+ test_uint32{fn: sub_uint32_1, fnname: "sub_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 0, want: 1},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 1, want: 2},
+ test_uint32{fn: sub_4294967295_uint32, fnname: "sub_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: sub_uint32_4294967295, fnname: "sub_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 1, want: 0},
+ test_uint32{fn: div_0_uint32, fnname: "div_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 0, want: 0},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 1, want: 1},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 1, want: 1},
+ test_uint32{fn: div_1_uint32, fnname: "div_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: div_uint32_1, fnname: "div_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: div_4294967295_uint32, fnname: "div_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: div_uint32_4294967295, fnname: "div_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 0, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 1, want: 0},
+ test_uint32{fn: mul_0_uint32, fnname: "mul_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mul_uint32_0, fnname: "mul_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 1, want: 1},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 1, want: 1},
+ test_uint32{fn: mul_1_uint32, fnname: "mul_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_uint32_1, fnname: "mul_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: mul_4294967295_uint32, fnname: "mul_4294967295_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mul_uint32_4294967295, fnname: "mul_uint32_4294967295", in: 4294967295, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: lsh_0_uint32, fnname: "lsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_0, fnname: "lsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 1, want: 2},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 1, want: 2},
+ test_uint32{fn: lsh_1_uint32, fnname: "lsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_1, fnname: "lsh_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: lsh_4294967295_uint32, fnname: "lsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: lsh_uint32_4294967295, fnname: "lsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 0, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 0, want: 0},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 1, want: 1},
+ test_uint32{fn: rsh_0_uint32, fnname: "rsh_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_0, fnname: "rsh_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 0, want: 1},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 0, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 1, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 1, want: 0},
+ test_uint32{fn: rsh_1_uint32, fnname: "rsh_1_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_1, fnname: "rsh_uint32_1", in: 4294967295, want: 2147483647},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 1, want: 2147483647},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 1, want: 0},
+ test_uint32{fn: rsh_4294967295_uint32, fnname: "rsh_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: rsh_uint32_4294967295, fnname: "rsh_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_0_uint32, fnname: "mod_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 0, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 1, want: 0},
+ test_uint32{fn: mod_1_uint32, fnname: "mod_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: mod_uint32_1, fnname: "mod_uint32_1", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 1, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: mod_4294967295_uint32, fnname: "mod_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: mod_uint32_4294967295, fnname: "mod_uint32_4294967295", in: 4294967295, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 0, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 1, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 1, want: 0},
+ test_uint32{fn: and_0_uint32, fnname: "and_0_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: and_uint32_0, fnname: "and_uint32_0", in: 4294967295, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 0, want: 0},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 1, want: 1},
+ test_uint32{fn: and_1_uint32, fnname: "and_1_uint32", in: 4294967295, want: 1},
+ test_uint32{fn: and_uint32_1, fnname: "and_uint32_1", in: 4294967295, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 0, want: 0},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 0, want: 0},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 1, want: 1},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 1, want: 1},
+ test_uint32{fn: and_4294967295_uint32, fnname: "and_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: and_uint32_4294967295, fnname: "and_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 0, want: 0},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 0, want: 0},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 1, want: 1},
+ test_uint32{fn: or_0_uint32, fnname: "or_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_0, fnname: "or_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 0, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 0, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 1, want: 1},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 1, want: 1},
+ test_uint32{fn: or_1_uint32, fnname: "or_1_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_1, fnname: "or_uint32_1", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 1, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 1, want: 4294967295},
+ test_uint32{fn: or_4294967295_uint32, fnname: "or_4294967295_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: or_uint32_4294967295, fnname: "or_uint32_4294967295", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 0, want: 0},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 0, want: 0},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 1, want: 1},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 1, want: 1},
+ test_uint32{fn: xor_0_uint32, fnname: "xor_0_uint32", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_uint32_0, fnname: "xor_uint32_0", in: 4294967295, want: 4294967295},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 0, want: 1},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 0, want: 1},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 1, want: 0},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 1, want: 0},
+ test_uint32{fn: xor_1_uint32, fnname: "xor_1_uint32", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_uint32_1, fnname: "xor_uint32_1", in: 4294967295, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 0, want: 4294967295},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 0, want: 4294967295},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 1, want: 4294967294},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 1, want: 4294967294},
+ test_uint32{fn: xor_4294967295_uint32, fnname: "xor_4294967295_uint32", in: 4294967295, want: 0},
+ test_uint32{fn: xor_uint32_4294967295, fnname: "xor_uint32_4294967295", in: 4294967295, want: 0}}
+
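Each entry in the table above binds a generated wrapper (for example mul_3_uint32, which presumably just returns 3*n) to one constant input and the expected wrapped result. The harness that walks these tables lives elsewhere in this generated file and is not part of this hunk; the following is only a minimal sketch of that shape, with the hypothetical names checkUint32, mul_3_uint32 and the test_uint32 element type copied from the hunk:

package main

import "fmt"

// test_uint32 mirrors the element type used by the generated tables above.
type test_uint32 struct {
	fn     func(uint32) uint32
	fnname string
	in     uint32
	want   uint32
}

// mul_3_uint32 is a hypothetical stand-in for one generated wrapper: it
// multiplies its argument by the constant 3, matching entries such as
// {in: 3, want: 9} in the table.
func mul_3_uint32(n uint32) uint32 { return 3 * n }

// checkUint32 is a hypothetical driver: it applies each wrapper to its
// input and reports any mismatch against the recorded want value.
func checkUint32(tests []test_uint32) {
	for _, tc := range tests {
		if got := tc.fn(tc.in); got != tc.want {
			fmt.Printf("%s(%d) = %d, want %d\n", tc.fnname, tc.in, got, tc.want)
		}
	}
}

func main() {
	checkUint32([]test_uint32{
		{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 3, want: 9},
	})
}

In the real generated file the driver is presumably a testing.T-based test function rather than main, but the loop shape is the same: call fn(in), compare with want, and report fnname on failure.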
+type test_uint32mul struct {
+ fn func(uint32) uint32
+ fnname string
+ in uint32
+ want uint32
+}
+
+var tests_uint32mul = []test_uint32{
+
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 3, want: 9},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 3, want: 9},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 5, want: 15},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 5, want: 15},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 7, want: 21},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 7, want: 21},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 9, want: 27},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 9, want: 27},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 10, want: 30},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 10, want: 30},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 11, want: 33},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 11, want: 33},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 13, want: 39},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 13, want: 39},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 19, want: 57},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 19, want: 57},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 21, want: 63},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 21, want: 63},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 25, want: 75},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 25, want: 75},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 27, want: 81},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 27, want: 81},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 37, want: 111},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 37, want: 111},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 41, want: 123},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 41, want: 123},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 45, want: 135},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 45, want: 135},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 73, want: 219},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 73, want: 219},
+ test_uint32{fn: mul_3_uint32, fnname: "mul_3_uint32", in: 81, want: 243},
+ test_uint32{fn: mul_uint32_3, fnname: "mul_uint32_3", in: 81, want: 243},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 3, want: 15},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 3, want: 15},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 5, want: 25},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 5, want: 25},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 7, want: 35},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 7, want: 35},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 9, want: 45},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 9, want: 45},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 10, want: 50},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 10, want: 50},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 11, want: 55},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 11, want: 55},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 13, want: 65},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 13, want: 65},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 19, want: 95},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 19, want: 95},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 21, want: 105},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 21, want: 105},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 25, want: 125},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 25, want: 125},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 27, want: 135},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 27, want: 135},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 37, want: 185},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 37, want: 185},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 41, want: 205},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 41, want: 205},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 45, want: 225},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 45, want: 225},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 73, want: 365},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 73, want: 365},
+ test_uint32{fn: mul_5_uint32, fnname: "mul_5_uint32", in: 81, want: 405},
+ test_uint32{fn: mul_uint32_5, fnname: "mul_uint32_5", in: 81, want: 405},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 3, want: 21},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 3, want: 21},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 5, want: 35},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 5, want: 35},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 7, want: 49},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 7, want: 49},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 9, want: 63},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 9, want: 63},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 10, want: 70},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 10, want: 70},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 11, want: 77},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 11, want: 77},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 13, want: 91},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 13, want: 91},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 19, want: 133},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 19, want: 133},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 21, want: 147},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 21, want: 147},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 25, want: 175},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 25, want: 175},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 27, want: 189},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 27, want: 189},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 37, want: 259},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 37, want: 259},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 41, want: 287},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 41, want: 287},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 45, want: 315},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 45, want: 315},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 73, want: 511},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 73, want: 511},
+ test_uint32{fn: mul_7_uint32, fnname: "mul_7_uint32", in: 81, want: 567},
+ test_uint32{fn: mul_uint32_7, fnname: "mul_uint32_7", in: 81, want: 567},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 3, want: 27},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 3, want: 27},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 5, want: 45},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 5, want: 45},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 7, want: 63},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 7, want: 63},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 9, want: 81},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 9, want: 81},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 10, want: 90},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 10, want: 90},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 11, want: 99},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 11, want: 99},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 13, want: 117},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 13, want: 117},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 19, want: 171},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 19, want: 171},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 21, want: 189},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 21, want: 189},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 25, want: 225},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 25, want: 225},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 27, want: 243},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 27, want: 243},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 37, want: 333},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 37, want: 333},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 41, want: 369},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 41, want: 369},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 45, want: 405},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 45, want: 405},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 73, want: 657},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 73, want: 657},
+ test_uint32{fn: mul_9_uint32, fnname: "mul_9_uint32", in: 81, want: 729},
+ test_uint32{fn: mul_uint32_9, fnname: "mul_uint32_9", in: 81, want: 729},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 3, want: 30},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 3, want: 30},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 5, want: 50},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 5, want: 50},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 7, want: 70},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 7, want: 70},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 9, want: 90},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 9, want: 90},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 10, want: 100},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 10, want: 100},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 11, want: 110},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 11, want: 110},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 13, want: 130},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 13, want: 130},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 19, want: 190},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 19, want: 190},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 21, want: 210},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 21, want: 210},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 25, want: 250},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 25, want: 250},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 27, want: 270},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 27, want: 270},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 37, want: 370},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 37, want: 370},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 41, want: 410},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 41, want: 410},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 45, want: 450},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 45, want: 450},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 73, want: 730},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 73, want: 730},
+ test_uint32{fn: mul_10_uint32, fnname: "mul_10_uint32", in: 81, want: 810},
+ test_uint32{fn: mul_uint32_10, fnname: "mul_uint32_10", in: 81, want: 810},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 3, want: 33},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 3, want: 33},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 5, want: 55},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 5, want: 55},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 7, want: 77},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 7, want: 77},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 9, want: 99},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 9, want: 99},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 10, want: 110},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 10, want: 110},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 11, want: 121},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 11, want: 121},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 13, want: 143},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 13, want: 143},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 19, want: 209},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 19, want: 209},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 21, want: 231},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 21, want: 231},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 25, want: 275},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 25, want: 275},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 27, want: 297},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 27, want: 297},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 37, want: 407},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 37, want: 407},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 41, want: 451},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 41, want: 451},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 45, want: 495},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 45, want: 495},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 73, want: 803},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 73, want: 803},
+ test_uint32{fn: mul_11_uint32, fnname: "mul_11_uint32", in: 81, want: 891},
+ test_uint32{fn: mul_uint32_11, fnname: "mul_uint32_11", in: 81, want: 891},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 3, want: 39},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 3, want: 39},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 5, want: 65},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 5, want: 65},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 7, want: 91},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 7, want: 91},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 9, want: 117},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 9, want: 117},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 10, want: 130},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 10, want: 130},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 11, want: 143},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 11, want: 143},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 13, want: 169},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 13, want: 169},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 19, want: 247},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 19, want: 247},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 21, want: 273},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 21, want: 273},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 25, want: 325},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 25, want: 325},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 27, want: 351},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 27, want: 351},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 37, want: 481},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 37, want: 481},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 41, want: 533},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 41, want: 533},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 45, want: 585},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 45, want: 585},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 73, want: 949},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 73, want: 949},
+ test_uint32{fn: mul_13_uint32, fnname: "mul_13_uint32", in: 81, want: 1053},
+ test_uint32{fn: mul_uint32_13, fnname: "mul_uint32_13", in: 81, want: 1053},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 3, want: 57},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 3, want: 57},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 5, want: 95},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 5, want: 95},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 7, want: 133},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 7, want: 133},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 9, want: 171},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 9, want: 171},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 10, want: 190},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 10, want: 190},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 11, want: 209},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 11, want: 209},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 13, want: 247},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 13, want: 247},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 19, want: 361},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 19, want: 361},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 21, want: 399},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 21, want: 399},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 25, want: 475},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 25, want: 475},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 27, want: 513},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 27, want: 513},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 37, want: 703},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 37, want: 703},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 41, want: 779},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 41, want: 779},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 45, want: 855},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 45, want: 855},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 73, want: 1387},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 73, want: 1387},
+ test_uint32{fn: mul_19_uint32, fnname: "mul_19_uint32", in: 81, want: 1539},
+ test_uint32{fn: mul_uint32_19, fnname: "mul_uint32_19", in: 81, want: 1539},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 3, want: 63},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 3, want: 63},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 5, want: 105},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 5, want: 105},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 7, want: 147},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 7, want: 147},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 9, want: 189},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 9, want: 189},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 10, want: 210},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 10, want: 210},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 11, want: 231},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 11, want: 231},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 13, want: 273},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 13, want: 273},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 19, want: 399},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 19, want: 399},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 21, want: 441},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 21, want: 441},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 25, want: 525},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 25, want: 525},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 27, want: 567},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 27, want: 567},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 37, want: 777},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 37, want: 777},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 41, want: 861},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 41, want: 861},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 45, want: 945},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 45, want: 945},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 73, want: 1533},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 73, want: 1533},
+ test_uint32{fn: mul_21_uint32, fnname: "mul_21_uint32", in: 81, want: 1701},
+ test_uint32{fn: mul_uint32_21, fnname: "mul_uint32_21", in: 81, want: 1701},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 3, want: 75},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 3, want: 75},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 5, want: 125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 5, want: 125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 7, want: 175},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 7, want: 175},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 9, want: 225},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 9, want: 225},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 10, want: 250},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 10, want: 250},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 11, want: 275},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 11, want: 275},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 13, want: 325},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 13, want: 325},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 19, want: 475},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 19, want: 475},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 21, want: 525},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 21, want: 525},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 25, want: 625},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 25, want: 625},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 27, want: 675},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 27, want: 675},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 37, want: 925},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 37, want: 925},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 41, want: 1025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 41, want: 1025},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 45, want: 1125},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 45, want: 1125},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 73, want: 1825},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 73, want: 1825},
+ test_uint32{fn: mul_25_uint32, fnname: "mul_25_uint32", in: 81, want: 2025},
+ test_uint32{fn: mul_uint32_25, fnname: "mul_uint32_25", in: 81, want: 2025},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 3, want: 81},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 3, want: 81},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 5, want: 135},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 5, want: 135},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 7, want: 189},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 7, want: 189},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 9, want: 243},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 9, want: 243},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 10, want: 270},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 10, want: 270},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 11, want: 297},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 11, want: 297},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 13, want: 351},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 13, want: 351},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 19, want: 513},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 19, want: 513},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 21, want: 567},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 21, want: 567},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 25, want: 675},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 25, want: 675},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 27, want: 729},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 27, want: 729},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 37, want: 999},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 37, want: 999},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 41, want: 1107},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 41, want: 1107},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 45, want: 1215},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 45, want: 1215},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 73, want: 1971},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 73, want: 1971},
+ test_uint32{fn: mul_27_uint32, fnname: "mul_27_uint32", in: 81, want: 2187},
+ test_uint32{fn: mul_uint32_27, fnname: "mul_uint32_27", in: 81, want: 2187},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 3, want: 111},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 3, want: 111},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 5, want: 185},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 5, want: 185},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 7, want: 259},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 7, want: 259},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 9, want: 333},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 9, want: 333},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 10, want: 370},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 10, want: 370},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 11, want: 407},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 11, want: 407},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 13, want: 481},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 13, want: 481},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 19, want: 703},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 19, want: 703},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 21, want: 777},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 21, want: 777},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 25, want: 925},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 25, want: 925},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 27, want: 999},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 27, want: 999},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 37, want: 1369},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 37, want: 1369},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 41, want: 1517},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 41, want: 1517},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 45, want: 1665},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 45, want: 1665},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 73, want: 2701},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 73, want: 2701},
+ test_uint32{fn: mul_37_uint32, fnname: "mul_37_uint32", in: 81, want: 2997},
+ test_uint32{fn: mul_uint32_37, fnname: "mul_uint32_37", in: 81, want: 2997},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 3, want: 123},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 3, want: 123},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 5, want: 205},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 5, want: 205},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 7, want: 287},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 7, want: 287},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 9, want: 369},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 9, want: 369},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 10, want: 410},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 10, want: 410},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 11, want: 451},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 11, want: 451},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 13, want: 533},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 13, want: 533},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 19, want: 779},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 19, want: 779},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 21, want: 861},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 21, want: 861},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 25, want: 1025},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 25, want: 1025},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 27, want: 1107},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 27, want: 1107},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 37, want: 1517},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 37, want: 1517},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 41, want: 1681},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 41, want: 1681},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 45, want: 1845},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 45, want: 1845},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 73, want: 2993},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 73, want: 2993},
+ test_uint32{fn: mul_41_uint32, fnname: "mul_41_uint32", in: 81, want: 3321},
+ test_uint32{fn: mul_uint32_41, fnname: "mul_uint32_41", in: 81, want: 3321},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 3, want: 135},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 3, want: 135},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 5, want: 225},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 5, want: 225},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 7, want: 315},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 7, want: 315},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 9, want: 405},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 9, want: 405},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 10, want: 450},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 10, want: 450},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 11, want: 495},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 11, want: 495},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 13, want: 585},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 13, want: 585},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 19, want: 855},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 19, want: 855},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 21, want: 945},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 21, want: 945},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 25, want: 1125},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 25, want: 1125},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 27, want: 1215},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 27, want: 1215},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 37, want: 1665},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 37, want: 1665},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 41, want: 1845},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 41, want: 1845},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 45, want: 2025},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 45, want: 2025},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 73, want: 3285},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 73, want: 3285},
+ test_uint32{fn: mul_45_uint32, fnname: "mul_45_uint32", in: 81, want: 3645},
+ test_uint32{fn: mul_uint32_45, fnname: "mul_uint32_45", in: 81, want: 3645},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 3, want: 219},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 3, want: 219},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 5, want: 365},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 5, want: 365},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 7, want: 511},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 7, want: 511},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 9, want: 657},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 9, want: 657},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 10, want: 730},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 10, want: 730},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 11, want: 803},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 11, want: 803},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 13, want: 949},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 13, want: 949},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 19, want: 1387},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 19, want: 1387},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 21, want: 1533},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 21, want: 1533},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 25, want: 1825},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 25, want: 1825},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 27, want: 1971},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 27, want: 1971},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 37, want: 2701},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 37, want: 2701},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 41, want: 2993},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 41, want: 2993},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 45, want: 3285},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 45, want: 3285},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 73, want: 5329},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 73, want: 5329},
+ test_uint32{fn: mul_73_uint32, fnname: "mul_73_uint32", in: 81, want: 5913},
+ test_uint32{fn: mul_uint32_73, fnname: "mul_uint32_73", in: 81, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 3, want: 243},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 3, want: 243},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 5, want: 405},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 5, want: 405},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 7, want: 567},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 7, want: 567},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 9, want: 729},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 9, want: 729},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 10, want: 810},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 10, want: 810},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 11, want: 891},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 11, want: 891},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 13, want: 1053},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 13, want: 1053},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 19, want: 1539},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 19, want: 1539},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 21, want: 1701},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 21, want: 1701},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 25, want: 2025},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 25, want: 2025},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 27, want: 2187},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 27, want: 2187},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 37, want: 2997},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 37, want: 2997},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 41, want: 3321},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 41, want: 3321},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 45, want: 3645},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 45, want: 3645},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 73, want: 5913},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 73, want: 5913},
+ test_uint32{fn: mul_81_uint32, fnname: "mul_81_uint32", in: 81, want: 6561},
+ test_uint32{fn: mul_uint32_81, fnname: "mul_uint32_81", in: 81, want: 6561}}
+
+type test_int32 struct {
+ fn func(int32) int32
+ fnname string
+ in int32
+ want int32
+}
+
+var tests_int32 = []test_int32{
+
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: add_Neg2147483648_int32, fnname: "add_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: add_int32_Neg2147483648, fnname: "add_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -2147483647, want: 2},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -2147483647, want: 2},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: -1, want: -2147483648},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 1, want: -2147483646},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 1, want: -2147483646},
+ test_int32{fn: add_Neg2147483647_int32, fnname: "add_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: add_int32_Neg2147483647, fnname: "add_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -2147483647, want: -2147483648},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: -1, want: -2},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: -1, want: -2},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: add_Neg1_int32, fnname: "add_Neg1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_int32_Neg1, fnname: "add_int32_Neg1", in: 2147483647, want: 2147483646},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: -1, want: -1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: -1, want: -1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 0, want: 0},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 0, want: 0},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 1, want: 1},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 1, want: 1},
+ test_int32{fn: add_0_int32, fnname: "add_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_int32_0, fnname: "add_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -2147483647, want: -2147483646},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: -1, want: 0},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: -1, want: 0},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 0, want: 1},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 0, want: 1},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 1, want: 2},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 1, want: 2},
+ test_int32{fn: add_1_int32, fnname: "add_1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_int32_1, fnname: "add_int32_1", in: 2147483647, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: -1, want: 2147483646},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 1, want: -2147483648},
+ test_int32{fn: add_2147483647_int32, fnname: "add_2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: add_int32_2147483647, fnname: "add_int32_2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: -1, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 1, want: 2147483647},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: sub_Neg2147483648_int32, fnname: "sub_Neg2147483648_int32", in: 2147483647, want: 1},
+ test_int32{fn: sub_int32_Neg2147483648, fnname: "sub_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: -1, want: -2147483646},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 0, want: 2147483647},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: sub_Neg2147483647_int32, fnname: "sub_Neg2147483647_int32", in: 2147483647, want: 2},
+ test_int32{fn: sub_int32_Neg2147483647, fnname: "sub_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -2147483647, want: -2147483646},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 0, want: 1},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 1, want: 2},
+ test_int32{fn: sub_Neg1_int32, fnname: "sub_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_Neg1, fnname: "sub_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: -1, want: 1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: -1, want: -1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 0, want: 0},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 0, want: 0},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 1, want: -1},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 1, want: 1},
+ test_int32{fn: sub_0_int32, fnname: "sub_0_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: sub_int32_0, fnname: "sub_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483648, want: 2147483647},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: -1, want: 2},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: -1, want: -2},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 0, want: 1},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 0, want: -1},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 1, want: 0},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 1, want: 0},
+ test_int32{fn: sub_1_int32, fnname: "sub_1_int32", in: 2147483647, want: -2147483646},
+ test_int32{fn: sub_int32_1, fnname: "sub_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483648, want: 1},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -2147483647, want: 2},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 0, want: -2147483647},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 1, want: -2147483646},
+ test_int32{fn: sub_2147483647_int32, fnname: "sub_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: sub_int32_2147483647, fnname: "sub_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483648, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -2147483647, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483648_int32, fnname: "div_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483648, fnname: "div_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 1, want: 0},
+ test_int32{fn: div_Neg2147483647_int32, fnname: "div_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: div_int32_Neg2147483647, fnname: "div_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: div_Neg1_int32, fnname: "div_Neg1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_Neg1, fnname: "div_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: -1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 1, want: 0},
+ test_int32{fn: div_0_int32, fnname: "div_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: -1, want: -1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 0, want: 0},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 1, want: 1},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 1, want: 1},
+ test_int32{fn: div_1_int32, fnname: "div_1_int32", in: 2147483647, want: 0},
+ test_int32{fn: div_int32_1, fnname: "div_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: -1, want: 0},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 1, want: 0},
+ test_int32{fn: div_2147483647_int32, fnname: "div_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: div_int32_2147483647, fnname: "div_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 1, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 1, want: -2147483648},
+ test_int32{fn: mul_Neg2147483648_int32, fnname: "mul_Neg2147483648_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483648, fnname: "mul_int32_Neg2147483648", in: 2147483647, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -2147483647, want: 1},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: -1, want: 2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: mul_Neg2147483647_int32, fnname: "mul_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: mul_int32_Neg2147483647, fnname: "mul_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -2147483647, want: 2147483647},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: -1, want: 1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: -1, want: 1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: mul_Neg1_int32, fnname: "mul_Neg1_int32", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_Neg1, fnname: "mul_int32_Neg1", in: 2147483647, want: -2147483647},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: -1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: -1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 0, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 1, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 1, want: 0},
+ test_int32{fn: mul_0_int32, fnname: "mul_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mul_int32_0, fnname: "mul_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: -1, want: -1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: -1, want: -1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 0, want: 0},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 1, want: 1},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 1, want: 1},
+ test_int32{fn: mul_1_int32, fnname: "mul_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_int32_1, fnname: "mul_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: -1, want: -2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: mul_2147483647_int32, fnname: "mul_2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: mul_int32_2147483647, fnname: "mul_int32_2147483647", in: 2147483647, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483648_int32, fnname: "mod_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg2147483648, fnname: "mod_int32_Neg2147483648", in: 2147483647, want: 2147483647},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: mod_Neg2147483647_int32, fnname: "mod_Neg2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_Neg2147483647, fnname: "mod_int32_Neg2147483647", in: 2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483648, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -2147483647, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 1, want: 0},
+ test_int32{fn: mod_Neg1_int32, fnname: "mod_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: mod_int32_Neg1, fnname: "mod_int32_Neg1", in: 2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: -1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 1, want: 0},
+ test_int32{fn: mod_0_int32, fnname: "mod_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483648, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -2147483647, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: -1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 0, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 1, want: 0},
+ test_int32{fn: mod_1_int32, fnname: "mod_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: mod_int32_1, fnname: "mod_int32_1", in: 2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -2147483647, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: -1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 1, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: mod_2147483647_int32, fnname: "mod_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: mod_int32_2147483647, fnname: "mod_int32_2147483647", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -2147483647, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: -1, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: -1, want: -2147483648},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 1, want: 0},
+ test_int32{fn: and_Neg2147483648_int32, fnname: "and_Neg2147483648_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_Neg2147483648, fnname: "and_int32_Neg2147483648", in: 2147483647, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: -1, want: -2147483647},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: -1, want: -2147483647},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 0, want: 0},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 1, want: 1},
+ test_int32{fn: and_Neg2147483647_int32, fnname: "and_Neg2147483647_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_Neg2147483647, fnname: "and_int32_Neg2147483647", in: 2147483647, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483648, want: -2147483648},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -2147483647, want: -2147483647},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 0, want: 0},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 1, want: 1},
+ test_int32{fn: and_Neg1_int32, fnname: "and_Neg1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_Neg1, fnname: "and_int32_Neg1", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483648, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -2147483647, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: -1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: -1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 0, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 1, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 1, want: 0},
+ test_int32{fn: and_0_int32, fnname: "and_0_int32", in: 2147483647, want: 0},
+ test_int32{fn: and_int32_0, fnname: "and_int32_0", in: 2147483647, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483648, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -2147483647, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: -1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: -1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 0, want: 0},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 1, want: 1},
+ test_int32{fn: and_1_int32, fnname: "and_1_int32", in: 2147483647, want: 1},
+ test_int32{fn: and_int32_1, fnname: "and_int32_1", in: 2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483648, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483648, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -2147483647, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -2147483647, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: -1, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: -1, want: 2147483647},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 0, want: 0},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 0, want: 0},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 1, want: 1},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 1, want: 1},
+ test_int32{fn: and_2147483647_int32, fnname: "and_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: and_int32_2147483647, fnname: "and_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483648_int32, fnname: "or_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483648, fnname: "or_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: -1, want: -1},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 1, want: -2147483647},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 1, want: -2147483647},
+ test_int32{fn: or_Neg2147483647_int32, fnname: "or_Neg2147483647_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg2147483647, fnname: "or_int32_Neg2147483647", in: 2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483648, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -2147483647, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: -1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 1, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 1, want: -1},
+ test_int32{fn: or_Neg1_int32, fnname: "or_Neg1_int32", in: 2147483647, want: -1},
+ test_int32{fn: or_int32_Neg1, fnname: "or_int32_Neg1", in: 2147483647, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: -1, want: -1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 0, want: 0},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 0, want: 0},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 1, want: 1},
+ test_int32{fn: or_0_int32, fnname: "or_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_0, fnname: "or_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -2147483647, want: -2147483647},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: -1, want: -1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 0, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 0, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 1, want: 1},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 1, want: 1},
+ test_int32{fn: or_1_int32, fnname: "or_1_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_1, fnname: "or_int32_1", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -2147483647, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -2147483647, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: -1, want: -1},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: -1, want: -1},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 1, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 1, want: 2147483647},
+ test_int32{fn: or_2147483647_int32, fnname: "or_2147483647_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: or_int32_2147483647, fnname: "or_int32_2147483647", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483648, want: 0},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483648, want: 0},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -2147483647, want: 1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -2147483647, want: 1},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: -1, want: 2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: -1, want: 2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 0, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 0, want: -2147483648},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 1, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 1, want: -2147483647},
+ test_int32{fn: xor_Neg2147483648_int32, fnname: "xor_Neg2147483648_int32", in: 2147483647, want: -1},
+ test_int32{fn: xor_int32_Neg2147483648, fnname: "xor_int32_Neg2147483648", in: 2147483647, want: -1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483648, want: 1},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483648, want: 1},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -2147483647, want: 0},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -2147483647, want: 0},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: -1, want: 2147483646},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: -1, want: 2147483646},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 0, want: -2147483647},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 0, want: -2147483647},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 1, want: -2147483648},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 1, want: -2147483648},
+ test_int32{fn: xor_Neg2147483647_int32, fnname: "xor_Neg2147483647_int32", in: 2147483647, want: -2},
+ test_int32{fn: xor_int32_Neg2147483647, fnname: "xor_int32_Neg2147483647", in: 2147483647, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483648, want: 2147483647},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -2147483647, want: 2147483646},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: -1, want: 0},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: -1, want: 0},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 0, want: -1},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 0, want: -1},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 1, want: -2},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 1, want: -2},
+ test_int32{fn: xor_Neg1_int32, fnname: "xor_Neg1_int32", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_Neg1, fnname: "xor_int32_Neg1", in: 2147483647, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483648, want: -2147483648},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -2147483647, want: -2147483647},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: -1, want: -1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: -1, want: -1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 0, want: 0},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 0, want: 0},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 1, want: 1},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 1, want: 1},
+ test_int32{fn: xor_0_int32, fnname: "xor_0_int32", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_int32_0, fnname: "xor_int32_0", in: 2147483647, want: 2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483648, want: -2147483647},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -2147483647, want: -2147483648},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: -1, want: -2},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: -1, want: -2},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 0, want: 1},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 0, want: 1},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 1, want: 0},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 1, want: 0},
+ test_int32{fn: xor_1_int32, fnname: "xor_1_int32", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_int32_1, fnname: "xor_int32_1", in: 2147483647, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483648, want: -1},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483648, want: -1},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -2147483647, want: -2},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -2147483647, want: -2},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: -1, want: -2147483648},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: -1, want: -2147483648},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 0, want: 2147483647},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 0, want: 2147483647},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 1, want: 2147483646},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 1, want: 2147483646},
+ test_int32{fn: xor_2147483647_int32, fnname: "xor_2147483647_int32", in: 2147483647, want: 0},
+ test_int32{fn: xor_int32_2147483647, fnname: "xor_int32_2147483647", in: 2147483647, want: 0}}
+
+type test_int32mul struct {
+ fn func(int32) int32
+ fnname string
+ in int32
+ want int32
+}
+
+var tests_int32mul = []test_int32{
+
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -9, want: 81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -9, want: 81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -5, want: 45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -5, want: 45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: -3, want: 27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: -3, want: 27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 3, want: -27},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 3, want: -27},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 5, want: -45},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 5, want: -45},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 7, want: -63},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 7, want: -63},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 9, want: -81},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 9, want: -81},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 10, want: -90},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 10, want: -90},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 11, want: -99},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 11, want: -99},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 13, want: -117},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 13, want: -117},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 19, want: -171},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 19, want: -171},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 21, want: -189},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 21, want: -189},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 25, want: -225},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 25, want: -225},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 27, want: -243},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 27, want: -243},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 37, want: -333},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 37, want: -333},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 41, want: -369},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 41, want: -369},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 45, want: -405},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 45, want: -405},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 73, want: -657},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 73, want: -657},
+ test_int32{fn: mul_Neg9_int32, fnname: "mul_Neg9_int32", in: 81, want: -729},
+ test_int32{fn: mul_int32_Neg9, fnname: "mul_int32_Neg9", in: 81, want: -729},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -9, want: 45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -9, want: 45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -5, want: 25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -5, want: 25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: -3, want: 15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: -3, want: 15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 3, want: -15},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 3, want: -15},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 5, want: -25},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 5, want: -25},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 7, want: -35},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 7, want: -35},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 9, want: -45},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 9, want: -45},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 10, want: -50},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 10, want: -50},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 11, want: -55},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 11, want: -55},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 13, want: -65},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 13, want: -65},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 19, want: -95},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 19, want: -95},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 21, want: -105},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 21, want: -105},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 25, want: -125},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 25, want: -125},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 27, want: -135},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 27, want: -135},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 37, want: -185},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 37, want: -185},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 41, want: -205},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 41, want: -205},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 45, want: -225},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 45, want: -225},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 73, want: -365},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 73, want: -365},
+ test_int32{fn: mul_Neg5_int32, fnname: "mul_Neg5_int32", in: 81, want: -405},
+ test_int32{fn: mul_int32_Neg5, fnname: "mul_int32_Neg5", in: 81, want: -405},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -9, want: 27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -9, want: 27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -5, want: 15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -5, want: 15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: -3, want: 9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: -3, want: 9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 3, want: -9},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 3, want: -9},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 5, want: -15},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 5, want: -15},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 7, want: -21},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 7, want: -21},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 9, want: -27},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 9, want: -27},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 10, want: -30},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 10, want: -30},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 11, want: -33},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 11, want: -33},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 13, want: -39},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 13, want: -39},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 19, want: -57},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 19, want: -57},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 21, want: -63},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 21, want: -63},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 25, want: -75},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 25, want: -75},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 27, want: -81},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 27, want: -81},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 37, want: -111},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 37, want: -111},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 41, want: -123},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 41, want: -123},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 45, want: -135},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 45, want: -135},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 73, want: -219},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 73, want: -219},
+ test_int32{fn: mul_Neg3_int32, fnname: "mul_Neg3_int32", in: 81, want: -243},
+ test_int32{fn: mul_int32_Neg3, fnname: "mul_int32_Neg3", in: 81, want: -243},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -9, want: -27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -9, want: -27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -5, want: -15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -5, want: -15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: -3, want: -9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: -3, want: -9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 3, want: 9},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 3, want: 9},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 5, want: 15},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 5, want: 15},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 7, want: 21},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 7, want: 21},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 9, want: 27},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 9, want: 27},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 10, want: 30},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 10, want: 30},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 11, want: 33},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 11, want: 33},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 13, want: 39},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 13, want: 39},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 19, want: 57},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 19, want: 57},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 21, want: 63},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 21, want: 63},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 25, want: 75},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 25, want: 75},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 27, want: 81},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 27, want: 81},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 37, want: 111},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 37, want: 111},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 41, want: 123},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 41, want: 123},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 45, want: 135},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 45, want: 135},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 73, want: 219},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 73, want: 219},
+ test_int32{fn: mul_3_int32, fnname: "mul_3_int32", in: 81, want: 243},
+ test_int32{fn: mul_int32_3, fnname: "mul_int32_3", in: 81, want: 243},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -9, want: -45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -9, want: -45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -5, want: -25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -5, want: -25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: -3, want: -15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: -3, want: -15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 3, want: 15},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 3, want: 15},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 5, want: 25},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 5, want: 25},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 7, want: 35},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 7, want: 35},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 9, want: 45},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 9, want: 45},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 10, want: 50},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 10, want: 50},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 11, want: 55},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 11, want: 55},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 13, want: 65},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 13, want: 65},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 19, want: 95},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 19, want: 95},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 21, want: 105},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 21, want: 105},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 25, want: 125},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 25, want: 125},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 27, want: 135},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 27, want: 135},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 37, want: 185},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 37, want: 185},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 41, want: 205},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 41, want: 205},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 45, want: 225},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 45, want: 225},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 73, want: 365},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 73, want: 365},
+ test_int32{fn: mul_5_int32, fnname: "mul_5_int32", in: 81, want: 405},
+ test_int32{fn: mul_int32_5, fnname: "mul_int32_5", in: 81, want: 405},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -9, want: -63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -9, want: -63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -5, want: -35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -5, want: -35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: -3, want: -21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: -3, want: -21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 3, want: 21},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 3, want: 21},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 5, want: 35},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 5, want: 35},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 7, want: 49},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 7, want: 49},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 9, want: 63},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 9, want: 63},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 10, want: 70},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 10, want: 70},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 11, want: 77},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 11, want: 77},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 13, want: 91},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 13, want: 91},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 19, want: 133},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 19, want: 133},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 21, want: 147},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 21, want: 147},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 25, want: 175},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 25, want: 175},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 27, want: 189},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 27, want: 189},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 37, want: 259},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 37, want: 259},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 41, want: 287},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 41, want: 287},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 45, want: 315},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 45, want: 315},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 73, want: 511},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 73, want: 511},
+ test_int32{fn: mul_7_int32, fnname: "mul_7_int32", in: 81, want: 567},
+ test_int32{fn: mul_int32_7, fnname: "mul_int32_7", in: 81, want: 567},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -9, want: -81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -9, want: -81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -5, want: -45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -5, want: -45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: -3, want: -27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: -3, want: -27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 3, want: 27},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 3, want: 27},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 5, want: 45},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 5, want: 45},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 7, want: 63},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 7, want: 63},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 9, want: 81},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 9, want: 81},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 10, want: 90},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 10, want: 90},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 11, want: 99},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 11, want: 99},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 13, want: 117},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 13, want: 117},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 19, want: 171},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 19, want: 171},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 21, want: 189},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 21, want: 189},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 25, want: 225},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 25, want: 225},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 27, want: 243},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 27, want: 243},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 37, want: 333},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 37, want: 333},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 41, want: 369},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 41, want: 369},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 45, want: 405},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 45, want: 405},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 73, want: 657},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 73, want: 657},
+ test_int32{fn: mul_9_int32, fnname: "mul_9_int32", in: 81, want: 729},
+ test_int32{fn: mul_int32_9, fnname: "mul_int32_9", in: 81, want: 729},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -9, want: -90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -9, want: -90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -5, want: -50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -5, want: -50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: -3, want: -30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: -3, want: -30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 3, want: 30},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 3, want: 30},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 5, want: 50},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 5, want: 50},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 7, want: 70},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 7, want: 70},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 9, want: 90},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 9, want: 90},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 10, want: 100},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 10, want: 100},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 11, want: 110},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 11, want: 110},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 13, want: 130},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 13, want: 130},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 19, want: 190},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 19, want: 190},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 21, want: 210},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 21, want: 210},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 25, want: 250},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 25, want: 250},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 27, want: 270},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 27, want: 270},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 37, want: 370},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 37, want: 370},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 41, want: 410},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 41, want: 410},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 45, want: 450},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 45, want: 450},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 73, want: 730},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 73, want: 730},
+ test_int32{fn: mul_10_int32, fnname: "mul_10_int32", in: 81, want: 810},
+ test_int32{fn: mul_int32_10, fnname: "mul_int32_10", in: 81, want: 810},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -9, want: -99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -9, want: -99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -5, want: -55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -5, want: -55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: -3, want: -33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: -3, want: -33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 3, want: 33},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 3, want: 33},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 5, want: 55},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 5, want: 55},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 7, want: 77},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 7, want: 77},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 9, want: 99},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 9, want: 99},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 10, want: 110},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 10, want: 110},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 11, want: 121},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 11, want: 121},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 13, want: 143},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 13, want: 143},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 19, want: 209},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 19, want: 209},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 21, want: 231},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 21, want: 231},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 25, want: 275},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 25, want: 275},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 27, want: 297},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 27, want: 297},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 37, want: 407},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 37, want: 407},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 41, want: 451},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 41, want: 451},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 45, want: 495},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 45, want: 495},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 73, want: 803},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 73, want: 803},
+ test_int32{fn: mul_11_int32, fnname: "mul_11_int32", in: 81, want: 891},
+ test_int32{fn: mul_int32_11, fnname: "mul_int32_11", in: 81, want: 891},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -9, want: -117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -9, want: -117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -5, want: -65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -5, want: -65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: -3, want: -39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: -3, want: -39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 3, want: 39},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 3, want: 39},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 5, want: 65},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 5, want: 65},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 7, want: 91},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 7, want: 91},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 9, want: 117},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 9, want: 117},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 10, want: 130},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 10, want: 130},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 11, want: 143},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 11, want: 143},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 13, want: 169},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 13, want: 169},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 19, want: 247},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 19, want: 247},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 21, want: 273},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 21, want: 273},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 25, want: 325},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 25, want: 325},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 27, want: 351},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 27, want: 351},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 37, want: 481},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 37, want: 481},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 41, want: 533},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 41, want: 533},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 45, want: 585},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 45, want: 585},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 73, want: 949},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 73, want: 949},
+ test_int32{fn: mul_13_int32, fnname: "mul_13_int32", in: 81, want: 1053},
+ test_int32{fn: mul_int32_13, fnname: "mul_int32_13", in: 81, want: 1053},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -9, want: -171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -9, want: -171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -5, want: -95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -5, want: -95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: -3, want: -57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: -3, want: -57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 3, want: 57},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 3, want: 57},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 5, want: 95},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 5, want: 95},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 7, want: 133},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 7, want: 133},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 9, want: 171},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 9, want: 171},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 10, want: 190},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 10, want: 190},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 11, want: 209},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 11, want: 209},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 13, want: 247},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 13, want: 247},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 19, want: 361},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 19, want: 361},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 21, want: 399},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 21, want: 399},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 25, want: 475},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 25, want: 475},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 27, want: 513},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 27, want: 513},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 37, want: 703},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 37, want: 703},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 41, want: 779},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 41, want: 779},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 45, want: 855},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 45, want: 855},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 73, want: 1387},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 73, want: 1387},
+ test_int32{fn: mul_19_int32, fnname: "mul_19_int32", in: 81, want: 1539},
+ test_int32{fn: mul_int32_19, fnname: "mul_int32_19", in: 81, want: 1539},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -9, want: -189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -9, want: -189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -5, want: -105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -5, want: -105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: -3, want: -63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: -3, want: -63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 3, want: 63},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 3, want: 63},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 5, want: 105},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 5, want: 105},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 7, want: 147},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 7, want: 147},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 9, want: 189},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 9, want: 189},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 10, want: 210},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 10, want: 210},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 11, want: 231},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 11, want: 231},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 13, want: 273},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 13, want: 273},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 19, want: 399},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 19, want: 399},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 21, want: 441},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 21, want: 441},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 25, want: 525},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 25, want: 525},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 27, want: 567},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 27, want: 567},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 37, want: 777},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 37, want: 777},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 41, want: 861},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 41, want: 861},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 45, want: 945},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 45, want: 945},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 73, want: 1533},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 73, want: 1533},
+ test_int32{fn: mul_21_int32, fnname: "mul_21_int32", in: 81, want: 1701},
+ test_int32{fn: mul_int32_21, fnname: "mul_int32_21", in: 81, want: 1701},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -9, want: -225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -9, want: -225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -5, want: -125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -5, want: -125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: -3, want: -75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: -3, want: -75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 3, want: 75},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 3, want: 75},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 5, want: 125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 5, want: 125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 7, want: 175},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 7, want: 175},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 9, want: 225},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 9, want: 225},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 10, want: 250},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 10, want: 250},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 11, want: 275},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 11, want: 275},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 13, want: 325},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 13, want: 325},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 19, want: 475},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 19, want: 475},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 21, want: 525},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 21, want: 525},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 25, want: 625},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 25, want: 625},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 27, want: 675},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 27, want: 675},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 37, want: 925},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 37, want: 925},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 41, want: 1025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 41, want: 1025},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 45, want: 1125},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 45, want: 1125},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 73, want: 1825},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 73, want: 1825},
+ test_int32{fn: mul_25_int32, fnname: "mul_25_int32", in: 81, want: 2025},
+ test_int32{fn: mul_int32_25, fnname: "mul_int32_25", in: 81, want: 2025},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -9, want: -243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -9, want: -243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -5, want: -135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -5, want: -135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: -3, want: -81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: -3, want: -81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 3, want: 81},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 3, want: 81},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 5, want: 135},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 5, want: 135},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 7, want: 189},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 7, want: 189},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 9, want: 243},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 9, want: 243},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 10, want: 270},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 10, want: 270},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 11, want: 297},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 11, want: 297},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 13, want: 351},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 13, want: 351},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 19, want: 513},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 19, want: 513},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 21, want: 567},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 21, want: 567},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 25, want: 675},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 25, want: 675},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 27, want: 729},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 27, want: 729},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 37, want: 999},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 37, want: 999},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 41, want: 1107},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 41, want: 1107},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 45, want: 1215},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 45, want: 1215},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 73, want: 1971},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 73, want: 1971},
+ test_int32{fn: mul_27_int32, fnname: "mul_27_int32", in: 81, want: 2187},
+ test_int32{fn: mul_int32_27, fnname: "mul_int32_27", in: 81, want: 2187},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -9, want: -333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -9, want: -333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -5, want: -185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -5, want: -185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: -3, want: -111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: -3, want: -111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 3, want: 111},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 3, want: 111},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 5, want: 185},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 5, want: 185},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 7, want: 259},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 7, want: 259},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 9, want: 333},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 9, want: 333},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 10, want: 370},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 10, want: 370},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 11, want: 407},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 11, want: 407},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 13, want: 481},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 13, want: 481},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 19, want: 703},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 19, want: 703},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 21, want: 777},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 21, want: 777},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 25, want: 925},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 25, want: 925},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 27, want: 999},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 27, want: 999},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 37, want: 1369},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 37, want: 1369},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 41, want: 1517},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 41, want: 1517},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 45, want: 1665},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 45, want: 1665},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 73, want: 2701},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 73, want: 2701},
+ test_int32{fn: mul_37_int32, fnname: "mul_37_int32", in: 81, want: 2997},
+ test_int32{fn: mul_int32_37, fnname: "mul_int32_37", in: 81, want: 2997},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -9, want: -369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -9, want: -369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -5, want: -205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -5, want: -205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: -3, want: -123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: -3, want: -123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 3, want: 123},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 3, want: 123},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 5, want: 205},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 5, want: 205},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 7, want: 287},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 7, want: 287},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 9, want: 369},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 9, want: 369},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 10, want: 410},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 10, want: 410},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 11, want: 451},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 11, want: 451},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 13, want: 533},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 13, want: 533},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 19, want: 779},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 19, want: 779},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 21, want: 861},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 21, want: 861},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 25, want: 1025},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 25, want: 1025},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 27, want: 1107},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 27, want: 1107},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 37, want: 1517},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 37, want: 1517},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 41, want: 1681},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 41, want: 1681},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 45, want: 1845},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 45, want: 1845},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 73, want: 2993},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 73, want: 2993},
+ test_int32{fn: mul_41_int32, fnname: "mul_41_int32", in: 81, want: 3321},
+ test_int32{fn: mul_int32_41, fnname: "mul_int32_41", in: 81, want: 3321},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -9, want: -405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -9, want: -405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -5, want: -225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -5, want: -225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: -3, want: -135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: -3, want: -135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 3, want: 135},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 3, want: 135},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 5, want: 225},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 5, want: 225},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 7, want: 315},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 7, want: 315},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 9, want: 405},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 9, want: 405},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 10, want: 450},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 10, want: 450},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 11, want: 495},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 11, want: 495},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 13, want: 585},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 13, want: 585},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 19, want: 855},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 19, want: 855},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 21, want: 945},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 21, want: 945},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 25, want: 1125},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 25, want: 1125},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 27, want: 1215},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 27, want: 1215},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 37, want: 1665},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 37, want: 1665},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 41, want: 1845},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 41, want: 1845},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 45, want: 2025},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 45, want: 2025},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 73, want: 3285},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 73, want: 3285},
+ test_int32{fn: mul_45_int32, fnname: "mul_45_int32", in: 81, want: 3645},
+ test_int32{fn: mul_int32_45, fnname: "mul_int32_45", in: 81, want: 3645},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -9, want: -657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -9, want: -657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -5, want: -365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -5, want: -365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: -3, want: -219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: -3, want: -219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 3, want: 219},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 3, want: 219},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 5, want: 365},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 5, want: 365},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 7, want: 511},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 7, want: 511},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 9, want: 657},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 9, want: 657},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 10, want: 730},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 10, want: 730},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 11, want: 803},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 11, want: 803},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 13, want: 949},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 13, want: 949},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 19, want: 1387},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 19, want: 1387},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 21, want: 1533},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 21, want: 1533},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 25, want: 1825},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 25, want: 1825},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 27, want: 1971},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 27, want: 1971},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 37, want: 2701},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 37, want: 2701},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 41, want: 2993},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 41, want: 2993},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 45, want: 3285},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 45, want: 3285},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 73, want: 5329},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 73, want: 5329},
+ test_int32{fn: mul_73_int32, fnname: "mul_73_int32", in: 81, want: 5913},
+ test_int32{fn: mul_int32_73, fnname: "mul_int32_73", in: 81, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -9, want: -729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -9, want: -729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -5, want: -405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -5, want: -405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: -3, want: -243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: -3, want: -243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 3, want: 243},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 3, want: 243},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 5, want: 405},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 5, want: 405},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 7, want: 567},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 7, want: 567},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 9, want: 729},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 9, want: 729},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 10, want: 810},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 10, want: 810},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 11, want: 891},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 11, want: 891},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 13, want: 1053},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 13, want: 1053},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 19, want: 1539},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 19, want: 1539},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 21, want: 1701},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 21, want: 1701},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 25, want: 2025},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 25, want: 2025},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 27, want: 2187},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 27, want: 2187},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 37, want: 2997},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 37, want: 2997},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 41, want: 3321},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 41, want: 3321},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 45, want: 3645},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 45, want: 3645},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 73, want: 5913},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 73, want: 5913},
+ test_int32{fn: mul_81_int32, fnname: "mul_81_int32", in: 81, want: 6561},
+ test_int32{fn: mul_int32_81, fnname: "mul_int32_81", in: 81, want: 6561}}
+
+type test_uint16 struct {
+ fn func(uint16) uint16
+ fnname string
+ in uint16
+ want uint16
+}
+
+var tests_uint16 = []test_uint16{
+
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 0, want: 0},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 0, want: 0},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 1, want: 1},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 1, want: 1},
+ test_uint16{fn: add_0_uint16, fnname: "add_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: add_uint16_0, fnname: "add_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 0, want: 1},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 0, want: 1},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 1, want: 2},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 1, want: 2},
+ test_uint16{fn: add_1_uint16, fnname: "add_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: add_uint16_1, fnname: "add_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: add_65535_uint16, fnname: "add_65535_uint16", in: 65535, want: 65534},
+ test_uint16{fn: add_uint16_65535, fnname: "add_uint16_65535", in: 65535, want: 65534},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 0, want: 0},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 0, want: 0},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 1, want: 65535},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 1, want: 1},
+ test_uint16{fn: sub_0_uint16, fnname: "sub_0_uint16", in: 65535, want: 1},
+ test_uint16{fn: sub_uint16_0, fnname: "sub_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 0, want: 1},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 0, want: 65535},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 1, want: 0},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 1, want: 0},
+ test_uint16{fn: sub_1_uint16, fnname: "sub_1_uint16", in: 65535, want: 2},
+ test_uint16{fn: sub_uint16_1, fnname: "sub_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 0, want: 1},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 1, want: 2},
+ test_uint16{fn: sub_65535_uint16, fnname: "sub_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: sub_uint16_65535, fnname: "sub_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 1, want: 0},
+ test_uint16{fn: div_0_uint16, fnname: "div_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 0, want: 0},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 1, want: 1},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 1, want: 1},
+ test_uint16{fn: div_1_uint16, fnname: "div_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: div_uint16_1, fnname: "div_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: div_65535_uint16, fnname: "div_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: div_uint16_65535, fnname: "div_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 0, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 1, want: 0},
+ test_uint16{fn: mul_0_uint16, fnname: "mul_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mul_uint16_0, fnname: "mul_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 1, want: 1},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 1, want: 1},
+ test_uint16{fn: mul_1_uint16, fnname: "mul_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: mul_uint16_1, fnname: "mul_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: mul_65535_uint16, fnname: "mul_65535_uint16", in: 65535, want: 1},
+ test_uint16{fn: mul_uint16_65535, fnname: "mul_uint16_65535", in: 65535, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: lsh_0_uint16, fnname: "lsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_0, fnname: "lsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 1, want: 2},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 1, want: 2},
+ test_uint16{fn: lsh_1_uint16, fnname: "lsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_1, fnname: "lsh_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: lsh_65535_uint16, fnname: "lsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: lsh_uint16_65535, fnname: "lsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 0, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 0, want: 0},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 1, want: 1},
+ test_uint16{fn: rsh_0_uint16, fnname: "rsh_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_0, fnname: "rsh_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 0, want: 1},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 0, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 1, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 1, want: 0},
+ test_uint16{fn: rsh_1_uint16, fnname: "rsh_1_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_1, fnname: "rsh_uint16_1", in: 65535, want: 32767},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 1, want: 32767},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 1, want: 0},
+ test_uint16{fn: rsh_65535_uint16, fnname: "rsh_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: rsh_uint16_65535, fnname: "rsh_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_0_uint16, fnname: "mod_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 0, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 1, want: 0},
+ test_uint16{fn: mod_1_uint16, fnname: "mod_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: mod_uint16_1, fnname: "mod_uint16_1", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 1, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: mod_65535_uint16, fnname: "mod_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: mod_uint16_65535, fnname: "mod_uint16_65535", in: 65535, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 0, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 1, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 1, want: 0},
+ test_uint16{fn: and_0_uint16, fnname: "and_0_uint16", in: 65535, want: 0},
+ test_uint16{fn: and_uint16_0, fnname: "and_uint16_0", in: 65535, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 0, want: 0},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 1, want: 1},
+ test_uint16{fn: and_1_uint16, fnname: "and_1_uint16", in: 65535, want: 1},
+ test_uint16{fn: and_uint16_1, fnname: "and_uint16_1", in: 65535, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 0, want: 0},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 0, want: 0},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 1, want: 1},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 1, want: 1},
+ test_uint16{fn: and_65535_uint16, fnname: "and_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: and_uint16_65535, fnname: "and_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 0, want: 0},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 0, want: 0},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 1, want: 1},
+ test_uint16{fn: or_0_uint16, fnname: "or_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_0, fnname: "or_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 0, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 0, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 1, want: 1},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 1, want: 1},
+ test_uint16{fn: or_1_uint16, fnname: "or_1_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_1, fnname: "or_uint16_1", in: 65535, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 1, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 1, want: 65535},
+ test_uint16{fn: or_65535_uint16, fnname: "or_65535_uint16", in: 65535, want: 65535},
+ test_uint16{fn: or_uint16_65535, fnname: "or_uint16_65535", in: 65535, want: 65535},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 0, want: 0},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 0, want: 0},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 1, want: 1},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 1, want: 1},
+ test_uint16{fn: xor_0_uint16, fnname: "xor_0_uint16", in: 65535, want: 65535},
+ test_uint16{fn: xor_uint16_0, fnname: "xor_uint16_0", in: 65535, want: 65535},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 0, want: 1},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 0, want: 1},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 1, want: 0},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 1, want: 0},
+ test_uint16{fn: xor_1_uint16, fnname: "xor_1_uint16", in: 65535, want: 65534},
+ test_uint16{fn: xor_uint16_1, fnname: "xor_uint16_1", in: 65535, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 0, want: 65535},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 0, want: 65535},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 1, want: 65534},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 1, want: 65534},
+ test_uint16{fn: xor_65535_uint16, fnname: "xor_65535_uint16", in: 65535, want: 0},
+ test_uint16{fn: xor_uint16_65535, fnname: "xor_uint16_65535", in: 65535, want: 0}}
+
+type test_int16 struct {
+ fn func(int16) int16
+ fnname string
+ in int16
+ want int16
+}
+
+var tests_int16 = []test_int16{
+
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: add_Neg32768_int16, fnname: "add_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: add_int16_Neg32768, fnname: "add_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -32767, want: 2},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -32767, want: 2},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: -1, want: -32768},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: -1, want: -32768},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 1, want: -32766},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 1, want: -32766},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: add_Neg32767_int16, fnname: "add_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: add_int16_Neg32767, fnname: "add_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -32767, want: -32768},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -32767, want: -32768},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: -1, want: -2},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: -1, want: -2},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32766, want: 32765},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32766, want: 32765},
+ test_int16{fn: add_Neg1_int16, fnname: "add_Neg1_int16", in: 32767, want: 32766},
+ test_int16{fn: add_int16_Neg1, fnname: "add_int16_Neg1", in: 32767, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32768, want: -32768},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32768, want: -32768},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -32767, want: -32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -32767, want: -32767},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: -1, want: -1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: -1, want: -1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 0, want: 0},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 0, want: 0},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 1, want: 1},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 1, want: 1},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32766, want: 32766},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32766, want: 32766},
+ test_int16{fn: add_0_int16, fnname: "add_0_int16", in: 32767, want: 32767},
+ test_int16{fn: add_int16_0, fnname: "add_int16_0", in: 32767, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32768, want: -32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32768, want: -32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -32767, want: -32766},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -32767, want: -32766},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: -1, want: 0},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: -1, want: 0},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 0, want: 1},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 0, want: 1},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 1, want: 2},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 1, want: 2},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32766, want: 32767},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32766, want: 32767},
+ test_int16{fn: add_1_int16, fnname: "add_1_int16", in: 32767, want: -32768},
+ test_int16{fn: add_int16_1, fnname: "add_int16_1", in: 32767, want: -32768},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32768, want: -2},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32768, want: -2},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -32767, want: -1},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -32767, want: -1},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: -1, want: 32765},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: -1, want: 32765},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 0, want: 32766},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 0, want: 32766},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 1, want: 32767},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 1, want: 32767},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32766, want: -4},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32766, want: -4},
+ test_int16{fn: add_32766_int16, fnname: "add_32766_int16", in: 32767, want: -3},
+ test_int16{fn: add_int16_32766, fnname: "add_int16_32766", in: 32767, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32768, want: -1},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32768, want: -1},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -32767, want: 0},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -32767, want: 0},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: -1, want: 32766},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: -1, want: 32766},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 0, want: 32767},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 0, want: 32767},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 1, want: -32768},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 1, want: -32768},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32766, want: -3},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32766, want: -3},
+ test_int16{fn: add_32767_int16, fnname: "add_32767_int16", in: 32767, want: -2},
+ test_int16{fn: add_int16_32767, fnname: "add_int16_32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: -1, want: -32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 1, want: 32767},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32766, want: 2},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: sub_Neg32768_int16, fnname: "sub_Neg32768_int16", in: 32767, want: 1},
+ test_int16{fn: sub_int16_Neg32768, fnname: "sub_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: -1, want: -32766},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 0, want: 32767},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32766, want: 3},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32766, want: -3},
+ test_int16{fn: sub_Neg32767_int16, fnname: "sub_Neg32767_int16", in: 32767, want: 2},
+ test_int16{fn: sub_int16_Neg32767, fnname: "sub_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32768, want: -32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -32767, want: -32766},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 0, want: 1},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 1, want: 2},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32766, want: 32767},
+ test_int16{fn: sub_Neg1_int16, fnname: "sub_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: sub_int16_Neg1, fnname: "sub_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32768, want: -32768},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32768, want: -32768},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -32767, want: 32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -32767, want: -32767},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: -1, want: 1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: -1, want: -1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 0, want: 0},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 0, want: 0},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 1, want: -1},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 1, want: 1},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32766, want: -32766},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32766, want: 32766},
+ test_int16{fn: sub_0_int16, fnname: "sub_0_int16", in: 32767, want: -32767},
+ test_int16{fn: sub_int16_0, fnname: "sub_int16_0", in: 32767, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32768, want: -32767},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32768, want: 32767},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -32767, want: -32768},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -32767, want: -32768},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: -1, want: 2},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: -1, want: -2},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 0, want: 1},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 0, want: -1},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 1, want: 0},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 1, want: 0},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32766, want: -32765},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32766, want: 32765},
+ test_int16{fn: sub_1_int16, fnname: "sub_1_int16", in: 32767, want: -32766},
+ test_int16{fn: sub_int16_1, fnname: "sub_int16_1", in: 32767, want: 32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32768, want: -2},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32768, want: 2},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -32767, want: -3},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -32767, want: 3},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: -1, want: 32767},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: -1, want: -32767},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 0, want: 32766},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 0, want: -32766},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 1, want: 32765},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 1, want: -32765},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32766, want: 0},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32766, want: 0},
+ test_int16{fn: sub_32766_int16, fnname: "sub_32766_int16", in: 32767, want: -1},
+ test_int16{fn: sub_int16_32766, fnname: "sub_int16_32766", in: 32767, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32768, want: -1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32768, want: 1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -32767, want: -2},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -32767, want: 2},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: -1, want: -32768},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: -1, want: -32768},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 0, want: 32767},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 0, want: -32767},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 1, want: 32766},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 1, want: -32766},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32766, want: 1},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32766, want: -1},
+ test_int16{fn: sub_32767_int16, fnname: "sub_32767_int16", in: 32767, want: 0},
+ test_int16{fn: sub_int16_32767, fnname: "sub_int16_32767", in: 32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32768, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32768, want: 1},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -32767, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: div_Neg32768_int16, fnname: "div_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32768, fnname: "div_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: -1, want: 0},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 1, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: div_Neg32767_int16, fnname: "div_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: div_int16_Neg32767, fnname: "div_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: div_Neg1_int16, fnname: "div_Neg1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_Neg1, fnname: "div_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32768, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -32767, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: -1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 1, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32766, want: 0},
+ test_int16{fn: div_0_int16, fnname: "div_0_int16", in: 32767, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32768, want: -32768},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -32767, want: -32767},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: -1, want: -1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 0, want: 0},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 1, want: 1},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 1, want: 1},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32766, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32766, want: 32766},
+ test_int16{fn: div_1_int16, fnname: "div_1_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_1, fnname: "div_int16_1", in: 32767, want: 32767},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32768, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -32767, want: -1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: -1, want: -32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: -1, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 0, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 1, want: 32766},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 1, want: 0},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32766, want: 1},
+ test_int16{fn: div_32766_int16, fnname: "div_32766_int16", in: 32767, want: 0},
+ test_int16{fn: div_int16_32766, fnname: "div_int16_32766", in: 32767, want: 1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32768, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32768, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -32767, want: -1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -32767, want: -1},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: -1, want: -32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: -1, want: 0},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 0, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 1, want: 32767},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 1, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32766, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32766, want: 0},
+ test_int16{fn: div_32767_int16, fnname: "div_32767_int16", in: 32767, want: 1},
+ test_int16{fn: div_int16_32767, fnname: "div_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 1, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 1, want: -32768},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: mul_Neg32768_int16, fnname: "mul_Neg32768_int16", in: 32767, want: -32768},
+ test_int16{fn: mul_int16_Neg32768, fnname: "mul_int16_Neg32768", in: 32767, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -32767, want: 1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -32767, want: 1},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: -1, want: 32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: -1, want: 32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mul_Neg32767_int16, fnname: "mul_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: mul_int16_Neg32767, fnname: "mul_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -32767, want: 32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -32767, want: 32767},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: -1, want: 1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: -1, want: 1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32766, want: -32766},
+ test_int16{fn: mul_Neg1_int16, fnname: "mul_Neg1_int16", in: 32767, want: -32767},
+ test_int16{fn: mul_int16_Neg1, fnname: "mul_int16_Neg1", in: 32767, want: -32767},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32768, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -32767, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: -1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: -1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 0, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 1, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 1, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32766, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32766, want: 0},
+ test_int16{fn: mul_0_int16, fnname: "mul_0_int16", in: 32767, want: 0},
+ test_int16{fn: mul_int16_0, fnname: "mul_int16_0", in: 32767, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32768, want: -32768},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -32767, want: -32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -32767, want: -32767},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: -1, want: -1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: -1, want: -1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 0, want: 0},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 1, want: 1},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 1, want: 1},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32766, want: 32766},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32766, want: 32766},
+ test_int16{fn: mul_1_int16, fnname: "mul_1_int16", in: 32767, want: 32767},
+ test_int16{fn: mul_int16_1, fnname: "mul_int16_1", in: 32767, want: 32767},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32768, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32768, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -32767, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: -1, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: -1, want: -32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 0, want: 0},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 1, want: 32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 1, want: 32766},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32766, want: 4},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32766, want: 4},
+ test_int16{fn: mul_32766_int16, fnname: "mul_32766_int16", in: 32767, want: -32766},
+ test_int16{fn: mul_int16_32766, fnname: "mul_int16_32766", in: 32767, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32768, want: -32768},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32768, want: -32768},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -32767, want: -1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -32767, want: -1},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: -1, want: -32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: -1, want: -32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 0, want: 0},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 0, want: 0},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 1, want: 32767},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 1, want: 32767},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32766, want: -32766},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32766, want: -32766},
+ test_int16{fn: mul_32767_int16, fnname: "mul_32767_int16", in: 32767, want: 1},
+ test_int16{fn: mul_int16_32767, fnname: "mul_int16_32767", in: 32767, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 1, want: 1},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32768_int16, fnname: "mod_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg32768, fnname: "mod_int16_Neg32768", in: 32767, want: 32767},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32768, want: -1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32766, want: 32766},
+ test_int16{fn: mod_Neg32767_int16, fnname: "mod_Neg32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_Neg32767, fnname: "mod_int16_Neg32767", in: 32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32768, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -32767, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 1, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32766, want: 0},
+ test_int16{fn: mod_Neg1_int16, fnname: "mod_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: mod_int16_Neg1, fnname: "mod_int16_Neg1", in: 32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32768, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -32767, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: -1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 1, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32766, want: 0},
+ test_int16{fn: mod_0_int16, fnname: "mod_0_int16", in: 32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32768, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32768, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -32767, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: -1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 0, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 1, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32766, want: 0},
+ test_int16{fn: mod_1_int16, fnname: "mod_1_int16", in: 32767, want: 1},
+ test_int16{fn: mod_int16_1, fnname: "mod_int16_1", in: 32767, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32768, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32768, want: -2},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -32767, want: -1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: -1, want: -1},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 0, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 1, want: 1},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32766, want: 0},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32766, want: 0},
+ test_int16{fn: mod_32766_int16, fnname: "mod_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: mod_int16_32766, fnname: "mod_int16_32766", in: 32767, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32768, want: 32767},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32768, want: -1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -32767, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: -1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: -1, want: -1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 0, want: 0},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 1, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 1, want: 1},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32766, want: 1},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: mod_32767_int16, fnname: "mod_32767_int16", in: 32767, want: 0},
+ test_int16{fn: mod_int16_32767, fnname: "mod_int16_32767", in: 32767, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -32767, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -32767, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: -1, want: -32768},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: -1, want: -32768},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 0, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 1, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32766, want: 0},
+ test_int16{fn: and_Neg32768_int16, fnname: "and_Neg32768_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_Neg32768, fnname: "and_int16_Neg32768", in: 32767, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32768, want: -32768},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: -1, want: -32767},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: -1, want: -32767},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 0, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 1, want: 1},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32766, want: 0},
+ test_int16{fn: and_Neg32767_int16, fnname: "and_Neg32767_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_Neg32767, fnname: "and_int16_Neg32767", in: 32767, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32768, want: -32768},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32768, want: -32768},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -32767, want: -32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -32767, want: -32767},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 0, want: 0},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 1, want: 1},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32766, want: 32766},
+ test_int16{fn: and_Neg1_int16, fnname: "and_Neg1_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_Neg1, fnname: "and_int16_Neg1", in: 32767, want: 32767},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32768, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -32767, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: -1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: -1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 0, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 1, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32766, want: 0},
+ test_int16{fn: and_0_int16, fnname: "and_0_int16", in: 32767, want: 0},
+ test_int16{fn: and_int16_0, fnname: "and_int16_0", in: 32767, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32768, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -32767, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: -1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: -1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 0, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 1, want: 1},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32766, want: 0},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32766, want: 0},
+ test_int16{fn: and_1_int16, fnname: "and_1_int16", in: 32767, want: 1},
+ test_int16{fn: and_int16_1, fnname: "and_int16_1", in: 32767, want: 1},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32768, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -32767, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -32767, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: -1, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: -1, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 0, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 1, want: 0},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 1, want: 0},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: and_32766_int16, fnname: "and_32766_int16", in: 32767, want: 32766},
+ test_int16{fn: and_int16_32766, fnname: "and_int16_32766", in: 32767, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32768, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32768, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -32767, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -32767, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: -1, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: -1, want: 32767},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 0, want: 0},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 0, want: 0},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 1, want: 1},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 1, want: 1},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32766, want: 32766},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32766, want: 32766},
+ test_int16{fn: and_32767_int16, fnname: "and_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: and_int16_32767, fnname: "and_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32768, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: -1, want: -1},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: or_Neg32768_int16, fnname: "or_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32768, fnname: "or_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32768, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -32767, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: -1, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 1, want: -32767},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 1, want: -32767},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: or_Neg32767_int16, fnname: "or_Neg32767_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg32767, fnname: "or_int16_Neg32767", in: 32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32768, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -32767, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: -1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 1, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 1, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32766, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32766, want: -1},
+ test_int16{fn: or_Neg1_int16, fnname: "or_Neg1_int16", in: 32767, want: -1},
+ test_int16{fn: or_int16_Neg1, fnname: "or_int16_Neg1", in: 32767, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32768, want: -32768},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32768, want: -32768},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -32767, want: -32767},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: -1, want: -1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 0, want: 0},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 0, want: 0},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 1, want: 1},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32766, want: 32766},
+ test_int16{fn: or_0_int16, fnname: "or_0_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_0, fnname: "or_int16_0", in: 32767, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32768, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32768, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -32767, want: -32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -32767, want: -32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: -1, want: -1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 0, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 0, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 1, want: 1},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 1, want: 1},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32766, want: 32767},
+ test_int16{fn: or_1_int16, fnname: "or_1_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_1, fnname: "or_int16_1", in: 32767, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32768, want: -2},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32768, want: -2},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -32767, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: -1, want: -1},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 0, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 0, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 1, want: 32767},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32766, want: 32766},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32766, want: 32766},
+ test_int16{fn: or_32766_int16, fnname: "or_32766_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32766, fnname: "or_int16_32766", in: 32767, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32768, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32768, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -32767, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -32767, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: -1, want: -1},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: -1, want: -1},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 0, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 0, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 1, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 1, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32766, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32766, want: 32767},
+ test_int16{fn: or_32767_int16, fnname: "or_32767_int16", in: 32767, want: 32767},
+ test_int16{fn: or_int16_32767, fnname: "or_int16_32767", in: 32767, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32768, want: 0},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32768, want: 0},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -32767, want: 1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -32767, want: 1},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: -1, want: 32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: -1, want: 32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 0, want: -32768},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 0, want: -32768},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 1, want: -32767},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 1, want: -32767},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32766, want: -2},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32766, want: -2},
+ test_int16{fn: xor_Neg32768_int16, fnname: "xor_Neg32768_int16", in: 32767, want: -1},
+ test_int16{fn: xor_int16_Neg32768, fnname: "xor_int16_Neg32768", in: 32767, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32768, want: 1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32768, want: 1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -32767, want: 0},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -32767, want: 0},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: -1, want: 32766},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: -1, want: 32766},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 0, want: -32767},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 0, want: -32767},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 1, want: -32768},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 1, want: -32768},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32766, want: -1},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32766, want: -1},
+ test_int16{fn: xor_Neg32767_int16, fnname: "xor_Neg32767_int16", in: 32767, want: -2},
+ test_int16{fn: xor_int16_Neg32767, fnname: "xor_int16_Neg32767", in: 32767, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32768, want: 32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32768, want: 32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -32767, want: 32766},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -32767, want: 32766},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: -1, want: 0},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: -1, want: 0},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 0, want: -1},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 0, want: -1},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 1, want: -2},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 1, want: -2},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32766, want: -32767},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32766, want: -32767},
+ test_int16{fn: xor_Neg1_int16, fnname: "xor_Neg1_int16", in: 32767, want: -32768},
+ test_int16{fn: xor_int16_Neg1, fnname: "xor_int16_Neg1", in: 32767, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32768, want: -32768},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32768, want: -32768},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -32767, want: -32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -32767, want: -32767},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: -1, want: -1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: -1, want: -1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 0, want: 0},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 0, want: 0},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 1, want: 1},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 1, want: 1},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32766, want: 32766},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32766, want: 32766},
+ test_int16{fn: xor_0_int16, fnname: "xor_0_int16", in: 32767, want: 32767},
+ test_int16{fn: xor_int16_0, fnname: "xor_int16_0", in: 32767, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32768, want: -32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32768, want: -32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -32767, want: -32768},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -32767, want: -32768},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: -1, want: -2},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: -1, want: -2},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 0, want: 1},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 0, want: 1},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 1, want: 0},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 1, want: 0},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32766, want: 32767},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32766, want: 32767},
+ test_int16{fn: xor_1_int16, fnname: "xor_1_int16", in: 32767, want: 32766},
+ test_int16{fn: xor_int16_1, fnname: "xor_int16_1", in: 32767, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32768, want: -2},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32768, want: -2},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -32767, want: -1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -32767, want: -1},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: -1, want: -32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: -1, want: -32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 0, want: 32766},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 0, want: 32766},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 1, want: 32767},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 1, want: 32767},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32766, want: 0},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32766, want: 0},
+ test_int16{fn: xor_32766_int16, fnname: "xor_32766_int16", in: 32767, want: 1},
+ test_int16{fn: xor_int16_32766, fnname: "xor_int16_32766", in: 32767, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32768, want: -1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32768, want: -1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -32767, want: -2},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -32767, want: -2},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: -1, want: -32768},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: -1, want: -32768},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 0, want: 32767},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 0, want: 32767},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 1, want: 32766},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 1, want: 32766},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32766, want: 1},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32766, want: 1},
+ test_int16{fn: xor_32767_int16, fnname: "xor_32767_int16", in: 32767, want: 0},
+ test_int16{fn: xor_int16_32767, fnname: "xor_int16_32767", in: 32767, want: 0}}
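(Editorial illustration, not part of the upstream patch: the generated tables such as tests_int16 above pair a unary wrapper fn with an input and the expected wrapped result; signed entries follow two's-complement wraparound, e.g. add_int16_1 with in: 32767 expects -32768. A minimal sketch of the kind of table-driven loop that would consume such a slice is shown below; the function name and error format here are assumptions for orientation only, and the snippet presumes the same package plus import "testing".)

// testInt16TableSketch is a hypothetical driver, shown only to illustrate how
// entries like {fn: add_int16_1, in: 32767, want: -32768} would be exercised.
func testInt16TableSketch(t *testing.T) {
	for _, test := range tests_int16 {
		// Call the wrapped constant-operand function and compare against the
		// precomputed wrapped result recorded in the table.
		if got := test.fn(test.in); got != test.want {
			t.Errorf("%s(%d) = %d, want %d", test.fnname, test.in, got, test.want)
		}
	}
}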
+
+type test_uint8 struct {
+ fn func(uint8) uint8
+ fnname string
+ in uint8
+ want uint8
+}
+
+var tests_uint8 = []test_uint8{
+
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 0, want: 0},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 0, want: 0},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 1, want: 1},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 1, want: 1},
+ test_uint8{fn: add_0_uint8, fnname: "add_0_uint8", in: 255, want: 255},
+ test_uint8{fn: add_uint8_0, fnname: "add_uint8_0", in: 255, want: 255},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 0, want: 1},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 0, want: 1},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 1, want: 2},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 1, want: 2},
+ test_uint8{fn: add_1_uint8, fnname: "add_1_uint8", in: 255, want: 0},
+ test_uint8{fn: add_uint8_1, fnname: "add_uint8_1", in: 255, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 0, want: 255},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 0, want: 255},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 1, want: 0},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 1, want: 0},
+ test_uint8{fn: add_255_uint8, fnname: "add_255_uint8", in: 255, want: 254},
+ test_uint8{fn: add_uint8_255, fnname: "add_uint8_255", in: 255, want: 254},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 0, want: 0},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 0, want: 0},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 1, want: 255},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 1, want: 1},
+ test_uint8{fn: sub_0_uint8, fnname: "sub_0_uint8", in: 255, want: 1},
+ test_uint8{fn: sub_uint8_0, fnname: "sub_uint8_0", in: 255, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 0, want: 1},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 0, want: 255},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 1, want: 0},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 1, want: 0},
+ test_uint8{fn: sub_1_uint8, fnname: "sub_1_uint8", in: 255, want: 2},
+ test_uint8{fn: sub_uint8_1, fnname: "sub_uint8_1", in: 255, want: 254},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 0, want: 255},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 0, want: 1},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 1, want: 254},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 1, want: 2},
+ test_uint8{fn: sub_255_uint8, fnname: "sub_255_uint8", in: 255, want: 0},
+ test_uint8{fn: sub_uint8_255, fnname: "sub_uint8_255", in: 255, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 1, want: 0},
+ test_uint8{fn: div_0_uint8, fnname: "div_0_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 0, want: 0},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 1, want: 1},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 1, want: 1},
+ test_uint8{fn: div_1_uint8, fnname: "div_1_uint8", in: 255, want: 0},
+ test_uint8{fn: div_uint8_1, fnname: "div_uint8_1", in: 255, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 0, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 1, want: 255},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 1, want: 0},
+ test_uint8{fn: div_255_uint8, fnname: "div_255_uint8", in: 255, want: 1},
+ test_uint8{fn: div_uint8_255, fnname: "div_uint8_255", in: 255, want: 1},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 0, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 1, want: 0},
+ test_uint8{fn: mul_0_uint8, fnname: "mul_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mul_uint8_0, fnname: "mul_uint8_0", in: 255, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 1, want: 1},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 1, want: 1},
+ test_uint8{fn: mul_1_uint8, fnname: "mul_1_uint8", in: 255, want: 255},
+ test_uint8{fn: mul_uint8_1, fnname: "mul_uint8_1", in: 255, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 0, want: 0},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 1, want: 255},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 1, want: 255},
+ test_uint8{fn: mul_255_uint8, fnname: "mul_255_uint8", in: 255, want: 1},
+ test_uint8{fn: mul_uint8_255, fnname: "mul_uint8_255", in: 255, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: lsh_0_uint8, fnname: "lsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_0, fnname: "lsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 1, want: 2},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 1, want: 2},
+ test_uint8{fn: lsh_1_uint8, fnname: "lsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_1, fnname: "lsh_uint8_1", in: 255, want: 254},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 1, want: 254},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: lsh_255_uint8, fnname: "lsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: lsh_uint8_255, fnname: "lsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 0, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 0, want: 0},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 1, want: 1},
+ test_uint8{fn: rsh_0_uint8, fnname: "rsh_0_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_0, fnname: "rsh_uint8_0", in: 255, want: 255},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 0, want: 1},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 0, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 1, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 1, want: 0},
+ test_uint8{fn: rsh_1_uint8, fnname: "rsh_1_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_1, fnname: "rsh_uint8_1", in: 255, want: 127},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 0, want: 255},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 0, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 1, want: 127},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 1, want: 0},
+ test_uint8{fn: rsh_255_uint8, fnname: "rsh_255_uint8", in: 255, want: 0},
+ test_uint8{fn: rsh_uint8_255, fnname: "rsh_uint8_255", in: 255, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_0_uint8, fnname: "mod_0_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 0, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 1, want: 0},
+ test_uint8{fn: mod_1_uint8, fnname: "mod_1_uint8", in: 255, want: 1},
+ test_uint8{fn: mod_uint8_1, fnname: "mod_uint8_1", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 0, want: 0},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 1, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 1, want: 1},
+ test_uint8{fn: mod_255_uint8, fnname: "mod_255_uint8", in: 255, want: 0},
+ test_uint8{fn: mod_uint8_255, fnname: "mod_uint8_255", in: 255, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 0, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 1, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 1, want: 0},
+ test_uint8{fn: and_0_uint8, fnname: "and_0_uint8", in: 255, want: 0},
+ test_uint8{fn: and_uint8_0, fnname: "and_uint8_0", in: 255, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 0, want: 0},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 1, want: 1},
+ test_uint8{fn: and_1_uint8, fnname: "and_1_uint8", in: 255, want: 1},
+ test_uint8{fn: and_uint8_1, fnname: "and_uint8_1", in: 255, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 0, want: 0},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 0, want: 0},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 1, want: 1},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 1, want: 1},
+ test_uint8{fn: and_255_uint8, fnname: "and_255_uint8", in: 255, want: 255},
+ test_uint8{fn: and_uint8_255, fnname: "and_uint8_255", in: 255, want: 255},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 0, want: 0},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 0, want: 0},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 1, want: 1},
+ test_uint8{fn: or_0_uint8, fnname: "or_0_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_0, fnname: "or_uint8_0", in: 255, want: 255},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 0, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 0, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 1, want: 1},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 1, want: 1},
+ test_uint8{fn: or_1_uint8, fnname: "or_1_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_1, fnname: "or_uint8_1", in: 255, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 0, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 0, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 1, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 1, want: 255},
+ test_uint8{fn: or_255_uint8, fnname: "or_255_uint8", in: 255, want: 255},
+ test_uint8{fn: or_uint8_255, fnname: "or_uint8_255", in: 255, want: 255},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 0, want: 0},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 0, want: 0},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 1, want: 1},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 1, want: 1},
+ test_uint8{fn: xor_0_uint8, fnname: "xor_0_uint8", in: 255, want: 255},
+ test_uint8{fn: xor_uint8_0, fnname: "xor_uint8_0", in: 255, want: 255},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 0, want: 1},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 0, want: 1},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 1, want: 0},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 1, want: 0},
+ test_uint8{fn: xor_1_uint8, fnname: "xor_1_uint8", in: 255, want: 254},
+ test_uint8{fn: xor_uint8_1, fnname: "xor_uint8_1", in: 255, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 0, want: 255},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 0, want: 255},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 1, want: 254},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 1, want: 254},
+ test_uint8{fn: xor_255_uint8, fnname: "xor_255_uint8", in: 255, want: 0},
+ test_uint8{fn: xor_uint8_255, fnname: "xor_uint8_255", in: 255, want: 0}}
+
+type test_int8 struct {
+ fn func(int8) int8
+ fnname string
+ in int8
+ want int8
+}
+
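+// tests_int8 exercises the int8 cases at the boundary values -128, -127, -1,
+// 0, 1, 126 and 127. Expected values follow Go's wrapping two's-complement
+// arithmetic and truncated division/remainder, e.g. div_int8_Neg1(-128) == -128
+// and mod_Neg128_int8(-127) == -1.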
+var tests_int8 = []test_int8{
+
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: add_Neg128_int8, fnname: "add_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: add_int8_Neg128, fnname: "add_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -127, want: 2},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -127, want: 2},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: -1, want: -128},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: -1, want: -128},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 1, want: -126},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 1, want: -126},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: add_Neg127_int8, fnname: "add_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: add_int8_Neg127, fnname: "add_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -127, want: -128},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -127, want: -128},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: -1, want: -2},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: -1, want: -2},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 126, want: 125},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 126, want: 125},
+ test_int8{fn: add_Neg1_int8, fnname: "add_Neg1_int8", in: 127, want: 126},
+ test_int8{fn: add_int8_Neg1, fnname: "add_int8_Neg1", in: 127, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -128, want: -128},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -128, want: -128},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -127, want: -127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -127, want: -127},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: -1, want: -1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: -1, want: -1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 0, want: 0},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 0, want: 0},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 1, want: 1},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 1, want: 1},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 126, want: 126},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 126, want: 126},
+ test_int8{fn: add_0_int8, fnname: "add_0_int8", in: 127, want: 127},
+ test_int8{fn: add_int8_0, fnname: "add_int8_0", in: 127, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -128, want: -127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -128, want: -127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -127, want: -126},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -127, want: -126},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: -1, want: 0},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: -1, want: 0},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 0, want: 1},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 0, want: 1},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 1, want: 2},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 1, want: 2},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 126, want: 127},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 126, want: 127},
+ test_int8{fn: add_1_int8, fnname: "add_1_int8", in: 127, want: -128},
+ test_int8{fn: add_int8_1, fnname: "add_int8_1", in: 127, want: -128},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -128, want: -2},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -128, want: -2},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -127, want: -1},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -127, want: -1},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: -1, want: 125},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: -1, want: 125},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 0, want: 126},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 0, want: 126},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 1, want: 127},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 1, want: 127},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 126, want: -4},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 126, want: -4},
+ test_int8{fn: add_126_int8, fnname: "add_126_int8", in: 127, want: -3},
+ test_int8{fn: add_int8_126, fnname: "add_int8_126", in: 127, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -128, want: -1},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -128, want: -1},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -127, want: 0},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -127, want: 0},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: -1, want: 126},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: -1, want: 126},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 0, want: 127},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 0, want: 127},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 1, want: -128},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 1, want: -128},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 126, want: -3},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 126, want: -3},
+ test_int8{fn: add_127_int8, fnname: "add_127_int8", in: 127, want: -2},
+ test_int8{fn: add_int8_127, fnname: "add_int8_127", in: 127, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: -1, want: -127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 1, want: 127},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 126, want: 2},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: sub_Neg128_int8, fnname: "sub_Neg128_int8", in: 127, want: 1},
+ test_int8{fn: sub_int8_Neg128, fnname: "sub_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: -1, want: -126},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 0, want: 127},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 126, want: 3},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 126, want: -3},
+ test_int8{fn: sub_Neg127_int8, fnname: "sub_Neg127_int8", in: 127, want: 2},
+ test_int8{fn: sub_int8_Neg127, fnname: "sub_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -128, want: -127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -127, want: -126},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 0, want: 1},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 1, want: 2},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 126, want: 127},
+ test_int8{fn: sub_Neg1_int8, fnname: "sub_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: sub_int8_Neg1, fnname: "sub_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -128, want: -128},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -128, want: -128},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -127, want: 127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -127, want: -127},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: -1, want: 1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: -1, want: -1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 0, want: 0},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 0, want: 0},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 1, want: -1},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 1, want: 1},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 126, want: -126},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 126, want: 126},
+ test_int8{fn: sub_0_int8, fnname: "sub_0_int8", in: 127, want: -127},
+ test_int8{fn: sub_int8_0, fnname: "sub_int8_0", in: 127, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -128, want: -127},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -128, want: 127},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -127, want: -128},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -127, want: -128},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: -1, want: 2},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: -1, want: -2},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 0, want: 1},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 0, want: -1},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 1, want: 0},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 1, want: 0},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 126, want: -125},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 126, want: 125},
+ test_int8{fn: sub_1_int8, fnname: "sub_1_int8", in: 127, want: -126},
+ test_int8{fn: sub_int8_1, fnname: "sub_int8_1", in: 127, want: 126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -128, want: -2},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -128, want: 2},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -127, want: -3},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -127, want: 3},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: -1, want: 127},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: -1, want: -127},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 0, want: 126},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 0, want: -126},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 1, want: 125},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 1, want: -125},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 126, want: 0},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 126, want: 0},
+ test_int8{fn: sub_126_int8, fnname: "sub_126_int8", in: 127, want: -1},
+ test_int8{fn: sub_int8_126, fnname: "sub_int8_126", in: 127, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -128, want: -1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -128, want: 1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -127, want: -2},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -127, want: 2},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: -1, want: -128},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: -1, want: -128},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 0, want: 127},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 0, want: -127},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 1, want: 126},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 1, want: -126},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 126, want: 1},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 126, want: -1},
+ test_int8{fn: sub_127_int8, fnname: "sub_127_int8", in: 127, want: 0},
+ test_int8{fn: sub_int8_127, fnname: "sub_int8_127", in: 127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -128, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -128, want: 1},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -127, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: div_Neg128_int8, fnname: "div_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg128, fnname: "div_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: -1, want: 0},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 1, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: div_Neg127_int8, fnname: "div_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: div_int8_Neg127, fnname: "div_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: div_Neg1_int8, fnname: "div_Neg1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_Neg1, fnname: "div_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -128, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -127, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: -1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 1, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 126, want: 0},
+ test_int8{fn: div_0_int8, fnname: "div_0_int8", in: 127, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -128, want: -128},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -127, want: -127},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: -1, want: -1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 0, want: 0},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 1, want: 1},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 1, want: 1},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 126, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 126, want: 126},
+ test_int8{fn: div_1_int8, fnname: "div_1_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_1, fnname: "div_int8_1", in: 127, want: 127},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -128, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -127, want: -1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: -1, want: -126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: -1, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 0, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 1, want: 126},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 1, want: 0},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 126, want: 1},
+ test_int8{fn: div_126_int8, fnname: "div_126_int8", in: 127, want: 0},
+ test_int8{fn: div_int8_126, fnname: "div_int8_126", in: 127, want: 1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -128, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -128, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -127, want: -1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -127, want: -1},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: -1, want: -127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: -1, want: 0},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 0, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 1, want: 127},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 1, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 126, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 126, want: 0},
+ test_int8{fn: div_127_int8, fnname: "div_127_int8", in: 127, want: 1},
+ test_int8{fn: div_int8_127, fnname: "div_int8_127", in: 127, want: 1},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 1, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 1, want: -128},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: mul_Neg128_int8, fnname: "mul_Neg128_int8", in: 127, want: -128},
+ test_int8{fn: mul_int8_Neg128, fnname: "mul_int8_Neg128", in: 127, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -127, want: 1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -127, want: 1},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: -1, want: 127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: -1, want: 127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mul_Neg127_int8, fnname: "mul_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: mul_int8_Neg127, fnname: "mul_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -127, want: 127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -127, want: 127},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: -1, want: 1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: -1, want: 1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 126, want: -126},
+ test_int8{fn: mul_Neg1_int8, fnname: "mul_Neg1_int8", in: 127, want: -127},
+ test_int8{fn: mul_int8_Neg1, fnname: "mul_int8_Neg1", in: 127, want: -127},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -128, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -127, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: -1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: -1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 0, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 1, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 1, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 126, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 126, want: 0},
+ test_int8{fn: mul_0_int8, fnname: "mul_0_int8", in: 127, want: 0},
+ test_int8{fn: mul_int8_0, fnname: "mul_int8_0", in: 127, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -128, want: -128},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -127, want: -127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -127, want: -127},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: -1, want: -1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: -1, want: -1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 0, want: 0},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 1, want: 1},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 1, want: 1},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 126, want: 126},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 126, want: 126},
+ test_int8{fn: mul_1_int8, fnname: "mul_1_int8", in: 127, want: 127},
+ test_int8{fn: mul_int8_1, fnname: "mul_int8_1", in: 127, want: 127},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -128, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -128, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -127, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -127, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: -1, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: -1, want: -126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 0, want: 0},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 1, want: 126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 1, want: 126},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 126, want: 4},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 126, want: 4},
+ test_int8{fn: mul_126_int8, fnname: "mul_126_int8", in: 127, want: -126},
+ test_int8{fn: mul_int8_126, fnname: "mul_int8_126", in: 127, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -128, want: -128},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -128, want: -128},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -127, want: -1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -127, want: -1},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: -1, want: -127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: -1, want: -127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 0, want: 0},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 0, want: 0},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 1, want: 127},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 1, want: 127},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 126, want: -126},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 126, want: -126},
+ test_int8{fn: mul_127_int8, fnname: "mul_127_int8", in: 127, want: 1},
+ test_int8{fn: mul_int8_127, fnname: "mul_int8_127", in: 127, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 1, want: 1},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 126, want: 126},
+ test_int8{fn: mod_Neg128_int8, fnname: "mod_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg128, fnname: "mod_int8_Neg128", in: 127, want: 127},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -128, want: -1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 126, want: 126},
+ test_int8{fn: mod_Neg127_int8, fnname: "mod_Neg127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_Neg127, fnname: "mod_int8_Neg127", in: 127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -128, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -127, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 1, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 126, want: 0},
+ test_int8{fn: mod_Neg1_int8, fnname: "mod_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: mod_int8_Neg1, fnname: "mod_int8_Neg1", in: 127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -128, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -127, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: -1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 1, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 126, want: 0},
+ test_int8{fn: mod_0_int8, fnname: "mod_0_int8", in: 127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -128, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -128, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -127, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: -1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 0, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 1, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 126, want: 0},
+ test_int8{fn: mod_1_int8, fnname: "mod_1_int8", in: 127, want: 1},
+ test_int8{fn: mod_int8_1, fnname: "mod_int8_1", in: 127, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -128, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -128, want: -2},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -127, want: -1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: -1, want: -1},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 0, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 1, want: 1},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 126, want: 0},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 126, want: 0},
+ test_int8{fn: mod_126_int8, fnname: "mod_126_int8", in: 127, want: 126},
+ test_int8{fn: mod_int8_126, fnname: "mod_int8_126", in: 127, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -128, want: 127},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -128, want: -1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -127, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: -1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: -1, want: -1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 0, want: 0},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 1, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 1, want: 1},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 126, want: 1},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 126, want: 126},
+ test_int8{fn: mod_127_int8, fnname: "mod_127_int8", in: 127, want: 0},
+ test_int8{fn: mod_int8_127, fnname: "mod_int8_127", in: 127, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -127, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -127, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: -1, want: -128},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: -1, want: -128},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 0, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 1, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 126, want: 0},
+ test_int8{fn: and_Neg128_int8, fnname: "and_Neg128_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_Neg128, fnname: "and_int8_Neg128", in: 127, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -128, want: -128},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: -1, want: -127},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: -1, want: -127},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 0, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 1, want: 1},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 126, want: 0},
+ test_int8{fn: and_Neg127_int8, fnname: "and_Neg127_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_Neg127, fnname: "and_int8_Neg127", in: 127, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -128, want: -128},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -128, want: -128},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -127, want: -127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -127, want: -127},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 0, want: 0},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 1, want: 1},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 126, want: 126},
+ test_int8{fn: and_Neg1_int8, fnname: "and_Neg1_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_Neg1, fnname: "and_int8_Neg1", in: 127, want: 127},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -128, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -127, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: -1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: -1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 0, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 1, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 126, want: 0},
+ test_int8{fn: and_0_int8, fnname: "and_0_int8", in: 127, want: 0},
+ test_int8{fn: and_int8_0, fnname: "and_int8_0", in: 127, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -128, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -127, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: -1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: -1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 0, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 1, want: 1},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 126, want: 0},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 126, want: 0},
+ test_int8{fn: and_1_int8, fnname: "and_1_int8", in: 127, want: 1},
+ test_int8{fn: and_int8_1, fnname: "and_int8_1", in: 127, want: 1},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -128, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -127, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -127, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: -1, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: -1, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 0, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 1, want: 0},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 1, want: 0},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 126, want: 126},
+ test_int8{fn: and_126_int8, fnname: "and_126_int8", in: 127, want: 126},
+ test_int8{fn: and_int8_126, fnname: "and_int8_126", in: 127, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -128, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -128, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -127, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -127, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: -1, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: -1, want: 127},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 0, want: 0},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 0, want: 0},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 1, want: 1},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 1, want: 1},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 126, want: 126},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 126, want: 126},
+ test_int8{fn: and_127_int8, fnname: "and_127_int8", in: 127, want: 127},
+ test_int8{fn: and_int8_127, fnname: "and_int8_127", in: 127, want: 127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -128, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -127, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: -1, want: -1},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: or_Neg128_int8, fnname: "or_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg128, fnname: "or_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -128, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -127, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: -1, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 1, want: -127},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 1, want: -127},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: or_Neg127_int8, fnname: "or_Neg127_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg127, fnname: "or_int8_Neg127", in: 127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -128, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -127, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: -1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 1, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 1, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 126, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 126, want: -1},
+ test_int8{fn: or_Neg1_int8, fnname: "or_Neg1_int8", in: 127, want: -1},
+ test_int8{fn: or_int8_Neg1, fnname: "or_int8_Neg1", in: 127, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -128, want: -128},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -128, want: -128},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -127, want: -127},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: -1, want: -1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 0, want: 0},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 0, want: 0},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 1, want: 1},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 126, want: 126},
+ test_int8{fn: or_0_int8, fnname: "or_0_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_0, fnname: "or_int8_0", in: 127, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -128, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -128, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -127, want: -127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -127, want: -127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: -1, want: -1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 0, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 0, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 1, want: 1},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 1, want: 1},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 126, want: 127},
+ test_int8{fn: or_1_int8, fnname: "or_1_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_1, fnname: "or_int8_1", in: 127, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -128, want: -2},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -128, want: -2},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -127, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: -1, want: -1},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 0, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 0, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 1, want: 127},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 126, want: 126},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 126, want: 126},
+ test_int8{fn: or_126_int8, fnname: "or_126_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_126, fnname: "or_int8_126", in: 127, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -128, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -128, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -127, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -127, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: -1, want: -1},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: -1, want: -1},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 0, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 0, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 1, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 1, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 126, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 126, want: 127},
+ test_int8{fn: or_127_int8, fnname: "or_127_int8", in: 127, want: 127},
+ test_int8{fn: or_int8_127, fnname: "or_int8_127", in: 127, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -128, want: 0},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -128, want: 0},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -127, want: 1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -127, want: 1},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: -1, want: 127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: -1, want: 127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 0, want: -128},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 0, want: -128},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 1, want: -127},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 1, want: -127},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 126, want: -2},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 126, want: -2},
+ test_int8{fn: xor_Neg128_int8, fnname: "xor_Neg128_int8", in: 127, want: -1},
+ test_int8{fn: xor_int8_Neg128, fnname: "xor_int8_Neg128", in: 127, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -128, want: 1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -128, want: 1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -127, want: 0},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -127, want: 0},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: -1, want: 126},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: -1, want: 126},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 0, want: -127},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 0, want: -127},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 1, want: -128},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 1, want: -128},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 126, want: -1},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 126, want: -1},
+ test_int8{fn: xor_Neg127_int8, fnname: "xor_Neg127_int8", in: 127, want: -2},
+ test_int8{fn: xor_int8_Neg127, fnname: "xor_int8_Neg127", in: 127, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -128, want: 127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -128, want: 127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -127, want: 126},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -127, want: 126},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: -1, want: 0},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: -1, want: 0},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 0, want: -1},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 0, want: -1},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 1, want: -2},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 1, want: -2},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 126, want: -127},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 126, want: -127},
+ test_int8{fn: xor_Neg1_int8, fnname: "xor_Neg1_int8", in: 127, want: -128},
+ test_int8{fn: xor_int8_Neg1, fnname: "xor_int8_Neg1", in: 127, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -128, want: -128},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -128, want: -128},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -127, want: -127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -127, want: -127},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: -1, want: -1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: -1, want: -1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 0, want: 0},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 0, want: 0},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 1, want: 1},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 1, want: 1},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 126, want: 126},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 126, want: 126},
+ test_int8{fn: xor_0_int8, fnname: "xor_0_int8", in: 127, want: 127},
+ test_int8{fn: xor_int8_0, fnname: "xor_int8_0", in: 127, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -128, want: -127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -128, want: -127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -127, want: -128},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -127, want: -128},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: -1, want: -2},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: -1, want: -2},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 0, want: 1},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 0, want: 1},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 1, want: 0},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 1, want: 0},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 126, want: 127},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 126, want: 127},
+ test_int8{fn: xor_1_int8, fnname: "xor_1_int8", in: 127, want: 126},
+ test_int8{fn: xor_int8_1, fnname: "xor_int8_1", in: 127, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -128, want: -2},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -128, want: -2},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -127, want: -1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -127, want: -1},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: -1, want: -127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: -1, want: -127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 0, want: 126},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 0, want: 126},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 1, want: 127},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 1, want: 127},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 126, want: 0},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 126, want: 0},
+ test_int8{fn: xor_126_int8, fnname: "xor_126_int8", in: 127, want: 1},
+ test_int8{fn: xor_int8_126, fnname: "xor_int8_126", in: 127, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -128, want: -1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -128, want: -1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -127, want: -2},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -127, want: -2},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: -1, want: -128},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: -1, want: -128},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 0, want: 127},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 0, want: 127},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 1, want: 126},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 1, want: 126},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 126, want: 1},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 126, want: 1},
+ test_int8{fn: xor_127_int8, fnname: "xor_127_int8", in: 127, want: 0},
+ test_int8{fn: xor_int8_127, fnname: "xor_int8_127", in: 127, want: 0}}
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+ for _, test := range tests_uint64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int64mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int32mul {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int16 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_uint8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+ for _, test := range tests_int8 {
+ if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/testdata/arith_test.go b/src/cmd/compile/internal/test/testdata/arith_test.go
new file mode 100644
index 0000000..7d54a91
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/arith_test.go
@@ -0,0 +1,1497 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests arithmetic expressions
+
+package main
+
+import (
+ "math"
+ "runtime"
+ "testing"
+)
+
+const (
+ y = 0x0fffFFFF
+)
+
+var (
+ g8 int8
+ g16 int16
+ g32 int32
+ g64 int64
+)
+
+//go:noinline
+func lshNop1(x uint64) uint64 {
+ // two outer shifts should be removed
+ return (((x << 5) >> 2) << 2)
+}
+
+//go:noinline
+func lshNop2(x uint64) uint64 {
+ return (((x << 5) >> 2) << 3)
+}
+
+//go:noinline
+func lshNop3(x uint64) uint64 {
+ return (((x << 5) >> 2) << 6)
+}
+
+//go:noinline
+func lshNotNop(x uint64) uint64 {
+ // outer shift can't be removed
+ return (((x << 5) >> 2) << 1)
+}
+
+//go:noinline
+func rshNop1(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 2)
+}
+
+//go:noinline
+func rshNop2(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 3)
+}
+
+//go:noinline
+func rshNop3(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 6)
+}
+
+//go:noinline
+func rshNotNop(x uint64) uint64 {
+ return (((x >> 5) << 2) >> 1)
+}
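+
+// In the "Nop" variants above the inner x<<5 (or x>>5) already zeroes the
+// bits that the following >>2 (or <<2) discards, so lshNop1/rshNop1 reduce
+// to the inner shift alone and lshNop2/lshNop3 (and the rsh counterparts)
+// fold into a single larger shift. In the "NotNop" variants the net effect
+// still shifts bits in the opposite direction, so the outer shifts cannot
+// simply be dropped.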
+
+func testShiftRemoval(t *testing.T) {
+ allSet := ^uint64(0)
+ if want, got := uint64(0x7ffffffffffffff), rshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x3ffffffffffffff), rshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7fffffffffffff), rshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe), rshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval rshNotNop failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffe0), lshNop1(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop1 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xffffffffffffffc0), lshNop2(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop2 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0xfffffffffffffe00), lshNop3(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNop3 failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint64(0x7ffffffffffffff0), lshNotNop(allSet); want != got {
+ t.Errorf("testShiftRemoval lshNotNop failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func parseLE64(b []byte) uint64 {
+ // skip the first two bytes, and parse the remaining 8 as a uint64
+ return uint64(b[2]) | uint64(b[3])<<8 | uint64(b[4])<<16 | uint64(b[5])<<24 |
+ uint64(b[6])<<32 | uint64(b[7])<<40 | uint64(b[8])<<48 | uint64(b[9])<<56
+}
+
+//go:noinline
+func parseLE32(b []byte) uint32 {
+ return uint32(b[2]) | uint32(b[3])<<8 | uint32(b[4])<<16 | uint32(b[5])<<24
+}
+
+//go:noinline
+func parseLE16(b []byte) uint16 {
+ return uint16(b[2]) | uint16(b[3])<<8
+}
+
+// testLoadCombine tests for issue #14694 where load combining didn't respect the pointer offset.
+func testLoadCombine(t *testing.T) {
+ testData := []byte{0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08, 0x09}
+ if want, got := uint64(0x0908070605040302), parseLE64(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x05040302), parseLE32(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint16(0x0302), parseLE16(testData); want != got {
+ t.Errorf("testLoadCombine failed, wanted %d got %d", want, got)
+ }
+}
+
+var loadSymData = [...]byte{0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08}
+
+func testLoadSymCombine(t *testing.T) {
+ w2 := uint16(0x0201)
+ g2 := uint16(loadSymData[0]) | uint16(loadSymData[1])<<8
+ if g2 != w2 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w2, g2)
+ }
+ w4 := uint32(0x04030201)
+ g4 := uint32(loadSymData[0]) | uint32(loadSymData[1])<<8 |
+ uint32(loadSymData[2])<<16 | uint32(loadSymData[3])<<24
+ if g4 != w4 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w4, g4)
+ }
+ w8 := uint64(0x0807060504030201)
+ g8 := uint64(loadSymData[0]) | uint64(loadSymData[1])<<8 |
+ uint64(loadSymData[2])<<16 | uint64(loadSymData[3])<<24 |
+ uint64(loadSymData[4])<<32 | uint64(loadSymData[5])<<40 |
+ uint64(loadSymData[6])<<48 | uint64(loadSymData[7])<<56
+ if g8 != w8 {
+ t.Errorf("testLoadSymCombine failed, wanted %d got %d", w8, g8)
+ }
+}
+
+//go:noinline
+func invalidAdd_ssa(x uint32) uint32 {
+ return x + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y + y
+}
+
+//go:noinline
+func invalidSub_ssa(x uint32) uint32 {
+ return x - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y - y
+}
+
+//go:noinline
+func invalidMul_ssa(x uint32) uint32 {
+ return x * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y * y
+}
+
+// testLargeConst tests a situation where larger than 32 bit consts were passed to ADDL
+// causing an invalid instruction error.
+func testLargeConst(t *testing.T) {
+ if want, got := uint32(268435440), invalidAdd_ssa(1); want != got {
+ t.Errorf("testLargeConst add failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(4026531858), invalidSub_ssa(1); want != got {
+ t.Errorf("testLargeConst sub failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(268435455), invalidMul_ssa(1); want != got {
+ t.Errorf("testLargeConst mul failed, wanted %d got %d", want, got)
+ }
+}
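+
+// For example, with y = 0x0fffFFFF the add chain in invalidAdd_ssa computes
+// 1 + 17*y = 4563402736, which wraps modulo 1<<32 to the 268435440 checked
+// above.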
+
+// testArithRshConst ensures that "const >> const" right shifts correctly perform
+// sign extension on the lhs constant
+func testArithRshConst(t *testing.T) {
+ wantu := uint64(0x4000000000000000)
+ if got := arithRshuConst_ssa(); got != wantu {
+ t.Errorf("arithRshuConst failed, wanted %d got %d", wantu, got)
+ }
+
+ wants := int64(-0x4000000000000000)
+ if got := arithRshConst_ssa(); got != wants {
+ t.Errorf("arithRshConst failed, wanted %d got %d", wants, got)
+ }
+}
+
+//go:noinline
+func arithRshuConst_ssa() uint64 {
+ y := uint64(0x8000000000000001)
+ z := uint64(1)
+ return uint64(y >> z)
+}
+
+//go:noinline
+func arithRshConst_ssa() int64 {
+ y := int64(-0x8000000000000000)
+ z := uint64(1)
+ return int64(y >> z)
+}
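+
+// With the constants above, the unsigned shift is logical
+// (0x8000000000000001 >> 1 == 0x4000000000000000) while the signed shift
+// sign-extends (-0x8000000000000000 >> 1 == -0x4000000000000000), which is
+// exactly what testArithRshConst checks.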
+
+//go:noinline
+func arithConstShift_ssa(x int64) int64 {
+ return x >> 100
+}
+
+// testArithConstShift tests that right shifts by large constants preserve
+// the sign of the input.
+func testArithConstShift(t *testing.T) {
+ want := int64(-1)
+ if got := arithConstShift_ssa(-1); want != got {
+ t.Errorf("arithConstShift_ssa(-1) failed, wanted %d got %d", want, got)
+ }
+ want = 0
+ if got := arithConstShift_ssa(1); want != got {
+ t.Errorf("arithConstShift_ssa(1) failed, wanted %d got %d", want, got)
+ }
+}
+
+// The overflowConstShift*_ssa functions verify that constant folding for shift
+// doesn't wrap (i.e. x << MAX_INT << 1 doesn't get folded to x << 0).
+//go:noinline
+func overflowConstShift64_ssa(x int64) int64 {
+ return x << uint64(0xffffffffffffffff) << uint64(1)
+}
+
+//go:noinline
+func overflowConstShift32_ssa(x int64) int32 {
+ return int32(x) << uint32(0xffffffff) << uint32(1)
+}
+
+//go:noinline
+func overflowConstShift16_ssa(x int64) int16 {
+ return int16(x) << uint16(0xffff) << uint16(1)
+}
+
+//go:noinline
+func overflowConstShift8_ssa(x int64) int8 {
+ return int8(x) << uint8(0xff) << uint8(1)
+}
+
+func testOverflowConstShift(t *testing.T) {
+ want := int64(0)
+ for x := int64(-127); x < int64(127); x++ {
+ got := overflowConstShift64_ssa(x)
+ if want != got {
+ t.Errorf("overflowShift64 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift32_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift32 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift16_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift16 failed, wanted %d got %d", want, got)
+ }
+ got = int64(overflowConstShift8_ssa(x))
+ if want != got {
+ t.Errorf("overflowShift8 failed, wanted %d got %d", want, got)
+ }
+ }
+}
+
+// test64BitConstMult tests that rewrite rules don't fold 64 bit constants
+// into multiply instructions.
+func test64BitConstMult(t *testing.T) {
+ want := int64(103079215109)
+ if got := test64BitConstMult_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstMult failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstMult_ssa(a, b int64) int64 {
+ return 34359738369*a + b*34359738370
+}
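+
+// The constants above are 1<<35 + 1 and 1<<35 + 2, so with a=1, b=2 the
+// expected result is 3*(1<<35) + 5 = 103079215109.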
+
+// test64BitConstAdd tests that rewrite rules don't fold 64 bit constants
+// into add instructions.
+func test64BitConstAdd(t *testing.T) {
+ want := int64(3567671782835376650)
+ if got := test64BitConstAdd_ssa(1, 2); want != got {
+ t.Errorf("test64BitConstAdd failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func test64BitConstAdd_ssa(a, b int64) int64 {
+ return a + 575815584948629622 + b + 2991856197886747025
+}
+
+// testRegallocCVSpill tests that regalloc spills a value whose last use is the
+// current value.
+func testRegallocCVSpill(t *testing.T) {
+ want := int8(-9)
+ if got := testRegallocCVSpill_ssa(1, 2, 3, 4); want != got {
+ t.Errorf("testRegallocCVSpill failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testRegallocCVSpill_ssa(a, b, c, d int8) int8 {
+ return a + -32 + b + 63*c*-87*d
+}
+
+func testBitwiseLogic(t *testing.T) {
+ a, b := uint32(57623283), uint32(1314713839)
+ if want, got := uint32(38551779), testBitwiseAnd_ssa(a, b); want != got {
+ t.Errorf("testBitwiseAnd failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1333785343), testBitwiseOr_ssa(a, b); want != got {
+ t.Errorf("testBitwiseOr failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(1295233564), testBitwiseXor_ssa(a, b); want != got {
+ t.Errorf("testBitwiseXor failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(832), testBitwiseLsh_ssa(13, 4, 2); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseLsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseLsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-13), testBitwiseRsh_ssa(-832, 4, 2); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(0), testBitwiseRsh_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := int32(-1), testBitwiseRsh_ssa(-13, 25, 15); want != got {
+ t.Errorf("testBitwiseRsh failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0x3ffffff), testBitwiseRshU_ssa(0xffffffff, 4, 2); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(13, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+ if want, got := uint32(0), testBitwiseRshU_ssa(0x8aaaaaaa, 25, 15); want != got {
+ t.Errorf("testBitwiseRshU failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testBitwiseAnd_ssa(a, b uint32) uint32 {
+ return a & b
+}
+
+//go:noinline
+func testBitwiseOr_ssa(a, b uint32) uint32 {
+ return a | b
+}
+
+//go:noinline
+func testBitwiseXor_ssa(a, b uint32) uint32 {
+ return a ^ b
+}
+
+//go:noinline
+func testBitwiseLsh_ssa(a int32, b, c uint32) int32 {
+ return a << b << c
+}
+
+//go:noinline
+func testBitwiseRsh_ssa(a int32, b, c uint32) int32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testBitwiseRshU_ssa(a uint32, b, c uint32) uint32 {
+ return a >> b >> c
+}
+
+//go:noinline
+func testShiftCX_ssa() int {
+ v1 := uint8(3)
+ v4 := (v1 * v1) ^ v1 | v1 - v1 - v1&v1 ^ uint8(3+2) + v1*1>>0 - v1 | 1 | v1<<(2*3|0-0*0^1)
+ v5 := v4>>(3-0-uint(3)) | v1 | v1 + v1 ^ v4<<(0+1|3&1)<<(uint64(1)<<0*2*0<<0) ^ v1
+ v6 := v5 ^ (v1+v1)*v1 | v1 | v1*v1>>(v1&v1)>>(uint(1)<<0*uint(3)>>1)*v1<<2*v1<<v1 - v1>>2 | (v4 - v1) ^ v1 + v1 ^ v1>>1 | v1 + v1 - v1 ^ v1
+ v7 := v6 & v5 << 0
+ v1++
+ v11 := 2&1 ^ 0 + 3 | int(0^0)<<1>>(1*0*3) ^ 0*0 ^ 3&0*3&3 ^ 3*3 ^ 1 ^ int(2)<<(2*3) + 2 | 2 | 2 ^ 2 + 1 | 3 | 0 ^ int(1)>>1 ^ 2 // int
+ v7--
+ return int(uint64(2*1)<<(3-2)<<uint(3>>v7)-2)&v11 | v11 - int(2)<<0>>(2-1)*(v11*0&v11<<1<<(uint8(2)+v4))
+}
+
+func testShiftCX(t *testing.T) {
+ want := 141
+ if got := testShiftCX_ssa(); want != got {
+ t.Errorf("testShiftCX failed, wanted %d got %d", want, got)
+ }
+}
+
+// testSubqToNegq ensures that the SUBQ -> NEGQ translation works correctly.
+func testSubqToNegq(t *testing.T) {
+ want := int64(-318294940372190156)
+ if got := testSubqToNegq_ssa(1, 2, 3, 4, 5, 6, 7, 8, 9, 1, 2); want != got {
+ t.Errorf("testSubqToNegq failed, wanted %d got %d", want, got)
+ }
+}
+
+//go:noinline
+func testSubqToNegq_ssa(a, b, c, d, e, f, g, h, i, j, k int64) int64 {
+ return a + 8207351403619448057 - b - 1779494519303207690 + c*8810076340510052032*d - 4465874067674546219 - e*4361839741470334295 - f + 8688847565426072650*g*8065564729145417479
+}
+
+func testOcom(t *testing.T) {
+ want1, want2 := int32(0x55555555), int32(-0x55555556)
+ if got1, got2 := testOcom_ssa(0x55555555, 0x55555555); want1 != got1 || want2 != got2 {
+ t.Errorf("testOcom failed, wanted %d and %d got %d and %d", want1, want2, got1, got2)
+ }
+}
+
+//go:noinline
+func testOcom_ssa(a, b int32) (int32, int32) {
+ return ^^^^a, ^^^^^b
+}
+
+func lrot1_ssa(w uint8, x uint16, y uint32, z uint64) (a uint8, b uint16, c uint32, d uint64) {
+ a = (w << 5) | (w >> 3)
+ b = (x << 13) | (x >> 3)
+ c = (y << 29) | (y >> 3)
+ d = (z << 61) | (z >> 3)
+ return
+}
+
+//go:noinline
+func lrot2_ssa(w, n uint32) uint32 {
+ // Want to be sure that a "rotate by 32" which
+ // is really 0 | (w >> 0) == w
+ // is correctly compiled.
+ return (w << n) | (w >> (32 - n))
+}
+
+//go:noinline
+func lrot3_ssa(w uint32) uint32 {
+ // Want to be sure that a "rotate by 32" which
+ // is really 0 | (w >> 0) == w
+ // is correctly compiled.
+ return (w << 32) | (w >> (32 - 32))
+}
+
+func testLrot(t *testing.T) {
+ wantA, wantB, wantC, wantD := uint8(0xe1), uint16(0xe001),
+ uint32(0xe0000001), uint64(0xe000000000000001)
+ a, b, c, d := lrot1_ssa(0xf, 0xf, 0xf, 0xf)
+ if a != wantA || b != wantB || c != wantC || d != wantD {
+ t.Errorf("lrot1_ssa(0xf, 0xf, 0xf, 0xf)=%d %d %d %d, got %d %d %d %d", wantA, wantB, wantC, wantD, a, b, c, d)
+ }
+ x := lrot2_ssa(0xb0000001, 32)
+ wantX := uint32(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot2_ssa(0xb0000001, 32)=%d, got %d", wantX, x)
+ }
+ x = lrot3_ssa(0xb0000001)
+ if x != wantX {
+ t.Errorf("lrot3_ssa(0xb0000001)=%d, got %d", wantX, x)
+ }
+
+}
+
+//go:noinline
+func sub1_ssa() uint64 {
+ v1 := uint64(3) // uint64
+ return v1*v1 - (v1&v1)&v1
+}
+
+//go:noinline
+func sub2_ssa() uint8 {
+ v1 := uint8(0)
+ v3 := v1 + v1 + v1 ^ v1 | 3 + v1 ^ v1 | v1 ^ v1
+ v1-- // dev.ssa doesn't see this one
+ return v1 ^ v1*v1 - v3
+}
+
+func testSubConst(t *testing.T) {
+ x1 := sub1_ssa()
+ want1 := uint64(6)
+ if x1 != want1 {
+ t.Errorf("sub1_ssa()=%d, got %d", want1, x1)
+ }
+ x2 := sub2_ssa()
+ want2 := uint8(251)
+ if x2 != want2 {
+ t.Errorf("sub2_ssa()=%d, got %d", want2, x2)
+ }
+}
+
+//go:noinline
+func orPhi_ssa(a bool, x int) int {
+ v := 0
+ if a {
+ v = -1
+ } else {
+ v = -1
+ }
+ return x | v
+}
+
+func testOrPhi(t *testing.T) {
+ if want, got := -1, orPhi_ssa(true, 4); got != want {
+ t.Errorf("orPhi_ssa(true, 4)=%d, want %d", got, want)
+ }
+ if want, got := -1, orPhi_ssa(false, 0); got != want {
+ t.Errorf("orPhi_ssa(false, 0)=%d, want %d", got, want)
+ }
+}
+
+//go:noinline
+func addshiftLL_ssa(a, b uint32) uint32 {
+ return a + b<<3
+}
+
+//go:noinline
+func subshiftLL_ssa(a, b uint32) uint32 {
+ return a - b<<3
+}
+
+//go:noinline
+func rsbshiftLL_ssa(a, b uint32) uint32 {
+ return a<<3 - b
+}
+
+//go:noinline
+func andshiftLL_ssa(a, b uint32) uint32 {
+ return a & (b << 3)
+}
+
+//go:noinline
+func orshiftLL_ssa(a, b uint32) uint32 {
+ return a | b<<3
+}
+
+//go:noinline
+func xorshiftLL_ssa(a, b uint32) uint32 {
+ return a ^ b<<3
+}
+
+//go:noinline
+func bicshiftLL_ssa(a, b uint32) uint32 {
+ return a &^ (b << 3)
+}
+
+//go:noinline
+func notshiftLL_ssa(a uint32) uint32 {
+ return ^(a << 3)
+}
+
+//go:noinline
+func addshiftRL_ssa(a, b uint32) uint32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRL_ssa(a, b uint32) uint32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRL_ssa(a, b uint32) uint32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRL_ssa(a, b uint32) uint32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRL_ssa(a, b uint32) uint32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRL_ssa(a, b uint32) uint32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRL_ssa(a, b uint32) uint32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRL_ssa(a uint32) uint32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftRA_ssa(a, b int32) int32 {
+ return a + b>>3
+}
+
+//go:noinline
+func subshiftRA_ssa(a, b int32) int32 {
+ return a - b>>3
+}
+
+//go:noinline
+func rsbshiftRA_ssa(a, b int32) int32 {
+ return a>>3 - b
+}
+
+//go:noinline
+func andshiftRA_ssa(a, b int32) int32 {
+ return a & (b >> 3)
+}
+
+//go:noinline
+func orshiftRA_ssa(a, b int32) int32 {
+ return a | b>>3
+}
+
+//go:noinline
+func xorshiftRA_ssa(a, b int32) int32 {
+ return a ^ b>>3
+}
+
+//go:noinline
+func bicshiftRA_ssa(a, b int32) int32 {
+ return a &^ (b >> 3)
+}
+
+//go:noinline
+func notshiftRA_ssa(a int32) int32 {
+ return ^(a >> 3)
+}
+
+//go:noinline
+func addshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b<<s
+}
+
+//go:noinline
+func subshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b<<s
+}
+
+//go:noinline
+func rsbshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a<<s - b
+}
+
+//go:noinline
+func andshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b << s)
+}
+
+//go:noinline
+func orshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b<<s
+}
+
+//go:noinline
+func xorshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b<<s
+}
+
+//go:noinline
+func bicshiftLLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b << s)
+}
+
+//go:noinline
+func notshiftLLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a << s)
+}
+
+//go:noinline
+func addshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRLreg_ssa(a, b uint32, s uint8) uint32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRLreg_ssa(a uint32, s uint8) uint32 {
+ return ^(a >> s)
+}
+
+//go:noinline
+func addshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a + b>>s
+}
+
+//go:noinline
+func subshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a - b>>s
+}
+
+//go:noinline
+func rsbshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a>>s - b
+}
+
+//go:noinline
+func andshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a & (b >> s)
+}
+
+//go:noinline
+func orshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a | b>>s
+}
+
+//go:noinline
+func xorshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a ^ b>>s
+}
+
+//go:noinline
+func bicshiftRAreg_ssa(a, b int32, s uint8) int32 {
+ return a &^ (b >> s)
+}
+
+//go:noinline
+func notshiftRAreg_ssa(a int32, s uint8) int32 {
+ return ^(a >> s)
+}
+
+// test ARM shifted ops
+func testShiftedOps(t *testing.T) {
+ a, b := uint32(10), uint32(42)
+ if want, got := a+b<<3, addshiftLL_ssa(a, b); got != want {
+ t.Errorf("addshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b<<3, subshiftLL_ssa(a, b); got != want {
+ t.Errorf("subshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a<<3-b, rsbshiftLL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<3), andshiftLL_ssa(a, b); got != want {
+ t.Errorf("andshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b<<3, orshiftLL_ssa(a, b); got != want {
+ t.Errorf("orshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b<<3, xorshiftLL_ssa(a, b); got != want {
+ t.Errorf("xorshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<3), bicshiftLL_ssa(a, b); got != want {
+ t.Errorf("bicshiftLL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a << 3), notshiftLL_ssa(a); got != want {
+ t.Errorf("notshiftLL_ssa(10) = %d want %d", got, want)
+ }
+ if want, got := a+b>>3, addshiftRL_ssa(a, b); got != want {
+ t.Errorf("addshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a-b>>3, subshiftRL_ssa(a, b); got != want {
+ t.Errorf("subshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a>>3-b, rsbshiftRL_ssa(a, b); got != want {
+ t.Errorf("rsbshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>3), andshiftRL_ssa(a, b); got != want {
+ t.Errorf("andshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a|b>>3, orshiftRL_ssa(a, b); got != want {
+ t.Errorf("orshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a^b>>3, xorshiftRL_ssa(a, b); got != want {
+ t.Errorf("xorshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>3), bicshiftRL_ssa(a, b); got != want {
+ t.Errorf("bicshiftRL_ssa(10, 42) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> 3), notshiftRL_ssa(a); got != want {
+ t.Errorf("notshiftRL_ssa(10) = %d want %d", got, want)
+ }
+ c, d := int32(10), int32(-42)
+ if want, got := c+d>>3, addshiftRA_ssa(c, d); got != want {
+ t.Errorf("addshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c-d>>3, subshiftRA_ssa(c, d); got != want {
+ t.Errorf("subshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c>>3-d, rsbshiftRA_ssa(c, d); got != want {
+ t.Errorf("rsbshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>3), andshiftRA_ssa(c, d); got != want {
+ t.Errorf("andshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c|d>>3, orshiftRA_ssa(c, d); got != want {
+ t.Errorf("orshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c^d>>3, xorshiftRA_ssa(c, d); got != want {
+ t.Errorf("xorshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>3), bicshiftRA_ssa(c, d); got != want {
+ t.Errorf("bicshiftRA_ssa(10, -42) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> 3), notshiftRA_ssa(d); got != want {
+ t.Errorf("notshiftRA_ssa(-42) = %d want %d", got, want)
+ }
+ s := uint8(3)
+ if want, got := a+b<<s, addshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b<<s, subshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a<<s-b, rsbshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b<<s), andshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b<<s, orshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b<<s, xorshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b<<s), bicshiftLLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftLLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a << s), notshiftLLreg_ssa(a, s); got != want {
+		t.Errorf("notshiftLLreg_ssa(10, 3) = %d want %d", got, want)
+ }
+ if want, got := a+b>>s, addshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("addshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a-b>>s, subshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("subshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a>>s-b, rsbshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("rsbshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&(b>>s), andshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("andshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a|b>>s, orshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("orshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a^b>>s, xorshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("xorshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := a&^(b>>s), bicshiftRLreg_ssa(a, b, s); got != want {
+ t.Errorf("bicshiftRLreg_ssa(10, 42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(a >> s), notshiftRLreg_ssa(a, s); got != want {
+		t.Errorf("notshiftRLreg_ssa(10, 3) = %d want %d", got, want)
+ }
+ if want, got := c+d>>s, addshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("addshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c-d>>s, subshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("subshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c>>s-d, rsbshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("rsbshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&(d>>s), andshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("andshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c|d>>s, orshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("orshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c^d>>s, xorshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("xorshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := c&^(d>>s), bicshiftRAreg_ssa(c, d, s); got != want {
+ t.Errorf("bicshiftRAreg_ssa(10, -42, 3) = %d want %d", got, want)
+ }
+ if want, got := ^(d >> s), notshiftRAreg_ssa(d, s); got != want {
+ t.Errorf("notshiftRAreg_ssa(-42, 3) = %d want %d", got, want)
+ }
+}
+
+// TestArithmetic tests that both backends have the same result for arithmetic expressions.
+func TestArithmetic(t *testing.T) {
+ test64BitConstMult(t)
+ test64BitConstAdd(t)
+ testRegallocCVSpill(t)
+ testSubqToNegq(t)
+ testBitwiseLogic(t)
+ testOcom(t)
+ testLrot(t)
+ testShiftCX(t)
+ testSubConst(t)
+ testOverflowConstShift(t)
+ testArithConstShift(t)
+ testArithRshConst(t)
+ testLargeConst(t)
+ testLoadCombine(t)
+ testLoadSymCombine(t)
+ testShiftRemoval(t)
+ testShiftedOps(t)
+ testDivFixUp(t)
+ testDivisibleSignedPow2(t)
+ testDivisibility(t)
+}
+
+// testDivFixUp ensures that signed division fix-ups are being generated.
+func testDivFixUp(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error("testDivFixUp failed")
+ if e, ok := r.(runtime.Error); ok {
+ t.Logf("%v\n", e.Error())
+ }
+ }
+ }()
+ var w int8 = -128
+ var x int16 = -32768
+ var y int32 = -2147483648
+ var z int64 = -9223372036854775808
+
+ for i := -5; i < 0; i++ {
+ g8 = w / int8(i)
+ g16 = x / int16(i)
+ g32 = y / int32(i)
+ g64 = z / int64(i)
+ g8 = w % int8(i)
+ g16 = x % int16(i)
+ g32 = y % int32(i)
+ g64 = z % int64(i)
+ }
+}
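+
+// The interesting case above is i == -1: for the most negative value of a
+// signed type the Go spec defines x / -1 == x and x % -1 == 0 (two's
+// complement overflow), and on architectures whose divide instruction traps
+// on that overflowing quotient the compiler must emit a fix-up rather than
+// a bare divide.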
+
+//go:noinline
+func divisible_int8_2to1(x int8) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int8_2to2(x int8) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int8_2to3(x int8) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int8_2to4(x int8) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int8_2to5(x int8) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int8_2to6(x int8) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to1(x int16) bool {
+ return x%(1<<1) == 0
+}
+
+//go:noinline
+func divisible_int16_2to2(x int16) bool {
+ return x%(1<<2) == 0
+}
+
+//go:noinline
+func divisible_int16_2to3(x int16) bool {
+ return x%(1<<3) == 0
+}
+
+//go:noinline
+func divisible_int16_2to4(x int16) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int16_2to5(x int16) bool {
+ return x%(1<<5) == 0
+}
+
+//go:noinline
+func divisible_int16_2to6(x int16) bool {
+ return x%(1<<6) == 0
+}
+
+//go:noinline
+func divisible_int16_2to7(x int16) bool {
+ return x%(1<<7) == 0
+}
+
+//go:noinline
+func divisible_int16_2to8(x int16) bool {
+ return x%(1<<8) == 0
+}
+
+//go:noinline
+func divisible_int16_2to9(x int16) bool {
+ return x%(1<<9) == 0
+}
+
+//go:noinline
+func divisible_int16_2to10(x int16) bool {
+ return x%(1<<10) == 0
+}
+
+//go:noinline
+func divisible_int16_2to11(x int16) bool {
+ return x%(1<<11) == 0
+}
+
+//go:noinline
+func divisible_int16_2to12(x int16) bool {
+ return x%(1<<12) == 0
+}
+
+//go:noinline
+func divisible_int16_2to13(x int16) bool {
+ return x%(1<<13) == 0
+}
+
+//go:noinline
+func divisible_int16_2to14(x int16) bool {
+ return x%(1<<14) == 0
+}
+
+//go:noinline
+func divisible_int32_2to4(x int32) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int32_2to15(x int32) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int32_2to26(x int32) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to4(x int64) bool {
+ return x%(1<<4) == 0
+}
+
+//go:noinline
+func divisible_int64_2to15(x int64) bool {
+ return x%(1<<15) == 0
+}
+
+//go:noinline
+func divisible_int64_2to26(x int64) bool {
+ return x%(1<<26) == 0
+}
+
+//go:noinline
+func divisible_int64_2to34(x int64) bool {
+ return x%(1<<34) == 0
+}
+
+//go:noinline
+func divisible_int64_2to48(x int64) bool {
+ return x%(1<<48) == 0
+}
+
+//go:noinline
+func divisible_int64_2to57(x int64) bool {
+ return x%(1<<57) == 0
+}
+
+// testDivisibleSignedPow2 confirms that x%(1<<k)==0 is rewritten correctly
+func testDivisibleSignedPow2(t *testing.T) {
+ var i int64
+ var pow2 = []int64{
+ 1,
+ 1 << 1,
+ 1 << 2,
+ 1 << 3,
+ 1 << 4,
+ 1 << 5,
+ 1 << 6,
+ 1 << 7,
+ 1 << 8,
+ 1 << 9,
+ 1 << 10,
+ 1 << 11,
+ 1 << 12,
+ 1 << 13,
+ 1 << 14,
+ }
+ // exhaustive test for int8
+ for i = math.MinInt8; i <= math.MaxInt8; i++ {
+ if want, got := int8(i)%int8(pow2[1]) == 0, divisible_int8_2to1(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[2]) == 0, divisible_int8_2to2(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[3]) == 0, divisible_int8_2to3(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[4]) == 0, divisible_int8_2to4(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[5]) == 0, divisible_int8_2to5(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(pow2[6]) == 0, divisible_int8_2to6(int8(i)); got != want {
+ t.Errorf("divisible_int8_2to6(%d) = %v want %v", i, got, want)
+ }
+ }
+ // exhaustive test for int16
+ for i = math.MinInt16; i <= math.MaxInt16; i++ {
+ if want, got := int16(i)%int16(pow2[1]) == 0, divisible_int16_2to1(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to1(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[2]) == 0, divisible_int16_2to2(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to2(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[3]) == 0, divisible_int16_2to3(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to3(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[4]) == 0, divisible_int16_2to4(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to4(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[5]) == 0, divisible_int16_2to5(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to5(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[6]) == 0, divisible_int16_2to6(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to6(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[7]) == 0, divisible_int16_2to7(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to7(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[8]) == 0, divisible_int16_2to8(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[9]) == 0, divisible_int16_2to9(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to9(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[10]) == 0, divisible_int16_2to10(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to10(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[11]) == 0, divisible_int16_2to11(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to11(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[12]) == 0, divisible_int16_2to12(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to12(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[13]) == 0, divisible_int16_2to13(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to13(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(pow2[14]) == 0, divisible_int16_2to14(int16(i)); got != want {
+ t.Errorf("divisible_int16_2to14(%d) = %v want %v", i, got, want)
+ }
+ }
+ // spot check for int32 and int64
+ var (
+ two4 int64 = 1 << 4
+ two15 int64 = 1 << 15
+ two26 int64 = 1 << 26
+ two34 int64 = 1 << 34
+ two48 int64 = 1 << 48
+ two57 int64 = 1 << 57
+ )
+ var xs = []int64{two4, two4 + 3, -3 * two4, -3*two4 + 1,
+ two15, two15 + 3, -3 * two15, -3*two15 + 1,
+ two26, two26 + 37, -5 * two26, -5*two26 + 2,
+ two34, two34 + 356, -7 * two34, -7*two34 + 13,
+ two48, two48 + 3000, -12 * two48, -12*two48 + 1111,
+ two57, two57 + 397654, -15 * two57, -15*two57 + 11234,
+ }
+ for _, x := range xs {
+ if int64(int32(x)) == x {
+ if want, got := int32(x)%int32(two4) == 0, divisible_int32_2to4(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two15) == 0, divisible_int32_2to15(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := int32(x)%int32(two26) == 0, divisible_int32_2to26(int32(x)); got != want {
+ t.Errorf("divisible_int32_2to26(%d) = %v want %v", x, got, want)
+ }
+ }
+ // spot check for int64
+ if want, got := x%two4 == 0, divisible_int64_2to4(x); got != want {
+ t.Errorf("divisible_int64_2to4(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two15 == 0, divisible_int64_2to15(x); got != want {
+ t.Errorf("divisible_int64_2to15(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two26 == 0, divisible_int64_2to26(x); got != want {
+ t.Errorf("divisible_int64_2to26(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two34 == 0, divisible_int64_2to34(x); got != want {
+ t.Errorf("divisible_int64_2to34(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two48 == 0, divisible_int64_2to48(x); got != want {
+ t.Errorf("divisible_int64_2to48(%d) = %v want %v", x, got, want)
+ }
+
+ if want, got := x%two57 == 0, divisible_int64_2to57(x); got != want {
+ t.Errorf("divisible_int64_2to57(%d) = %v want %v", x, got, want)
+ }
+ }
+}
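+
+// divisiblePow2Sketch is an illustrative sketch (not one of the compiler's
+// rewrite rules verbatim) of why the tests above can be lowered to a mask:
+// x%(1<<k) == 0 exactly when the low k bits of x are zero, for signed as
+// well as unsigned values.
+func divisiblePow2Sketch(x int64, k uint) bool {
+	return x&((1<<k)-1) == 0
+}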
+
+//go:noinline
+func div6_uint8(n uint8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint16(n uint16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint32(n uint32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_uint64(n uint64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_uint8(n uint8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint16(n uint16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint32(n uint32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_uint64(n uint64) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div6_int8(n int8) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int16(n int16) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int32(n int32) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div6_int64(n int64) bool {
+ return n%6 == 0
+}
+
+//go:noinline
+func div19_int8(n int8) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int16(n int16) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int32(n int32) bool {
+ return n%19 == 0
+}
+
+//go:noinline
+func div19_int64(n int64) bool {
+ return n%19 == 0
+}
+
+// testDivisibility confirms that the rewrite rules for x%c == 0 with constant c are correct.
+func testDivisibility(t *testing.T) {
+ // unsigned tests
+ // test an even and an odd divisor
+ var sixU, nineteenU uint64 = 6, 19
+ // test all inputs for uint8, uint16
+ for i := uint64(0); i <= math.MaxUint16; i++ {
+ if i <= math.MaxUint8 {
+ if want, got := uint8(i)%uint8(sixU) == 0, div6_uint8(uint8(i)); got != want {
+ t.Errorf("div6_uint8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint8(i)%uint8(nineteenU) == 0, div19_uint8(uint8(i)); got != want {
+				t.Errorf("div19_uint8(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := uint16(i)%uint16(sixU) == 0, div6_uint16(uint16(i)); got != want {
+ t.Errorf("div6_uint16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := uint16(i)%uint16(nineteenU) == 0, div19_uint16(uint16(i)); got != want {
+ t.Errorf("div19_uint16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var maxU32, maxU64 uint64 = math.MaxUint32, math.MaxUint64
+ // spot check inputs for uint32 and uint64
+ xu := []uint64{
+ 0, 1, 2, 3, 4, 5,
+ sixU, 2 * sixU, 3 * sixU, 5 * sixU, 12345 * sixU,
+ sixU + 1, 2*sixU - 5, 3*sixU + 3, 5*sixU + 4, 12345*sixU - 2,
+ nineteenU, 2 * nineteenU, 3 * nineteenU, 5 * nineteenU, 12345 * nineteenU,
+ nineteenU + 1, 2*nineteenU - 5, 3*nineteenU + 3, 5*nineteenU + 4, 12345*nineteenU - 2,
+ maxU32, maxU32 - 1, maxU32 - 2, maxU32 - 3, maxU32 - 4,
+ maxU32 - 5, maxU32 - 6, maxU32 - 7, maxU32 - 8,
+ maxU32 - 9, maxU32 - 10, maxU32 - 11, maxU32 - 12,
+ maxU32 - 13, maxU32 - 14, maxU32 - 15, maxU32 - 16,
+ maxU32 - 17, maxU32 - 18, maxU32 - 19, maxU32 - 20,
+ maxU64, maxU64 - 1, maxU64 - 2, maxU64 - 3, maxU64 - 4,
+ maxU64 - 5, maxU64 - 6, maxU64 - 7, maxU64 - 8,
+ maxU64 - 9, maxU64 - 10, maxU64 - 11, maxU64 - 12,
+ maxU64 - 13, maxU64 - 14, maxU64 - 15, maxU64 - 16,
+ maxU64 - 17, maxU64 - 18, maxU64 - 19, maxU64 - 20,
+ }
+ for _, x := range xu {
+ if x <= maxU32 {
+ if want, got := uint32(x)%uint32(sixU) == 0, div6_uint32(uint32(x)); got != want {
+ t.Errorf("div6_uint32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := uint32(x)%uint32(nineteenU) == 0, div19_uint32(uint32(x)); got != want {
+ t.Errorf("div19_uint32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixU == 0, div6_uint64(x); got != want {
+ t.Errorf("div6_uint64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenU == 0, div19_uint64(x); got != want {
+ t.Errorf("div19_uint64(%d) = %v want %v", x, got, want)
+ }
+ }
+
+ // signed tests
+ // test an even and an odd divisor
+ var sixS, nineteenS int64 = 6, 19
+ // test all inputs for int8, int16
+ for i := int64(math.MinInt16); i <= math.MaxInt16; i++ {
+ if math.MinInt8 <= i && i <= math.MaxInt8 {
+ if want, got := int8(i)%int8(sixS) == 0, div6_int8(int8(i)); got != want {
+ t.Errorf("div6_int8(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int8(i)%int8(nineteenS) == 0, div19_int8(int8(i)); got != want {
+				t.Errorf("div19_int8(%d) = %v want %v", i, got, want)
+ }
+ }
+ if want, got := int16(i)%int16(sixS) == 0, div6_int16(int16(i)); got != want {
+ t.Errorf("div6_int16(%d) = %v want %v", i, got, want)
+ }
+ if want, got := int16(i)%int16(nineteenS) == 0, div19_int16(int16(i)); got != want {
+ t.Errorf("div19_int16(%d) = %v want %v", i, got, want)
+ }
+ }
+ var minI32, maxI32, minI64, maxI64 int64 = math.MinInt32, math.MaxInt32, math.MinInt64, math.MaxInt64
+ // spot check inputs for int32 and int64
+ xs := []int64{
+ 0, 1, 2, 3, 4, 5,
+ -1, -2, -3, -4, -5,
+ sixS, 2 * sixS, 3 * sixS, 5 * sixS, 12345 * sixS,
+ sixS + 1, 2*sixS - 5, 3*sixS + 3, 5*sixS + 4, 12345*sixS - 2,
+ -sixS, -2 * sixS, -3 * sixS, -5 * sixS, -12345 * sixS,
+ -sixS + 1, -2*sixS - 5, -3*sixS + 3, -5*sixS + 4, -12345*sixS - 2,
+ nineteenS, 2 * nineteenS, 3 * nineteenS, 5 * nineteenS, 12345 * nineteenS,
+ nineteenS + 1, 2*nineteenS - 5, 3*nineteenS + 3, 5*nineteenS + 4, 12345*nineteenS - 2,
+ -nineteenS, -2 * nineteenS, -3 * nineteenS, -5 * nineteenS, -12345 * nineteenS,
+ -nineteenS + 1, -2*nineteenS - 5, -3*nineteenS + 3, -5*nineteenS + 4, -12345*nineteenS - 2,
+ minI32, minI32 + 1, minI32 + 2, minI32 + 3, minI32 + 4,
+ minI32 + 5, minI32 + 6, minI32 + 7, minI32 + 8,
+ minI32 + 9, minI32 + 10, minI32 + 11, minI32 + 12,
+ minI32 + 13, minI32 + 14, minI32 + 15, minI32 + 16,
+ minI32 + 17, minI32 + 18, minI32 + 19, minI32 + 20,
+ maxI32, maxI32 - 1, maxI32 - 2, maxI32 - 3, maxI32 - 4,
+ maxI32 - 5, maxI32 - 6, maxI32 - 7, maxI32 - 8,
+ maxI32 - 9, maxI32 - 10, maxI32 - 11, maxI32 - 12,
+ maxI32 - 13, maxI32 - 14, maxI32 - 15, maxI32 - 16,
+ maxI32 - 17, maxI32 - 18, maxI32 - 19, maxI32 - 20,
+ minI64, minI64 + 1, minI64 + 2, minI64 + 3, minI64 + 4,
+ minI64 + 5, minI64 + 6, minI64 + 7, minI64 + 8,
+ minI64 + 9, minI64 + 10, minI64 + 11, minI64 + 12,
+ minI64 + 13, minI64 + 14, minI64 + 15, minI64 + 16,
+ minI64 + 17, minI64 + 18, minI64 + 19, minI64 + 20,
+ maxI64, maxI64 - 1, maxI64 - 2, maxI64 - 3, maxI64 - 4,
+ maxI64 - 5, maxI64 - 6, maxI64 - 7, maxI64 - 8,
+ maxI64 - 9, maxI64 - 10, maxI64 - 11, maxI64 - 12,
+ maxI64 - 13, maxI64 - 14, maxI64 - 15, maxI64 - 16,
+ maxI64 - 17, maxI64 - 18, maxI64 - 19, maxI64 - 20,
+ }
+ for _, x := range xs {
+ if minI32 <= x && x <= maxI32 {
+ if want, got := int32(x)%int32(sixS) == 0, div6_int32(int32(x)); got != want {
+ t.Errorf("div6_int32(%d) = %v want %v", x, got, want)
+ }
+ if want, got := int32(x)%int32(nineteenS) == 0, div19_int32(int32(x)); got != want {
+ t.Errorf("div19_int32(%d) = %v want %v", x, got, want)
+ }
+ }
+ if want, got := x%sixS == 0, div6_int64(x); got != want {
+ t.Errorf("div6_int64(%d) = %v want %v", x, got, want)
+ }
+ if want, got := x%nineteenS == 0, div19_int64(x); got != want {
+ t.Errorf("div19_int64(%d) = %v want %v", x, got, want)
+ }
+ }
+}
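+
+// divisibleBy19Sketch illustrates the kind of strength reduction the
+// divisibility tests above exercise (constants worked out for this comment,
+// not copied from the compiler): for an odd divisor c, n%c == 0 can be
+// checked with one multiply and a compare, because multiplying by the
+// inverse of c modulo 1<<32 maps exactly the multiples of c into
+// 0..(1<<32-1)/c.
+func divisibleBy19Sketch(n uint32) bool {
+	const inv = 0x286BCA1B // 19 * inv == 1 (mod 1<<32)
+	const lim = 0x0D79435E // (1<<32 - 1) / 19
+	return n*inv <= lim
+}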
+
+//go:noinline
+func genREV16_1(c uint64) uint64 {
+ b := ((c & 0xff00ff00ff00ff00) >> 8) | ((c & 0x00ff00ff00ff00ff) << 8)
+ return b
+}
+
+//go:noinline
+func genREV16_2(c uint64) uint64 {
+ b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8)
+ return b
+}
+
+//go:noinline
+func genREV16W(c uint32) uint32 {
+ b := ((c & 0xff00ff00) >> 8) | ((c & 0x00ff00ff) << 8)
+ return b
+}
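+
+// The masked-shift pattern above swaps the two bytes within each 16-bit
+// halfword (for example 0x4f3f2f1f becomes 0x3f4f1f2f); the names reflect
+// that it is intended to be recognized as a single REV16/REV16W instruction
+// on arm64.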
+
+func TestREV16(t *testing.T) {
+ x := uint64(0x8f7f6f5f4f3f2f1f)
+ want1 := uint64(0x7f8f5f6f3f4f1f2f)
+ want2 := uint64(0x3f4f1f2f)
+
+ got1 := genREV16_1(x)
+ if got1 != want1 {
+ t.Errorf("genREV16_1(%#x) = %#x want %#x", x, got1, want1)
+ }
+ got2 := genREV16_2(x)
+ if got2 != want2 {
+ t.Errorf("genREV16_2(%#x) = %#x want %#x", x, got2, want2)
+ }
+}
+
+func TestREV16W(t *testing.T) {
+ x := uint32(0x4f3f2f1f)
+ want := uint32(0x3f4f1f2f)
+
+ got := genREV16W(x)
+ if got != want {
+ t.Errorf("genREV16W(%#x) = %#x want %#x", x, got, want)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/array_test.go b/src/cmd/compile/internal/test/testdata/array_test.go
new file mode 100644
index 0000000..efa00d0
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/array_test.go
@@ -0,0 +1,132 @@
+package main
+
+import "testing"
+
+//go:noinline
+func testSliceLenCap12_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:j]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap1_ssa(a [10]int, i, j int) (int, int) {
+ b := a[i:]
+ return len(b), cap(b)
+}
+
+//go:noinline
+func testSliceLenCap2_ssa(a [10]int, i, j int) (int, int) {
+ b := a[:j]
+ return len(b), cap(b)
+}
+
+func testSliceLenCap(t *testing.T) {
+ a := [10]int{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ tests := [...]struct {
+ fn func(a [10]int, i, j int) (int, int)
+ i, j int // slice range
+ l, c int // len, cap
+ }{
+ // -1 means the value is not used.
+ {testSliceLenCap12_ssa, 0, 0, 0, 10},
+ {testSliceLenCap12_ssa, 0, 1, 1, 10},
+ {testSliceLenCap12_ssa, 0, 10, 10, 10},
+ {testSliceLenCap12_ssa, 10, 10, 0, 0},
+ {testSliceLenCap12_ssa, 0, 5, 5, 10},
+ {testSliceLenCap12_ssa, 5, 5, 0, 5},
+ {testSliceLenCap12_ssa, 5, 10, 5, 5},
+ {testSliceLenCap1_ssa, 0, -1, 0, 10},
+ {testSliceLenCap1_ssa, 5, -1, 5, 5},
+ {testSliceLenCap1_ssa, 10, -1, 0, 0},
+ {testSliceLenCap2_ssa, -1, 0, 0, 10},
+ {testSliceLenCap2_ssa, -1, 5, 5, 10},
+ {testSliceLenCap2_ssa, -1, 10, 10, 10},
+ }
+
+ for i, test := range tests {
+ if l, c := test.fn(a, test.i, test.j); l != test.l || c != test.c {
+ t.Errorf("#%d len(a[%d:%d]), cap(a[%d:%d]) = %d %d, want %d %d", i, test.i, test.j, test.i, test.j, l, c, test.l, test.c)
+ }
+ }
+}
+
+//go:noinline
+func testSliceGetElement_ssa(a [10]int, i, j, p int) int {
+ return a[i:j][p]
+}
+
+func testSliceGetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 20},
+ {0, 5, 4, 40},
+ {5, 10, 3, 80},
+ {1, 9, 7, 80},
+ }
+
+ for i, test := range tests {
+ if got := testSliceGetElement_ssa(a, test.i, test.j, test.p); got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testSliceSetElement_ssa(a *[10]int, i, j, p, x int) {
+ (*a)[i:j][p] = x
+}
+
+func testSliceSetElement(t *testing.T) {
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ tests := [...]struct {
+ i, j, p int
+ want int // a[i:j][p]
+ }{
+ {0, 10, 2, 17},
+ {0, 5, 4, 11},
+ {5, 10, 3, 28},
+ {1, 9, 7, 99},
+ }
+
+ for i, test := range tests {
+ testSliceSetElement_ssa(&a, test.i, test.j, test.p, test.want)
+ if got := a[test.i+test.p]; got != test.want {
+ t.Errorf("#%d a[%d:%d][%d] = %d, wanted %d", i, test.i, test.j, test.p, got, test.want)
+ }
+ }
+}
+
+func testSlicePanic1(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceLenCap12_ssa(a, 3, 12)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func testSlicePanic2(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ a := [10]int{0, 10, 20, 30, 40, 50, 60, 70, 80, 90}
+ testSliceGetElement_ssa(a, 3, 7, 4)
+ t.Errorf("expected to panic, but didn't")
+}
+
+func TestArray(t *testing.T) {
+ testSliceLenCap(t)
+ testSliceGetElement(t)
+ testSliceSetElement(t)
+ testSlicePanic1(t)
+ testSlicePanic2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/assert_test.go b/src/cmd/compile/internal/test/testdata/assert_test.go
new file mode 100644
index 0000000..4326be8
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/assert_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests type assertion expressions and statements
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+type (
+ S struct{}
+ U struct{}
+
+ I interface {
+ F()
+ }
+)
+
+var (
+ s *S
+ u *U
+)
+
+func (s *S) F() {}
+func (u *U) F() {}
+
+func e2t_ssa(e interface{}) *U {
+ return e.(*U)
+}
+
+func i2t_ssa(i I) *U {
+ return i.(*U)
+}
+
+func testAssertE2TOk(t *testing.T) {
+ if got := e2t_ssa(u); got != u {
+ t.Errorf("e2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertE2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("e2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("e2t_ssa(s) panic type %T", e)
+ }
+ want := "interface conversion: interface {} is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("e2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = e2t_ssa(s)
+ t.Errorf("e2t_ssa(s) should panic")
+
+}
+
+func testAssertI2TOk(t *testing.T) {
+ if got := i2t_ssa(u); got != u {
+ t.Errorf("i2t_ssa(u)=%v want %v", got, u)
+ }
+}
+
+func testAssertI2TPanic(t *testing.T) {
+ var got *U
+ defer func() {
+ if got != nil {
+ t.Errorf("i2t_ssa(s)=%v want nil", got)
+ }
+ e := recover()
+ err, ok := e.(*runtime.TypeAssertionError)
+ if !ok {
+ t.Errorf("i2t_ssa(s) panic type %T", e)
+ }
+ want := "interface conversion: main.I is *main.S, not *main.U"
+ if err.Error() != want {
+ t.Errorf("i2t_ssa(s) wrong error, want '%s', got '%s'", want, err.Error())
+ }
+ }()
+ got = i2t_ssa(s)
+ t.Errorf("i2t_ssa(s) should panic")
+}
+
+func e2t2_ssa(e interface{}) (*U, bool) {
+ u, ok := e.(*U)
+ return u, ok
+}
+
+func i2t2_ssa(i I) (*U, bool) {
+ u, ok := i.(*U)
+ return u, ok
+}
+
+func testAssertE2T2(t *testing.T) {
+ if got, ok := e2t2_ssa(u); !ok || got != u {
+ t.Errorf("e2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := e2t2_ssa(s); ok || got != nil {
+ t.Errorf("e2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+func testAssertI2T2(t *testing.T) {
+ if got, ok := i2t2_ssa(u); !ok || got != u {
+ t.Errorf("i2t2_ssa(u)=(%v, %v) want (%v, %v)", got, ok, u, true)
+ }
+ if got, ok := i2t2_ssa(s); ok || got != nil {
+ t.Errorf("i2t2_ssa(s)=(%v, %v) want (%v, %v)", got, ok, nil, false)
+ }
+}
+
+// TestTypeAssertion tests type assertions.
+func TestTypeAssertion(t *testing.T) {
+ testAssertE2TOk(t)
+ testAssertE2TPanic(t)
+ testAssertI2TOk(t)
+ testAssertI2TPanic(t)
+ testAssertE2T2(t)
+ testAssertI2T2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/break_test.go b/src/cmd/compile/internal/test/testdata/break_test.go
new file mode 100644
index 0000000..50245df
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/break_test.go
@@ -0,0 +1,250 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests continue and break.
+
+package main
+
+import "testing"
+
+func continuePlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ return n
+}
+
+func continueLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ return n
+}
+
+func continuePlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func continueLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ continue Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakPlain_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func breakLabeled_ssa() int {
+ var n int
+Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ return n
+}
+
+func breakPlainInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledInner_ssa() int {
+ var n int
+ for j := 0; j < 30; j += 10 {
+ Next:
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+func breakLabeledOuter_ssa() int {
+ var n int
+Next:
+ for j := 0; j < 30; j += 10 {
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break Next
+ }
+ n = i
+ }
+ n += j
+ }
+ return n
+}
+
+var g, h int // globals to ensure optimizations don't collapse our switch statements
+
+func switchPlain_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ break
+ n = 2
+ }
+ return n
+}
+
+func switchLabeled_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ break Done
+ n = 2
+ }
+ return n
+}
+
+func switchPlainInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledInner_ssa() int {
+ var n int
+ switch g {
+ case 0:
+ n = 1
+ Done:
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+func switchLabeledOuter_ssa() int {
+ var n int
+Done:
+ switch g {
+ case 0:
+ n = 1
+ switch h {
+ case 0:
+ n += 10
+ break Done
+ }
+ n = 2
+ }
+ return n
+}
+
+// TestBreakContinue tests that continue and break statements do what they say.
+func TestBreakContinue(t *testing.T) {
+ tests := [...]struct {
+ name string
+ fn func() int
+ want int
+ }{
+ {"continuePlain_ssa", continuePlain_ssa, 9},
+ {"continueLabeled_ssa", continueLabeled_ssa, 9},
+ {"continuePlainInner_ssa", continuePlainInner_ssa, 29},
+ {"continueLabeledInner_ssa", continueLabeledInner_ssa, 29},
+ {"continueLabeledOuter_ssa", continueLabeledOuter_ssa, 5},
+
+ {"breakPlain_ssa", breakPlain_ssa, 5},
+ {"breakLabeled_ssa", breakLabeled_ssa, 5},
+ {"breakPlainInner_ssa", breakPlainInner_ssa, 25},
+ {"breakLabeledInner_ssa", breakLabeledInner_ssa, 25},
+ {"breakLabeledOuter_ssa", breakLabeledOuter_ssa, 5},
+
+ {"switchPlain_ssa", switchPlain_ssa, 1},
+ {"switchLabeled_ssa", switchLabeled_ssa, 1},
+ {"switchPlainInner_ssa", switchPlainInner_ssa, 2},
+ {"switchLabeledInner_ssa", switchLabeledInner_ssa, 2},
+ {"switchLabeledOuter_ssa", switchLabeledOuter_ssa, 11},
+
+ // no select tests; they're identical to switch
+ }
+
+ for _, test := range tests {
+ if got := test.fn(); got != test.want {
+ t.Errorf("%s()=%d, want %d", test.name, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/chan_test.go b/src/cmd/compile/internal/test/testdata/chan_test.go
new file mode 100644
index 0000000..628bd8f
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/chan_test.go
@@ -0,0 +1,63 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// chan_test.go tests chan operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenChan_ssa(v chan int) int {
+ return len(v)
+}
+
+//go:noinline
+func capChan_ssa(v chan int) int {
+ return cap(v)
+}
+
+func testLenChan(t *testing.T) {
+
+ v := make(chan int, 10)
+ v <- 1
+ v <- 1
+ v <- 1
+
+ if want, got := 3, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(chan) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, lenChan_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+
+func testCapChan(t *testing.T) {
+
+ v := make(chan int, 25)
+
+ if want, got := 25, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(chan) = %d, got %d", want, got)
+ }
+}
+
+func testCapNilChan(t *testing.T) {
+
+ var v chan int
+ if want, got := 0, capChan_ssa(v); got != want {
+ t.Errorf("expected cap(nil) = %d, got %d", want, got)
+ }
+}
+
+func TestChan(t *testing.T) {
+ testLenChan(t)
+ testLenNilChan(t)
+
+ testCapChan(t)
+ testCapNilChan(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/closure_test.go b/src/cmd/compile/internal/test/testdata/closure_test.go
new file mode 100644
index 0000000..6cddc2d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/closure_test.go
@@ -0,0 +1,32 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// closure_test.go tests closure operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testCFunc_ssa() int {
+ a := 0
+ b := func() {
+ switch {
+ }
+ a++
+ }
+ b()
+ b()
+ return a
+}
+
+func testCFunc(t *testing.T) {
+ if want, got := 2, testCFunc_ssa(); got != want {
+ t.Errorf("expected %d, got %d", want, got)
+ }
+}
+
+// TestClosure tests closure related behavior.
+func TestClosure(t *testing.T) {
+ testCFunc(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/cmpConst_test.go b/src/cmd/compile/internal/test/testdata/cmpConst_test.go
new file mode 100644
index 0000000..9400ef4
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/cmpConst_test.go
@@ -0,0 +1,2209 @@
+// Code generated by gen/cmpConstGen.go. DO NOT EDIT.
+
+package main
+
+import (
+ "reflect"
+ "runtime"
+ "testing"
+)
+
+// results show the expected result for the elements left of, equal to and right of the index.
+type result struct{ l, e, r bool }
+
+var (
+ eq = result{l: false, e: true, r: false}
+ ne = result{l: true, e: false, r: true}
+ lt = result{l: true, e: false, r: false}
+ le = result{l: true, e: true, r: false}
+ gt = result{l: false, e: false, r: true}
+ ge = result{l: false, e: true, r: true}
+)
+
+// uint64 tests
+var uint64_vals = []uint64{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+ 9223372036854775808,
+ 18374686479671623680,
+ 18446744073709551614,
+ 18446744073709551615,
+}
+
+func lt_0_uint64(x uint64) bool { return x < 0 }
+func le_0_uint64(x uint64) bool { return x <= 0 }
+func gt_0_uint64(x uint64) bool { return x > 0 }
+func ge_0_uint64(x uint64) bool { return x >= 0 }
+func eq_0_uint64(x uint64) bool { return x == 0 }
+func ne_0_uint64(x uint64) bool { return x != 0 }
+func lt_1_uint64(x uint64) bool { return x < 1 }
+func le_1_uint64(x uint64) bool { return x <= 1 }
+func gt_1_uint64(x uint64) bool { return x > 1 }
+func ge_1_uint64(x uint64) bool { return x >= 1 }
+func eq_1_uint64(x uint64) bool { return x == 1 }
+func ne_1_uint64(x uint64) bool { return x != 1 }
+func lt_126_uint64(x uint64) bool { return x < 126 }
+func le_126_uint64(x uint64) bool { return x <= 126 }
+func gt_126_uint64(x uint64) bool { return x > 126 }
+func ge_126_uint64(x uint64) bool { return x >= 126 }
+func eq_126_uint64(x uint64) bool { return x == 126 }
+func ne_126_uint64(x uint64) bool { return x != 126 }
+func lt_127_uint64(x uint64) bool { return x < 127 }
+func le_127_uint64(x uint64) bool { return x <= 127 }
+func gt_127_uint64(x uint64) bool { return x > 127 }
+func ge_127_uint64(x uint64) bool { return x >= 127 }
+func eq_127_uint64(x uint64) bool { return x == 127 }
+func ne_127_uint64(x uint64) bool { return x != 127 }
+func lt_128_uint64(x uint64) bool { return x < 128 }
+func le_128_uint64(x uint64) bool { return x <= 128 }
+func gt_128_uint64(x uint64) bool { return x > 128 }
+func ge_128_uint64(x uint64) bool { return x >= 128 }
+func eq_128_uint64(x uint64) bool { return x == 128 }
+func ne_128_uint64(x uint64) bool { return x != 128 }
+func lt_254_uint64(x uint64) bool { return x < 254 }
+func le_254_uint64(x uint64) bool { return x <= 254 }
+func gt_254_uint64(x uint64) bool { return x > 254 }
+func ge_254_uint64(x uint64) bool { return x >= 254 }
+func eq_254_uint64(x uint64) bool { return x == 254 }
+func ne_254_uint64(x uint64) bool { return x != 254 }
+func lt_255_uint64(x uint64) bool { return x < 255 }
+func le_255_uint64(x uint64) bool { return x <= 255 }
+func gt_255_uint64(x uint64) bool { return x > 255 }
+func ge_255_uint64(x uint64) bool { return x >= 255 }
+func eq_255_uint64(x uint64) bool { return x == 255 }
+func ne_255_uint64(x uint64) bool { return x != 255 }
+func lt_256_uint64(x uint64) bool { return x < 256 }
+func le_256_uint64(x uint64) bool { return x <= 256 }
+func gt_256_uint64(x uint64) bool { return x > 256 }
+func ge_256_uint64(x uint64) bool { return x >= 256 }
+func eq_256_uint64(x uint64) bool { return x == 256 }
+func ne_256_uint64(x uint64) bool { return x != 256 }
+func lt_32766_uint64(x uint64) bool { return x < 32766 }
+func le_32766_uint64(x uint64) bool { return x <= 32766 }
+func gt_32766_uint64(x uint64) bool { return x > 32766 }
+func ge_32766_uint64(x uint64) bool { return x >= 32766 }
+func eq_32766_uint64(x uint64) bool { return x == 32766 }
+func ne_32766_uint64(x uint64) bool { return x != 32766 }
+func lt_32767_uint64(x uint64) bool { return x < 32767 }
+func le_32767_uint64(x uint64) bool { return x <= 32767 }
+func gt_32767_uint64(x uint64) bool { return x > 32767 }
+func ge_32767_uint64(x uint64) bool { return x >= 32767 }
+func eq_32767_uint64(x uint64) bool { return x == 32767 }
+func ne_32767_uint64(x uint64) bool { return x != 32767 }
+func lt_32768_uint64(x uint64) bool { return x < 32768 }
+func le_32768_uint64(x uint64) bool { return x <= 32768 }
+func gt_32768_uint64(x uint64) bool { return x > 32768 }
+func ge_32768_uint64(x uint64) bool { return x >= 32768 }
+func eq_32768_uint64(x uint64) bool { return x == 32768 }
+func ne_32768_uint64(x uint64) bool { return x != 32768 }
+func lt_65534_uint64(x uint64) bool { return x < 65534 }
+func le_65534_uint64(x uint64) bool { return x <= 65534 }
+func gt_65534_uint64(x uint64) bool { return x > 65534 }
+func ge_65534_uint64(x uint64) bool { return x >= 65534 }
+func eq_65534_uint64(x uint64) bool { return x == 65534 }
+func ne_65534_uint64(x uint64) bool { return x != 65534 }
+func lt_65535_uint64(x uint64) bool { return x < 65535 }
+func le_65535_uint64(x uint64) bool { return x <= 65535 }
+func gt_65535_uint64(x uint64) bool { return x > 65535 }
+func ge_65535_uint64(x uint64) bool { return x >= 65535 }
+func eq_65535_uint64(x uint64) bool { return x == 65535 }
+func ne_65535_uint64(x uint64) bool { return x != 65535 }
+func lt_65536_uint64(x uint64) bool { return x < 65536 }
+func le_65536_uint64(x uint64) bool { return x <= 65536 }
+func gt_65536_uint64(x uint64) bool { return x > 65536 }
+func ge_65536_uint64(x uint64) bool { return x >= 65536 }
+func eq_65536_uint64(x uint64) bool { return x == 65536 }
+func ne_65536_uint64(x uint64) bool { return x != 65536 }
+func lt_2147483646_uint64(x uint64) bool { return x < 2147483646 }
+func le_2147483646_uint64(x uint64) bool { return x <= 2147483646 }
+func gt_2147483646_uint64(x uint64) bool { return x > 2147483646 }
+func ge_2147483646_uint64(x uint64) bool { return x >= 2147483646 }
+func eq_2147483646_uint64(x uint64) bool { return x == 2147483646 }
+func ne_2147483646_uint64(x uint64) bool { return x != 2147483646 }
+func lt_2147483647_uint64(x uint64) bool { return x < 2147483647 }
+func le_2147483647_uint64(x uint64) bool { return x <= 2147483647 }
+func gt_2147483647_uint64(x uint64) bool { return x > 2147483647 }
+func ge_2147483647_uint64(x uint64) bool { return x >= 2147483647 }
+func eq_2147483647_uint64(x uint64) bool { return x == 2147483647 }
+func ne_2147483647_uint64(x uint64) bool { return x != 2147483647 }
+func lt_2147483648_uint64(x uint64) bool { return x < 2147483648 }
+func le_2147483648_uint64(x uint64) bool { return x <= 2147483648 }
+func gt_2147483648_uint64(x uint64) bool { return x > 2147483648 }
+func ge_2147483648_uint64(x uint64) bool { return x >= 2147483648 }
+func eq_2147483648_uint64(x uint64) bool { return x == 2147483648 }
+func ne_2147483648_uint64(x uint64) bool { return x != 2147483648 }
+func lt_4278190080_uint64(x uint64) bool { return x < 4278190080 }
+func le_4278190080_uint64(x uint64) bool { return x <= 4278190080 }
+func gt_4278190080_uint64(x uint64) bool { return x > 4278190080 }
+func ge_4278190080_uint64(x uint64) bool { return x >= 4278190080 }
+func eq_4278190080_uint64(x uint64) bool { return x == 4278190080 }
+func ne_4278190080_uint64(x uint64) bool { return x != 4278190080 }
+func lt_4294967294_uint64(x uint64) bool { return x < 4294967294 }
+func le_4294967294_uint64(x uint64) bool { return x <= 4294967294 }
+func gt_4294967294_uint64(x uint64) bool { return x > 4294967294 }
+func ge_4294967294_uint64(x uint64) bool { return x >= 4294967294 }
+func eq_4294967294_uint64(x uint64) bool { return x == 4294967294 }
+func ne_4294967294_uint64(x uint64) bool { return x != 4294967294 }
+func lt_4294967295_uint64(x uint64) bool { return x < 4294967295 }
+func le_4294967295_uint64(x uint64) bool { return x <= 4294967295 }
+func gt_4294967295_uint64(x uint64) bool { return x > 4294967295 }
+func ge_4294967295_uint64(x uint64) bool { return x >= 4294967295 }
+func eq_4294967295_uint64(x uint64) bool { return x == 4294967295 }
+func ne_4294967295_uint64(x uint64) bool { return x != 4294967295 }
+func lt_4294967296_uint64(x uint64) bool { return x < 4294967296 }
+func le_4294967296_uint64(x uint64) bool { return x <= 4294967296 }
+func gt_4294967296_uint64(x uint64) bool { return x > 4294967296 }
+func ge_4294967296_uint64(x uint64) bool { return x >= 4294967296 }
+func eq_4294967296_uint64(x uint64) bool { return x == 4294967296 }
+func ne_4294967296_uint64(x uint64) bool { return x != 4294967296 }
+func lt_1095216660480_uint64(x uint64) bool { return x < 1095216660480 }
+func le_1095216660480_uint64(x uint64) bool { return x <= 1095216660480 }
+func gt_1095216660480_uint64(x uint64) bool { return x > 1095216660480 }
+func ge_1095216660480_uint64(x uint64) bool { return x >= 1095216660480 }
+func eq_1095216660480_uint64(x uint64) bool { return x == 1095216660480 }
+func ne_1095216660480_uint64(x uint64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_uint64(x uint64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_uint64(x uint64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_uint64(x uint64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_uint64(x uint64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_uint64(x uint64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_uint64(x uint64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_uint64(x uint64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_uint64(x uint64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_uint64(x uint64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_uint64(x uint64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_uint64(x uint64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_uint64(x uint64) bool { return x != 9223372036854775807 }
+func lt_9223372036854775808_uint64(x uint64) bool { return x < 9223372036854775808 }
+func le_9223372036854775808_uint64(x uint64) bool { return x <= 9223372036854775808 }
+func gt_9223372036854775808_uint64(x uint64) bool { return x > 9223372036854775808 }
+func ge_9223372036854775808_uint64(x uint64) bool { return x >= 9223372036854775808 }
+func eq_9223372036854775808_uint64(x uint64) bool { return x == 9223372036854775808 }
+func ne_9223372036854775808_uint64(x uint64) bool { return x != 9223372036854775808 }
+func lt_18374686479671623680_uint64(x uint64) bool { return x < 18374686479671623680 }
+func le_18374686479671623680_uint64(x uint64) bool { return x <= 18374686479671623680 }
+func gt_18374686479671623680_uint64(x uint64) bool { return x > 18374686479671623680 }
+func ge_18374686479671623680_uint64(x uint64) bool { return x >= 18374686479671623680 }
+func eq_18374686479671623680_uint64(x uint64) bool { return x == 18374686479671623680 }
+func ne_18374686479671623680_uint64(x uint64) bool { return x != 18374686479671623680 }
+func lt_18446744073709551614_uint64(x uint64) bool { return x < 18446744073709551614 }
+func le_18446744073709551614_uint64(x uint64) bool { return x <= 18446744073709551614 }
+func gt_18446744073709551614_uint64(x uint64) bool { return x > 18446744073709551614 }
+func ge_18446744073709551614_uint64(x uint64) bool { return x >= 18446744073709551614 }
+func eq_18446744073709551614_uint64(x uint64) bool { return x == 18446744073709551614 }
+func ne_18446744073709551614_uint64(x uint64) bool { return x != 18446744073709551614 }
+func lt_18446744073709551615_uint64(x uint64) bool { return x < 18446744073709551615 }
+func le_18446744073709551615_uint64(x uint64) bool { return x <= 18446744073709551615 }
+func gt_18446744073709551615_uint64(x uint64) bool { return x > 18446744073709551615 }
+func ge_18446744073709551615_uint64(x uint64) bool { return x >= 18446744073709551615 }
+func eq_18446744073709551615_uint64(x uint64) bool { return x == 18446744073709551615 }
+func ne_18446744073709551615_uint64(x uint64) bool { return x != 18446744073709551615 }
+
+var uint64_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint64},
+ {idx: 0, exp: le, fn: le_0_uint64},
+ {idx: 0, exp: gt, fn: gt_0_uint64},
+ {idx: 0, exp: ge, fn: ge_0_uint64},
+ {idx: 0, exp: eq, fn: eq_0_uint64},
+ {idx: 0, exp: ne, fn: ne_0_uint64},
+ {idx: 1, exp: lt, fn: lt_1_uint64},
+ {idx: 1, exp: le, fn: le_1_uint64},
+ {idx: 1, exp: gt, fn: gt_1_uint64},
+ {idx: 1, exp: ge, fn: ge_1_uint64},
+ {idx: 1, exp: eq, fn: eq_1_uint64},
+ {idx: 1, exp: ne, fn: ne_1_uint64},
+ {idx: 2, exp: lt, fn: lt_126_uint64},
+ {idx: 2, exp: le, fn: le_126_uint64},
+ {idx: 2, exp: gt, fn: gt_126_uint64},
+ {idx: 2, exp: ge, fn: ge_126_uint64},
+ {idx: 2, exp: eq, fn: eq_126_uint64},
+ {idx: 2, exp: ne, fn: ne_126_uint64},
+ {idx: 3, exp: lt, fn: lt_127_uint64},
+ {idx: 3, exp: le, fn: le_127_uint64},
+ {idx: 3, exp: gt, fn: gt_127_uint64},
+ {idx: 3, exp: ge, fn: ge_127_uint64},
+ {idx: 3, exp: eq, fn: eq_127_uint64},
+ {idx: 3, exp: ne, fn: ne_127_uint64},
+ {idx: 4, exp: lt, fn: lt_128_uint64},
+ {idx: 4, exp: le, fn: le_128_uint64},
+ {idx: 4, exp: gt, fn: gt_128_uint64},
+ {idx: 4, exp: ge, fn: ge_128_uint64},
+ {idx: 4, exp: eq, fn: eq_128_uint64},
+ {idx: 4, exp: ne, fn: ne_128_uint64},
+ {idx: 5, exp: lt, fn: lt_254_uint64},
+ {idx: 5, exp: le, fn: le_254_uint64},
+ {idx: 5, exp: gt, fn: gt_254_uint64},
+ {idx: 5, exp: ge, fn: ge_254_uint64},
+ {idx: 5, exp: eq, fn: eq_254_uint64},
+ {idx: 5, exp: ne, fn: ne_254_uint64},
+ {idx: 6, exp: lt, fn: lt_255_uint64},
+ {idx: 6, exp: le, fn: le_255_uint64},
+ {idx: 6, exp: gt, fn: gt_255_uint64},
+ {idx: 6, exp: ge, fn: ge_255_uint64},
+ {idx: 6, exp: eq, fn: eq_255_uint64},
+ {idx: 6, exp: ne, fn: ne_255_uint64},
+ {idx: 7, exp: lt, fn: lt_256_uint64},
+ {idx: 7, exp: le, fn: le_256_uint64},
+ {idx: 7, exp: gt, fn: gt_256_uint64},
+ {idx: 7, exp: ge, fn: ge_256_uint64},
+ {idx: 7, exp: eq, fn: eq_256_uint64},
+ {idx: 7, exp: ne, fn: ne_256_uint64},
+ {idx: 8, exp: lt, fn: lt_32766_uint64},
+ {idx: 8, exp: le, fn: le_32766_uint64},
+ {idx: 8, exp: gt, fn: gt_32766_uint64},
+ {idx: 8, exp: ge, fn: ge_32766_uint64},
+ {idx: 8, exp: eq, fn: eq_32766_uint64},
+ {idx: 8, exp: ne, fn: ne_32766_uint64},
+ {idx: 9, exp: lt, fn: lt_32767_uint64},
+ {idx: 9, exp: le, fn: le_32767_uint64},
+ {idx: 9, exp: gt, fn: gt_32767_uint64},
+ {idx: 9, exp: ge, fn: ge_32767_uint64},
+ {idx: 9, exp: eq, fn: eq_32767_uint64},
+ {idx: 9, exp: ne, fn: ne_32767_uint64},
+ {idx: 10, exp: lt, fn: lt_32768_uint64},
+ {idx: 10, exp: le, fn: le_32768_uint64},
+ {idx: 10, exp: gt, fn: gt_32768_uint64},
+ {idx: 10, exp: ge, fn: ge_32768_uint64},
+ {idx: 10, exp: eq, fn: eq_32768_uint64},
+ {idx: 10, exp: ne, fn: ne_32768_uint64},
+ {idx: 11, exp: lt, fn: lt_65534_uint64},
+ {idx: 11, exp: le, fn: le_65534_uint64},
+ {idx: 11, exp: gt, fn: gt_65534_uint64},
+ {idx: 11, exp: ge, fn: ge_65534_uint64},
+ {idx: 11, exp: eq, fn: eq_65534_uint64},
+ {idx: 11, exp: ne, fn: ne_65534_uint64},
+ {idx: 12, exp: lt, fn: lt_65535_uint64},
+ {idx: 12, exp: le, fn: le_65535_uint64},
+ {idx: 12, exp: gt, fn: gt_65535_uint64},
+ {idx: 12, exp: ge, fn: ge_65535_uint64},
+ {idx: 12, exp: eq, fn: eq_65535_uint64},
+ {idx: 12, exp: ne, fn: ne_65535_uint64},
+ {idx: 13, exp: lt, fn: lt_65536_uint64},
+ {idx: 13, exp: le, fn: le_65536_uint64},
+ {idx: 13, exp: gt, fn: gt_65536_uint64},
+ {idx: 13, exp: ge, fn: ge_65536_uint64},
+ {idx: 13, exp: eq, fn: eq_65536_uint64},
+ {idx: 13, exp: ne, fn: ne_65536_uint64},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint64},
+ {idx: 14, exp: le, fn: le_2147483646_uint64},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint64},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint64},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint64},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint64},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint64},
+ {idx: 15, exp: le, fn: le_2147483647_uint64},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint64},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint64},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint64},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint64},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint64},
+ {idx: 16, exp: le, fn: le_2147483648_uint64},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint64},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint64},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint64},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint64},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint64},
+ {idx: 17, exp: le, fn: le_4278190080_uint64},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint64},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint64},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint64},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint64},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint64},
+ {idx: 18, exp: le, fn: le_4294967294_uint64},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint64},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint64},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint64},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint64},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint64},
+ {idx: 19, exp: le, fn: le_4294967295_uint64},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint64},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint64},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint64},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint64},
+ {idx: 20, exp: lt, fn: lt_4294967296_uint64},
+ {idx: 20, exp: le, fn: le_4294967296_uint64},
+ {idx: 20, exp: gt, fn: gt_4294967296_uint64},
+ {idx: 20, exp: ge, fn: ge_4294967296_uint64},
+ {idx: 20, exp: eq, fn: eq_4294967296_uint64},
+ {idx: 20, exp: ne, fn: ne_4294967296_uint64},
+ {idx: 21, exp: lt, fn: lt_1095216660480_uint64},
+ {idx: 21, exp: le, fn: le_1095216660480_uint64},
+ {idx: 21, exp: gt, fn: gt_1095216660480_uint64},
+ {idx: 21, exp: ge, fn: ge_1095216660480_uint64},
+ {idx: 21, exp: eq, fn: eq_1095216660480_uint64},
+ {idx: 21, exp: ne, fn: ne_1095216660480_uint64},
+ {idx: 22, exp: lt, fn: lt_9223372036854775806_uint64},
+ {idx: 22, exp: le, fn: le_9223372036854775806_uint64},
+ {idx: 22, exp: gt, fn: gt_9223372036854775806_uint64},
+ {idx: 22, exp: ge, fn: ge_9223372036854775806_uint64},
+ {idx: 22, exp: eq, fn: eq_9223372036854775806_uint64},
+ {idx: 22, exp: ne, fn: ne_9223372036854775806_uint64},
+ {idx: 23, exp: lt, fn: lt_9223372036854775807_uint64},
+ {idx: 23, exp: le, fn: le_9223372036854775807_uint64},
+ {idx: 23, exp: gt, fn: gt_9223372036854775807_uint64},
+ {idx: 23, exp: ge, fn: ge_9223372036854775807_uint64},
+ {idx: 23, exp: eq, fn: eq_9223372036854775807_uint64},
+ {idx: 23, exp: ne, fn: ne_9223372036854775807_uint64},
+ {idx: 24, exp: lt, fn: lt_9223372036854775808_uint64},
+ {idx: 24, exp: le, fn: le_9223372036854775808_uint64},
+ {idx: 24, exp: gt, fn: gt_9223372036854775808_uint64},
+ {idx: 24, exp: ge, fn: ge_9223372036854775808_uint64},
+ {idx: 24, exp: eq, fn: eq_9223372036854775808_uint64},
+ {idx: 24, exp: ne, fn: ne_9223372036854775808_uint64},
+ {idx: 25, exp: lt, fn: lt_18374686479671623680_uint64},
+ {idx: 25, exp: le, fn: le_18374686479671623680_uint64},
+ {idx: 25, exp: gt, fn: gt_18374686479671623680_uint64},
+ {idx: 25, exp: ge, fn: ge_18374686479671623680_uint64},
+ {idx: 25, exp: eq, fn: eq_18374686479671623680_uint64},
+ {idx: 25, exp: ne, fn: ne_18374686479671623680_uint64},
+ {idx: 26, exp: lt, fn: lt_18446744073709551614_uint64},
+ {idx: 26, exp: le, fn: le_18446744073709551614_uint64},
+ {idx: 26, exp: gt, fn: gt_18446744073709551614_uint64},
+ {idx: 26, exp: ge, fn: ge_18446744073709551614_uint64},
+ {idx: 26, exp: eq, fn: eq_18446744073709551614_uint64},
+ {idx: 26, exp: ne, fn: ne_18446744073709551614_uint64},
+ {idx: 27, exp: lt, fn: lt_18446744073709551615_uint64},
+ {idx: 27, exp: le, fn: le_18446744073709551615_uint64},
+ {idx: 27, exp: gt, fn: gt_18446744073709551615_uint64},
+ {idx: 27, exp: ge, fn: ge_18446744073709551615_uint64},
+ {idx: 27, exp: eq, fn: eq_18446744073709551615_uint64},
+ {idx: 27, exp: ne, fn: ne_18446744073709551615_uint64},
+}
+
+// uint32 tests
+var uint32_vals = []uint32{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+}
+
+func lt_0_uint32(x uint32) bool { return x < 0 }
+func le_0_uint32(x uint32) bool { return x <= 0 }
+func gt_0_uint32(x uint32) bool { return x > 0 }
+func ge_0_uint32(x uint32) bool { return x >= 0 }
+func eq_0_uint32(x uint32) bool { return x == 0 }
+func ne_0_uint32(x uint32) bool { return x != 0 }
+func lt_1_uint32(x uint32) bool { return x < 1 }
+func le_1_uint32(x uint32) bool { return x <= 1 }
+func gt_1_uint32(x uint32) bool { return x > 1 }
+func ge_1_uint32(x uint32) bool { return x >= 1 }
+func eq_1_uint32(x uint32) bool { return x == 1 }
+func ne_1_uint32(x uint32) bool { return x != 1 }
+func lt_126_uint32(x uint32) bool { return x < 126 }
+func le_126_uint32(x uint32) bool { return x <= 126 }
+func gt_126_uint32(x uint32) bool { return x > 126 }
+func ge_126_uint32(x uint32) bool { return x >= 126 }
+func eq_126_uint32(x uint32) bool { return x == 126 }
+func ne_126_uint32(x uint32) bool { return x != 126 }
+func lt_127_uint32(x uint32) bool { return x < 127 }
+func le_127_uint32(x uint32) bool { return x <= 127 }
+func gt_127_uint32(x uint32) bool { return x > 127 }
+func ge_127_uint32(x uint32) bool { return x >= 127 }
+func eq_127_uint32(x uint32) bool { return x == 127 }
+func ne_127_uint32(x uint32) bool { return x != 127 }
+func lt_128_uint32(x uint32) bool { return x < 128 }
+func le_128_uint32(x uint32) bool { return x <= 128 }
+func gt_128_uint32(x uint32) bool { return x > 128 }
+func ge_128_uint32(x uint32) bool { return x >= 128 }
+func eq_128_uint32(x uint32) bool { return x == 128 }
+func ne_128_uint32(x uint32) bool { return x != 128 }
+func lt_254_uint32(x uint32) bool { return x < 254 }
+func le_254_uint32(x uint32) bool { return x <= 254 }
+func gt_254_uint32(x uint32) bool { return x > 254 }
+func ge_254_uint32(x uint32) bool { return x >= 254 }
+func eq_254_uint32(x uint32) bool { return x == 254 }
+func ne_254_uint32(x uint32) bool { return x != 254 }
+func lt_255_uint32(x uint32) bool { return x < 255 }
+func le_255_uint32(x uint32) bool { return x <= 255 }
+func gt_255_uint32(x uint32) bool { return x > 255 }
+func ge_255_uint32(x uint32) bool { return x >= 255 }
+func eq_255_uint32(x uint32) bool { return x == 255 }
+func ne_255_uint32(x uint32) bool { return x != 255 }
+func lt_256_uint32(x uint32) bool { return x < 256 }
+func le_256_uint32(x uint32) bool { return x <= 256 }
+func gt_256_uint32(x uint32) bool { return x > 256 }
+func ge_256_uint32(x uint32) bool { return x >= 256 }
+func eq_256_uint32(x uint32) bool { return x == 256 }
+func ne_256_uint32(x uint32) bool { return x != 256 }
+func lt_32766_uint32(x uint32) bool { return x < 32766 }
+func le_32766_uint32(x uint32) bool { return x <= 32766 }
+func gt_32766_uint32(x uint32) bool { return x > 32766 }
+func ge_32766_uint32(x uint32) bool { return x >= 32766 }
+func eq_32766_uint32(x uint32) bool { return x == 32766 }
+func ne_32766_uint32(x uint32) bool { return x != 32766 }
+func lt_32767_uint32(x uint32) bool { return x < 32767 }
+func le_32767_uint32(x uint32) bool { return x <= 32767 }
+func gt_32767_uint32(x uint32) bool { return x > 32767 }
+func ge_32767_uint32(x uint32) bool { return x >= 32767 }
+func eq_32767_uint32(x uint32) bool { return x == 32767 }
+func ne_32767_uint32(x uint32) bool { return x != 32767 }
+func lt_32768_uint32(x uint32) bool { return x < 32768 }
+func le_32768_uint32(x uint32) bool { return x <= 32768 }
+func gt_32768_uint32(x uint32) bool { return x > 32768 }
+func ge_32768_uint32(x uint32) bool { return x >= 32768 }
+func eq_32768_uint32(x uint32) bool { return x == 32768 }
+func ne_32768_uint32(x uint32) bool { return x != 32768 }
+func lt_65534_uint32(x uint32) bool { return x < 65534 }
+func le_65534_uint32(x uint32) bool { return x <= 65534 }
+func gt_65534_uint32(x uint32) bool { return x > 65534 }
+func ge_65534_uint32(x uint32) bool { return x >= 65534 }
+func eq_65534_uint32(x uint32) bool { return x == 65534 }
+func ne_65534_uint32(x uint32) bool { return x != 65534 }
+func lt_65535_uint32(x uint32) bool { return x < 65535 }
+func le_65535_uint32(x uint32) bool { return x <= 65535 }
+func gt_65535_uint32(x uint32) bool { return x > 65535 }
+func ge_65535_uint32(x uint32) bool { return x >= 65535 }
+func eq_65535_uint32(x uint32) bool { return x == 65535 }
+func ne_65535_uint32(x uint32) bool { return x != 65535 }
+func lt_65536_uint32(x uint32) bool { return x < 65536 }
+func le_65536_uint32(x uint32) bool { return x <= 65536 }
+func gt_65536_uint32(x uint32) bool { return x > 65536 }
+func ge_65536_uint32(x uint32) bool { return x >= 65536 }
+func eq_65536_uint32(x uint32) bool { return x == 65536 }
+func ne_65536_uint32(x uint32) bool { return x != 65536 }
+func lt_2147483646_uint32(x uint32) bool { return x < 2147483646 }
+func le_2147483646_uint32(x uint32) bool { return x <= 2147483646 }
+func gt_2147483646_uint32(x uint32) bool { return x > 2147483646 }
+func ge_2147483646_uint32(x uint32) bool { return x >= 2147483646 }
+func eq_2147483646_uint32(x uint32) bool { return x == 2147483646 }
+func ne_2147483646_uint32(x uint32) bool { return x != 2147483646 }
+func lt_2147483647_uint32(x uint32) bool { return x < 2147483647 }
+func le_2147483647_uint32(x uint32) bool { return x <= 2147483647 }
+func gt_2147483647_uint32(x uint32) bool { return x > 2147483647 }
+func ge_2147483647_uint32(x uint32) bool { return x >= 2147483647 }
+func eq_2147483647_uint32(x uint32) bool { return x == 2147483647 }
+func ne_2147483647_uint32(x uint32) bool { return x != 2147483647 }
+func lt_2147483648_uint32(x uint32) bool { return x < 2147483648 }
+func le_2147483648_uint32(x uint32) bool { return x <= 2147483648 }
+func gt_2147483648_uint32(x uint32) bool { return x > 2147483648 }
+func ge_2147483648_uint32(x uint32) bool { return x >= 2147483648 }
+func eq_2147483648_uint32(x uint32) bool { return x == 2147483648 }
+func ne_2147483648_uint32(x uint32) bool { return x != 2147483648 }
+func lt_4278190080_uint32(x uint32) bool { return x < 4278190080 }
+func le_4278190080_uint32(x uint32) bool { return x <= 4278190080 }
+func gt_4278190080_uint32(x uint32) bool { return x > 4278190080 }
+func ge_4278190080_uint32(x uint32) bool { return x >= 4278190080 }
+func eq_4278190080_uint32(x uint32) bool { return x == 4278190080 }
+func ne_4278190080_uint32(x uint32) bool { return x != 4278190080 }
+func lt_4294967294_uint32(x uint32) bool { return x < 4294967294 }
+func le_4294967294_uint32(x uint32) bool { return x <= 4294967294 }
+func gt_4294967294_uint32(x uint32) bool { return x > 4294967294 }
+func ge_4294967294_uint32(x uint32) bool { return x >= 4294967294 }
+func eq_4294967294_uint32(x uint32) bool { return x == 4294967294 }
+func ne_4294967294_uint32(x uint32) bool { return x != 4294967294 }
+func lt_4294967295_uint32(x uint32) bool { return x < 4294967295 }
+func le_4294967295_uint32(x uint32) bool { return x <= 4294967295 }
+func gt_4294967295_uint32(x uint32) bool { return x > 4294967295 }
+func ge_4294967295_uint32(x uint32) bool { return x >= 4294967295 }
+func eq_4294967295_uint32(x uint32) bool { return x == 4294967295 }
+func ne_4294967295_uint32(x uint32) bool { return x != 4294967295 }
+
+var uint32_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint32},
+ {idx: 0, exp: le, fn: le_0_uint32},
+ {idx: 0, exp: gt, fn: gt_0_uint32},
+ {idx: 0, exp: ge, fn: ge_0_uint32},
+ {idx: 0, exp: eq, fn: eq_0_uint32},
+ {idx: 0, exp: ne, fn: ne_0_uint32},
+ {idx: 1, exp: lt, fn: lt_1_uint32},
+ {idx: 1, exp: le, fn: le_1_uint32},
+ {idx: 1, exp: gt, fn: gt_1_uint32},
+ {idx: 1, exp: ge, fn: ge_1_uint32},
+ {idx: 1, exp: eq, fn: eq_1_uint32},
+ {idx: 1, exp: ne, fn: ne_1_uint32},
+ {idx: 2, exp: lt, fn: lt_126_uint32},
+ {idx: 2, exp: le, fn: le_126_uint32},
+ {idx: 2, exp: gt, fn: gt_126_uint32},
+ {idx: 2, exp: ge, fn: ge_126_uint32},
+ {idx: 2, exp: eq, fn: eq_126_uint32},
+ {idx: 2, exp: ne, fn: ne_126_uint32},
+ {idx: 3, exp: lt, fn: lt_127_uint32},
+ {idx: 3, exp: le, fn: le_127_uint32},
+ {idx: 3, exp: gt, fn: gt_127_uint32},
+ {idx: 3, exp: ge, fn: ge_127_uint32},
+ {idx: 3, exp: eq, fn: eq_127_uint32},
+ {idx: 3, exp: ne, fn: ne_127_uint32},
+ {idx: 4, exp: lt, fn: lt_128_uint32},
+ {idx: 4, exp: le, fn: le_128_uint32},
+ {idx: 4, exp: gt, fn: gt_128_uint32},
+ {idx: 4, exp: ge, fn: ge_128_uint32},
+ {idx: 4, exp: eq, fn: eq_128_uint32},
+ {idx: 4, exp: ne, fn: ne_128_uint32},
+ {idx: 5, exp: lt, fn: lt_254_uint32},
+ {idx: 5, exp: le, fn: le_254_uint32},
+ {idx: 5, exp: gt, fn: gt_254_uint32},
+ {idx: 5, exp: ge, fn: ge_254_uint32},
+ {idx: 5, exp: eq, fn: eq_254_uint32},
+ {idx: 5, exp: ne, fn: ne_254_uint32},
+ {idx: 6, exp: lt, fn: lt_255_uint32},
+ {idx: 6, exp: le, fn: le_255_uint32},
+ {idx: 6, exp: gt, fn: gt_255_uint32},
+ {idx: 6, exp: ge, fn: ge_255_uint32},
+ {idx: 6, exp: eq, fn: eq_255_uint32},
+ {idx: 6, exp: ne, fn: ne_255_uint32},
+ {idx: 7, exp: lt, fn: lt_256_uint32},
+ {idx: 7, exp: le, fn: le_256_uint32},
+ {idx: 7, exp: gt, fn: gt_256_uint32},
+ {idx: 7, exp: ge, fn: ge_256_uint32},
+ {idx: 7, exp: eq, fn: eq_256_uint32},
+ {idx: 7, exp: ne, fn: ne_256_uint32},
+ {idx: 8, exp: lt, fn: lt_32766_uint32},
+ {idx: 8, exp: le, fn: le_32766_uint32},
+ {idx: 8, exp: gt, fn: gt_32766_uint32},
+ {idx: 8, exp: ge, fn: ge_32766_uint32},
+ {idx: 8, exp: eq, fn: eq_32766_uint32},
+ {idx: 8, exp: ne, fn: ne_32766_uint32},
+ {idx: 9, exp: lt, fn: lt_32767_uint32},
+ {idx: 9, exp: le, fn: le_32767_uint32},
+ {idx: 9, exp: gt, fn: gt_32767_uint32},
+ {idx: 9, exp: ge, fn: ge_32767_uint32},
+ {idx: 9, exp: eq, fn: eq_32767_uint32},
+ {idx: 9, exp: ne, fn: ne_32767_uint32},
+ {idx: 10, exp: lt, fn: lt_32768_uint32},
+ {idx: 10, exp: le, fn: le_32768_uint32},
+ {idx: 10, exp: gt, fn: gt_32768_uint32},
+ {idx: 10, exp: ge, fn: ge_32768_uint32},
+ {idx: 10, exp: eq, fn: eq_32768_uint32},
+ {idx: 10, exp: ne, fn: ne_32768_uint32},
+ {idx: 11, exp: lt, fn: lt_65534_uint32},
+ {idx: 11, exp: le, fn: le_65534_uint32},
+ {idx: 11, exp: gt, fn: gt_65534_uint32},
+ {idx: 11, exp: ge, fn: ge_65534_uint32},
+ {idx: 11, exp: eq, fn: eq_65534_uint32},
+ {idx: 11, exp: ne, fn: ne_65534_uint32},
+ {idx: 12, exp: lt, fn: lt_65535_uint32},
+ {idx: 12, exp: le, fn: le_65535_uint32},
+ {idx: 12, exp: gt, fn: gt_65535_uint32},
+ {idx: 12, exp: ge, fn: ge_65535_uint32},
+ {idx: 12, exp: eq, fn: eq_65535_uint32},
+ {idx: 12, exp: ne, fn: ne_65535_uint32},
+ {idx: 13, exp: lt, fn: lt_65536_uint32},
+ {idx: 13, exp: le, fn: le_65536_uint32},
+ {idx: 13, exp: gt, fn: gt_65536_uint32},
+ {idx: 13, exp: ge, fn: ge_65536_uint32},
+ {idx: 13, exp: eq, fn: eq_65536_uint32},
+ {idx: 13, exp: ne, fn: ne_65536_uint32},
+ {idx: 14, exp: lt, fn: lt_2147483646_uint32},
+ {idx: 14, exp: le, fn: le_2147483646_uint32},
+ {idx: 14, exp: gt, fn: gt_2147483646_uint32},
+ {idx: 14, exp: ge, fn: ge_2147483646_uint32},
+ {idx: 14, exp: eq, fn: eq_2147483646_uint32},
+ {idx: 14, exp: ne, fn: ne_2147483646_uint32},
+ {idx: 15, exp: lt, fn: lt_2147483647_uint32},
+ {idx: 15, exp: le, fn: le_2147483647_uint32},
+ {idx: 15, exp: gt, fn: gt_2147483647_uint32},
+ {idx: 15, exp: ge, fn: ge_2147483647_uint32},
+ {idx: 15, exp: eq, fn: eq_2147483647_uint32},
+ {idx: 15, exp: ne, fn: ne_2147483647_uint32},
+ {idx: 16, exp: lt, fn: lt_2147483648_uint32},
+ {idx: 16, exp: le, fn: le_2147483648_uint32},
+ {idx: 16, exp: gt, fn: gt_2147483648_uint32},
+ {idx: 16, exp: ge, fn: ge_2147483648_uint32},
+ {idx: 16, exp: eq, fn: eq_2147483648_uint32},
+ {idx: 16, exp: ne, fn: ne_2147483648_uint32},
+ {idx: 17, exp: lt, fn: lt_4278190080_uint32},
+ {idx: 17, exp: le, fn: le_4278190080_uint32},
+ {idx: 17, exp: gt, fn: gt_4278190080_uint32},
+ {idx: 17, exp: ge, fn: ge_4278190080_uint32},
+ {idx: 17, exp: eq, fn: eq_4278190080_uint32},
+ {idx: 17, exp: ne, fn: ne_4278190080_uint32},
+ {idx: 18, exp: lt, fn: lt_4294967294_uint32},
+ {idx: 18, exp: le, fn: le_4294967294_uint32},
+ {idx: 18, exp: gt, fn: gt_4294967294_uint32},
+ {idx: 18, exp: ge, fn: ge_4294967294_uint32},
+ {idx: 18, exp: eq, fn: eq_4294967294_uint32},
+ {idx: 18, exp: ne, fn: ne_4294967294_uint32},
+ {idx: 19, exp: lt, fn: lt_4294967295_uint32},
+ {idx: 19, exp: le, fn: le_4294967295_uint32},
+ {idx: 19, exp: gt, fn: gt_4294967295_uint32},
+ {idx: 19, exp: ge, fn: ge_4294967295_uint32},
+ {idx: 19, exp: eq, fn: eq_4294967295_uint32},
+ {idx: 19, exp: ne, fn: ne_4294967295_uint32},
+}
+
+// uint16 tests
+var uint16_vals = []uint16{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+}
+
+func lt_0_uint16(x uint16) bool { return x < 0 }
+func le_0_uint16(x uint16) bool { return x <= 0 }
+func gt_0_uint16(x uint16) bool { return x > 0 }
+func ge_0_uint16(x uint16) bool { return x >= 0 }
+func eq_0_uint16(x uint16) bool { return x == 0 }
+func ne_0_uint16(x uint16) bool { return x != 0 }
+func lt_1_uint16(x uint16) bool { return x < 1 }
+func le_1_uint16(x uint16) bool { return x <= 1 }
+func gt_1_uint16(x uint16) bool { return x > 1 }
+func ge_1_uint16(x uint16) bool { return x >= 1 }
+func eq_1_uint16(x uint16) bool { return x == 1 }
+func ne_1_uint16(x uint16) bool { return x != 1 }
+func lt_126_uint16(x uint16) bool { return x < 126 }
+func le_126_uint16(x uint16) bool { return x <= 126 }
+func gt_126_uint16(x uint16) bool { return x > 126 }
+func ge_126_uint16(x uint16) bool { return x >= 126 }
+func eq_126_uint16(x uint16) bool { return x == 126 }
+func ne_126_uint16(x uint16) bool { return x != 126 }
+func lt_127_uint16(x uint16) bool { return x < 127 }
+func le_127_uint16(x uint16) bool { return x <= 127 }
+func gt_127_uint16(x uint16) bool { return x > 127 }
+func ge_127_uint16(x uint16) bool { return x >= 127 }
+func eq_127_uint16(x uint16) bool { return x == 127 }
+func ne_127_uint16(x uint16) bool { return x != 127 }
+func lt_128_uint16(x uint16) bool { return x < 128 }
+func le_128_uint16(x uint16) bool { return x <= 128 }
+func gt_128_uint16(x uint16) bool { return x > 128 }
+func ge_128_uint16(x uint16) bool { return x >= 128 }
+func eq_128_uint16(x uint16) bool { return x == 128 }
+func ne_128_uint16(x uint16) bool { return x != 128 }
+func lt_254_uint16(x uint16) bool { return x < 254 }
+func le_254_uint16(x uint16) bool { return x <= 254 }
+func gt_254_uint16(x uint16) bool { return x > 254 }
+func ge_254_uint16(x uint16) bool { return x >= 254 }
+func eq_254_uint16(x uint16) bool { return x == 254 }
+func ne_254_uint16(x uint16) bool { return x != 254 }
+func lt_255_uint16(x uint16) bool { return x < 255 }
+func le_255_uint16(x uint16) bool { return x <= 255 }
+func gt_255_uint16(x uint16) bool { return x > 255 }
+func ge_255_uint16(x uint16) bool { return x >= 255 }
+func eq_255_uint16(x uint16) bool { return x == 255 }
+func ne_255_uint16(x uint16) bool { return x != 255 }
+func lt_256_uint16(x uint16) bool { return x < 256 }
+func le_256_uint16(x uint16) bool { return x <= 256 }
+func gt_256_uint16(x uint16) bool { return x > 256 }
+func ge_256_uint16(x uint16) bool { return x >= 256 }
+func eq_256_uint16(x uint16) bool { return x == 256 }
+func ne_256_uint16(x uint16) bool { return x != 256 }
+func lt_32766_uint16(x uint16) bool { return x < 32766 }
+func le_32766_uint16(x uint16) bool { return x <= 32766 }
+func gt_32766_uint16(x uint16) bool { return x > 32766 }
+func ge_32766_uint16(x uint16) bool { return x >= 32766 }
+func eq_32766_uint16(x uint16) bool { return x == 32766 }
+func ne_32766_uint16(x uint16) bool { return x != 32766 }
+func lt_32767_uint16(x uint16) bool { return x < 32767 }
+func le_32767_uint16(x uint16) bool { return x <= 32767 }
+func gt_32767_uint16(x uint16) bool { return x > 32767 }
+func ge_32767_uint16(x uint16) bool { return x >= 32767 }
+func eq_32767_uint16(x uint16) bool { return x == 32767 }
+func ne_32767_uint16(x uint16) bool { return x != 32767 }
+func lt_32768_uint16(x uint16) bool { return x < 32768 }
+func le_32768_uint16(x uint16) bool { return x <= 32768 }
+func gt_32768_uint16(x uint16) bool { return x > 32768 }
+func ge_32768_uint16(x uint16) bool { return x >= 32768 }
+func eq_32768_uint16(x uint16) bool { return x == 32768 }
+func ne_32768_uint16(x uint16) bool { return x != 32768 }
+func lt_65534_uint16(x uint16) bool { return x < 65534 }
+func le_65534_uint16(x uint16) bool { return x <= 65534 }
+func gt_65534_uint16(x uint16) bool { return x > 65534 }
+func ge_65534_uint16(x uint16) bool { return x >= 65534 }
+func eq_65534_uint16(x uint16) bool { return x == 65534 }
+func ne_65534_uint16(x uint16) bool { return x != 65534 }
+func lt_65535_uint16(x uint16) bool { return x < 65535 }
+func le_65535_uint16(x uint16) bool { return x <= 65535 }
+func gt_65535_uint16(x uint16) bool { return x > 65535 }
+func ge_65535_uint16(x uint16) bool { return x >= 65535 }
+func eq_65535_uint16(x uint16) bool { return x == 65535 }
+func ne_65535_uint16(x uint16) bool { return x != 65535 }
+
+var uint16_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint16},
+ {idx: 0, exp: le, fn: le_0_uint16},
+ {idx: 0, exp: gt, fn: gt_0_uint16},
+ {idx: 0, exp: ge, fn: ge_0_uint16},
+ {idx: 0, exp: eq, fn: eq_0_uint16},
+ {idx: 0, exp: ne, fn: ne_0_uint16},
+ {idx: 1, exp: lt, fn: lt_1_uint16},
+ {idx: 1, exp: le, fn: le_1_uint16},
+ {idx: 1, exp: gt, fn: gt_1_uint16},
+ {idx: 1, exp: ge, fn: ge_1_uint16},
+ {idx: 1, exp: eq, fn: eq_1_uint16},
+ {idx: 1, exp: ne, fn: ne_1_uint16},
+ {idx: 2, exp: lt, fn: lt_126_uint16},
+ {idx: 2, exp: le, fn: le_126_uint16},
+ {idx: 2, exp: gt, fn: gt_126_uint16},
+ {idx: 2, exp: ge, fn: ge_126_uint16},
+ {idx: 2, exp: eq, fn: eq_126_uint16},
+ {idx: 2, exp: ne, fn: ne_126_uint16},
+ {idx: 3, exp: lt, fn: lt_127_uint16},
+ {idx: 3, exp: le, fn: le_127_uint16},
+ {idx: 3, exp: gt, fn: gt_127_uint16},
+ {idx: 3, exp: ge, fn: ge_127_uint16},
+ {idx: 3, exp: eq, fn: eq_127_uint16},
+ {idx: 3, exp: ne, fn: ne_127_uint16},
+ {idx: 4, exp: lt, fn: lt_128_uint16},
+ {idx: 4, exp: le, fn: le_128_uint16},
+ {idx: 4, exp: gt, fn: gt_128_uint16},
+ {idx: 4, exp: ge, fn: ge_128_uint16},
+ {idx: 4, exp: eq, fn: eq_128_uint16},
+ {idx: 4, exp: ne, fn: ne_128_uint16},
+ {idx: 5, exp: lt, fn: lt_254_uint16},
+ {idx: 5, exp: le, fn: le_254_uint16},
+ {idx: 5, exp: gt, fn: gt_254_uint16},
+ {idx: 5, exp: ge, fn: ge_254_uint16},
+ {idx: 5, exp: eq, fn: eq_254_uint16},
+ {idx: 5, exp: ne, fn: ne_254_uint16},
+ {idx: 6, exp: lt, fn: lt_255_uint16},
+ {idx: 6, exp: le, fn: le_255_uint16},
+ {idx: 6, exp: gt, fn: gt_255_uint16},
+ {idx: 6, exp: ge, fn: ge_255_uint16},
+ {idx: 6, exp: eq, fn: eq_255_uint16},
+ {idx: 6, exp: ne, fn: ne_255_uint16},
+ {idx: 7, exp: lt, fn: lt_256_uint16},
+ {idx: 7, exp: le, fn: le_256_uint16},
+ {idx: 7, exp: gt, fn: gt_256_uint16},
+ {idx: 7, exp: ge, fn: ge_256_uint16},
+ {idx: 7, exp: eq, fn: eq_256_uint16},
+ {idx: 7, exp: ne, fn: ne_256_uint16},
+ {idx: 8, exp: lt, fn: lt_32766_uint16},
+ {idx: 8, exp: le, fn: le_32766_uint16},
+ {idx: 8, exp: gt, fn: gt_32766_uint16},
+ {idx: 8, exp: ge, fn: ge_32766_uint16},
+ {idx: 8, exp: eq, fn: eq_32766_uint16},
+ {idx: 8, exp: ne, fn: ne_32766_uint16},
+ {idx: 9, exp: lt, fn: lt_32767_uint16},
+ {idx: 9, exp: le, fn: le_32767_uint16},
+ {idx: 9, exp: gt, fn: gt_32767_uint16},
+ {idx: 9, exp: ge, fn: ge_32767_uint16},
+ {idx: 9, exp: eq, fn: eq_32767_uint16},
+ {idx: 9, exp: ne, fn: ne_32767_uint16},
+ {idx: 10, exp: lt, fn: lt_32768_uint16},
+ {idx: 10, exp: le, fn: le_32768_uint16},
+ {idx: 10, exp: gt, fn: gt_32768_uint16},
+ {idx: 10, exp: ge, fn: ge_32768_uint16},
+ {idx: 10, exp: eq, fn: eq_32768_uint16},
+ {idx: 10, exp: ne, fn: ne_32768_uint16},
+ {idx: 11, exp: lt, fn: lt_65534_uint16},
+ {idx: 11, exp: le, fn: le_65534_uint16},
+ {idx: 11, exp: gt, fn: gt_65534_uint16},
+ {idx: 11, exp: ge, fn: ge_65534_uint16},
+ {idx: 11, exp: eq, fn: eq_65534_uint16},
+ {idx: 11, exp: ne, fn: ne_65534_uint16},
+ {idx: 12, exp: lt, fn: lt_65535_uint16},
+ {idx: 12, exp: le, fn: le_65535_uint16},
+ {idx: 12, exp: gt, fn: gt_65535_uint16},
+ {idx: 12, exp: ge, fn: ge_65535_uint16},
+ {idx: 12, exp: eq, fn: eq_65535_uint16},
+ {idx: 12, exp: ne, fn: ne_65535_uint16},
+}
+
+// uint8 tests
+var uint8_vals = []uint8{
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+}
+
+func lt_0_uint8(x uint8) bool { return x < 0 }
+func le_0_uint8(x uint8) bool { return x <= 0 }
+func gt_0_uint8(x uint8) bool { return x > 0 }
+func ge_0_uint8(x uint8) bool { return x >= 0 }
+func eq_0_uint8(x uint8) bool { return x == 0 }
+func ne_0_uint8(x uint8) bool { return x != 0 }
+func lt_1_uint8(x uint8) bool { return x < 1 }
+func le_1_uint8(x uint8) bool { return x <= 1 }
+func gt_1_uint8(x uint8) bool { return x > 1 }
+func ge_1_uint8(x uint8) bool { return x >= 1 }
+func eq_1_uint8(x uint8) bool { return x == 1 }
+func ne_1_uint8(x uint8) bool { return x != 1 }
+func lt_126_uint8(x uint8) bool { return x < 126 }
+func le_126_uint8(x uint8) bool { return x <= 126 }
+func gt_126_uint8(x uint8) bool { return x > 126 }
+func ge_126_uint8(x uint8) bool { return x >= 126 }
+func eq_126_uint8(x uint8) bool { return x == 126 }
+func ne_126_uint8(x uint8) bool { return x != 126 }
+func lt_127_uint8(x uint8) bool { return x < 127 }
+func le_127_uint8(x uint8) bool { return x <= 127 }
+func gt_127_uint8(x uint8) bool { return x > 127 }
+func ge_127_uint8(x uint8) bool { return x >= 127 }
+func eq_127_uint8(x uint8) bool { return x == 127 }
+func ne_127_uint8(x uint8) bool { return x != 127 }
+func lt_128_uint8(x uint8) bool { return x < 128 }
+func le_128_uint8(x uint8) bool { return x <= 128 }
+func gt_128_uint8(x uint8) bool { return x > 128 }
+func ge_128_uint8(x uint8) bool { return x >= 128 }
+func eq_128_uint8(x uint8) bool { return x == 128 }
+func ne_128_uint8(x uint8) bool { return x != 128 }
+func lt_254_uint8(x uint8) bool { return x < 254 }
+func le_254_uint8(x uint8) bool { return x <= 254 }
+func gt_254_uint8(x uint8) bool { return x > 254 }
+func ge_254_uint8(x uint8) bool { return x >= 254 }
+func eq_254_uint8(x uint8) bool { return x == 254 }
+func ne_254_uint8(x uint8) bool { return x != 254 }
+func lt_255_uint8(x uint8) bool { return x < 255 }
+func le_255_uint8(x uint8) bool { return x <= 255 }
+func gt_255_uint8(x uint8) bool { return x > 255 }
+func ge_255_uint8(x uint8) bool { return x >= 255 }
+func eq_255_uint8(x uint8) bool { return x == 255 }
+func ne_255_uint8(x uint8) bool { return x != 255 }
+
+var uint8_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(uint8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_0_uint8},
+ {idx: 0, exp: le, fn: le_0_uint8},
+ {idx: 0, exp: gt, fn: gt_0_uint8},
+ {idx: 0, exp: ge, fn: ge_0_uint8},
+ {idx: 0, exp: eq, fn: eq_0_uint8},
+ {idx: 0, exp: ne, fn: ne_0_uint8},
+ {idx: 1, exp: lt, fn: lt_1_uint8},
+ {idx: 1, exp: le, fn: le_1_uint8},
+ {idx: 1, exp: gt, fn: gt_1_uint8},
+ {idx: 1, exp: ge, fn: ge_1_uint8},
+ {idx: 1, exp: eq, fn: eq_1_uint8},
+ {idx: 1, exp: ne, fn: ne_1_uint8},
+ {idx: 2, exp: lt, fn: lt_126_uint8},
+ {idx: 2, exp: le, fn: le_126_uint8},
+ {idx: 2, exp: gt, fn: gt_126_uint8},
+ {idx: 2, exp: ge, fn: ge_126_uint8},
+ {idx: 2, exp: eq, fn: eq_126_uint8},
+ {idx: 2, exp: ne, fn: ne_126_uint8},
+ {idx: 3, exp: lt, fn: lt_127_uint8},
+ {idx: 3, exp: le, fn: le_127_uint8},
+ {idx: 3, exp: gt, fn: gt_127_uint8},
+ {idx: 3, exp: ge, fn: ge_127_uint8},
+ {idx: 3, exp: eq, fn: eq_127_uint8},
+ {idx: 3, exp: ne, fn: ne_127_uint8},
+ {idx: 4, exp: lt, fn: lt_128_uint8},
+ {idx: 4, exp: le, fn: le_128_uint8},
+ {idx: 4, exp: gt, fn: gt_128_uint8},
+ {idx: 4, exp: ge, fn: ge_128_uint8},
+ {idx: 4, exp: eq, fn: eq_128_uint8},
+ {idx: 4, exp: ne, fn: ne_128_uint8},
+ {idx: 5, exp: lt, fn: lt_254_uint8},
+ {idx: 5, exp: le, fn: le_254_uint8},
+ {idx: 5, exp: gt, fn: gt_254_uint8},
+ {idx: 5, exp: ge, fn: ge_254_uint8},
+ {idx: 5, exp: eq, fn: eq_254_uint8},
+ {idx: 5, exp: ne, fn: ne_254_uint8},
+ {idx: 6, exp: lt, fn: lt_255_uint8},
+ {idx: 6, exp: le, fn: le_255_uint8},
+ {idx: 6, exp: gt, fn: gt_255_uint8},
+ {idx: 6, exp: ge, fn: ge_255_uint8},
+ {idx: 6, exp: eq, fn: eq_255_uint8},
+ {idx: 6, exp: ne, fn: ne_255_uint8},
+}
+
+// int64 tests
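+// Most of these constants sit just below, at, or just above the 8-, 16-, 32-
+// and 64-bit signed and unsigned boundaries, so comparisons against constants
+// of every width class are covered.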
+var int64_vals = []int64{
+ -9223372036854775808,
+ -9223372036854775807,
+ -2147483649,
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+ 2147483648,
+ 4278190080,
+ 4294967294,
+ 4294967295,
+ 4294967296,
+ 1095216660480,
+ 9223372036854775806,
+ 9223372036854775807,
+}
+
+func lt_neg9223372036854775808_int64(x int64) bool { return x < -9223372036854775808 }
+func le_neg9223372036854775808_int64(x int64) bool { return x <= -9223372036854775808 }
+func gt_neg9223372036854775808_int64(x int64) bool { return x > -9223372036854775808 }
+func ge_neg9223372036854775808_int64(x int64) bool { return x >= -9223372036854775808 }
+func eq_neg9223372036854775808_int64(x int64) bool { return x == -9223372036854775808 }
+func ne_neg9223372036854775808_int64(x int64) bool { return x != -9223372036854775808 }
+func lt_neg9223372036854775807_int64(x int64) bool { return x < -9223372036854775807 }
+func le_neg9223372036854775807_int64(x int64) bool { return x <= -9223372036854775807 }
+func gt_neg9223372036854775807_int64(x int64) bool { return x > -9223372036854775807 }
+func ge_neg9223372036854775807_int64(x int64) bool { return x >= -9223372036854775807 }
+func eq_neg9223372036854775807_int64(x int64) bool { return x == -9223372036854775807 }
+func ne_neg9223372036854775807_int64(x int64) bool { return x != -9223372036854775807 }
+func lt_neg2147483649_int64(x int64) bool { return x < -2147483649 }
+func le_neg2147483649_int64(x int64) bool { return x <= -2147483649 }
+func gt_neg2147483649_int64(x int64) bool { return x > -2147483649 }
+func ge_neg2147483649_int64(x int64) bool { return x >= -2147483649 }
+func eq_neg2147483649_int64(x int64) bool { return x == -2147483649 }
+func ne_neg2147483649_int64(x int64) bool { return x != -2147483649 }
+func lt_neg2147483648_int64(x int64) bool { return x < -2147483648 }
+func le_neg2147483648_int64(x int64) bool { return x <= -2147483648 }
+func gt_neg2147483648_int64(x int64) bool { return x > -2147483648 }
+func ge_neg2147483648_int64(x int64) bool { return x >= -2147483648 }
+func eq_neg2147483648_int64(x int64) bool { return x == -2147483648 }
+func ne_neg2147483648_int64(x int64) bool { return x != -2147483648 }
+func lt_neg2147483647_int64(x int64) bool { return x < -2147483647 }
+func le_neg2147483647_int64(x int64) bool { return x <= -2147483647 }
+func gt_neg2147483647_int64(x int64) bool { return x > -2147483647 }
+func ge_neg2147483647_int64(x int64) bool { return x >= -2147483647 }
+func eq_neg2147483647_int64(x int64) bool { return x == -2147483647 }
+func ne_neg2147483647_int64(x int64) bool { return x != -2147483647 }
+func lt_neg32769_int64(x int64) bool { return x < -32769 }
+func le_neg32769_int64(x int64) bool { return x <= -32769 }
+func gt_neg32769_int64(x int64) bool { return x > -32769 }
+func ge_neg32769_int64(x int64) bool { return x >= -32769 }
+func eq_neg32769_int64(x int64) bool { return x == -32769 }
+func ne_neg32769_int64(x int64) bool { return x != -32769 }
+func lt_neg32768_int64(x int64) bool { return x < -32768 }
+func le_neg32768_int64(x int64) bool { return x <= -32768 }
+func gt_neg32768_int64(x int64) bool { return x > -32768 }
+func ge_neg32768_int64(x int64) bool { return x >= -32768 }
+func eq_neg32768_int64(x int64) bool { return x == -32768 }
+func ne_neg32768_int64(x int64) bool { return x != -32768 }
+func lt_neg32767_int64(x int64) bool { return x < -32767 }
+func le_neg32767_int64(x int64) bool { return x <= -32767 }
+func gt_neg32767_int64(x int64) bool { return x > -32767 }
+func ge_neg32767_int64(x int64) bool { return x >= -32767 }
+func eq_neg32767_int64(x int64) bool { return x == -32767 }
+func ne_neg32767_int64(x int64) bool { return x != -32767 }
+func lt_neg129_int64(x int64) bool { return x < -129 }
+func le_neg129_int64(x int64) bool { return x <= -129 }
+func gt_neg129_int64(x int64) bool { return x > -129 }
+func ge_neg129_int64(x int64) bool { return x >= -129 }
+func eq_neg129_int64(x int64) bool { return x == -129 }
+func ne_neg129_int64(x int64) bool { return x != -129 }
+func lt_neg128_int64(x int64) bool { return x < -128 }
+func le_neg128_int64(x int64) bool { return x <= -128 }
+func gt_neg128_int64(x int64) bool { return x > -128 }
+func ge_neg128_int64(x int64) bool { return x >= -128 }
+func eq_neg128_int64(x int64) bool { return x == -128 }
+func ne_neg128_int64(x int64) bool { return x != -128 }
+func lt_neg127_int64(x int64) bool { return x < -127 }
+func le_neg127_int64(x int64) bool { return x <= -127 }
+func gt_neg127_int64(x int64) bool { return x > -127 }
+func ge_neg127_int64(x int64) bool { return x >= -127 }
+func eq_neg127_int64(x int64) bool { return x == -127 }
+func ne_neg127_int64(x int64) bool { return x != -127 }
+func lt_neg1_int64(x int64) bool { return x < -1 }
+func le_neg1_int64(x int64) bool { return x <= -1 }
+func gt_neg1_int64(x int64) bool { return x > -1 }
+func ge_neg1_int64(x int64) bool { return x >= -1 }
+func eq_neg1_int64(x int64) bool { return x == -1 }
+func ne_neg1_int64(x int64) bool { return x != -1 }
+func lt_0_int64(x int64) bool { return x < 0 }
+func le_0_int64(x int64) bool { return x <= 0 }
+func gt_0_int64(x int64) bool { return x > 0 }
+func ge_0_int64(x int64) bool { return x >= 0 }
+func eq_0_int64(x int64) bool { return x == 0 }
+func ne_0_int64(x int64) bool { return x != 0 }
+func lt_1_int64(x int64) bool { return x < 1 }
+func le_1_int64(x int64) bool { return x <= 1 }
+func gt_1_int64(x int64) bool { return x > 1 }
+func ge_1_int64(x int64) bool { return x >= 1 }
+func eq_1_int64(x int64) bool { return x == 1 }
+func ne_1_int64(x int64) bool { return x != 1 }
+func lt_126_int64(x int64) bool { return x < 126 }
+func le_126_int64(x int64) bool { return x <= 126 }
+func gt_126_int64(x int64) bool { return x > 126 }
+func ge_126_int64(x int64) bool { return x >= 126 }
+func eq_126_int64(x int64) bool { return x == 126 }
+func ne_126_int64(x int64) bool { return x != 126 }
+func lt_127_int64(x int64) bool { return x < 127 }
+func le_127_int64(x int64) bool { return x <= 127 }
+func gt_127_int64(x int64) bool { return x > 127 }
+func ge_127_int64(x int64) bool { return x >= 127 }
+func eq_127_int64(x int64) bool { return x == 127 }
+func ne_127_int64(x int64) bool { return x != 127 }
+func lt_128_int64(x int64) bool { return x < 128 }
+func le_128_int64(x int64) bool { return x <= 128 }
+func gt_128_int64(x int64) bool { return x > 128 }
+func ge_128_int64(x int64) bool { return x >= 128 }
+func eq_128_int64(x int64) bool { return x == 128 }
+func ne_128_int64(x int64) bool { return x != 128 }
+func lt_254_int64(x int64) bool { return x < 254 }
+func le_254_int64(x int64) bool { return x <= 254 }
+func gt_254_int64(x int64) bool { return x > 254 }
+func ge_254_int64(x int64) bool { return x >= 254 }
+func eq_254_int64(x int64) bool { return x == 254 }
+func ne_254_int64(x int64) bool { return x != 254 }
+func lt_255_int64(x int64) bool { return x < 255 }
+func le_255_int64(x int64) bool { return x <= 255 }
+func gt_255_int64(x int64) bool { return x > 255 }
+func ge_255_int64(x int64) bool { return x >= 255 }
+func eq_255_int64(x int64) bool { return x == 255 }
+func ne_255_int64(x int64) bool { return x != 255 }
+func lt_256_int64(x int64) bool { return x < 256 }
+func le_256_int64(x int64) bool { return x <= 256 }
+func gt_256_int64(x int64) bool { return x > 256 }
+func ge_256_int64(x int64) bool { return x >= 256 }
+func eq_256_int64(x int64) bool { return x == 256 }
+func ne_256_int64(x int64) bool { return x != 256 }
+func lt_32766_int64(x int64) bool { return x < 32766 }
+func le_32766_int64(x int64) bool { return x <= 32766 }
+func gt_32766_int64(x int64) bool { return x > 32766 }
+func ge_32766_int64(x int64) bool { return x >= 32766 }
+func eq_32766_int64(x int64) bool { return x == 32766 }
+func ne_32766_int64(x int64) bool { return x != 32766 }
+func lt_32767_int64(x int64) bool { return x < 32767 }
+func le_32767_int64(x int64) bool { return x <= 32767 }
+func gt_32767_int64(x int64) bool { return x > 32767 }
+func ge_32767_int64(x int64) bool { return x >= 32767 }
+func eq_32767_int64(x int64) bool { return x == 32767 }
+func ne_32767_int64(x int64) bool { return x != 32767 }
+func lt_32768_int64(x int64) bool { return x < 32768 }
+func le_32768_int64(x int64) bool { return x <= 32768 }
+func gt_32768_int64(x int64) bool { return x > 32768 }
+func ge_32768_int64(x int64) bool { return x >= 32768 }
+func eq_32768_int64(x int64) bool { return x == 32768 }
+func ne_32768_int64(x int64) bool { return x != 32768 }
+func lt_65534_int64(x int64) bool { return x < 65534 }
+func le_65534_int64(x int64) bool { return x <= 65534 }
+func gt_65534_int64(x int64) bool { return x > 65534 }
+func ge_65534_int64(x int64) bool { return x >= 65534 }
+func eq_65534_int64(x int64) bool { return x == 65534 }
+func ne_65534_int64(x int64) bool { return x != 65534 }
+func lt_65535_int64(x int64) bool { return x < 65535 }
+func le_65535_int64(x int64) bool { return x <= 65535 }
+func gt_65535_int64(x int64) bool { return x > 65535 }
+func ge_65535_int64(x int64) bool { return x >= 65535 }
+func eq_65535_int64(x int64) bool { return x == 65535 }
+func ne_65535_int64(x int64) bool { return x != 65535 }
+func lt_65536_int64(x int64) bool { return x < 65536 }
+func le_65536_int64(x int64) bool { return x <= 65536 }
+func gt_65536_int64(x int64) bool { return x > 65536 }
+func ge_65536_int64(x int64) bool { return x >= 65536 }
+func eq_65536_int64(x int64) bool { return x == 65536 }
+func ne_65536_int64(x int64) bool { return x != 65536 }
+func lt_2147483646_int64(x int64) bool { return x < 2147483646 }
+func le_2147483646_int64(x int64) bool { return x <= 2147483646 }
+func gt_2147483646_int64(x int64) bool { return x > 2147483646 }
+func ge_2147483646_int64(x int64) bool { return x >= 2147483646 }
+func eq_2147483646_int64(x int64) bool { return x == 2147483646 }
+func ne_2147483646_int64(x int64) bool { return x != 2147483646 }
+func lt_2147483647_int64(x int64) bool { return x < 2147483647 }
+func le_2147483647_int64(x int64) bool { return x <= 2147483647 }
+func gt_2147483647_int64(x int64) bool { return x > 2147483647 }
+func ge_2147483647_int64(x int64) bool { return x >= 2147483647 }
+func eq_2147483647_int64(x int64) bool { return x == 2147483647 }
+func ne_2147483647_int64(x int64) bool { return x != 2147483647 }
+func lt_2147483648_int64(x int64) bool { return x < 2147483648 }
+func le_2147483648_int64(x int64) bool { return x <= 2147483648 }
+func gt_2147483648_int64(x int64) bool { return x > 2147483648 }
+func ge_2147483648_int64(x int64) bool { return x >= 2147483648 }
+func eq_2147483648_int64(x int64) bool { return x == 2147483648 }
+func ne_2147483648_int64(x int64) bool { return x != 2147483648 }
+func lt_4278190080_int64(x int64) bool { return x < 4278190080 }
+func le_4278190080_int64(x int64) bool { return x <= 4278190080 }
+func gt_4278190080_int64(x int64) bool { return x > 4278190080 }
+func ge_4278190080_int64(x int64) bool { return x >= 4278190080 }
+func eq_4278190080_int64(x int64) bool { return x == 4278190080 }
+func ne_4278190080_int64(x int64) bool { return x != 4278190080 }
+func lt_4294967294_int64(x int64) bool { return x < 4294967294 }
+func le_4294967294_int64(x int64) bool { return x <= 4294967294 }
+func gt_4294967294_int64(x int64) bool { return x > 4294967294 }
+func ge_4294967294_int64(x int64) bool { return x >= 4294967294 }
+func eq_4294967294_int64(x int64) bool { return x == 4294967294 }
+func ne_4294967294_int64(x int64) bool { return x != 4294967294 }
+func lt_4294967295_int64(x int64) bool { return x < 4294967295 }
+func le_4294967295_int64(x int64) bool { return x <= 4294967295 }
+func gt_4294967295_int64(x int64) bool { return x > 4294967295 }
+func ge_4294967295_int64(x int64) bool { return x >= 4294967295 }
+func eq_4294967295_int64(x int64) bool { return x == 4294967295 }
+func ne_4294967295_int64(x int64) bool { return x != 4294967295 }
+func lt_4294967296_int64(x int64) bool { return x < 4294967296 }
+func le_4294967296_int64(x int64) bool { return x <= 4294967296 }
+func gt_4294967296_int64(x int64) bool { return x > 4294967296 }
+func ge_4294967296_int64(x int64) bool { return x >= 4294967296 }
+func eq_4294967296_int64(x int64) bool { return x == 4294967296 }
+func ne_4294967296_int64(x int64) bool { return x != 4294967296 }
+func lt_1095216660480_int64(x int64) bool { return x < 1095216660480 }
+func le_1095216660480_int64(x int64) bool { return x <= 1095216660480 }
+func gt_1095216660480_int64(x int64) bool { return x > 1095216660480 }
+func ge_1095216660480_int64(x int64) bool { return x >= 1095216660480 }
+func eq_1095216660480_int64(x int64) bool { return x == 1095216660480 }
+func ne_1095216660480_int64(x int64) bool { return x != 1095216660480 }
+func lt_9223372036854775806_int64(x int64) bool { return x < 9223372036854775806 }
+func le_9223372036854775806_int64(x int64) bool { return x <= 9223372036854775806 }
+func gt_9223372036854775806_int64(x int64) bool { return x > 9223372036854775806 }
+func ge_9223372036854775806_int64(x int64) bool { return x >= 9223372036854775806 }
+func eq_9223372036854775806_int64(x int64) bool { return x == 9223372036854775806 }
+func ne_9223372036854775806_int64(x int64) bool { return x != 9223372036854775806 }
+func lt_9223372036854775807_int64(x int64) bool { return x < 9223372036854775807 }
+func le_9223372036854775807_int64(x int64) bool { return x <= 9223372036854775807 }
+func gt_9223372036854775807_int64(x int64) bool { return x > 9223372036854775807 }
+func ge_9223372036854775807_int64(x int64) bool { return x >= 9223372036854775807 }
+func eq_9223372036854775807_int64(x int64) bool { return x == 9223372036854775807 }
+func ne_9223372036854775807_int64(x int64) bool { return x != 9223372036854775807 }
+
+var int64_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int64) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg9223372036854775808_int64},
+ {idx: 0, exp: le, fn: le_neg9223372036854775808_int64},
+ {idx: 0, exp: gt, fn: gt_neg9223372036854775808_int64},
+ {idx: 0, exp: ge, fn: ge_neg9223372036854775808_int64},
+ {idx: 0, exp: eq, fn: eq_neg9223372036854775808_int64},
+ {idx: 0, exp: ne, fn: ne_neg9223372036854775808_int64},
+ {idx: 1, exp: lt, fn: lt_neg9223372036854775807_int64},
+ {idx: 1, exp: le, fn: le_neg9223372036854775807_int64},
+ {idx: 1, exp: gt, fn: gt_neg9223372036854775807_int64},
+ {idx: 1, exp: ge, fn: ge_neg9223372036854775807_int64},
+ {idx: 1, exp: eq, fn: eq_neg9223372036854775807_int64},
+ {idx: 1, exp: ne, fn: ne_neg9223372036854775807_int64},
+ {idx: 2, exp: lt, fn: lt_neg2147483649_int64},
+ {idx: 2, exp: le, fn: le_neg2147483649_int64},
+ {idx: 2, exp: gt, fn: gt_neg2147483649_int64},
+ {idx: 2, exp: ge, fn: ge_neg2147483649_int64},
+ {idx: 2, exp: eq, fn: eq_neg2147483649_int64},
+ {idx: 2, exp: ne, fn: ne_neg2147483649_int64},
+ {idx: 3, exp: lt, fn: lt_neg2147483648_int64},
+ {idx: 3, exp: le, fn: le_neg2147483648_int64},
+ {idx: 3, exp: gt, fn: gt_neg2147483648_int64},
+ {idx: 3, exp: ge, fn: ge_neg2147483648_int64},
+ {idx: 3, exp: eq, fn: eq_neg2147483648_int64},
+ {idx: 3, exp: ne, fn: ne_neg2147483648_int64},
+ {idx: 4, exp: lt, fn: lt_neg2147483647_int64},
+ {idx: 4, exp: le, fn: le_neg2147483647_int64},
+ {idx: 4, exp: gt, fn: gt_neg2147483647_int64},
+ {idx: 4, exp: ge, fn: ge_neg2147483647_int64},
+ {idx: 4, exp: eq, fn: eq_neg2147483647_int64},
+ {idx: 4, exp: ne, fn: ne_neg2147483647_int64},
+ {idx: 5, exp: lt, fn: lt_neg32769_int64},
+ {idx: 5, exp: le, fn: le_neg32769_int64},
+ {idx: 5, exp: gt, fn: gt_neg32769_int64},
+ {idx: 5, exp: ge, fn: ge_neg32769_int64},
+ {idx: 5, exp: eq, fn: eq_neg32769_int64},
+ {idx: 5, exp: ne, fn: ne_neg32769_int64},
+ {idx: 6, exp: lt, fn: lt_neg32768_int64},
+ {idx: 6, exp: le, fn: le_neg32768_int64},
+ {idx: 6, exp: gt, fn: gt_neg32768_int64},
+ {idx: 6, exp: ge, fn: ge_neg32768_int64},
+ {idx: 6, exp: eq, fn: eq_neg32768_int64},
+ {idx: 6, exp: ne, fn: ne_neg32768_int64},
+ {idx: 7, exp: lt, fn: lt_neg32767_int64},
+ {idx: 7, exp: le, fn: le_neg32767_int64},
+ {idx: 7, exp: gt, fn: gt_neg32767_int64},
+ {idx: 7, exp: ge, fn: ge_neg32767_int64},
+ {idx: 7, exp: eq, fn: eq_neg32767_int64},
+ {idx: 7, exp: ne, fn: ne_neg32767_int64},
+ {idx: 8, exp: lt, fn: lt_neg129_int64},
+ {idx: 8, exp: le, fn: le_neg129_int64},
+ {idx: 8, exp: gt, fn: gt_neg129_int64},
+ {idx: 8, exp: ge, fn: ge_neg129_int64},
+ {idx: 8, exp: eq, fn: eq_neg129_int64},
+ {idx: 8, exp: ne, fn: ne_neg129_int64},
+ {idx: 9, exp: lt, fn: lt_neg128_int64},
+ {idx: 9, exp: le, fn: le_neg128_int64},
+ {idx: 9, exp: gt, fn: gt_neg128_int64},
+ {idx: 9, exp: ge, fn: ge_neg128_int64},
+ {idx: 9, exp: eq, fn: eq_neg128_int64},
+ {idx: 9, exp: ne, fn: ne_neg128_int64},
+ {idx: 10, exp: lt, fn: lt_neg127_int64},
+ {idx: 10, exp: le, fn: le_neg127_int64},
+ {idx: 10, exp: gt, fn: gt_neg127_int64},
+ {idx: 10, exp: ge, fn: ge_neg127_int64},
+ {idx: 10, exp: eq, fn: eq_neg127_int64},
+ {idx: 10, exp: ne, fn: ne_neg127_int64},
+ {idx: 11, exp: lt, fn: lt_neg1_int64},
+ {idx: 11, exp: le, fn: le_neg1_int64},
+ {idx: 11, exp: gt, fn: gt_neg1_int64},
+ {idx: 11, exp: ge, fn: ge_neg1_int64},
+ {idx: 11, exp: eq, fn: eq_neg1_int64},
+ {idx: 11, exp: ne, fn: ne_neg1_int64},
+ {idx: 12, exp: lt, fn: lt_0_int64},
+ {idx: 12, exp: le, fn: le_0_int64},
+ {idx: 12, exp: gt, fn: gt_0_int64},
+ {idx: 12, exp: ge, fn: ge_0_int64},
+ {idx: 12, exp: eq, fn: eq_0_int64},
+ {idx: 12, exp: ne, fn: ne_0_int64},
+ {idx: 13, exp: lt, fn: lt_1_int64},
+ {idx: 13, exp: le, fn: le_1_int64},
+ {idx: 13, exp: gt, fn: gt_1_int64},
+ {idx: 13, exp: ge, fn: ge_1_int64},
+ {idx: 13, exp: eq, fn: eq_1_int64},
+ {idx: 13, exp: ne, fn: ne_1_int64},
+ {idx: 14, exp: lt, fn: lt_126_int64},
+ {idx: 14, exp: le, fn: le_126_int64},
+ {idx: 14, exp: gt, fn: gt_126_int64},
+ {idx: 14, exp: ge, fn: ge_126_int64},
+ {idx: 14, exp: eq, fn: eq_126_int64},
+ {idx: 14, exp: ne, fn: ne_126_int64},
+ {idx: 15, exp: lt, fn: lt_127_int64},
+ {idx: 15, exp: le, fn: le_127_int64},
+ {idx: 15, exp: gt, fn: gt_127_int64},
+ {idx: 15, exp: ge, fn: ge_127_int64},
+ {idx: 15, exp: eq, fn: eq_127_int64},
+ {idx: 15, exp: ne, fn: ne_127_int64},
+ {idx: 16, exp: lt, fn: lt_128_int64},
+ {idx: 16, exp: le, fn: le_128_int64},
+ {idx: 16, exp: gt, fn: gt_128_int64},
+ {idx: 16, exp: ge, fn: ge_128_int64},
+ {idx: 16, exp: eq, fn: eq_128_int64},
+ {idx: 16, exp: ne, fn: ne_128_int64},
+ {idx: 17, exp: lt, fn: lt_254_int64},
+ {idx: 17, exp: le, fn: le_254_int64},
+ {idx: 17, exp: gt, fn: gt_254_int64},
+ {idx: 17, exp: ge, fn: ge_254_int64},
+ {idx: 17, exp: eq, fn: eq_254_int64},
+ {idx: 17, exp: ne, fn: ne_254_int64},
+ {idx: 18, exp: lt, fn: lt_255_int64},
+ {idx: 18, exp: le, fn: le_255_int64},
+ {idx: 18, exp: gt, fn: gt_255_int64},
+ {idx: 18, exp: ge, fn: ge_255_int64},
+ {idx: 18, exp: eq, fn: eq_255_int64},
+ {idx: 18, exp: ne, fn: ne_255_int64},
+ {idx: 19, exp: lt, fn: lt_256_int64},
+ {idx: 19, exp: le, fn: le_256_int64},
+ {idx: 19, exp: gt, fn: gt_256_int64},
+ {idx: 19, exp: ge, fn: ge_256_int64},
+ {idx: 19, exp: eq, fn: eq_256_int64},
+ {idx: 19, exp: ne, fn: ne_256_int64},
+ {idx: 20, exp: lt, fn: lt_32766_int64},
+ {idx: 20, exp: le, fn: le_32766_int64},
+ {idx: 20, exp: gt, fn: gt_32766_int64},
+ {idx: 20, exp: ge, fn: ge_32766_int64},
+ {idx: 20, exp: eq, fn: eq_32766_int64},
+ {idx: 20, exp: ne, fn: ne_32766_int64},
+ {idx: 21, exp: lt, fn: lt_32767_int64},
+ {idx: 21, exp: le, fn: le_32767_int64},
+ {idx: 21, exp: gt, fn: gt_32767_int64},
+ {idx: 21, exp: ge, fn: ge_32767_int64},
+ {idx: 21, exp: eq, fn: eq_32767_int64},
+ {idx: 21, exp: ne, fn: ne_32767_int64},
+ {idx: 22, exp: lt, fn: lt_32768_int64},
+ {idx: 22, exp: le, fn: le_32768_int64},
+ {idx: 22, exp: gt, fn: gt_32768_int64},
+ {idx: 22, exp: ge, fn: ge_32768_int64},
+ {idx: 22, exp: eq, fn: eq_32768_int64},
+ {idx: 22, exp: ne, fn: ne_32768_int64},
+ {idx: 23, exp: lt, fn: lt_65534_int64},
+ {idx: 23, exp: le, fn: le_65534_int64},
+ {idx: 23, exp: gt, fn: gt_65534_int64},
+ {idx: 23, exp: ge, fn: ge_65534_int64},
+ {idx: 23, exp: eq, fn: eq_65534_int64},
+ {idx: 23, exp: ne, fn: ne_65534_int64},
+ {idx: 24, exp: lt, fn: lt_65535_int64},
+ {idx: 24, exp: le, fn: le_65535_int64},
+ {idx: 24, exp: gt, fn: gt_65535_int64},
+ {idx: 24, exp: ge, fn: ge_65535_int64},
+ {idx: 24, exp: eq, fn: eq_65535_int64},
+ {idx: 24, exp: ne, fn: ne_65535_int64},
+ {idx: 25, exp: lt, fn: lt_65536_int64},
+ {idx: 25, exp: le, fn: le_65536_int64},
+ {idx: 25, exp: gt, fn: gt_65536_int64},
+ {idx: 25, exp: ge, fn: ge_65536_int64},
+ {idx: 25, exp: eq, fn: eq_65536_int64},
+ {idx: 25, exp: ne, fn: ne_65536_int64},
+ {idx: 26, exp: lt, fn: lt_2147483646_int64},
+ {idx: 26, exp: le, fn: le_2147483646_int64},
+ {idx: 26, exp: gt, fn: gt_2147483646_int64},
+ {idx: 26, exp: ge, fn: ge_2147483646_int64},
+ {idx: 26, exp: eq, fn: eq_2147483646_int64},
+ {idx: 26, exp: ne, fn: ne_2147483646_int64},
+ {idx: 27, exp: lt, fn: lt_2147483647_int64},
+ {idx: 27, exp: le, fn: le_2147483647_int64},
+ {idx: 27, exp: gt, fn: gt_2147483647_int64},
+ {idx: 27, exp: ge, fn: ge_2147483647_int64},
+ {idx: 27, exp: eq, fn: eq_2147483647_int64},
+ {idx: 27, exp: ne, fn: ne_2147483647_int64},
+ {idx: 28, exp: lt, fn: lt_2147483648_int64},
+ {idx: 28, exp: le, fn: le_2147483648_int64},
+ {idx: 28, exp: gt, fn: gt_2147483648_int64},
+ {idx: 28, exp: ge, fn: ge_2147483648_int64},
+ {idx: 28, exp: eq, fn: eq_2147483648_int64},
+ {idx: 28, exp: ne, fn: ne_2147483648_int64},
+ {idx: 29, exp: lt, fn: lt_4278190080_int64},
+ {idx: 29, exp: le, fn: le_4278190080_int64},
+ {idx: 29, exp: gt, fn: gt_4278190080_int64},
+ {idx: 29, exp: ge, fn: ge_4278190080_int64},
+ {idx: 29, exp: eq, fn: eq_4278190080_int64},
+ {idx: 29, exp: ne, fn: ne_4278190080_int64},
+ {idx: 30, exp: lt, fn: lt_4294967294_int64},
+ {idx: 30, exp: le, fn: le_4294967294_int64},
+ {idx: 30, exp: gt, fn: gt_4294967294_int64},
+ {idx: 30, exp: ge, fn: ge_4294967294_int64},
+ {idx: 30, exp: eq, fn: eq_4294967294_int64},
+ {idx: 30, exp: ne, fn: ne_4294967294_int64},
+ {idx: 31, exp: lt, fn: lt_4294967295_int64},
+ {idx: 31, exp: le, fn: le_4294967295_int64},
+ {idx: 31, exp: gt, fn: gt_4294967295_int64},
+ {idx: 31, exp: ge, fn: ge_4294967295_int64},
+ {idx: 31, exp: eq, fn: eq_4294967295_int64},
+ {idx: 31, exp: ne, fn: ne_4294967295_int64},
+ {idx: 32, exp: lt, fn: lt_4294967296_int64},
+ {idx: 32, exp: le, fn: le_4294967296_int64},
+ {idx: 32, exp: gt, fn: gt_4294967296_int64},
+ {idx: 32, exp: ge, fn: ge_4294967296_int64},
+ {idx: 32, exp: eq, fn: eq_4294967296_int64},
+ {idx: 32, exp: ne, fn: ne_4294967296_int64},
+ {idx: 33, exp: lt, fn: lt_1095216660480_int64},
+ {idx: 33, exp: le, fn: le_1095216660480_int64},
+ {idx: 33, exp: gt, fn: gt_1095216660480_int64},
+ {idx: 33, exp: ge, fn: ge_1095216660480_int64},
+ {idx: 33, exp: eq, fn: eq_1095216660480_int64},
+ {idx: 33, exp: ne, fn: ne_1095216660480_int64},
+ {idx: 34, exp: lt, fn: lt_9223372036854775806_int64},
+ {idx: 34, exp: le, fn: le_9223372036854775806_int64},
+ {idx: 34, exp: gt, fn: gt_9223372036854775806_int64},
+ {idx: 34, exp: ge, fn: ge_9223372036854775806_int64},
+ {idx: 34, exp: eq, fn: eq_9223372036854775806_int64},
+ {idx: 34, exp: ne, fn: ne_9223372036854775806_int64},
+ {idx: 35, exp: lt, fn: lt_9223372036854775807_int64},
+ {idx: 35, exp: le, fn: le_9223372036854775807_int64},
+ {idx: 35, exp: gt, fn: gt_9223372036854775807_int64},
+ {idx: 35, exp: ge, fn: ge_9223372036854775807_int64},
+ {idx: 35, exp: eq, fn: eq_9223372036854775807_int64},
+ {idx: 35, exp: ne, fn: ne_9223372036854775807_int64},
+}
+
+// int32 tests
+var int32_vals = []int32{
+ -2147483648,
+ -2147483647,
+ -32769,
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+ 32768,
+ 65534,
+ 65535,
+ 65536,
+ 2147483646,
+ 2147483647,
+}
+
+func lt_neg2147483648_int32(x int32) bool { return x < -2147483648 }
+func le_neg2147483648_int32(x int32) bool { return x <= -2147483648 }
+func gt_neg2147483648_int32(x int32) bool { return x > -2147483648 }
+func ge_neg2147483648_int32(x int32) bool { return x >= -2147483648 }
+func eq_neg2147483648_int32(x int32) bool { return x == -2147483648 }
+func ne_neg2147483648_int32(x int32) bool { return x != -2147483648 }
+func lt_neg2147483647_int32(x int32) bool { return x < -2147483647 }
+func le_neg2147483647_int32(x int32) bool { return x <= -2147483647 }
+func gt_neg2147483647_int32(x int32) bool { return x > -2147483647 }
+func ge_neg2147483647_int32(x int32) bool { return x >= -2147483647 }
+func eq_neg2147483647_int32(x int32) bool { return x == -2147483647 }
+func ne_neg2147483647_int32(x int32) bool { return x != -2147483647 }
+func lt_neg32769_int32(x int32) bool { return x < -32769 }
+func le_neg32769_int32(x int32) bool { return x <= -32769 }
+func gt_neg32769_int32(x int32) bool { return x > -32769 }
+func ge_neg32769_int32(x int32) bool { return x >= -32769 }
+func eq_neg32769_int32(x int32) bool { return x == -32769 }
+func ne_neg32769_int32(x int32) bool { return x != -32769 }
+func lt_neg32768_int32(x int32) bool { return x < -32768 }
+func le_neg32768_int32(x int32) bool { return x <= -32768 }
+func gt_neg32768_int32(x int32) bool { return x > -32768 }
+func ge_neg32768_int32(x int32) bool { return x >= -32768 }
+func eq_neg32768_int32(x int32) bool { return x == -32768 }
+func ne_neg32768_int32(x int32) bool { return x != -32768 }
+func lt_neg32767_int32(x int32) bool { return x < -32767 }
+func le_neg32767_int32(x int32) bool { return x <= -32767 }
+func gt_neg32767_int32(x int32) bool { return x > -32767 }
+func ge_neg32767_int32(x int32) bool { return x >= -32767 }
+func eq_neg32767_int32(x int32) bool { return x == -32767 }
+func ne_neg32767_int32(x int32) bool { return x != -32767 }
+func lt_neg129_int32(x int32) bool { return x < -129 }
+func le_neg129_int32(x int32) bool { return x <= -129 }
+func gt_neg129_int32(x int32) bool { return x > -129 }
+func ge_neg129_int32(x int32) bool { return x >= -129 }
+func eq_neg129_int32(x int32) bool { return x == -129 }
+func ne_neg129_int32(x int32) bool { return x != -129 }
+func lt_neg128_int32(x int32) bool { return x < -128 }
+func le_neg128_int32(x int32) bool { return x <= -128 }
+func gt_neg128_int32(x int32) bool { return x > -128 }
+func ge_neg128_int32(x int32) bool { return x >= -128 }
+func eq_neg128_int32(x int32) bool { return x == -128 }
+func ne_neg128_int32(x int32) bool { return x != -128 }
+func lt_neg127_int32(x int32) bool { return x < -127 }
+func le_neg127_int32(x int32) bool { return x <= -127 }
+func gt_neg127_int32(x int32) bool { return x > -127 }
+func ge_neg127_int32(x int32) bool { return x >= -127 }
+func eq_neg127_int32(x int32) bool { return x == -127 }
+func ne_neg127_int32(x int32) bool { return x != -127 }
+func lt_neg1_int32(x int32) bool { return x < -1 }
+func le_neg1_int32(x int32) bool { return x <= -1 }
+func gt_neg1_int32(x int32) bool { return x > -1 }
+func ge_neg1_int32(x int32) bool { return x >= -1 }
+func eq_neg1_int32(x int32) bool { return x == -1 }
+func ne_neg1_int32(x int32) bool { return x != -1 }
+func lt_0_int32(x int32) bool { return x < 0 }
+func le_0_int32(x int32) bool { return x <= 0 }
+func gt_0_int32(x int32) bool { return x > 0 }
+func ge_0_int32(x int32) bool { return x >= 0 }
+func eq_0_int32(x int32) bool { return x == 0 }
+func ne_0_int32(x int32) bool { return x != 0 }
+func lt_1_int32(x int32) bool { return x < 1 }
+func le_1_int32(x int32) bool { return x <= 1 }
+func gt_1_int32(x int32) bool { return x > 1 }
+func ge_1_int32(x int32) bool { return x >= 1 }
+func eq_1_int32(x int32) bool { return x == 1 }
+func ne_1_int32(x int32) bool { return x != 1 }
+func lt_126_int32(x int32) bool { return x < 126 }
+func le_126_int32(x int32) bool { return x <= 126 }
+func gt_126_int32(x int32) bool { return x > 126 }
+func ge_126_int32(x int32) bool { return x >= 126 }
+func eq_126_int32(x int32) bool { return x == 126 }
+func ne_126_int32(x int32) bool { return x != 126 }
+func lt_127_int32(x int32) bool { return x < 127 }
+func le_127_int32(x int32) bool { return x <= 127 }
+func gt_127_int32(x int32) bool { return x > 127 }
+func ge_127_int32(x int32) bool { return x >= 127 }
+func eq_127_int32(x int32) bool { return x == 127 }
+func ne_127_int32(x int32) bool { return x != 127 }
+func lt_128_int32(x int32) bool { return x < 128 }
+func le_128_int32(x int32) bool { return x <= 128 }
+func gt_128_int32(x int32) bool { return x > 128 }
+func ge_128_int32(x int32) bool { return x >= 128 }
+func eq_128_int32(x int32) bool { return x == 128 }
+func ne_128_int32(x int32) bool { return x != 128 }
+func lt_254_int32(x int32) bool { return x < 254 }
+func le_254_int32(x int32) bool { return x <= 254 }
+func gt_254_int32(x int32) bool { return x > 254 }
+func ge_254_int32(x int32) bool { return x >= 254 }
+func eq_254_int32(x int32) bool { return x == 254 }
+func ne_254_int32(x int32) bool { return x != 254 }
+func lt_255_int32(x int32) bool { return x < 255 }
+func le_255_int32(x int32) bool { return x <= 255 }
+func gt_255_int32(x int32) bool { return x > 255 }
+func ge_255_int32(x int32) bool { return x >= 255 }
+func eq_255_int32(x int32) bool { return x == 255 }
+func ne_255_int32(x int32) bool { return x != 255 }
+func lt_256_int32(x int32) bool { return x < 256 }
+func le_256_int32(x int32) bool { return x <= 256 }
+func gt_256_int32(x int32) bool { return x > 256 }
+func ge_256_int32(x int32) bool { return x >= 256 }
+func eq_256_int32(x int32) bool { return x == 256 }
+func ne_256_int32(x int32) bool { return x != 256 }
+func lt_32766_int32(x int32) bool { return x < 32766 }
+func le_32766_int32(x int32) bool { return x <= 32766 }
+func gt_32766_int32(x int32) bool { return x > 32766 }
+func ge_32766_int32(x int32) bool { return x >= 32766 }
+func eq_32766_int32(x int32) bool { return x == 32766 }
+func ne_32766_int32(x int32) bool { return x != 32766 }
+func lt_32767_int32(x int32) bool { return x < 32767 }
+func le_32767_int32(x int32) bool { return x <= 32767 }
+func gt_32767_int32(x int32) bool { return x > 32767 }
+func ge_32767_int32(x int32) bool { return x >= 32767 }
+func eq_32767_int32(x int32) bool { return x == 32767 }
+func ne_32767_int32(x int32) bool { return x != 32767 }
+func lt_32768_int32(x int32) bool { return x < 32768 }
+func le_32768_int32(x int32) bool { return x <= 32768 }
+func gt_32768_int32(x int32) bool { return x > 32768 }
+func ge_32768_int32(x int32) bool { return x >= 32768 }
+func eq_32768_int32(x int32) bool { return x == 32768 }
+func ne_32768_int32(x int32) bool { return x != 32768 }
+func lt_65534_int32(x int32) bool { return x < 65534 }
+func le_65534_int32(x int32) bool { return x <= 65534 }
+func gt_65534_int32(x int32) bool { return x > 65534 }
+func ge_65534_int32(x int32) bool { return x >= 65534 }
+func eq_65534_int32(x int32) bool { return x == 65534 }
+func ne_65534_int32(x int32) bool { return x != 65534 }
+func lt_65535_int32(x int32) bool { return x < 65535 }
+func le_65535_int32(x int32) bool { return x <= 65535 }
+func gt_65535_int32(x int32) bool { return x > 65535 }
+func ge_65535_int32(x int32) bool { return x >= 65535 }
+func eq_65535_int32(x int32) bool { return x == 65535 }
+func ne_65535_int32(x int32) bool { return x != 65535 }
+func lt_65536_int32(x int32) bool { return x < 65536 }
+func le_65536_int32(x int32) bool { return x <= 65536 }
+func gt_65536_int32(x int32) bool { return x > 65536 }
+func ge_65536_int32(x int32) bool { return x >= 65536 }
+func eq_65536_int32(x int32) bool { return x == 65536 }
+func ne_65536_int32(x int32) bool { return x != 65536 }
+func lt_2147483646_int32(x int32) bool { return x < 2147483646 }
+func le_2147483646_int32(x int32) bool { return x <= 2147483646 }
+func gt_2147483646_int32(x int32) bool { return x > 2147483646 }
+func ge_2147483646_int32(x int32) bool { return x >= 2147483646 }
+func eq_2147483646_int32(x int32) bool { return x == 2147483646 }
+func ne_2147483646_int32(x int32) bool { return x != 2147483646 }
+func lt_2147483647_int32(x int32) bool { return x < 2147483647 }
+func le_2147483647_int32(x int32) bool { return x <= 2147483647 }
+func gt_2147483647_int32(x int32) bool { return x > 2147483647 }
+func ge_2147483647_int32(x int32) bool { return x >= 2147483647 }
+func eq_2147483647_int32(x int32) bool { return x == 2147483647 }
+func ne_2147483647_int32(x int32) bool { return x != 2147483647 }
+
+var int32_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int32) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg2147483648_int32},
+ {idx: 0, exp: le, fn: le_neg2147483648_int32},
+ {idx: 0, exp: gt, fn: gt_neg2147483648_int32},
+ {idx: 0, exp: ge, fn: ge_neg2147483648_int32},
+ {idx: 0, exp: eq, fn: eq_neg2147483648_int32},
+ {idx: 0, exp: ne, fn: ne_neg2147483648_int32},
+ {idx: 1, exp: lt, fn: lt_neg2147483647_int32},
+ {idx: 1, exp: le, fn: le_neg2147483647_int32},
+ {idx: 1, exp: gt, fn: gt_neg2147483647_int32},
+ {idx: 1, exp: ge, fn: ge_neg2147483647_int32},
+ {idx: 1, exp: eq, fn: eq_neg2147483647_int32},
+ {idx: 1, exp: ne, fn: ne_neg2147483647_int32},
+ {idx: 2, exp: lt, fn: lt_neg32769_int32},
+ {idx: 2, exp: le, fn: le_neg32769_int32},
+ {idx: 2, exp: gt, fn: gt_neg32769_int32},
+ {idx: 2, exp: ge, fn: ge_neg32769_int32},
+ {idx: 2, exp: eq, fn: eq_neg32769_int32},
+ {idx: 2, exp: ne, fn: ne_neg32769_int32},
+ {idx: 3, exp: lt, fn: lt_neg32768_int32},
+ {idx: 3, exp: le, fn: le_neg32768_int32},
+ {idx: 3, exp: gt, fn: gt_neg32768_int32},
+ {idx: 3, exp: ge, fn: ge_neg32768_int32},
+ {idx: 3, exp: eq, fn: eq_neg32768_int32},
+ {idx: 3, exp: ne, fn: ne_neg32768_int32},
+ {idx: 4, exp: lt, fn: lt_neg32767_int32},
+ {idx: 4, exp: le, fn: le_neg32767_int32},
+ {idx: 4, exp: gt, fn: gt_neg32767_int32},
+ {idx: 4, exp: ge, fn: ge_neg32767_int32},
+ {idx: 4, exp: eq, fn: eq_neg32767_int32},
+ {idx: 4, exp: ne, fn: ne_neg32767_int32},
+ {idx: 5, exp: lt, fn: lt_neg129_int32},
+ {idx: 5, exp: le, fn: le_neg129_int32},
+ {idx: 5, exp: gt, fn: gt_neg129_int32},
+ {idx: 5, exp: ge, fn: ge_neg129_int32},
+ {idx: 5, exp: eq, fn: eq_neg129_int32},
+ {idx: 5, exp: ne, fn: ne_neg129_int32},
+ {idx: 6, exp: lt, fn: lt_neg128_int32},
+ {idx: 6, exp: le, fn: le_neg128_int32},
+ {idx: 6, exp: gt, fn: gt_neg128_int32},
+ {idx: 6, exp: ge, fn: ge_neg128_int32},
+ {idx: 6, exp: eq, fn: eq_neg128_int32},
+ {idx: 6, exp: ne, fn: ne_neg128_int32},
+ {idx: 7, exp: lt, fn: lt_neg127_int32},
+ {idx: 7, exp: le, fn: le_neg127_int32},
+ {idx: 7, exp: gt, fn: gt_neg127_int32},
+ {idx: 7, exp: ge, fn: ge_neg127_int32},
+ {idx: 7, exp: eq, fn: eq_neg127_int32},
+ {idx: 7, exp: ne, fn: ne_neg127_int32},
+ {idx: 8, exp: lt, fn: lt_neg1_int32},
+ {idx: 8, exp: le, fn: le_neg1_int32},
+ {idx: 8, exp: gt, fn: gt_neg1_int32},
+ {idx: 8, exp: ge, fn: ge_neg1_int32},
+ {idx: 8, exp: eq, fn: eq_neg1_int32},
+ {idx: 8, exp: ne, fn: ne_neg1_int32},
+ {idx: 9, exp: lt, fn: lt_0_int32},
+ {idx: 9, exp: le, fn: le_0_int32},
+ {idx: 9, exp: gt, fn: gt_0_int32},
+ {idx: 9, exp: ge, fn: ge_0_int32},
+ {idx: 9, exp: eq, fn: eq_0_int32},
+ {idx: 9, exp: ne, fn: ne_0_int32},
+ {idx: 10, exp: lt, fn: lt_1_int32},
+ {idx: 10, exp: le, fn: le_1_int32},
+ {idx: 10, exp: gt, fn: gt_1_int32},
+ {idx: 10, exp: ge, fn: ge_1_int32},
+ {idx: 10, exp: eq, fn: eq_1_int32},
+ {idx: 10, exp: ne, fn: ne_1_int32},
+ {idx: 11, exp: lt, fn: lt_126_int32},
+ {idx: 11, exp: le, fn: le_126_int32},
+ {idx: 11, exp: gt, fn: gt_126_int32},
+ {idx: 11, exp: ge, fn: ge_126_int32},
+ {idx: 11, exp: eq, fn: eq_126_int32},
+ {idx: 11, exp: ne, fn: ne_126_int32},
+ {idx: 12, exp: lt, fn: lt_127_int32},
+ {idx: 12, exp: le, fn: le_127_int32},
+ {idx: 12, exp: gt, fn: gt_127_int32},
+ {idx: 12, exp: ge, fn: ge_127_int32},
+ {idx: 12, exp: eq, fn: eq_127_int32},
+ {idx: 12, exp: ne, fn: ne_127_int32},
+ {idx: 13, exp: lt, fn: lt_128_int32},
+ {idx: 13, exp: le, fn: le_128_int32},
+ {idx: 13, exp: gt, fn: gt_128_int32},
+ {idx: 13, exp: ge, fn: ge_128_int32},
+ {idx: 13, exp: eq, fn: eq_128_int32},
+ {idx: 13, exp: ne, fn: ne_128_int32},
+ {idx: 14, exp: lt, fn: lt_254_int32},
+ {idx: 14, exp: le, fn: le_254_int32},
+ {idx: 14, exp: gt, fn: gt_254_int32},
+ {idx: 14, exp: ge, fn: ge_254_int32},
+ {idx: 14, exp: eq, fn: eq_254_int32},
+ {idx: 14, exp: ne, fn: ne_254_int32},
+ {idx: 15, exp: lt, fn: lt_255_int32},
+ {idx: 15, exp: le, fn: le_255_int32},
+ {idx: 15, exp: gt, fn: gt_255_int32},
+ {idx: 15, exp: ge, fn: ge_255_int32},
+ {idx: 15, exp: eq, fn: eq_255_int32},
+ {idx: 15, exp: ne, fn: ne_255_int32},
+ {idx: 16, exp: lt, fn: lt_256_int32},
+ {idx: 16, exp: le, fn: le_256_int32},
+ {idx: 16, exp: gt, fn: gt_256_int32},
+ {idx: 16, exp: ge, fn: ge_256_int32},
+ {idx: 16, exp: eq, fn: eq_256_int32},
+ {idx: 16, exp: ne, fn: ne_256_int32},
+ {idx: 17, exp: lt, fn: lt_32766_int32},
+ {idx: 17, exp: le, fn: le_32766_int32},
+ {idx: 17, exp: gt, fn: gt_32766_int32},
+ {idx: 17, exp: ge, fn: ge_32766_int32},
+ {idx: 17, exp: eq, fn: eq_32766_int32},
+ {idx: 17, exp: ne, fn: ne_32766_int32},
+ {idx: 18, exp: lt, fn: lt_32767_int32},
+ {idx: 18, exp: le, fn: le_32767_int32},
+ {idx: 18, exp: gt, fn: gt_32767_int32},
+ {idx: 18, exp: ge, fn: ge_32767_int32},
+ {idx: 18, exp: eq, fn: eq_32767_int32},
+ {idx: 18, exp: ne, fn: ne_32767_int32},
+ {idx: 19, exp: lt, fn: lt_32768_int32},
+ {idx: 19, exp: le, fn: le_32768_int32},
+ {idx: 19, exp: gt, fn: gt_32768_int32},
+ {idx: 19, exp: ge, fn: ge_32768_int32},
+ {idx: 19, exp: eq, fn: eq_32768_int32},
+ {idx: 19, exp: ne, fn: ne_32768_int32},
+ {idx: 20, exp: lt, fn: lt_65534_int32},
+ {idx: 20, exp: le, fn: le_65534_int32},
+ {idx: 20, exp: gt, fn: gt_65534_int32},
+ {idx: 20, exp: ge, fn: ge_65534_int32},
+ {idx: 20, exp: eq, fn: eq_65534_int32},
+ {idx: 20, exp: ne, fn: ne_65534_int32},
+ {idx: 21, exp: lt, fn: lt_65535_int32},
+ {idx: 21, exp: le, fn: le_65535_int32},
+ {idx: 21, exp: gt, fn: gt_65535_int32},
+ {idx: 21, exp: ge, fn: ge_65535_int32},
+ {idx: 21, exp: eq, fn: eq_65535_int32},
+ {idx: 21, exp: ne, fn: ne_65535_int32},
+ {idx: 22, exp: lt, fn: lt_65536_int32},
+ {idx: 22, exp: le, fn: le_65536_int32},
+ {idx: 22, exp: gt, fn: gt_65536_int32},
+ {idx: 22, exp: ge, fn: ge_65536_int32},
+ {idx: 22, exp: eq, fn: eq_65536_int32},
+ {idx: 22, exp: ne, fn: ne_65536_int32},
+ {idx: 23, exp: lt, fn: lt_2147483646_int32},
+ {idx: 23, exp: le, fn: le_2147483646_int32},
+ {idx: 23, exp: gt, fn: gt_2147483646_int32},
+ {idx: 23, exp: ge, fn: ge_2147483646_int32},
+ {idx: 23, exp: eq, fn: eq_2147483646_int32},
+ {idx: 23, exp: ne, fn: ne_2147483646_int32},
+ {idx: 24, exp: lt, fn: lt_2147483647_int32},
+ {idx: 24, exp: le, fn: le_2147483647_int32},
+ {idx: 24, exp: gt, fn: gt_2147483647_int32},
+ {idx: 24, exp: ge, fn: ge_2147483647_int32},
+ {idx: 24, exp: eq, fn: eq_2147483647_int32},
+ {idx: 24, exp: ne, fn: ne_2147483647_int32},
+}
+
+// int16 tests
+var int16_vals = []int16{
+ -32768,
+ -32767,
+ -129,
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+ 128,
+ 254,
+ 255,
+ 256,
+ 32766,
+ 32767,
+}
+
+func lt_neg32768_int16(x int16) bool { return x < -32768 }
+func le_neg32768_int16(x int16) bool { return x <= -32768 }
+func gt_neg32768_int16(x int16) bool { return x > -32768 }
+func ge_neg32768_int16(x int16) bool { return x >= -32768 }
+func eq_neg32768_int16(x int16) bool { return x == -32768 }
+func ne_neg32768_int16(x int16) bool { return x != -32768 }
+func lt_neg32767_int16(x int16) bool { return x < -32767 }
+func le_neg32767_int16(x int16) bool { return x <= -32767 }
+func gt_neg32767_int16(x int16) bool { return x > -32767 }
+func ge_neg32767_int16(x int16) bool { return x >= -32767 }
+func eq_neg32767_int16(x int16) bool { return x == -32767 }
+func ne_neg32767_int16(x int16) bool { return x != -32767 }
+func lt_neg129_int16(x int16) bool { return x < -129 }
+func le_neg129_int16(x int16) bool { return x <= -129 }
+func gt_neg129_int16(x int16) bool { return x > -129 }
+func ge_neg129_int16(x int16) bool { return x >= -129 }
+func eq_neg129_int16(x int16) bool { return x == -129 }
+func ne_neg129_int16(x int16) bool { return x != -129 }
+func lt_neg128_int16(x int16) bool { return x < -128 }
+func le_neg128_int16(x int16) bool { return x <= -128 }
+func gt_neg128_int16(x int16) bool { return x > -128 }
+func ge_neg128_int16(x int16) bool { return x >= -128 }
+func eq_neg128_int16(x int16) bool { return x == -128 }
+func ne_neg128_int16(x int16) bool { return x != -128 }
+func lt_neg127_int16(x int16) bool { return x < -127 }
+func le_neg127_int16(x int16) bool { return x <= -127 }
+func gt_neg127_int16(x int16) bool { return x > -127 }
+func ge_neg127_int16(x int16) bool { return x >= -127 }
+func eq_neg127_int16(x int16) bool { return x == -127 }
+func ne_neg127_int16(x int16) bool { return x != -127 }
+func lt_neg1_int16(x int16) bool { return x < -1 }
+func le_neg1_int16(x int16) bool { return x <= -1 }
+func gt_neg1_int16(x int16) bool { return x > -1 }
+func ge_neg1_int16(x int16) bool { return x >= -1 }
+func eq_neg1_int16(x int16) bool { return x == -1 }
+func ne_neg1_int16(x int16) bool { return x != -1 }
+func lt_0_int16(x int16) bool { return x < 0 }
+func le_0_int16(x int16) bool { return x <= 0 }
+func gt_0_int16(x int16) bool { return x > 0 }
+func ge_0_int16(x int16) bool { return x >= 0 }
+func eq_0_int16(x int16) bool { return x == 0 }
+func ne_0_int16(x int16) bool { return x != 0 }
+func lt_1_int16(x int16) bool { return x < 1 }
+func le_1_int16(x int16) bool { return x <= 1 }
+func gt_1_int16(x int16) bool { return x > 1 }
+func ge_1_int16(x int16) bool { return x >= 1 }
+func eq_1_int16(x int16) bool { return x == 1 }
+func ne_1_int16(x int16) bool { return x != 1 }
+func lt_126_int16(x int16) bool { return x < 126 }
+func le_126_int16(x int16) bool { return x <= 126 }
+func gt_126_int16(x int16) bool { return x > 126 }
+func ge_126_int16(x int16) bool { return x >= 126 }
+func eq_126_int16(x int16) bool { return x == 126 }
+func ne_126_int16(x int16) bool { return x != 126 }
+func lt_127_int16(x int16) bool { return x < 127 }
+func le_127_int16(x int16) bool { return x <= 127 }
+func gt_127_int16(x int16) bool { return x > 127 }
+func ge_127_int16(x int16) bool { return x >= 127 }
+func eq_127_int16(x int16) bool { return x == 127 }
+func ne_127_int16(x int16) bool { return x != 127 }
+func lt_128_int16(x int16) bool { return x < 128 }
+func le_128_int16(x int16) bool { return x <= 128 }
+func gt_128_int16(x int16) bool { return x > 128 }
+func ge_128_int16(x int16) bool { return x >= 128 }
+func eq_128_int16(x int16) bool { return x == 128 }
+func ne_128_int16(x int16) bool { return x != 128 }
+func lt_254_int16(x int16) bool { return x < 254 }
+func le_254_int16(x int16) bool { return x <= 254 }
+func gt_254_int16(x int16) bool { return x > 254 }
+func ge_254_int16(x int16) bool { return x >= 254 }
+func eq_254_int16(x int16) bool { return x == 254 }
+func ne_254_int16(x int16) bool { return x != 254 }
+func lt_255_int16(x int16) bool { return x < 255 }
+func le_255_int16(x int16) bool { return x <= 255 }
+func gt_255_int16(x int16) bool { return x > 255 }
+func ge_255_int16(x int16) bool { return x >= 255 }
+func eq_255_int16(x int16) bool { return x == 255 }
+func ne_255_int16(x int16) bool { return x != 255 }
+func lt_256_int16(x int16) bool { return x < 256 }
+func le_256_int16(x int16) bool { return x <= 256 }
+func gt_256_int16(x int16) bool { return x > 256 }
+func ge_256_int16(x int16) bool { return x >= 256 }
+func eq_256_int16(x int16) bool { return x == 256 }
+func ne_256_int16(x int16) bool { return x != 256 }
+func lt_32766_int16(x int16) bool { return x < 32766 }
+func le_32766_int16(x int16) bool { return x <= 32766 }
+func gt_32766_int16(x int16) bool { return x > 32766 }
+func ge_32766_int16(x int16) bool { return x >= 32766 }
+func eq_32766_int16(x int16) bool { return x == 32766 }
+func ne_32766_int16(x int16) bool { return x != 32766 }
+func lt_32767_int16(x int16) bool { return x < 32767 }
+func le_32767_int16(x int16) bool { return x <= 32767 }
+func gt_32767_int16(x int16) bool { return x > 32767 }
+func ge_32767_int16(x int16) bool { return x >= 32767 }
+func eq_32767_int16(x int16) bool { return x == 32767 }
+func ne_32767_int16(x int16) bool { return x != 32767 }
+
+var int16_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int16) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg32768_int16},
+ {idx: 0, exp: le, fn: le_neg32768_int16},
+ {idx: 0, exp: gt, fn: gt_neg32768_int16},
+ {idx: 0, exp: ge, fn: ge_neg32768_int16},
+ {idx: 0, exp: eq, fn: eq_neg32768_int16},
+ {idx: 0, exp: ne, fn: ne_neg32768_int16},
+ {idx: 1, exp: lt, fn: lt_neg32767_int16},
+ {idx: 1, exp: le, fn: le_neg32767_int16},
+ {idx: 1, exp: gt, fn: gt_neg32767_int16},
+ {idx: 1, exp: ge, fn: ge_neg32767_int16},
+ {idx: 1, exp: eq, fn: eq_neg32767_int16},
+ {idx: 1, exp: ne, fn: ne_neg32767_int16},
+ {idx: 2, exp: lt, fn: lt_neg129_int16},
+ {idx: 2, exp: le, fn: le_neg129_int16},
+ {idx: 2, exp: gt, fn: gt_neg129_int16},
+ {idx: 2, exp: ge, fn: ge_neg129_int16},
+ {idx: 2, exp: eq, fn: eq_neg129_int16},
+ {idx: 2, exp: ne, fn: ne_neg129_int16},
+ {idx: 3, exp: lt, fn: lt_neg128_int16},
+ {idx: 3, exp: le, fn: le_neg128_int16},
+ {idx: 3, exp: gt, fn: gt_neg128_int16},
+ {idx: 3, exp: ge, fn: ge_neg128_int16},
+ {idx: 3, exp: eq, fn: eq_neg128_int16},
+ {idx: 3, exp: ne, fn: ne_neg128_int16},
+ {idx: 4, exp: lt, fn: lt_neg127_int16},
+ {idx: 4, exp: le, fn: le_neg127_int16},
+ {idx: 4, exp: gt, fn: gt_neg127_int16},
+ {idx: 4, exp: ge, fn: ge_neg127_int16},
+ {idx: 4, exp: eq, fn: eq_neg127_int16},
+ {idx: 4, exp: ne, fn: ne_neg127_int16},
+ {idx: 5, exp: lt, fn: lt_neg1_int16},
+ {idx: 5, exp: le, fn: le_neg1_int16},
+ {idx: 5, exp: gt, fn: gt_neg1_int16},
+ {idx: 5, exp: ge, fn: ge_neg1_int16},
+ {idx: 5, exp: eq, fn: eq_neg1_int16},
+ {idx: 5, exp: ne, fn: ne_neg1_int16},
+ {idx: 6, exp: lt, fn: lt_0_int16},
+ {idx: 6, exp: le, fn: le_0_int16},
+ {idx: 6, exp: gt, fn: gt_0_int16},
+ {idx: 6, exp: ge, fn: ge_0_int16},
+ {idx: 6, exp: eq, fn: eq_0_int16},
+ {idx: 6, exp: ne, fn: ne_0_int16},
+ {idx: 7, exp: lt, fn: lt_1_int16},
+ {idx: 7, exp: le, fn: le_1_int16},
+ {idx: 7, exp: gt, fn: gt_1_int16},
+ {idx: 7, exp: ge, fn: ge_1_int16},
+ {idx: 7, exp: eq, fn: eq_1_int16},
+ {idx: 7, exp: ne, fn: ne_1_int16},
+ {idx: 8, exp: lt, fn: lt_126_int16},
+ {idx: 8, exp: le, fn: le_126_int16},
+ {idx: 8, exp: gt, fn: gt_126_int16},
+ {idx: 8, exp: ge, fn: ge_126_int16},
+ {idx: 8, exp: eq, fn: eq_126_int16},
+ {idx: 8, exp: ne, fn: ne_126_int16},
+ {idx: 9, exp: lt, fn: lt_127_int16},
+ {idx: 9, exp: le, fn: le_127_int16},
+ {idx: 9, exp: gt, fn: gt_127_int16},
+ {idx: 9, exp: ge, fn: ge_127_int16},
+ {idx: 9, exp: eq, fn: eq_127_int16},
+ {idx: 9, exp: ne, fn: ne_127_int16},
+ {idx: 10, exp: lt, fn: lt_128_int16},
+ {idx: 10, exp: le, fn: le_128_int16},
+ {idx: 10, exp: gt, fn: gt_128_int16},
+ {idx: 10, exp: ge, fn: ge_128_int16},
+ {idx: 10, exp: eq, fn: eq_128_int16},
+ {idx: 10, exp: ne, fn: ne_128_int16},
+ {idx: 11, exp: lt, fn: lt_254_int16},
+ {idx: 11, exp: le, fn: le_254_int16},
+ {idx: 11, exp: gt, fn: gt_254_int16},
+ {idx: 11, exp: ge, fn: ge_254_int16},
+ {idx: 11, exp: eq, fn: eq_254_int16},
+ {idx: 11, exp: ne, fn: ne_254_int16},
+ {idx: 12, exp: lt, fn: lt_255_int16},
+ {idx: 12, exp: le, fn: le_255_int16},
+ {idx: 12, exp: gt, fn: gt_255_int16},
+ {idx: 12, exp: ge, fn: ge_255_int16},
+ {idx: 12, exp: eq, fn: eq_255_int16},
+ {idx: 12, exp: ne, fn: ne_255_int16},
+ {idx: 13, exp: lt, fn: lt_256_int16},
+ {idx: 13, exp: le, fn: le_256_int16},
+ {idx: 13, exp: gt, fn: gt_256_int16},
+ {idx: 13, exp: ge, fn: ge_256_int16},
+ {idx: 13, exp: eq, fn: eq_256_int16},
+ {idx: 13, exp: ne, fn: ne_256_int16},
+ {idx: 14, exp: lt, fn: lt_32766_int16},
+ {idx: 14, exp: le, fn: le_32766_int16},
+ {idx: 14, exp: gt, fn: gt_32766_int16},
+ {idx: 14, exp: ge, fn: ge_32766_int16},
+ {idx: 14, exp: eq, fn: eq_32766_int16},
+ {idx: 14, exp: ne, fn: ne_32766_int16},
+ {idx: 15, exp: lt, fn: lt_32767_int16},
+ {idx: 15, exp: le, fn: le_32767_int16},
+ {idx: 15, exp: gt, fn: gt_32767_int16},
+ {idx: 15, exp: ge, fn: ge_32767_int16},
+ {idx: 15, exp: eq, fn: eq_32767_int16},
+ {idx: 15, exp: ne, fn: ne_32767_int16},
+}
+
+// int8 tests
+var int8_vals = []int8{
+ -128,
+ -127,
+ -1,
+ 0,
+ 1,
+ 126,
+ 127,
+}
+
+func lt_neg128_int8(x int8) bool { return x < -128 }
+func le_neg128_int8(x int8) bool { return x <= -128 }
+func gt_neg128_int8(x int8) bool { return x > -128 }
+func ge_neg128_int8(x int8) bool { return x >= -128 }
+func eq_neg128_int8(x int8) bool { return x == -128 }
+func ne_neg128_int8(x int8) bool { return x != -128 }
+func lt_neg127_int8(x int8) bool { return x < -127 }
+func le_neg127_int8(x int8) bool { return x <= -127 }
+func gt_neg127_int8(x int8) bool { return x > -127 }
+func ge_neg127_int8(x int8) bool { return x >= -127 }
+func eq_neg127_int8(x int8) bool { return x == -127 }
+func ne_neg127_int8(x int8) bool { return x != -127 }
+func lt_neg1_int8(x int8) bool { return x < -1 }
+func le_neg1_int8(x int8) bool { return x <= -1 }
+func gt_neg1_int8(x int8) bool { return x > -1 }
+func ge_neg1_int8(x int8) bool { return x >= -1 }
+func eq_neg1_int8(x int8) bool { return x == -1 }
+func ne_neg1_int8(x int8) bool { return x != -1 }
+func lt_0_int8(x int8) bool { return x < 0 }
+func le_0_int8(x int8) bool { return x <= 0 }
+func gt_0_int8(x int8) bool { return x > 0 }
+func ge_0_int8(x int8) bool { return x >= 0 }
+func eq_0_int8(x int8) bool { return x == 0 }
+func ne_0_int8(x int8) bool { return x != 0 }
+func lt_1_int8(x int8) bool { return x < 1 }
+func le_1_int8(x int8) bool { return x <= 1 }
+func gt_1_int8(x int8) bool { return x > 1 }
+func ge_1_int8(x int8) bool { return x >= 1 }
+func eq_1_int8(x int8) bool { return x == 1 }
+func ne_1_int8(x int8) bool { return x != 1 }
+func lt_126_int8(x int8) bool { return x < 126 }
+func le_126_int8(x int8) bool { return x <= 126 }
+func gt_126_int8(x int8) bool { return x > 126 }
+func ge_126_int8(x int8) bool { return x >= 126 }
+func eq_126_int8(x int8) bool { return x == 126 }
+func ne_126_int8(x int8) bool { return x != 126 }
+func lt_127_int8(x int8) bool { return x < 127 }
+func le_127_int8(x int8) bool { return x <= 127 }
+func gt_127_int8(x int8) bool { return x > 127 }
+func ge_127_int8(x int8) bool { return x >= 127 }
+func eq_127_int8(x int8) bool { return x == 127 }
+func ne_127_int8(x int8) bool { return x != 127 }
+
+var int8_tests = []struct {
+ idx int // index of the constant used
+ exp result // expected results
+ fn func(int8) bool
+}{
+ {idx: 0, exp: lt, fn: lt_neg128_int8},
+ {idx: 0, exp: le, fn: le_neg128_int8},
+ {idx: 0, exp: gt, fn: gt_neg128_int8},
+ {idx: 0, exp: ge, fn: ge_neg128_int8},
+ {idx: 0, exp: eq, fn: eq_neg128_int8},
+ {idx: 0, exp: ne, fn: ne_neg128_int8},
+ {idx: 1, exp: lt, fn: lt_neg127_int8},
+ {idx: 1, exp: le, fn: le_neg127_int8},
+ {idx: 1, exp: gt, fn: gt_neg127_int8},
+ {idx: 1, exp: ge, fn: ge_neg127_int8},
+ {idx: 1, exp: eq, fn: eq_neg127_int8},
+ {idx: 1, exp: ne, fn: ne_neg127_int8},
+ {idx: 2, exp: lt, fn: lt_neg1_int8},
+ {idx: 2, exp: le, fn: le_neg1_int8},
+ {idx: 2, exp: gt, fn: gt_neg1_int8},
+ {idx: 2, exp: ge, fn: ge_neg1_int8},
+ {idx: 2, exp: eq, fn: eq_neg1_int8},
+ {idx: 2, exp: ne, fn: ne_neg1_int8},
+ {idx: 3, exp: lt, fn: lt_0_int8},
+ {idx: 3, exp: le, fn: le_0_int8},
+ {idx: 3, exp: gt, fn: gt_0_int8},
+ {idx: 3, exp: ge, fn: ge_0_int8},
+ {idx: 3, exp: eq, fn: eq_0_int8},
+ {idx: 3, exp: ne, fn: ne_0_int8},
+ {idx: 4, exp: lt, fn: lt_1_int8},
+ {idx: 4, exp: le, fn: le_1_int8},
+ {idx: 4, exp: gt, fn: gt_1_int8},
+ {idx: 4, exp: ge, fn: ge_1_int8},
+ {idx: 4, exp: eq, fn: eq_1_int8},
+ {idx: 4, exp: ne, fn: ne_1_int8},
+ {idx: 5, exp: lt, fn: lt_126_int8},
+ {idx: 5, exp: le, fn: le_126_int8},
+ {idx: 5, exp: gt, fn: gt_126_int8},
+ {idx: 5, exp: ge, fn: ge_126_int8},
+ {idx: 5, exp: eq, fn: eq_126_int8},
+ {idx: 5, exp: ne, fn: ne_126_int8},
+ {idx: 6, exp: lt, fn: lt_127_int8},
+ {idx: 6, exp: le, fn: le_127_int8},
+ {idx: 6, exp: gt, fn: gt_127_int8},
+ {idx: 6, exp: ge, fn: ge_127_int8},
+ {idx: 6, exp: eq, fn: eq_127_int8},
+ {idx: 6, exp: ne, fn: ne_127_int8},
+}
+
+// TestComparisonsConst tests results for comparison operations against constants.
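+// Each *_vals slice is sorted in ascending order, so for a table entry with
+// index idx the expected result is exp.l for values below the constant,
+// exp.e for the value at idx, and exp.r for values above it.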
+func TestComparisonsConst(t *testing.T) {
+ for i, test := range uint64_tests {
+ for j, x := range uint64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint32_tests {
+ for j, x := range uint32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint16_tests {
+ for j, x := range uint16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range uint8_tests {
+ for j, x := range uint8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=uint8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int64_tests {
+ for j, x := range int64_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int64 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int32_tests {
+ for j, x := range int32_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int32 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int16_tests {
+ for j, x := range int16_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int16 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+ for i, test := range int8_tests {
+ for j, x := range int8_vals {
+ want := test.exp.l
+ if j == test.idx {
+ want = test.exp.e
+ } else if j > test.idx {
+ want = test.exp.r
+ }
+ if test.fn(x) != want {
+ fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()
+ t.Errorf("test failed: %v(%v) != %v [type=int8 i=%v j=%v idx=%v]", fn, x, want, i, j, test.idx)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/cmp_test.go b/src/cmd/compile/internal/test/testdata/cmp_test.go
new file mode 100644
index 0000000..06b58f2
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/cmp_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// cmp_test.go tests compare simplification operations.
+package main
+
+import "testing"
+
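+// eq_ssa and neq_ssa are marked //go:noinline so that the comparisons below
+// are compiled inside these functions instead of being constant-folded away
+// at the call sites in testCmp.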
+//go:noinline
+func eq_ssa(a int64) bool {
+ return 4+a == 10
+}
+
+//go:noinline
+func neq_ssa(a int64) bool {
+ return 10 != a+4
+}
+
+func testCmp(t *testing.T) {
+ if wanted, got := true, eq_ssa(6); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, eq_ssa(7); wanted != got {
+ t.Errorf("eq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := false, neq_ssa(6); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+ if wanted, got := true, neq_ssa(7); wanted != got {
+ t.Errorf("neq_ssa: expected %v, got %v\n", wanted, got)
+ }
+}
+
+func TestCmp(t *testing.T) {
+ testCmp(t)
+}
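(Illustration only, not part of the patch: cmp_test.go marks its helpers //go:noinline so the constant comparison cannot be folded away at the call site, which forces the compiler's compare-simplification rules to run on real compiled code. A hedged sketch of the same pattern for another rewrite, with a hypothetical helper name, might look like this.)

package main

import "testing"

//go:noinline
func lt_ssa(a int64) bool {
	return a+4 < 10 // the compiler may rewrite this to a < 6
}

func TestLt(t *testing.T) {
	if wanted, got := true, lt_ssa(5); wanted != got {
		t.Errorf("lt_ssa: expected %v, got %v", wanted, got)
	}
	if wanted, got := false, lt_ssa(6); wanted != got {
		t.Errorf("lt_ssa: expected %v, got %v", wanted, got)
	}
}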
diff --git a/src/cmd/compile/internal/test/testdata/compound_test.go b/src/cmd/compile/internal/test/testdata/compound_test.go
new file mode 100644
index 0000000..4ae464d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/compound_test.go
@@ -0,0 +1,128 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test compound objects
+
+package main
+
+import (
+ "testing"
+)
+
+func string_ssa(a, b string, x bool) string {
+ s := ""
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testString(t *testing.T) {
+ a := "foo"
+ b := "barz"
+ if want, got := a, string_ssa(a, b, true); got != want {
+ t.Errorf("string_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, string_ssa(a, b, false); got != want {
+ t.Errorf("string_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+//go:noinline
+func complex64_ssa(a, b complex64, x bool) complex64 {
+ var c complex64
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+//go:noinline
+func complex128_ssa(a, b complex128, x bool) complex128 {
+ var c complex128
+ if x {
+ c = a
+ } else {
+ c = b
+ }
+ return c
+}
+
+func testComplex64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 4i
+
+ if want, got := a, complex64_ssa(a, b, true); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex64_ssa(a, b, false); got != want {
+ t.Errorf("complex64_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func testComplex128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 4i
+
+ if want, got := a, complex128_ssa(a, b, true); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := b, complex128_ssa(a, b, false); got != want {
+ t.Errorf("complex128_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func slice_ssa(a, b []byte, x bool) []byte {
+ var s []byte
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testSlice(t *testing.T) {
+ a := []byte{3, 4, 5}
+ b := []byte{7, 8, 9}
+ if want, got := byte(3), slice_ssa(a, b, true)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := byte(7), slice_ssa(a, b, false)[0]; got != want {
+ t.Errorf("slice_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func interface_ssa(a, b interface{}, x bool) interface{} {
+ var s interface{}
+ if x {
+ s = a
+ } else {
+ s = b
+ }
+ return s
+}
+
+func testInterface(t *testing.T) {
+ a := interface{}(3)
+ b := interface{}(4)
+ if want, got := 3, interface_ssa(a, b, true).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, true) = %v, want %v\n", a, b, got, want)
+ }
+ if want, got := 4, interface_ssa(a, b, false).(int); got != want {
+ t.Errorf("interface_ssa(%v, %v, false) = %v, want %v\n", a, b, got, want)
+ }
+}
+
+func TestCompound(t *testing.T) {
+ testString(t)
+ testSlice(t)
+ testInterface(t)
+ testComplex64(t)
+ testComplex128(t)
+}
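(Illustration only, not part of the patch: compound_test.go checks that selecting between two values of a compound type — string, complex, slice, interface — survives SSA construction intact. The same pattern extended to a small struct type, with hypothetical names, could look like this.)

package main

import "testing"

type pair struct{ a, b int64 }

//go:noinline
func pair_ssa(x, y pair, sel bool) pair {
	var p pair
	if sel {
		p = x
	} else {
		p = y
	}
	return p
}

func TestPair(t *testing.T) {
	x := pair{1, 2}
	y := pair{3, 4}
	if want, got := x, pair_ssa(x, y, true); got != want {
		t.Errorf("pair_ssa(%v, %v, true) = %v, want %v", x, y, got, want)
	}
	if want, got := y, pair_ssa(x, y, false); got != want {
		t.Errorf("pair_ssa(%v, %v, false) = %v, want %v", x, y, got, want)
	}
}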
diff --git a/src/cmd/compile/internal/test/testdata/copy_test.go b/src/cmd/compile/internal/test/testdata/copy_test.go
new file mode 100644
index 0000000..c29611d
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/copy_test.go
@@ -0,0 +1,760 @@
+// Code generated by gen/copyGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type T1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1copy_ssa(y, x *[1]byte) {
+ *y = *x
+}
+func testCopy1(t *testing.T) {
+ a := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{0}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1]byte{100}
+ t1copy_ssa(&a.mid, &x)
+ want := T1{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1]byte{100}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func t2copy_ssa(y, x *[2]byte) {
+ *y = *x
+}
+func testCopy2(t *testing.T) {
+ a := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{0, 1}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [2]byte{100, 101}
+ t2copy_ssa(&a.mid, &x)
+ want := T2{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [2]byte{100, 101}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t2copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func t3copy_ssa(y, x *[3]byte) {
+ *y = *x
+}
+func testCopy3(t *testing.T) {
+ a := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{0, 1, 2}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [3]byte{100, 101, 102}
+ t3copy_ssa(&a.mid, &x)
+ want := T3{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [3]byte{100, 101, 102}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t3copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func t4copy_ssa(y, x *[4]byte) {
+ *y = *x
+}
+func testCopy4(t *testing.T) {
+ a := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{0, 1, 2, 3}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [4]byte{100, 101, 102, 103}
+ t4copy_ssa(&a.mid, &x)
+ want := T4{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [4]byte{100, 101, 102, 103}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t4copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func t5copy_ssa(y, x *[5]byte) {
+ *y = *x
+}
+func testCopy5(t *testing.T) {
+ a := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{0, 1, 2, 3, 4}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [5]byte{100, 101, 102, 103, 104}
+ t5copy_ssa(&a.mid, &x)
+ want := T5{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [5]byte{100, 101, 102, 103, 104}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t5copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func t6copy_ssa(y, x *[6]byte) {
+ *y = *x
+}
+func testCopy6(t *testing.T) {
+ a := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{0, 1, 2, 3, 4, 5}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [6]byte{100, 101, 102, 103, 104, 105}
+ t6copy_ssa(&a.mid, &x)
+ want := T6{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [6]byte{100, 101, 102, 103, 104, 105}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t6copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func t7copy_ssa(y, x *[7]byte) {
+ *y = *x
+}
+func testCopy7(t *testing.T) {
+ a := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{0, 1, 2, 3, 4, 5, 6}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [7]byte{100, 101, 102, 103, 104, 105, 106}
+ t7copy_ssa(&a.mid, &x)
+ want := T7{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [7]byte{100, 101, 102, 103, 104, 105, 106}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t7copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func t8copy_ssa(y, x *[8]byte) {
+ *y = *x
+}
+func testCopy8(t *testing.T) {
+ a := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{0, 1, 2, 3, 4, 5, 6, 7}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [8]byte{100, 101, 102, 103, 104, 105, 106, 107}
+ t8copy_ssa(&a.mid, &x)
+ want := T8{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [8]byte{100, 101, 102, 103, 104, 105, 106, 107}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t8copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func t9copy_ssa(y, x *[9]byte) {
+ *y = *x
+}
+func testCopy9(t *testing.T) {
+ a := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{0, 1, 2, 3, 4, 5, 6, 7, 8}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}
+ t9copy_ssa(&a.mid, &x)
+ want := T9{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [9]byte{100, 101, 102, 103, 104, 105, 106, 107, 108}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t9copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func t10copy_ssa(y, x *[10]byte) {
+ *y = *x
+}
+func testCopy10(t *testing.T) {
+ a := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}
+ t10copy_ssa(&a.mid, &x)
+ want := T10{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [10]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t10copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func t15copy_ssa(y, x *[15]byte) {
+ *y = *x
+}
+func testCopy15(t *testing.T) {
+ a := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}
+ t15copy_ssa(&a.mid, &x)
+ want := T15{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [15]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t15copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func t16copy_ssa(y, x *[16]byte) {
+ *y = *x
+}
+func testCopy16(t *testing.T) {
+ a := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}
+ t16copy_ssa(&a.mid, &x)
+ want := T16{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [16]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t16copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func t17copy_ssa(y, x *[17]byte) {
+ *y = *x
+}
+func testCopy17(t *testing.T) {
+ a := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}
+ t17copy_ssa(&a.mid, &x)
+ want := T17{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [17]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t17copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func t23copy_ssa(y, x *[23]byte) {
+ *y = *x
+}
+func testCopy23(t *testing.T) {
+ a := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t23copy_ssa(&a.mid, &x)
+ want := T23{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [23]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t23copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func t24copy_ssa(y, x *[24]byte) {
+ *y = *x
+}
+func testCopy24(t *testing.T) {
+ a := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t24copy_ssa(&a.mid, &x)
+ want := T24{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [24]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t24copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func t25copy_ssa(y, x *[25]byte) {
+ *y = *x
+}
+func testCopy25(t *testing.T) {
+ a := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t25copy_ssa(&a.mid, &x)
+ want := T25{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [25]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t25copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func t31copy_ssa(y, x *[31]byte) {
+ *y = *x
+}
+func testCopy31(t *testing.T) {
+ a := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t31copy_ssa(&a.mid, &x)
+ want := T31{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [31]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t31copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func t32copy_ssa(y, x *[32]byte) {
+ *y = *x
+}
+func testCopy32(t *testing.T) {
+ a := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t32copy_ssa(&a.mid, &x)
+ want := T32{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [32]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t32copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func t33copy_ssa(y, x *[33]byte) {
+ *y = *x
+}
+func testCopy33(t *testing.T) {
+ a := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t33copy_ssa(&a.mid, &x)
+ want := T33{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [33]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t33copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func t63copy_ssa(y, x *[63]byte) {
+ *y = *x
+}
+func testCopy63(t *testing.T) {
+ a := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}
+ t63copy_ssa(&a.mid, &x)
+ want := T63{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [63]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t63copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func t64copy_ssa(y, x *[64]byte) {
+ *y = *x
+}
+func testCopy64(t *testing.T) {
+ a := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}
+ t64copy_ssa(&a.mid, &x)
+ want := T64{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [64]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t64copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func t65copy_ssa(y, x *[65]byte) {
+ *y = *x
+}
+func testCopy65(t *testing.T) {
+ a := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}
+ t65copy_ssa(&a.mid, &x)
+ want := T65{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [65]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t65copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1023copy_ssa(y, x *[1023]byte) {
+ *y = *x
+}
+func testCopy1023(t *testing.T) {
+ a := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}
+ t1023copy_ssa(&a.mid, &x)
+ want := T1023{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1023]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1023copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1024copy_ssa(y, x *[1024]byte) {
+ *y = *x
+}
+func testCopy1024(t *testing.T) {
+ a := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}
+ t1024copy_ssa(&a.mid, &x)
+ want := T1024{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1024]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1024copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1025copy_ssa(y, x *[1025]byte) {
+ *y = *x
+}
+func testCopy1025(t *testing.T) {
+ a := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}
+ t1025copy_ssa(&a.mid, &x)
+ want := T1025{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1025]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1025copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1031 struct {
+ pre [8]byte
+ mid [1031]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1031copy_ssa(y, x *[1031]byte) {
+ *y = *x
+}
+func testCopy1031(t *testing.T) {
+ a := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}
+ t1031copy_ssa(&a.mid, &x)
+ want := T1031{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1031]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1031copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1032 struct {
+ pre [8]byte
+ mid [1032]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1032copy_ssa(y, x *[1032]byte) {
+ *y = *x
+}
+func testCopy1032(t *testing.T) {
+ a := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}
+ t1032copy_ssa(&a.mid, &x)
+ want := T1032{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1032]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1032copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1033 struct {
+ pre [8]byte
+ mid [1033]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1033copy_ssa(y, x *[1033]byte) {
+ *y = *x
+}
+func testCopy1033(t *testing.T) {
+ a := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}
+ t1033copy_ssa(&a.mid, &x)
+ want := T1033{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1033]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1033copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1039 struct {
+ pre [8]byte
+ mid [1039]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1039copy_ssa(y, x *[1039]byte) {
+ *y = *x
+}
+func testCopy1039(t *testing.T) {
+ a := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}
+ t1039copy_ssa(&a.mid, &x)
+ want := T1039{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1039]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1039copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1040 struct {
+ pre [8]byte
+ mid [1040]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1040copy_ssa(y, x *[1040]byte) {
+ *y = *x
+}
+func testCopy1040(t *testing.T) {
+ a := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}
+ t1040copy_ssa(&a.mid, &x)
+ want := T1040{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1040]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1040copy got=%v, want %v\n", a, want)
+ }
+}
+
+type T1041 struct {
+ pre [8]byte
+ mid [1041]byte
+ post [8]byte
+}
+
+//go:noinline
+func t1041copy_ssa(y, x *[1041]byte) {
+ *y = *x
+}
+func testCopy1041(t *testing.T) {
+ a := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 
93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40, 41, 42, 43, 44, 45, 46, 47, 48, 49, 50, 51, 52, 53, 54, 55, 56, 57, 58, 59, 60, 61, 62, 63, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 80, 81, 82, 83, 84, 85, 86, 87, 88, 89, 90, 91, 92, 93, 94, 95, 96, 97, 98, 99, 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 27, 28, 29, 30, 31, 32, 33, 34, 35, 36, 37, 38, 39, 40}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ x := [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 
107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}
+ t1041copy_ssa(&a.mid, &x)
+ want := T1041{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [1041]byte{100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 
195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140, 141, 142, 143, 144, 145, 146, 147, 148, 149, 150, 151, 152, 153, 154, 155, 156, 157, 158, 159, 160, 161, 162, 163, 164, 165, 166, 167, 168, 169, 170, 171, 172, 173, 174, 175, 176, 177, 178, 179, 180, 181, 182, 183, 184, 185, 186, 187, 188, 189, 190, 191, 192, 193, 194, 195, 196, 197, 198, 199, 100, 101, 102, 103, 104, 105, 106, 107, 108, 109, 110, 111, 112, 113, 114, 115, 116, 117, 118, 119, 120, 121, 122, 123, 124, 125, 126, 127, 128, 129, 130, 131, 132, 133, 134, 135, 136, 137, 138, 139, 140}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}
+ if a != want {
+ t.Errorf("t1041copy got=%v, want %v\n", a, want)
+ }
+}
+
+//go:noinline
+func tu2copy_ssa(docopy bool, data [2]byte, x *[2]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy2(t *testing.T) {
+ var a [2]byte
+ t2 := [2]byte{2, 3}
+ tu2copy_ssa(true, t2, &a)
+ want2 := [2]byte{2, 3}
+ if a != want2 {
+ t.Errorf("tu2copy got=%v, want %v\n", a, want2)
+ }
+}
+
+//go:noinline
+func tu3copy_ssa(docopy bool, data [3]byte, x *[3]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy3(t *testing.T) {
+ var a [3]byte
+ t3 := [3]byte{3, 4, 5}
+ tu3copy_ssa(true, t3, &a)
+ want3 := [3]byte{3, 4, 5}
+ if a != want3 {
+ t.Errorf("tu3copy got=%v, want %v\n", a, want3)
+ }
+}
+
+//go:noinline
+func tu4copy_ssa(docopy bool, data [4]byte, x *[4]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy4(t *testing.T) {
+ var a [4]byte
+ t4 := [4]byte{4, 5, 6, 7}
+ tu4copy_ssa(true, t4, &a)
+ want4 := [4]byte{4, 5, 6, 7}
+ if a != want4 {
+ t.Errorf("tu4copy got=%v, want %v\n", a, want4)
+ }
+}
+
+//go:noinline
+func tu5copy_ssa(docopy bool, data [5]byte, x *[5]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy5(t *testing.T) {
+ var a [5]byte
+ t5 := [5]byte{5, 6, 7, 8, 9}
+ tu5copy_ssa(true, t5, &a)
+ want5 := [5]byte{5, 6, 7, 8, 9}
+ if a != want5 {
+ t.Errorf("tu5copy got=%v, want %v\n", a, want5)
+ }
+}
+
+//go:noinline
+func tu6copy_ssa(docopy bool, data [6]byte, x *[6]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy6(t *testing.T) {
+ var a [6]byte
+ t6 := [6]byte{6, 7, 8, 9, 10, 11}
+ tu6copy_ssa(true, t6, &a)
+ want6 := [6]byte{6, 7, 8, 9, 10, 11}
+ if a != want6 {
+ t.Errorf("tu6copy got=%v, want %v\n", a, want6)
+ }
+}
+
+//go:noinline
+func tu7copy_ssa(docopy bool, data [7]byte, x *[7]byte) {
+ if docopy {
+ *x = data
+ }
+}
+func testUnalignedCopy7(t *testing.T) {
+ var a [7]byte
+ t7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ tu7copy_ssa(true, t7, &a)
+ want7 := [7]byte{7, 8, 9, 10, 11, 12, 13}
+ if a != want7 {
+ t.Errorf("tu7copy got=%v, want %v\n", a, want7)
+ }
+}
+func TestCopy(t *testing.T) {
+ testCopy1(t)
+ testCopy2(t)
+ testCopy3(t)
+ testCopy4(t)
+ testCopy5(t)
+ testCopy6(t)
+ testCopy7(t)
+ testCopy8(t)
+ testCopy9(t)
+ testCopy10(t)
+ testCopy15(t)
+ testCopy16(t)
+ testCopy17(t)
+ testCopy23(t)
+ testCopy24(t)
+ testCopy25(t)
+ testCopy31(t)
+ testCopy32(t)
+ testCopy33(t)
+ testCopy63(t)
+ testCopy64(t)
+ testCopy65(t)
+ testCopy1023(t)
+ testCopy1024(t)
+ testCopy1025(t)
+ testCopy1031(t)
+ testCopy1032(t)
+ testCopy1033(t)
+ testCopy1039(t)
+ testCopy1040(t)
+ testCopy1041(t)
+ testUnalignedCopy2(t)
+ testUnalignedCopy3(t)
+ testUnalignedCopy4(t)
+ testUnalignedCopy5(t)
+ testUnalignedCopy6(t)
+ testUnalignedCopy7(t)
+}
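
The fixed-size copy tests above all follow one mechanical pattern: a struct whose 8-byte pre and post fields guard an N-byte mid payload, a //go:noinline helper so the compiler must emit a genuine N-byte copy, and a check that the payload arrived intact while the guard bytes were left alone. Tables this repetitive are normally emitted by a small generator kept alongside the test data (flowgraph_generator1.go later in this change does the same for a different family of tests). The sketch below is a hypothetical, simplified stand-in, not the tool that produced this file; it fills the source array with a loop instead of spelling out every literal, but it emits tests of the same shape:

    // copytestgen is an illustrative sketch (not the actual generator) that
    // prints fixed-size copy tests in the shape used above to stdout.
    package main

    import "fmt"

    var sizes = []int{1, 1024, 1041} // a small subset of the sizes exercised above

    func main() {
        fmt.Print("package main\n\nimport \"testing\"\n")
        for _, s := range sizes {
            // Guarded struct, noinline copy helper, and a per-size test that
            // checks both the copied payload and the untouched guard bytes.
            fmt.Printf("\ntype T%d struct {\n\tpre  [8]byte\n\tmid  [%d]byte\n\tpost [8]byte\n}\n", s, s)
            fmt.Printf("\n//go:noinline\nfunc t%dcopy_ssa(y, x *[%d]byte) {\n\t*y = *x\n}\n", s, s)
            fmt.Printf("\nfunc testCopy%d(t *testing.T) {\n", s)
            fmt.Printf("\ta := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208}, [%d]byte{}, [8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n", s, s)
            fmt.Printf("\tvar x [%d]byte\n\tfor i := range x {\n\t\tx[i] = byte(100 + i%%100)\n\t}\n", s)
            fmt.Printf("\tt%dcopy_ssa(&a.mid, &x)\n", s)
            fmt.Print("\tif a.mid != x || a.pre != [8]byte{201, 202, 203, 204, 205, 206, 207, 208} || a.post != [8]byte{211, 212, 213, 214, 215, 216, 217, 218} {\n")
            fmt.Printf("\t\tt.Errorf(\"t%dcopy: payload or guard bytes corrupted\")\n\t}\n}\n", s)
        }
        fmt.Print("\nfunc TestGeneratedCopy(t *testing.T) {\n")
        for _, s := range sizes {
            fmt.Printf("\ttestCopy%d(t)\n", s)
        }
        fmt.Print("}\n")
    }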
diff --git a/src/cmd/compile/internal/test/testdata/ctl_test.go b/src/cmd/compile/internal/test/testdata/ctl_test.go
new file mode 100644
index 0000000..16d571c
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ctl_test.go
@@ -0,0 +1,149 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test control flow
+
+package main
+
+import "testing"
+
+// nor_ssa calculates NOR(a, b).
+// It is implemented in a way that generates
+// phi control values.
+func nor_ssa(a, b bool) bool {
+ var c bool
+ if a {
+ c = true
+ }
+ if b {
+ c = true
+ }
+ if c {
+ return false
+ }
+ return true
+}
+
+func testPhiControl(t *testing.T) {
+ tests := [...][3]bool{ // a, b, want
+ {false, false, true},
+ {true, false, false},
+ {false, true, false},
+ {true, true, false},
+ }
+ for _, test := range tests {
+ a, b := test[0], test[1]
+ got := nor_ssa(a, b)
+ want := test[2]
+ if want != got {
+ t.Errorf("nor(%t, %t)=%t got %t", a, b, want, got)
+ }
+ }
+}
+
+func emptyRange_ssa(b []byte) bool {
+ for _, x := range b {
+ _ = x
+ }
+ return true
+}
+
+func testEmptyRange(t *testing.T) {
+ if !emptyRange_ssa([]byte{}) {
+ t.Errorf("emptyRange_ssa([]byte{})=false, want true")
+ }
+}
+
+func switch_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret += 5
+ case 4:
+ ret += 4
+ case 3:
+ ret += 3
+ case 2:
+ ret += 2
+ case 1:
+ ret += 1
+ }
+ return ret
+
+}
+
+func fallthrough_ssa(a int) int {
+ ret := 0
+ switch a {
+ case 5:
+ ret++
+ fallthrough
+ case 4:
+ ret++
+ fallthrough
+ case 3:
+ ret++
+ fallthrough
+ case 2:
+ ret++
+ fallthrough
+ case 1:
+ ret++
+ }
+ return ret
+
+}
+
+func testFallthrough(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := fallthrough_ssa(i); got != i {
+ t.Errorf("fallthrough_ssa(i) = %d, wanted %d", got, i)
+ }
+ }
+}
+
+func testSwitch(t *testing.T) {
+ for i := 0; i < 6; i++ {
+ if got := switch_ssa(i); got != i {
+ t.Errorf("switch_ssa(i) = %d, wanted %d", got, i)
+ }
+ }
+}
+
+type junk struct {
+ step int
+}
+
+// flagOverwrite_ssa is intended to reproduce an issue seen where an XOR
+// was scheduled between a compare and branch, clearing the flags.
+//go:noinline
+func flagOverwrite_ssa(s *junk, c int) int {
+ if '0' <= c && c <= '9' {
+ s.step = 0
+ return 1
+ }
+ if c == 'e' || c == 'E' {
+ s.step = 0
+ return 2
+ }
+ s.step = 0
+ return 3
+}
+
+func testFlagOverwrite(t *testing.T) {
+ j := junk{}
+ if got := flagOverwrite_ssa(&j, ' '); got != 3 {
+ t.Errorf("flagOverwrite_ssa = %d, wanted 3", got)
+ }
+}
+
+func TestCtl(t *testing.T) {
+ testPhiControl(t)
+ testEmptyRange(t)
+
+ testSwitch(t)
+ testFallthrough(t)
+
+ testFlagOverwrite(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
new file mode 100644
index 0000000..308e897
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/deferNoReturn_test.go
@@ -0,0 +1,21 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that a defer in a function with no return
+// statement will compile correctly.
+
+package main
+
+import "testing"
+
+func deferNoReturn_ssa() {
+ defer func() { println("returned") }()
+ for {
+ println("loop")
+ }
+}
+
+func TestDeferNoReturn(t *testing.T) {
+ // This is a compile-time test, no runtime testing required.
+}
diff --git a/src/cmd/compile/internal/test/testdata/divbyzero_test.go b/src/cmd/compile/internal/test/testdata/divbyzero_test.go
new file mode 100644
index 0000000..ee848b3
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/divbyzero_test.go
@@ -0,0 +1,48 @@
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+func checkDivByZero(f func()) (divByZero bool) {
+ defer func() {
+ if r := recover(); r != nil {
+ if e, ok := r.(runtime.Error); ok && e.Error() == "runtime error: integer divide by zero" {
+ divByZero = true
+ }
+ }
+ }()
+ f()
+ return false
+}
+
+//go:noinline
+func div_a(i uint, s []int) int {
+ return s[i%uint(len(s))]
+}
+
+//go:noinline
+func div_b(i uint, j uint) uint {
+ return i / j
+}
+
+//go:noinline
+func div_c(i int) int {
+ return 7 / (i - i)
+}
+
+func TestDivByZero(t *testing.T) {
+ if got := checkDivByZero(func() { div_b(7, 0) }); !got {
+ t.Errorf("expected div by zero for b(7, 0), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_b(7, 7) }); got {
+ t.Errorf("expected no error for b(7, 7), got div by zero\n")
+ }
+ if got := checkDivByZero(func() { div_a(4, nil) }); !got {
+ t.Errorf("expected div by zero for a(4, nil), got no error\n")
+ }
+ if got := checkDivByZero(func() { div_c(5) }); !got {
+ t.Errorf("expected div by zero for c(5), got no error\n")
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/dupLoad_test.go b/src/cmd/compile/internal/test/testdata/dupLoad_test.go
new file mode 100644
index 0000000..d859123
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/dupLoad_test.go
@@ -0,0 +1,83 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that we don't split a single
+// load up into two separate loads.
+
+package main
+
+import "testing"
+
+//go:noinline
+func read1(b []byte) (uint16, uint16) {
+ // There is only a single read of b[0]. The two
+ // returned values must have the same low byte.
+ v := b[0]
+ return uint16(v), uint16(v) | uint16(b[1])<<8
+}
+
+func main1(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read1(b)
+ if byte(x) != byte(y) {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+//go:noinline
+func read2(b []byte) (uint16, uint16) {
+ // There is only a single read of b[1]. The two
+ // returned values must have the same high byte.
+ v := uint16(b[1]) << 8
+ return v, uint16(b[0]) | v
+}
+
+func main2(t *testing.T) {
+ const N = 100000
+ done := make(chan bool, 2)
+ b := make([]byte, 2)
+ go func() {
+ for i := 0; i < N; i++ {
+ b[0] = byte(i)
+ b[1] = byte(i)
+ }
+ done <- true
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x, y := read2(b)
+ if x&0xff00 != y&0xff00 {
+ t.Errorf("x=%x y=%x\n", x, y)
+ done <- false
+ return
+ }
+ }
+ done <- true
+ }()
+ <-done
+ <-done
+}
+
+func TestDupLoad(t *testing.T) {
+ main1(t)
+ main2(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
new file mode 100644
index 0000000..ad22601
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/flowgraph_generator1.go
@@ -0,0 +1,315 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "fmt"
+ "strings"
+)
+
+// make fake flow graph.
+
+// The blocks of the flow graph are designated with letters A
+// through Z, always including A (start block) and Z (exit
+// block). The specification of a flow graph is a comma-
+// separated list of block successor words, for blocks ordered
+// A, B, C, etc., where each block except Z has one or two
+// successors, and any block except A can be a target. Within
+// the generated code, each block with two successors includes
+// a conditional testing x & 1 != 0 (x is the input parameter
+// to the generated function) and also unconditionally shifts x
+// right by one, so that different inputs generate different
+// execution paths, including loops. Every block inverts a
+// global boolean (glob) to ensure it is not empty. For a flow
+// graph with J words (J+1 blocks), a J-1 bit serial number
+// specifies which blocks (not including A and Z) include an
+// increment of the return variable y by increasing powers of
+// 10, and a different version of the test function is created
+// for each of the 2-to-the-(J-1) serial numbers.
+
+// For each generated function a compact summary is also
+// created so that the generated function can be simulated
+// with a simple interpreter to sanity check the behavior of
+// the compiled code.
+
+// For example:
+
+// func BC_CD_BE_BZ_CZ101(x int64) int64 {
+// y := int64(0)
+// var b int64
+// _ = b
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto C
+// }
+// goto B
+// B:
+// glob = !glob
+// y += 1
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto D
+// }
+// goto C
+// C:
+// glob = !glob
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto E
+// }
+// goto B
+// D:
+// glob = !glob
+// y += 10
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto B
+// E:
+// glob = !glob
+// // no y increment
+// b = x & 1
+// x = x >> 1
+// if b != 0 {
+// goto Z
+// }
+// goto C
+// Z:
+// return y
+// }
+
+// {f:BC_CD_BE_BZ_CZ101,
+// maxin:32, blocks:[]blo{
+// blo{inc:0, cond:true, succs:[2]int64{1, 2}},
+// blo{inc:1, cond:true, succs:[2]int64{2, 3}},
+// blo{inc:0, cond:true, succs:[2]int64{1, 4}},
+// blo{inc:10, cond:true, succs:[2]int64{1, 25}},
+// blo{inc:0, cond:true, succs:[2]int64{2, 25}},}},
+
+var labels string = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
+
+func blocks(spec string) (blocks []string, fnameBase string) {
+ spec = strings.ToUpper(spec)
+ blocks = strings.Split(spec, ",")
+ fnameBase = strings.Replace(spec, ",", "_", -1)
+ return
+}
+
+func makeFunctionFromFlowGraph(blocks []blo, fname string) string {
+ s := ""
+
+ for j := range blocks {
+ // begin block
+ if j == 0 {
+ // block A, implicit label
+ s += `
+func ` + fname + `(x int64) int64 {
+ y := int64(0)
+ var b int64
+ _ = b`
+ } else {
+ // block B,C, etc, explicit label w/ conditional increment
+ l := labels[j : j+1]
+ yeq := `
+ // no y increment`
+ if blocks[j].inc != 0 {
+ yeq = `
+ y += ` + fmt.Sprintf("%d", blocks[j].inc)
+ }
+
+ s += `
+` + l + `:
+ glob = !glob` + yeq
+ }
+
+ // edges to successors
+ if blocks[j].cond { // conditionally branch to second successor
+ s += `
+ b = x & 1
+ x = x >> 1
+ if b != 0 {` + `
+ goto ` + string(labels[blocks[j].succs[1]]) + `
+ }`
+
+ }
+ // branch to first successor
+ s += `
+ goto ` + string(labels[blocks[j].succs[0]])
+ }
+
+ // end block (Z)
+ s += `
+Z:
+ return y
+}
+`
+ return s
+}
+
+var graphs []string = []string{
+ "Z", "BZ,Z", "B,BZ", "BZ,BZ",
+ "ZB,Z", "B,ZB", "ZB,BZ", "ZB,ZB",
+
+ "BC,C,Z", "BC,BC,Z", "BC,BC,BZ",
+ "BC,Z,Z", "BC,ZC,Z", "BC,ZC,BZ",
+ "BZ,C,Z", "BZ,BC,Z", "BZ,CZ,Z",
+ "BZ,C,BZ", "BZ,BC,BZ", "BZ,CZ,BZ",
+ "BZ,C,CZ", "BZ,BC,CZ", "BZ,CZ,CZ",
+
+ "BC,CD,BE,BZ,CZ",
+ "BC,BD,CE,CZ,BZ",
+ "BC,BD,CE,FZ,GZ,F,G",
+ "BC,BD,CE,FZ,GZ,G,F",
+
+ "BC,DE,BE,FZ,FZ,Z",
+ "BC,DE,BE,FZ,ZF,Z",
+ "BC,DE,BE,ZF,FZ,Z",
+ "BC,DE,EB,FZ,FZ,Z",
+ "BC,ED,BE,FZ,FZ,Z",
+ "CB,DE,BE,FZ,FZ,Z",
+
+ "CB,ED,BE,FZ,FZ,Z",
+ "BC,ED,EB,FZ,ZF,Z",
+ "CB,DE,EB,ZF,FZ,Z",
+ "CB,ED,EB,FZ,FZ,Z",
+
+ "BZ,CD,CD,CE,BZ",
+ "EC,DF,FG,ZC,GB,BE,FD",
+ "BH,CF,DG,HE,BF,CG,DH,BZ",
+}
+
+// blo describes a block in the generated/interpreted code
+type blo struct {
+ inc int64 // increment amount
+ cond bool // block ends in conditional
+ succs [2]int64
+}
+
+// strings2blocks converts a slice of strings specifying
+// successors into a slice of blo encoding the blocks in a
+// common form easy to execute or interpret.
+func strings2blocks(blocks []string, fname string, i int) (bs []blo, cond uint) {
+ bs = make([]blo, len(blocks))
+ edge := int64(1)
+ cond = 0
+ k := uint(0)
+ for j, s := range blocks {
+ if j == 0 {
+ } else {
+ if (i>>k)&1 != 0 {
+ bs[j].inc = edge
+ edge *= 10
+ }
+ k++
+ }
+ if len(s) > 1 {
+ bs[j].succs[1] = int64(blocks[j][1] - 'A')
+ bs[j].cond = true
+ cond++
+ }
+ bs[j].succs[0] = int64(blocks[j][0] - 'A')
+ }
+ return bs, cond
+}
+
+// fmtBlocks writes out the blocks for consumption in the generated test
+func fmtBlocks(bs []blo) string {
+ s := "[]blo{"
+ for _, b := range bs {
+ s += fmt.Sprintf("blo{inc:%d, cond:%v, succs:[2]int64{%d, %d}},", b.inc, b.cond, b.succs[0], b.succs[1])
+ }
+ s += "}"
+ return s
+}
+
+func main() {
+ fmt.Printf(`// This is a machine-generated test file from flowgraph_generator1.go.
+package main
+import "fmt"
+var glob bool
+`)
+ s := "var funs []fun = []fun{"
+ for _, g := range graphs {
+ split, fnameBase := blocks(g)
+ nconfigs := 1 << uint(len(split)-1)
+
+ for i := 0; i < nconfigs; i++ {
+ fname := fnameBase + fmt.Sprintf("%b", i)
+ bs, k := strings2blocks(split, fname, i)
+ fmt.Printf("%s", makeFunctionFromFlowGraph(bs, fname))
+ s += `
+ {f:` + fname + `, maxin:` + fmt.Sprintf("%d", 1<<k) + `, blocks:` + fmtBlocks(bs) + `},`
+ }
+
+ }
+ s += `}
+`
+ // write types for name+array tables.
+ fmt.Printf("%s",
+ `
+type blo struct {
+ inc int64
+ cond bool
+ succs [2]int64
+}
+type fun struct {
+ f func(int64) int64
+ maxin int64
+ blocks []blo
+}
+`)
+ // write table of function names and blo arrays.
+ fmt.Printf("%s", s)
+
+ // write interpreter and main/test
+ fmt.Printf("%s", `
+func interpret(blocks []blo, x int64) (int64, bool) {
+ y := int64(0)
+ last := int64(25) // 'Z'-'A'
+ j := int64(0)
+ for i := 0; i < 4*len(blocks); i++ {
+ b := blocks[j]
+ y += b.inc
+ next := b.succs[0]
+ if b.cond {
+ c := x&1 != 0
+ x = x>>1
+ if c {
+ next = b.succs[1]
+ }
+ }
+ if next == last {
+ return y, true
+ }
+ j = next
+ }
+ return -1, false
+}
+
+func main() {
+ sum := int64(0)
+ for i, f := range funs {
+ for x := int64(0); x < 16*f.maxin; x++ {
+ y, ok := interpret(f.blocks, x)
+ if ok {
+ yy := f.f(x)
+ if y != yy {
+ fmt.Printf("y(%d) != yy(%d), x=%b, i=%d, blocks=%v\n", y, yy, x, i, f.blocks)
+ return
+ }
+ sum += y
+ }
+ }
+ }
+// fmt.Printf("Sum of all returns over all terminating inputs is %d\n", sum)
+}
+`)
+}
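
The long comment at the top of flowgraph_generator1.go shows a five-word example; the smallest specifications are easier to trace by hand. For the two-word spec "BZ,Z" with serial number 1 (the single serial bit assigns block B the y += 1 increment), strings2blocks builds two blo entries and makeFunctionFromFlowGraph emits a function along the lines of the sketch below (formatting aside). The function returns 1 for even x and 0 for odd x, and the companion table entry records maxin: 2 because exactly one block ends in a conditional; interpret walks the same blo table and cross-checks this at run time.

    // Emitted for the spec "BZ,Z", serial number 1. glob is declared once
    // in the header of the generated file.
    var glob bool

    func BZ_Z1(x int64) int64 {
        y := int64(0)
        var b int64
        _ = b
        b = x & 1
        x = x >> 1
        if b != 0 {
            goto Z
        }
        goto B
    B:
        glob = !glob
        y += 1
        goto Z
    Z:
        return y
    }

    // Matching table entry:
    // {f: BZ_Z1, maxin: 2, blocks: []blo{
    //     blo{inc: 0, cond: true, succs: [2]int64{1, 25}},
    //     blo{inc: 1, cond: false, succs: [2]int64{25, 0}}}},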
diff --git a/src/cmd/compile/internal/test/testdata/fp_test.go b/src/cmd/compile/internal/test/testdata/fp_test.go
new file mode 100644
index 0000000..7d61a80
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/fp_test.go
@@ -0,0 +1,1773 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests floating point arithmetic expressions
+
+package main
+
+import (
+ "fmt"
+ "testing"
+)
+
+// manysub_ssa is designed to tickle bugs that depend on register
+// pressure or unfriendly operand ordering in registers (and at
+// least once it succeeded in this).
+//go:noinline
+func manysub_ssa(a, b, c, d float64) (aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd float64) {
+ aa = a + 11.0 - a
+ ab = a - b
+ ac = a - c
+ ad = a - d
+ ba = b - a
+ bb = b + 22.0 - b
+ bc = b - c
+ bd = b - d
+ ca = c - a
+ cb = c - b
+ cc = c + 33.0 - c
+ cd = c - d
+ da = d - a
+ db = d - b
+ dc = d - c
+ dd = d + 44.0 - d
+ return
+}
+
+// fpspill_ssa attempts to trigger a bug where phis with floating point values
+// were stored in non-fp registers, causing an error in doasm.
+//go:noinline
+func fpspill_ssa(a int) float64 {
+
+ ret := -1.0
+ switch a {
+ case 0:
+ ret = 1.0
+ case 1:
+ ret = 1.1
+ case 2:
+ ret = 1.2
+ case 3:
+ ret = 1.3
+ case 4:
+ ret = 1.4
+ case 5:
+ ret = 1.5
+ case 6:
+ ret = 1.6
+ case 7:
+ ret = 1.7
+ case 8:
+ ret = 1.8
+ case 9:
+ ret = 1.9
+ case 10:
+ ret = 1.10
+ case 11:
+ ret = 1.11
+ case 12:
+ ret = 1.12
+ case 13:
+ ret = 1.13
+ case 14:
+ ret = 1.14
+ case 15:
+ ret = 1.15
+ case 16:
+ ret = 1.16
+ }
+ return ret
+}
+
+//go:noinline
+func add64_ssa(a, b float64) float64 {
+ return a + b
+}
+
+//go:noinline
+func mul64_ssa(a, b float64) float64 {
+ return a * b
+}
+
+//go:noinline
+func sub64_ssa(a, b float64) float64 {
+ return a - b
+}
+
+//go:noinline
+func div64_ssa(a, b float64) float64 {
+ return a / b
+}
+
+//go:noinline
+func neg64_ssa(a, b float64) float64 {
+ return -a + -1*b
+}
+
+//go:noinline
+func add32_ssa(a, b float32) float32 {
+ return a + b
+}
+
+//go:noinline
+func mul32_ssa(a, b float32) float32 {
+ return a * b
+}
+
+//go:noinline
+func sub32_ssa(a, b float32) float32 {
+ return a - b
+}
+
+//go:noinline
+func div32_ssa(a, b float32) float32 {
+ return a / b
+}
+
+//go:noinline
+func neg32_ssa(a, b float32) float32 {
+ return -a + -1*b
+}
+
+//go:noinline
+func conv2Float64_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float32) (aa, bb, cc, dd, ee, ff, gg, hh, ii float64) {
+ aa = float64(a)
+ bb = float64(b)
+ cc = float64(c)
+ hh = float64(h)
+ dd = float64(d)
+ ee = float64(e)
+ ff = float64(f)
+ gg = float64(g)
+ ii = float64(i)
+ return
+}
+
+//go:noinline
+func conv2Float32_ssa(a int8, b uint8, c int16, d uint16,
+ e int32, f uint32, g int64, h uint64, i float64) (aa, bb, cc, dd, ee, ff, gg, hh, ii float32) {
+ aa = float32(a)
+ bb = float32(b)
+ cc = float32(c)
+ dd = float32(d)
+ ee = float32(e)
+ ff = float32(f)
+ gg = float32(g)
+ hh = float32(h)
+ ii = float32(i)
+ return
+}
+
+func integer2floatConversions(t *testing.T) {
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll64(t, "zero64", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll64(t, "one64", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(0, 0, 0, 0, 0, 0, 0, 0, 0)
+ expectAll32(t, "zero32", 0, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(1, 1, 1, 1, 1, 1, 1, 1, 1)
+ expectAll32(t, "one32", 1, a, b, c, d, e, f, g, h, i)
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect64(t, "a", a, 127)
+ expect64(t, "b", b, 255)
+ expect64(t, "c", c, 32767)
+ expect64(t, "d", d, 65535)
+ expect64(t, "e", e, float64(int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xffffffff)))
+ expect64(t, "g", g, float64(int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xffffffffffffffff)))
+ expect64(t, "i", i, float64(float32(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float64_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect64(t, "a", a, -128)
+ expect64(t, "b", b, 254)
+ expect64(t, "c", c, -32768)
+ expect64(t, "d", d, 65534)
+ expect64(t, "e", e, float64(^int32(0x7fffffff)))
+ expect64(t, "f", f, float64(uint32(0xfffffffe)))
+ expect64(t, "g", g, float64(^int64(0x7fffffffffffffff)))
+ expect64(t, "h", h, float64(uint64(0xfffffffffffff401)))
+ expect64(t, "i", i, float64(float32(1.5e-45)))
+ }
+ {
+ // Check maximum values
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(127, 255, 32767, 65535, 0x7fffffff, 0xffffffff, 0x7fffFFFFffffFFFF, 0xffffFFFFffffFFFF, 3.402823e38)
+ expect32(t, "a", a, 127)
+ expect32(t, "b", b, 255)
+ expect32(t, "c", c, 32767)
+ expect32(t, "d", d, 65535)
+ expect32(t, "e", e, float32(int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xffffffff)))
+ expect32(t, "g", g, float32(int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xffffffffffffffff)))
+ expect32(t, "i", i, float32(float64(3.402823e38)))
+ }
+ {
+ // Check minimum values (and tweaks for unsigned)
+ a, b, c, d, e, f, g, h, i := conv2Float32_ssa(-128, 254, -32768, 65534, ^0x7fffffff, 0xfffffffe, ^0x7fffFFFFffffFFFF, 0xffffFFFFffffF401, 1.5e-45)
+ expect32(t, "a", a, -128)
+ expect32(t, "b", b, 254)
+ expect32(t, "c", c, -32768)
+ expect32(t, "d", d, 65534)
+ expect32(t, "e", e, float32(^int32(0x7fffffff)))
+ expect32(t, "f", f, float32(uint32(0xfffffffe)))
+ expect32(t, "g", g, float32(^int64(0x7fffffffffffffff)))
+ expect32(t, "h", h, float32(uint64(0xfffffffffffff401)))
+ expect32(t, "i", i, float32(float64(1.5e-45)))
+ }
+}
+
+func multiplyAdd(t *testing.T) {
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float32() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float32 }{
+ {0.6046603, 0.9405091, 0.6645601, 1.2332485}, // fused multiply-add result: 1.2332486
+ {0.67908466, 0.21855305, 0.20318687, 0.3516029}, // fused multiply-add result: 0.35160288
+ {0.29311424, 0.29708257, 0.752573, 0.8396522}, // fused multiply-add result: 0.8396521
+ {0.5305857, 0.2535405, 0.282081, 0.41660595}, // fused multiply-add result: 0.41660598
+ {0.29711226, 0.89436173, 0.097454615, 0.36318043}, // fused multiply-add result: 0.36318046
+ {0.6810783, 0.24151509, 0.31152245, 0.47601312}, // fused multiply-add result: 0.47601315
+ {0.73023146, 0.18292491, 0.4283571, 0.5619346}, // fused multiply-add result: 0.56193465
+ {0.89634174, 0.32208398, 0.7211478, 1.009845}, // fused multiply-add result: 1.0098451
+ {0.6280982, 0.12675293, 0.2813303, 0.36094356}, // fused multiply-add result: 0.3609436
+ {0.29400632, 0.75316125, 0.15096405, 0.3723982}, // fused multiply-add result: 0.37239823
+ }
+ check := func(s string, got, expected float32) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float32(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float32) float32 {
+ return float32(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float32(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float32) float32 {
+ z += float32(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a float64() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct{ x, y, z, res float64 }{
+ {0.4688898449024232, 0.28303415118044517, 0.29310185733681576, 0.42581369658590373}, // fused multiply-add result: 0.4258136965859037
+ {0.7886049150193449, 0.3618054804803169, 0.8805431227416171, 1.1658647029293308}, // fused multiply-add result: 1.1658647029293305
+ {0.7302314772948083, 0.18292491645390843, 0.4283570818068078, 0.5619346137829748}, // fused multiply-add result: 0.5619346137829747
+ {0.6908388315056789, 0.7109071952999951, 0.5637795958152644, 1.0549018919252924}, // fused multiply-add result: 1.0549018919252926
+ {0.4584424785756506, 0.6001655953233308, 0.02626515060968944, 0.3014065536855481}, // fused multiply-add result: 0.30140655368554814
+ {0.539210105890946, 0.9756748149873165, 0.7507630564795985, 1.2768567767840384}, // fused multiply-add result: 1.2768567767840386
+ {0.7830349733960021, 0.3932509992288867, 0.1304138461737918, 0.4383431318929343}, // fused multiply-add result: 0.43834313189293433
+ {0.6841751300974551, 0.6530402051353608, 0.524499759549865, 0.9712936268572192}, // fused multiply-add result: 0.9712936268572193
+ {0.3691117091643448, 0.826454125634742, 0.34768170859156955, 0.6527356034505334}, // fused multiply-add result: 0.6527356034505333
+ {0.16867966833433606, 0.33136826030698385, 0.8279280961505588, 0.8838231843956668}, // fused multiply-add result: 0.8838231843956669
+ }
+ check := func(s string, got, expected float64) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %g, got %g\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("float64(%v * %v) + %v", t.x, t.y, t.z),
+ func(x, y, z float64) float64 {
+ return float64(x*y) + z
+ }(t.x, t.y, t.z),
+ t.res)
+
+ check(
+ fmt.Sprintf("%v += float64(%v * %v)", t.z, t.x, t.y),
+ func(x, y, z float64) float64 {
+ z += float64(x * y)
+ return z
+ }(t.x, t.y, t.z),
+ t.res)
+ }
+ }
+ {
+ // Test that a multiply-accumulate operation with intermediate
+ // rounding forced by a complex128() cast produces the expected
+ // result.
+ // Test cases generated experimentally on a system (s390x) that
+ // supports fused multiply-add instructions.
+ var tests = [...]struct {
+ x, y float64
+ res complex128
+ }{
+ {0.6046602879796196, 0.9405090880450124, (2.754489951983871 + 3i)}, // fused multiply-add result: (2.7544899519838713 + 3i)
+ {0.09696951891448456, 0.30091186058528707, (0.5918204173287407 + 3i)}, // fused multiply-add result: (0.5918204173287408 + 3i)
+ {0.544155573000885, 0.27850762181610883, (1.910974340818764 + 3i)}, // fused multiply-add result: (1.9109743408187638 + 3i)
+ {0.9769168685862624, 0.07429099894984302, (3.0050416047086297 + 3i)}, // fused multiply-add result: (3.00504160470863 + 3i)
+ {0.9269868035744142, 0.9549454404167818, (3.735905851140024 + 3i)}, // fused multiply-add result: (3.7359058511400245 + 3i)
+ {0.7109071952999951, 0.5637795958152644, (2.69650118171525 + 3i)}, // fused multiply-add result: (2.6965011817152496 + 3i)
+ {0.7558235074915978, 0.40380328579570035, (2.671273808270494 + 3i)}, // fused multiply-add result: (2.6712738082704934 + 3i)
+ {0.13065111702897217, 0.9859647293402467, (1.3779180804271633 + 3i)}, // fused multiply-add result: (1.3779180804271631 + 3i)
+ {0.8963417453962161, 0.3220839705208817, (3.0111092067095298 + 3i)}, // fused multiply-add result: (3.01110920670953 + 3i)
+ {0.39998376285699544, 0.497868113342702, (1.697819401913688 + 3i)}, // fused multiply-add result: (1.6978194019136883 + 3i)
+ }
+ check := func(s string, got, expected complex128) {
+ if got != expected {
+ fmt.Printf("multiplyAdd: %s, expected %v, got %v\n", s, expected, got)
+ }
+ }
+ for _, t := range tests {
+ check(
+ fmt.Sprintf("complex128(complex(%v, 1)*3) + complex(%v, 0)", t.x, t.y),
+ func(x, y float64) complex128 {
+ return complex128(complex(x, 1)*3) + complex(y, 0)
+ }(t.x, t.y),
+ t.res)
+
+ check(
+ fmt.Sprintf("z := complex(%v, 1); z += complex128(complex(%v, 1) * 3)", t.y, t.x),
+ func(x, y float64) complex128 {
+ z := complex(y, 0)
+ z += complex128(complex(x, 1) * 3)
+ return z
+ }(t.x, t.y),
+ t.res)
+ }
+ }
+}
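+
+// Note: the checks in multiplyAdd depend on the explicit float32()/float64()/
+// complex128() conversions above forcing an intermediate rounding step; without
+// them the compiler may emit a single fused multiply-add where the hardware
+// supports one. A rough illustration of the difference (math.FMA rounds once):
+//
+//	twoRoundings := float32(x*y) + z // separate rounding of the product, as these tests expect
+//	oneRounding := float32(math.FMA(float64(x), float64(y), float64(z)))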
+
+const (
+ aa = 0x1000000000000000
+ ab = 0x100000000000000
+ ac = 0x10000000000000
+ ad = 0x1000000000000
+ ba = 0x100000000000
+ bb = 0x10000000000
+ bc = 0x1000000000
+ bd = 0x100000000
+ ca = 0x10000000
+ cb = 0x1000000
+ cc = 0x100000
+ cd = 0x10000
+ da = 0x1000
+ db = 0x100
+ dc = 0x10
+ dd = 0x1
+)
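+
+// Each constant above occupies its own hex nibble, so every comparison in
+// compares64_ssa and compares32_ssa below adds a distinct digit to the packed
+// uint64 results; an unexpected comparison outcome is then easy to localize
+// from the expected masks checked in TestFP.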
+
+//go:noinline
+func compares64_ssa(a, b, c, d float64) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func compares32_ssa(a, b, c, d float32) (lt, le, eq, ne, ge, gt uint64) {
+ if a < a {
+ lt += aa
+ }
+ if a < b {
+ lt += ab
+ }
+ if a < c {
+ lt += ac
+ }
+ if a < d {
+ lt += ad
+ }
+
+ if b < a {
+ lt += ba
+ }
+ if b < b {
+ lt += bb
+ }
+ if b < c {
+ lt += bc
+ }
+ if b < d {
+ lt += bd
+ }
+
+ if c < a {
+ lt += ca
+ }
+ if c < b {
+ lt += cb
+ }
+ if c < c {
+ lt += cc
+ }
+ if c < d {
+ lt += cd
+ }
+
+ if d < a {
+ lt += da
+ }
+ if d < b {
+ lt += db
+ }
+ if d < c {
+ lt += dc
+ }
+ if d < d {
+ lt += dd
+ }
+
+ if a <= a {
+ le += aa
+ }
+ if a <= b {
+ le += ab
+ }
+ if a <= c {
+ le += ac
+ }
+ if a <= d {
+ le += ad
+ }
+
+ if b <= a {
+ le += ba
+ }
+ if b <= b {
+ le += bb
+ }
+ if b <= c {
+ le += bc
+ }
+ if b <= d {
+ le += bd
+ }
+
+ if c <= a {
+ le += ca
+ }
+ if c <= b {
+ le += cb
+ }
+ if c <= c {
+ le += cc
+ }
+ if c <= d {
+ le += cd
+ }
+
+ if d <= a {
+ le += da
+ }
+ if d <= b {
+ le += db
+ }
+ if d <= c {
+ le += dc
+ }
+ if d <= d {
+ le += dd
+ }
+
+ if a == a {
+ eq += aa
+ }
+ if a == b {
+ eq += ab
+ }
+ if a == c {
+ eq += ac
+ }
+ if a == d {
+ eq += ad
+ }
+
+ if b == a {
+ eq += ba
+ }
+ if b == b {
+ eq += bb
+ }
+ if b == c {
+ eq += bc
+ }
+ if b == d {
+ eq += bd
+ }
+
+ if c == a {
+ eq += ca
+ }
+ if c == b {
+ eq += cb
+ }
+ if c == c {
+ eq += cc
+ }
+ if c == d {
+ eq += cd
+ }
+
+ if d == a {
+ eq += da
+ }
+ if d == b {
+ eq += db
+ }
+ if d == c {
+ eq += dc
+ }
+ if d == d {
+ eq += dd
+ }
+
+ if a != a {
+ ne += aa
+ }
+ if a != b {
+ ne += ab
+ }
+ if a != c {
+ ne += ac
+ }
+ if a != d {
+ ne += ad
+ }
+
+ if b != a {
+ ne += ba
+ }
+ if b != b {
+ ne += bb
+ }
+ if b != c {
+ ne += bc
+ }
+ if b != d {
+ ne += bd
+ }
+
+ if c != a {
+ ne += ca
+ }
+ if c != b {
+ ne += cb
+ }
+ if c != c {
+ ne += cc
+ }
+ if c != d {
+ ne += cd
+ }
+
+ if d != a {
+ ne += da
+ }
+ if d != b {
+ ne += db
+ }
+ if d != c {
+ ne += dc
+ }
+ if d != d {
+ ne += dd
+ }
+
+ if a >= a {
+ ge += aa
+ }
+ if a >= b {
+ ge += ab
+ }
+ if a >= c {
+ ge += ac
+ }
+ if a >= d {
+ ge += ad
+ }
+
+ if b >= a {
+ ge += ba
+ }
+ if b >= b {
+ ge += bb
+ }
+ if b >= c {
+ ge += bc
+ }
+ if b >= d {
+ ge += bd
+ }
+
+ if c >= a {
+ ge += ca
+ }
+ if c >= b {
+ ge += cb
+ }
+ if c >= c {
+ ge += cc
+ }
+ if c >= d {
+ ge += cd
+ }
+
+ if d >= a {
+ ge += da
+ }
+ if d >= b {
+ ge += db
+ }
+ if d >= c {
+ ge += dc
+ }
+ if d >= d {
+ ge += dd
+ }
+
+ if a > a {
+ gt += aa
+ }
+ if a > b {
+ gt += ab
+ }
+ if a > c {
+ gt += ac
+ }
+ if a > d {
+ gt += ad
+ }
+
+ if b > a {
+ gt += ba
+ }
+ if b > b {
+ gt += bb
+ }
+ if b > c {
+ gt += bc
+ }
+ if b > d {
+ gt += bd
+ }
+
+ if c > a {
+ gt += ca
+ }
+ if c > b {
+ gt += cb
+ }
+ if c > c {
+ gt += cc
+ }
+ if c > d {
+ gt += cd
+ }
+
+ if d > a {
+ gt += da
+ }
+ if d > b {
+ gt += db
+ }
+ if d > c {
+ gt += dc
+ }
+ if d > d {
+ gt += dd
+ }
+
+ return
+}
+
+//go:noinline
+func le64_ssa(x, y float64) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge64_ssa(x, y float64) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt64_ssa(x, y float64) bool {
+ return x < y
+}
+
+//go:noinline
+func gt64_ssa(x, y float64) bool {
+ return x > y
+}
+
+//go:noinline
+func eq64_ssa(x, y float64) bool {
+ return x == y
+}
+
+//go:noinline
+func ne64_ssa(x, y float64) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr64_ssa(x, y float64) float64 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr64_ssa(x, y float64) float64 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr64_ssa(x, y float64) float64 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr64_ssa(x, y float64) float64 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr64_ssa(x, y float64) float64 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr64_ssa(x, y float64) float64 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func le32_ssa(x, y float32) bool {
+ return x <= y
+}
+
+//go:noinline
+func ge32_ssa(x, y float32) bool {
+ return x >= y
+}
+
+//go:noinline
+func lt32_ssa(x, y float32) bool {
+ return x < y
+}
+
+//go:noinline
+func gt32_ssa(x, y float32) bool {
+ return x > y
+}
+
+//go:noinline
+func eq32_ssa(x, y float32) bool {
+ return x == y
+}
+
+//go:noinline
+func ne32_ssa(x, y float32) bool {
+ return x != y
+}
+
+//go:noinline
+func eqbr32_ssa(x, y float32) float32 {
+ if x == y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func nebr32_ssa(x, y float32) float32 {
+ if x != y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gebr32_ssa(x, y float32) float32 {
+ if x >= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func lebr32_ssa(x, y float32) float32 {
+ if x <= y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func ltbr32_ssa(x, y float32) float32 {
+ if x < y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func gtbr32_ssa(x, y float32) float32 {
+ if x > y {
+ return 17
+ }
+ return 42
+}
+
+//go:noinline
+func F32toU8_ssa(x float32) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F32toI8_ssa(x float32) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F32toU16_ssa(x float32) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F32toI16_ssa(x float32) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F32toU32_ssa(x float32) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F32toI32_ssa(x float32) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F32toU64_ssa(x float32) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F32toI64_ssa(x float32) int64 {
+ return int64(x)
+}
+
+//go:noinline
+func F64toU8_ssa(x float64) uint8 {
+ return uint8(x)
+}
+
+//go:noinline
+func F64toI8_ssa(x float64) int8 {
+ return int8(x)
+}
+
+//go:noinline
+func F64toU16_ssa(x float64) uint16 {
+ return uint16(x)
+}
+
+//go:noinline
+func F64toI16_ssa(x float64) int16 {
+ return int16(x)
+}
+
+//go:noinline
+func F64toU32_ssa(x float64) uint32 {
+ return uint32(x)
+}
+
+//go:noinline
+func F64toI32_ssa(x float64) int32 {
+ return int32(x)
+}
+
+//go:noinline
+func F64toU64_ssa(x float64) uint64 {
+ return uint64(x)
+}
+
+//go:noinline
+func F64toI64_ssa(x float64) int64 {
+ return int64(x)
+}
+
+func floatsToInts(t *testing.T, x float64, expected int64) {
+ y := float32(x)
+ expectInt64(t, "F64toI8", int64(F64toI8_ssa(x)), expected)
+ expectInt64(t, "F64toI16", int64(F64toI16_ssa(x)), expected)
+ expectInt64(t, "F64toI32", int64(F64toI32_ssa(x)), expected)
+ expectInt64(t, "F64toI64", int64(F64toI64_ssa(x)), expected)
+ expectInt64(t, "F32toI8", int64(F32toI8_ssa(y)), expected)
+ expectInt64(t, "F32toI16", int64(F32toI16_ssa(y)), expected)
+ expectInt64(t, "F32toI32", int64(F32toI32_ssa(y)), expected)
+ expectInt64(t, "F32toI64", int64(F32toI64_ssa(y)), expected)
+}
+
+func floatsToUints(t *testing.T, x float64, expected uint64) {
+ y := float32(x)
+ expectUint64(t, "F64toU8", uint64(F64toU8_ssa(x)), expected)
+ expectUint64(t, "F64toU16", uint64(F64toU16_ssa(x)), expected)
+ expectUint64(t, "F64toU32", uint64(F64toU32_ssa(x)), expected)
+ expectUint64(t, "F64toU64", uint64(F64toU64_ssa(x)), expected)
+ expectUint64(t, "F32toU8", uint64(F32toU8_ssa(y)), expected)
+ expectUint64(t, "F32toU16", uint64(F32toU16_ssa(y)), expected)
+ expectUint64(t, "F32toU32", uint64(F32toU32_ssa(y)), expected)
+ expectUint64(t, "F32toU64", uint64(F32toU64_ssa(y)), expected)
+}
+
+func floatingToIntegerConversionsTest(t *testing.T) {
+ floatsToInts(t, 0.0, 0)
+ floatsToInts(t, 0.5, 0)
+ floatsToInts(t, 0.9, 0)
+ floatsToInts(t, 1.0, 1)
+ floatsToInts(t, 1.5, 1)
+ floatsToInts(t, 127.0, 127)
+ floatsToInts(t, -1.0, -1)
+ floatsToInts(t, -128.0, -128)
+
+ floatsToUints(t, 0.0, 0)
+ floatsToUints(t, 1.0, 1)
+ floatsToUints(t, 255.0, 255)
+
+ for j := uint(0); j < 24; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
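+		// With j < 24 the two set bits of v span at most 24 bits, so v is
+		// exactly representable in a float32 (and float64) mantissa and the
+		// conversions below lose nothing.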
+ v := int64(1<<62) | int64(1<<(62-j))
+ w := uint64(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint64(t, "2**62...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**62...", F64toU64_ssa(d), w)
+ expectInt64(t, "2**62...", F32toI64_ssa(f), v)
+ expectInt64(t, "2**62...", F64toI64_ssa(d), v)
+ expectInt64(t, "2**62...", F32toI64_ssa(-f), -v)
+ expectInt64(t, "2**62...", F64toI64_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint64(t, "2**63...", F32toU64_ssa(f), w)
+ expectUint64(t, "2**63...", F64toU64_ssa(d), w)
+ }
+
+ for j := uint(0); j < 16; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int32(1<<30) | int32(1<<(30-j))
+ w := uint32(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint32(t, "2**30...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**30...", F64toU32_ssa(d), w)
+ expectInt32(t, "2**30...", F32toI32_ssa(f), v)
+ expectInt32(t, "2**30...", F64toI32_ssa(d), v)
+ expectInt32(t, "2**30...", F32toI32_ssa(-f), -v)
+ expectInt32(t, "2**30...", F64toI32_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint32(t, "2**31...", F32toU32_ssa(f), w)
+ expectUint32(t, "2**31...", F64toU32_ssa(d), w)
+ }
+
+ for j := uint(0); j < 15; j++ {
+ // Avoid hard cases in the construction
+ // of the test inputs.
+ v := int16(1<<14) | int16(1<<(14-j))
+ w := uint16(v)
+ f := float32(v)
+ d := float64(v)
+ expectUint16(t, "2**14...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**14...", F64toU16_ssa(d), w)
+ expectInt16(t, "2**14...", F32toI16_ssa(f), v)
+ expectInt16(t, "2**14...", F64toI16_ssa(d), v)
+ expectInt16(t, "2**14...", F32toI16_ssa(-f), -v)
+ expectInt16(t, "2**14...", F64toI16_ssa(-d), -v)
+ w += w
+ f += f
+ d += d
+ expectUint16(t, "2**15...", F32toU16_ssa(f), w)
+ expectUint16(t, "2**15...", F64toU16_ssa(d), w)
+ }
+
+ expectInt32(t, "-2147483648", F32toI32_ssa(-2147483648), -2147483648)
+
+ expectInt32(t, "-2147483648", F64toI32_ssa(-2147483648), -2147483648)
+ expectInt32(t, "-2147483647", F64toI32_ssa(-2147483647), -2147483647)
+ expectUint32(t, "4294967295", F64toU32_ssa(4294967295), 4294967295)
+
+ expectInt16(t, "-32768", F64toI16_ssa(-32768), -32768)
+ expectInt16(t, "-32768", F32toI16_ssa(-32768), -32768)
+
+	// NB: these are more of a pain to do for 32-bit values because of bits lost in the float32 mantissa.
+ expectInt16(t, "32767", F64toI16_ssa(32767), 32767)
+ expectInt16(t, "32767", F32toI16_ssa(32767), 32767)
+ expectUint16(t, "32767", F64toU16_ssa(32767), 32767)
+ expectUint16(t, "32767", F32toU16_ssa(32767), 32767)
+ expectUint16(t, "65535", F64toU16_ssa(65535), 65535)
+ expectUint16(t, "65535", F32toU16_ssa(65535), 65535)
+}
+
+func fail64(s string, f func(a, b float64) float64, a, b, e float64) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail64bool(s string, f func(a, b float64) bool, a, b float64, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float64) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32(s string, f func(a, b float32) float32, a, b, e float32) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func fail32bool(s string, f func(a, b float32) bool, a, b float32, e bool) {
+ d := f(a, b)
+ if d != e {
+ fmt.Printf("For (float32) %v %v %v, expected %v, got %v\n", a, s, b, e, d)
+ }
+}
+
+func expect64(t *testing.T, s string, x, expected float64) {
+ if x != expected {
+ println("F64 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expect32(t *testing.T, s string, x, expected float32) {
+ if x != expected {
+ println("F32 Expected", expected, "for", s, ", got", x)
+ }
+}
+
+func expectUint64(t *testing.T, s string, x, expected uint64) {
+ if x != expected {
+ fmt.Printf("U64 Expected 0x%016x for %s, got 0x%016x\n", expected, s, x)
+ }
+}
+
+func expectInt64(t *testing.T, s string, x, expected int64) {
+ if x != expected {
+ fmt.Printf("%s: Expected 0x%016x, got 0x%016x\n", s, expected, x)
+ }
+}
+
+func expectUint32(t *testing.T, s string, x, expected uint32) {
+ if x != expected {
+ fmt.Printf("U32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectInt32(t *testing.T, s string, x, expected int32) {
+ if x != expected {
+ fmt.Printf("I32 %s: Expected 0x%08x, got 0x%08x\n", s, expected, x)
+ }
+}
+
+func expectUint16(t *testing.T, s string, x, expected uint16) {
+ if x != expected {
+ fmt.Printf("U16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectInt16(t *testing.T, s string, x, expected int16) {
+ if x != expected {
+ fmt.Printf("I16 %s: Expected 0x%04x, got 0x%04x\n", s, expected, x)
+ }
+}
+
+func expectAll64(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float64) {
+ expect64(t, s+":a", a, expected)
+ expect64(t, s+":b", b, expected)
+ expect64(t, s+":c", c, expected)
+ expect64(t, s+":d", d, expected)
+ expect64(t, s+":e", e, expected)
+ expect64(t, s+":f", f, expected)
+ expect64(t, s+":g", g, expected)
+}
+
+func expectAll32(t *testing.T, s string, expected, a, b, c, d, e, f, g, h, i float32) {
+ expect32(t, s+":a", a, expected)
+ expect32(t, s+":b", b, expected)
+ expect32(t, s+":c", c, expected)
+ expect32(t, s+":d", d, expected)
+ expect32(t, s+":e", e, expected)
+ expect32(t, s+":f", f, expected)
+ expect32(t, s+":g", g, expected)
+}
+
+var ev64 [2]float64 = [2]float64{42.0, 17.0}
+var ev32 [2]float32 = [2]float32{42.0, 17.0}
+
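+// cmpOpTest exercises one comparison operator in both its boolean form (f, ff)
+// and its branching form (g, gg), for float64 and float32 operands. Bits 16,
+// 12, 8, 4 and 0 of result encode the expected outcome for the operand pairs
+// (zero,zero), (zero,one), (zero,inf), (zero,nan) and (nan,nan) respectively,
+// which is why the callers in TestFP pass masks such as 0x01111.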
+func cmpOpTest(t *testing.T,
+ s string,
+ f func(a, b float64) bool,
+ g func(a, b float64) float64,
+ ff func(a, b float32) bool,
+ gg func(a, b float32) float32,
+ zero, one, inf, nan float64, result uint) {
+ fail64bool(s, f, zero, zero, result>>16&1 == 1)
+ fail64bool(s, f, zero, one, result>>12&1 == 1)
+ fail64bool(s, f, zero, inf, result>>8&1 == 1)
+ fail64bool(s, f, zero, nan, result>>4&1 == 1)
+ fail64bool(s, f, nan, nan, result&1 == 1)
+
+ fail64(s, g, zero, zero, ev64[result>>16&1])
+ fail64(s, g, zero, one, ev64[result>>12&1])
+ fail64(s, g, zero, inf, ev64[result>>8&1])
+ fail64(s, g, zero, nan, ev64[result>>4&1])
+ fail64(s, g, nan, nan, ev64[result>>0&1])
+
+ {
+ zero := float32(zero)
+ one := float32(one)
+ inf := float32(inf)
+ nan := float32(nan)
+ fail32bool(s, ff, zero, zero, (result>>16)&1 == 1)
+ fail32bool(s, ff, zero, one, (result>>12)&1 == 1)
+ fail32bool(s, ff, zero, inf, (result>>8)&1 == 1)
+ fail32bool(s, ff, zero, nan, (result>>4)&1 == 1)
+ fail32bool(s, ff, nan, nan, result&1 == 1)
+
+ fail32(s, gg, zero, zero, ev32[(result>>16)&1])
+ fail32(s, gg, zero, one, ev32[(result>>12)&1])
+ fail32(s, gg, zero, inf, ev32[(result>>8)&1])
+ fail32(s, gg, zero, nan, ev32[(result>>4)&1])
+ fail32(s, gg, nan, nan, ev32[(result>>0)&1])
+ }
+}
+
+func expectCx128(t *testing.T, s string, x, expected complex128) {
+ if x != expected {
+ t.Errorf("Cx 128 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+func expectCx64(t *testing.T, s string, x, expected complex64) {
+ if x != expected {
+ t.Errorf("Cx 64 Expected %f for %s, got %f", expected, s, x)
+ }
+}
+
+//go:noinline
+func cx128sum_ssa(a, b complex128) complex128 {
+ return a + b
+}
+
+//go:noinline
+func cx128diff_ssa(a, b complex128) complex128 {
+ return a - b
+}
+
+//go:noinline
+func cx128prod_ssa(a, b complex128) complex128 {
+ return a * b
+}
+
+//go:noinline
+func cx128quot_ssa(a, b complex128) complex128 {
+ return a / b
+}
+
+//go:noinline
+func cx128neg_ssa(a complex128) complex128 {
+ return -a
+}
+
+//go:noinline
+func cx128real_ssa(a complex128) float64 {
+ return real(a)
+}
+
+//go:noinline
+func cx128imag_ssa(a complex128) float64 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128cnst_ssa(a complex128) complex128 {
+ b := 2 + 3i
+ return a * b
+}
+
+//go:noinline
+func cx64sum_ssa(a, b complex64) complex64 {
+ return a + b
+}
+
+//go:noinline
+func cx64diff_ssa(a, b complex64) complex64 {
+ return a - b
+}
+
+//go:noinline
+func cx64prod_ssa(a, b complex64) complex64 {
+ return a * b
+}
+
+//go:noinline
+func cx64quot_ssa(a, b complex64) complex64 {
+ return a / b
+}
+
+//go:noinline
+func cx64neg_ssa(a complex64) complex64 {
+ return -a
+}
+
+//go:noinline
+func cx64real_ssa(a complex64) float32 {
+ return real(a)
+}
+
+//go:noinline
+func cx64imag_ssa(a complex64) float32 {
+ return imag(a)
+}
+
+//go:noinline
+func cx128eq_ssa(a, b complex128) bool {
+ return a == b
+}
+
+//go:noinline
+func cx128ne_ssa(a, b complex128) bool {
+ return a != b
+}
+
+//go:noinline
+func cx64eq_ssa(a, b complex64) bool {
+ return a == b
+}
+
+//go:noinline
+func cx64ne_ssa(a, b complex64) bool {
+ return a != b
+}
+
+func expectTrue(t *testing.T, s string, b bool) {
+ if !b {
+ t.Errorf("expected true for %s, got false", s)
+ }
+}
+func expectFalse(t *testing.T, s string, b bool) {
+ if b {
+ t.Errorf("expected false for %s, got true", s)
+ }
+}
+
+func complexTest128(t *testing.T) {
+ var a complex128 = 1 + 2i
+ var b complex128 = 3 + 6i
+ sum := cx128sum_ssa(b, a)
+ diff := cx128diff_ssa(b, a)
+ prod := cx128prod_ssa(b, a)
+ quot := cx128quot_ssa(b, a)
+ neg := cx128neg_ssa(a)
+ r := cx128real_ssa(a)
+ i := cx128imag_ssa(a)
+ cnst := cx128cnst_ssa(a)
+ c1 := cx128eq_ssa(a, a)
+ c2 := cx128eq_ssa(a, b)
+ c3 := cx128ne_ssa(a, a)
+ c4 := cx128ne_ssa(a, b)
+
+ expectCx128(t, "sum", sum, 4+8i)
+ expectCx128(t, "diff", diff, 2+4i)
+ expectCx128(t, "prod", prod, -9+12i)
+ expectCx128(t, "quot", quot, 3+0i)
+ expectCx128(t, "neg", neg, -1-2i)
+ expect64(t, "real", r, 1)
+ expect64(t, "imag", i, 2)
+ expectCx128(t, "cnst", cnst, -4+7i)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+func complexTest64(t *testing.T) {
+ var a complex64 = 1 + 2i
+ var b complex64 = 3 + 6i
+ sum := cx64sum_ssa(b, a)
+ diff := cx64diff_ssa(b, a)
+ prod := cx64prod_ssa(b, a)
+ quot := cx64quot_ssa(b, a)
+ neg := cx64neg_ssa(a)
+ r := cx64real_ssa(a)
+ i := cx64imag_ssa(a)
+ c1 := cx64eq_ssa(a, a)
+ c2 := cx64eq_ssa(a, b)
+ c3 := cx64ne_ssa(a, a)
+ c4 := cx64ne_ssa(a, b)
+
+ expectCx64(t, "sum", sum, 4+8i)
+ expectCx64(t, "diff", diff, 2+4i)
+ expectCx64(t, "prod", prod, -9+12i)
+ expectCx64(t, "quot", quot, 3+0i)
+ expectCx64(t, "neg", neg, -1-2i)
+ expect32(t, "real", r, 1)
+ expect32(t, "imag", i, 2)
+ expectTrue(t, fmt.Sprintf("%v==%v", a, a), c1)
+ expectFalse(t, fmt.Sprintf("%v==%v", a, b), c2)
+ expectFalse(t, fmt.Sprintf("%v!=%v", a, a), c3)
+ expectTrue(t, fmt.Sprintf("%v!=%v", a, b), c4)
+}
+
+// TestFP tests that we get the right answer for floating point expressions.
+func TestFP(t *testing.T) {
+ a := 3.0
+ b := 4.0
+
+ c := float32(3.0)
+ d := float32(4.0)
+
+ tiny := float32(1.5e-45) // smallest f32 denorm = 2**(-149)
+ dtiny := float64(tiny) // well within range of f64
+
+ fail64("+", add64_ssa, a, b, 7.0)
+ fail64("*", mul64_ssa, a, b, 12.0)
+ fail64("-", sub64_ssa, a, b, -1.0)
+ fail64("/", div64_ssa, a, b, 0.75)
+ fail64("neg", neg64_ssa, a, b, -7)
+
+ fail32("+", add32_ssa, c, d, 7.0)
+ fail32("*", mul32_ssa, c, d, 12.0)
+ fail32("-", sub32_ssa, c, d, -1.0)
+ fail32("/", div32_ssa, c, d, 0.75)
+ fail32("neg", neg32_ssa, c, d, -7)
+
+ // denorm-squared should underflow to zero.
+ fail32("*", mul32_ssa, tiny, tiny, 0)
+
+ // but should not underflow in float and in fact is exactly representable.
+ fail64("*", mul64_ssa, dtiny, dtiny, 1.9636373861190906e-90)
+
+	// Intended to create register pressure which forces
+	// an asymmetric op into different code paths.
+ aa, ab, ac, ad, ba, bb, bc, bd, ca, cb, cc, cd, da, db, dc, dd := manysub_ssa(1000.0, 100.0, 10.0, 1.0)
+
+ expect64(t, "aa", aa, 11.0)
+ expect64(t, "ab", ab, 900.0)
+ expect64(t, "ac", ac, 990.0)
+ expect64(t, "ad", ad, 999.0)
+
+ expect64(t, "ba", ba, -900.0)
+ expect64(t, "bb", bb, 22.0)
+ expect64(t, "bc", bc, 90.0)
+ expect64(t, "bd", bd, 99.0)
+
+ expect64(t, "ca", ca, -990.0)
+ expect64(t, "cb", cb, -90.0)
+ expect64(t, "cc", cc, 33.0)
+ expect64(t, "cd", cd, 9.0)
+
+ expect64(t, "da", da, -999.0)
+ expect64(t, "db", db, -99.0)
+ expect64(t, "dc", dc, -9.0)
+ expect64(t, "dd", dd, 44.0)
+
+ integer2floatConversions(t)
+
+ multiplyAdd(t)
+
+ var zero64 float64 = 0.0
+ var one64 float64 = 1.0
+ var inf64 float64 = 1.0 / zero64
+ var nan64 float64 = sub64_ssa(inf64, inf64)
+
+ cmpOpTest(t, "!=", ne64_ssa, nebr64_ssa, ne32_ssa, nebr32_ssa, zero64, one64, inf64, nan64, 0x01111)
+ cmpOpTest(t, "==", eq64_ssa, eqbr64_ssa, eq32_ssa, eqbr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+ cmpOpTest(t, "<=", le64_ssa, lebr64_ssa, le32_ssa, lebr32_ssa, zero64, one64, inf64, nan64, 0x11100)
+ cmpOpTest(t, "<", lt64_ssa, ltbr64_ssa, lt32_ssa, ltbr32_ssa, zero64, one64, inf64, nan64, 0x01100)
+ cmpOpTest(t, ">", gt64_ssa, gtbr64_ssa, gt32_ssa, gtbr32_ssa, zero64, one64, inf64, nan64, 0x00000)
+ cmpOpTest(t, ">=", ge64_ssa, gebr64_ssa, ge32_ssa, gebr32_ssa, zero64, one64, inf64, nan64, 0x10000)
+
+ {
+ lt, le, eq, ne, ge, gt := compares64_ssa(0.0, 1.0, inf64, nan64)
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ // fmt.Printf("lt=0x%016x, le=0x%016x, eq=0x%016x, ne=0x%016x, ge=0x%016x, gt=0x%016x\n",
+ // lt, le, eq, ne, ge, gt)
+ }
+ {
+ lt, le, eq, ne, ge, gt := compares32_ssa(0.0, 1.0, float32(inf64), float32(nan64))
+ expectUint64(t, "lt", lt, 0x0110001000000000)
+ expectUint64(t, "le", le, 0x1110011000100000)
+ expectUint64(t, "eq", eq, 0x1000010000100000)
+ expectUint64(t, "ne", ne, 0x0111101111011111)
+ expectUint64(t, "ge", ge, 0x1000110011100000)
+ expectUint64(t, "gt", gt, 0x0000100011000000)
+ }
+
+ floatingToIntegerConversionsTest(t)
+ complexTest128(t)
+ complexTest64(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
new file mode 100644
index 0000000..21ad27e
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/arithBoundaryGen.go
@@ -0,0 +1,209 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle some special cases. The test file should be
+// generated with a known working version of go.
+// Launch it with `go run arithBoundaryGen.go`; a file called arithBoundary_test.go
+// containing the tests will be written into the parent directory.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "text/template"
+)
+
+// used for interpolation in a text template
+type tmplData struct {
+ Name, Stype, Symbol string
+}
+
+// used to work around an issue with the mod symbol being
+// interpreted as part of a format string
+func (s tmplData) SymFirst() string {
+ return string(s.Symbol[0])
+}
+
+// ucast casts an unsigned int to the size in s
+func ucast(i uint64, s sizedTestData) uint64 {
+ switch s.name {
+ case "uint32":
+ return uint64(uint32(i))
+ case "uint16":
+ return uint64(uint16(i))
+ case "uint8":
+ return uint64(uint8(i))
+ }
+ return i
+}
+
+// icast casts a signed int to the size in s
+func icast(i int64, s sizedTestData) int64 {
+ switch s.name {
+ case "int32":
+ return int64(int32(i))
+ case "int16":
+ return int64(int16(i))
+ case "int8":
+ return int64(int8(i))
+ }
+ return i
+}
+
+type sizedTestData struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+// Values used to generate tests. These should include the smallest and largest values, along
+// with any other values that might cause issues. We generate n^2 tests for each size to
+// cover all cases.
+var szs = []sizedTestData{
+ sizedTestData{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ sizedTestData{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ sizedTestData{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ sizedTestData{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ sizedTestData{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ sizedTestData{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ sizedTestData{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ sizedTestData{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+type op struct {
+ name, symbol string
+}
+
+// ops that we will be generating tests for
+var ops = []op{op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mod", "%%"}, op{"mul", "*"}}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithBoundaryGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, sz := range []int{64, 32, 16, 8} {
+ fmt.Fprintf(w, "type utd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b uint%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod uint%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type itd%d struct {\n", sz)
+ fmt.Fprintf(w, " a,b int%d\n", sz)
+ fmt.Fprintf(w, " add,sub,mul,div,mod int%d\n", sz)
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // the function being tested
+ testFunc, err := template.New("testFunc").Parse(
+ `//go:noinline
+ func {{.Name}}_{{.Stype}}_ssa(a, b {{.Stype}}) {{.Stype}} {
+ return a {{.SymFirst}} b
+}
+`)
+ if err != nil {
+ panic(err)
+ }
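+
+	// For example, with Name "add", Stype "uint8" and Symbol "+" the template
+	// above expands (modulo whitespace) to:
+	//
+	//	//go:noinline
+	//	func add_uint8_ssa(a, b uint8) uint8 {
+	//		return a + b
+	//	}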
+
+ // generate our functions to be tested
+ for _, s := range szs {
+ for _, o := range ops {
+ fd := tmplData{o.name, s.name, o.symbol}
+ err = testFunc.Execute(w, fd)
+ if err != nil {
+ panic(err)
+ }
+ }
+ }
+
+ // generate the test data
+ for _, s := range szs {
+ if len(s.u) > 0 {
+ fmt.Fprintf(w, "var %s_data []utd%s = []utd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.u {
+ for _, j := range s.u {
+ fmt.Fprintf(w, "utd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, ucast(i+j, s), ucast(i-j, s), ucast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", ucast(i/j, s), ucast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ } else {
+ // TODO: clean up this duplication
+ fmt.Fprintf(w, "var %s_data []itd%s = []itd%s{", s.name, s.sn, s.sn)
+ for _, i := range s.i {
+ for _, j := range s.i {
+ fmt.Fprintf(w, "itd%s{a: %d, b: %d, add: %d, sub: %d, mul: %d", s.sn, i, j, icast(i+j, s), icast(i-j, s), icast(i*j, s))
+ if j != 0 {
+ fmt.Fprintf(w, ", div: %d, mod: %d", icast(i/j, s), icast(i%j, s))
+ }
+ fmt.Fprint(w, "},\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+	fmt.Fprintf(w, "// TestArithmeticBoundary tests boundary results for arithmetic operations.\n")
+ fmt.Fprintf(w, "func TestArithmeticBoundary(t *testing.T) {\n\n")
+
+ verify, err := template.New("tst").Parse(
+ `if got := {{.Name}}_{{.Stype}}_ssa(v.a, v.b); got != v.{{.Name}} {
+ t.Errorf("{{.Name}}_{{.Stype}} %d{{.Symbol}}%d = %d, wanted %d\n",v.a,v.b,got,v.{{.Name}})
+}
+`)
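+
+	// For example, with Name "add", Stype "uint8" and Symbol "+" this expands
+	// (modulo whitespace) to:
+	//
+	//	if got := add_uint8_ssa(v.a, v.b); got != v.add {
+	//		t.Errorf("add_uint8 %d+%d = %d, wanted %d\n", v.a, v.b, got, v.add)
+	//	}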
+
+ for _, s := range szs {
+ fmt.Fprintf(w, "for _, v := range %s_data {\n", s.name)
+
+ for _, o := range ops {
+ // avoid generating tests that divide by zero
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "if v.b != 0 {")
+ }
+
+ err = verify.Execute(w, tmplData{o.name, s.name, o.symbol})
+
+ if o.name == "div" || o.name == "mod" {
+ fmt.Fprint(w, "\n}\n")
+ }
+
+ if err != nil {
+ panic(err)
+ }
+
+ }
+ fmt.Fprint(w, " }\n")
+ }
+
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../arithBoundary_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
new file mode 100644
index 0000000..41b2946
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/arithConstGen.go
@@ -0,0 +1,346 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle const cases. The test file should be
+// generated with a known working version of go.
+// Launch it with `go run arithConstGen.go`; a file called arithConst_test.go
+// containing the tests will be written into the parent directory.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "strings"
+ "text/template"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+ oponly string
+}
+
+var szs = []szD{
+ {name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0x8000000000000000, 0xffffFFFFffffFFFF}},
+ {name: "uint64", sn: "64", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+ {name: "int64", sn: "64", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ {name: "uint32", sn: "32", u: []uint64{3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+ {name: "int32", sn: "32", i: []int64{-9, -5, -3, 3, 5, 7, 9, 10, 11, 13, 19, 21, 25, 27, 37, 41, 45, 73, 81}, oponly: "mul"},
+
+ {name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ {name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ {name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ {name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ {"add", "+"},
+ {"sub", "-"},
+ {"div", "/"},
+ {"mul", "*"},
+ {"lsh", "<<"},
+ {"rsh", ">>"},
+ {"mod", "%"},
+ {"and", "&"},
+ {"or", "|"},
+ {"xor", "^"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ case "&":
+ ans = i & j
+ case "|":
+ ans = i | j
+ case "^":
+ ans = i ^ j
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/arithConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ fncCnst1 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.Type_}}_{{.FNumber}}(a {{.Type_}}) {{.Type_}} { return a {{.Symbol}} {{.Number}} }
+`))
+ fncCnst2 := template.Must(template.New("fnc").Parse(
+ `//go:noinline
+func {{.Name}}_{{.FNumber}}_{{.Type_}}(a {{.Type_}}) {{.Type_}} { return {{.Number}} {{.Symbol}} a }
+`))
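+
+	// For example, with Name "mul", Type_ "uint32", Symbol "*" and Number "7"
+	// these two templates expand to (each preceded by a //go:noinline directive):
+	//
+	//	func mul_uint32_7(a uint32) uint32 { return a * 7 }
+	//	func mul_7_uint32(a uint32) uint32 { return 7 * a }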
+
+ type fncData struct {
+ Name, Type_, Symbol, FNumber, Number string
+ }
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := fncData{o.name, s.name, o.symbol, "", ""}
+
+ // unsigned test cases
+ if len(s.u) > 0 {
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ // introduce uint64 cast for rhs shift operands
+ // if they are too large for default uint type
+ number := fd.Number
+ if (o.name == "lsh" || o.name == "rsh") && uint64(uint32(i)) != i {
+ fd.Number = fmt.Sprintf("uint64(%s)", number)
+ }
+ fncCnst1.Execute(w, fd)
+ fd.Number = number
+ }
+
+ fncCnst2.Execute(w, fd)
+ }
+ }
+
+ // signed test cases
+ if len(s.i) > 0 {
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // avoid division by zero
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fncCnst1.Execute(w, fd)
+ }
+ fncCnst2.Execute(w, fd)
+ }
+ }
+ }
+ }
+
+ vrf1 := template.Must(template.New("vrf1").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.FNumber}}_{{.Type_}}, fnname: "{{.Name}}_{{.FNumber}}_{{.Type_}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ vrf2 := template.Must(template.New("vrf2").Parse(`
+ test_{{.Size}}{fn: {{.Name}}_{{.Type_}}_{{.FNumber}}, fnname: "{{.Name}}_{{.Type_}}_{{.FNumber}}", in: {{.Input}}, want: {{.Ans}}},`))
+
+ type cfncData struct {
+ Size, Name, Type_, Symbol, FNumber, Number string
+ Ans, Input string
+ }
+ for _, s := range szs {
+ fmt.Fprintf(w, `
+type test_%[1]s%[2]s struct {
+ fn func (%[1]s) %[1]s
+ fnname string
+ in %[1]s
+ want %[1]s
+}
+`, s.name, s.oponly)
+ fmt.Fprintf(w, "var tests_%[1]s%[2]s =[]test_%[1]s {\n\n", s.name, s.oponly)
+
+ if len(s.u) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.u {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+
+ // unsigned
+ for _, j := range s.u {
+
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansU(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansU(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ // signed
+ if len(s.i) > 0 {
+ for _, o := range ops {
+ if s.oponly != "" && s.oponly != o.name {
+ continue
+ }
+ // don't generate tests for shifts by signed integers
+ if o.name == "lsh" || o.name == "rsh" {
+ continue
+ }
+ fd := cfncData{s.name, o.name, s.name, o.symbol, "", "", "", ""}
+ for _, i := range s.i {
+ fd.Number = fmt.Sprintf("%d", i)
+ fd.FNumber = strings.Replace(fd.Number, "-", "Neg", -1)
+ for _, j := range s.i {
+ if o.name != "mod" && o.name != "div" || j != 0 {
+ fd.Ans = ansS(i, j, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf1.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ if o.name != "mod" && o.name != "div" || i != 0 {
+ fd.Ans = ansS(j, i, s.name, o.symbol)
+ fd.Input = fmt.Sprintf("%d", j)
+ if err := vrf2.Execute(w, fd); err != nil {
+ panic(err)
+ }
+ }
+
+ }
+ }
+
+ }
+ }
+
+ fmt.Fprintf(w, "}\n\n")
+ }
+
+ fmt.Fprint(w, `
+
+// TestArithmeticConst tests results for arithmetic operations against constants.
+func TestArithmeticConst(t *testing.T) {
+`)
+
+ for _, s := range szs {
+ fmt.Fprintf(w, `for _, test := range tests_%s%s {`, s.name, s.oponly)
+ // Use WriteString here to avoid a vet warning about formatting directives.
+ w.WriteString(`if got := test.fn(test.in); got != test.want {
+ t.Errorf("%s(%d) = %d, want %d\n", test.fnname, test.in, got, test.want)
+ }
+ }
+`)
+ }
+
+ fmt.Fprint(w, `
+}
+`)
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../arithConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
new file mode 100644
index 0000000..5508e76
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/cmpConstGen.go
@@ -0,0 +1,247 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard comparison
+// operators properly handle one const operand. The test file should be
+// generated with a known working version of go.
+// Launch it with `go run cmpConstGen.go`; a file called cmpConst_test.go
+// containing the tests will be written into the parent directory.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+ "math/big"
+ "sort"
+)
+
+const (
+ maxU64 = (1 << 64) - 1
+ maxU32 = (1 << 32) - 1
+ maxU16 = (1 << 16) - 1
+ maxU8 = (1 << 8) - 1
+
+ maxI64 = (1 << 63) - 1
+ maxI32 = (1 << 31) - 1
+ maxI16 = (1 << 15) - 1
+ maxI8 = (1 << 7) - 1
+
+ minI64 = -(1 << 63)
+ minI32 = -(1 << 31)
+ minI16 = -(1 << 15)
+ minI8 = -(1 << 7)
+)
+
+func cmp(left *big.Int, op string, right *big.Int) bool {
+ switch left.Cmp(right) {
+ case -1: // less than
+ return op == "<" || op == "<=" || op == "!="
+ case 0: // equal
+ return op == "==" || op == "<=" || op == ">="
+ case 1: // greater than
+ return op == ">" || op == ">=" || op == "!="
+ }
+ panic("unexpected comparison value")
+}
+
+func inRange(typ string, val *big.Int) bool {
+ min, max := &big.Int{}, &big.Int{}
+ switch typ {
+ case "uint64":
+ max = max.SetUint64(maxU64)
+ case "uint32":
+ max = max.SetUint64(maxU32)
+ case "uint16":
+ max = max.SetUint64(maxU16)
+ case "uint8":
+ max = max.SetUint64(maxU8)
+ case "int64":
+ min = min.SetInt64(minI64)
+ max = max.SetInt64(maxI64)
+ case "int32":
+ min = min.SetInt64(minI32)
+ max = max.SetInt64(maxI32)
+ case "int16":
+ min = min.SetInt64(minI16)
+ max = max.SetInt64(maxI16)
+ case "int8":
+ min = min.SetInt64(minI8)
+ max = max.SetInt64(maxI8)
+ default:
+ panic("unexpected type")
+ }
+ return cmp(min, "<=", val) && cmp(val, "<=", max)
+}
+
+func getValues(typ string) []*big.Int {
+ Uint := func(v uint64) *big.Int { return big.NewInt(0).SetUint64(v) }
+ Int := func(v int64) *big.Int { return big.NewInt(0).SetInt64(v) }
+ values := []*big.Int{
+ // limits
+ Uint(maxU64),
+ Uint(maxU64 - 1),
+ Uint(maxI64 + 1),
+ Uint(maxI64),
+ Uint(maxI64 - 1),
+ Uint(maxU32 + 1),
+ Uint(maxU32),
+ Uint(maxU32 - 1),
+ Uint(maxI32 + 1),
+ Uint(maxI32),
+ Uint(maxI32 - 1),
+ Uint(maxU16 + 1),
+ Uint(maxU16),
+ Uint(maxU16 - 1),
+ Uint(maxI16 + 1),
+ Uint(maxI16),
+ Uint(maxI16 - 1),
+ Uint(maxU8 + 1),
+ Uint(maxU8),
+ Uint(maxU8 - 1),
+ Uint(maxI8 + 1),
+ Uint(maxI8),
+ Uint(maxI8 - 1),
+ Uint(0),
+ Int(minI8 + 1),
+ Int(minI8),
+ Int(minI8 - 1),
+ Int(minI16 + 1),
+ Int(minI16),
+ Int(minI16 - 1),
+ Int(minI32 + 1),
+ Int(minI32),
+ Int(minI32 - 1),
+ Int(minI64 + 1),
+ Int(minI64),
+
+ // other possibly interesting values
+ Uint(1),
+ Int(-1),
+ Uint(0xff << 56),
+ Uint(0xff << 32),
+ Uint(0xff << 24),
+ }
+ sort.Slice(values, func(i, j int) bool { return values[i].Cmp(values[j]) == -1 })
+ var ret []*big.Int
+ for _, val := range values {
+ if !inRange(typ, val) {
+ continue
+ }
+ ret = append(ret, val)
+ }
+ return ret
+}
+
+func sigString(v *big.Int) string {
+ var t big.Int
+ t.Abs(v)
+ if v.Sign() == -1 {
+ return "neg" + t.String()
+ }
+ return t.String()
+}
+
+func main() {
+ types := []string{
+ "uint64", "uint32", "uint16", "uint8",
+ "int64", "int32", "int16", "int8",
+ }
+
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/cmpConstGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main;\n")
+ fmt.Fprintf(w, "import (\"testing\"; \"reflect\"; \"runtime\";)\n")
+ fmt.Fprintf(w, "// results show the expected result for the elements left of, equal to and right of the index.\n")
+ fmt.Fprintf(w, "type result struct{l, e, r bool}\n")
+ fmt.Fprintf(w, "var (\n")
+ fmt.Fprintf(w, " eq = result{l: false, e: true, r: false}\n")
+ fmt.Fprintf(w, " ne = result{l: true, e: false, r: true}\n")
+ fmt.Fprintf(w, " lt = result{l: true, e: false, r: false}\n")
+ fmt.Fprintf(w, " le = result{l: true, e: true, r: false}\n")
+ fmt.Fprintf(w, " gt = result{l: false, e: false, r: true}\n")
+ fmt.Fprintf(w, " ge = result{l: false, e: true, r: true}\n")
+ fmt.Fprintf(w, ")\n")
+
+ operators := []struct{ op, name string }{
+ {"<", "lt"},
+ {"<=", "le"},
+ {">", "gt"},
+ {">=", "ge"},
+ {"==", "eq"},
+ {"!=", "ne"},
+ }
+
+ for _, typ := range types {
+ // generate a slice containing valid values for this type
+ fmt.Fprintf(w, "\n// %v tests\n", typ)
+ values := getValues(typ)
+ fmt.Fprintf(w, "var %v_vals = []%v{\n", typ, typ)
+ for _, val := range values {
+ fmt.Fprintf(w, "%v,\n", val.String())
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // generate test functions
+ for _, r := range values {
+ // TODO: could also test constant on lhs.
+ sig := sigString(r)
+ for _, op := range operators {
+ // no need for go:noinline because the function is called indirectly
+ fmt.Fprintf(w, "func %v_%v_%v(x %v) bool { return x %v %v; }\n", op.name, sig, typ, typ, op.op, r.String())
+ }
+ }
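+
+		// For example, for typ "uint8" and the constant 255 the loop above
+		// emits functions such as:
+		//	func lt_255_uint8(x uint8) bool { return x < 255; }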
+
+ // generate a table of test cases
+ fmt.Fprintf(w, "var %v_tests = []struct{\n", typ)
+ fmt.Fprintf(w, " idx int // index of the constant used\n")
+ fmt.Fprintf(w, " exp result // expected results\n")
+ fmt.Fprintf(w, " fn func(%v) bool\n", typ)
+ fmt.Fprintf(w, "}{\n")
+ for i, r := range values {
+ sig := sigString(r)
+ for _, op := range operators {
+ fmt.Fprintf(w, "{idx: %v,", i)
+ fmt.Fprintf(w, "exp: %v,", op.name)
+ fmt.Fprintf(w, "fn: %v_%v_%v},\n", op.name, sig, typ)
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // emit the main function, looping over all test cases
+ fmt.Fprintf(w, "// TestComparisonsConst tests results for comparison operations against constants.\n")
+ fmt.Fprintf(w, "func TestComparisonsConst(t *testing.T) {\n")
+ for _, typ := range types {
+ fmt.Fprintf(w, "for i, test := range %v_tests {\n", typ)
+ fmt.Fprintf(w, " for j, x := range %v_vals {\n", typ)
+ fmt.Fprintf(w, " want := test.exp.l\n")
+ fmt.Fprintf(w, " if j == test.idx {\nwant = test.exp.e\n}")
+ fmt.Fprintf(w, " else if j > test.idx {\nwant = test.exp.r\n}\n")
+ fmt.Fprintf(w, " if test.fn(x) != want {\n")
+ fmt.Fprintf(w, " fn := runtime.FuncForPC(reflect.ValueOf(test.fn).Pointer()).Name()\n")
+ fmt.Fprintf(w, " t.Errorf(\"test failed: %%v(%%v) != %%v [type=%v i=%%v j=%%v idx=%%v]\", fn, x, want, i, j, test.idx)\n", typ)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../cmpConst_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
new file mode 100644
index 0000000..2b8a331
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/constFoldGen.go
@@ -0,0 +1,307 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This program generates a test to verify that the standard arithmetic
+// operators properly handle constant folding. The test file should be
+// generated with a known working version of go.
+// Launch it with `go run constFoldGen.go`; a file called constFold_test.go
+// containing the tests will be written into the grandparent directory.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+type op struct {
+ name, symbol string
+}
+type szD struct {
+ name string
+ sn string
+ u []uint64
+ i []int64
+}
+
+var szs []szD = []szD{
+ szD{name: "uint64", sn: "64", u: []uint64{0, 1, 4294967296, 0xffffFFFFffffFFFF}},
+ szD{name: "int64", sn: "64", i: []int64{-0x8000000000000000, -0x7FFFFFFFFFFFFFFF,
+ -4294967296, -1, 0, 1, 4294967296, 0x7FFFFFFFFFFFFFFE, 0x7FFFFFFFFFFFFFFF}},
+
+ szD{name: "uint32", sn: "32", u: []uint64{0, 1, 4294967295}},
+ szD{name: "int32", sn: "32", i: []int64{-0x80000000, -0x7FFFFFFF, -1, 0,
+ 1, 0x7FFFFFFF}},
+
+ szD{name: "uint16", sn: "16", u: []uint64{0, 1, 65535}},
+ szD{name: "int16", sn: "16", i: []int64{-32768, -32767, -1, 0, 1, 32766, 32767}},
+
+ szD{name: "uint8", sn: "8", u: []uint64{0, 1, 255}},
+ szD{name: "int8", sn: "8", i: []int64{-128, -127, -1, 0, 1, 126, 127}},
+}
+
+var ops = []op{
+ op{"add", "+"}, op{"sub", "-"}, op{"div", "/"}, op{"mul", "*"},
+ op{"lsh", "<<"}, op{"rsh", ">>"}, op{"mod", "%"},
+}
+
+// compute the result of i op j, cast as type t.
+func ansU(i, j uint64, t, op string) string {
+ var ans uint64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << j
+ case ">>":
+ ans = i >> j
+ }
+ switch t {
+ case "uint32":
+ ans = uint64(uint32(ans))
+ case "uint16":
+ ans = uint64(uint16(ans))
+ case "uint8":
+ ans = uint64(uint8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+// compute the result of i op j, cast as type t.
+func ansS(i, j int64, t, op string) string {
+ var ans int64
+ switch op {
+ case "+":
+ ans = i + j
+ case "-":
+ ans = i - j
+ case "*":
+ ans = i * j
+ case "/":
+ if j != 0 {
+ ans = i / j
+ }
+ case "%":
+ if j != 0 {
+ ans = i % j
+ }
+ case "<<":
+ ans = i << uint64(j)
+ case ">>":
+ ans = i >> uint64(j)
+ }
+ switch t {
+ case "int32":
+ ans = int64(int32(ans))
+ case "int16":
+ ans = int64(int16(ans))
+ case "int8":
+ ans = int64(int8(ans))
+ }
+ return fmt.Sprintf("%d", ans)
+}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// run\n")
+ fmt.Fprintf(w, "// Code generated by gen/constFoldGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package gc\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range szs {
+ for _, o := range ops {
+ if o.symbol == "<<" || o.symbol == ">>" {
+ // shifts handled separately below, as they can have
+ // different types on the LHS and RHS.
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s(t *testing.T) {\n", s.name, o.name)
+ fmt.Fprintf(w, "\tvar x, y, r %s\n", s.name)
+ // unsigned test cases
+ for _, c := range s.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.u {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed test cases
+ for _, c := range s.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range s.i {
+ if d == 0 && (o.symbol == "/" || o.symbol == "%") {
+ continue
+ }
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, d, s.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+
+ // Special signed/unsigned cases for shifts
+ for _, ls := range szs {
+ for _, rs := range szs {
+ if rs.name[0] != 'u' {
+ continue
+ }
+ for _, o := range ops {
+ if o.symbol != "<<" && o.symbol != ">>" {
+ continue
+ }
+ fmt.Fprintf(w, "func TestConstFold%s%s%s(t *testing.T) {\n", ls.name, rs.name, o.name)
+ fmt.Fprintf(w, "\tvar x, r %s\n", ls.name)
+ fmt.Fprintf(w, "\tvar y %s\n", rs.name)
+ // unsigned LHS
+ for _, c := range ls.u {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansU(c, d, ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ // signed LHS
+ for _, c := range ls.i {
+ fmt.Fprintf(w, "\tx = %d\n", c)
+ for _, d := range rs.u {
+ fmt.Fprintf(w, "\ty = %d\n", d)
+ fmt.Fprintf(w, "\tr = x %s y\n", o.symbol)
+ want := ansS(c, int64(d), ls.name, o.symbol)
+ fmt.Fprintf(w, "\tif r != %s {\n", want)
+ fmt.Fprintf(w, "\t\tt.Errorf(\"%d %%s %d = %%d, want %s\", %q, r)\n", c, d, want, o.symbol)
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+ }
+ }
+
+ // Constant folding for comparisons
+ for _, s := range szs {
+ fmt.Fprintf(w, "func TestConstFoldCompare%s(t *testing.T) {\n", s.name)
+ for _, x := range s.i {
+ for _, y := range s.i {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ for _, x := range s.u {
+ for _, y := range s.u {
+ fmt.Fprintf(w, "\t{\n")
+ fmt.Fprintf(w, "\t\tvar x %s = %d\n", s.name, x)
+ fmt.Fprintf(w, "\t\tvar y %s = %d\n", s.name, y)
+ if x == y {
+ fmt.Fprintf(w, "\t\tif !(x == y) { t.Errorf(\"!(%%d == %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x == y { t.Errorf(\"%%d == %%d\", x, y) }\n")
+ }
+ if x != y {
+ fmt.Fprintf(w, "\t\tif !(x != y) { t.Errorf(\"!(%%d != %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x != y { t.Errorf(\"%%d != %%d\", x, y) }\n")
+ }
+ if x < y {
+ fmt.Fprintf(w, "\t\tif !(x < y) { t.Errorf(\"!(%%d < %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x < y { t.Errorf(\"%%d < %%d\", x, y) }\n")
+ }
+ if x > y {
+ fmt.Fprintf(w, "\t\tif !(x > y) { t.Errorf(\"!(%%d > %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x > y { t.Errorf(\"%%d > %%d\", x, y) }\n")
+ }
+ if x <= y {
+ fmt.Fprintf(w, "\t\tif !(x <= y) { t.Errorf(\"!(%%d <= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x <= y { t.Errorf(\"%%d <= %%d\", x, y) }\n")
+ }
+ if x >= y {
+ fmt.Fprintf(w, "\t\tif !(x >= y) { t.Errorf(\"!(%%d >= %%d)\", x, y) }\n")
+ } else {
+ fmt.Fprintf(w, "\t\tif x >= y { t.Errorf(\"%%d >= %%d\", x, y) }\n")
+ }
+ fmt.Fprintf(w, "\t}\n")
+ }
+ }
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../../constFold_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/copyGen.go b/src/cmd/compile/internal/test/testdata/gen/copyGen.go
new file mode 100644
index 0000000..4567f2f
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/copyGen.go
@@ -0,0 +1,121 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+// This program generates tests to verify that copying operations
+// copy the data they are supposed to and clobber no adjacent values.
+
+// Run it as `go run copyGen.go`. A file called copy_test.go
+// containing the tests will be written into the parent directory.
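+//
+// As a rough illustration (not meant as verbatim output), for each size s the
+// generator emits a copy function like the one below, plus a harness that
+// checks the guard bytes around the destination are left untouched:
+//
+//	//go:noinline
+//	func t1copy_ssa(y, x *[1]byte) {
+//		*y = *x
+//	}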
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025, 1024 + 7, 1024 + 8, 1024 + 9, 1024 + 15, 1024 + 16, 1024 + 17}
+
+var usizes = [...]int{2, 3, 4, 5, 6, 7}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/copyGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type T%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func t%dcopy_ssa(y, x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *y = *x\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " x := [%d]byte{", s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " t%dcopy_ssa(&a.mid, &x)\n", s)
+ fmt.Fprintf(w, " want := T%d{[8]byte{201, 202, 203, 204, 205, 206, 207, 208},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "%d,", 100+i%100)
+ }
+ fmt.Fprintf(w, "},[8]byte{211, 212, 213, 214, 215, 216, 217, 218}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"t%dcopy got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func tu%dcopy_ssa(docopy bool, data [%d]byte, x *[%d]byte) {\n", s, s, s)
+ fmt.Fprintf(w, " if docopy {\n")
+ fmt.Fprintf(w, " *x = data\n")
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testUnalignedCopy%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " var a [%d]byte\n", s)
+ fmt.Fprintf(w, " t%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " tu%dcopy_ssa(true, t%d, &a)\n", s, s)
+ fmt.Fprintf(w, " want%d := [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, " %d,", s+i)
+ }
+ fmt.Fprintf(w, "}\n")
+ fmt.Fprintf(w, " if a != want%d {\n", s)
+ fmt.Fprintf(w, " t.Errorf(\"tu%dcopy got=%%v, want %%v\\n\", a, want%d)\n", s, s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestCopy(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testCopy%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testUnalignedCopy%d(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../copy_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/gen/zeroGen.go b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go
new file mode 100644
index 0000000..7056730
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/gen/zeroGen.go
@@ -0,0 +1,143 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "bytes"
+ "fmt"
+ "go/format"
+ "io/ioutil"
+ "log"
+)
+
+// This program generates tests to verify that zeroing operations
+// zero the data they are supposed to and clobber no adjacent values.
+
+// Run it as `go run zeroGen.go`. A file called zero_test.go
+// containing the tests will be written into the parent directory.
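+//
+// As a rough illustration, for each size s the generator emits a zeroing
+// function like the one below (this matches the generated zero_test.go), plus
+// a harness that checks the guard bytes around the zeroed field survive:
+//
+//	//go:noinline
+//	func zero1_ssa(x *[1]byte) {
+//		*x = [1]byte{}
+//	}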
+
+var sizes = [...]int{1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 15, 16, 17, 23, 24, 25, 31, 32, 33, 63, 64, 65, 1023, 1024, 1025}
+var usizes = [...]int{8, 16, 24, 32, 64, 256}
+
+func main() {
+ w := new(bytes.Buffer)
+ fmt.Fprintf(w, "// Code generated by gen/zeroGen.go. DO NOT EDIT.\n\n")
+ fmt.Fprintf(w, "package main\n")
+ fmt.Fprintf(w, "import \"testing\"\n")
+
+ for _, s := range sizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%d struct {\n", s)
+ fmt.Fprintf(w, " pre [8]byte\n")
+ fmt.Fprintf(w, " mid [%d]byte\n", s)
+ fmt.Fprintf(w, " post [8]byte\n")
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%d_ssa(x *[%d]byte) {\n", s, s)
+ fmt.Fprintf(w, " *x = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%d(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " zero%d_ssa(&a.mid)\n", s)
+ fmt.Fprintf(w, " want := Z%d{[8]byte{255,255,255,255,255,255,255,255},[%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "},[8]byte{255,255,255,255,255,255,255,255}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%d got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ for _, s := range usizes {
+ // type for test
+ fmt.Fprintf(w, "type Z%du1 struct {\n", s)
+ fmt.Fprintf(w, " b bool\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ fmt.Fprintf(w, "type Z%du2 struct {\n", s)
+ fmt.Fprintf(w, " i uint16\n")
+ fmt.Fprintf(w, " val [%d]byte\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du1_ssa(t *Z%du1) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // function being tested
+ fmt.Fprintf(w, "//go:noinline\n")
+ fmt.Fprintf(w, "func zero%du2_ssa(t *Z%du2) {\n", s, s)
+ fmt.Fprintf(w, " t.val = [%d]byte{}\n", s)
+ fmt.Fprintf(w, "}\n")
+
+ // testing harness
+ fmt.Fprintf(w, "func testZero%du(t *testing.T) {\n", s)
+ fmt.Fprintf(w, " a := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du1_ssa(&a)\n", s)
+ fmt.Fprintf(w, " want := Z%du1{false, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if a != want {\n")
+	fmt.Fprintf(w, "  t.Errorf(\"zero%du1 got=%%v, want %%v\\n\", a, want)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, " b := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "255,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " zero%du2_ssa(&b)\n", s)
+ fmt.Fprintf(w, " wantb := Z%du2{15, [%d]byte{", s, s)
+ for i := 0; i < s; i++ {
+ fmt.Fprintf(w, "0,")
+ }
+ fmt.Fprintf(w, "}}\n")
+ fmt.Fprintf(w, " if b != wantb {\n")
+ fmt.Fprintf(w, " t.Errorf(\"zero%du2 got=%%v, want %%v\\n\", b, wantb)\n", s)
+ fmt.Fprintf(w, " }\n")
+ fmt.Fprintf(w, "}\n")
+ }
+
+ // boilerplate at end
+ fmt.Fprintf(w, "func TestZero(t *testing.T) {\n")
+ for _, s := range sizes {
+ fmt.Fprintf(w, " testZero%d(t)\n", s)
+ }
+ for _, s := range usizes {
+ fmt.Fprintf(w, " testZero%du(t)\n", s)
+ }
+ fmt.Fprintf(w, "}\n")
+
+ // gofmt result
+ b := w.Bytes()
+ src, err := format.Source(b)
+ if err != nil {
+ fmt.Printf("%s\n", b)
+ panic(err)
+ }
+
+ // write to file
+ err = ioutil.WriteFile("../zero_test.go", src, 0666)
+ if err != nil {
+ log.Fatalf("can't write output: %v\n", err)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/loadstore_test.go b/src/cmd/compile/internal/test/testdata/loadstore_test.go
new file mode 100644
index 0000000..57571f5
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/loadstore_test.go
@@ -0,0 +1,204 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests load/store ordering
+
+package main
+
+import "testing"
+
+// testLoadStoreOrder tests for reordering of stores/loads.
+func testLoadStoreOrder(t *testing.T) {
+ z := uint32(1000)
+ if testLoadStoreOrder_ssa(&z, 100) == 0 {
+ t.Errorf("testLoadStoreOrder failed")
+ }
+}
+
+//go:noinline
+func testLoadStoreOrder_ssa(z *uint32, prec uint) int {
+ old := *z // load
+ *z = uint32(prec) // store
+ if *z < old { // load
+ return 1
+ }
+ return 0
+}
+
+func testStoreSize(t *testing.T) {
+ a := [4]uint16{11, 22, 33, 44}
+ testStoreSize_ssa(&a[0], &a[2], 77)
+ want := [4]uint16{77, 22, 33, 44}
+ if a != want {
+ t.Errorf("testStoreSize failed. want = %d, got = %d", want, a)
+ }
+}
+
+//go:noinline
+func testStoreSize_ssa(p *uint16, q *uint16, v uint32) {
+ // Test to make sure that (Store ptr (Trunc32to16 val) mem)
+ // does not end up as a 32-bit store. It must stay a 16 bit store
+ // even when Trunc32to16 is rewritten to be a nop.
+ // To ensure that we get rewrite the Trunc32to16 before
+ // we rewrite the Store, we force the truncate into an
+ // earlier basic block by using it on both branches.
+ w := uint16(v)
+ if p != nil {
+ *p = w
+ } else {
+ *q = w
+ }
+}
+
+//go:noinline
+func testExtStore_ssa(p *byte, b bool) int {
+ x := *p
+ *p = 7
+ if b {
+ return int(x)
+ }
+ return 0
+}
+
+func testExtStore(t *testing.T) {
+ const start = 8
+ var b byte = start
+ if got := testExtStore_ssa(&b, true); got != start {
+ t.Errorf("testExtStore failed. want = %d, got = %d", start, got)
+ }
+}
+
+var b int
+
+// testDeadStorePanic_ssa ensures that we don't optimize away stores
+// that could be read after recover(). Modeled after fixedbugs/issue1304.
+//go:noinline
+func testDeadStorePanic_ssa(a int) (r int) {
+ defer func() {
+ recover()
+ r = a
+ }()
+ a = 2 // store
+ b := a - a // optimized to zero
+ c := 4
+ a = c / b // store, but panics
+ a = 3 // store
+ r = a
+ return
+}
+
+func testDeadStorePanic(t *testing.T) {
+ if want, got := 2, testDeadStorePanic_ssa(1); want != got {
+ t.Errorf("testDeadStorePanic failed. want = %d, got = %d", want, got)
+ }
+}
+
+//go:noinline
+func loadHitStore8(x int8, p *int8) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU8(x uint8, p *uint8) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore16(x int16, p *int16) int32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU16(x uint16, p *uint16) uint32 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint32(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStore32(x int32, p *int32) int64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return int64(*p) // load and cast
+}
+
+//go:noinline
+func loadHitStoreU32(x uint32, p *uint32) uint64 {
+ x *= x // try to trash high bits (arch-dependent)
+ *p = x // store
+ return uint64(*p) // load and cast
+}
+
+func testLoadHitStore(t *testing.T) {
+ // Test that sign/zero extensions are kept when a load-hit-store
+ // is replaced by a register-register move.
+ {
+ var in int8 = (1 << 6) + 1
+ var p int8
+ got := loadHitStore8(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint8 = (1 << 6) + 1
+ var p uint8
+ got := loadHitStoreU8(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint8) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int16 = (1 << 10) + 1
+ var p int16
+ got := loadHitStore16(in, &p)
+ want := int32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint16 = (1 << 10) + 1
+ var p uint16
+ got := loadHitStoreU16(in, &p)
+ want := uint32(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint16) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in int32 = (1 << 30) + 1
+ var p int32
+ got := loadHitStore32(in, &p)
+ want := int64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (int32) failed. want = %d, got = %d", want, got)
+ }
+ }
+ {
+ var in uint32 = (1 << 30) + 1
+ var p uint32
+ got := loadHitStoreU32(in, &p)
+ want := uint64(in * in)
+ if got != want {
+ t.Errorf("testLoadHitStore (uint32) failed. want = %d, got = %d", want, got)
+ }
+ }
+}
+
+func TestLoadStore(t *testing.T) {
+ testLoadStoreOrder(t)
+ testStoreSize(t)
+ testExtStore(t)
+ testDeadStorePanic(t)
+ testLoadHitStore(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/map_test.go b/src/cmd/compile/internal/test/testdata/map_test.go
new file mode 100644
index 0000000..71dc820
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/map_test.go
@@ -0,0 +1,37 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// map_test.go tests map operations.
+package main
+
+import "testing"
+
+//go:noinline
+func lenMap_ssa(v map[int]int) int {
+ return len(v)
+}
+
+func testLenMap(t *testing.T) {
+
+ v := make(map[int]int)
+ v[0] = 0
+ v[1] = 0
+ v[2] = 0
+
+ if want, got := 3, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(map) = %d, got %d", want, got)
+ }
+}
+
+func testLenNilMap(t *testing.T) {
+
+ var v map[int]int
+ if want, got := 0, lenMap_ssa(v); got != want {
+ t.Errorf("expected len(nil) = %d, got %d", want, got)
+ }
+}
+func TestMap(t *testing.T) {
+ testLenMap(t)
+ testLenNilMap(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/mysort/mysort.go b/src/cmd/compile/internal/test/testdata/mysort/mysort.go
new file mode 100644
index 0000000..14852c8
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/mysort/mysort.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Generic sort function, tested with two different pointer types.
+
+package mysort
+
+import (
+ "fmt"
+)
+
+type LessConstraint[T any] interface {
+ Less(T) bool
+}
+
+//go:noinline
+func Sort[T LessConstraint[T]](x []T) {
+ n := len(x)
+ for i := 1; i < n; i++ {
+ for j := i; j > 0 && x[j].Less(x[j-1]); j-- {
+ x[j], x[j-1] = x[j-1], x[j]
+ }
+ }
+}
+
+type MyInt struct {
+ Value int
+}
+
+func (a *MyInt) Less(b *MyInt) bool {
+ return a.Value < b.Value
+}
+
+//go:noinline
+func F() {
+ sl1 := []*MyInt{&MyInt{4}, &MyInt{3}, &MyInt{8}, &MyInt{7}}
+ Sort(sl1)
+ fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3])
+}
diff --git a/src/cmd/compile/internal/test/testdata/namedReturn_test.go b/src/cmd/compile/internal/test/testdata/namedReturn_test.go
new file mode 100644
index 0000000..b07e225
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/namedReturn_test.go
@@ -0,0 +1,93 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that naming named
+// return variables in a return statement works.
+// See issue #14904.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+)
+
+// Our heap-allocated object that will be GC'd incorrectly.
+// Note that we always check the second word because that's
+// where 0xdeaddeaddeaddead is written.
+type B [4]int
+
+// small (SSAable) array
+type A1 [3]*B
+
+//go:noinline
+func f1() (t A1) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) array
+type A2 [8]*B
+
+//go:noinline
+func f2() (t A2) {
+ t[0] = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// small (SSAable) struct
+type A3 struct {
+ a, b, c *B
+}
+
+//go:noinline
+func f3() (t A3) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+// large (non-SSAable) struct
+type A4 struct {
+ a, b, c, d, e, f *B
+}
+
+//go:noinline
+func f4() (t A4) {
+ t.a = &B{91, 92, 93, 94}
+ runtime.GC()
+ return t
+}
+
+var sink *B
+
+func f5() int {
+ b := &B{91, 92, 93, 94}
+ t := A4{b, nil, nil, nil, nil, nil}
+ sink = b // make sure b is heap allocated ...
+ sink = nil // ... but not live
+ runtime.GC()
+ t = t
+ return t.a[1]
+}
+
+func TestNamedReturn(t *testing.T) {
+ if v := f1()[0][1]; v != 92 {
+ t.Errorf("f1()[0][1]=%d, want 92\n", v)
+ }
+ if v := f2()[0][1]; v != 92 {
+ t.Errorf("f2()[0][1]=%d, want 92\n", v)
+ }
+ if v := f3().a[1]; v != 92 {
+ t.Errorf("f3().a[1]=%d, want 92\n", v)
+ }
+ if v := f4().a[1]; v != 92 {
+ t.Errorf("f4().a[1]=%d, want 92\n", v)
+ }
+ if v := f5(); v != 92 {
+ t.Errorf("f5()=%d, want 92\n", v)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/phi_test.go b/src/cmd/compile/internal/test/testdata/phi_test.go
new file mode 100644
index 0000000..c8a73ff
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/phi_test.go
@@ -0,0 +1,99 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// Test to make sure spills of cast-shortened values
+// don't end up spilling the pre-shortened size instead
+// of the post-shortened size.
+
+import (
+ "runtime"
+ "testing"
+)
+
+var data1 [26]int32
+var data2 [26]int64
+
+func init() {
+ for i := 0; i < 26; i++ {
+ // If we spill all 8 bytes of this datum, the 1 in the high-order 4 bytes
+ // will overwrite some other variable in the stack frame.
+ data2[i] = 0x100000000
+ }
+}
+
+func foo() int32 {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int32
+ if always {
+ a = data1[0]
+ b = data1[1]
+ c = data1[2]
+ d = data1[3]
+ e = data1[4]
+ f = data1[5]
+ g = data1[6]
+ h = data1[7]
+ i = data1[8]
+ j = data1[9]
+ k = data1[10]
+ l = data1[11]
+ m = data1[12]
+ n = data1[13]
+ o = data1[14]
+ p = data1[15]
+ q = data1[16]
+ r = data1[17]
+ s = data1[18]
+ t = data1[19]
+ u = data1[20]
+ v = data1[21]
+ w = data1[22]
+ x = data1[23]
+ y = data1[24]
+ z = data1[25]
+ } else {
+ a = int32(data2[0])
+ b = int32(data2[1])
+ c = int32(data2[2])
+ d = int32(data2[3])
+ e = int32(data2[4])
+ f = int32(data2[5])
+ g = int32(data2[6])
+ h = int32(data2[7])
+ i = int32(data2[8])
+ j = int32(data2[9])
+ k = int32(data2[10])
+ l = int32(data2[11])
+ m = int32(data2[12])
+ n = int32(data2[13])
+ o = int32(data2[14])
+ p = int32(data2[15])
+ q = int32(data2[16])
+ r = int32(data2[17])
+ s = int32(data2[18])
+ t = int32(data2[19])
+ u = int32(data2[20])
+ v = int32(data2[21])
+ w = int32(data2[22])
+ x = int32(data2[23])
+ y = int32(data2[24])
+ z = int32(data2[25])
+ }
+ // Lots of phis of the form phi(int32,int64) of type int32 happen here.
+ // Some will be stack phis. For those stack phis, make sure the spill
+	// of the second argument uses the phi's width (4 bytes), not the argument's
+	// width (8 bytes). Otherwise, a random stack slot gets clobbered.
+
+ runtime.Gosched()
+ return a + b + c + d + e + f + g + h + i + j + k + l + m + n + o + p + q + r + s + t + u + v + w + x + y + z
+}
+
+func TestPhi(t *testing.T) {
+ want := int32(0)
+ got := foo()
+ if got != want {
+ t.Fatalf("want %d, got %d\n", want, got)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/ptrsort.go b/src/cmd/compile/internal/test/testdata/ptrsort.go
new file mode 100644
index 0000000..d26ba58
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ptrsort.go
@@ -0,0 +1,30 @@
+package main
+
+// Test the generic sort function with two different pointer types in different
+// packages, and make sure only one instantiation is created.
+
+import (
+ "fmt"
+
+ "cmd/compile/internal/test/testdata/mysort"
+)
+
+type MyString struct {
+ string
+}
+
+func (a *MyString) Less(b *MyString) bool {
+ return a.string < b.string
+}
+
+func main() {
+ mysort.F()
+
+ sl1 := []*mysort.MyInt{{7}, {1}, {4}, {6}}
+ mysort.Sort(sl1)
+ fmt.Printf("%v %v %v %v\n", sl1[0], sl1[1], sl1[2], sl1[3])
+
+ sl2 := []*MyString{{"when"}, {"in"}, {"the"}, {"course"}, {"of"}}
+ mysort.Sort(sl2)
+ fmt.Printf("%v %v %v %v %v\n", sl2[0], sl2[1], sl2[2], sl2[3], sl2[4])
+}
diff --git a/src/cmd/compile/internal/test/testdata/ptrsort.out b/src/cmd/compile/internal/test/testdata/ptrsort.out
new file mode 100644
index 0000000..41f1621
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/ptrsort.out
@@ -0,0 +1,3 @@
+&{3} &{4} &{7} &{8}
+&{1} &{4} &{6} &{7}
+&{course} &{in} &{of} &{the} &{when}
diff --git a/src/cmd/compile/internal/test/testdata/regalloc_test.go b/src/cmd/compile/internal/test/testdata/regalloc_test.go
new file mode 100644
index 0000000..577f8e7
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/regalloc_test.go
@@ -0,0 +1,50 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests phi implementation
+
+package main
+
+import "testing"
+
+func phiOverwrite_ssa() int {
+ var n int
+ for i := 0; i < 10; i++ {
+ if i == 6 {
+ break
+ }
+ n = i
+ }
+ return n
+}
+
+func phiOverwrite(t *testing.T) {
+ want := 5
+ got := phiOverwrite_ssa()
+ if got != want {
+		t.Errorf("phiOverwrite_ssa() = %d, want %d", got, want)
+ }
+}
+
+func phiOverwriteBig_ssa() int {
+ var a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z int
+ a = 1
+ for idx := 0; idx < 26; idx++ {
+ a, b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z = b, c, d, e, f, g, h, i, j, k, l, m, n, o, p, q, r, s, t, u, v, w, x, y, z, a
+ }
+ return a*1 + b*2 + c*3 + d*4 + e*5 + f*6 + g*7 + h*8 + i*9 + j*10 + k*11 + l*12 + m*13 + n*14 + o*15 + p*16 + q*17 + r*18 + s*19 + t*20 + u*21 + v*22 + w*23 + x*24 + y*25 + z*26
+}
+
+func phiOverwriteBig(t *testing.T) {
+ want := 1
+ got := phiOverwriteBig_ssa()
+ if got != want {
+		t.Errorf("phiOverwriteBig_ssa() = %d, want %d", got, want)
+ }
+}
+
+func TestRegalloc(t *testing.T) {
+ phiOverwrite(t)
+ phiOverwriteBig(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
new file mode 100644
index 0000000..3db0b8a
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue20272.go
@@ -0,0 +1,34 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var (
+ i0 uint8
+ b0 byte
+
+ i1 *uint8
+ b1 *byte
+
+ i2 **uint8
+ b2 **byte
+
+ i3 ***uint8
+ b3 ***byte
+
+ i4 ****uint8
+ b4 ****byte
+
+ i5 *****uint8
+ b5 *****byte
+
+ i6 ******uint8
+ b6 ******byte
+
+ i7 *******uint8
+ b7 *******byte
+
+ i8 ********uint8
+ b8 ********byte
+)
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
new file mode 100644
index 0000000..817f4a6
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue27013.go
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(arg interface{}) {
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+ _ = arg.(interface{ Func() int32 })
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
new file mode 100644
index 0000000..7b5de2c
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue30202.go
@@ -0,0 +1,17 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func A(x interface {
+ X() int
+}) int {
+ return x.X()
+}
+
+func B(x interface {
+ X() int
+}) int {
+ return x.X()
+}
diff --git a/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
new file mode 100644
index 0000000..b87daed
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/reproducible/issue38068.go
@@ -0,0 +1,70 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue38068
+
+// A type with a couple of inlinable, non-pointer-receiver methods
+// that have params and local variables.
+type A struct {
+ s string
+ next *A
+ prev *A
+}
+
+// Inlinable, value-received method with locals and params.
+func (a A) double(x string, y int) string {
+ if y == 191 {
+ a.s = ""
+ }
+ q := a.s + "a"
+ r := a.s + "b"
+ return q + r
+}
+
+// Inlinable, value-received method with locals and params.
+func (a A) triple(x string, y int) string {
+ q := a.s
+ if y == 998877 {
+ a.s = x
+ }
+ r := a.s + a.s
+ return q + r
+}
+
+type methods struct {
+ m1 func(a *A, x string, y int) string
+ m2 func(a *A, x string, y int) string
+}
+
+// Now a function that makes references to the methods via pointers,
+// which should trigger the wrapper generation.
+func P(a *A, ms *methods) {
+ if a != nil {
+ defer func() { println("done") }()
+ }
+ println(ms.m1(a, "a", 2))
+ println(ms.m2(a, "b", 3))
+}
+
+func G(x *A, n int) {
+ if n <= 0 {
+ println(n)
+ return
+ }
+	// Address-taken local of type A, which will ensure that the
+ // compiler's writeType() routine will create a method wrapper.
+ var a, b A
+ a.next = x
+ a.prev = &b
+ x = &a
+ G(x, n-2)
+}
+
+var M methods
+
+func F() {
+ M.m1 = (*A).double
+ M.m2 = (*A).triple
+ G(nil, 100)
+}
diff --git a/src/cmd/compile/internal/test/testdata/short_test.go b/src/cmd/compile/internal/test/testdata/short_test.go
new file mode 100644
index 0000000..7a743b5
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/short_test.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Tests short circuiting.
+
+package main
+
+import "testing"
+
+func and_ssa(arg1, arg2 bool) bool {
+ return arg1 && rightCall(arg2)
+}
+
+func or_ssa(arg1, arg2 bool) bool {
+ return arg1 || rightCall(arg2)
+}
+
+var rightCalled bool
+
+//go:noinline
+func rightCall(v bool) bool {
+ rightCalled = true
+ return v
+ panic("unreached")
+}
+
+func testAnd(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "AND", arg1, arg2, and_ssa, arg1, wantRes)
+}
+func testOr(t *testing.T, arg1, arg2, wantRes bool) {
+ testShortCircuit(t, "OR", arg1, arg2, or_ssa, !arg1, wantRes)
+}
+
+func testShortCircuit(t *testing.T, opName string, arg1, arg2 bool, fn func(bool, bool) bool, wantRightCall, wantRes bool) {
+ rightCalled = false
+ got := fn(arg1, arg2)
+ if rightCalled != wantRightCall {
+ t.Errorf("failed for %t %s %t; rightCalled=%t want=%t", arg1, opName, arg2, rightCalled, wantRightCall)
+ }
+ if wantRes != got {
+ t.Errorf("failed for %t %s %t; res=%t want=%t", arg1, opName, arg2, got, wantRes)
+ }
+}
+
+// TestShortCircuit tests OANDAND and OOROR expressions and short circuiting.
+func TestShortCircuit(t *testing.T) {
+ testAnd(t, false, false, false)
+ testAnd(t, false, true, false)
+ testAnd(t, true, false, false)
+ testAnd(t, true, true, true)
+
+ testOr(t, false, false, false)
+ testOr(t, false, true, true)
+ testOr(t, true, false, true)
+ testOr(t, true, true, true)
+}
diff --git a/src/cmd/compile/internal/test/testdata/slice_test.go b/src/cmd/compile/internal/test/testdata/slice_test.go
new file mode 100644
index 0000000..c134578
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/slice_test.go
@@ -0,0 +1,46 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This test makes sure that t.s = t.s[0:x] doesn't write
+// either the slice pointer or the capacity.
+// See issue #14855.
+
+package main
+
+import "testing"
+
+const N = 1000000
+
+type X struct {
+ s []int
+}
+
+func TestSlice(t *testing.T) {
+ done := make(chan struct{})
+ a := make([]int, N+10)
+
+ x := &X{a}
+
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[1:9]
+ }
+ done <- struct{}{}
+ }()
+ go func() {
+ for i := 0; i < N; i++ {
+ x.s = x.s[0:8] // should only write len
+ }
+ done <- struct{}{}
+ }()
+ <-done
+ <-done
+
+ if cap(x.s) != cap(a)-N {
+ t.Errorf("wanted cap=%d, got %d\n", cap(a)-N, cap(x.s))
+ }
+ if &x.s[0] != &a[N] {
+ t.Errorf("wanted ptr=%p, got %p\n", &a[N], &x.s[0])
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/sqrtConst_test.go b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go
new file mode 100644
index 0000000..5b7a149
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/sqrtConst_test.go
@@ -0,0 +1,50 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "math"
+ "testing"
+)
+
+var tests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+ want float64
+}{
+ {"sqrt0", 0, math.Sqrt(0), 0},
+ {"sqrt1", 1, math.Sqrt(1), 1},
+ {"sqrt2", 2, math.Sqrt(2), math.Sqrt2},
+ {"sqrt4", 4, math.Sqrt(4), 2},
+ {"sqrt100", 100, math.Sqrt(100), 10},
+ {"sqrt101", 101, math.Sqrt(101), 10.04987562112089},
+}
+
+var nanTests = [...]struct {
+ name string
+ in float64 // used for error messages, not an input
+ got float64
+}{
+ {"sqrtNaN", math.NaN(), math.Sqrt(math.NaN())},
+ {"sqrtNegative", -1, math.Sqrt(-1)},
+ {"sqrtNegInf", math.Inf(-1), math.Sqrt(math.Inf(-1))},
+}
+
+func TestSqrtConst(t *testing.T) {
+ for _, test := range tests {
+ if test.got != test.want {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want %f\n", test.name, test.in, test.got, test.want)
+ }
+ }
+ for _, test := range nanTests {
+		if !math.IsNaN(test.got) {
+ t.Errorf("%s: math.Sqrt(%f): got %f, want NaN\n", test.name, test.in, test.got)
+ }
+ }
+ if got := math.Sqrt(math.Inf(1)); !math.IsInf(got, 1) {
+ t.Errorf("math.Sqrt(+Inf), got %f, want +Inf\n", got)
+ }
+}
diff --git a/src/cmd/compile/internal/test/testdata/string_test.go b/src/cmd/compile/internal/test/testdata/string_test.go
new file mode 100644
index 0000000..5d086f0
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/string_test.go
@@ -0,0 +1,207 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// string_test.go tests string operations.
+package main
+
+import "testing"
+
+//go:noinline
+func testStringSlice1_ssa(a string, i, j int) string {
+ return a[i:]
+}
+
+//go:noinline
+func testStringSlice2_ssa(a string, i, j int) string {
+ return a[:j]
+}
+
+//go:noinline
+func testStringSlice12_ssa(a string, i, j int) string {
+ return a[i:j]
+}
+
+func testStringSlice(t *testing.T) {
+ tests := [...]struct {
+ fn func(string, int, int) string
+ s string
+ low, high int
+ want string
+ }{
+ // -1 means the value is not used.
+ {testStringSlice1_ssa, "foobar", 0, -1, "foobar"},
+ {testStringSlice1_ssa, "foobar", 3, -1, "bar"},
+ {testStringSlice1_ssa, "foobar", 6, -1, ""},
+ {testStringSlice2_ssa, "foobar", -1, 0, ""},
+ {testStringSlice2_ssa, "foobar", -1, 3, "foo"},
+ {testStringSlice2_ssa, "foobar", -1, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 6, "foobar"},
+ {testStringSlice12_ssa, "foobar", 0, 0, ""},
+ {testStringSlice12_ssa, "foobar", 6, 6, ""},
+ {testStringSlice12_ssa, "foobar", 1, 5, "ooba"},
+ {testStringSlice12_ssa, "foobar", 3, 3, ""},
+ {testStringSlice12_ssa, "", 0, 0, ""},
+ }
+
+ for i, test := range tests {
+ if got := test.fn(test.s, test.low, test.high); test.want != got {
+ t.Errorf("#%d %s[%d,%d] = %s, want %s", i, test.s, test.low, test.high, got, test.want)
+ }
+ }
+}
+
+type prefix struct {
+ prefix string
+}
+
+func (p *prefix) slice_ssa() {
+ p.prefix = p.prefix[:3]
+}
+
+//go:noinline
+func testStructSlice(t *testing.T) {
+ p := &prefix{"prefix"}
+ p.slice_ssa()
+ if "pre" != p.prefix {
+ t.Errorf("wrong field slice: wanted %s got %s", "pre", p.prefix)
+ }
+}
+
+func testStringSlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testStringSlice12_ssa(str, 3, 9))
+}
+
+const _Accuracy_name = "BelowExactAbove"
+
+var _Accuracy_index = [...]uint8{0, 5, 10, 15}
+
+//go:noinline
+func testSmallIndexType_ssa(i int) string {
+ return _Accuracy_name[_Accuracy_index[i]:_Accuracy_index[i+1]]
+}
+
+func testSmallIndexType(t *testing.T) {
+ tests := []struct {
+ i int
+ want string
+ }{
+ {0, "Below"},
+ {1, "Exact"},
+ {2, "Above"},
+ }
+
+ for i, test := range tests {
+ if got := testSmallIndexType_ssa(test.i); got != test.want {
+ t.Errorf("#%d got %s wanted %s", i, got, test.want)
+ }
+ }
+}
+
+//go:noinline
+func testInt64Index_ssa(s string, i int64) byte {
+ return s[i]
+}
+
+//go:noinline
+func testInt64Slice_ssa(s string, i, j int64) string {
+ return s[i:j]
+}
+
+func testInt64Index(t *testing.T) {
+ tests := []struct {
+ i int64
+ j int64
+ b byte
+ s string
+ }{
+ {0, 5, 'B', "Below"},
+ {5, 10, 'E', "Exact"},
+ {10, 15, 'A', "Above"},
+ }
+
+ str := "BelowExactAbove"
+ for i, test := range tests {
+ if got := testInt64Index_ssa(str, test.i); got != test.b {
+ t.Errorf("#%d got %d wanted %d", i, got, test.b)
+ }
+ if got := testInt64Slice_ssa(str, test.i, test.j); got != test.s {
+ t.Errorf("#%d got %s wanted %s", i, got, test.s)
+ }
+ }
+}
+
+func testInt64IndexPanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %d and expected to panic, but didn't", testInt64Index_ssa(str, 1<<32+1))
+}
+
+func testInt64SlicePanic(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ //println("panicked as expected")
+ }
+ }()
+
+ str := "foobar"
+ t.Errorf("got %s and expected to panic, but didn't", testInt64Slice_ssa(str, 1<<32, 1<<32+1))
+}
+
+//go:noinline
+func testStringElem_ssa(s string, i int) byte {
+ return s[i]
+}
+
+func testStringElem(t *testing.T) {
+ tests := []struct {
+ s string
+ i int
+ n byte
+ }{
+ {"foobar", 3, 98},
+ {"foobar", 0, 102},
+ {"foobar", 5, 114},
+ }
+ for _, test := range tests {
+ if got := testStringElem_ssa(test.s, test.i); got != test.n {
+ t.Errorf("testStringElem \"%s\"[%d] = %d, wanted %d", test.s, test.i, got, test.n)
+ }
+ }
+}
+
+//go:noinline
+func testStringElemConst_ssa(i int) byte {
+ s := "foobar"
+ return s[i]
+}
+
+func testStringElemConst(t *testing.T) {
+ if got := testStringElemConst_ssa(3); got != 98 {
+ t.Errorf("testStringElemConst= %d, wanted 98", got)
+ }
+}
+
+func TestString(t *testing.T) {
+ testStringSlice(t)
+ testStringSlicePanic(t)
+ testStructSlice(t)
+ testSmallIndexType(t)
+ testStringElem(t)
+ testStringElemConst(t)
+ testInt64Index(t)
+ testInt64IndexPanic(t)
+ testInt64SlicePanic(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/unsafe_test.go b/src/cmd/compile/internal/test/testdata/unsafe_test.go
new file mode 100644
index 0000000..37599d3
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/unsafe_test.go
@@ -0,0 +1,145 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "runtime"
+ "testing"
+ "unsafe"
+)
+
+// global pointer slot
+var a *[8]uint
+
+// unfoldable true
+var always = true
+
+// Test to make sure that a pointer value which is alive
+// across a call is retained, even when there are matching
+// conversions to/from uintptr around the call.
+// We arrange things very carefully to have to/from
+// conversions on either side of the call which cannot be
+// combined with any other conversions.
+func f_ssa() *[8]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[8]uint)(unsafe.Pointer(z))
+}
+
+// g_ssa is the same as f_ssa, but with a bit of pointer
+// arithmetic for added insanity.
+func g_ssa() *[7]uint {
+ // Make x a uintptr pointing to where a points.
+ var x uintptr
+ if always {
+ x = uintptr(unsafe.Pointer(a))
+ } else {
+ x = 0
+ }
+ // Clobber the global pointer. The only live ref
+ // to the allocated object is now x.
+ a = nil
+
+ // Offset x by one int.
+ x += unsafe.Sizeof(int(0))
+
+ // Convert to pointer so it should hold
+ // the object live across GC call.
+ p := unsafe.Pointer(x)
+
+ // Call gc.
+ runtime.GC()
+
+ // Convert back to uintptr.
+ y := uintptr(p)
+
+ // Mess with y so that the subsequent cast
+ // to unsafe.Pointer can't be combined with the
+ // uintptr cast above.
+ var z uintptr
+ if always {
+ z = y
+ } else {
+ z = 0
+ }
+ return (*[7]uint)(unsafe.Pointer(z))
+}
+
+func testf(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := f_ssa()
+ for i := 0; i < 8; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func testg(t *testing.T) {
+ a = new([8]uint)
+ for i := 0; i < 8; i++ {
+ a[i] = 0xabcd
+ }
+ c := g_ssa()
+ for i := 0; i < 7; i++ {
+ if c[i] != 0xabcd {
+ t.Fatalf("%d:%x\n", i, c[i])
+ }
+ }
+}
+
+func alias_ssa(ui64 *uint64, ui32 *uint32) uint32 {
+ *ui32 = 0xffffffff
+ *ui64 = 0 // store
+ ret := *ui32 // load from same address, should be zero
+ *ui64 = 0xffffffffffffffff // store
+ return ret
+}
+func testdse(t *testing.T) {
+ x := int64(-1)
+ // construct two pointers that alias one another
+ ui64 := (*uint64)(unsafe.Pointer(&x))
+ ui32 := (*uint32)(unsafe.Pointer(&x))
+ if want, got := uint32(0), alias_ssa(ui64, ui32); got != want {
+ t.Fatalf("alias_ssa: wanted %d, got %d\n", want, got)
+ }
+}
+
+func TestUnsafe(t *testing.T) {
+ testf(t)
+ testg(t)
+ testdse(t)
+}
diff --git a/src/cmd/compile/internal/test/testdata/zero_test.go b/src/cmd/compile/internal/test/testdata/zero_test.go
new file mode 100644
index 0000000..64fa25e
--- /dev/null
+++ b/src/cmd/compile/internal/test/testdata/zero_test.go
@@ -0,0 +1,711 @@
+// Code generated by gen/zeroGen.go. DO NOT EDIT.
+
+package main
+
+import "testing"
+
+type Z1 struct {
+ pre [8]byte
+ mid [1]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1_ssa(x *[1]byte) {
+ *x = [1]byte{}
+}
+func testZero1(t *testing.T) {
+ a := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1_ssa(&a.mid)
+ want := Z1{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1]byte{0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z2 struct {
+ pre [8]byte
+ mid [2]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero2_ssa(x *[2]byte) {
+ *x = [2]byte{}
+}
+func testZero2(t *testing.T) {
+ a := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero2_ssa(&a.mid)
+ want := Z2{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [2]byte{0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero2 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z3 struct {
+ pre [8]byte
+ mid [3]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero3_ssa(x *[3]byte) {
+ *x = [3]byte{}
+}
+func testZero3(t *testing.T) {
+ a := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero3_ssa(&a.mid)
+ want := Z3{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [3]byte{0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero3 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z4 struct {
+ pre [8]byte
+ mid [4]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero4_ssa(x *[4]byte) {
+ *x = [4]byte{}
+}
+func testZero4(t *testing.T) {
+ a := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero4_ssa(&a.mid)
+ want := Z4{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [4]byte{0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero4 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z5 struct {
+ pre [8]byte
+ mid [5]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero5_ssa(x *[5]byte) {
+ *x = [5]byte{}
+}
+func testZero5(t *testing.T) {
+ a := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero5_ssa(&a.mid)
+ want := Z5{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [5]byte{0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero5 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z6 struct {
+ pre [8]byte
+ mid [6]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero6_ssa(x *[6]byte) {
+ *x = [6]byte{}
+}
+func testZero6(t *testing.T) {
+ a := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero6_ssa(&a.mid)
+ want := Z6{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [6]byte{0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero6 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z7 struct {
+ pre [8]byte
+ mid [7]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero7_ssa(x *[7]byte) {
+ *x = [7]byte{}
+}
+func testZero7(t *testing.T) {
+ a := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero7_ssa(&a.mid)
+ want := Z7{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [7]byte{0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero7 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8 struct {
+ pre [8]byte
+ mid [8]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero8_ssa(x *[8]byte) {
+ *x = [8]byte{}
+}
+func testZero8(t *testing.T) {
+ a := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8_ssa(&a.mid)
+ want := Z8{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero8 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z9 struct {
+ pre [8]byte
+ mid [9]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero9_ssa(x *[9]byte) {
+ *x = [9]byte{}
+}
+func testZero9(t *testing.T) {
+ a := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero9_ssa(&a.mid)
+ want := Z9{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [9]byte{0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero9 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z10 struct {
+ pre [8]byte
+ mid [10]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero10_ssa(x *[10]byte) {
+ *x = [10]byte{}
+}
+func testZero10(t *testing.T) {
+ a := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero10_ssa(&a.mid)
+ want := Z10{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [10]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero10 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z15 struct {
+ pre [8]byte
+ mid [15]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero15_ssa(x *[15]byte) {
+ *x = [15]byte{}
+}
+func testZero15(t *testing.T) {
+ a := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero15_ssa(&a.mid)
+ want := Z15{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [15]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero15 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z16 struct {
+ pre [8]byte
+ mid [16]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero16_ssa(x *[16]byte) {
+ *x = [16]byte{}
+}
+func testZero16(t *testing.T) {
+ a := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16_ssa(&a.mid)
+ want := Z16{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero16 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z17 struct {
+ pre [8]byte
+ mid [17]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero17_ssa(x *[17]byte) {
+ *x = [17]byte{}
+}
+func testZero17(t *testing.T) {
+ a := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero17_ssa(&a.mid)
+ want := Z17{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [17]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero17 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z23 struct {
+ pre [8]byte
+ mid [23]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero23_ssa(x *[23]byte) {
+ *x = [23]byte{}
+}
+func testZero23(t *testing.T) {
+ a := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero23_ssa(&a.mid)
+ want := Z23{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [23]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero23 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z24 struct {
+ pre [8]byte
+ mid [24]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero24_ssa(x *[24]byte) {
+ *x = [24]byte{}
+}
+func testZero24(t *testing.T) {
+ a := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24_ssa(&a.mid)
+ want := Z24{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero24 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z25 struct {
+ pre [8]byte
+ mid [25]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero25_ssa(x *[25]byte) {
+ *x = [25]byte{}
+}
+func testZero25(t *testing.T) {
+ a := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero25_ssa(&a.mid)
+ want := Z25{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [25]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero25 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z31 struct {
+ pre [8]byte
+ mid [31]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero31_ssa(x *[31]byte) {
+ *x = [31]byte{}
+}
+func testZero31(t *testing.T) {
+ a := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero31_ssa(&a.mid)
+ want := Z31{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [31]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero31 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z32 struct {
+ pre [8]byte
+ mid [32]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero32_ssa(x *[32]byte) {
+ *x = [32]byte{}
+}
+func testZero32(t *testing.T) {
+ a := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32_ssa(&a.mid)
+ want := Z32{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero32 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z33 struct {
+ pre [8]byte
+ mid [33]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero33_ssa(x *[33]byte) {
+ *x = [33]byte{}
+}
+func testZero33(t *testing.T) {
+ a := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero33_ssa(&a.mid)
+ want := Z33{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [33]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero33 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z63 struct {
+ pre [8]byte
+ mid [63]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero63_ssa(x *[63]byte) {
+ *x = [63]byte{}
+}
+func testZero63(t *testing.T) {
+ a := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero63_ssa(&a.mid)
+ want := Z63{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [63]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero63 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z64 struct {
+ pre [8]byte
+ mid [64]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero64_ssa(x *[64]byte) {
+ *x = [64]byte{}
+}
+func testZero64(t *testing.T) {
+ a := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64_ssa(&a.mid)
+ want := Z64{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero64 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z65 struct {
+ pre [8]byte
+ mid [65]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero65_ssa(x *[65]byte) {
+ *x = [65]byte{}
+}
+func testZero65(t *testing.T) {
+ a := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero65_ssa(&a.mid)
+ want := Z65{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [65]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero65 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1023 struct {
+ pre [8]byte
+ mid [1023]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1023_ssa(x *[1023]byte) {
+ *x = [1023]byte{}
+}
+func testZero1023(t *testing.T) {
+ a := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1023_ssa(&a.mid)
+ want := Z1023{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1023]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1023 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1024 struct {
+ pre [8]byte
+ mid [1024]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1024_ssa(x *[1024]byte) {
+ *x = [1024]byte{}
+}
+func testZero1024(t *testing.T) {
+ a := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1024_ssa(&a.mid)
+ want := Z1024{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1024]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1024 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z1025 struct {
+ pre [8]byte
+ mid [1025]byte
+ post [8]byte
+}
+
+//go:noinline
+func zero1025_ssa(x *[1025]byte) {
+ *x = [1025]byte{}
+}
+func testZero1025(t *testing.T) {
+ a := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 
255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero1025_ssa(&a.mid)
+ want := Z1025{[8]byte{255, 255, 255, 255, 255, 255, 255, 255}, [1025]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ if a != want {
+ t.Errorf("zero1025 got=%v, want %v\n", a, want)
+ }
+}
+
+type Z8u1 struct {
+ b bool
+ val [8]byte
+}
+type Z8u2 struct {
+ i uint16
+ val [8]byte
+}
+
+//go:noinline
+func zero8u1_ssa(t *Z8u1) {
+ t.val = [8]byte{}
+}
+
+//go:noinline
+func zero8u2_ssa(t *Z8u2) {
+ t.val = [8]byte{}
+}
+func testZero8u(t *testing.T) {
+ a := Z8u1{false, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u1_ssa(&a)
+ want := Z8u1{false, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero8u2 got=%v, want %v\n", a, want)
+ }
+ b := Z8u2{15, [8]byte{255, 255, 255, 255, 255, 255, 255, 255}}
+ zero8u2_ssa(&b)
+ wantb := Z8u2{15, [8]byte{0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero8u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z16u1 struct {
+ b bool
+ val [16]byte
+}
+type Z16u2 struct {
+ i uint16
+ val [16]byte
+}
+
+//go:noinline
+func zero16u1_ssa(t *Z16u1) {
+ t.val = [16]byte{}
+}
+
+//go:noinline
+func zero16u2_ssa(t *Z16u2) {
+ t.val = [16]byte{}
+}
+func testZero16u(t *testing.T) {
+ a := Z16u1{false, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u1_ssa(&a)
+ want := Z16u1{false, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero16u2 got=%v, want %v\n", a, want)
+ }
+ b := Z16u2{15, [16]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero16u2_ssa(&b)
+ wantb := Z16u2{15, [16]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero16u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z24u1 struct {
+ b bool
+ val [24]byte
+}
+type Z24u2 struct {
+ i uint16
+ val [24]byte
+}
+
+//go:noinline
+func zero24u1_ssa(t *Z24u1) {
+ t.val = [24]byte{}
+}
+
+//go:noinline
+func zero24u2_ssa(t *Z24u2) {
+ t.val = [24]byte{}
+}
+func testZero24u(t *testing.T) {
+ a := Z24u1{false, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u1_ssa(&a)
+ want := Z24u1{false, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero24u2 got=%v, want %v\n", a, want)
+ }
+ b := Z24u2{15, [24]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero24u2_ssa(&b)
+ wantb := Z24u2{15, [24]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero24u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z32u1 struct {
+ b bool
+ val [32]byte
+}
+type Z32u2 struct {
+ i uint16
+ val [32]byte
+}
+
+//go:noinline
+func zero32u1_ssa(t *Z32u1) {
+ t.val = [32]byte{}
+}
+
+//go:noinline
+func zero32u2_ssa(t *Z32u2) {
+ t.val = [32]byte{}
+}
+func testZero32u(t *testing.T) {
+ a := Z32u1{false, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u1_ssa(&a)
+ want := Z32u1{false, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero32u2 got=%v, want %v\n", a, want)
+ }
+ b := Z32u2{15, [32]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero32u2_ssa(&b)
+ wantb := Z32u2{15, [32]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero32u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z64u1 struct {
+ b bool
+ val [64]byte
+}
+type Z64u2 struct {
+ i uint16
+ val [64]byte
+}
+
+//go:noinline
+func zero64u1_ssa(t *Z64u1) {
+ t.val = [64]byte{}
+}
+
+//go:noinline
+func zero64u2_ssa(t *Z64u2) {
+ t.val = [64]byte{}
+}
+func testZero64u(t *testing.T) {
+ a := Z64u1{false, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u1_ssa(&a)
+ want := Z64u1{false, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero64u2 got=%v, want %v\n", a, want)
+ }
+ b := Z64u2{15, [64]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero64u2_ssa(&b)
+ wantb := Z64u2{15, [64]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero64u2 got=%v, want %v\n", b, wantb)
+ }
+}
+
+type Z256u1 struct {
+ b bool
+ val [256]byte
+}
+type Z256u2 struct {
+ i uint16
+ val [256]byte
+}
+
+//go:noinline
+func zero256u1_ssa(t *Z256u1) {
+ t.val = [256]byte{}
+}
+
+//go:noinline
+func zero256u2_ssa(t *Z256u2) {
+ t.val = [256]byte{}
+}
+func testZero256u(t *testing.T) {
+ a := Z256u1{false, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u1_ssa(&a)
+ want := Z256u1{false, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if a != want {
+ t.Errorf("zero256u2 got=%v, want %v\n", a, want)
+ }
+ b := Z256u2{15, [256]byte{255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255, 255}}
+ zero256u2_ssa(&b)
+ wantb := Z256u2{15, [256]byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0}}
+ if b != wantb {
+ t.Errorf("zero256u2 got=%v, want %v\n", b, wantb)
+ }
+}
+func TestZero(t *testing.T) {
+ testZero1(t)
+ testZero2(t)
+ testZero3(t)
+ testZero4(t)
+ testZero5(t)
+ testZero6(t)
+ testZero7(t)
+ testZero8(t)
+ testZero9(t)
+ testZero10(t)
+ testZero15(t)
+ testZero16(t)
+ testZero17(t)
+ testZero23(t)
+ testZero24(t)
+ testZero25(t)
+ testZero31(t)
+ testZero32(t)
+ testZero33(t)
+ testZero63(t)
+ testZero64(t)
+ testZero65(t)
+ testZero1023(t)
+ testZero1024(t)
+ testZero1025(t)
+ testZero8u(t)
+ testZero16u(t)
+ testZero24u(t)
+ testZero32u(t)
+ testZero64u(t)
+ testZero256u(t)
+}
diff --git a/src/cmd/compile/internal/test/truncconst_test.go b/src/cmd/compile/internal/test/truncconst_test.go
new file mode 100644
index 0000000..7705042
--- /dev/null
+++ b/src/cmd/compile/internal/test/truncconst_test.go
@@ -0,0 +1,63 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import "testing"
+
+var f52want float64 = 1.0 / (1 << 52)
+var f53want float64 = 1.0 / (1 << 53)
+
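+// These checks rely on constant arithmetic being carried out at higher
+// precision than float64 and rounded only on conversion or assignment:
+// f52-1 and f53-1 are evaluated as constants and yield exactly 1/2^52 and
+// 1/2^53, while float64(f53) rounds 1+1/2^53 to 1, so float64(f53)-1 is 0.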
+func TestTruncFlt(t *testing.T) {
+ const f52 = 1 + 1.0/(1<<52)
+ const f53 = 1 + 1.0/(1<<53)
+
+ if got := f52 - 1; got != f52want {
+ t.Errorf("f52-1 = %g, want %g", got, f52want)
+ }
+ if got := float64(f52) - 1; got != f52want {
+ t.Errorf("float64(f52)-1 = %g, want %g", got, f52want)
+ }
+ if got := f53 - 1; got != f53want {
+ t.Errorf("f53-1 = %g, want %g", got, f53want)
+ }
+ if got := float64(f53) - 1; got != 0 {
+ t.Errorf("float64(f53)-1 = %g, want 0", got)
+ }
+}
+
+func TestTruncCmplx(t *testing.T) {
+ const r52 = complex(1+1.0/(1<<52), 0)
+ const r53 = complex(1+1.0/(1<<53), 0)
+
+ if got := real(r52 - 1); got != f52want {
+ t.Errorf("real(r52-1) = %g, want %g", got, f52want)
+ }
+ if got := real(complex128(r52) - 1); got != f52want {
+ t.Errorf("real(complex128(r52)-1) = %g, want %g", got, f52want)
+ }
+ if got := real(r53 - 1); got != f53want {
+ t.Errorf("real(r53-1) = %g, want %g", got, f53want)
+ }
+ if got := real(complex128(r53) - 1); got != 0 {
+ t.Errorf("real(complex128(r53)-1) = %g, want 0", got)
+ }
+
+ const i52 = complex(0, 1+1.0/(1<<52))
+ const i53 = complex(0, 1+1.0/(1<<53))
+
+ if got := imag(i52 - 1i); got != f52want {
+ t.Errorf("imag(i52-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(complex128(i52) - 1i); got != f52want {
+ t.Errorf("imag(complex128(i52)-1i) = %g, want %g", got, f52want)
+ }
+ if got := imag(i53 - 1i); got != f53want {
+ t.Errorf("imag(i53-1i) = %g, want %g", got, f53want)
+ }
+ if got := imag(complex128(i53) - 1i); got != 0 {
+ t.Errorf("imag(complex128(i53)-1i) = %g, want 0", got)
+ }
+
+}
diff --git a/src/cmd/compile/internal/test/zerorange_test.go b/src/cmd/compile/internal/test/zerorange_test.go
new file mode 100644
index 0000000..ec87136
--- /dev/null
+++ b/src/cmd/compile/internal/test/zerorange_test.go
@@ -0,0 +1,185 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package test
+
+import (
+ "testing"
+)
+
+var glob = 3
+var globp *int64
+
+// Testing compilation of arch.ZeroRange of various sizes.
+
+// By storing a pointer to an int64 output param in a global, the compiler must
+// ensure that the output param is allocated on the heap. Also, since there is a
+// defer, the pointer to each output param must be zeroed in the prologue (see
+// plive.go:epilogue()). So, we will get a block of one or more stack slots that
+// need to be zeroed. Hence, we are testing that compilation completes successfully
+// when zerorange calls of various sizes (8-136 bytes) are generated. We are not
+// testing runtime correctness (which is hard to do for the current uses of
+// ZeroRange).
+
+func TestZeroRange(t *testing.T) {
+ testZeroRange8(t)
+ testZeroRange16(t)
+ testZeroRange32(t)
+ testZeroRange64(t)
+ testZeroRange136(t)
+}
+
+func testZeroRange8(t *testing.T) (r int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ return
+}
+
+func testZeroRange16(t *testing.T) (r, s int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ return
+}
+
+func testZeroRange32(t *testing.T) (r, s, t2, u int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ return
+}
+
+func testZeroRange64(t *testing.T) (r, s, t2, u, v, w, x, y int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ return
+}
+
+func testZeroRange136(t *testing.T) (r, s, t2, u, v, w, x, y, r1, s1, t1, u1, v1, w1, x1, y1, z1 int64) {
+ defer func() {
+ glob = 4
+ }()
+ globp = &r
+ globp = &s
+ globp = &t2
+ globp = &u
+ globp = &v
+ globp = &w
+ globp = &x
+ globp = &y
+ globp = &r1
+ globp = &s1
+ globp = &t1
+ globp = &u1
+ globp = &v1
+ globp = &w1
+ globp = &x1
+ globp = &y1
+ globp = &z1
+ return
+}
+
+type S struct {
+ x [2]uint64
+ p *uint64
+ y [2]uint64
+ q uint64
+}
+
+type M struct {
+ x [8]uint64
+ p *uint64
+ y [8]uint64
+ q uint64
+}
+
+type L struct {
+ x [4096]uint64
+ p *uint64
+ y [4096]uint64
+ q uint64
+}
+
+//go:noinline
+func triggerZerorangeLarge(f, g, h uint64) (rv0 uint64) {
+ ll := L{p: &f}
+ da := f
+ rv0 = f + g + h
+ defer func(dl L, i uint64) {
+ rv0 += dl.q + i
+ }(ll, da)
+ return rv0
+}
+
+//go:noinline
+func triggerZerorangeMedium(f, g, h uint64) (rv0 uint64) {
+ ll := M{p: &f}
+ rv0 = f + g + h
+ defer func(dm M, i uint64) {
+ rv0 += dm.q + i
+ }(ll, f)
+ return rv0
+}
+
+//go:noinline
+func triggerZerorangeSmall(f, g, h uint64) (rv0 uint64) {
+ ll := S{p: &f}
+ rv0 = f + g + h
+ defer func(ds S, i uint64) {
+ rv0 += ds.q + i
+ }(ll, f)
+ return rv0
+}
+
+// This test was created as a follow-up to issue #45372, to help
+// improve coverage of the compiler's arch-specific "zerorange"
+// function, which is invoked to zero out ambiguously live portions of
+// the stack frame in certain specific circumstances.
+//
+// In the current compiler implementation, for zerorange to be
+// invoked, we need to have an ambiguously live variable that needs
+// zeroing. One way to trigger this is to have a function with an
+// open-coded defer, where the deferred function has an argument that
+// contains a pointer (this is what's used below).
+//
+// At the moment this test doesn't do any specific checking of the
+// generated code sequence, or verification that things were properly
+// set to zero; that seems as though it would be too tricky and would
+// result in a "brittle" test.
+//
+// The small/medium/large scenarios below are inspired by the amd64
+// implementation of zerorange, which generates different code
+// depending on the size of the thing that needs to be zeroed out
+// (at the time this test was written, I verified that it exercises
+// the various cases).
+//
+func TestZerorange45372(t *testing.T) {
+ if r := triggerZerorangeLarge(101, 303, 505); r != 1010 {
+ t.Errorf("large: wanted %d got %d", 1010, r)
+ }
+ if r := triggerZerorangeMedium(101, 303, 505); r != 1010 {
+ t.Errorf("medium: wanted %d got %d", 1010, r)
+ }
+ if r := triggerZerorangeSmall(101, 303, 505); r != 1010 {
+ t.Errorf("small: wanted %d got %d", 1010, r)
+ }
+
+}
diff --git a/src/cmd/compile/internal/typebits/typebits.go b/src/cmd/compile/internal/typebits/typebits.go
new file mode 100644
index 0000000..fddad6e
--- /dev/null
+++ b/src/cmd/compile/internal/typebits/typebits.go
@@ -0,0 +1,87 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typebits
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/bitvec"
+ "cmd/compile/internal/types"
+)
+
+// NOTE: The bitmap for a specific type t could be cached in t after
+// the first run and then simply copied into bv at the correct offset
+// on future calls with the same type t.
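+//
+// A rough sketch of that caching idea (not implemented here; the cache map,
+// the helper name setCached, and the sizing of the per-type bitmap are
+// illustrative assumptions only):
+//
+//	var cache = map[*types.Type]bitvec.BitVec{}
+//
+//	func setCached(t *types.Type, off int64, bv bitvec.BitVec) {
+//		b, ok := cache[t]
+//		if !ok {
+//			b = bitvec.New(int32(t.Size() / int64(types.PtrSize)))
+//			Set(t, 0, b) // compute the bitmap once, relative to offset 0
+//			cache[t] = b
+//		}
+//		// Copy the cached bits into bv, shifted to the requested offset.
+//		for i := int32(0); i < b.N; i++ {
+//			if b.Get(i) {
+//				bv.Set(int32(off/int64(types.PtrSize)) + i)
+//			}
+//		}
+//	}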
+func Set(t *types.Type, off int64, bv bitvec.BitVec) {
+ if uint8(t.Alignment()) > 0 && off&int64(uint8(t.Alignment())-1) != 0 {
+ base.Fatalf("typebits.Set: invalid initial alignment: type %v has alignment %d, but offset is %v", t, uint8(t.Alignment()), off)
+ }
+ if !t.HasPointers() {
+ // Note: this case ensures that pointers to go:notinheap types
+ // are not considered pointers by garbage collection and stack copying.
+ return
+ }
+
+ switch t.Kind() {
+ case types.TPTR, types.TUNSAFEPTR, types.TFUNC, types.TCHAN, types.TMAP:
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer
+
+ case types.TSTRING:
+ // struct { byte *str; intgo len; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot
+
+ case types.TINTER:
+ // struct { Itab *tab; void *data; }
+ // or, when isnilinter(t)==true:
+ // struct { Type *type; void *data; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid alignment, %v", t)
+ }
+ // The first word of an interface is a pointer, but we don't
+ // treat it as such.
+ // 1. If it is a non-empty interface, the pointer points to an itab
+ // which is always in persistentalloc space.
+ // 2. If it is an empty interface, the pointer points to a _type.
+ // a. If it is a compile-time-allocated type, it points into
+ // the read-only data section.
+ // b. If it is a reflect-allocated type, it points into the Go heap.
+ // Reflect is responsible for keeping a reference to
+ // the underlying type so it won't be GCd.
+ // If we ever have a moving GC, we need to change this for 2b (as
+ // well as scan itabs to update their itab._type fields).
+ bv.Set(int32(off/int64(types.PtrSize) + 1)) // pointer in second slot
+
+ case types.TSLICE:
+ // struct { byte *array; uintgo len; uintgo cap; }
+ if off&int64(types.PtrSize-1) != 0 {
+ base.Fatalf("typebits.Set: invalid TARRAY alignment, %v", t)
+ }
+ bv.Set(int32(off / int64(types.PtrSize))) // pointer in first slot (BitsPointer)
+
+ case types.TARRAY:
+ elt := t.Elem()
+ if elt.Size() == 0 {
+ // Short-circuit for #20739.
+ break
+ }
+ for i := int64(0); i < t.NumElem(); i++ {
+ Set(elt, off, bv)
+ off += elt.Size()
+ }
+
+ case types.TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ Set(f.Type, off+f.Offset, bv)
+ }
+
+ default:
+ base.Fatalf("typebits.Set: unexpected type, %v", t)
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/bexport.go b/src/cmd/compile/internal/typecheck/bexport.go
new file mode 100644
index 0000000..352f7a9
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/bexport.go
@@ -0,0 +1,108 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import "cmd/compile/internal/types"
+
+// ----------------------------------------------------------------------------
+// Export format
+
+// Tags. Must be < 0.
+const (
+ // Objects
+ packageTag = -(iota + 1)
+ constTag
+ typeTag
+ varTag
+ funcTag
+ endTag
+
+ // Types
+ namedTag
+ arrayTag
+ sliceTag
+ dddTag
+ structTag
+ pointerTag
+ signatureTag
+ interfaceTag
+ mapTag
+ chanTag
+
+ // Values
+ falseTag
+ trueTag
+ int64Tag
+ floatTag
+ fractionTag // not used by gc
+ complexTag
+ stringTag
+ nilTag
+ unknownTag // not used by gc (only appears in packages with errors)
+
+ // Type aliases
+ aliasTag
+)
+
+var predecl []*types.Type // initialized lazily
+
+func predeclared() []*types.Type {
+ if predecl == nil {
+ // initialize lazily to be sure that all
+ // of the listed types have been initialized before use
+ predecl = []*types.Type{
+ // basic types
+ types.Types[types.TBOOL],
+ types.Types[types.TINT],
+ types.Types[types.TINT8],
+ types.Types[types.TINT16],
+ types.Types[types.TINT32],
+ types.Types[types.TINT64],
+ types.Types[types.TUINT],
+ types.Types[types.TUINT8],
+ types.Types[types.TUINT16],
+ types.Types[types.TUINT32],
+ types.Types[types.TUINT64],
+ types.Types[types.TUINTPTR],
+ types.Types[types.TFLOAT32],
+ types.Types[types.TFLOAT64],
+ types.Types[types.TCOMPLEX64],
+ types.Types[types.TCOMPLEX128],
+ types.Types[types.TSTRING],
+
+ // basic type aliases
+ types.ByteType,
+ types.RuneType,
+
+ // error
+ types.ErrorType,
+
+ // untyped types
+ types.UntypedBool,
+ types.UntypedInt,
+ types.UntypedRune,
+ types.UntypedFloat,
+ types.UntypedComplex,
+ types.UntypedString,
+ types.Types[types.TNIL],
+
+ // package unsafe
+ types.Types[types.TUNSAFEPTR],
+
+ // invalid type (package contains errors)
+ types.Types[types.Txxx],
+
+ // any type, for builtin export data
+ types.Types[types.TANY],
+
+ // comparable
+ types.ComparableType,
+
+ // any
+ types.AnyType,
+ }
+ }
+ return predecl
+}
diff --git a/src/cmd/compile/internal/typecheck/builtin.go b/src/cmd/compile/internal/typecheck/builtin.go
new file mode 100644
index 0000000..67597ce
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin.go
@@ -0,0 +1,380 @@
+// Code generated by mkbuiltin.go. DO NOT EDIT.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var runtimeDecls = [...]struct {
+ name string
+ tag int
+ typ int
+}{
+ {"newobject", funcTag, 4},
+ {"mallocgc", funcTag, 8},
+ {"panicdivide", funcTag, 9},
+ {"panicshift", funcTag, 9},
+ {"panicmakeslicelen", funcTag, 9},
+ {"panicmakeslicecap", funcTag, 9},
+ {"throwinit", funcTag, 9},
+ {"panicwrap", funcTag, 9},
+ {"gopanic", funcTag, 11},
+ {"gorecover", funcTag, 14},
+ {"goschedguarded", funcTag, 9},
+ {"goPanicIndex", funcTag, 16},
+ {"goPanicIndexU", funcTag, 18},
+ {"goPanicSliceAlen", funcTag, 16},
+ {"goPanicSliceAlenU", funcTag, 18},
+ {"goPanicSliceAcap", funcTag, 16},
+ {"goPanicSliceAcapU", funcTag, 18},
+ {"goPanicSliceB", funcTag, 16},
+ {"goPanicSliceBU", funcTag, 18},
+ {"goPanicSlice3Alen", funcTag, 16},
+ {"goPanicSlice3AlenU", funcTag, 18},
+ {"goPanicSlice3Acap", funcTag, 16},
+ {"goPanicSlice3AcapU", funcTag, 18},
+ {"goPanicSlice3B", funcTag, 16},
+ {"goPanicSlice3BU", funcTag, 18},
+ {"goPanicSlice3C", funcTag, 16},
+ {"goPanicSlice3CU", funcTag, 18},
+ {"goPanicSliceConvert", funcTag, 16},
+ {"printbool", funcTag, 19},
+ {"printfloat", funcTag, 21},
+ {"printint", funcTag, 23},
+ {"printhex", funcTag, 25},
+ {"printuint", funcTag, 25},
+ {"printcomplex", funcTag, 27},
+ {"printstring", funcTag, 29},
+ {"printpointer", funcTag, 30},
+ {"printuintptr", funcTag, 31},
+ {"printiface", funcTag, 30},
+ {"printeface", funcTag, 30},
+ {"printslice", funcTag, 30},
+ {"printnl", funcTag, 9},
+ {"printsp", funcTag, 9},
+ {"printlock", funcTag, 9},
+ {"printunlock", funcTag, 9},
+ {"concatstring2", funcTag, 34},
+ {"concatstring3", funcTag, 35},
+ {"concatstring4", funcTag, 36},
+ {"concatstring5", funcTag, 37},
+ {"concatstrings", funcTag, 39},
+ {"cmpstring", funcTag, 40},
+ {"intstring", funcTag, 43},
+ {"slicebytetostring", funcTag, 44},
+ {"slicebytetostringtmp", funcTag, 45},
+ {"slicerunetostring", funcTag, 48},
+ {"stringtoslicebyte", funcTag, 50},
+ {"stringtoslicerune", funcTag, 53},
+ {"slicecopy", funcTag, 54},
+ {"decoderune", funcTag, 55},
+ {"countrunes", funcTag, 56},
+ {"convI2I", funcTag, 58},
+ {"convT", funcTag, 59},
+ {"convTnoptr", funcTag, 59},
+ {"convT16", funcTag, 61},
+ {"convT32", funcTag, 63},
+ {"convT64", funcTag, 64},
+ {"convTstring", funcTag, 65},
+ {"convTslice", funcTag, 68},
+ {"assertE2I", funcTag, 69},
+ {"assertE2I2", funcTag, 70},
+ {"assertI2I", funcTag, 69},
+ {"assertI2I2", funcTag, 70},
+ {"panicdottypeE", funcTag, 71},
+ {"panicdottypeI", funcTag, 71},
+ {"panicnildottype", funcTag, 72},
+ {"ifaceeq", funcTag, 73},
+ {"efaceeq", funcTag, 73},
+ {"fastrand", funcTag, 74},
+ {"makemap64", funcTag, 76},
+ {"makemap", funcTag, 77},
+ {"makemap_small", funcTag, 78},
+ {"mapaccess1", funcTag, 79},
+ {"mapaccess1_fast32", funcTag, 80},
+ {"mapaccess1_fast64", funcTag, 81},
+ {"mapaccess1_faststr", funcTag, 82},
+ {"mapaccess1_fat", funcTag, 83},
+ {"mapaccess2", funcTag, 84},
+ {"mapaccess2_fast32", funcTag, 85},
+ {"mapaccess2_fast64", funcTag, 86},
+ {"mapaccess2_faststr", funcTag, 87},
+ {"mapaccess2_fat", funcTag, 88},
+ {"mapassign", funcTag, 79},
+ {"mapassign_fast32", funcTag, 80},
+ {"mapassign_fast32ptr", funcTag, 89},
+ {"mapassign_fast64", funcTag, 81},
+ {"mapassign_fast64ptr", funcTag, 89},
+ {"mapassign_faststr", funcTag, 82},
+ {"mapiterinit", funcTag, 90},
+ {"mapdelete", funcTag, 90},
+ {"mapdelete_fast32", funcTag, 91},
+ {"mapdelete_fast64", funcTag, 92},
+ {"mapdelete_faststr", funcTag, 93},
+ {"mapiternext", funcTag, 94},
+ {"mapclear", funcTag, 95},
+ {"makechan64", funcTag, 97},
+ {"makechan", funcTag, 98},
+ {"chanrecv1", funcTag, 100},
+ {"chanrecv2", funcTag, 101},
+ {"chansend1", funcTag, 103},
+ {"closechan", funcTag, 30},
+ {"writeBarrier", varTag, 105},
+ {"typedmemmove", funcTag, 106},
+ {"typedmemclr", funcTag, 107},
+ {"typedslicecopy", funcTag, 108},
+ {"selectnbsend", funcTag, 109},
+ {"selectnbrecv", funcTag, 110},
+ {"selectsetpc", funcTag, 111},
+ {"selectgo", funcTag, 112},
+ {"block", funcTag, 9},
+ {"makeslice", funcTag, 113},
+ {"makeslice64", funcTag, 114},
+ {"makeslicecopy", funcTag, 115},
+ {"growslice", funcTag, 117},
+ {"unsafeslice", funcTag, 118},
+ {"unsafeslice64", funcTag, 119},
+ {"unsafeslicecheckptr", funcTag, 119},
+ {"memmove", funcTag, 120},
+ {"memclrNoHeapPointers", funcTag, 121},
+ {"memclrHasPointers", funcTag, 121},
+ {"memequal", funcTag, 122},
+ {"memequal0", funcTag, 123},
+ {"memequal8", funcTag, 123},
+ {"memequal16", funcTag, 123},
+ {"memequal32", funcTag, 123},
+ {"memequal64", funcTag, 123},
+ {"memequal128", funcTag, 123},
+ {"f32equal", funcTag, 124},
+ {"f64equal", funcTag, 124},
+ {"c64equal", funcTag, 124},
+ {"c128equal", funcTag, 124},
+ {"strequal", funcTag, 124},
+ {"interequal", funcTag, 124},
+ {"nilinterequal", funcTag, 124},
+ {"memhash", funcTag, 125},
+ {"memhash0", funcTag, 126},
+ {"memhash8", funcTag, 126},
+ {"memhash16", funcTag, 126},
+ {"memhash32", funcTag, 126},
+ {"memhash64", funcTag, 126},
+ {"memhash128", funcTag, 126},
+ {"f32hash", funcTag, 126},
+ {"f64hash", funcTag, 126},
+ {"c64hash", funcTag, 126},
+ {"c128hash", funcTag, 126},
+ {"strhash", funcTag, 126},
+ {"interhash", funcTag, 126},
+ {"nilinterhash", funcTag, 126},
+ {"int64div", funcTag, 127},
+ {"uint64div", funcTag, 128},
+ {"int64mod", funcTag, 127},
+ {"uint64mod", funcTag, 128},
+ {"float64toint64", funcTag, 129},
+ {"float64touint64", funcTag, 130},
+ {"float64touint32", funcTag, 131},
+ {"int64tofloat64", funcTag, 132},
+ {"int64tofloat32", funcTag, 134},
+ {"uint64tofloat64", funcTag, 135},
+ {"uint64tofloat32", funcTag, 136},
+ {"uint32tofloat64", funcTag, 137},
+ {"complex128div", funcTag, 138},
+ {"getcallerpc", funcTag, 139},
+ {"getcallersp", funcTag, 139},
+ {"racefuncenter", funcTag, 31},
+ {"racefuncexit", funcTag, 9},
+ {"raceread", funcTag, 31},
+ {"racewrite", funcTag, 31},
+ {"racereadrange", funcTag, 140},
+ {"racewriterange", funcTag, 140},
+ {"msanread", funcTag, 140},
+ {"msanwrite", funcTag, 140},
+ {"msanmove", funcTag, 141},
+ {"asanread", funcTag, 140},
+ {"asanwrite", funcTag, 140},
+ {"checkptrAlignment", funcTag, 142},
+ {"checkptrArithmetic", funcTag, 144},
+ {"libfuzzerTraceCmp1", funcTag, 145},
+ {"libfuzzerTraceCmp2", funcTag, 146},
+ {"libfuzzerTraceCmp4", funcTag, 147},
+ {"libfuzzerTraceCmp8", funcTag, 148},
+ {"libfuzzerTraceConstCmp1", funcTag, 145},
+ {"libfuzzerTraceConstCmp2", funcTag, 146},
+ {"libfuzzerTraceConstCmp4", funcTag, 147},
+ {"libfuzzerTraceConstCmp8", funcTag, 148},
+ {"x86HasPOPCNT", varTag, 6},
+ {"x86HasSSE41", varTag, 6},
+ {"x86HasFMA", varTag, 6},
+ {"armHasVFPv4", varTag, 6},
+ {"arm64HasATOMICS", varTag, 6},
+}
+
+// Not inlining this function removes a significant chunk of init code.
+//go:noinline
+func newSig(params, results []*types.Field) *types.Type {
+ return types.NewSignature(types.NoPkg, nil, nil, params, results)
+}
+
+func params(tlist ...*types.Type) []*types.Field {
+ flist := make([]*types.Field, len(tlist))
+ for i, typ := range tlist {
+ flist[i] = types.NewField(src.NoXPos, nil, typ)
+ }
+ return flist
+}
+
+func runtimeTypes() []*types.Type {
+ var typs [149]*types.Type
+ typs[0] = types.ByteType
+ typs[1] = types.NewPtr(typs[0])
+ typs[2] = types.Types[types.TANY]
+ typs[3] = types.NewPtr(typs[2])
+ typs[4] = newSig(params(typs[1]), params(typs[3]))
+ typs[5] = types.Types[types.TUINTPTR]
+ typs[6] = types.Types[types.TBOOL]
+ typs[7] = types.Types[types.TUNSAFEPTR]
+ typs[8] = newSig(params(typs[5], typs[1], typs[6]), params(typs[7]))
+ typs[9] = newSig(nil, nil)
+ typs[10] = types.Types[types.TINTER]
+ typs[11] = newSig(params(typs[10]), nil)
+ typs[12] = types.Types[types.TINT32]
+ typs[13] = types.NewPtr(typs[12])
+ typs[14] = newSig(params(typs[13]), params(typs[10]))
+ typs[15] = types.Types[types.TINT]
+ typs[16] = newSig(params(typs[15], typs[15]), nil)
+ typs[17] = types.Types[types.TUINT]
+ typs[18] = newSig(params(typs[17], typs[15]), nil)
+ typs[19] = newSig(params(typs[6]), nil)
+ typs[20] = types.Types[types.TFLOAT64]
+ typs[21] = newSig(params(typs[20]), nil)
+ typs[22] = types.Types[types.TINT64]
+ typs[23] = newSig(params(typs[22]), nil)
+ typs[24] = types.Types[types.TUINT64]
+ typs[25] = newSig(params(typs[24]), nil)
+ typs[26] = types.Types[types.TCOMPLEX128]
+ typs[27] = newSig(params(typs[26]), nil)
+ typs[28] = types.Types[types.TSTRING]
+ typs[29] = newSig(params(typs[28]), nil)
+ typs[30] = newSig(params(typs[2]), nil)
+ typs[31] = newSig(params(typs[5]), nil)
+ typs[32] = types.NewArray(typs[0], 32)
+ typs[33] = types.NewPtr(typs[32])
+ typs[34] = newSig(params(typs[33], typs[28], typs[28]), params(typs[28]))
+ typs[35] = newSig(params(typs[33], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[36] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[37] = newSig(params(typs[33], typs[28], typs[28], typs[28], typs[28], typs[28]), params(typs[28]))
+ typs[38] = types.NewSlice(typs[28])
+ typs[39] = newSig(params(typs[33], typs[38]), params(typs[28]))
+ typs[40] = newSig(params(typs[28], typs[28]), params(typs[15]))
+ typs[41] = types.NewArray(typs[0], 4)
+ typs[42] = types.NewPtr(typs[41])
+ typs[43] = newSig(params(typs[42], typs[22]), params(typs[28]))
+ typs[44] = newSig(params(typs[33], typs[1], typs[15]), params(typs[28]))
+ typs[45] = newSig(params(typs[1], typs[15]), params(typs[28]))
+ typs[46] = types.RuneType
+ typs[47] = types.NewSlice(typs[46])
+ typs[48] = newSig(params(typs[33], typs[47]), params(typs[28]))
+ typs[49] = types.NewSlice(typs[0])
+ typs[50] = newSig(params(typs[33], typs[28]), params(typs[49]))
+ typs[51] = types.NewArray(typs[46], 32)
+ typs[52] = types.NewPtr(typs[51])
+ typs[53] = newSig(params(typs[52], typs[28]), params(typs[47]))
+ typs[54] = newSig(params(typs[3], typs[15], typs[3], typs[15], typs[5]), params(typs[15]))
+ typs[55] = newSig(params(typs[28], typs[15]), params(typs[46], typs[15]))
+ typs[56] = newSig(params(typs[28]), params(typs[15]))
+ typs[57] = types.NewPtr(typs[5])
+ typs[58] = newSig(params(typs[1], typs[57]), params(typs[57]))
+ typs[59] = newSig(params(typs[1], typs[3]), params(typs[7]))
+ typs[60] = types.Types[types.TUINT16]
+ typs[61] = newSig(params(typs[60]), params(typs[7]))
+ typs[62] = types.Types[types.TUINT32]
+ typs[63] = newSig(params(typs[62]), params(typs[7]))
+ typs[64] = newSig(params(typs[24]), params(typs[7]))
+ typs[65] = newSig(params(typs[28]), params(typs[7]))
+ typs[66] = types.Types[types.TUINT8]
+ typs[67] = types.NewSlice(typs[66])
+ typs[68] = newSig(params(typs[67]), params(typs[7]))
+ typs[69] = newSig(params(typs[1], typs[1]), params(typs[1]))
+ typs[70] = newSig(params(typs[1], typs[2]), params(typs[2]))
+ typs[71] = newSig(params(typs[1], typs[1], typs[1]), nil)
+ typs[72] = newSig(params(typs[1]), nil)
+ typs[73] = newSig(params(typs[57], typs[7], typs[7]), params(typs[6]))
+ typs[74] = newSig(nil, params(typs[62]))
+ typs[75] = types.NewMap(typs[2], typs[2])
+ typs[76] = newSig(params(typs[1], typs[22], typs[3]), params(typs[75]))
+ typs[77] = newSig(params(typs[1], typs[15], typs[3]), params(typs[75]))
+ typs[78] = newSig(nil, params(typs[75]))
+ typs[79] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3]))
+ typs[80] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3]))
+ typs[81] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3]))
+ typs[82] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3]))
+ typs[83] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3]))
+ typs[84] = newSig(params(typs[1], typs[75], typs[3]), params(typs[3], typs[6]))
+ typs[85] = newSig(params(typs[1], typs[75], typs[62]), params(typs[3], typs[6]))
+ typs[86] = newSig(params(typs[1], typs[75], typs[24]), params(typs[3], typs[6]))
+ typs[87] = newSig(params(typs[1], typs[75], typs[28]), params(typs[3], typs[6]))
+ typs[88] = newSig(params(typs[1], typs[75], typs[3], typs[1]), params(typs[3], typs[6]))
+ typs[89] = newSig(params(typs[1], typs[75], typs[7]), params(typs[3]))
+ typs[90] = newSig(params(typs[1], typs[75], typs[3]), nil)
+ typs[91] = newSig(params(typs[1], typs[75], typs[62]), nil)
+ typs[92] = newSig(params(typs[1], typs[75], typs[24]), nil)
+ typs[93] = newSig(params(typs[1], typs[75], typs[28]), nil)
+ typs[94] = newSig(params(typs[3]), nil)
+ typs[95] = newSig(params(typs[1], typs[75]), nil)
+ typs[96] = types.NewChan(typs[2], types.Cboth)
+ typs[97] = newSig(params(typs[1], typs[22]), params(typs[96]))
+ typs[98] = newSig(params(typs[1], typs[15]), params(typs[96]))
+ typs[99] = types.NewChan(typs[2], types.Crecv)
+ typs[100] = newSig(params(typs[99], typs[3]), nil)
+ typs[101] = newSig(params(typs[99], typs[3]), params(typs[6]))
+ typs[102] = types.NewChan(typs[2], types.Csend)
+ typs[103] = newSig(params(typs[102], typs[3]), nil)
+ typs[104] = types.NewArray(typs[0], 3)
+ typs[105] = types.NewStruct(types.NoPkg, []*types.Field{types.NewField(src.NoXPos, Lookup("enabled"), typs[6]), types.NewField(src.NoXPos, Lookup("pad"), typs[104]), types.NewField(src.NoXPos, Lookup("needed"), typs[6]), types.NewField(src.NoXPos, Lookup("cgo"), typs[6]), types.NewField(src.NoXPos, Lookup("alignme"), typs[24])})
+ typs[106] = newSig(params(typs[1], typs[3], typs[3]), nil)
+ typs[107] = newSig(params(typs[1], typs[3]), nil)
+ typs[108] = newSig(params(typs[1], typs[3], typs[15], typs[3], typs[15]), params(typs[15]))
+ typs[109] = newSig(params(typs[102], typs[3]), params(typs[6]))
+ typs[110] = newSig(params(typs[3], typs[99]), params(typs[6], typs[6]))
+ typs[111] = newSig(params(typs[57]), nil)
+ typs[112] = newSig(params(typs[1], typs[1], typs[57], typs[15], typs[15], typs[6]), params(typs[15], typs[6]))
+ typs[113] = newSig(params(typs[1], typs[15], typs[15]), params(typs[7]))
+ typs[114] = newSig(params(typs[1], typs[22], typs[22]), params(typs[7]))
+ typs[115] = newSig(params(typs[1], typs[15], typs[15], typs[7]), params(typs[7]))
+ typs[116] = types.NewSlice(typs[2])
+ typs[117] = newSig(params(typs[1], typs[116], typs[15]), params(typs[116]))
+ typs[118] = newSig(params(typs[1], typs[7], typs[15]), nil)
+ typs[119] = newSig(params(typs[1], typs[7], typs[22]), nil)
+ typs[120] = newSig(params(typs[3], typs[3], typs[5]), nil)
+ typs[121] = newSig(params(typs[7], typs[5]), nil)
+ typs[122] = newSig(params(typs[3], typs[3], typs[5]), params(typs[6]))
+ typs[123] = newSig(params(typs[3], typs[3]), params(typs[6]))
+ typs[124] = newSig(params(typs[7], typs[7]), params(typs[6]))
+ typs[125] = newSig(params(typs[7], typs[5], typs[5]), params(typs[5]))
+ typs[126] = newSig(params(typs[7], typs[5]), params(typs[5]))
+ typs[127] = newSig(params(typs[22], typs[22]), params(typs[22]))
+ typs[128] = newSig(params(typs[24], typs[24]), params(typs[24]))
+ typs[129] = newSig(params(typs[20]), params(typs[22]))
+ typs[130] = newSig(params(typs[20]), params(typs[24]))
+ typs[131] = newSig(params(typs[20]), params(typs[62]))
+ typs[132] = newSig(params(typs[22]), params(typs[20]))
+ typs[133] = types.Types[types.TFLOAT32]
+ typs[134] = newSig(params(typs[22]), params(typs[133]))
+ typs[135] = newSig(params(typs[24]), params(typs[20]))
+ typs[136] = newSig(params(typs[24]), params(typs[133]))
+ typs[137] = newSig(params(typs[62]), params(typs[20]))
+ typs[138] = newSig(params(typs[26], typs[26]), params(typs[26]))
+ typs[139] = newSig(nil, params(typs[5]))
+ typs[140] = newSig(params(typs[5], typs[5]), nil)
+ typs[141] = newSig(params(typs[5], typs[5], typs[5]), nil)
+ typs[142] = newSig(params(typs[7], typs[1], typs[5]), nil)
+ typs[143] = types.NewSlice(typs[7])
+ typs[144] = newSig(params(typs[7], typs[143]), nil)
+ typs[145] = newSig(params(typs[66], typs[66]), nil)
+ typs[146] = newSig(params(typs[60], typs[60]), nil)
+ typs[147] = newSig(params(typs[62], typs[62]), nil)
+ typs[148] = newSig(params(typs[24], typs[24]), nil)
+ return typs[:]
+}
diff --git a/src/cmd/compile/internal/typecheck/builtin/runtime.go b/src/cmd/compile/internal/typecheck/builtin/runtime.go
new file mode 100644
index 0000000..04ae4f2
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin/runtime.go
@@ -0,0 +1,274 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// NOTE: If you change this file you must run "go generate"
+// to update builtin.go. This is not done automatically
+// to avoid depending on having a working compiler binary.
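+//
+// (TestBuiltin in builtin_test.go checks that builtin.go is up to date by
+// regenerating it with "go run mkbuiltin.go -stdout" and comparing the output.)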
+
+//go:build ignore
+// +build ignore
+
+package runtime
+
+// emitted by the compiler, not referred to by Go programs
+
+import "unsafe"
+
+func newobject(typ *byte) *any
+func mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+func panicdivide()
+func panicshift()
+func panicmakeslicelen()
+func panicmakeslicecap()
+func throwinit()
+func panicwrap()
+
+func gopanic(interface{})
+func gorecover(*int32) interface{}
+func goschedguarded()
+
+// Note: these declarations are just for the wasm port.
+// Other ports call assembly stubs instead.
+func goPanicIndex(x int, y int)
+func goPanicIndexU(x uint, y int)
+func goPanicSliceAlen(x int, y int)
+func goPanicSliceAlenU(x uint, y int)
+func goPanicSliceAcap(x int, y int)
+func goPanicSliceAcapU(x uint, y int)
+func goPanicSliceB(x int, y int)
+func goPanicSliceBU(x uint, y int)
+func goPanicSlice3Alen(x int, y int)
+func goPanicSlice3AlenU(x uint, y int)
+func goPanicSlice3Acap(x int, y int)
+func goPanicSlice3AcapU(x uint, y int)
+func goPanicSlice3B(x int, y int)
+func goPanicSlice3BU(x uint, y int)
+func goPanicSlice3C(x int, y int)
+func goPanicSlice3CU(x uint, y int)
+func goPanicSliceConvert(x int, y int)
+
+func printbool(bool)
+func printfloat(float64)
+func printint(int64)
+func printhex(uint64)
+func printuint(uint64)
+func printcomplex(complex128)
+func printstring(string)
+func printpointer(any)
+func printuintptr(uintptr)
+func printiface(any)
+func printeface(any)
+func printslice(any)
+func printnl()
+func printsp()
+func printlock()
+func printunlock()
+
+func concatstring2(*[32]byte, string, string) string
+func concatstring3(*[32]byte, string, string, string) string
+func concatstring4(*[32]byte, string, string, string, string) string
+func concatstring5(*[32]byte, string, string, string, string, string) string
+func concatstrings(*[32]byte, []string) string
+
+func cmpstring(string, string) int
+func intstring(*[4]byte, int64) string
+func slicebytetostring(buf *[32]byte, ptr *byte, n int) string
+func slicebytetostringtmp(ptr *byte, n int) string
+func slicerunetostring(*[32]byte, []rune) string
+func stringtoslicebyte(*[32]byte, string) []byte
+func stringtoslicerune(*[32]rune, string) []rune
+func slicecopy(toPtr *any, toLen int, fromPtr *any, fromLen int, wid uintptr) int
+
+func decoderune(string, int) (retv rune, retk int)
+func countrunes(string) int
+
+// Non-empty-interface to non-empty-interface conversion.
+func convI2I(typ *byte, itab *uintptr) (ret *uintptr)
+
+// Convert a non-interface type to the data word of an (empty or nonempty) interface.
+func convT(typ *byte, elem *any) unsafe.Pointer
+
+// Same as convT, for types with no pointers in them.
+func convTnoptr(typ *byte, elem *any) unsafe.Pointer
+
+// Specialized versions of convT for specific types.
+// These functions take concrete types in the runtime. But they may
+// be used for a wider range of types, which have the same memory
+// layout as the parameter type. The compiler converts the
+// to-be-converted type to the parameter type before calling the
+// runtime function. This way, the call is ABI-insensitive.
+func convT16(val uint16) unsafe.Pointer
+func convT32(val uint32) unsafe.Pointer
+func convT64(val uint64) unsafe.Pointer
+func convTstring(val string) unsafe.Pointer
+func convTslice(val []uint8) unsafe.Pointer
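+
+// For example, converting an int32 value to an interface can use convT32:
+// int32 has the same memory layout as uint32, so the compiler converts the
+// operand to uint32 before making the call.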
+
+// interface type assertions x.(T)
+func assertE2I(inter *byte, typ *byte) *byte
+func assertE2I2(inter *byte, eface any) (ret any)
+func assertI2I(inter *byte, tab *byte) *byte
+func assertI2I2(inter *byte, iface any) (ret any)
+func panicdottypeE(have, want, iface *byte)
+func panicdottypeI(have, want, iface *byte)
+func panicnildottype(want *byte)
+
+// interface equality. Type/itab pointers are already known to be equal, so
+// we only need to pass one.
+func ifaceeq(tab *uintptr, x, y unsafe.Pointer) (ret bool)
+func efaceeq(typ *uintptr, x, y unsafe.Pointer) (ret bool)
+
+func fastrand() uint32
+
+// *byte is really *runtime.Type
+func makemap64(mapType *byte, hint int64, mapbuf *any) (hmap map[any]any)
+func makemap(mapType *byte, hint int, mapbuf *any) (hmap map[any]any)
+func makemap_small() (hmap map[any]any)
+func mapaccess1(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapaccess1_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any)
+func mapaccess1_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
+func mapaccess1_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
+func mapaccess1_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any)
+func mapaccess2(mapType *byte, hmap map[any]any, key *any) (val *any, pres bool)
+func mapaccess2_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any, pres bool)
+func mapaccess2_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any, pres bool)
+func mapaccess2_faststr(mapType *byte, hmap map[any]any, key string) (val *any, pres bool)
+func mapaccess2_fat(mapType *byte, hmap map[any]any, key *any, zero *byte) (val *any, pres bool)
+func mapassign(mapType *byte, hmap map[any]any, key *any) (val *any)
+func mapassign_fast32(mapType *byte, hmap map[any]any, key uint32) (val *any)
+func mapassign_fast32ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
+func mapassign_fast64(mapType *byte, hmap map[any]any, key uint64) (val *any)
+func mapassign_fast64ptr(mapType *byte, hmap map[any]any, key unsafe.Pointer) (val *any)
+func mapassign_faststr(mapType *byte, hmap map[any]any, key string) (val *any)
+func mapiterinit(mapType *byte, hmap map[any]any, hiter *any)
+func mapdelete(mapType *byte, hmap map[any]any, key *any)
+func mapdelete_fast32(mapType *byte, hmap map[any]any, key uint32)
+func mapdelete_fast64(mapType *byte, hmap map[any]any, key uint64)
+func mapdelete_faststr(mapType *byte, hmap map[any]any, key string)
+func mapiternext(hiter *any)
+func mapclear(mapType *byte, hmap map[any]any)
+
+// *byte is really *runtime.Type
+func makechan64(chanType *byte, size int64) (hchan chan any)
+func makechan(chanType *byte, size int) (hchan chan any)
+func chanrecv1(hchan <-chan any, elem *any)
+func chanrecv2(hchan <-chan any, elem *any) bool
+func chansend1(hchan chan<- any, elem *any)
+func closechan(hchan any)
+
+var writeBarrier struct {
+ enabled bool
+ pad [3]byte
+ needed bool
+ cgo bool
+ alignme uint64
+}
+
+// *byte is really *runtime.Type
+func typedmemmove(typ *byte, dst *any, src *any)
+func typedmemclr(typ *byte, dst *any)
+func typedslicecopy(typ *byte, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+
+func selectnbsend(hchan chan<- any, elem *any) bool
+func selectnbrecv(elem *any, hchan <-chan any) (bool, bool)
+
+func selectsetpc(pc *uintptr)
+func selectgo(cas0 *byte, order0 *byte, pc0 *uintptr, nsends int, nrecvs int, block bool) (int, bool)
+func block()
+
+func makeslice(typ *byte, len int, cap int) unsafe.Pointer
+func makeslice64(typ *byte, len int64, cap int64) unsafe.Pointer
+func makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+func growslice(typ *byte, old []any, cap int) (ary []any)
+func unsafeslice(typ *byte, ptr unsafe.Pointer, len int)
+func unsafeslice64(typ *byte, ptr unsafe.Pointer, len int64)
+func unsafeslicecheckptr(typ *byte, ptr unsafe.Pointer, len int64)
+
+func memmove(to *any, frm *any, length uintptr)
+func memclrNoHeapPointers(ptr unsafe.Pointer, n uintptr)
+func memclrHasPointers(ptr unsafe.Pointer, n uintptr)
+
+func memequal(x, y *any, size uintptr) bool
+func memequal0(x, y *any) bool
+func memequal8(x, y *any) bool
+func memequal16(x, y *any) bool
+func memequal32(x, y *any) bool
+func memequal64(x, y *any) bool
+func memequal128(x, y *any) bool
+func f32equal(p, q unsafe.Pointer) bool
+func f64equal(p, q unsafe.Pointer) bool
+func c64equal(p, q unsafe.Pointer) bool
+func c128equal(p, q unsafe.Pointer) bool
+func strequal(p, q unsafe.Pointer) bool
+func interequal(p, q unsafe.Pointer) bool
+func nilinterequal(p, q unsafe.Pointer) bool
+
+func memhash(p unsafe.Pointer, h uintptr, size uintptr) uintptr
+func memhash0(p unsafe.Pointer, h uintptr) uintptr
+func memhash8(p unsafe.Pointer, h uintptr) uintptr
+func memhash16(p unsafe.Pointer, h uintptr) uintptr
+func memhash32(p unsafe.Pointer, h uintptr) uintptr
+func memhash64(p unsafe.Pointer, h uintptr) uintptr
+func memhash128(p unsafe.Pointer, h uintptr) uintptr
+func f32hash(p unsafe.Pointer, h uintptr) uintptr
+func f64hash(p unsafe.Pointer, h uintptr) uintptr
+func c64hash(p unsafe.Pointer, h uintptr) uintptr
+func c128hash(p unsafe.Pointer, h uintptr) uintptr
+func strhash(a unsafe.Pointer, h uintptr) uintptr
+func interhash(p unsafe.Pointer, h uintptr) uintptr
+func nilinterhash(p unsafe.Pointer, h uintptr) uintptr
+
+// only used on 32-bit
+func int64div(int64, int64) int64
+func uint64div(uint64, uint64) uint64
+func int64mod(int64, int64) int64
+func uint64mod(uint64, uint64) uint64
+func float64toint64(float64) int64
+func float64touint64(float64) uint64
+func float64touint32(float64) uint32
+func int64tofloat64(int64) float64
+func int64tofloat32(int64) float32
+func uint64tofloat64(uint64) float64
+func uint64tofloat32(uint64) float32
+func uint32tofloat64(uint32) float64
+
+func complex128div(num complex128, den complex128) (quo complex128)
+
+func getcallerpc() uintptr
+func getcallersp() uintptr
+
+// race detection
+func racefuncenter(uintptr)
+func racefuncexit()
+func raceread(uintptr)
+func racewrite(uintptr)
+func racereadrange(addr, size uintptr)
+func racewriterange(addr, size uintptr)
+
+// memory sanitizer
+func msanread(addr, size uintptr)
+func msanwrite(addr, size uintptr)
+func msanmove(dst, src, size uintptr)
+
+// address sanitizer
+func asanread(addr, size uintptr)
+func asanwrite(addr, size uintptr)
+
+func checkptrAlignment(unsafe.Pointer, *byte, uintptr)
+func checkptrArithmetic(unsafe.Pointer, []unsafe.Pointer)
+
+func libfuzzerTraceCmp1(uint8, uint8)
+func libfuzzerTraceCmp2(uint16, uint16)
+func libfuzzerTraceCmp4(uint32, uint32)
+func libfuzzerTraceCmp8(uint64, uint64)
+func libfuzzerTraceConstCmp1(uint8, uint8)
+func libfuzzerTraceConstCmp2(uint16, uint16)
+func libfuzzerTraceConstCmp4(uint32, uint32)
+func libfuzzerTraceConstCmp8(uint64, uint64)
+
+// architecture variants
+var x86HasPOPCNT bool
+var x86HasSSE41 bool
+var x86HasFMA bool
+var armHasVFPv4 bool
+var arm64HasATOMICS bool
diff --git a/src/cmd/compile/internal/typecheck/builtin_test.go b/src/cmd/compile/internal/typecheck/builtin_test.go
new file mode 100644
index 0000000..fb9d3e3
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/builtin_test.go
@@ -0,0 +1,32 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "bytes"
+ "internal/testenv"
+ "io/ioutil"
+ "os/exec"
+ "testing"
+)
+
+func TestBuiltin(t *testing.T) {
+ testenv.MustHaveGoRun(t)
+ t.Parallel()
+
+ old, err := ioutil.ReadFile("builtin.go")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ new, err := exec.Command(testenv.GoToolPath(t), "run", "mkbuiltin.go", "-stdout").Output()
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if !bytes.Equal(old, new) {
+ t.Fatal("builtin.go out of date; run mkbuiltin.go")
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/const.go b/src/cmd/compile/internal/typecheck/const.go
new file mode 100644
index 0000000..24058fa
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/const.go
@@ -0,0 +1,955 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math"
+ "math/big"
+ "strings"
+ "unicode"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func roundFloat(v constant.Value, sz int64) constant.Value {
+ switch sz {
+ case 4:
+ f, _ := constant.Float32Val(v)
+ return makeFloat64(float64(f))
+ case 8:
+ f, _ := constant.Float64Val(v)
+ return makeFloat64(f)
+ }
+ base.Fatalf("unexpected size: %v", sz)
+ panic("unreachable")
+}
+
+// truncate the float literal v to 32-bit or 64-bit precision
+// according to type; return the truncated value.
+func truncfltlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
+ // If there was overflow, simply continuing would set the
+ // value to Inf which in turn would lead to spurious follow-on
+ // errors. Avoid this by returning the existing value.
+ return v
+ }
+
+ return roundFloat(v, t.Size())
+}
+
+// truncate the real and imaginary parts of the complex literal v to 32-bit
+// or 64-bit precision, according to type; return the truncated value. In case
+// of overflow, calls Errorf but does not truncate the input value.
+func trunccmplxlit(v constant.Value, t *types.Type) constant.Value {
+ if t.IsUntyped() || overflow(v, t) {
+ // If there was overflow, simply continuing would set the
+ // value to Inf which in turn would lead to spurious follow-on
+ // errors. Avoid this by returning the existing value.
+ return v
+ }
+
+ fsz := t.Size() / 2
+ return makeComplex(roundFloat(constant.Real(v), fsz), roundFloat(constant.Imag(v), fsz))
+}
+
+// TODO(mdempsky): Replace these with better APIs.
+func convlit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+func DefaultLit(n ir.Node, t *types.Type) ir.Node { return convlit1(n, t, false, nil) }
+
+// convlit1 converts an untyped expression n to type t. If n already
+// has a type, convlit1 has no effect.
+//
+// For explicit conversions, t must be non-nil, and integer-to-string
+// conversions are allowed.
+//
+// For implicit conversions (e.g., assignments), t may be nil; if so,
+// n is converted to its default type.
+//
+// If there's an error converting n to t, context is used in the error
+// message.
+func convlit1(n ir.Node, t *types.Type, explicit bool, context func() string) ir.Node {
+ if explicit && t == nil {
+ base.Fatalf("explicit conversion missing type")
+ }
+ if t != nil && t.IsUntyped() {
+ base.Fatalf("bad conversion to untyped: %v", t)
+ }
+
+ if n == nil || n.Type() == nil {
+ // Allow sloppy callers.
+ return n
+ }
+ if !n.Type().IsUntyped() {
+ // Already typed; nothing to do.
+ return n
+ }
+
+ // Nil is technically not a constant, so handle it specially.
+ if n.Type().Kind() == types.TNIL {
+ if n.Op() != ir.ONIL {
+ base.Fatalf("unexpected op: %v (%v)", n, n.Op())
+ }
+ n = ir.Copy(n)
+ if t == nil {
+ base.Errorf("use of untyped nil")
+ n.SetDiag(true)
+ n.SetType(nil)
+ return n
+ }
+
+ if !t.HasNil() {
+ // Leave for caller to handle.
+ return n
+ }
+
+ n.SetType(t)
+ return n
+ }
+
+ if t == nil || !ir.OKForConst[t.Kind()] {
+ t = defaultType(n.Type())
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("unexpected untyped expression: %v", n)
+
+ case ir.OLITERAL:
+ v := convertVal(n.Val(), t, explicit)
+ if v.Kind() == constant.Unknown {
+ n = ir.NewConstExpr(n.Val(), n)
+ break
+ }
+ n = ir.NewConstExpr(v, n)
+ n.SetType(t)
+ return n
+
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.OREAL, ir.OIMAG:
+ ot := operandType(n.Op(), t)
+ if ot == nil {
+ n = DefaultLit(n, nil)
+ break
+ }
+
+ n := n.(*ir.UnaryExpr)
+ n.X = convlit(n.X, ot)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+ return n
+
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT, ir.OOROR, ir.OANDAND, ir.OCOMPLEX:
+ ot := operandType(n.Op(), t)
+ if ot == nil {
+ n = DefaultLit(n, nil)
+ break
+ }
+
+ var l, r ir.Node
+ switch n := n.(type) {
+ case *ir.BinaryExpr:
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
+ case *ir.LogicalExpr:
+ n.X = convlit(n.X, ot)
+ n.Y = convlit(n.Y, ot)
+ l, r = n.X, n.Y
+ }
+
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ return n
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ if !t.IsBoolean() {
+ break
+ }
+ n.SetType(t)
+ return n
+
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ n.X = convlit1(n.X, t, explicit, nil)
+ n.SetType(n.X.Type())
+ if n.Type() != nil && !n.Type().IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, n.Type())
+ n.SetType(nil)
+ }
+ return n
+ }
+
+ if !n.Diag() {
+ if !t.Broke() {
+ if explicit {
+ base.Errorf("cannot convert %L to type %v", n, t)
+ } else if context != nil {
+ base.Errorf("cannot use %L as type %v in %s", n, t, context())
+ } else {
+ base.Errorf("cannot use %L as type %v", n, t)
+ }
+ }
+ n.SetDiag(true)
+ }
+ n.SetType(nil)
+ return n
+}
+
+func operandType(op ir.Op, t *types.Type) *types.Type {
+ switch op {
+ case ir.OCOMPLEX:
+ if t.IsComplex() {
+ return types.FloatForComplex(t)
+ }
+ case ir.OREAL, ir.OIMAG:
+ if t.IsFloat() {
+ return types.ComplexForFloat(t)
+ }
+ default:
+ if okfor[op][t.Kind()] {
+ return t
+ }
+ }
+ return nil
+}
+
+// convertVal converts v into a representation appropriate for t. If
+// no such representation exists, it returns an unknown constant instead.
+//
+// If explicit is true, then conversions from integer to string are
+// also allowed.
+func convertVal(v constant.Value, t *types.Type, explicit bool) constant.Value {
+ switch ct := v.Kind(); ct {
+ case constant.Bool:
+ if t.IsBoolean() {
+ return v
+ }
+
+ case constant.String:
+ if t.IsString() {
+ return v
+ }
+
+ case constant.Int:
+ if explicit && t.IsString() {
+ return tostr(v)
+ }
+ fallthrough
+ case constant.Float, constant.Complex:
+ switch {
+ case t.IsInteger():
+ v = toint(v)
+ overflow(v, t)
+ return v
+ case t.IsFloat():
+ v = toflt(v)
+ v = truncfltlit(v, t)
+ return v
+ case t.IsComplex():
+ v = tocplx(v)
+ v = trunccmplxlit(v, t)
+ return v
+ }
+ }
+
+ return constant.MakeUnknown()
+}
+
+func tocplx(v constant.Value) constant.Value {
+ return constant.ToComplex(v)
+}
+
+func toflt(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to real", v)
+ }
+ v = constant.Real(v)
+ }
+
+ return constant.ToFloat(v)
+}
+
+func toint(v constant.Value) constant.Value {
+ if v.Kind() == constant.Complex {
+ if constant.Sign(constant.Imag(v)) != 0 {
+ base.Errorf("constant %v truncated to integer", v)
+ }
+ v = constant.Real(v)
+ }
+
+ if v := constant.ToInt(v); v.Kind() == constant.Int {
+ return v
+ }
+
+ // The value of v cannot be represented as an integer,
+ // so we need to print an error message.
+ // Unfortunately some float values cannot be
+ // reasonably formatted for inclusion in an error
+ // message (example: 1 + 1e-100), so first we try to
+ // format the float; if the truncation resulted in
+ // something that looks like an integer we omit the
+ // value from the error message.
+ // (See issue #11371).
+ f := ir.BigFloat(v)
+ if f.MantExp(nil) > 2*ir.ConstPrec {
+ base.Errorf("integer too large")
+ } else {
+ var t big.Float
+ t.Parse(fmt.Sprint(v), 0)
+ if t.IsInt() {
+ base.Errorf("constant truncated to integer")
+ } else {
+ base.Errorf("constant %v truncated to integer", v)
+ }
+ }
+
+ // Prevent follow-on errors.
+ // TODO(mdempsky): Use constant.MakeUnknown() instead.
+ return constant.MakeInt64(1)
+}
+
+// overflow reports whether constant value v is too large
+// to represent with type t, and emits an error message if so.
+func overflow(v constant.Value, t *types.Type) bool {
+ // v has already been converted
+ // to appropriate form for t.
+ if t.IsUntyped() {
+ return false
+ }
+ if v.Kind() == constant.Int && constant.BitLen(v) > ir.ConstPrec {
+ base.Errorf("integer too large")
+ return true
+ }
+ if ir.ConstOverflow(v, t) {
+ base.Errorf("constant %v overflows %v", types.FmtConst(v, false), t)
+ return true
+ }
+ return false
+}
+
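+// tostr converts an integer constant to the one-character string for that
+// code point (for example, 65 becomes "A"); values that are not valid runes
+// become the Unicode replacement character. Non-integer constants are
+// returned unchanged.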
+func tostr(v constant.Value) constant.Value {
+ if v.Kind() == constant.Int {
+ r := unicode.ReplacementChar
+ if x, ok := constant.Uint64Val(v); ok && x <= unicode.MaxRune {
+ r = rune(x)
+ }
+ v = constant.MakeString(string(r))
+ }
+ return v
+}
+
+var tokenForOp = [...]token.Token{
+ ir.OPLUS: token.ADD,
+ ir.ONEG: token.SUB,
+ ir.ONOT: token.NOT,
+ ir.OBITNOT: token.XOR,
+
+ ir.OADD: token.ADD,
+ ir.OSUB: token.SUB,
+ ir.OMUL: token.MUL,
+ ir.ODIV: token.QUO,
+ ir.OMOD: token.REM,
+ ir.OOR: token.OR,
+ ir.OXOR: token.XOR,
+ ir.OAND: token.AND,
+ ir.OANDNOT: token.AND_NOT,
+ ir.OOROR: token.LOR,
+ ir.OANDAND: token.LAND,
+
+ ir.OEQ: token.EQL,
+ ir.ONE: token.NEQ,
+ ir.OLT: token.LSS,
+ ir.OLE: token.LEQ,
+ ir.OGT: token.GTR,
+ ir.OGE: token.GEQ,
+
+ ir.OLSH: token.SHL,
+ ir.ORSH: token.SHR,
+}
+
+// EvalConst returns a constant-evaluated expression equivalent to n.
+// If n is not a constant, EvalConst returns n.
+// Otherwise, EvalConst returns a new OLITERAL with the same value as n,
+// and with .Orig pointing back to n.
+func EvalConst(n ir.Node) ir.Node {
+ // Pick off just the opcodes that can be constant evaluated.
+ switch n.Op() {
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT:
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
+ if nl.Op() == ir.OLITERAL {
+ var prec uint
+ if n.Type().IsUnsigned() {
+ prec = uint(n.Type().Size() * 8)
+ }
+ return OrigConst(n, constant.UnaryOp(tokenForOp[n.Op()], nl.Val(), prec))
+ }
+
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ODIV, ir.OMOD, ir.OOR, ir.OXOR, ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ rval := nr.Val()
+
+ // check for divisor underflow in complex division (see issue 20227)
+ if n.Op() == ir.ODIV && n.Type().IsComplex() && constant.Sign(square(constant.Real(rval))) == 0 && constant.Sign(square(constant.Imag(rval))) == 0 {
+ base.Errorf("complex division by zero")
+ n.SetType(nil)
+ return n
+ }
+ if (n.Op() == ir.ODIV || n.Op() == ir.OMOD) && constant.Sign(rval) == 0 {
+ base.Errorf("division by zero")
+ n.SetType(nil)
+ return n
+ }
+
+ tok := tokenForOp[n.Op()]
+ if n.Op() == ir.ODIV && n.Type().IsInteger() {
+ tok = token.QUO_ASSIGN // integer division
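+ // go/constant treats token.QUO_ASSIGN as a request for truncating
+ // integer division of Int operands, rather than exact rational division.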
+ }
+ return OrigConst(n, constant.BinaryOp(nl.Val(), tok, rval))
+ }
+
+ case ir.OOROR, ir.OANDAND:
+ n := n.(*ir.LogicalExpr)
+ nl, nr := n.X, n.Y
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return OrigConst(n, constant.BinaryOp(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+ }
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return OrigBool(n, constant.Compare(nl.Val(), tokenForOp[n.Op()], nr.Val()))
+ }
+
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ // shiftBound from go/types; "so we can express smallestFloat64" (see issue #44057)
+ const shiftBound = 1023 - 1 + 52
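+ // (1023-1)+52 = 1074 is the exponent magnitude of the smallest positive
+ // (denormal) float64, 2^-1074.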
+ s, ok := constant.Uint64Val(nr.Val())
+ if !ok || s > shiftBound {
+ base.Errorf("invalid shift count %v", nr)
+ n.SetType(nil)
+ break
+ }
+ return OrigConst(n, constant.Shift(toint(nl.Val()), tokenForOp[n.Op()], uint(s)))
+ }
+
+ case ir.OCONV, ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ nl := n.X
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
+ return OrigConst(n, convertVal(nl.Val(), n.Type(), true))
+ }
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ nl := n.X
+ if ir.OKForConst[n.Type().Kind()] && nl.Op() == ir.OLITERAL {
+ // set so n.Orig gets OCONV instead of OCONVNOP
+ n.SetOp(ir.OCONV)
+ return OrigConst(n, nl.Val())
+ }
+
+ case ir.OADDSTR:
+ // Merge adjacent constants in the argument list.
+ n := n.(*ir.AddStringExpr)
+ s := n.List
+ need := 0
+ for i := 0; i < len(s); i++ {
+ if i == 0 || !ir.IsConst(s[i-1], constant.String) || !ir.IsConst(s[i], constant.String) {
+ // Can't merge s[i] into s[i-1]; need a slot in the list.
+ need++
+ }
+ }
+ if need == len(s) {
+ return n
+ }
+ if need == 1 {
+ var strs []string
+ for _, c := range s {
+ strs = append(strs, ir.StringVal(c))
+ }
+ return OrigConst(n, constant.MakeString(strings.Join(strs, "")))
+ }
+ newList := make([]ir.Node, 0, need)
+ for i := 0; i < len(s); i++ {
+ if ir.IsConst(s[i], constant.String) && i+1 < len(s) && ir.IsConst(s[i+1], constant.String) {
+ // merge from i up to but not including i2
+ var strs []string
+ i2 := i
+ for i2 < len(s) && ir.IsConst(s[i2], constant.String) {
+ strs = append(strs, ir.StringVal(s[i2]))
+ i2++
+ }
+
+ nl := ir.Copy(n).(*ir.AddStringExpr)
+ nl.List = s[i:i2]
+ newList = append(newList, OrigConst(nl, constant.MakeString(strings.Join(strs, ""))))
+ i = i2 - 1
+ } else {
+ newList = append(newList, s[i])
+ }
+ }
+
+ nn := ir.Copy(n).(*ir.AddStringExpr)
+ nn.List = newList
+ return nn
+
+ case ir.OCAP, ir.OLEN:
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
+ switch nl.Type().Kind() {
+ case types.TSTRING:
+ if ir.IsConst(nl, constant.String) {
+ return OrigInt(n, int64(len(ir.StringVal(nl))))
+ }
+ case types.TARRAY:
+ if !anyCallOrChan(nl) {
+ return OrigInt(n, nl.Type().NumElem())
+ }
+ }
+
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ return OrigInt(n, evalunsafe(n))
+
+ case ir.OREAL:
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
+ if nl.Op() == ir.OLITERAL {
+ return OrigConst(n, constant.Real(nl.Val()))
+ }
+
+ case ir.OIMAG:
+ n := n.(*ir.UnaryExpr)
+ nl := n.X
+ if nl.Op() == ir.OLITERAL {
+ return OrigConst(n, constant.Imag(nl.Val()))
+ }
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ nl, nr := n.X, n.Y
+ if nl.Op() == ir.OLITERAL && nr.Op() == ir.OLITERAL {
+ return OrigConst(n, makeComplex(nl.Val(), nr.Val()))
+ }
+ }
+
+ return n
+}
+
+func makeFloat64(f float64) constant.Value {
+ if math.IsInf(f, 0) {
+ base.Fatalf("infinity is not a valid constant")
+ }
+ return constant.MakeFloat64(f)
+}
+
+func makeComplex(real, imag constant.Value) constant.Value {
+ return constant.BinaryOp(constant.ToFloat(real), token.ADD, constant.MakeImag(constant.ToFloat(imag)))
+}
+
+func square(x constant.Value) constant.Value {
+ return constant.BinaryOp(x, token.MUL, x)
+}
+
+// For matching historical "constant OP overflow" error messages.
+// TODO(mdempsky): Replace with error messages like go/types uses.
+var overflowNames = [...]string{
+ ir.OADD: "addition",
+ ir.OSUB: "subtraction",
+ ir.OMUL: "multiplication",
+ ir.OLSH: "shift",
+ ir.OXOR: "bitwise XOR",
+ ir.OBITNOT: "bitwise complement",
+}
+
+// OrigConst returns an OLITERAL with orig n and value v.
+func OrigConst(n ir.Node, v constant.Value) ir.Node {
+ lno := ir.SetPos(n)
+ v = convertVal(v, n.Type(), false)
+ base.Pos = lno
+
+ switch v.Kind() {
+ case constant.Int:
+ if constant.BitLen(v) <= ir.ConstPrec {
+ break
+ }
+ fallthrough
+ case constant.Unknown:
+ what := overflowNames[n.Op()]
+ if what == "" {
+ base.Fatalf("unexpected overflow: %v", n.Op())
+ }
+ base.ErrorfAt(n.Pos(), "constant %v overflow", what)
+ n.SetType(nil)
+ return n
+ }
+
+ return ir.NewConstExpr(v, n)
+}
+
+func OrigBool(n ir.Node, v bool) ir.Node {
+ return OrigConst(n, constant.MakeBool(v))
+}
+
+func OrigInt(n ir.Node, v int64) ir.Node {
+ return OrigConst(n, constant.MakeInt64(v))
+}
+
+// defaultlit2 applies DefaultLit to both nodes simultaneously;
+// if both are ideal (untyped) going in, they must
+// come out with the same type.
+// force means a concrete (non-ideal) type must be assigned.
+// The results of defaultlit2 MUST be assigned back to l and r, e.g.
+// n.Left, n.Right = defaultlit2(n.Left, n.Right, force)
+func defaultlit2(l ir.Node, r ir.Node, force bool) (ir.Node, ir.Node) {
+ if l.Type() == nil || r.Type() == nil {
+ return l, r
+ }
+
+ if !l.Type().IsInterface() && !r.Type().IsInterface() {
+ // Can't mix bool with non-bool, string with non-string.
+ if l.Type().IsBoolean() != r.Type().IsBoolean() {
+ return l, r
+ }
+ if l.Type().IsString() != r.Type().IsString() {
+ return l, r
+ }
+ }
+
+ if !l.Type().IsUntyped() {
+ r = convlit(r, l.Type())
+ return l, r
+ }
+
+ if !r.Type().IsUntyped() {
+ l = convlit(l, r.Type())
+ return l, r
+ }
+
+ if !force {
+ return l, r
+ }
+
+ // Can't mix nil with anything untyped.
+ if ir.IsNil(l) || ir.IsNil(r) {
+ return l, r
+ }
+ t := defaultType(mixUntyped(l.Type(), r.Type()))
+ l = convlit(l, t)
+ r = convlit(r, t)
+ return l, r
+}
+
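+// mixUntyped returns the "larger" of the two untyped constant types, using
+// the ranking int < rune < float < complex; for example, mixing untyped int
+// with untyped float yields untyped float.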
+func mixUntyped(t1, t2 *types.Type) *types.Type {
+ if t1 == t2 {
+ return t1
+ }
+
+ rank := func(t *types.Type) int {
+ switch t {
+ case types.UntypedInt:
+ return 0
+ case types.UntypedRune:
+ return 1
+ case types.UntypedFloat:
+ return 2
+ case types.UntypedComplex:
+ return 3
+ }
+ base.Fatalf("bad type %v", t)
+ panic("unreachable")
+ }
+
+ if rank(t2) > rank(t1) {
+ return t2
+ }
+ return t1
+}
+
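+// defaultType returns the default (typed) type for an untyped constant type:
+// bool for untyped bool, string for untyped string, int for untyped int,
+// rune for untyped rune, float64 for untyped float, and complex128 for
+// untyped complex. Other types (including nil) are returned unchanged.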
+func defaultType(t *types.Type) *types.Type {
+ if !t.IsUntyped() || t.Kind() == types.TNIL {
+ return t
+ }
+
+ switch t {
+ case types.UntypedBool:
+ return types.Types[types.TBOOL]
+ case types.UntypedString:
+ return types.Types[types.TSTRING]
+ case types.UntypedInt:
+ return types.Types[types.TINT]
+ case types.UntypedRune:
+ return types.RuneType
+ case types.UntypedFloat:
+ return types.Types[types.TFLOAT64]
+ case types.UntypedComplex:
+ return types.Types[types.TCOMPLEX128]
+ }
+
+ base.Fatalf("bad type %v", t)
+ return nil
+}
+
+// IndexConst checks if Node n contains a constant expression
+// representable as a non-negative int and returns its value.
+// If n is not a constant expression, not representable as an
+// integer, or negative, it returns -1. If n is too large, it
+// returns -2.
+func IndexConst(n ir.Node) int64 {
+ if n.Op() != ir.OLITERAL {
+ return -1
+ }
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ return -1
+ }
+
+ v := toint(n.Val())
+ if v.Kind() != constant.Int || constant.Sign(v) < 0 {
+ return -1
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ return -2
+ }
+ return ir.IntVal(types.Types[types.TINT], v)
+}
+
+// anyCallOrChan reports whether n contains any calls or channel operations.
+func anyCallOrChan(n ir.Node) bool {
+ return ir.Any(n, func(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAPPEND,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCAP,
+ ir.OCLOSE,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.ONEW,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OREAL,
+ ir.ORECOVER,
+ ir.ORECV,
+ ir.OUNSAFEADD,
+ ir.OUNSAFESLICE:
+ return true
+ }
+ return false
+ })
+}
+
+// A constSet represents a set of Go constant expressions.
+type constSet struct {
+ m map[constSetKey]src.XPos
+}
+
+type constSetKey struct {
+ typ *types.Type
+ val interface{}
+}
+
+// add adds constant expression n to s. If a constant expression of
+// equal value and identical type has already been added, then add
+// reports an error about the duplicate value.
+//
+// pos provides position information for where expression n occurred
+// (in case n does not have its own position information). what and
+// where are used in the error message.
+//
+// n must not be an untyped constant.
+func (s *constSet) add(pos src.XPos, n ir.Node, what, where string) {
+ if conv := n; conv.Op() == ir.OCONVIFACE {
+ conv := conv.(*ir.ConvExpr)
+ if conv.Implicit() {
+ n = conv.X
+ }
+ }
+
+ if !ir.IsConstNode(n) || n.Type() == nil {
+ return
+ }
+ if n.Type().IsUntyped() {
+ base.Fatalf("%v is untyped", n)
+ }
+
+ // Consts are only duplicates if they have the same value and
+ // identical types.
+ //
+ // In general, we have to use types.Identical to test type
+ // identity, because == gives false negatives for anonymous
+ // types and the byte/uint8 and rune/int32 builtin type
+ // aliases. However, this is not a problem here, because
+ // constant expressions are always untyped or have a named
+ // type, and we explicitly handle the builtin type aliases
+ // below.
+ //
+ // This approach may need to be revisited though if we fix
+ // #21866 by treating all type aliases like byte/uint8 and
+ // rune/int32.
+
+ typ := n.Type()
+ switch typ {
+ case types.ByteType:
+ typ = types.Types[types.TUINT8]
+ case types.RuneType:
+ typ = types.Types[types.TINT32]
+ }
+ k := constSetKey{typ, ir.ConstValue(n)}
+
+ if ir.HasUniquePos(n) {
+ pos = n.Pos()
+ }
+
+ if s.m == nil {
+ s.m = make(map[constSetKey]src.XPos)
+ }
+
+ if prevPos, isDup := s.m[k]; isDup {
+ base.ErrorfAt(pos, "duplicate %s %s in %s\n\tprevious %s at %v",
+ what, nodeAndVal(n), where,
+ what, base.FmtPos(prevPos))
+ } else {
+ s.m[k] = pos
+ }
+}
+
+// nodeAndVal reports both an expression and its constant value, if
+// the latter is non-obvious.
+//
+// TODO(mdempsky): This could probably be a fmt.go flag.
+func nodeAndVal(n ir.Node) string {
+ show := fmt.Sprint(n)
+ val := ir.ConstValue(n)
+ if s := fmt.Sprintf("%#v", val); show != s {
+ show += " (value " + s + ")"
+ }
+ return show
+}
+
+// evalunsafe evaluates a package unsafe operation and returns the result.
+func evalunsafe(n ir.Node) int64 {
+ switch n.Op() {
+ case ir.OALIGNOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ tr := n.X.Type()
+ if tr == nil {
+ return 0
+ }
+ types.CalcSize(tr)
+ if n.Op() == ir.OALIGNOF {
+ return tr.Alignment()
+ }
+ return tr.Size()
+
+ case ir.OOFFSETOF:
+ // must be a selector.
+ n := n.(*ir.UnaryExpr)
+ // ODOT and ODOTPTR are allowed in case the OXDOT transformation has
+ // already happened (e.g. during -G=3 stenciling).
+ if n.X.Op() != ir.OXDOT && n.X.Op() != ir.ODOT && n.X.Op() != ir.ODOTPTR {
+ base.Errorf("invalid expression %v", n)
+ return 0
+ }
+ sel := n.X.(*ir.SelectorExpr)
+
+ // Remember the base of the selector so we can find it again after dot insertion.
+ // Since sel.X may be mutated by typechecking, typecheck it explicitly
+ // first so we can track it correctly.
+ sel.X = Expr(sel.X)
+ sbase := sel.X
+
+ // An implicit dot may already be resolved when instantiating a generic function. So we
+ // need to remove any implicit dot until we reach the first non-implicit one, which is
+ // the right base selector. See issue #53137.
+ var clobberBase func(n ir.Node) ir.Node
+ clobberBase = func(n ir.Node) ir.Node {
+ if sel, ok := n.(*ir.SelectorExpr); ok && sel.Implicit() {
+ return clobberBase(sel.X)
+ }
+ return n
+ }
+ sbase = clobberBase(sbase)
+
+ tsel := Expr(sel)
+ n.X = tsel
+ if tsel.Type() == nil {
+ return 0
+ }
+ switch tsel.Op() {
+ case ir.ODOT, ir.ODOTPTR:
+ break
+ case ir.OMETHVALUE:
+ base.Errorf("invalid expression %v: argument is a method value", n)
+ return 0
+ default:
+ base.Errorf("invalid expression %v", n)
+ return 0
+ }
+
+ // Sum offsets for dots until we reach sbase.
+ var v int64
+ var next ir.Node
+ for r := tsel; r != sbase; r = next {
+ switch r.Op() {
+ case ir.ODOTPTR:
+ // For Offsetof(s.f), s may itself be a pointer,
+ // but accessing f must not otherwise involve
+ // indirection via embedded pointer types.
+ r := r.(*ir.SelectorExpr)
+ if r.X != sbase {
+ base.Errorf("invalid expression %v: selector implies indirection of embedded %v", n, r.X)
+ return 0
+ }
+ fallthrough
+ case ir.ODOT:
+ r := r.(*ir.SelectorExpr)
+ v += r.Offset()
+ next = r.X
+ default:
+ ir.Dump("unsafenmagic", tsel)
+ base.Fatalf("impossible %v node after dot insertion", r.Op())
+ }
+ }
+ return v
+ }
+
+ base.Fatalf("unexpected op %v", n.Op())
+ return 0
+}
diff --git a/src/cmd/compile/internal/typecheck/crawler.go b/src/cmd/compile/internal/typecheck/crawler.go
new file mode 100644
index 0000000..f14d885
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/crawler.go
@@ -0,0 +1,399 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// crawlExports crawls the type/object graph rooted at the given list of exported
+// objects (which are variables, functions, and types). It descends through all parts
+// of types and follows methods on defined types. Any functions that are found to be
+// potentially callable by importers directly or after inlining are marked with
+// ExportInline, so that iexport.go knows to export their inline body.
+//
+// The overall purpose of crawlExports is to AVOID exporting inlineable methods
+// that cannot actually be referenced, thereby reducing the size of the exports
+// significantly.
+//
+// For non-generic defined types reachable from global variables, we only set
+// ExportInline for exported methods. For defined types that are directly named or are
+// embedded recursively in such a type, we set ExportInline for all methods, since
+// these types can be embedded in another local type. For instantiated types that are
+// used anywhere in an inlineable function, we set ExportInline on all methods of the
+// base generic type, since all methods will be needed for creating any instantiated
+// type.
+func crawlExports(exports []*ir.Name) {
+ p := crawler{
+ marked: make(map[*types.Type]bool),
+ embedded: make(map[*types.Type]bool),
+ generic: make(map[*types.Type]bool),
+ checkFullyInst: make(map[*types.Type]bool),
+ }
+ for _, n := range exports {
+ p.markObject(n)
+ }
+}
+
+type crawler struct {
+ marked map[*types.Type]bool // types already seen by markType
+ embedded map[*types.Type]bool // types already seen by markEmbed
+ generic map[*types.Type]bool // types already seen by markGeneric
+ checkFullyInst map[*types.Type]bool // types already seen by checkForFullyInst
+}
+
+// markObject visits a reachable object (function, method, global type, or global variable)
+func (p *crawler) markObject(n *ir.Name) {
+ if n.Op() == ir.ONAME && n.Class == ir.PFUNC {
+ p.markInlBody(n)
+ }
+
+ // If a declared type name is reachable, users can embed it in their
+ // own types, which makes even its unexported methods reachable.
+ if n.Op() == ir.OTYPE {
+ p.markEmbed(n.Type())
+ }
+
+ p.markType(n.Type())
+}
+
+// markType recursively visits types reachable from t to identify functions whose
+// inline bodies may be needed. For instantiated generic types, it visits the base
+// generic type, which has the relevant methods.
+func (p *crawler) markType(t *types.Type) {
+ if orig := t.OrigType(); orig != nil {
+ // Convert to the base generic type.
+ t = orig
+ }
+ if p.marked[t] {
+ return
+ }
+ p.marked[t] = true
+
+ // If this is a defined type, mark all of its associated
+ // methods. Skip interface types because t.Methods contains
+ // only their unexpanded method set (i.e., exclusive of
+ // interface embeddings), and the switch statement below
+ // handles their full method set.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ if types.IsExported(m.Sym.Name) {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+ }
+
+ // Recursively mark any types that can be produced given a
+ // value of type t: dereferencing a pointer; indexing or
+ // iterating over an array, slice, or map; receiving from a
+ // channel; accessing a struct field or interface method; or
+ // calling a function.
+ //
+ // Notably, we don't mark function parameter types, because
+ // the user already needs some way to construct values of
+ // those types.
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.markType(t.Elem())
+
+ case types.TCHAN:
+ if t.ChanDir().CanRecv() {
+ p.markType(t.Elem())
+ }
+
+ case types.TMAP:
+ p.markType(t.Key())
+ p.markType(t.Elem())
+
+ case types.TSTRUCT:
+ if t.IsFuncArgStruct() {
+ break
+ }
+ for _, f := range t.FieldSlice() {
+ // Mark the type of an unexported field if it is a
+ // fully-instantiated type, since we create and instantiate
+ // the methods of any fully-instantiated type that we see
+ // during import (see end of typecheck.substInstType).
+ if types.IsExported(f.Sym.Name) || f.Embedded != 0 ||
+ isPtrFullyInstantiated(f.Type) {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Results().FieldSlice() {
+ p.markType(f.Type)
+ }
+
+ case types.TINTER:
+ for _, f := range t.AllMethods().Slice() {
+ if types.IsExported(f.Sym.Name) {
+ p.markType(f.Type)
+ }
+ }
+
+ case types.TTYPEPARAM:
+ // No other type that needs to be followed.
+ }
+}
+
+// markEmbed is similar to markType, but handles finding methods that
+// need to be re-exported because t can be embedded in user code
+// (possibly transitively).
+func (p *crawler) markEmbed(t *types.Type) {
+ if t.IsPtr() {
+ // Defined pointer type; not allowed to embed anyway.
+ if t.Sym() != nil {
+ return
+ }
+ t = t.Elem()
+ }
+
+ if orig := t.OrigType(); orig != nil {
+ // Convert to the base generic type.
+ t = orig
+ }
+
+ if p.embedded[t] {
+ return
+ }
+ p.embedded[t] = true
+
+ // If t is a defined type, then re-export all of its methods. Unlike
+ // in markType, we include even unexported methods here, because we
+ // still need to generate wrappers for them, even if the user can't
+ // refer to them directly.
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+
+ // If t is a struct, recursively visit its embedded fields.
+ if t.IsStruct() {
+ for _, f := range t.FieldSlice() {
+ if f.Embedded != 0 {
+ p.markEmbed(f.Type)
+ }
+ }
+ }
+}
+
+// markGeneric takes an instantiated type or a base generic type t, and marks all the
+// methods of the base generic type of t. If a base generic type is written out for
+// export, even if not explicitly marked for export, then all of its methods need to
+// be available for instantiation, since we always create all methods of a specified
+// instantiated type. Non-exported methods must generally be instantiated, since they may
+// be called by the exported methods or other generic functions in the same package.
+func (p *crawler) markGeneric(t *types.Type) {
+ if t.IsPtr() {
+ t = t.Elem()
+ }
+ if orig := t.OrigType(); orig != nil {
+ // Convert to the base generic type.
+ t = orig
+ }
+ if p.generic[t] {
+ return
+ }
+ p.generic[t] = true
+
+ if t.Sym() != nil && t.Kind() != types.TINTER {
+ for _, m := range t.Methods().Slice() {
+ p.markObject(m.Nname.(*ir.Name))
+ }
+ }
+}
+
+// checkForFullyInst looks for fully-instantiated types in a type (at any nesting
+// level). If it finds a fully-instantiated type, it ensures that the necessary
+// dictionary and shape methods are exported. It updates p.checkFullyInst, so it
+// traverses each particular type only once.
+func (p *crawler) checkForFullyInst(t *types.Type) {
+ if p.checkFullyInst[t] {
+ return
+ }
+ p.checkFullyInst[t] = true
+
+ if t.IsFullyInstantiated() && !t.HasShape() && !t.IsInterface() && t.Methods().Len() > 0 {
+ // For any fully-instantiated type, the relevant
+ // dictionaries and shape instantiations will have
+ // already been created or are in the import data.
+ // Make sure that they are exported, so that any
+ // other package that inlines this function will have
+ // them available for import, and so will not need
+ // another round of method and dictionary
+ // instantiation after inlining.
+ baseType := t.OrigType()
+ shapes := make([]*types.Type, len(t.RParams()))
+ for i, t1 := range t.RParams() {
+ shapes[i] = Shapify(t1, i, baseType.RParams()[i])
+ }
+ for j, tmethod := range t.Methods().Slice() {
+ baseNname := baseType.Methods().Slice()[j].Nname.(*ir.Name)
+ dictsym := MakeDictSym(baseNname.Sym(), t.RParams(), true)
+ if dictsym.Def == nil {
+ in := Resolve(ir.NewIdent(src.NoXPos, dictsym))
+ dictsym = in.Sym()
+ }
+ Export(dictsym.Def.(*ir.Name))
+ methsym := MakeFuncInstSym(baseNname.Sym(), shapes, false, true)
+ if methsym.Def == nil {
+ in := Resolve(ir.NewIdent(src.NoXPos, methsym))
+ methsym = in.Sym()
+ }
+ methNode := methsym.Def.(*ir.Name)
+ Export(methNode)
+ if HaveInlineBody(methNode.Func) {
+ // Export the body as well if
+ // instantiation is inlineable.
+ ImportedBody(methNode.Func)
+ methNode.Func.SetExportInline(true)
+ }
+ // Make sure that any associated types are also exported. (See #52279)
+ p.checkForFullyInst(tmethod.Type)
+ }
+ }
+
+ // Descend into the type. We descend even if it is a fully-instantiated type,
+ // since the instantiated type may have other instantiated types inside of
+ // it (in fields, methods, etc.).
+ switch t.Kind() {
+ case types.TPTR, types.TARRAY, types.TSLICE:
+ p.checkForFullyInst(t.Elem())
+
+ case types.TCHAN:
+ p.checkForFullyInst(t.Elem())
+
+ case types.TMAP:
+ p.checkForFullyInst(t.Key())
+ p.checkForFullyInst(t.Elem())
+
+ case types.TSTRUCT:
+ if t.IsFuncArgStruct() {
+ break
+ }
+ for _, f := range t.FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+
+ case types.TFUNC:
+ if recv := t.Recv(); recv != nil {
+ p.checkForFullyInst(t.Recv().Type)
+ }
+ for _, f := range t.Params().FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+ for _, f := range t.Results().FieldSlice() {
+ p.checkForFullyInst(f.Type)
+ }
+
+ case types.TINTER:
+ for _, f := range t.AllMethods().Slice() {
+ p.checkForFullyInst(f.Type)
+ }
+ }
+}
+
+// markInlBody marks n's inline body for export and recursively
+// ensures all called functions are marked too.
+func (p *crawler) markInlBody(n *ir.Name) {
+ if n == nil {
+ return
+ }
+ if n.Op() != ir.ONAME || n.Class != ir.PFUNC {
+ base.Fatalf("markInlBody: unexpected %v, %v, %v", n, n.Op(), n.Class)
+ }
+ fn := n.Func
+ if fn == nil {
+ base.Fatalf("markInlBody: missing Func on %v", n)
+ }
+ if !HaveInlineBody(fn) {
+ return
+ }
+
+ if fn.ExportInline() {
+ return
+ }
+ fn.SetExportInline(true)
+
+ ImportedBody(fn)
+
+ var doFlood func(n ir.Node)
+ doFlood = func(n ir.Node) {
+ t := n.Type()
+ if t != nil {
+ if t.HasTParam() {
+ // If any generic types are used, then make sure that
+ // the methods of the generic type are exported and
+ // scanned for other possible exports.
+ p.markGeneric(t)
+ } else {
+ p.checkForFullyInst(t)
+ }
+ if base.Debug.Unified == 0 {
+ // If a method of un-exported type is promoted and accessible by
+ // embedding in an exported type, it makes that type reachable.
+ //
+ // Example:
+ //
+ // type t struct {}
+ // func (t) M() {}
+ //
+ // func F() interface{} { return struct{ t }{} }
+ //
+ // We generate the wrapper for "struct{ t }".M, and inline call
+ // to "struct{ t }".M, which makes "t.M" reachable.
+ if t.IsStruct() {
+ for _, f := range t.FieldSlice() {
+ if f.Embedded != 0 {
+ p.markEmbed(f.Type)
+ }
+ }
+ }
+ }
+ }
+
+ switch n.Op() {
+ case ir.OMETHEXPR, ir.ODOTMETH:
+ p.markInlBody(ir.MethodExprName(n))
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ switch n.Class {
+ case ir.PFUNC:
+ p.markInlBody(n)
+ // Note: this Export() and the one below seem unneeded,
+ // since any function/extern name encountered in an
+ // exported function body will be exported
+ // automatically via qualifiedIdent() in iexport.go.
+ Export(n)
+ case ir.PEXTERN:
+ Export(n)
+ }
+ case ir.OMETHVALUE:
+ // Okay, because we don't yet inline indirect
+ // calls to method values.
+ case ir.OCLOSURE:
+ // VisitList doesn't visit closure bodies, so force a
+ // recursive call to VisitList on the body of the closure.
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doFlood)
+ }
+ }
+
+ // Recursively identify all referenced functions for
+ // reexport. We want to include even non-called functions,
+ // because after inlining they might be callable.
+ ir.VisitList(fn.Inl.Body, doFlood)
+}
+
+// isPtrFullyInstantiated reports whether t is a fully-instantiated type or a
+// pointer to a fully-instantiated type.
+func isPtrFullyInstantiated(t *types.Type) bool {
+ return t.IsPtr() && t.Elem().IsFullyInstantiated() ||
+ t.IsFullyInstantiated()
+}
diff --git a/src/cmd/compile/internal/typecheck/dcl.go b/src/cmd/compile/internal/typecheck/dcl.go
new file mode 100644
index 0000000..68ab05a
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/dcl.go
@@ -0,0 +1,504 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "sync"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var DeclContext ir.Class = ir.PEXTERN // PEXTERN/PAUTO
+
+func DeclFunc(sym *types.Sym, tfn ir.Ntype) *ir.Func {
+ if tfn.Op() != ir.OTFUNC {
+ base.Fatalf("expected OTFUNC node, got %v", tfn)
+ }
+
+ fn := ir.NewFunc(base.Pos)
+ fn.Nname = ir.NewNameAt(base.Pos, sym)
+ fn.Nname.Func = fn
+ fn.Nname.Defn = fn
+ fn.Nname.Ntype = tfn
+ ir.MarkFunc(fn.Nname)
+ StartFuncBody(fn)
+ fn.Nname.Ntype = typecheckNtype(fn.Nname.Ntype)
+ return fn
+}
+
+// Declare records that Node n declares symbol n.Sym in the specified
+// declaration context.
+func Declare(n *ir.Name, ctxt ir.Class) {
+ if ir.IsBlank(n) {
+ return
+ }
+
+ s := n.Sym()
+
+ // kludgy: TypecheckAllowed means we're past parsing. E.g., reflectdata.methodWrapper may declare out-of-package names later.
+ if !inimport && !TypecheckAllowed && s.Pkg != types.LocalPkg {
+ base.ErrorfAt(n.Pos(), "cannot declare name %v", s)
+ }
+
+ if ctxt == ir.PEXTERN {
+ if s.Name == "init" {
+ base.ErrorfAt(n.Pos(), "cannot declare init - must be func")
+ }
+ if s.Name == "main" && s.Pkg.Name == "main" {
+ base.ErrorfAt(n.Pos(), "cannot declare main - must be func")
+ }
+ Target.Externs = append(Target.Externs, n)
+ } else {
+ if ir.CurFunc == nil && ctxt == ir.PAUTO {
+ base.Pos = n.Pos()
+ base.Fatalf("automatic outside function")
+ }
+ if ir.CurFunc != nil && ctxt != ir.PFUNC && n.Op() == ir.ONAME {
+ ir.CurFunc.Dcl = append(ir.CurFunc.Dcl, n)
+ }
+ types.Pushdcl(s)
+ n.Curfn = ir.CurFunc
+ }
+
+ if ctxt == ir.PAUTO {
+ n.SetFrameOffset(0)
+ }
+
+ if s.Block == types.Block {
+ // functype will print errors about duplicate function arguments.
+ // Don't repeat the error here.
+ if ctxt != ir.PPARAM && ctxt != ir.PPARAMOUT {
+ Redeclared(n.Pos(), s, "in this block")
+ }
+ }
+
+ s.Block = types.Block
+ s.Lastlineno = base.Pos
+ s.Def = n
+ n.Class = ctxt
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
+ }
+
+ autoexport(n, ctxt)
+}
+
+// Export marks n for export (or reexport).
+func Export(n *ir.Name) {
+ if n.Sym().OnExportList() {
+ return
+ }
+ n.Sym().SetOnExportList(true)
+
+ if base.Flag.E != 0 {
+ fmt.Printf("export symbol %v\n", n.Sym())
+ }
+
+ Target.Exports = append(Target.Exports, n)
+}
+
+// Redeclared emits a diagnostic about symbol s being redeclared at pos.
+func Redeclared(pos src.XPos, s *types.Sym, where string) {
+ if !s.Lastlineno.IsKnown() {
+ var pkgName *ir.PkgName
+ if s.Def == nil {
+ for id, pkg := range DotImportRefs {
+ if id.Sym().Name == s.Name {
+ pkgName = pkg
+ break
+ }
+ }
+ } else {
+ pkgName = DotImportRefs[s.Def.(*ir.Ident)]
+ }
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
+ "\t%v: previous declaration during import %q", s, where, base.FmtPos(pkgName.Pos()), pkgName.Pkg.Path)
+ } else {
+ prevPos := s.Lastlineno
+
+ // When an import and a declaration collide in separate files,
+ // present the import as the "redeclared", because the declaration
+ // is visible where the import is, but not vice versa.
+ // See issue 4510.
+ if s.Def == nil {
+ pos, prevPos = prevPos, pos
+ }
+
+ base.ErrorfAt(pos, "%v redeclared %s\n"+
+ "\t%v: previous declaration", s, where, base.FmtPos(prevPos))
+ }
+}
+
+// StartFuncBody declares the function proper and its arguments.
+// It is called in extern-declaration context and returns in
+// auto-declaration context.
+func StartFuncBody(fn *ir.Func) {
+ // change the declaration context from extern to auto
+ funcStack = append(funcStack, funcStackEnt{ir.CurFunc, DeclContext})
+ ir.CurFunc = fn
+ DeclContext = ir.PAUTO
+
+ types.Markdcl()
+
+ if fn.Nname.Ntype != nil {
+ funcargs(fn.Nname.Ntype.(*ir.FuncType))
+ } else {
+ funcargs2(fn.Type())
+ }
+}
+
+// FinishFuncBody finishes the function body.
+// It is called in auto-declaration context and returns in
+// extern-declaration context.
+func FinishFuncBody() {
+ // change the declaration context from auto to previous context
+ types.Popdcl()
+ var e funcStackEnt
+ funcStack, e = funcStack[:len(funcStack)-1], funcStack[len(funcStack)-1]
+ ir.CurFunc, DeclContext = e.curfn, e.dclcontext
+}
+
+func CheckFuncStack() {
+ if len(funcStack) != 0 {
+ base.Fatalf("funcStack is non-empty: %v", len(funcStack))
+ }
+}
+
+// Add a method, declared as a function.
+// - msym is the method symbol
+// - t is the function type (with receiver)
+// Returns a pointer to the existing or added Field; or nil if there's an error.
+func addmethod(n *ir.Func, msym *types.Sym, t *types.Type, local, nointerface bool) *types.Field {
+ if msym == nil {
+ base.Fatalf("no method symbol")
+ }
+
+ // get parent type sym
+ rf := t.Recv() // ptr to this structure
+ if rf == nil {
+ base.Errorf("missing receiver")
+ return nil
+ }
+
+ mt := types.ReceiverBaseType(rf.Type)
+ if mt == nil || mt.Sym() == nil {
+ pa := rf.Type
+ t := pa
+ if t != nil && t.IsPtr() {
+ if t.Sym() != nil {
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+ return nil
+ }
+ t = t.Elem()
+ }
+
+ switch {
+ case t == nil || t.Broke():
+ // rely on typecheck having complained before
+ case t.Sym() == nil:
+ base.Errorf("invalid receiver type %v (%v is not a defined type)", pa, t)
+ case t.IsPtr():
+ base.Errorf("invalid receiver type %v (%v is a pointer type)", pa, t)
+ case t.IsInterface():
+ base.Errorf("invalid receiver type %v (%v is an interface type)", pa, t)
+ default:
+ // Should have picked off all the reasons above,
+ // but just in case, fall back to generic error.
+ base.Errorf("invalid receiver type %v (%L / %L)", pa, pa, t)
+ }
+ return nil
+ }
+
+ if local && mt.Sym().Pkg != types.LocalPkg {
+ base.Errorf("cannot define new methods on non-local type %v", mt)
+ return nil
+ }
+
+ if msym.IsBlank() {
+ return nil
+ }
+
+ if mt.IsStruct() {
+ for _, f := range mt.Fields().Slice() {
+ if f.Sym == msym {
+ base.Errorf("type %v has both field and method named %v", mt, msym)
+ f.SetBroke(true)
+ return nil
+ }
+ }
+ }
+
+ for _, f := range mt.Methods().Slice() {
+ if msym.Name != f.Sym.Name {
+ continue
+ }
+ // types.Identical only checks that incoming and result parameters match,
+ // so explicitly check that the receiver parameters match too.
+ if !types.Identical(t, f.Type) || !types.Identical(t.Recv().Type, f.Type.Recv().Type) {
+ base.Errorf("method redeclared: %v.%v\n\t%v\n\t%v", mt, msym, f.Type, t)
+ }
+ return f
+ }
+
+ f := types.NewField(base.Pos, msym, t)
+ f.Nname = n.Nname
+ f.SetNointerface(nointerface)
+
+ mt.Methods().Append(f)
+ return f
+}
+
+func autoexport(n *ir.Name, ctxt ir.Class) {
+ if n.Sym().Pkg != types.LocalPkg {
+ return
+ }
+ if (ctxt != ir.PEXTERN && ctxt != ir.PFUNC) || DeclContext != ir.PEXTERN {
+ return
+ }
+ if n.Type() != nil && n.Type().IsKind(types.TFUNC) && ir.IsMethod(n) {
+ return
+ }
+
+ if types.IsExported(n.Sym().Name) || n.Sym().Name == "init" {
+ Export(n)
+ }
+ if base.Flag.AsmHdr != "" && !n.Sym().Asm() {
+ n.Sym().SetAsm(true)
+ Target.Asms = append(Target.Asms, n)
+ }
+}
+
+// checkdupfields emits errors for fields or methods with duplicate names in
+// a list of struct or interface types.
+func checkdupfields(what string, fss ...[]*types.Field) {
+ seen := make(map[*types.Sym]bool)
+ for _, fs := range fss {
+ for _, f := range fs {
+ if f.Sym == nil || f.Sym.IsBlank() {
+ continue
+ }
+ if seen[f.Sym] {
+ base.ErrorfAt(f.Pos, "duplicate %s %s", what, f.Sym.Name)
+ continue
+ }
+ seen[f.Sym] = true
+ }
+ }
+}
+
+// structs, functions, and methods.
+// they don't belong here, but where do they belong?
+func checkembeddedtype(t *types.Type) {
+ if t == nil {
+ return
+ }
+
+ if t.Sym() == nil && t.IsPtr() {
+ t = t.Elem()
+ if t.IsInterface() {
+ base.Errorf("embedded type cannot be a pointer to interface")
+ }
+ }
+
+ if t.IsPtr() || t.IsUnsafePtr() {
+ base.Errorf("embedded type cannot be a pointer")
+ } else if t.Kind() == types.TFORW && !t.ForwardType().Embedlineno.IsKnown() {
+ t.ForwardType().Embedlineno = base.Pos
+ }
+}
+
+var funcStack []funcStackEnt // stack of previous values of ir.CurFunc/DeclContext
+
+type funcStackEnt struct {
+ curfn *ir.Func
+ dclcontext ir.Class
+}
+
+func funcarg(n *ir.Field, ctxt ir.Class) {
+ if n.Sym == nil {
+ return
+ }
+
+ name := ir.NewNameAt(n.Pos, n.Sym)
+ n.Decl = name
+ name.Ntype = n.Ntype
+ Declare(name, ctxt)
+}
+
+func funcarg2(f *types.Field, ctxt ir.Class) {
+ if f.Sym == nil {
+ return
+ }
+ n := ir.NewNameAt(f.Pos, f.Sym)
+ f.Nname = n
+ n.SetType(f.Type)
+ Declare(n, ctxt)
+}
+
+func funcargs(nt *ir.FuncType) {
+ if nt.Op() != ir.OTFUNC {
+ base.Fatalf("funcargs %v", nt.Op())
+ }
+
+ // declare the receiver and in arguments.
+ if nt.Recv != nil {
+ funcarg(nt.Recv, ir.PPARAM)
+ }
+ for _, n := range nt.Params {
+ funcarg(n, ir.PPARAM)
+ }
+
+ // declare the out arguments.
+ for i, n := range nt.Results {
+ if n.Sym == nil {
+ // Name so that escape analysis can track it. ~r stands for 'result'.
+ n.Sym = LookupNum("~r", i)
+ } else if n.Sym.IsBlank() {
+ // Give it a name so we can assign to it during return. ~b stands for 'blank'.
+ // The name must be different from ~r above because if you have
+ // func f() (_ int)
+ // func g() int
+ // f is allowed to use a plain 'return' with no arguments, while g is not.
+ // So the two cases must be distinguished.
+ n.Sym = LookupNum("~b", i)
+ }
+
+ funcarg(n, ir.PPARAMOUT)
+ }
+}
+
+// funcargs2 is the same as funcargs, except it runs over an already constructed TFUNC.
+// This happens during import, where the hidden_fndcl rule has
+// used functype directly to parse the function's type.
+func funcargs2(t *types.Type) {
+ if t.Kind() != types.TFUNC {
+ base.Fatalf("funcargs2 %v", t)
+ }
+
+ for _, f := range t.Recvs().Fields().Slice() {
+ funcarg2(f, ir.PPARAM)
+ }
+ for _, f := range t.Params().Fields().Slice() {
+ funcarg2(f, ir.PPARAM)
+ }
+ for _, f := range t.Results().Fields().Slice() {
+ funcarg2(f, ir.PPARAMOUT)
+ }
+}
+
+func Temp(t *types.Type) *ir.Name {
+ return TempAt(base.Pos, ir.CurFunc, t)
+}
+
+// TempAt makes a new temporary Node, off the books.
+func TempAt(pos src.XPos, curfn *ir.Func, t *types.Type) *ir.Name {
+ if curfn == nil {
+ base.Fatalf("no curfn for TempAt")
+ }
+ if curfn.Op() == ir.OCLOSURE {
+ ir.Dump("TempAt", curfn)
+ base.Fatalf("adding TempAt to wrong closure function")
+ }
+ if t == nil {
+ base.Fatalf("TempAt called with nil type")
+ }
+ if t.Kind() == types.TFUNC && t.Recv() != nil {
+ base.Fatalf("misuse of method type: %v", t)
+ }
+
+ s := &types.Sym{
+ Name: autotmpname(len(curfn.Dcl)),
+ Pkg: types.LocalPkg,
+ }
+ n := ir.NewNameAt(pos, s)
+ s.Def = n
+ n.SetType(t)
+ n.SetTypecheck(1)
+ n.Class = ir.PAUTO
+ n.SetEsc(ir.EscNever)
+ n.Curfn = curfn
+ n.SetUsed(true)
+ n.SetAutoTemp(true)
+ curfn.Dcl = append(curfn.Dcl, n)
+
+ types.CalcSize(t)
+
+ return n
+}
+
+var (
+ autotmpnamesmu sync.Mutex
+ autotmpnames []string
+)
+
+// autotmpname returns the name for an autotmp variable numbered n.
+func autotmpname(n int) string {
+ autotmpnamesmu.Lock()
+ defer autotmpnamesmu.Unlock()
+
+ // Grow autotmpnames, if needed.
+ if n >= len(autotmpnames) {
+ autotmpnames = append(autotmpnames, make([]string, n+1-len(autotmpnames))...)
+ autotmpnames = autotmpnames[:cap(autotmpnames)]
+ }
+
+ s := autotmpnames[n]
+ if s == "" {
+ // Give each tmp a different name so that they can be registerized.
+ // Add a preceding . to avoid clashing with legal names.
+ prefix := ".autotmp_%d"
+
+ // In quirks mode, pad out the number to stabilize variable
+ // sorting. This ensures autotmps 8 and 9 sort the same way even
+ // if they get renumbered to 9 and 10, respectively.
+ if base.Debug.UnifiedQuirks != 0 {
+ prefix = ".autotmp_%06d"
+ }
+
+ s = fmt.Sprintf(prefix, n)
+ autotmpnames[n] = s
+ }
+ return s
+}
+
+// NewMethodType builds a function type from the method signature sig,
+// with the receiver recv (if non-nil) prepended as the first parameter.
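+//
+// For example (illustrative): for a method of T with signature sig
+// func(x int) bool, NewMethodType(sig, T) yields func(T, int) bool, and
+// NewMethodType(sig, nil) yields func(int) bool.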
+func NewMethodType(sig *types.Type, recv *types.Type) *types.Type {
+ if sig.HasTParam() {
+ base.Fatalf("NewMethodType with type parameters in signature %+v", sig)
+ }
+ if recv != nil && recv.HasTParam() {
+ base.Fatalf("NewMethodType with type parameters in receiver %+v", recv)
+ }
+ nrecvs := 0
+ if recv != nil {
+ nrecvs++
+ }
+
+ // TODO(mdempsky): Move this function to types.
+ // TODO(mdempsky): Preserve positions, names, and package from sig+recv.
+
+ params := make([]*types.Field, nrecvs+sig.Params().Fields().Len())
+ if recv != nil {
+ params[0] = types.NewField(base.Pos, nil, recv)
+ }
+ for i, param := range sig.Params().Fields().Slice() {
+ d := types.NewField(base.Pos, nil, param.Type)
+ d.SetIsDDD(param.IsDDD())
+ params[nrecvs+i] = d
+ }
+
+ results := make([]*types.Field, sig.Results().Fields().Len())
+ for i, t := range sig.Results().Fields().Slice() {
+ results[i] = types.NewField(base.Pos, nil, t.Type)
+ }
+
+ return types.NewSignature(types.LocalPkg, nil, nil, params, results)
+}
diff --git a/src/cmd/compile/internal/typecheck/export.go b/src/cmd/compile/internal/typecheck/export.go
new file mode 100644
index 0000000..30726d4
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/export.go
@@ -0,0 +1,74 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// importalias declares symbol s as an imported type alias with type t.
+func importalias(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.OTYPE, ir.PEXTERN, t)
+}
+
+// importconst declares symbol s as an imported constant with type t and value val.
+func importconst(pos src.XPos, s *types.Sym, t *types.Type, val constant.Value) *ir.Name {
+ n := importobj(pos, s, ir.OLITERAL, ir.PEXTERN, t)
+ n.SetVal(val)
+ return n
+}
+
+// importfunc declares symbol s as an imported function with type t.
+func importfunc(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ n := importobj(pos, s, ir.ONAME, ir.PFUNC, t)
+ n.Func = ir.NewFunc(pos)
+ n.Func.Nname = n
+ return n
+}
+
+// importobj declares symbol s as an imported object representable by op.
+func importobj(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class, t *types.Type) *ir.Name {
+ n := importsym(pos, s, op, ctxt)
+ n.SetType(t)
+ if ctxt == ir.PFUNC {
+ n.Sym().SetFunc(true)
+ }
+ return n
+}
+
+func importsym(pos src.XPos, s *types.Sym, op ir.Op, ctxt ir.Class) *ir.Name {
+ if n := s.PkgDef(); n != nil {
+ base.Fatalf("importsym of symbol that already exists: %v", n)
+ }
+
+ n := ir.NewDeclNameAt(pos, op, s)
+ n.Class = ctxt // TODO(mdempsky): Move this into NewDeclNameAt too?
+ s.SetPkgDef(n)
+ return n
+}
+
+// importtype returns the named type declared by symbol s.
+// If no such type has been declared yet, a forward declaration is returned.
+func importtype(pos src.XPos, s *types.Sym) *ir.Name {
+ n := importsym(pos, s, ir.OTYPE, ir.PEXTERN)
+ n.SetType(types.NewNamed(n))
+ return n
+}
+
+// importvar declares symbol s as an imported variable with type t.
+func importvar(pos src.XPos, s *types.Sym, t *types.Type) *ir.Name {
+ return importobj(pos, s, ir.ONAME, ir.PEXTERN, t)
+}
diff --git a/src/cmd/compile/internal/typecheck/expr.go b/src/cmd/compile/internal/typecheck/expr.go
new file mode 100644
index 0000000..eb316d3
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/expr.go
@@ -0,0 +1,917 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// tcAddr typechecks an OADDR node.
+func tcAddr(n *ir.AddrExpr) ir.Node {
+ n.X = Expr(n.X)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ switch n.X.Op() {
+ case ir.OARRAYLIT, ir.OMAPLIT, ir.OSLICELIT, ir.OSTRUCTLIT:
+ n.SetOp(ir.OPTRLIT)
+
+ default:
+ checklvalue(n.X, "take the address of")
+ r := ir.OuterValue(n.X)
+ if r.Op() == ir.ONAME {
+ r := r.(*ir.Name)
+ if ir.Orig(r) != r {
+ base.Fatalf("found non-orig name node %v", r) // TODO(mdempsky): What does this mean?
+ }
+ }
+ n.X = DefaultLit(n.X, nil)
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.SetType(types.NewPtr(n.X.Type()))
+ return n
+}
+
+func tcShift(n, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+
+ r = DefaultLit(r, types.Types[types.TUINT])
+ t := r.Type()
+ if !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift count type %v, must be integer)", n, r.Type())
+ return l, r, nil
+ }
+ if t.IsSigned() && !types.AllowsGoVersion(curpkg(), 1, 13) {
+ base.ErrorfVers("go1.13", "invalid operation: %v (signed shift count type %v)", n, r.Type())
+ return l, r, nil
+ }
+ t = l.Type()
+ if t != nil && t.Kind() != types.TIDEAL && !t.IsInteger() {
+ base.Errorf("invalid operation: %v (shift of type %v)", n, t)
+ return l, r, nil
+ }
+
+ // no DefaultLit for left
+ // the outer context gives the type
+ t = l.Type()
+ if (l.Type() == types.UntypedFloat || l.Type() == types.UntypedComplex) && r.Op() == ir.OLITERAL {
+ t = types.UntypedInt
+ }
+ return l, r, t
+}
+
+// tcArith typechecks operands of a binary arithmetic expression.
+// The result of tcArith MUST be assigned back to the original operands;
+// t is the type of the expression and should be set by the caller, e.g.:
+// n.X, n.Y, t = tcArith(n, op, n.X, n.Y)
+// n.SetType(t)
+func tcArith(n ir.Node, op ir.Op, l, r ir.Node) (ir.Node, ir.Node, *types.Type) {
+ l, r = defaultlit2(l, r, false)
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+ t := l.Type()
+ if t.Kind() == types.TIDEAL {
+ t = r.Type()
+ }
+ aop := ir.OXXX
+ if n.Op().IsCmp() && t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ // comparison is okay as long as one side is
+ // assignable to the other. convert so they have
+ // the same type.
+ //
+ // the only conversion that isn't a no-op is concrete == interface.
+ // in that case, check comparability of the concrete type.
+ // The conversion allocates, so only do it if the concrete type is huge.
+ converted := false
+ if r.Type().Kind() != types.TBLANK {
+ aop, _ = Assignop(l.Type(), r.Type())
+ if aop != ir.OXXX {
+ if r.Type().IsInterface() && !l.Type().IsInterface() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(l.Type()))
+ return l, r, nil
+ }
+
+ types.CalcSize(l.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || l.Type().Size() >= 1<<16 {
+ l = ir.NewConvExpr(base.Pos, aop, r.Type(), l)
+ l.SetTypecheck(1)
+ }
+
+ t = r.Type()
+ converted = true
+ }
+ }
+
+ if !converted && l.Type().Kind() != types.TBLANK {
+ aop, _ = Assignop(r.Type(), l.Type())
+ if aop != ir.OXXX {
+ if l.Type().IsInterface() && !r.Type().IsInterface() && !types.IsComparable(r.Type()) {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(r.Type()))
+ return l, r, nil
+ }
+
+ types.CalcSize(r.Type())
+ if r.Type().IsInterface() == l.Type().IsInterface() || r.Type().Size() >= 1<<16 {
+ r = ir.NewConvExpr(base.Pos, aop, l.Type(), r)
+ r.SetTypecheck(1)
+ }
+
+ t = l.Type()
+ }
+ }
+ }
+
+ if t.Kind() != types.TIDEAL && !types.Identical(l.Type(), r.Type()) {
+ l, r = defaultlit2(l, r, true)
+ if l.Type() == nil || r.Type() == nil {
+ return l, r, nil
+ }
+ if l.Type().IsInterface() == r.Type().IsInterface() || aop == 0 {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ return l, r, nil
+ }
+ }
+
+ if t.Kind() == types.TIDEAL {
+ t = mixUntyped(l.Type(), r.Type())
+ }
+ if dt := defaultType(t); !okfor[op][dt.Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, op, typekind(t))
+ return l, r, nil
+ }
+
+ // okfor allows any array == array, map == map, func == func.
+ // restrict to slice/map/func == nil and nil == slice/map/func.
+ if l.Type().IsArray() && !types.IsComparable(l.Type()) {
+ base.Errorf("invalid operation: %v (%v cannot be compared)", n, l.Type())
+ return l, r, nil
+ }
+
+ if l.Type().IsSlice() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (slice can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().IsMap() && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (map can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().Kind() == types.TFUNC && !ir.IsNil(l) && !ir.IsNil(r) {
+ base.Errorf("invalid operation: %v (func can only be compared to nil)", n)
+ return l, r, nil
+ }
+
+ if l.Type().IsStruct() {
+ if f := types.IncomparableField(l.Type()); f != nil {
+ base.Errorf("invalid operation: %v (struct containing %v cannot be compared)", n, f.Type)
+ return l, r, nil
+ }
+ }
+
+ if (op == ir.ODIV || op == ir.OMOD) && ir.IsConst(r, constant.Int) {
+ if constant.Sign(r.Val()) == 0 {
+ base.Errorf("division by zero")
+ return l, r, nil
+ }
+ }
+
+ return l, r, t
+}
+
+// The result of tcCompLit MUST be assigned back to n, e.g.
+// n.Left = tcCompLit(n.Left)
+func tcCompLit(n *ir.CompLitExpr) (res ir.Node) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcCompLit", n)(&res)
+ }
+
+ lno := base.Pos
+ defer func() {
+ base.Pos = lno
+ }()
+
+ if n.Ntype == nil {
+ base.ErrorfAt(n.Pos(), "missing type in composite literal")
+ n.SetType(nil)
+ return n
+ }
+
+ // Save original node (including n.Right)
+ n.SetOrig(ir.Copy(n))
+
+ ir.SetPos(n.Ntype)
+
+ // Need to handle [...]T arrays specially.
+ if array, ok := n.Ntype.(*ir.ArrayType); ok && array.Elem != nil && array.Len == nil {
+ array.Elem = typecheckNtype(array.Elem)
+ elemType := array.Elem.Type()
+ if elemType == nil {
+ n.SetType(nil)
+ return n
+ }
+ length := typecheckarraylit(elemType, -1, n.List, "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.SetType(types.NewArray(elemType, length))
+ n.Ntype = nil
+ return n
+ }
+
+ n.Ntype = typecheckNtype(n.Ntype)
+ t := n.Ntype.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+
+ switch t.Kind() {
+ default:
+ base.Errorf("invalid composite literal type %v", t)
+ n.SetType(nil)
+
+ case types.TARRAY:
+ typecheckarraylit(t.Elem(), t.NumElem(), n.List, "array literal")
+ n.SetOp(ir.OARRAYLIT)
+ n.Ntype = nil
+
+ case types.TSLICE:
+ length := typecheckarraylit(t.Elem(), -1, n.List, "slice literal")
+ n.SetOp(ir.OSLICELIT)
+ n.Ntype = nil
+ n.Len = length
+
+ case types.TMAP:
+ var cs constSet
+ for i3, l := range n.List {
+ ir.SetPos(l)
+ if l.Op() != ir.OKEY {
+ n.List[i3] = Expr(l)
+ base.Errorf("missing key in map literal")
+ continue
+ }
+ l := l.(*ir.KeyExpr)
+
+ r := l.Key
+ r = pushtype(r, t.Key())
+ r = Expr(r)
+ l.Key = AssignConv(r, t.Key(), "map key")
+ cs.add(base.Pos, l.Key, "key", "map literal")
+
+ r = l.Value
+ r = pushtype(r, t.Elem())
+ r = Expr(r)
+ l.Value = AssignConv(r, t.Elem(), "map value")
+ }
+
+ n.SetOp(ir.OMAPLIT)
+ n.Ntype = nil
+
+ case types.TSTRUCT:
+ // Need valid field offsets for Xoffset below.
+ types.CalcSize(t)
+
+ errored := false
+ if len(n.List) != 0 && nokeys(n.List) {
+ // simple list of variables
+ ls := n.List
+ for i, n1 := range ls {
+ ir.SetPos(n1)
+ n1 = Expr(n1)
+ ls[i] = n1
+ if i >= t.NumFields() {
+ if !errored {
+ base.Errorf("too many values in %v", n)
+ errored = true
+ }
+ continue
+ }
+
+ f := t.Field(i)
+ s := f.Sym
+
+ // Do the test for assigning to unexported fields.
+ // But if this is an instantiated function, then
+ // the function has already been typechecked. In
+ // that case, don't do the test, since it can fail
+ // for the closure structs created in
+ // walkClosure(), because the instantiated
+ // function is compiled as if in the source
+ // package of the generic function.
+ if !(ir.CurFunc != nil && strings.Index(ir.CurFunc.Nname.Sym().Name, "[") >= 0) {
+ if s != nil && !types.IsExported(s.Name) && s.Pkg != types.LocalPkg {
+ base.Errorf("implicit assignment of unexported field '%s' in %v literal", s.Name, t)
+ }
+ }
+ // No pushtype allowed here. Must name fields for that.
+ n1 = AssignConv(n1, f.Type, "field value")
+ ls[i] = ir.NewStructKeyExpr(base.Pos, f, n1)
+ }
+ if len(ls) < t.NumFields() {
+ base.Errorf("too few values in %v", n)
+ }
+ } else {
+ hash := make(map[string]bool)
+
+ // keyed list
+ ls := n.List
+ for i, n := range ls {
+ ir.SetPos(n)
+
+ sk, ok := n.(*ir.StructKeyExpr)
+ if !ok {
+ kv, ok := n.(*ir.KeyExpr)
+ if !ok {
+ if !errored {
+ base.Errorf("mixture of field:value and value initializers")
+ errored = true
+ }
+ ls[i] = Expr(n)
+ continue
+ }
+
+ sk = tcStructLitKey(t, kv)
+ if sk == nil {
+ continue
+ }
+
+ fielddup(sk.Sym().Name, hash)
+ }
+
+ // No pushtype allowed here. Tried and rejected.
+ sk.Value = Expr(sk.Value)
+ sk.Value = AssignConv(sk.Value, sk.Field.Type, "field value")
+ ls[i] = sk
+ }
+ }
+
+ n.SetOp(ir.OSTRUCTLIT)
+ n.Ntype = nil
+ }
+
+ return n
+}
+
+// tcStructLitKey typechecks an OKEY node that appeared within a
+// struct literal.
+func tcStructLitKey(typ *types.Type, kv *ir.KeyExpr) *ir.StructKeyExpr {
+ key := kv.Key
+
+ // Sym might have resolved to a name in another top-level
+ // package, because of a dot import. Redirect to the correct sym
+ // before we do the lookup.
+ sym := key.Sym()
+ if id, ok := key.(*ir.Ident); ok && DotImportRefs[id] != nil {
+ sym = Lookup(sym.Name)
+ }
+
+ // An OXDOT uses the Sym field to hold
+ // the field to the right of the dot,
+ // so sym will be non-nil, but an OXDOT
+ // is never a valid struct literal key.
+ if sym == nil || sym.Pkg != types.LocalPkg || key.Op() == ir.OXDOT || sym.IsBlank() {
+ base.Errorf("invalid field name %v in struct initializer", key)
+ return nil
+ }
+
+ if f := Lookdot1(nil, sym, typ, typ.Fields(), 0); f != nil {
+ return ir.NewStructKeyExpr(kv.Pos(), f, kv.Value)
+ }
+
+ if ci := Lookdot1(nil, sym, typ, typ.Fields(), 2); ci != nil { // Case-insensitive lookup.
+ if visible(ci.Sym) {
+ base.Errorf("unknown field '%v' in struct literal of type %v (but does have %v)", sym, typ, ci.Sym)
+ } else if nonexported(sym) && sym.Name == ci.Sym.Name { // Ensure exactness before the suggestion.
+ base.Errorf("cannot refer to unexported field '%v' in struct literal of type %v", sym, typ)
+ } else {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ }
+ return nil
+ }
+
+ var f *types.Field
+ p, _ := dotpath(sym, typ, &f, true)
+ if p == nil || f.IsMethod() {
+ base.Errorf("unknown field '%v' in struct literal of type %v", sym, typ)
+ return nil
+ }
+
+ // dotpath returns the parent embedded types in reverse order.
+ var ep []string
+ for ei := len(p) - 1; ei >= 0; ei-- {
+ ep = append(ep, p[ei].field.Sym.Name)
+ }
+ ep = append(ep, sym.Name)
+ base.Errorf("cannot use promoted field %v in struct literal of type %v", strings.Join(ep, "."), typ)
+ return nil
+}
+
+// tcConv typechecks an OCONV node.
+func tcConv(n *ir.ConvExpr) ir.Node {
+ types.CheckSize(n.Type()) // ensure width is calculated for backend
+ n.X = Expr(n.X)
+ n.X = convlit1(n.X, n.Type(), true, nil)
+ t := n.X.Type()
+ if t == nil || n.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ op, why := Convertop(n.X.Op() == ir.OLITERAL, t, n.Type())
+ if op == ir.OXXX {
+ if !n.Diag() && !n.Type().Broke() && !n.X.Diag() {
+ base.Errorf("cannot convert %L to type %v%s", n.X, n.Type(), why)
+ n.SetDiag(true)
+ }
+ n.SetOp(ir.OCONV)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetOp(op)
+ switch n.Op() {
+ case ir.OCONVNOP:
+ if t.Kind() == n.Type().Kind() {
+ switch t.Kind() {
+ case types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128:
+ // Floating point casts imply rounding and
+ // so the conversion must be kept.
+ n.SetOp(ir.OCONV)
+ }
+ }
+
+ // do not convert to []byte literal. See CL 125796.
+ // The generated code and compiler memory footprint are better without it.
+ case ir.OSTR2BYTES:
+ // ok
+
+ case ir.OSTR2RUNES:
+ if n.X.Op() == ir.OLITERAL {
+ return stringtoruneslit(n)
+ }
+
+ case ir.OBYTES2STR:
+ if t.Elem() != types.ByteType && t.Elem() != types.Types[types.TUINT8] {
+ // If t is a slice of a user-defined byte type B (not uint8
+ // or byte), then add an extra CONVNOP from []B to []byte, so
+ // that the call to slicebytetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.ByteType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ case ir.ORUNES2STR:
+ if t.Elem() != types.RuneType && t.Elem() != types.Types[types.TINT32] {
+ // If t is a slice of a user-defined rune type B (not uint32
+ // or rune), then add an extra CONVNOP from []B to []rune, so
+ // that the call to slicerunetostring() added in walk will
+ // typecheck correctly.
+ n.X = ir.NewConvExpr(n.X.Pos(), ir.OCONVNOP, types.NewSlice(types.RuneType), n.X)
+ n.X.SetTypecheck(1)
+ }
+
+ }
+ return n
+}
+
+// tcDot typechecks an OXDOT or ODOT node.
+func tcDot(n *ir.SelectorExpr, top int) ir.Node {
+ if n.Op() == ir.OXDOT {
+ n = AddImplicitDots(n)
+ n.SetOp(ir.ODOT)
+ if n.X == nil {
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ n.X = typecheck(n.X, ctxExpr|ctxType)
+ n.X = DefaultLit(n.X, nil)
+
+ t := n.X.Type()
+ if t == nil {
+ base.UpdateErrorDot(ir.Line(n), fmt.Sprint(n.X), fmt.Sprint(n))
+ n.SetType(nil)
+ return n
+ }
+
+ if n.X.Op() == ir.OTYPE {
+ return typecheckMethodExpr(n)
+ }
+
+ if t.IsPtr() && !t.Elem().IsInterface() {
+ t = t.Elem()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.SetOp(ir.ODOTPTR)
+ types.CheckSize(t)
+ }
+
+ if n.Sel.IsBlank() {
+ base.Errorf("cannot refer to blank field or method")
+ n.SetType(nil)
+ return n
+ }
+
+ if Lookdot(n, t, 0) == nil {
+ // Legitimate field or method lookup failed, try to explain the error
+ switch {
+ case t.IsEmptyInterface():
+ base.Errorf("%v undefined (type %v is interface with no methods)", n, n.X.Type())
+
+ case t.IsPtr() && t.Elem().IsInterface():
+ // Pointer to interface is almost always a mistake.
+ base.Errorf("%v undefined (type %v is pointer to interface, not interface)", n, n.X.Type())
+
+ case Lookdot(n, t, 1) != nil:
+ // Field or method matches by name, but it is not exported.
+ base.Errorf("%v undefined (cannot refer to unexported field or method %v)", n, n.Sel)
+
+ default:
+ if mt := Lookdot(n, t, 2); mt != nil && visible(mt.Sym) { // Case-insensitive lookup.
+ base.Errorf("%v undefined (type %v has no field or method %v, but does have %v)", n, n.X.Type(), n.Sel, mt.Sym)
+ } else {
+ base.Errorf("%v undefined (type %v has no field or method %v)", n, n.X.Type(), n.Sel)
+ }
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if (n.Op() == ir.ODOTINTER || n.Op() == ir.ODOTMETH) && top&ctxCallee == 0 {
+ n.SetOp(ir.OMETHVALUE)
+ n.SetType(NewMethodType(n.Type(), nil))
+ }
+ return n
+}
+
+// tcDotType typechecks an ODOTTYPE node.
+func tcDotType(n *ir.TypeAssertExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsInterface() {
+ base.Errorf("invalid type assertion: %v (non-interface type %v on left)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if n.Ntype != nil {
+ n.Ntype = typecheckNtype(n.Ntype)
+ n.SetType(n.Ntype.Type())
+ n.Ntype = nil
+ if n.Type() == nil {
+ return n
+ }
+ }
+
+ if n.Type() != nil && !n.Type().IsInterface() {
+ var missing, have *types.Field
+ var ptr int
+ if !implements(n.Type(), t, &missing, &have, &ptr) {
+ if have != nil && have.Sym == missing.Sym {
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (%v method has pointer receiver)", n.Type(), t, missing.Sym)
+ } else if have != nil {
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", n.Type(), t, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ base.Errorf("impossible type assertion:\n\t%v does not implement %v (missing %v method)", n.Type(), t, missing.Sym)
+ }
+ n.SetType(nil)
+ return n
+ }
+ }
+ return n
+}
+
+// tcITab typechecks an OITAB node.
+func tcITab(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ t := n.X.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsInterface() {
+ base.Fatalf("OITAB of %v", t)
+ }
+ n.SetType(types.NewPtr(types.Types[types.TUINTPTR]))
+ return n
+}
+
+// tcIndex typechecks an OINDEX node.
+func tcIndex(n *ir.IndexExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.X = implicitstar(n.X)
+ l := n.X
+ n.Index = Expr(n.Index)
+ r := n.Index
+ t := l.Type()
+ if t == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ switch t.Kind() {
+ default:
+ base.Errorf("invalid operation: %v (type %v does not support indexing)", n, t)
+ n.SetType(nil)
+ return n
+
+ case types.TSTRING, types.TARRAY, types.TSLICE:
+ n.Index = indexlit(n.Index)
+ if t.IsString() {
+ n.SetType(types.ByteType)
+ } else {
+ n.SetType(t.Elem())
+ }
+ why := "string"
+ if t.IsArray() {
+ why = "array"
+ } else if t.IsSlice() {
+ why = "slice"
+ }
+
+ if n.Index.Type() != nil && !n.Index.Type().IsInteger() {
+ base.Errorf("non-integer %s index %v", why, n.Index)
+ return n
+ }
+
+ if !n.Bounded() && ir.IsConst(n.Index, constant.Int) {
+ x := n.Index.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid %s index %v (index must be non-negative)", why, n.Index)
+ } else if t.IsArray() && constant.Compare(x, token.GEQ, constant.MakeInt64(t.NumElem())) {
+ base.Errorf("invalid array index %v (out of bounds for %d-element array)", n.Index, t.NumElem())
+ } else if ir.IsConst(n.X, constant.String) && constant.Compare(x, token.GEQ, constant.MakeInt64(int64(len(ir.StringVal(n.X))))) {
+ base.Errorf("invalid string index %v (out of bounds for %d-byte string)", n.Index, len(ir.StringVal(n.X)))
+ } else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid %s index %v (index too large)", why, n.Index)
+ }
+ }
+
+ case types.TMAP:
+ n.Index = AssignConv(n.Index, t.Key(), "map index")
+ n.SetType(t.Elem())
+ n.SetOp(ir.OINDEXMAP)
+ n.Assigned = false
+ }
+ return n
+}
+
+// tcLenCap typechecks an OLEN or OCAP node.
+func tcLenCap(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.X = implicitstar(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ var ok bool
+ if n.Op() == ir.OLEN {
+ ok = okforlen[t.Kind()]
+ } else {
+ ok = okforcap[t.Kind()]
+ }
+ if !ok {
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(types.Types[types.TINT])
+ return n
+}
+
+// tcRecv typechecks an ORECV node.
+func tcRecv(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (receive from non-chan type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if !t.ChanDir().CanRecv() {
+ base.Errorf("invalid operation: %v (receive from send-only type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t.Elem())
+ return n
+}
+
+// tcSPtr typechecks an OSPTR node.
+func tcSPtr(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ t := n.X.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsSlice() && !t.IsString() {
+ base.Fatalf("OSPTR of %v", t)
+ }
+ if t.IsString() {
+ n.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ } else {
+ n.SetType(types.NewPtr(t.Elem()))
+ }
+ return n
+}
+
+// tcSlice typechecks an OSLICE or OSLICE3 node.
+func tcSlice(n *ir.SliceExpr) ir.Node {
+ n.X = DefaultLit(Expr(n.X), nil)
+ n.Low = indexlit(Expr(n.Low))
+ n.High = indexlit(Expr(n.High))
+ n.Max = indexlit(Expr(n.Max))
+ hasmax := n.Op().IsSlice3()
+ l := n.X
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if l.Type().IsArray() {
+ if !ir.IsAddressable(n.X) {
+ base.Errorf("invalid operation %v (slice of unaddressable value)", n)
+ n.SetType(nil)
+ return n
+ }
+
+ addr := NodAddr(n.X)
+ addr.SetImplicit(true)
+ n.X = Expr(addr)
+ l = n.X
+ }
+ t := l.Type()
+ var tp *types.Type
+ if t.IsString() {
+ if hasmax {
+ base.Errorf("invalid operation %v (3-index slice of string)", n)
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(t)
+ n.SetOp(ir.OSLICESTR)
+ } else if t.IsPtr() && t.Elem().IsArray() {
+ tp = t.Elem()
+ n.SetType(types.NewSlice(tp.Elem()))
+ types.CalcSize(n.Type())
+ if hasmax {
+ n.SetOp(ir.OSLICE3ARR)
+ } else {
+ n.SetOp(ir.OSLICEARR)
+ }
+ } else if t.IsSlice() {
+ n.SetType(t)
+ } else {
+ base.Errorf("cannot slice %v (type %v)", l, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if n.Low != nil && !checksliceindex(l, n.Low, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if n.High != nil && !checksliceindex(l, n.High, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if n.Max != nil && !checksliceindex(l, n.Max, tp) {
+ n.SetType(nil)
+ return n
+ }
+ if !checksliceconst(n.Low, n.High) || !checksliceconst(n.Low, n.Max) || !checksliceconst(n.High, n.Max) {
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcSliceHeader typechecks an OSLICEHEADER node.
+func tcSliceHeader(n *ir.SliceHeaderExpr) ir.Node {
+ // Errors here are Fatalf instead of Errorf because only the compiler
+ // can construct an OSLICEHEADER node.
+ // Components used in OSLICEHEADER that are supplied by parsed source code
+ // have already been typechecked in e.g. OMAKESLICE earlier.
+ t := n.Type()
+ if t == nil {
+ base.Fatalf("no type specified for OSLICEHEADER")
+ }
+
+ if !t.IsSlice() {
+ base.Fatalf("invalid type %v for OSLICEHEADER", n.Type())
+ }
+
+ if n.Ptr == nil || n.Ptr.Type() == nil || !n.Ptr.Type().IsUnsafePtr() {
+ base.Fatalf("need unsafe.Pointer for OSLICEHEADER")
+ }
+
+ n.Ptr = Expr(n.Ptr)
+ n.Len = DefaultLit(Expr(n.Len), types.Types[types.TINT])
+ n.Cap = DefaultLit(Expr(n.Cap), types.Types[types.TINT])
+
+ if ir.IsConst(n.Len, constant.Int) && ir.Int64Val(n.Len) < 0 {
+ base.Fatalf("len for OSLICEHEADER must be non-negative")
+ }
+
+ if ir.IsConst(n.Cap, constant.Int) && ir.Int64Val(n.Cap) < 0 {
+ base.Fatalf("cap for OSLICEHEADER must be non-negative")
+ }
+
+ if ir.IsConst(n.Len, constant.Int) && ir.IsConst(n.Cap, constant.Int) && constant.Compare(n.Len.Val(), token.GTR, n.Cap.Val()) {
+ base.Fatalf("len larger than cap for OSLICEHEADER")
+ }
+
+ return n
+}
+
+// tcStar typechecks an ODEREF node, which may be an expression or a type.
+func tcStar(n *ir.StarExpr, top int) ir.Node {
+ n.X = typecheck(n.X, ctxExpr|ctxType)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if l.Op() == ir.OTYPE {
+ n.SetOTYPE(types.NewPtr(l.Type()))
+ // Ensure l.Type gets CalcSize'd for the backend. Issue 20174.
+ types.CheckSize(l.Type())
+ return n
+ }
+
+ if !t.IsPtr() {
+ if top&(ctxExpr|ctxStmt) != 0 {
+ base.Errorf("invalid indirect of %L", n.X)
+ n.SetType(nil)
+ return n
+ }
+ base.Errorf("%v is not a type", l)
+ return n
+ }
+
+ n.SetType(t.Elem())
+ return n
+}
+
+// tcUnaryArith typechecks a unary arithmetic expression.
+func tcUnaryArith(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !okfor[n.Op()][defaultType(t).Kind()] {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(t))
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/func.go b/src/cmd/compile/internal/typecheck/func.go
new file mode 100644
index 0000000..57b15b7
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/func.go
@@ -0,0 +1,977 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// MakeDotArgs packages all the arguments that match a ...T parameter into a []T.
+func MakeDotArgs(pos src.XPos, typ *types.Type, args []ir.Node) ir.Node {
+ var n ir.Node
+ if len(args) == 0 {
+ n = ir.NewNilExpr(pos)
+ n.SetType(typ)
+ } else {
+ args = append([]ir.Node(nil), args...)
+ lit := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(typ), args)
+ lit.SetImplicit(true)
+ n = lit
+ }
+
+ n = Expr(n)
+ if n.Type() == nil {
+ base.FatalfAt(pos, "mkdotargslice: typecheck failed")
+ }
+ return n
+}
+
+// FixVariadicCall rewrites calls to variadic functions to use an
+// explicit ... argument if one is not already present.
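+//
+// For example (illustrative): with f declared as func(xs ...int), the call
+// f(1, 2, 3) is rewritten to f([]int{1, 2, 3}...).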
+func FixVariadicCall(call *ir.CallExpr) {
+ fntype := call.X.Type()
+ if !fntype.IsVariadic() || call.IsDDD {
+ return
+ }
+
+ vi := fntype.NumParams() - 1
+ vt := fntype.Params().Field(vi).Type
+
+ args := call.Args
+ extra := args[vi:]
+ slice := MakeDotArgs(call.Pos(), vt, extra)
+ for i := range extra {
+ extra[i] = nil // allow GC
+ }
+
+ call.Args = append(args[:vi], slice)
+ call.IsDDD = true
+}
+
+// FixMethodCall rewrites a method call t.M(...) into a function call T.M(t, ...).
+func FixMethodCall(call *ir.CallExpr) {
+ if call.X.Op() != ir.ODOTMETH {
+ return
+ }
+
+ dot := call.X.(*ir.SelectorExpr)
+
+ fn := Expr(ir.NewSelectorExpr(dot.Pos(), ir.OXDOT, ir.TypeNode(dot.X.Type()), dot.Selection.Sym))
+
+ args := make([]ir.Node, 1+len(call.Args))
+ args[0] = dot.X
+ copy(args[1:], call.Args)
+
+ call.SetOp(ir.OCALLFUNC)
+ call.X = fn
+ call.Args = args
+}
+
+// ClosureType returns the struct type used to hold all the information
+// needed in the closure for clo (clo must be an OCLOSURE node).
+// The address of a variable of the returned type can be cast to a func.
+func ClosureType(clo *ir.ClosureExpr) *types.Type {
+ // Create closure in the form of a composite literal.
+ // supposing the closure captures an int i and a string s
+ // and has one float64 argument and no results,
+ // the generated code looks like:
+ //
+ // clos = &struct{.F uintptr; i *int; s *string}{func.1, &i, &s}
+ //
+ // The use of the struct provides type information to the garbage
+ // collector so that it can walk the closure. We could use (in this case)
+ // [3]unsafe.Pointer instead, but that would leave the gc in the dark.
+ // The information appears in the binary in the form of type descriptors;
+ // the struct is unnamed so that closures in multiple packages with the
+ // same struct type can share the descriptor.
+
+ // Make sure the .F field is in the same package as the rest of the
+ // fields. This deals with closures in instantiated functions, which are
+ // compiled as if from the source package of the generic function.
+ var pkg *types.Pkg
+ if len(clo.Func.ClosureVars) == 0 {
+ pkg = types.LocalPkg
+ } else {
+ for _, v := range clo.Func.ClosureVars {
+ if pkg == nil {
+ pkg = v.Sym().Pkg
+ } else if pkg != v.Sym().Pkg {
+ base.Fatalf("Closure variables from multiple packages")
+ }
+ }
+ }
+
+ fields := []*types.Field{
+ types.NewField(base.Pos, pkg.Lookup(".F"), types.Types[types.TUINTPTR]),
+ }
+ for _, v := range clo.Func.ClosureVars {
+ typ := v.Type()
+ if !v.Byval() {
+ typ = types.NewPtr(typ)
+ }
+ fields = append(fields, types.NewField(base.Pos, v.Sym(), typ))
+ }
+ typ := types.NewStruct(types.NoPkg, fields)
+ typ.SetNoalg(true)
+ return typ
+}
+
+// MethodValueType returns the struct type used to hold all the information
+// needed in the closure for an OMETHVALUE node. The address of a variable of
+// the returned type can be cast to a func.
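+//
+// For example (illustrative): for a method value t.M where t has type T, the
+// returned type is struct{ F uintptr; R T }.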
+func MethodValueType(n *ir.SelectorExpr) *types.Type {
+ t := types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(base.Pos, Lookup("F"), types.Types[types.TUINTPTR]),
+ types.NewField(base.Pos, Lookup("R"), n.X.Type()),
+ })
+ t.SetNoalg(true)
+ return t
+}
+
+// inTypeCheckInl is true if we are typechecking an inline body in ImportedBody
+// below. We use this flag to avoid creating a new closure function in tcClosure
+// when we are just typechecking an inline body, as opposed to the body of a
+// real function.
+var inTypeCheckInl bool
+
+// ImportedBody returns immediately if the inlining information for fn is
+// populated. Otherwise, fn must be an imported function. If so, ImportedBody
+// loads in the dcls and body for fn, and typechecks as needed.
+func ImportedBody(fn *ir.Func) {
+ if fn.Inl.Body != nil {
+ return
+ }
+ lno := ir.SetPos(fn.Nname)
+
+ // When we load an inlined body, we need to allow OADDR
+ // operations on untyped expressions. We will fix the
+ // addrtaken flags on all the arguments of the OADDR with the
+ // computeAddrtaken call below (after we typecheck the body).
+ // TODO: export/import types and addrtaken marks along with inlined bodies,
+ // so this will be unnecessary.
+ IncrementalAddrtaken = false
+ defer func() {
+ if DirtyAddrtaken {
+ // We do ComputeAddrTaken on function instantiations, but not
+ // generic functions (since we may not yet know if x in &x[i]
+ // is an array or a slice).
+ if !fn.Type().HasTParam() {
+ ComputeAddrtaken(fn.Inl.Body) // compute addrtaken marks once types are available
+ }
+ DirtyAddrtaken = false
+ }
+ IncrementalAddrtaken = true
+ }()
+
+ ImportBody(fn)
+
+ // Stmts(fn.Inl.Body) below is only for imported functions;
+ // their bodies may refer to unsafe as long as the package
+ // was marked safe during import (which was checked then).
+// The ->inl of a local function has been typechecked before CanInline copied it.
+ pkg := fnpkg(fn.Nname)
+
+ if pkg == types.LocalPkg || pkg == nil {
+ return // ImportedBody on local function
+ }
+
+ if base.Flag.LowerM > 2 || base.Debug.Export != 0 {
+ fmt.Printf("typecheck import [%v] %L { %v }\n", fn.Sym(), fn, ir.Nodes(fn.Inl.Body))
+ }
+
+ if !go117ExportTypes {
+ // If we didn't export & import types, typecheck the code here.
+ savefn := ir.CurFunc
+ ir.CurFunc = fn
+ if inTypeCheckInl {
+ base.Fatalf("inTypeCheckInl should not be set recursively")
+ }
+ inTypeCheckInl = true
+ Stmts(fn.Inl.Body)
+ inTypeCheckInl = false
+ ir.CurFunc = savefn
+ }
+
+ base.Pos = lno
+}
+
+// Get the function's package. For ordinary functions it's on the ->sym, but for imported methods
+// the ->sym can be re-used in the local package, so peel it off the receiver's type.
+func fnpkg(fn *ir.Name) *types.Pkg {
+ if ir.IsMethod(fn) {
+ // method
+ rcvr := fn.Type().Recv().Type
+
+ if rcvr.IsPtr() {
+ rcvr = rcvr.Elem()
+ }
+ if rcvr.Sym() == nil {
+ base.Fatalf("receiver with no sym: [%v] %L (%v)", fn.Sym(), fn, rcvr)
+ }
+ return rcvr.Sym().Pkg
+ }
+
+ // non-method
+ return fn.Sym().Pkg
+}
+
+// tcClosure typechecks an OCLOSURE node. It also creates the named
+// function associated with the closure.
+// TODO: This creation of the named function should probably really be done in a
+// separate pass from type-checking.
+func tcClosure(clo *ir.ClosureExpr, top int) ir.Node {
+ fn := clo.Func
+
+ // We used to allow IR builders to typecheck the underlying Func
+ // themselves, but that led to too much variety and inconsistency
+ // around who's responsible for naming the function, typechecking
+ // it, or adding it to Target.Decls.
+ //
+ // It's now all or nothing. Callers are still allowed to do these
+ // themselves, but then they assume responsibility for all of them.
+ if fn.Typecheck() == 1 {
+ base.FatalfAt(fn.Pos(), "underlying closure func already typechecked: %v", fn)
+ }
+
+ // Set the current associated iota value, so iota can be used inside
+ // a function in a ConstSpec; see issue #22344.
+ if x := getIotaValue(); x >= 0 {
+ fn.Iota = x
+ }
+
+ ir.NameClosure(clo, ir.CurFunc)
+ Func(fn)
+
+ // Type check the body now, but only if we're inside a function.
+ // At top level (in a variable initialization: curfn==nil) we're not
+ // ready to type check code yet; we'll check it later, because the
+ // underlying closure function we create is added to Target.Decls.
+ if ir.CurFunc != nil {
+ oldfn := ir.CurFunc
+ ir.CurFunc = fn
+ Stmts(fn.Body)
+ ir.CurFunc = oldfn
+ }
+
+ out := 0
+ for _, v := range fn.ClosureVars {
+ if v.Type() == nil {
+ // If v.Type is nil, it means v looked like it was going to be
+ // used in the closure, but isn't. This happens in struct
+ // literals like s{f: x} where we can't distinguish whether f is
+ // a field identifier or expression until resolving s.
+ continue
+ }
+
+ // type check closed variables outside the closure, so that the
+ // outer frame also captures them.
+ Expr(v.Outer)
+
+ fn.ClosureVars[out] = v
+ out++
+ }
+ fn.ClosureVars = fn.ClosureVars[:out]
+
+ clo.SetType(fn.Type())
+
+ target := Target
+ if inTypeCheckInl {
+ // We're typechecking an imported function, so it's not actually
+ // part of Target. Skip adding it to Target.Decls so we don't
+ // compile it again.
+ target = nil
+ }
+
+ return ir.UseClosure(clo, target)
+}
+
+// tcFunc type checks a function definition.
+// To be called by typecheck, not directly.
+// (Call typecheck.Func instead.)
+func tcFunc(n *ir.Func) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcFunc", n)(nil)
+ }
+
+ n.Nname = AssignExpr(n.Nname).(*ir.Name)
+ t := n.Nname.Type()
+ if t == nil {
+ return
+ }
+ rcvr := t.Recv()
+ if rcvr != nil && n.Shortname != nil {
+ m := addmethod(n, n.Shortname, t, true, n.Pragma&ir.Nointerface != 0)
+ if m == nil {
+ return
+ }
+
+ n.Nname.SetSym(ir.MethodSym(rcvr.Type, n.Shortname))
+ Declare(n.Nname, ir.PFUNC)
+ }
+}
+
+// tcCall typechecks an OCALL node.
+func tcCall(n *ir.CallExpr, top int) ir.Node {
+ Stmts(n.Init()) // imported rewritten f(g()) calls (#30907)
+ n.X = typecheck(n.X, ctxExpr|ctxType|ctxCallee)
+ if n.X.Diag() {
+ n.SetDiag(true)
+ }
+
+ l := n.X
+
+ if l.Op() == ir.ONAME && l.(*ir.Name).BuiltinOp != 0 {
+ l := l.(*ir.Name)
+ if n.IsDDD && l.BuiltinOp != ir.OAPPEND {
+ base.Errorf("invalid use of ... with builtin %v", l)
+ }
+
+ // builtin: OLEN, OCAP, etc.
+ switch l.BuiltinOp {
+ default:
+ base.Fatalf("unknown builtin %v", l)
+
+ case ir.OAPPEND, ir.ODELETE, ir.OMAKE, ir.OPRINT, ir.OPRINTN, ir.ORECOVER:
+ n.SetOp(l.BuiltinOp)
+ n.X = nil
+ n.SetTypecheck(0) // re-typechecking new op is OK, not a loop
+ return typecheck(n, top)
+
+ case ir.OCAP, ir.OCLOSE, ir.OIMAG, ir.OLEN, ir.OPANIC, ir.OREAL:
+ typecheckargs(n)
+ fallthrough
+ case ir.ONEW, ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ arg, ok := needOneArg(n, "%v", n.Op())
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ u := ir.NewUnaryExpr(n.Pos(), l.BuiltinOp, arg)
+ return typecheck(ir.InitExpr(n.Init(), u), top) // typecheckargs can add to old.Init
+
+ case ir.OCOMPLEX, ir.OCOPY, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ typecheckargs(n)
+ arg1, arg2, ok := needTwoArgs(n)
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+ b := ir.NewBinaryExpr(n.Pos(), l.BuiltinOp, arg1, arg2)
+ return typecheck(ir.InitExpr(n.Init(), b), top) // typecheckargs can add to old.Init
+ }
+ panic("unreachable")
+ }
+
+ n.X = DefaultLit(n.X, nil)
+ l = n.X
+ if l.Op() == ir.OTYPE {
+ if n.IsDDD {
+ if !l.Type().Broke() {
+ base.Errorf("invalid use of ... in type conversion to %v", l.Type())
+ }
+ n.SetDiag(true)
+ }
+
+ // pick off before type-checking arguments
+ arg, ok := needOneArg(n, "conversion to %v", l.Type())
+ if !ok {
+ n.SetType(nil)
+ return n
+ }
+
+ n := ir.NewConvExpr(n.Pos(), ir.OCONV, nil, arg)
+ n.SetType(l.Type())
+ return tcConv(n)
+ }
+
+ typecheckargs(n)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ types.CheckSize(t)
+
+ switch l.Op() {
+ case ir.ODOTINTER:
+ n.SetOp(ir.OCALLINTER)
+
+ case ir.ODOTMETH:
+ l := l.(*ir.SelectorExpr)
+ n.SetOp(ir.OCALLMETH)
+
+ // typecheckaste was used here but there wasn't enough
+ // information further down the call chain to know if we
+ // were testing a method receiver for unexported fields.
+ // It isn't necessary, so just do a sanity check.
+ tp := t.Recv().Type
+
+ if l.X == nil || !types.Identical(l.X.Type(), tp) {
+ base.Fatalf("method receiver")
+ }
+
+ default:
+ n.SetOp(ir.OCALLFUNC)
+ if t.Kind() != types.TFUNC {
+ if o := ir.Orig(l); o.Name() != nil && types.BuiltinPkg.Lookup(o.Sym().Name).Def != nil {
+ // be more specific when the non-function
+ // name matches a predeclared function
+ base.Errorf("cannot call non-function %L, declared at %s",
+ l, base.FmtPos(o.Name().Pos()))
+ } else {
+ base.Errorf("cannot call non-function %L", l)
+ }
+ n.SetType(nil)
+ return n
+ }
+ }
+
+ typecheckaste(ir.OCALL, n.X, n.IsDDD, t.Params(), n.Args, func() string { return fmt.Sprintf("argument to %v", n.X) })
+ FixMethodCall(n)
+ if t.NumResults() == 0 {
+ return n
+ }
+ if t.NumResults() == 1 {
+ n.SetType(l.Type().Results().Field(0).Type)
+
+ if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.ONAME {
+ if sym := n.X.(*ir.Name).Sym(); types.IsRuntimePkg(sym.Pkg) && sym.Name == "getg" {
+ // Emit code for runtime.getg() directly instead of calling function.
+ // Most such rewrites (for example the similar one for math.Sqrt) should be done in walk,
+ // so that the ordering pass can make sure to preserve the semantics of the original code
+ // (in particular, the exact time of the function call) by introducing temporaries.
+ // In this case, we know getg() always returns the same result within a given function
+ // and we want to avoid the temporaries, so we do the rewrite earlier than is typical.
+ n.SetOp(ir.OGETG)
+ }
+ }
+ return n
+ }
+
+ // multiple return
+ if top&(ctxMultiOK|ctxStmt) == 0 {
+ base.Errorf("multiple-value %v() in single-value context", l)
+ return n
+ }
+
+ n.SetType(l.Type().Results())
+ return n
+}
+
+// tcAppend typechecks an OAPPEND node.
+func tcAppend(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing arguments to append")
+ n.SetType(nil)
+ return n
+ }
+
+ t := args[0].Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(t)
+ if !t.IsSlice() {
+ if ir.IsNil(args[0]) {
+ base.Errorf("first argument to append must be typed slice; have untyped nil")
+ n.SetType(nil)
+ return n
+ }
+
+ base.Errorf("first argument to append must be slice; have %L", t)
+ n.SetType(nil)
+ return n
+ }
+
+ if n.IsDDD {
+ if len(args) == 1 {
+ base.Errorf("cannot use ... on first argument to append")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) != 2 {
+ base.Errorf("too many arguments to append")
+ n.SetType(nil)
+ return n
+ }
+
+ if t.Elem().IsKind(types.TUINT8) && args[1].Type().IsString() {
+ args[1] = DefaultLit(args[1], types.Types[types.TSTRING])
+ return n
+ }
+
+ args[1] = AssignConv(args[1], t.Underlying(), "append")
+ return n
+ }
+
+ as := args[1:]
+ for i, n := range as {
+ if n.Type() == nil {
+ continue
+ }
+ as[i] = AssignConv(n, t.Elem(), "append")
+ types.CheckSize(as[i].Type()) // ensure width is calculated for backend
+ }
+ return n
+}
+
+// tcClose typechecks an OCLOSE node.
+func tcClose(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (non-chan type %v)", n, t)
+ n.SetType(nil)
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ base.Errorf("invalid operation: %v (cannot close receive-only channel)", n)
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcComplex typechecks an OCOMPLEX node.
+func tcComplex(n *ir.BinaryExpr) ir.Node {
+ l := Expr(n.X)
+ r := Expr(n.Y)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ l, r = defaultlit2(l, r, false)
+ if l.Type() == nil || r.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.X = l
+ n.Y = r
+
+ if !types.Identical(l.Type(), r.Type()) {
+ base.Errorf("invalid operation: %v (mismatched types %v and %v)", n, l.Type(), r.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ var t *types.Type
+ switch l.Type().Kind() {
+ default:
+ base.Errorf("invalid operation: %v (arguments have type %v, expected floating-point)", n, l.Type())
+ n.SetType(nil)
+ return n
+
+ case types.TIDEAL:
+ t = types.UntypedComplex
+
+ case types.TFLOAT32:
+ t = types.Types[types.TCOMPLEX64]
+
+ case types.TFLOAT64:
+ t = types.Types[types.TCOMPLEX128]
+ }
+ n.SetType(t)
+ return n
+}
+
+// tcCopy typechecks an OCOPY node.
+func tcCopy(n *ir.BinaryExpr) ir.Node {
+ n.SetType(types.Types[types.TINT])
+ n.X = Expr(n.X)
+ n.X = DefaultLit(n.X, nil)
+ n.Y = Expr(n.Y)
+ n.Y = DefaultLit(n.Y, nil)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ // copy([]byte, string)
+ if n.X.Type().IsSlice() && n.Y.Type().IsString() {
+ if types.Identical(n.X.Type().Elem(), types.ByteType) {
+ return n
+ }
+ base.Errorf("arguments to copy have different element types: %L and string", n.X.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ if !n.X.Type().IsSlice() || !n.Y.Type().IsSlice() {
+ if !n.X.Type().IsSlice() && !n.Y.Type().IsSlice() {
+ base.Errorf("arguments to copy must be slices; have %L, %L", n.X.Type(), n.Y.Type())
+ } else if !n.X.Type().IsSlice() {
+ base.Errorf("first argument to copy should be slice; have %L", n.X.Type())
+ } else {
+ base.Errorf("second argument to copy should be slice or string; have %L", n.Y.Type())
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if !types.Identical(n.X.Type().Elem(), n.Y.Type().Elem()) {
+ base.Errorf("arguments to copy have different element types: %L and %L", n.X.Type(), n.Y.Type())
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcDelete typechecks an ODELETE node.
+func tcDelete(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing arguments to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) == 1 {
+ base.Errorf("missing second (key) argument to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ if len(args) != 2 {
+ base.Errorf("too many arguments to delete")
+ n.SetType(nil)
+ return n
+ }
+
+ l := args[0]
+ r := args[1]
+ if l.Type() != nil && !l.Type().IsMap() {
+ base.Errorf("first argument to delete must be map; have %L", l.Type())
+ n.SetType(nil)
+ return n
+ }
+
+ args[1] = AssignConv(r, l.Type().Key(), "delete")
+ return n
+}
+
+// tcMake typechecks an OMAKE node.
+func tcMake(n *ir.CallExpr) ir.Node {
+ args := n.Args
+ if len(args) == 0 {
+ base.Errorf("missing argument to make")
+ n.SetType(nil)
+ return n
+ }
+
+ n.Args = nil
+ l := args[0]
+ l = typecheck(l, ctxType)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ i := 1
+ var nn ir.Node
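+	// Dispatch on the kind of type being made: make([]T, len[, cap]),
+	// make(map[K]V[, size]), or make(chan T[, buffer]).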
+ switch t.Kind() {
+ default:
+ base.Errorf("cannot make type %v", t)
+ n.SetType(nil)
+ return n
+
+ case types.TSLICE:
+ if i >= len(args) {
+ base.Errorf("missing len argument to make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+
+ l = args[i]
+ i++
+ l = Expr(l)
+ var r ir.Node
+ if i < len(args) {
+ r = args[i]
+ i++
+ r = Expr(r)
+ }
+
+ if l.Type() == nil || (r != nil && r.Type() == nil) {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "len", &l) || r != nil && !checkmake(t, "cap", &r) {
+ n.SetType(nil)
+ return n
+ }
+ if ir.IsConst(l, constant.Int) && r != nil && ir.IsConst(r, constant.Int) && constant.Compare(l.Val(), token.GTR, r.Val()) {
+ base.Errorf("len larger than cap in make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKESLICE, l, r)
+
+ case types.TMAP:
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = Expr(l)
+ l = DefaultLit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "size", &l) {
+ n.SetType(nil)
+ return n
+ }
+ } else {
+ l = ir.NewInt(0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKEMAP, l, nil)
+ nn.SetEsc(n.Esc())
+
+ case types.TCHAN:
+ l = nil
+ if i < len(args) {
+ l = args[i]
+ i++
+ l = Expr(l)
+ l = DefaultLit(l, types.Types[types.TINT])
+ if l.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !checkmake(t, "buffer", &l) {
+ n.SetType(nil)
+ return n
+ }
+ } else {
+ l = ir.NewInt(0)
+ }
+ nn = ir.NewMakeExpr(n.Pos(), ir.OMAKECHAN, l, nil)
+ }
+
+ if i < len(args) {
+ base.Errorf("too many arguments to make(%v)", t)
+ n.SetType(nil)
+ return n
+ }
+
+ nn.SetType(t)
+ return nn
+}
+
+// tcMakeSliceCopy typechecks an OMAKESLICECOPY node.
+func tcMakeSliceCopy(n *ir.MakeExpr) ir.Node {
+ // Errors here are Fatalf instead of Errorf because only the compiler
+ // can construct an OMAKESLICECOPY node.
+	// Components used in OMAKESLICECOPY that are supplied by parsed source code
+ // have already been typechecked in OMAKE and OCOPY earlier.
+ t := n.Type()
+
+ if t == nil {
+ base.Fatalf("no type specified for OMAKESLICECOPY")
+ }
+
+ if !t.IsSlice() {
+ base.Fatalf("invalid type %v for OMAKESLICECOPY", n.Type())
+ }
+
+ if n.Len == nil {
+ base.Fatalf("missing len argument for OMAKESLICECOPY")
+ }
+
+ if n.Cap == nil {
+ base.Fatalf("missing slice argument to copy for OMAKESLICECOPY")
+ }
+
+ n.Len = Expr(n.Len)
+ n.Cap = Expr(n.Cap)
+
+ n.Len = DefaultLit(n.Len, types.Types[types.TINT])
+
+ if !n.Len.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer len argument in OMAKESLICECOPY")
+ }
+
+ if ir.IsConst(n.Len, constant.Int) {
+ if ir.ConstOverflow(n.Len.Val(), types.Types[types.TINT]) {
+ base.Fatalf("len for OMAKESLICECOPY too large")
+ }
+ if constant.Sign(n.Len.Val()) < 0 {
+ base.Fatalf("len for OMAKESLICECOPY must be non-negative")
+ }
+ }
+ return n
+}
+
+// tcNew typechecks an ONEW node.
+func tcNew(n *ir.UnaryExpr) ir.Node {
+ if n.X == nil {
+ // Fatalf because the OCALL above checked for us,
+ // so this must be an internally-generated mistake.
+ base.Fatalf("missing argument to new")
+ }
+ l := n.X
+ l = typecheck(l, ctxType)
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+ n.X = l
+ n.SetType(types.NewPtr(t))
+ return n
+}
+
+// tcPanic typechecks an OPANIC node.
+func tcPanic(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ n.X = AssignConv(n.X, types.Types[types.TINTER], "argument to panic")
+ if n.X.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcPrint typechecks an OPRINT or OPRINTN node.
+func tcPrint(n *ir.CallExpr) ir.Node {
+ typecheckargs(n)
+ ls := n.Args
+ for i1, n1 := range ls {
+ // Special case for print: int constant is int64, not int.
+ if ir.IsConst(n1, constant.Int) {
+ ls[i1] = DefaultLit(ls[i1], types.Types[types.TINT64])
+ } else {
+ ls[i1] = DefaultLit(ls[i1], nil)
+ }
+ }
+ return n
+}
+
+// tcRealImag typechecks an OREAL or OIMAG node.
+func tcRealImag(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ l := n.X
+ t := l.Type()
+ if t == nil {
+ n.SetType(nil)
+ return n
+ }
+
+ // Determine result type.
+ switch t.Kind() {
+ case types.TIDEAL:
+ n.SetType(types.UntypedFloat)
+ case types.TCOMPLEX64:
+ n.SetType(types.Types[types.TFLOAT32])
+ case types.TCOMPLEX128:
+ n.SetType(types.Types[types.TFLOAT64])
+ default:
+ base.Errorf("invalid argument %L for %v", l, n.Op())
+ n.SetType(nil)
+ return n
+ }
+ return n
+}
+
+// tcRecover typechecks an ORECOVER node.
+func tcRecover(n *ir.CallExpr) ir.Node {
+ if len(n.Args) != 0 {
+ base.Errorf("too many arguments to recover")
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetType(types.Types[types.TINTER])
+ return n
+}
+
+// tcRecoverFP typechecks an ORECOVERFP node.
+func tcRecoverFP(n *ir.CallExpr) ir.Node {
+ if len(n.Args) != 1 {
+ base.FatalfAt(n.Pos(), "wrong number of arguments: %v", n)
+ }
+
+ n.Args[0] = Expr(n.Args[0])
+ if !n.Args[0].Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.Args[0])
+ }
+
+ n.SetType(types.Types[types.TINTER])
+ return n
+}
+
+// tcUnsafeAdd typechecks an OUNSAFEADD node.
+func tcUnsafeAdd(n *ir.BinaryExpr) *ir.BinaryExpr {
+ if !types.AllowsGoVersion(curpkg(), 1, 17) {
+ base.ErrorfVers("go1.17", "unsafe.Add")
+ n.SetType(nil)
+ return n
+ }
+
+ n.X = AssignConv(Expr(n.X), types.Types[types.TUNSAFEPTR], "argument to unsafe.Add")
+ n.Y = DefaultLit(Expr(n.Y), types.Types[types.TINT])
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ if !n.Y.Type().IsInteger() {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(n.X.Type())
+ return n
+}
+
+// tcUnsafeSlice typechecks an OUNSAFESLICE node.
+func tcUnsafeSlice(n *ir.BinaryExpr) *ir.BinaryExpr {
+ if !types.AllowsGoVersion(curpkg(), 1, 17) {
+ base.ErrorfVers("go1.17", "unsafe.Slice")
+ n.SetType(nil)
+ return n
+ }
+
+ n.X = Expr(n.X)
+ n.Y = Expr(n.Y)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ t := n.X.Type()
+ if !t.IsPtr() {
+ base.Errorf("first argument to unsafe.Slice must be pointer; have %L", t)
+ } else if t.Elem().NotInHeap() {
+ // TODO(mdempsky): This can be relaxed, but should only affect the
+ // Go runtime itself. End users should only see //go:notinheap
+ // types due to incomplete C structs in cgo, and those types don't
+ // have a meaningful size anyway.
+ base.Errorf("unsafe.Slice of incomplete (or unallocatable) type not allowed")
+ }
+
+ if !checkunsafeslice(&n.Y) {
+ n.SetType(nil)
+ return n
+ }
+ n.SetType(types.NewSlice(t.Elem()))
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/iexport.go b/src/cmd/compile/internal/typecheck/iexport.go
new file mode 100644
index 0000000..e811368
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iexport.go
@@ -0,0 +1,2325 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package export.
+//
+// The indexed export data format is an evolution of the previous
+// binary export data format. Its chief contribution is introducing an
+// index table, which allows efficient random access of individual
+// declarations and inline function bodies. In turn, this allows
+// avoiding unnecessary work for compilation units that import large
+// packages.
+//
+//
+// The top-level data format is structured as:
+//
+// Header struct {
+// Tag byte // 'i'
+// Version uvarint
+// StringSize uvarint
+// DataSize uvarint
+// }
+//
+// Strings [StringSize]byte
+// Data [DataSize]byte
+//
+// MainIndex []struct{
+// PkgPath stringOff
+// PkgName stringOff
+// PkgHeight uvarint
+//
+// Decls []struct{
+// Name stringOff
+// Offset declOff
+// }
+// }
+//
+// Fingerprint [8]byte
+//
+// uvarint means a uint64 written out using uvarint encoding.
+//
+// []T means a uvarint followed by that many T objects. In other
+// words:
+//
+// Len uvarint
+// Elems [Len]T
+//
+// stringOff means a uvarint that indicates an offset within the
+// Strings section. At that offset is another uvarint, followed by
+// that many bytes, which form the string value.
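+//
+// As an illustrative sketch (not part of the format itself), a reader
+// could resolve a stringOff against the Strings section like so:
+//
+//	length, n := binary.Uvarint(strings[off:])
+//	s := string(strings[off+uint64(n):][:length])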
+//
+// declOff means a uvarint that indicates an offset within the Data
+// section where the associated declaration can be found.
+//
+//
+// There are five kinds of declarations, distinguished by their first
+// byte:
+//
+// type Var struct {
+// Tag byte // 'V'
+// Pos Pos
+// Type typeOff
+// }
+//
+// type Func struct {
+// Tag byte // 'F' or 'G'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'G'
+// Signature Signature
+// }
+//
+// type Const struct {
+// Tag byte // 'C'
+// Pos Pos
+// Value Value
+// }
+//
+// type Type struct {
+// Tag byte // 'T' or 'U'
+// Pos Pos
+// TypeParams []typeOff // only present if Tag == 'U'
+// Underlying typeOff
+//
+// Methods []struct{ // omitted if Underlying is an interface type
+// Pos Pos
+// Name stringOff
+// Recv Param
+// Signature Signature
+// }
+// }
+//
+// type Alias struct {
+// Tag byte // 'A'
+// Pos Pos
+// Type typeOff
+// }
+//
+// // "Automatic" declaration of each typeparam
+// type TypeParam struct {
+// Tag byte // 'P'
+// Pos Pos
+// Implicit bool
+// Constraint typeOff
+// }
+//
+// typeOff means a uvarint that either indicates a predeclared type,
+// or an offset into the Data section. If the uvarint is less than
+// predeclReserved, then it indicates the index into the predeclared
+// types list (see predeclared in bexport.go for order). Otherwise,
+// subtracting predeclReserved yields the offset of a type descriptor.
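+//
+// An illustrative decoding sketch, where predeclared and readTypeAt are
+// assumed reader-side helpers rather than part of this package:
+//
+//	if v < predeclReserved {
+//		t = predeclared[v]
+//	} else {
+//		t = readTypeAt(data, v-predeclReserved)
+//	}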
+//
+// Value means a type, kind, and type-specific value. See
+// (*exportWriter).value for details.
+//
+//
+// There are twelve kinds of type descriptors, distinguished by an itag:
+//
+// type DefinedType struct {
+// Tag itag // definedType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// type PointerType struct {
+// Tag itag // pointerType
+// Elem typeOff
+// }
+//
+// type SliceType struct {
+// Tag itag // sliceType
+// Elem typeOff
+// }
+//
+// type ArrayType struct {
+// Tag itag // arrayType
+// Len uint64
+// Elem typeOff
+// }
+//
+// type ChanType struct {
+// Tag itag // chanType
+// Dir uint64 // 1 RecvOnly; 2 SendOnly; 3 SendRecv
+// Elem typeOff
+// }
+//
+// type MapType struct {
+// Tag itag // mapType
+// Key typeOff
+// Elem typeOff
+// }
+//
+// type FuncType struct {
+// Tag itag // signatureType
+// PkgPath stringOff
+// Signature Signature
+// }
+//
+// type StructType struct {
+// Tag itag // structType
+// PkgPath stringOff
+// Fields []struct {
+// Pos Pos
+// Name stringOff
+// Type typeOff
+// Embedded bool
+// Note stringOff
+// }
+// }
+//
+// type InterfaceType struct {
+// Tag itag // interfaceType
+// PkgPath stringOff
+// Embeddeds []struct {
+// Pos Pos
+// Type typeOff
+// }
+// Methods []struct {
+// Pos Pos
+// Name stringOff
+// Signature Signature
+// }
+// }
+//
+// // Reference to a type param declaration
+// type TypeParamType struct {
+// Tag itag // typeParamType
+// Name stringOff
+// PkgPath stringOff
+// }
+//
+// // Instantiation of a generic type (like List[T2] or List[int])
+// type InstanceType struct {
+// Tag itag // instanceType
+// Pos pos
+// TypeArgs []typeOff
+// BaseType typeOff
+// }
+//
+// type UnionType struct {
+//	Tag   itag // unionType
+// Terms []struct {
+// tilde bool
+// Type typeOff
+// }
+// }
+//
+//
+//
+// type Signature struct {
+// Params []Param
+// Results []Param
+// Variadic bool // omitted if Results is empty
+// }
+//
+// type Param struct {
+// Pos Pos
+// Name stringOff
+//	Type typeOff
+// }
+//
+//
+// Pos encodes a file:line:column triple, incorporating a simple delta
+// encoding scheme within a data object. See exportWriter.pos for
+// details.
+//
+//
+// Compiler-specific details.
+//
+// cmd/compile writes out a second index for inline bodies and also
+// appends additional compiler-specific details after declarations.
+// Third-party tools are not expected to depend on these details and
+// they're expected to change much more rapidly, so they're omitted
+// here. See exportWriter's varExt/funcExt/etc methods for details.
+
+package typecheck
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "io"
+ "math/big"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/goobj"
+ "cmd/internal/src"
+)
+
+// Current indexed export format version. Increase with each format change.
+// 0: Go1.11 encoding
+// 1: added column details to Pos
+// 2: added information for generic functions/types. The export of non-generic
+//    functions/types remains largely backward-compatible. Breaking changes include:
+// - a 'kind' byte is added to constant values
+const (
+ iexportVersionGo1_11 = 0
+ iexportVersionPosCol = 1
+ iexportVersionGenerics = 2
+ iexportVersionGo1_18 = 2
+
+ iexportVersionCurrent = 2
+)
+
+// predeclReserved is the number of type offsets reserved for types
+// implicitly declared in the universe block.
+const predeclReserved = 32
+
+// An itag distinguishes the kind of type that was written into the
+// indexed export format.
+type itag uint64
+
+const (
+ // Types
+ definedType itag = iota
+ pointerType
+ sliceType
+ arrayType
+ chanType
+ mapType
+ signatureType
+ structType
+ interfaceType
+ typeParamType
+ instanceType // Instantiation of a generic type
+ unionType
+)
+
+const (
+ debug = false
+ magic = 0x6742937dc293105
+)
+
+// WriteExports writes the indexed export format to out. If extensions
+// is true, then the compiler-only extensions are included.
+func WriteExports(out io.Writer, extensions bool) {
+ if extensions {
+ // If we're exporting inline bodies, invoke the crawler to mark
+ // which bodies to include.
+ crawlExports(Target.Exports)
+ }
+
+ p := iexporter{
+ allPkgs: map[*types.Pkg]bool{},
+ stringIndex: map[string]uint64{},
+ declIndex: map[*types.Sym]uint64{},
+ inlineIndex: map[*types.Sym]uint64{},
+ typIndex: map[*types.Type]uint64{},
+ extensions: extensions,
+ }
+
+ for i, pt := range predeclared() {
+ p.typIndex[pt] = uint64(i)
+ }
+ if len(p.typIndex) > predeclReserved {
+ base.Fatalf("too many predeclared types: %d > %d", len(p.typIndex), predeclReserved)
+ }
+
+ // Initialize work queue with exported declarations.
+ for _, n := range Target.Exports {
+ p.pushDecl(n)
+ }
+
+ // Loop until no more work. We use a queue because while
+ // writing out inline bodies, we may discover additional
+ // declarations that are needed.
+ for !p.declTodo.Empty() {
+ p.doDecl(p.declTodo.PopLeft())
+ }
+
+ // Append indices to data0 section.
+ dataLen := uint64(p.data0.Len())
+ w := p.newWriter()
+ w.writeIndex(p.declIndex, true)
+ w.writeIndex(p.inlineIndex, false)
+ w.flush()
+
+ if *base.Flag.LowerV {
+ fmt.Printf("export: hdr strings %v, data %v, index %v\n", p.strings.Len(), dataLen, p.data0.Len())
+ }
+
+ // Assemble header.
+ var hdr intWriter
+ hdr.WriteByte('i')
+ hdr.uint64(iexportVersionCurrent)
+ hdr.uint64(uint64(p.strings.Len()))
+ hdr.uint64(dataLen)
+
+ // Flush output.
+ h := md5.New()
+ wr := io.MultiWriter(out, h)
+ io.Copy(wr, &hdr)
+ io.Copy(wr, &p.strings)
+ io.Copy(wr, &p.data0)
+
+ // Add fingerprint (used by linker object file).
+ // Attach this to the end, so tools (e.g. gcimporter) don't care.
+ copy(base.Ctxt.Fingerprint[:], h.Sum(nil)[:])
+ out.Write(base.Ctxt.Fingerprint[:])
+}
+
+// writeIndex writes out a symbol index. mainIndex indicates whether
+// we're writing out the main index, which is also read by
+// non-compiler tools and includes a complete package description
+// (i.e., name and height).
+func (w *exportWriter) writeIndex(index map[*types.Sym]uint64, mainIndex bool) {
+ // Build a map from packages to symbols from that package.
+ pkgSyms := map[*types.Pkg][]*types.Sym{}
+
+ // For the main index, make sure to include every package that
+ // we reference, even if we're not exporting (or reexporting)
+ // any symbols from it.
+ if mainIndex {
+ pkgSyms[types.LocalPkg] = nil
+ for pkg := range w.p.allPkgs {
+ pkgSyms[pkg] = nil
+ }
+ }
+
+ // Group symbols by package.
+ for sym := range index {
+ pkgSyms[sym.Pkg] = append(pkgSyms[sym.Pkg], sym)
+ }
+
+ // Sort packages by path.
+ var pkgs []*types.Pkg
+ for pkg := range pkgSyms {
+ pkgs = append(pkgs, pkg)
+ }
+ sort.Slice(pkgs, func(i, j int) bool {
+ return pkgs[i].Path < pkgs[j].Path
+ })
+
+ w.uint64(uint64(len(pkgs)))
+ for _, pkg := range pkgs {
+ w.string(pkg.Path)
+ if mainIndex {
+ w.string(pkg.Name)
+ w.uint64(uint64(pkg.Height))
+ }
+
+ // Sort symbols within a package by name.
+ syms := pkgSyms[pkg]
+ sort.Slice(syms, func(i, j int) bool {
+ return syms[i].Name < syms[j].Name
+ })
+
+ w.uint64(uint64(len(syms)))
+ for _, sym := range syms {
+ w.string(sym.Name)
+ w.uint64(index[sym])
+ }
+ }
+}
+
+type iexporter struct {
+ // allPkgs tracks all packages that have been referenced by
+ // the export data, so we can ensure to include them in the
+ // main index.
+ allPkgs map[*types.Pkg]bool
+
+ declTodo ir.NameQueue
+
+ strings intWriter
+ stringIndex map[string]uint64
+
+ data0 intWriter
+ declIndex map[*types.Sym]uint64
+ inlineIndex map[*types.Sym]uint64
+ typIndex map[*types.Type]uint64
+
+ extensions bool
+}
+
+// stringOff returns the offset of s within the string section.
+// If not already present, it's added to the end.
+func (p *iexporter) stringOff(s string) uint64 {
+ off, ok := p.stringIndex[s]
+ if !ok {
+ off = uint64(p.strings.Len())
+ p.stringIndex[s] = off
+
+ if *base.Flag.LowerV {
+ fmt.Printf("export: str %v %.40q\n", off, s)
+ }
+
+ p.strings.uint64(uint64(len(s)))
+				// package, but we didn't use it for instantiation in
+ }
+ return off
+}
+
+// pushDecl adds n to the declaration work queue, if not already present.
+func (p *iexporter) pushDecl(n *ir.Name) {
+ if n.Sym() == nil || n.Sym().Def != n && n.Op() != ir.OTYPE {
+ base.Fatalf("weird Sym: %v, %v", n, n.Sym())
+ }
+
+ // Don't export predeclared declarations.
+ if n.Sym().Pkg == types.BuiltinPkg || n.Sym().Pkg == types.UnsafePkg {
+ return
+ }
+
+ if _, ok := p.declIndex[n.Sym()]; ok {
+ return
+ }
+
+ p.declIndex[n.Sym()] = ^uint64(0) // mark n present in work queue
+ p.declTodo.PushRight(n)
+}
+
+// exportWriter handles writing out individual data section chunks.
+type exportWriter struct {
+ p *iexporter
+
+ data intWriter
+ currPkg *types.Pkg
+ prevFile string
+ prevLine int64
+ prevColumn int64
+
+ // dclIndex maps function-scoped declarations to an int used to refer to
+ // them later in the function. For local variables/params, the int is
+ // non-negative and in order of the appearance in the Func's Dcl list. For
+ // closure variables, the index is negative starting at -2.
+ dclIndex map[*ir.Name]int
+ maxDclIndex int
+ maxClosureVarIndex int
+}
+
+func (p *iexporter) doDecl(n *ir.Name) {
+ w := p.newWriter()
+ w.setPkg(n.Sym().Pkg, false)
+
+ switch n.Op() {
+ case ir.ONAME:
+ switch n.Class {
+ case ir.PEXTERN:
+ // Variable.
+ w.tag('V')
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ if w.p.extensions {
+ w.varExt(n)
+ }
+
+ case ir.PFUNC:
+ if ir.IsMethod(n) {
+ base.Fatalf("unexpected method: %v", n)
+ }
+
+ // Function.
+ if n.Type().TParams().NumFields() == 0 {
+ w.tag('F')
+ } else {
+ w.tag('G')
+ }
+ w.pos(n.Pos())
+ // The tparam list of the function type is the
+ // declaration of the type params. So, write out the type
+ // params right now. Then those type params will be
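+		// Special case: append([]byte, string...), e.g. append(b, "tail"...).
+		// The string argument is kept as a string rather than converted to []byte.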
+ // referenced via their type offset (via typOff) in all
+ // other places in the signature and function that they
+ // are used.
+ if n.Type().TParams().NumFields() > 0 {
+ w.tparamList(n.Type().TParams().FieldSlice())
+ }
+ w.signature(n.Type())
+ if w.p.extensions {
+ w.funcExt(n)
+ }
+
+ default:
+ base.Fatalf("unexpected class: %v, %v", n, n.Class)
+ }
+
+ case ir.OLITERAL:
+ // TODO(mdempsky): Extend check to all declarations.
+ if n.Typecheck() == 0 {
+ base.FatalfAt(n.Pos(), "missed typecheck: %v", n)
+ }
+
+ // Constant.
+ w.tag('C')
+ w.pos(n.Pos())
+ w.value(n.Type(), n.Val())
+ if w.p.extensions {
+ w.constExt(n)
+ }
+
+ case ir.OTYPE:
+ if n.Type().IsTypeParam() && n.Type().Underlying() == n.Type() {
+ // Even though it has local scope, a typeparam requires a
+ // declaration via its package and unique name, because it
+ // may be referenced within its type bound during its own
+ // definition.
+ w.tag('P')
+ // A typeparam has a name, and has a type bound rather
+ // than an underlying type.
+ w.pos(n.Pos())
+ if iexportVersionCurrent >= iexportVersionGo1_18 {
+ implicit := n.Type().Bound().IsImplicit()
+ w.bool(implicit)
+ }
+ w.typ(n.Type().Bound())
+ break
+ }
+
+ if n.Alias() {
+ // Alias.
+ w.tag('A')
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ break
+ }
+
+ // Defined type.
+ if len(n.Type().RParams()) == 0 {
+ w.tag('T')
+ } else {
+ w.tag('U')
+ }
+ w.pos(n.Pos())
+
+ if len(n.Type().RParams()) > 0 {
+ // Export type parameters, if any, needed for this type
+ w.typeList(n.Type().RParams())
+ }
+
+ underlying := n.Type().Underlying()
+ if underlying == types.ErrorType.Underlying() {
+ // For "type T error", use error as the
+ // underlying type instead of error's own
+ // underlying anonymous interface. This
+ // ensures consistency with how importers may
+ // declare error (e.g., go/types uses nil Pkg
+ // for predeclared objects).
+ underlying = types.ErrorType
+ }
+ if underlying == types.ComparableType.Underlying() {
+ // Do same for ComparableType as for ErrorType.
+ underlying = types.ComparableType
+ }
+ if base.Flag.G > 0 && underlying == types.AnyType.Underlying() {
+ // Do same for AnyType as for ErrorType.
+ underlying = types.AnyType
+ }
+ w.typ(underlying)
+
+ t := n.Type()
+ if t.IsInterface() {
+ if w.p.extensions {
+ w.typeExt(t)
+ }
+ break
+ }
+
+ // Sort methods, for consistency with types2.
+ methods := append([]*types.Field(nil), t.Methods().Slice()...)
+ if base.Debug.UnifiedQuirks != 0 {
+ sort.Sort(types.MethodsByName(methods))
+ }
+
+ w.uint64(uint64(len(methods)))
+ for _, m := range methods {
+ w.pos(m.Pos)
+ w.selector(m.Sym)
+ w.param(m.Type.Recv())
+ w.signature(m.Type)
+ }
+
+ if w.p.extensions {
+ w.typeExt(t)
+ for _, m := range methods {
+ w.methExt(m)
+ }
+ }
+
+ default:
+ base.Fatalf("unexpected node: %v", n)
+ }
+
+ w.finish("dcl", p.declIndex, n.Sym())
+}
+
+func (w *exportWriter) tag(tag byte) {
+ w.data.WriteByte(tag)
+}
+
+func (w *exportWriter) finish(what string, index map[*types.Sym]uint64, sym *types.Sym) {
+ off := w.flush()
+ if *base.Flag.LowerV {
+ fmt.Printf("export: %v %v %v\n", what, off, sym)
+ }
+ index[sym] = off
+}
+
+func (p *iexporter) doInline(f *ir.Name) {
+ w := p.newWriter()
+ w.setPkg(fnpkg(f), false)
+
+ w.dclIndex = make(map[*ir.Name]int, len(f.Func.Inl.Dcl))
+ w.funcBody(f.Func)
+
+ w.finish("inl", p.inlineIndex, f.Sym())
+}
+
+func (w *exportWriter) pos(pos src.XPos) {
+ p := base.Ctxt.PosTable.Pos(pos)
+ file := p.Base().AbsFilename()
+ line := int64(p.RelLine())
+ column := int64(p.RelCol())
+
+ // Encode position relative to the last position: column
+ // delta, then line delta, then file name. We reserve the
+ // bottom bit of the column and line deltas to encode whether
+ // the remaining fields are present.
+ //
+ // Note: Because data objects may be read out of order (or not
+ // at all), we can only apply delta encoding within a single
+ // object. This is handled implicitly by tracking prevFile,
+ // prevLine, and prevColumn as fields of exportWriter.
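+	//
+	// For example, a position in the same file and on the same line as the
+	// previous one, three columns later, is written as the single value 6:
+	// deltaColumn is 3<<1 with both continuation bits clear, so neither the
+	// line delta nor the file name follows.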
+
+ deltaColumn := (column - w.prevColumn) << 1
+ deltaLine := (line - w.prevLine) << 1
+
+ if file != w.prevFile {
+ deltaLine |= 1
+ }
+ if deltaLine != 0 {
+ deltaColumn |= 1
+ }
+
+ w.int64(deltaColumn)
+ if deltaColumn&1 != 0 {
+ w.int64(deltaLine)
+ if deltaLine&1 != 0 {
+ w.string(file)
+ }
+ }
+
+ w.prevFile = file
+ w.prevLine = line
+ w.prevColumn = column
+}
+
+func (w *exportWriter) pkg(pkg *types.Pkg) {
+ // TODO(mdempsky): Add flag to types.Pkg to mark pseudo-packages.
+ if pkg == ir.Pkgs.Go {
+ base.Fatalf("export of pseudo-package: %q", pkg.Path)
+ }
+
+ // Ensure any referenced packages are declared in the main index.
+ w.p.allPkgs[pkg] = true
+
+ w.string(pkg.Path)
+}
+
+func (w *exportWriter) qualifiedIdent(n *ir.Name) {
+ // Ensure any referenced declarations are written out too.
+ w.p.pushDecl(n)
+
+ s := n.Sym()
+ w.string(s.Name)
+ w.pkg(s.Pkg)
+}
+
+const blankMarker = "$"
+
+// TparamExportName creates a unique name for type param in a method or a generic
+// type, using the specified unique prefix and the index of the type param. The index
+// is only used if the type param is blank, in which case the blank is replaced by
+// "$<index>". A unique name is needed for later substitution in the compiler and
+// export/import that keeps blank type params associated with the correct constraint.
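+// For instance, with prefix "p", TparamExportName("p", "T", 0) returns
+// "p.T", while the blank name TparamExportName("p", "_", 2) returns
+// "p.$2" (which TparamName later maps back to "_").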
+func TparamExportName(prefix string, name string, index int) string {
+ if name == "_" {
+ name = blankMarker + strconv.Itoa(index)
+ }
+ return prefix + "." + name
+}
+
+// TparamName returns the real name of a type parameter, after stripping its
+// qualifying prefix and reverting blank-name encoding. See TparamExportName
+// for details.
+func TparamName(exportName string) string {
+ // Remove the "path" from the type param name that makes it unique.
+ ix := strings.LastIndex(exportName, ".")
+ if ix < 0 {
+ return ""
+ }
+ name := exportName[ix+1:]
+ if strings.HasPrefix(name, blankMarker) {
+ return "_"
+ }
+ return name
+}
+
+func (w *exportWriter) selector(s *types.Sym) {
+ if w.currPkg == nil {
+ base.Fatalf("missing currPkg")
+ }
+
+ // If the selector being written is unexported, it comes with a package qualifier.
+ // If the selector being written is exported, it is not package-qualified.
+ // See the spec: https://golang.org/ref/spec#Uniqueness_of_identifiers
+ // As an optimization, we don't actually write the package every time - instead we
+ // call setPkg before a group of selectors (all of which must have the same package qualifier).
+ pkg := w.currPkg
+ if types.IsExported(s.Name) {
+ pkg = types.LocalPkg
+ }
+ if s.Pkg != pkg {
+ base.Fatalf("package mismatch in selector: %v in package %q, but want %q", s, s.Pkg.Path, pkg.Path)
+ }
+
+ w.string(s.Name)
+}
+
+func (w *exportWriter) typ(t *types.Type) {
+ w.data.uint64(w.p.typOff(t))
+}
+
+// The "exotic" functions in this section encode a wider range of
+// items than the standard encoding functions above. These include
+// types that do not appear in declarations, only in code, such as
+// method types. These methods need to be separate from the standard
+// encoding functions because we don't want to modify the encoding
+// generated by the standard functions (because that exported
+// information is read by tools besides the compiler).
+
+// exoticType exports a type to the writer.
+func (w *exportWriter) exoticType(t *types.Type) {
+ switch {
+ case t == nil:
+ // Calls-as-statements have no type.
+ w.data.uint64(exoticTypeNil)
+ case t.IsStruct() && t.StructType().Funarg != types.FunargNone:
+ // These are weird structs for representing tuples of types returned
+ // by multi-return functions.
+ // They don't fit the standard struct type mold. For instance,
+ // they don't have any package info.
+ w.data.uint64(exoticTypeTuple)
+ w.uint64(uint64(t.StructType().Funarg))
+ w.uint64(uint64(t.NumFields()))
+ for _, f := range t.FieldSlice() {
+ w.pos(f.Pos)
+ s := f.Sym
+ if s == nil {
+ w.uint64(0)
+ } else if s.Pkg == nil {
+ w.uint64(exoticTypeSymNoPkg)
+ w.string(s.Name)
+ } else {
+ w.uint64(exoticTypeSymWithPkg)
+ w.pkg(s.Pkg)
+ w.string(s.Name)
+ }
+ w.typ(f.Type)
+ if f.Embedded != 0 || f.Note != "" {
+ panic("extra info in funarg struct field")
+ }
+ }
+ case t.Kind() == types.TFUNC && t.Recv() != nil:
+ w.data.uint64(exoticTypeRecv)
+ // interface method types have a fake receiver type.
+ isFakeRecv := t.Recv().Type == types.FakeRecvType()
+ w.bool(isFakeRecv)
+ if !isFakeRecv {
+ w.exoticParam(t.Recv())
+ }
+ w.exoticSignature(t)
+
+ default:
+ // A regular type.
+ w.data.uint64(exoticTypeRegular)
+ w.typ(t)
+ }
+}
+
+const (
+ exoticTypeNil = iota
+ exoticTypeTuple
+ exoticTypeRecv
+ exoticTypeRegular
+)
+const (
+ exoticTypeSymNil = iota
+ exoticTypeSymNoPkg
+ exoticTypeSymWithPkg
+)
+
+// Export a selector, but one whose package may not match
+// the package being compiled. This is a separate function
+// because the standard selector() serialization format is fixed
+// by the go/types reader. This one can only be used during
+// inline/generic body exporting.
+func (w *exportWriter) exoticSelector(s *types.Sym) {
+ pkg := w.currPkg
+ if types.IsExported(s.Name) {
+ pkg = types.LocalPkg
+ }
+
+ w.string(s.Name)
+ if s.Pkg == pkg {
+ w.uint64(0)
+ } else {
+ w.uint64(1)
+ w.pkg(s.Pkg)
+ }
+}
+
+func (w *exportWriter) exoticSignature(t *types.Type) {
+ hasPkg := t.Pkg() != nil
+ w.bool(hasPkg)
+ if hasPkg {
+ w.pkg(t.Pkg())
+ }
+ w.exoticParamList(t.Params().FieldSlice())
+ w.exoticParamList(t.Results().FieldSlice())
+}
+
+func (w *exportWriter) exoticParamList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ w.exoticParam(f)
+ }
+}
+
+func (w *exportWriter) exoticParam(f *types.Field) {
+ w.pos(f.Pos)
+ w.exoticSym(f.Sym)
+ w.uint64(uint64(f.Offset))
+ w.exoticType(f.Type)
+ w.bool(f.IsDDD())
+}
+
+func (w *exportWriter) exoticField(f *types.Field) {
+ w.pos(f.Pos)
+ w.exoticSym(f.Sym)
+ w.uint64(uint64(f.Offset))
+ w.exoticType(f.Type)
+ w.string(f.Note)
+}
+
+func (w *exportWriter) exoticSym(s *types.Sym) {
+ if s == nil {
+ w.string("")
+ return
+ }
+ if s.Name == "" {
+ base.Fatalf("empty symbol name")
+ }
+ w.string(s.Name)
+ if !types.IsExported(s.Name) {
+ w.pkg(s.Pkg)
+ }
+}
+
+func (p *iexporter) newWriter() *exportWriter {
+ return &exportWriter{p: p}
+}
+
+func (w *exportWriter) flush() uint64 {
+ off := uint64(w.p.data0.Len())
+ io.Copy(&w.p.data0, &w.data)
+ return off
+}
+
+func (p *iexporter) typOff(t *types.Type) uint64 {
+ off, ok := p.typIndex[t]
+ if !ok {
+ w := p.newWriter()
+ w.doTyp(t)
+ rawOff := w.flush()
+ if *base.Flag.LowerV {
+ fmt.Printf("export: typ %v %v\n", rawOff, t)
+ }
+ off = predeclReserved + rawOff
+ p.typIndex[t] = off
+ }
+ return off
+}
+
+func (w *exportWriter) startType(k itag) {
+ w.data.uint64(uint64(k))
+}
+
+func (w *exportWriter) doTyp(t *types.Type) {
+ s := t.Sym()
+ if s != nil && t.OrigType() != nil {
+ assert(base.Flag.G > 0)
+ // This is an instantiated type - could be a re-instantiation like
+ // Value[T2] or a full instantiation like Value[int].
+ if strings.Index(s.Name, "[") < 0 {
+ base.Fatalf("incorrect name for instantiated type")
+ }
+ w.startType(instanceType)
+ w.pos(t.Pos())
+ // Export the type arguments for the instantiated type. The
+ // instantiated type could be in a method header (e.g. "func (v
+ // *Value[T2]) set (...) { ... }"), so the type args are "new"
+ // typeparams. Or the instantiated type could be in a
+ // function/method body, so the type args are either concrete
+ // types or existing typeparams from the function/method header.
+ w.typeList(t.RParams())
+ // Export a reference to the base type.
+ baseType := t.OrigType()
+ w.typ(baseType)
+ return
+ }
+
+ // The 't.Underlying() == t' check is to confirm this is a base typeparam
+ // type, rather than a defined type with typeparam underlying type, like:
+ // type orderedAbs[T any] T
+ if t.IsTypeParam() && t.Underlying() == t {
+ assert(base.Flag.G > 0)
+ if s.Pkg == types.BuiltinPkg || s.Pkg == types.UnsafePkg {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
+ }
+ // Write out the first use of a type param as a qualified ident.
+ // This will force a "declaration" of the type param.
+ w.startType(typeParamType)
+ w.qualifiedIdent(t.Obj().(*ir.Name))
+ return
+ }
+
+ if s != nil {
+ if s.Pkg == types.BuiltinPkg || s.Pkg == types.UnsafePkg {
+ base.Fatalf("builtin type missing from typIndex: %v", t)
+ }
+
+ w.startType(definedType)
+ w.qualifiedIdent(t.Obj().(*ir.Name))
+ return
+ }
+
+ switch t.Kind() {
+ case types.TPTR:
+ w.startType(pointerType)
+ w.typ(t.Elem())
+
+ case types.TSLICE:
+ w.startType(sliceType)
+ w.typ(t.Elem())
+
+ case types.TARRAY:
+ w.startType(arrayType)
+ w.uint64(uint64(t.NumElem()))
+ w.typ(t.Elem())
+
+ case types.TCHAN:
+ w.startType(chanType)
+ w.uint64(uint64(t.ChanDir()))
+ w.typ(t.Elem())
+
+ case types.TMAP:
+ w.startType(mapType)
+ w.typ(t.Key())
+ w.typ(t.Elem())
+
+ case types.TFUNC:
+ w.startType(signatureType)
+ w.setPkg(t.Pkg(), true)
+ w.signature(t)
+
+ case types.TSTRUCT:
+ w.startType(structType)
+ w.setPkg(t.Pkg(), true)
+
+ w.uint64(uint64(t.NumFields()))
+ for _, f := range t.FieldSlice() {
+ w.pos(f.Pos)
+ w.selector(f.Sym)
+ w.typ(f.Type)
+ w.bool(f.Embedded != 0)
+ w.string(f.Note)
+ }
+
+ case types.TINTER:
+ var embeddeds, methods []*types.Field
+ for _, m := range t.Methods().Slice() {
+ if m.Sym != nil {
+ methods = append(methods, m)
+ } else {
+ embeddeds = append(embeddeds, m)
+ }
+ }
+
+ // Sort methods and embedded types, for consistency with types2.
+ // Note: embedded types may be anonymous, and types2 sorts them
+ // with sort.Stable too.
+ if base.Debug.UnifiedQuirks != 0 {
+ sort.Sort(types.MethodsByName(methods))
+ sort.Stable(types.EmbeddedsByName(embeddeds))
+ }
+
+ w.startType(interfaceType)
+ w.setPkg(t.Pkg(), true)
+
+ w.uint64(uint64(len(embeddeds)))
+ for _, f := range embeddeds {
+ w.pos(f.Pos)
+ w.typ(f.Type)
+ }
+
+ w.uint64(uint64(len(methods)))
+ for _, f := range methods {
+ w.pos(f.Pos)
+ w.selector(f.Sym)
+ w.signature(f.Type)
+ }
+
+ case types.TUNION:
+ assert(base.Flag.G > 0)
+ // TODO(danscales): possibly put out the tilde bools in more
+ // compact form.
+ w.startType(unionType)
+ nt := t.NumTerms()
+ w.uint64(uint64(nt))
+ for i := 0; i < nt; i++ {
+ typ, tilde := t.Term(i)
+ w.bool(tilde)
+ w.typ(typ)
+ }
+
+ default:
+ base.Fatalf("unexpected type: %v", t)
+ }
+}
+
+func (w *exportWriter) setPkg(pkg *types.Pkg, write bool) {
+ if pkg == types.NoPkg {
+ base.Fatalf("missing pkg")
+ }
+
+ if write {
+ w.pkg(pkg)
+ }
+
+ w.currPkg = pkg
+}
+
+func (w *exportWriter) signature(t *types.Type) {
+ w.paramList(t.Params().FieldSlice())
+ w.paramList(t.Results().FieldSlice())
+ if n := t.Params().NumFields(); n > 0 {
+ w.bool(t.Params().Field(n - 1).IsDDD())
+ }
+}
+
+func (w *exportWriter) typeList(ts []*types.Type) {
+ w.uint64(uint64(len(ts)))
+ for _, rparam := range ts {
+ w.typ(rparam)
+ }
+}
+
+func (w *exportWriter) tparamList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ if !f.Type.IsTypeParam() {
+ base.Fatalf("unexpected non-typeparam")
+ }
+ w.typ(f.Type)
+ }
+}
+
+func (w *exportWriter) paramList(fs []*types.Field) {
+ w.uint64(uint64(len(fs)))
+ for _, f := range fs {
+ w.param(f)
+ }
+}
+
+func (w *exportWriter) param(f *types.Field) {
+ w.pos(f.Pos)
+ w.localIdent(types.OrigSym(f.Sym))
+ w.typ(f.Type)
+}
+
+func constTypeOf(typ *types.Type) constant.Kind {
+ switch typ {
+ case types.UntypedInt, types.UntypedRune:
+ return constant.Int
+ case types.UntypedFloat:
+ return constant.Float
+ case types.UntypedComplex:
+ return constant.Complex
+ }
+
+ switch typ.Kind() {
+ case types.TBOOL:
+ return constant.Bool
+ case types.TSTRING:
+ return constant.String
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ return constant.Int
+ case types.TFLOAT32, types.TFLOAT64:
+ return constant.Float
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ return constant.Complex
+ }
+
+ base.Fatalf("unexpected constant type: %v", typ)
+ return 0
+}
+
+func (w *exportWriter) value(typ *types.Type, v constant.Value) {
+ w.typ(typ)
+
+ if iexportVersionCurrent >= iexportVersionGo1_18 {
+ w.int64(int64(v.Kind()))
+ }
+
+ var kind constant.Kind
+ var valType *types.Type
+
+ if typ.IsTypeParam() {
+ kind = v.Kind()
+ if iexportVersionCurrent < iexportVersionGo1_18 {
+ // A constant will have a TYPEPARAM type if it appears in a place
+ // where it must match that typeparam type (e.g. in a binary
+ // operation with a variable of that typeparam type). If so, then
+ // we must write out its actual constant kind as well, so its
+ // constant val can be read in properly during import.
+ w.int64(int64(kind))
+ }
+
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ ir.AssertValidTypeForConst(typ, v)
+ kind = constTypeOf(typ)
+ valType = typ
+ }
+
+ // Each type has only one admissible constant representation, so we could
+ // type switch directly on v.Kind() here. However, switching on the type
+ // (in the non-typeparam case) increases symmetry with import logic and
+ // provides a useful consistency check.
+
+ switch kind {
+ case constant.Bool:
+ w.bool(constant.BoolVal(v))
+ case constant.String:
+ w.string(constant.StringVal(v))
+ case constant.Int:
+ w.mpint(v, valType)
+ case constant.Float:
+ w.mpfloat(v, valType)
+ case constant.Complex:
+ w.mpfloat(constant.Real(v), valType)
+ w.mpfloat(constant.Imag(v), valType)
+ }
+}
+
+func intSize(typ *types.Type) (signed bool, maxBytes uint) {
+ if typ.IsUntyped() {
+ return true, ir.ConstPrec / 8
+ }
+
+ switch typ.Kind() {
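+	// Floating-point constants are written as an integer mantissa (see
+	// mpfloat), so the limit is the significand size: 24 bits (3 bytes) for
+	// float32/complex64 parts and 53 bits (7 bytes) for float64/complex128.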
+ case types.TFLOAT32, types.TCOMPLEX64:
+ return true, 3
+ case types.TFLOAT64, types.TCOMPLEX128:
+ return true, 7
+ }
+
+ signed = typ.IsSigned()
+ maxBytes = uint(typ.Size())
+
+ // The go/types API doesn't expose sizes to importers, so they
+ // don't know how big these types are.
+ switch typ.Kind() {
+ case types.TINT, types.TUINT, types.TUINTPTR:
+ maxBytes = 8
+ }
+
+ return
+}
+
+// mpint exports a multi-precision integer.
+//
+// For unsigned types, small values are written out as a single
+// byte. Larger values are written out as a length-prefixed big-endian
+// byte string, where the length prefix is encoded as its complement.
+// For example, bytes 0, 1, and 2 directly represent the integer
+// values 0, 1, and 2, while bytes 255, 254, and 253 indicate that a 1-,
+// 2-, or 3-byte big-endian string follows.
+//
+// Encoding for signed types uses the same general approach as for
+// unsigned types, except that small values use zig-zag encoding and the
+// bottom bit of the length prefix byte for large values is reserved as a
+// sign bit.
+//
+// The exact boundary between small and large encodings varies
+// according to the maximum number of bytes needed to encode a value
+// of type typ. As a special case, 8-bit types are always encoded as a
+// single byte.
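+//
+// For example, with an unsigned 8-byte type maxSmall is 256-8 = 248, so
+// the value 247 is written as the single byte 247, while 1000 is written
+// as the length byte 254 (a 2-byte string follows) and then the
+// big-endian bytes 0x03 0xE8.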
+func (w *exportWriter) mpint(x constant.Value, typ *types.Type) {
+ signed, maxBytes := intSize(typ)
+
+ negative := constant.Sign(x) < 0
+ if !signed && negative {
+ base.Fatalf("negative unsigned integer; type %v, value %v", typ, x)
+ }
+
+ b := constant.Bytes(x) // little endian
+ for i, j := 0, len(b)-1; i < j; i, j = i+1, j-1 {
+ b[i], b[j] = b[j], b[i]
+ }
+
+ if len(b) > 0 && b[0] == 0 {
+ base.Fatalf("leading zeros")
+ }
+ if uint(len(b)) > maxBytes {
+ base.Fatalf("bad mpint length: %d > %d (type %v, value %v)", len(b), maxBytes, typ, x)
+ }
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ // Check if x can use small value encoding.
+ if len(b) <= 1 {
+ var ux uint
+ if len(b) == 1 {
+ ux = uint(b[0])
+ }
+ if signed {
+ ux <<= 1
+ if negative {
+ ux--
+ }
+ }
+ if ux < maxSmall {
+ w.data.WriteByte(byte(ux))
+ return
+ }
+ }
+
+ n := 256 - uint(len(b))
+ if signed {
+ n = 256 - 2*uint(len(b))
+ if negative {
+ n |= 1
+ }
+ }
+ if n < maxSmall || n >= 256 {
+ base.Fatalf("encoding mistake: %d, %v, %v => %d", len(b), signed, negative, n)
+ }
+
+ w.data.WriteByte(byte(n))
+ w.data.Write(b)
+}
+
+// mpfloat exports a multi-precision floating point number.
+//
+// The number's value is decomposed into mantissa × 2**exponent, where
+// mantissa is an integer. The value is written out as mantissa (as a
+// multi-precision integer) and then the exponent, except exponent is
+// omitted if mantissa is zero.
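+//
+// For example, 1.5 is written as mantissa 3 followed by exponent -1
+// (1.5 = 3 × 2**-1), and 0 is written as mantissa 0 with the exponent
+// omitted.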
+func (w *exportWriter) mpfloat(v constant.Value, typ *types.Type) {
+ f := ir.BigFloat(v)
+ if f.IsInf() {
+ base.Fatalf("infinite constant")
+ }
+
+ // Break into f = mant × 2**exp, with 0.5 <= mant < 1.
+ var mant big.Float
+ exp := int64(f.MantExp(&mant))
+
+ // Scale so that mant is an integer.
+ prec := mant.MinPrec()
+ mant.SetMantExp(&mant, int(prec))
+ exp -= int64(prec)
+
+ manti, acc := mant.Int(nil)
+ if acc != big.Exact {
+ base.Fatalf("mantissa scaling failed for %f (%s)", f, acc)
+ }
+ w.mpint(constant.Make(manti), typ)
+ if manti.Sign() != 0 {
+ w.int64(exp)
+ }
+}
+
+func (w *exportWriter) mprat(v constant.Value) {
+ r, ok := constant.Val(v).(*big.Rat)
+ if !w.bool(ok) {
+ return
+ }
+ // TODO(mdempsky): Come up with a more efficient binary
+ // encoding before bumping iexportVersion to expose to
+ // gcimporter.
+ w.string(r.String())
+}
+
+func (w *exportWriter) bool(b bool) bool {
+ var x uint64
+ if b {
+ x = 1
+ }
+ w.uint64(x)
+ return b
+}
+
+func (w *exportWriter) int64(x int64) { w.data.int64(x) }
+func (w *exportWriter) uint64(x uint64) { w.data.uint64(x) }
+func (w *exportWriter) string(s string) { w.uint64(w.p.stringOff(s)) }
+
+// Compiler-specific extensions.
+
+func (w *exportWriter) constExt(n *ir.Name) {
+ // Internally, we now represent untyped float and complex
+ // constants with infinite-precision rational numbers using
+ // go/constant, but the "public" export data format known to
+ // gcimporter only supports 512-bit floating point constants.
+ // In case rationals turn out to be a bad idea and we want to
+ // switch back to fixed-precision constants, for now we
+ // continue writing out the 512-bit truncation in the public
+ // data section, and write the exact, rational constant in the
+ // compiler's extension data. Also, we only need to worry
+ // about exporting rationals for declared constants, because
+ // constants that appear in an expression will already have
+ // been coerced to a concrete, fixed-precision type.
+ //
+ // Eventually, assuming we stick with using rationals, we
+ // should bump iexportVersion to support rationals, and do the
+ // whole gcimporter update song-and-dance.
+ //
+ // TODO(mdempsky): Prepare vocals for that.
+
+ switch n.Type() {
+ case types.UntypedFloat:
+ w.mprat(n.Val())
+ case types.UntypedComplex:
+ v := n.Val()
+ w.mprat(constant.Real(v))
+ w.mprat(constant.Imag(v))
+ }
+}
+
+func (w *exportWriter) varExt(n *ir.Name) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
+}
+
+func (w *exportWriter) funcExt(n *ir.Name) {
+ w.linkname(n.Sym())
+ w.symIdx(n.Sym())
+
+ // Record definition ABI so cross-ABI calls can be direct.
+ // This is important for the performance of calling some
+ // common functions implemented in assembly (e.g., bytealg).
+ w.uint64(uint64(n.Func.ABI))
+
+ w.uint64(uint64(n.Func.Pragma))
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(n.Type()).FieldSlice() {
+ w.string(f.Note)
+ }
+ }
+
+ // Write out inline body or body of a generic function/method.
+ if n.Type().HasTParam() && n.Func.Body != nil && n.Func.Inl == nil {
+ base.FatalfAt(n.Pos(), "generic function is not marked inlineable")
+ }
+ if n.Func.Inl != nil {
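+		// Written as cost+1 so that the 0 written in the else branch below
+		// unambiguously means "no inline body".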
+ w.uint64(1 + uint64(n.Func.Inl.Cost))
+ w.bool(n.Func.Inl.CanDelayResults)
+ if n.Func.ExportInline() || n.Type().HasTParam() {
+ if n.Type().HasTParam() {
+ // If this generic function/method is from another
+ // package, but we didn't use for instantiation in
+ // this package, we may not yet have imported it.
+ ImportedBody(n.Func)
+ }
+ w.p.doInline(n)
+ }
+
+ // Endlineno for inlined function.
+ w.pos(n.Func.Endlineno)
+ } else {
+ w.uint64(0)
+ }
+}
+
+func (w *exportWriter) methExt(m *types.Field) {
+ w.bool(m.Nointerface())
+ w.funcExt(m.Nname.(*ir.Name))
+}
+
+func (w *exportWriter) linkname(s *types.Sym) {
+ w.string(s.Linkname)
+}
+
+func (w *exportWriter) symIdx(s *types.Sym) {
+ lsym := s.Linksym()
+ if lsym.PkgIdx > goobj.PkgIdxSelf || (lsym.PkgIdx == goobj.PkgIdxInvalid && !lsym.Indexed()) || s.Linkname != "" {
+ // Don't export index for non-package symbols, linkname'd symbols,
+ // and symbols without an index. They can only be referenced by
+ // name.
+ w.int64(-1)
+ } else {
+ // For a defined symbol, export its index.
+ // For re-exporting an imported symbol, pass its index through.
+ w.int64(int64(lsym.SymIdx))
+ }
+}
+
+func (w *exportWriter) typeExt(t *types.Type) {
+ // Export whether this type is marked notinheap.
+ w.bool(t.NotInHeap())
+ // For type T, export the index of type descriptor symbols of T and *T.
+ if i, ok := typeSymIdx[t]; ok {
+ w.int64(i[0])
+ w.int64(i[1])
+ return
+ }
+ w.symIdx(types.TypeSym(t))
+ w.symIdx(types.TypeSym(t.PtrTo()))
+}
+
+// Inline bodies.
+
+func (w *exportWriter) writeNames(dcl []*ir.Name) {
+ w.int64(int64(len(dcl)))
+ for i, n := range dcl {
+ w.pos(n.Pos())
+ w.localIdent(n.Sym())
+ w.typ(n.Type())
+ w.dclIndex[n] = w.maxDclIndex + i
+ }
+ w.maxDclIndex += len(dcl)
+}
+
+func (w *exportWriter) funcBody(fn *ir.Func) {
+ //fmt.Printf("Exporting %s\n", fn.Nname.Sym().Name)
+ w.writeNames(fn.Inl.Dcl)
+
+ w.stmtList(fn.Inl.Body)
+}
+
+func (w *exportWriter) stmtList(list []ir.Node) {
+ for _, n := range list {
+ w.node(n)
+ }
+ w.op(ir.OEND)
+}
+
+func (w *exportWriter) node(n ir.Node) {
+ if ir.OpPrec[n.Op()] < 0 {
+ w.stmt(n)
+ } else {
+ w.expr(n)
+ }
+}
+
+func isNonEmptyAssign(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OAS:
+ if n.(*ir.AssignStmt).Y != nil {
+ return true
+ }
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ return true
+ }
+ return false
+}
+
+// Caution: stmt will emit more than one node for a statement node n that has a
+// non-empty n.Ninit and is not a non-empty assignment or a node with a natural
+// init section (such as "if", "for", etc.).
+func (w *exportWriter) stmt(n ir.Node) {
+ if len(n.Init()) > 0 && !ir.StmtWithInit(n.Op()) && !isNonEmptyAssign(n) && n.Op() != ir.ORANGE {
+ // can't use stmtList here since we don't want the final OEND
+ for _, n := range n.Init() {
+ w.stmt(n)
+ }
+ }
+
+ switch n.Op() {
+ case ir.OBLOCK:
+ // No OBLOCK in export data.
+ // Inline content into this statement list,
+ // like the init list above.
+ // (At the moment neither the parser nor the typechecker
+ // generate OBLOCK nodes except to denote an empty
+ // function body, although that may change.)
+ n := n.(*ir.BlockStmt)
+ for _, n := range n.List {
+ w.stmt(n)
+ }
+
+ case ir.ODCL:
+ n := n.(*ir.Decl)
+ if ir.IsBlank(n.X) {
+ return // blank declarations not useful to importers
+ }
+ w.op(ir.ODCL)
+ w.localName(n.X)
+
+ case ir.OAS:
+		// Don't export "v = <N>" initializing statements; hope they're always
+		// preceded by the DCL, which will be re-parsed and typechecked to
+		// reproduce the "v = <N>" again.
+ n := n.(*ir.AssignStmt)
+ if n.Y != nil {
+ w.op(ir.OAS)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.X)
+ w.expr(n.Y)
+ w.bool(n.Def)
+ }
+
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ w.op(ir.OASOP)
+ w.pos(n.Pos())
+ w.op(n.AsOp)
+ w.expr(n.X)
+ if w.bool(!n.IncDec) {
+ w.expr(n.Y)
+ }
+
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OAS2)
+ }
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
+ w.bool(n.Def)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ w.op(ir.ORETURN)
+ w.pos(n.Pos())
+ w.exprList(n.Results)
+
+ // case ORETJMP:
+ // unreachable - generated by compiler for trampoline routines
+
+ case ir.OGO, ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Call)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ w.op(ir.OIF)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.Cond)
+ w.stmtList(n.Body)
+ w.stmtList(n.Else)
+
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ w.op(ir.OFOR)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Cond, n.Post)
+ w.stmtList(n.Body)
+
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ w.op(ir.ORANGE)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Key, n.Value)
+ w.expr(n.X)
+ w.stmtList(n.Body)
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.commList(n.Cases)
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprsOrNil(n.Tag, nil)
+ w.caseList(n.Cases, isNamedTypeSwitch(n.Tag))
+
+ // case OCASE:
+ // handled by caseList
+
+ case ir.OFALL:
+ n := n.(*ir.BranchStmt)
+ w.op(ir.OFALL)
+ w.pos(n.Pos())
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO, ir.OLABEL:
+ w.op(n.Op())
+ w.pos(n.Pos())
+ label := ""
+ if sym := n.Sym(); sym != nil {
+ label = sym.Name
+ }
+ w.string(label)
+
+ default:
+ base.Fatalf("exporter: CANNOT EXPORT: %v\nPlease notify gri@\n", n.Op())
+ }
+}
+
+func isNamedTypeSwitch(x ir.Node) bool {
+ guard, ok := x.(*ir.TypeSwitchGuard)
+ return ok && guard.Tag != nil
+}
+
+func (w *exportWriter) caseList(cases []*ir.CaseClause, namedTypeSwitch bool) {
+ w.uint64(uint64(len(cases)))
+ for _, cas := range cases {
+ w.pos(cas.Pos())
+ w.stmtList(cas.List)
+ if namedTypeSwitch {
+ w.localName(cas.Var)
+ }
+ w.stmtList(cas.Body)
+ }
+}
+
+func (w *exportWriter) commList(cases []*ir.CommClause) {
+ w.uint64(uint64(len(cases)))
+ for _, cas := range cases {
+ w.pos(cas.Pos())
+ defaultCase := cas.Comm == nil
+ w.bool(defaultCase)
+ if !defaultCase {
+			// Only call w.node for the non-default case (cas.Comm is non-nil).
+ w.node(cas.Comm)
+ }
+ w.stmtList(cas.Body)
+ }
+}
+
+func (w *exportWriter) exprList(list ir.Nodes) {
+ for _, n := range list {
+ w.expr(n)
+ }
+ w.op(ir.OEND)
+}
+
+func simplifyForExport(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.OPAREN:
+ n := n.(*ir.ParenExpr)
+ return simplifyForExport(n.X)
+ }
+ return n
+}
+
+func (w *exportWriter) expr(n ir.Node) {
+ n = simplifyForExport(n)
+ switch n.Op() {
+ // expressions
+ // (somewhat closely following the structure of exprfmt in fmt.go)
+ case ir.ONIL:
+ n := n.(*ir.NilExpr)
+ // If n is a typeparam, it will have already been checked
+ // for proper use by the types2 typechecker.
+ if !n.Type().IsTypeParam() && !n.Type().HasNil() {
+ base.Fatalf("unexpected type for nil: %v", n.Type())
+ }
+ w.op(ir.ONIL)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+
+ case ir.OLITERAL:
+ w.op(ir.OLITERAL)
+ if ir.HasUniquePos(n) {
+ w.pos(n.Pos())
+ } else {
+ w.pos(src.NoXPos)
+ }
+ w.value(n.Type(), n.Val())
+
+ case ir.ONAME:
+ // Package scope name.
+ n := n.(*ir.Name)
+ if (n.Class == ir.PEXTERN || n.Class == ir.PFUNC) && !ir.IsBlank(n) {
+ w.op(ir.ONONAME)
+ // Indicate that this is not an OKEY entry.
+ w.bool(false)
+ w.qualifiedIdent(n)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+ break
+ }
+
+ // Function scope name.
+ // We don't need a type here, as the type will be provided at the
+ // declaration of n.
+ w.op(ir.ONAME)
+
+ // This handles the case where we haven't yet transformed a call
+ // to a builtin, so we must write out the builtin as a name in the
+ // builtin package.
+ isBuiltin := n.BuiltinOp != ir.OXXX
+ w.bool(isBuiltin)
+ if isBuiltin {
+ w.bool(n.Sym().Pkg == types.UnsafePkg)
+ w.string(n.Sym().Name)
+ break
+ }
+ w.localName(n)
+
+ case ir.ONONAME:
+ w.op(ir.ONONAME)
+ // This can only be for OKEY nodes in generic functions. Mark it
+ // as a key entry.
+ w.bool(true)
+ s := n.Sym()
+ w.string(s.Name)
+ w.pkg(s.Pkg)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ // case OPACK:
+ // should have been resolved by typechecking - handled by default case
+
+ case ir.OTYPE:
+ w.op(ir.OTYPE)
+ w.typ(n.Type())
+
+ case ir.ODYNAMICTYPE:
+ n := n.(*ir.DynamicType)
+ w.op(ir.ODYNAMICTYPE)
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if n.ITab != nil {
+ w.bool(true)
+ w.expr(n.ITab)
+ } else {
+ w.bool(false)
+ }
+ w.typ(n.Type())
+
+ case ir.OTYPESW:
+ n := n.(*ir.TypeSwitchGuard)
+ w.op(ir.OTYPESW)
+ w.pos(n.Pos())
+ var s *types.Sym
+ if n.Tag != nil {
+ if n.Tag.Op() != ir.ONONAME {
+ base.Fatalf("expected ONONAME, got %v", n.Tag)
+ }
+ s = n.Tag.Sym()
+ }
+ w.localIdent(s) // declared pseudo-variable, if any
+ w.expr(n.X)
+
+ // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+ // should have been resolved by typechecking - handled by default case
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ w.op(ir.OCLOSURE)
+ w.pos(n.Pos())
+ old := w.currPkg
+ w.setPkg(n.Type().Pkg(), true)
+ w.signature(n.Type())
+ w.setPkg(old, true)
+
+		// Write out the id for the Outer of each closure variable. The
+		// closure variable itself for this closure will be re-created
+		// during import.
+ w.int64(int64(len(n.Func.ClosureVars)))
+ for i, cv := range n.Func.ClosureVars {
+ w.pos(cv.Pos())
+ w.localName(cv.Outer)
+ // Closure variable (which will be re-created during
+ // import) is given via a negative id, starting at -2,
+ // which is used to refer to it later in the function
+ // during export. -1 represents blanks.
+ w.dclIndex[cv] = -(i + 2) - w.maxClosureVarIndex
+ }
+ w.maxClosureVarIndex += len(n.Func.ClosureVars)
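+		// Illustrative worked example (not part of the upstream source): for
+		// the first closure exported in a function (w.maxClosureVarIndex == 0)
+		// with two captured variables, the ids recorded above are -2 and -3.
+		// On import, localName in iimport.go maps id -2 back to
+		// allClosureVars[-(-2)-2] == allClosureVars[0], the first re-created
+		// closure variable.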
+
+ // like w.funcBody(n.Func), but not for .Inl
+ w.writeNames(n.Func.Dcl)
+ w.stmtList(n.Func.Body)
+
+ // case OCOMPLIT:
+ // should have been resolved by typechecking - handled by default case
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ if go117ExportTypes {
+ w.op(ir.OPTRLIT)
+ } else {
+ w.op(ir.OADDR)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ w.op(ir.OSTRUCTLIT)
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.fieldList(n.List) // special handling of field names
+
+ case ir.OCOMPLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OCOMPLIT)
+ }
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.exprList(n.List)
+ if go117ExportTypes && n.Op() == ir.OSLICELIT {
+ w.uint64(uint64(n.Len))
+ }
+ case ir.OKEY:
+ n := n.(*ir.KeyExpr)
+ w.op(ir.OKEY)
+ w.pos(n.Pos())
+ w.expr(n.Key)
+ w.expr(n.Value)
+
+ // case OSTRUCTKEY:
+ // unreachable - handled in case OSTRUCTLIT by elemList
+
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ if go117ExportTypes {
+ // For go117ExportTypes, we usually see all ops except
+ // OXDOT, but we can see OXDOT for generic functions.
+ w.op(n.Op())
+ } else {
+ w.op(ir.OXDOT)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.exoticSelector(n.Sel)
+ if go117ExportTypes {
+ w.exoticType(n.Type())
+ if n.Op() == ir.OXDOT {
+ // n.Selection for method references will be
+ // reconstructed during import.
+ w.bool(n.Selection != nil)
+ } else if n.Op() == ir.ODOT || n.Op() == ir.ODOTPTR || n.Op() == ir.ODOTINTER {
+ w.exoticField(n.Selection)
+ }
+ // n.Selection is not required for OMETHEXPR, ODOTMETH, and OMETHVALUE. It will
+ // be reconstructed during import. n.Selection is computed during
+ // transformDot() for OXDOT.
+ }
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.ODOTTYPE)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.typ(n.Type())
+
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.expr(n.T)
+ w.typ(n.Type())
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OINDEX)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.expr(n.Index)
+ if go117ExportTypes {
+ w.exoticType(n.Type())
+ if n.Op() == ir.OINDEXMAP {
+ w.bool(n.Assigned)
+ }
+ }
+
+ case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR:
+ n := n.(*ir.SliceExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OSLICE)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.exprsOrNil(n.Low, n.High)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OSLICE3)
+ }
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.exprsOrNil(n.Low, n.High)
+ w.expr(n.Max)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OCOPY, ir.OCOMPLEX, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ // treated like other builtin calls (see e.g., OREAL)
+ n := n.(*ir.BinaryExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.expr(n.Y)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ } else {
+ w.op(ir.OEND)
+ }
+
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
+ n := n.(*ir.ConvExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OCONV)
+ }
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ w.expr(n.X)
+
+ case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if go117ExportTypes {
+ if n.Op() != ir.OPANIC {
+ w.typ(n.Type())
+ }
+ } else {
+ w.op(ir.OEND)
+ }
+
+ case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+ n := n.(*ir.CallExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.exprList(n.Args) // emits terminating OEND
+ // only append() calls may contain '...' arguments
+ if n.Op() == ir.OAPPEND {
+ w.bool(n.IsDDD)
+ } else if n.IsDDD {
+ base.Fatalf("exporter: unexpected '...' with %v call", n.Op())
+ }
+ if go117ExportTypes {
+ if n.Op() != ir.ODELETE && n.Op() != ir.OPRINT && n.Op() != ir.OPRINTN {
+ w.typ(n.Type())
+ }
+ }
+
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+ n := n.(*ir.CallExpr)
+ if go117ExportTypes {
+ w.op(n.Op())
+ } else {
+ w.op(ir.OCALL)
+ }
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.expr(n.X)
+ w.exprList(n.Args)
+ w.bool(n.IsDDD)
+ if go117ExportTypes {
+ w.exoticType(n.Type())
+ }
+
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ w.op(n.Op()) // must keep separate from OMAKE for importer
+ w.pos(n.Pos())
+ w.typ(n.Type())
+ switch {
+ default:
+ // empty list
+ w.op(ir.OEND)
+ case n.Cap != nil:
+ w.expr(n.Len)
+ w.expr(n.Cap)
+ w.op(ir.OEND)
+ case n.Len != nil && (n.Op() == ir.OMAKESLICE || !n.Len.Type().IsUntyped()):
+			// Note: the extra conditional exists because make(T), for
+			// T a map or chan type, gets an untyped zero added as
+			// an argument. Don't serialize that argument here.
+ w.expr(n.Len)
+ w.op(ir.OEND)
+ case n.Len != nil && go117ExportTypes:
+ w.expr(n.Len)
+ w.op(ir.OEND)
+ }
+
+ case ir.OLINKSYMOFFSET:
+ n := n.(*ir.LinksymOffsetExpr)
+ w.op(ir.OLINKSYMOFFSET)
+ w.pos(n.Pos())
+ w.string(n.Linksym.Name)
+ w.uint64(uint64(n.Offset_))
+ w.typ(n.Type())
+
+ // unary expressions
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV, ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.Chan)
+ w.expr(n.Value)
+
+ // binary expressions
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR, ir.OEFACE:
+ n := n.(*ir.BinaryExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.expr(n.Y)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ w.op(n.Op())
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.expr(n.Y)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ w.op(ir.OADDSTR)
+ w.pos(n.Pos())
+ w.exprList(n.List)
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.ODCLCONST:
+ // if exporting, DCLCONST should just be removed as its usage
+ // has already been replaced with literals
+
+ case ir.OFUNCINST:
+ n := n.(*ir.InstExpr)
+ w.op(ir.OFUNCINST)
+ w.pos(n.Pos())
+ w.expr(n.X)
+ w.uint64(uint64(len(n.Targs)))
+ for _, targ := range n.Targs {
+ w.typ(targ.Type())
+ }
+ if go117ExportTypes {
+ w.typ(n.Type())
+ }
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ w.op(ir.OSELRECV2)
+ w.pos(n.Pos())
+ w.stmtList(n.Init())
+ w.exprList(n.Lhs)
+ w.exprList(n.Rhs)
+ w.bool(n.Def)
+
+ default:
+ base.Fatalf("cannot export %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", n.Op(), int(n.Op()))
+ }
+}
+
+func (w *exportWriter) op(op ir.Op) {
+ if debug {
+ w.uint64(magic)
+ }
+ w.uint64(uint64(op))
+}
+
+func (w *exportWriter) exprsOrNil(a, b ir.Node) {
+ ab := 0
+ if a != nil {
+ ab |= 1
+ }
+ if b != nil {
+ ab |= 2
+ }
+ w.uint64(uint64(ab))
+ if ab&1 != 0 {
+ w.expr(a)
+ }
+ if ab&2 != 0 {
+ w.node(b)
+ }
+}
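+
+// Illustrative note (not part of the upstream source): the two-bit mask
+// written above tells the importer's exprsOrNil which operands follow:
+//
+//	a != nil, b == nil  ->  ab == 1, only a is encoded
+//	a == nil, b != nil  ->  ab == 2, only b is encoded
+//	a != nil, b != nil  ->  ab == 3, a then b are encoded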
+
+func (w *exportWriter) fieldList(list ir.Nodes) {
+ w.uint64(uint64(len(list)))
+ for _, n := range list {
+ n := n.(*ir.StructKeyExpr)
+ w.pos(n.Pos())
+ w.exoticField(n.Field)
+ w.expr(n.Value)
+ }
+}
+
+func (w *exportWriter) localName(n *ir.Name) {
+ if ir.IsBlank(n) {
+ w.int64(-1)
+ return
+ }
+
+ i, ok := w.dclIndex[n]
+ if !ok {
+ base.FatalfAt(n.Pos(), "missing from dclIndex: %+v", n)
+ }
+ w.int64(int64(i))
+}
+
+func (w *exportWriter) localIdent(s *types.Sym) {
+ if w.currPkg == nil {
+ base.Fatalf("missing currPkg")
+ }
+
+ // Anonymous parameters.
+ if s == nil {
+ w.string("")
+ return
+ }
+
+ name := s.Name
+ if name == "_" {
+ w.string("_")
+ return
+ }
+
+	// The names of autotmp variables aren't important; they just need to
+	// be unique. To stabilize the export data, simply write out "$autotmp"
+	// as a marker and let the importer generate its own unique name.
+ if strings.HasPrefix(name, ".autotmp_") {
+ w.string("$autotmp")
+ return
+ }
+
+ if i := strings.LastIndex(name, "."); i >= 0 && !strings.HasPrefix(name, LocalDictName) && !strings.HasPrefix(name, ".rcvr") {
+ base.Fatalf("unexpected dot in identifier: %v", name)
+ }
+
+ if s.Pkg != w.currPkg {
+ base.Fatalf("weird package in name: %v => %v from %q, not %q", s, name, s.Pkg.Path, w.currPkg.Path)
+ }
+
+ w.string(name)
+}
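+
+// Illustrative example (not part of the upstream source; the local name is
+// hypothetical): a temporary named ".autotmp_3" is exported as the stable
+// marker "$autotmp"; ident() in iimport.go replaces the marker with a freshly
+// generated autotmp name, so the export data does not depend on temporary
+// numbering.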
+
+type intWriter struct {
+ bytes.Buffer
+}
+
+func (w *intWriter) int64(x int64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutVarint(buf[:], x)
+ w.Write(buf[:n])
+}
+
+func (w *intWriter) uint64(x uint64) {
+ var buf [binary.MaxVarintLen64]byte
+ n := binary.PutUvarint(buf[:], x)
+ w.Write(buf[:n])
+}
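+
+// Illustrative sketch (not part of the upstream source): the intWriter and
+// intReader pair relies on the varint framing from encoding/binary, so values
+// round-trip as in this hypothetical snippet:
+//
+//	var w intWriter
+//	w.int64(-3) // zig-zag varint: the single byte 0x05
+//	v, _ := binary.ReadVarint(&w.Buffer)
+//	// v == -3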
+
+// If go117ExportTypes is true, then we write type information when
+// exporting function bodies, so those function bodies don't need to
+// be re-typechecked on import.
+// This flag also adds information to the serialized stream that was
+// previously recomputed during typechecking, such as specialized
+// opcodes (e.g. OXDOT refined to ODOTPTR) and ancillary
+// information (e.g. the length field for OSLICELIT).
+const go117ExportTypes = true
+const Go117ExportTypes = go117ExportTypes
+
+// The name used for dictionary parameters or local variables.
+const LocalDictName = ".dict"
diff --git a/src/cmd/compile/internal/typecheck/iimport.go b/src/cmd/compile/internal/typecheck/iimport.go
new file mode 100644
index 0000000..654aff8
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/iimport.go
@@ -0,0 +1,2011 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Indexed package import.
+// See iexport.go for the export data format.
+
+package typecheck
+
+import (
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "math/big"
+ "os"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+// An iimporterAndOffset identifies an importer and an offset within
+// its data section.
+type iimporterAndOffset struct {
+ p *iimporter
+ off uint64
+}
+
+var (
+ // DeclImporter maps from imported identifiers to an importer
+ // and offset where that identifier's declaration can be read.
+ DeclImporter = map[*types.Sym]iimporterAndOffset{}
+
+ // inlineImporter is like DeclImporter, but for inline bodies
+ // for function and method symbols.
+ inlineImporter = map[*types.Sym]iimporterAndOffset{}
+)
+
+// expandDecl returns immediately if n is already a Name node. Otherwise, n should
+// be an Ident node, and expandDecl reads in the definition of the specified
+// identifier from the appropriate package.
+func expandDecl(n ir.Node) ir.Node {
+ if n, ok := n.(*ir.Name); ok {
+ return n
+ }
+
+ id := n.(*ir.Ident)
+ if n := id.Sym().PkgDef(); n != nil {
+ return n.(*ir.Name)
+ }
+
+ r := importReaderFor(id.Sym(), DeclImporter)
+ if r == nil {
+ // Can happen if user tries to reference an undeclared name.
+ return n
+ }
+
+ return r.doDecl(n.Sym())
+}
+
+// ImportBody reads in the dcls and body of an imported function (which should not
+// yet have been read in).
+func ImportBody(fn *ir.Func) {
+ if fn.Inl.Body != nil {
+ base.Fatalf("%v already has inline body", fn)
+ }
+
+ r := importReaderFor(fn.Nname.Sym(), inlineImporter)
+ if r == nil {
+ base.Fatalf("missing import reader for %v", fn)
+ }
+
+ if inimport {
+ base.Fatalf("recursive inimport")
+ }
+ inimport = true
+ r.doInline(fn)
+ inimport = false
+}
+
+// HaveInlineBody reports whether we have fn's inline body available
+// for inlining.
+func HaveInlineBody(fn *ir.Func) bool {
+ if fn.Inl == nil {
+ return false
+ }
+
+ // Unified IR is much more conservative about pruning unreachable
+ // methods (at the cost of increased build artifact size).
+ if base.Debug.Unified != 0 {
+ return true
+ }
+
+ if fn.Inl.Body != nil {
+ return true
+ }
+
+ _, ok := inlineImporter[fn.Nname.Sym()]
+ return ok
+}
+
+func importReaderFor(sym *types.Sym, importers map[*types.Sym]iimporterAndOffset) *importReader {
+ x, ok := importers[sym]
+ if !ok {
+ return nil
+ }
+
+ return x.p.newReader(x.off, sym.Pkg)
+}
+
+type intReader struct {
+ *strings.Reader
+ pkg *types.Pkg
+}
+
+func (r *intReader) int64() int64 {
+ i, err := binary.ReadVarint(r.Reader)
+ if err != nil {
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
+ }
+ return i
+}
+
+func (r *intReader) uint64() uint64 {
+ i, err := binary.ReadUvarint(r.Reader)
+ if err != nil {
+ base.Errorf("import %q: read error: %v", r.pkg.Path, err)
+ base.ErrorExit()
+ }
+ return i
+}
+
+func ReadImports(pkg *types.Pkg, data string) {
+ ird := &intReader{strings.NewReader(data), pkg}
+
+ version := ird.uint64()
+ switch version {
+ case iexportVersionGo1_18, iexportVersionPosCol, iexportVersionGo1_11:
+ default:
+ base.Errorf("import %q: unknown export format version %d", pkg.Path, version)
+ base.ErrorExit()
+ }
+
+ sLen := int64(ird.uint64())
+ dLen := int64(ird.uint64())
+
+ // TODO(mdempsky): Replace os.SEEK_CUR with io.SeekCurrent after
+ // #44505 is fixed.
+ whence, _ := ird.Seek(0, os.SEEK_CUR)
+ stringData := data[whence : whence+sLen]
+ declData := data[whence+sLen : whence+sLen+dLen]
+ ird.Seek(sLen+dLen, os.SEEK_CUR)
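+	// Illustrative note (not part of the upstream source): the export data
+	// consumed here is laid out as
+	//
+	//	[version] [sLen] [dLen] [string section] [declaration section]
+	//	[declaration index] [inline body index]
+	//
+	// stringData and declData alias the two data sections; the two index
+	// loops below record per-symbol offsets into declData.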
+
+ p := &iimporter{
+ exportVersion: version,
+ ipkg: pkg,
+
+ pkgCache: map[uint64]*types.Pkg{},
+ posBaseCache: map[uint64]*src.PosBase{},
+ typCache: map[uint64]*types.Type{},
+
+ stringData: stringData,
+ declData: declData,
+ }
+
+ for i, pt := range predeclared() {
+ p.typCache[uint64(i)] = pt
+ }
+
+ // Declaration index.
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
+ pkgName := p.stringAt(ird.uint64())
+ pkgHeight := int(ird.uint64())
+ if pkg.Name == "" {
+ pkg.Name = pkgName
+ pkg.Height = pkgHeight
+ types.NumImport[pkgName]++
+
+ // TODO(mdempsky): This belongs somewhere else.
+ pkg.Lookup("_").Def = ir.BlankNode
+ } else {
+ if pkg.Name != pkgName {
+ base.Fatalf("conflicting package names %v and %v for path %q", pkg.Name, pkgName, pkg.Path)
+ }
+ if pkg.Height != pkgHeight {
+ base.Fatalf("conflicting package heights %v and %v for path %q", pkg.Height, pkgHeight, pkg.Path)
+ }
+ }
+
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.nameAt(ird.uint64()))
+ off := ird.uint64()
+
+ if _, ok := DeclImporter[s]; !ok {
+ DeclImporter[s] = iimporterAndOffset{p, off}
+ }
+ }
+ }
+
+ // Inline body index.
+ for nPkgs := ird.uint64(); nPkgs > 0; nPkgs-- {
+ pkg := p.pkgAt(ird.uint64())
+
+ for nSyms := ird.uint64(); nSyms > 0; nSyms-- {
+ s := pkg.Lookup(p.nameAt(ird.uint64()))
+ off := ird.uint64()
+
+ if _, ok := inlineImporter[s]; !ok {
+ inlineImporter[s] = iimporterAndOffset{p, off}
+ }
+ }
+ }
+}
+
+type iimporter struct {
+ exportVersion uint64
+ ipkg *types.Pkg
+
+ pkgCache map[uint64]*types.Pkg
+ posBaseCache map[uint64]*src.PosBase
+ typCache map[uint64]*types.Type
+
+ stringData string
+ declData string
+}
+
+func (p *iimporter) stringAt(off uint64) string {
+ var x [binary.MaxVarintLen64]byte
+ n := copy(x[:], p.stringData[off:])
+
+ slen, n := binary.Uvarint(x[:n])
+ if n <= 0 {
+ base.Fatalf("varint failed")
+ }
+ spos := off + uint64(n)
+ return p.stringData[spos : spos+slen]
+}
+
+// nameAt is the same as stringAt, except it replaces instances
+// of "" with the path of the package being imported.
+func (p *iimporter) nameAt(off uint64) string {
+ s := p.stringAt(off)
+ // Names of objects (functions, methods, globals) may include ""
+ // to represent the path name of the imported package.
+ // Replace "" with the imported package prefix. This occurs
+ // specifically for generics where the names of instantiations
+ // and dictionaries contain package-qualified type names.
+ // Include the dot to avoid matching with struct tags ending in '"'.
+ if strings.Contains(s, "\"\".") {
+ s = strings.Replace(s, "\"\".", p.ipkg.Prefix+".", -1)
+ }
+ return s
+}
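+
+// Illustrative example (not part of the upstream source; the identifier shape
+// is hypothetical): with p.ipkg.Prefix == "example.com/m", an exported
+// instantiation name such as `Pair["".K]` is rewritten by the strings.Replace
+// above to `Pair[example.com/m.K]`.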
+
+func (p *iimporter) posBaseAt(off uint64) *src.PosBase {
+ if posBase, ok := p.posBaseCache[off]; ok {
+ return posBase
+ }
+
+ file := p.stringAt(off)
+ posBase := src.NewFileBase(file, file)
+ p.posBaseCache[off] = posBase
+ return posBase
+}
+
+func (p *iimporter) pkgAt(off uint64) *types.Pkg {
+ if pkg, ok := p.pkgCache[off]; ok {
+ return pkg
+ }
+
+ pkg := p.ipkg
+ if pkgPath := p.stringAt(off); pkgPath != "" {
+ pkg = types.NewPkg(pkgPath, "")
+ }
+ p.pkgCache[off] = pkg
+ return pkg
+}
+
+// An importReader keeps state for reading an individual imported
+// object (declaration or inline body).
+type importReader struct {
+ strings.Reader
+ p *iimporter
+
+ currPkg *types.Pkg
+ prevBase *src.PosBase
+ prevLine int64
+ prevColumn int64
+
+ // curfn is the current function we're importing into.
+ curfn *ir.Func
+ // Slice of all dcls for function, including any interior closures
+ allDcls []*ir.Name
+ allClosureVars []*ir.Name
+ autotmpgen int
+}
+
+func (p *iimporter) newReader(off uint64, pkg *types.Pkg) *importReader {
+ r := &importReader{
+ p: p,
+ currPkg: pkg,
+ }
+ // (*strings.Reader).Reset wasn't added until Go 1.7, and we
+ // need to build with Go 1.4.
+ r.Reader = *strings.NewReader(p.declData[off:])
+ return r
+}
+
+func (r *importReader) string() string { return r.p.stringAt(r.uint64()) }
+func (r *importReader) name() string { return r.p.nameAt(r.uint64()) }
+func (r *importReader) posBase() *src.PosBase { return r.p.posBaseAt(r.uint64()) }
+func (r *importReader) pkg() *types.Pkg { return r.p.pkgAt(r.uint64()) }
+
+func (r *importReader) setPkg() {
+ r.currPkg = r.pkg()
+}
+
+func (r *importReader) doDecl(sym *types.Sym) *ir.Name {
+ tag := r.byte()
+ pos := r.pos()
+
+ switch tag {
+ case 'A':
+ typ := r.typ()
+
+ return importalias(pos, sym, typ)
+
+ case 'C':
+ typ := r.typ()
+ val := r.value(typ)
+
+ n := importconst(pos, sym, typ, val)
+ r.constExt(n)
+ return n
+
+ case 'F', 'G':
+ var tparams []*types.Field
+ if tag == 'G' {
+ tparams = r.tparamList()
+ }
+ typ := r.signature(nil, tparams)
+
+ n := importfunc(pos, sym, typ)
+ r.funcExt(n)
+ return n
+
+ case 'T', 'U':
+ // Types can be recursive. We need to setup a stub
+ // declaration before recursing.
+ n := importtype(pos, sym)
+ t := n.Type()
+
+ // Because of recursion, we need to defer width calculations and
+ // instantiations on intermediate types until the top-level type is
+ // fully constructed. Note that we can have recursion via type
+ // constraints.
+ types.DeferCheckSize()
+ deferDoInst()
+ if tag == 'U' {
+ rparams := r.typeList()
+ t.SetRParams(rparams)
+ }
+
+ underlying := r.typ()
+ t.SetUnderlying(underlying)
+
+ if underlying.IsInterface() {
+ // Finish up all type instantiations and CheckSize calls
+ // now that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
+ r.typeExt(t)
+ return n
+ }
+
+ ms := make([]*types.Field, r.uint64())
+ for i := range ms {
+ mpos := r.pos()
+ msym := r.selector()
+ recv := r.param()
+ mtyp := r.signature(recv, nil)
+
+ // MethodSym already marked m.Sym as a function.
+ m := ir.NewNameAt(mpos, ir.MethodSym(recv.Type, msym))
+ m.Class = ir.PFUNC
+ m.SetType(mtyp)
+
+ m.Func = ir.NewFunc(mpos)
+ m.Func.Nname = m
+
+ f := types.NewField(mpos, msym, mtyp)
+ f.Nname = m
+ ms[i] = f
+ }
+ t.Methods().Set(ms)
+
+ // Finish up all instantiations and CheckSize calls now
+ // that a top-level type is fully constructed.
+ resumeDoInst()
+ types.ResumeCheckSize()
+
+ r.typeExt(t)
+ for _, m := range ms {
+ r.methExt(m)
+ }
+ return n
+
+ case 'P':
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ if sym.Def != nil {
+ // Make sure we use the same type param type for the same
+ // name, whether it is created during types1-import or
+ // this types2-to-types1 translation.
+ return sym.Def.(*ir.Name)
+ }
+ // The typeparam index is set at the point where the containing type
+ // param list is imported.
+ t := types.NewTypeParam(sym, 0)
+ // Nname needed to save the pos.
+ nname := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ sym.Def = nname
+ nname.SetType(t)
+ t.SetNod(nname)
+ implicit := false
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ implicit = r.bool()
+ }
+ bound := r.typ()
+ if implicit {
+ bound.MarkImplicit()
+ }
+ t.SetBound(bound)
+ return nname
+
+ case 'V':
+ typ := r.typ()
+
+ n := importvar(pos, sym, typ)
+ r.varExt(n)
+ return n
+
+ default:
+ base.Fatalf("unexpected tag: %v", tag)
+ panic("unreachable")
+ }
+}
+
+func (r *importReader) value(typ *types.Type) constant.Value {
+ var kind constant.Kind
+ var valType *types.Type
+
+ if r.p.exportVersion >= iexportVersionGo1_18 {
+ // TODO: add support for using the kind in the non-typeparam case.
+ kind = constant.Kind(r.int64())
+ }
+
+ if typ.IsTypeParam() {
+ if r.p.exportVersion < iexportVersionGo1_18 {
+ // If a constant had a typeparam type, then we wrote out its
+ // actual constant kind as well.
+ kind = constant.Kind(r.int64())
+ }
+ switch kind {
+ case constant.Int:
+ valType = types.Types[types.TINT64]
+ case constant.Float:
+ valType = types.Types[types.TFLOAT64]
+ case constant.Complex:
+ valType = types.Types[types.TCOMPLEX128]
+ }
+ } else {
+ kind = constTypeOf(typ)
+ valType = typ
+ }
+
+ switch kind {
+ case constant.Bool:
+ return constant.MakeBool(r.bool())
+ case constant.String:
+ return constant.MakeString(r.string())
+ case constant.Int:
+ var i big.Int
+ r.mpint(&i, valType)
+ return constant.Make(&i)
+ case constant.Float:
+ return r.float(valType)
+ case constant.Complex:
+ return makeComplex(r.float(valType), r.float(valType))
+ }
+
+ base.Fatalf("unexpected value type: %v", typ)
+ panic("unreachable")
+}
+
+func (r *importReader) mpint(x *big.Int, typ *types.Type) {
+ signed, maxBytes := intSize(typ)
+
+ maxSmall := 256 - maxBytes
+ if signed {
+ maxSmall = 256 - 2*maxBytes
+ }
+ if maxBytes == 1 {
+ maxSmall = 256
+ }
+
+ n, _ := r.ReadByte()
+ if uint(n) < maxSmall {
+ v := int64(n)
+ if signed {
+ v >>= 1
+ if n&1 != 0 {
+ v = ^v
+ }
+ }
+ x.SetInt64(v)
+ return
+ }
+
+ v := -n
+ if signed {
+ v = -(n &^ 1) >> 1
+ }
+ if v < 1 || uint(v) > maxBytes {
+ base.Fatalf("weird decoding: %v, %v => %v", n, signed, v)
+ }
+ b := make([]byte, v)
+ r.Read(b)
+ x.SetBytes(b)
+ if signed && n&1 != 0 {
+ x.Neg(x)
+ }
+}
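+
+// Illustrative worked example (not part of the upstream source): for a signed
+// 8-byte type, maxSmall is 256-2*8 = 240, so one-byte encodings below 240 are
+// handled by the small-value branch above: the byte 0x22 decodes to
+// v = 0x22>>1 = 17 with the low bit clear, so x becomes +17. Larger values
+// fall through to the length-prefixed big-endian byte encoding handled by
+// x.SetBytes.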
+
+func (r *importReader) float(typ *types.Type) constant.Value {
+ var mant big.Int
+ r.mpint(&mant, typ)
+ var f big.Float
+ f.SetInt(&mant)
+ if f.Sign() != 0 {
+ f.SetMantExp(&f, int(r.int64()))
+ }
+ return constant.Make(&f)
+}
+
+func (r *importReader) mprat(orig constant.Value) constant.Value {
+ if !r.bool() {
+ return orig
+ }
+ var rat big.Rat
+ rat.SetString(r.string())
+ return constant.Make(&rat)
+}
+
+func (r *importReader) ident(selector bool) *types.Sym {
+ name := r.string()
+ if name == "" {
+ return nil
+ }
+ pkg := r.currPkg
+ if selector {
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ } else {
+ if name == "$autotmp" {
+ name = autotmpname(r.autotmpgen)
+ r.autotmpgen++
+ }
+ }
+ return pkg.Lookup(name)
+}
+
+func (r *importReader) localIdent() *types.Sym { return r.ident(false) }
+func (r *importReader) selector() *types.Sym { return r.ident(true) }
+
+func (r *importReader) qualifiedIdent() *ir.Ident {
+ name := r.name()
+ pkg := r.pkg()
+ sym := pkg.Lookup(name)
+ return ir.NewIdent(src.NoXPos, sym)
+}
+
+func (r *importReader) pos() src.XPos {
+ delta := r.int64()
+ r.prevColumn += delta >> 1
+ if delta&1 != 0 {
+ delta = r.int64()
+ r.prevLine += delta >> 1
+ if delta&1 != 0 {
+ r.prevBase = r.posBase()
+ }
+ }
+
+ if (r.prevBase == nil || r.prevBase.AbsFilename() == "") && r.prevLine == 0 && r.prevColumn == 0 {
+ // TODO(mdempsky): Remove once we reliably write
+ // position information for all nodes.
+ return src.NoXPos
+ }
+
+ if r.prevBase == nil {
+ base.Fatalf("missing posbase")
+ }
+ pos := src.MakePos(r.prevBase, uint(r.prevLine), uint(r.prevColumn))
+ return base.Ctxt.PosTable.XPos(pos)
+}
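+
+// Illustrative worked example (not part of the upstream source): positions are
+// delta-encoded, with the low bit of each varint signalling that more follows.
+// A node two columns to the right of the previous one, on the same line and in
+// the same file, is encoded as the single varint 4: the column delta is
+// 4>>1 = +2 and the clear low bit stops the decoding above.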
+
+func (r *importReader) typ() *types.Type {
+ // If this is a top-level type call, defer type instantiations until the
+ // type is fully constructed.
+ types.DeferCheckSize()
+ deferDoInst()
+ t := r.p.typAt(r.uint64())
+ resumeDoInst()
+ types.ResumeCheckSize()
+ return t
+}
+
+func (r *importReader) exoticType() *types.Type {
+ switch r.uint64() {
+ case exoticTypeNil:
+ return nil
+ case exoticTypeTuple:
+ funarg := types.Funarg(r.uint64())
+ n := r.uint64()
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ pos := r.pos()
+ var sym *types.Sym
+ switch r.uint64() {
+ case exoticTypeSymNil:
+ sym = nil
+ case exoticTypeSymNoPkg:
+ sym = types.NoPkg.Lookup(r.string())
+ case exoticTypeSymWithPkg:
+ pkg := r.pkg()
+ sym = pkg.Lookup(r.string())
+ default:
+ base.Fatalf("unknown symbol kind")
+ }
+ typ := r.typ()
+ f := types.NewField(pos, sym, typ)
+ fs[i] = f
+ }
+ t := types.NewStruct(types.NoPkg, fs)
+ t.StructType().Funarg = funarg
+ return t
+ case exoticTypeRecv:
+ var rcvr *types.Field
+ if r.bool() { // isFakeRecv
+ rcvr = types.FakeRecv()
+ } else {
+ rcvr = r.exoticParam()
+ }
+ return r.exoticSignature(rcvr)
+ case exoticTypeRegular:
+ return r.typ()
+ default:
+ base.Fatalf("bad kind of call type")
+ return nil
+ }
+}
+
+func (r *importReader) exoticSelector() *types.Sym {
+ name := r.string()
+ if name == "" {
+ return nil
+ }
+ pkg := r.currPkg
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ }
+ if r.uint64() != 0 {
+ pkg = r.pkg()
+ }
+ return pkg.Lookup(name)
+}
+
+func (r *importReader) exoticSignature(recv *types.Field) *types.Type {
+ var pkg *types.Pkg
+ if r.bool() { // hasPkg
+ pkg = r.pkg()
+ }
+ params := r.exoticParamList()
+ results := r.exoticParamList()
+ return types.NewSignature(pkg, recv, nil, params, results)
+}
+
+func (r *importReader) exoticParamList() []*types.Field {
+ n := r.uint64()
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ fs[i] = r.exoticParam()
+ }
+ return fs
+}
+
+func (r *importReader) exoticParam() *types.Field {
+ pos := r.pos()
+ sym := r.exoticSym()
+ off := r.uint64()
+ typ := r.exoticType()
+ ddd := r.bool()
+ f := types.NewField(pos, sym, typ)
+ f.Offset = int64(off)
+ if sym != nil {
+ f.Nname = ir.NewNameAt(pos, sym)
+ }
+ f.SetIsDDD(ddd)
+ return f
+}
+
+func (r *importReader) exoticField() *types.Field {
+ pos := r.pos()
+ sym := r.exoticSym()
+ off := r.uint64()
+ typ := r.exoticType()
+ note := r.string()
+ f := types.NewField(pos, sym, typ)
+ f.Offset = int64(off)
+ if sym != nil {
+ f.Nname = ir.NewNameAt(pos, sym)
+ }
+ f.Note = note
+ return f
+}
+
+func (r *importReader) exoticSym() *types.Sym {
+ name := r.string()
+ if name == "" {
+ return nil
+ }
+ var pkg *types.Pkg
+ if types.IsExported(name) {
+ pkg = types.LocalPkg
+ } else {
+ pkg = r.pkg()
+ }
+ return pkg.Lookup(name)
+}
+
+func (p *iimporter) typAt(off uint64) *types.Type {
+ t, ok := p.typCache[off]
+ if !ok {
+ if off < predeclReserved {
+ base.Fatalf("predeclared type missing from cache: %d", off)
+ }
+ t = p.newReader(off-predeclReserved, nil).typ1()
+		// Ensure size is calculated for imported types. Since CL 283313, the compiler
+		// does not compile functions immediately when it sees them. Instead, functions
+		// are pushed onto a compile queue and later drained from the queue for compiling.
+		// During this process, size calculation is disabled, so it is not safe to
+		// calculate sizes during SSA generation anymore. See issue #44732.
+ //
+ // No need to calc sizes for re-instantiated generic types, and
+ // they are not necessarily resolved until the top-level type is
+ // defined (because of recursive types).
+ if t.OrigType() == nil || !t.HasTParam() {
+ types.CheckSize(t)
+ }
+ p.typCache[off] = t
+ }
+ return t
+}
+
+func (r *importReader) typ1() *types.Type {
+ switch k := r.kind(); k {
+ default:
+ base.Fatalf("unexpected kind tag in %q: %v", r.p.ipkg.Path, k)
+ return nil
+
+ case definedType:
+ // We might be called from within doInline, in which
+ // case Sym.Def can point to declared parameters
+ // instead of the top-level types. Also, we don't
+		// support inlining functions with locally defined
+ // types. Therefore, this must be a package-scope
+ // type.
+ n := expandDecl(r.qualifiedIdent())
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+ }
+ return n.Type()
+ case pointerType:
+ return types.NewPtr(r.typ())
+ case sliceType:
+ return types.NewSlice(r.typ())
+ case arrayType:
+ n := r.uint64()
+ return types.NewArray(r.typ(), int64(n))
+ case chanType:
+ dir := types.ChanDir(r.uint64())
+ return types.NewChan(r.typ(), dir)
+ case mapType:
+ return types.NewMap(r.typ(), r.typ())
+
+ case signatureType:
+ r.setPkg()
+ return r.signature(nil, nil)
+
+ case structType:
+ r.setPkg()
+
+ fs := make([]*types.Field, r.uint64())
+ for i := range fs {
+ pos := r.pos()
+ sym := r.selector()
+ typ := r.typ()
+ emb := r.bool()
+ note := r.string()
+
+ f := types.NewField(pos, sym, typ)
+ if emb {
+ f.Embedded = 1
+ }
+ f.Note = note
+ fs[i] = f
+ }
+
+ return types.NewStruct(r.currPkg, fs)
+
+ case interfaceType:
+ r.setPkg()
+
+ embeddeds := make([]*types.Field, r.uint64())
+ for i := range embeddeds {
+ pos := r.pos()
+ typ := r.typ()
+
+ embeddeds[i] = types.NewField(pos, nil, typ)
+ }
+
+ methods := make([]*types.Field, r.uint64())
+ for i := range methods {
+ pos := r.pos()
+ sym := r.selector()
+ typ := r.signature(types.FakeRecv(), nil)
+
+ methods[i] = types.NewField(pos, sym, typ)
+ }
+
+ if len(embeddeds)+len(methods) == 0 {
+ return types.Types[types.TINTER]
+ }
+
+ t := types.NewInterface(r.currPkg, append(embeddeds, methods...), false)
+
+ // Ensure we expand the interface in the frontend (#25055).
+ types.CheckSize(t)
+ return t
+
+ case typeParamType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected type param type")
+ }
+ // Similar to code for defined types, since we "declared"
+ // typeparams to deal with recursion (typeparam is used within its
+ // own type bound).
+ ident := r.qualifiedIdent()
+ if ident.Sym().Def != nil {
+ return ident.Sym().Def.(*ir.Name).Type()
+ }
+ n := expandDecl(ident)
+ if n.Op() != ir.OTYPE {
+ base.Fatalf("expected OTYPE, got %v: %v, %v", n.Op(), n.Sym(), n)
+ }
+ return n.Type()
+
+ case instanceType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ pos := r.pos()
+ len := r.uint64()
+ targs := make([]*types.Type, len)
+ for i := range targs {
+ targs[i] = r.typ()
+ }
+ baseType := r.typ()
+ t := Instantiate(pos, baseType, targs)
+ return t
+
+ case unionType:
+ if r.p.exportVersion < iexportVersionGenerics {
+ base.Fatalf("unexpected instantiation type")
+ }
+ nt := int(r.uint64())
+ terms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ for i := range terms {
+ tildes[i] = r.bool()
+ terms[i] = r.typ()
+ }
+ return types.NewUnion(terms, tildes)
+ }
+}
+
+func (r *importReader) kind() itag {
+ return itag(r.uint64())
+}
+
+func (r *importReader) signature(recv *types.Field, tparams []*types.Field) *types.Type {
+ params := r.paramList()
+ results := r.paramList()
+ if n := len(params); n > 0 {
+ params[n-1].SetIsDDD(r.bool())
+ }
+ return types.NewSignature(r.currPkg, recv, tparams, params, results)
+}
+
+func (r *importReader) typeList() []*types.Type {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ ts := make([]*types.Type, n)
+ for i := range ts {
+ ts[i] = r.typ()
+ if ts[i].IsTypeParam() {
+ ts[i].SetIndex(i)
+ }
+ }
+ return ts
+}
+
+func (r *importReader) tparamList() []*types.Field {
+ n := r.uint64()
+ if n == 0 {
+ return nil
+ }
+ fs := make([]*types.Field, n)
+ for i := range fs {
+ typ := r.typ()
+ typ.SetIndex(i)
+ fs[i] = types.NewField(typ.Pos(), typ.Sym(), typ)
+ }
+ return fs
+}
+
+func (r *importReader) paramList() []*types.Field {
+ fs := make([]*types.Field, r.uint64())
+ for i := range fs {
+ fs[i] = r.param()
+ }
+ return fs
+}
+
+func (r *importReader) param() *types.Field {
+ return types.NewField(r.pos(), r.localIdent(), r.typ())
+}
+
+func (r *importReader) bool() bool {
+ return r.uint64() != 0
+}
+
+func (r *importReader) int64() int64 {
+ n, err := binary.ReadVarint(r)
+ if err != nil {
+ base.Fatalf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) uint64() uint64 {
+ n, err := binary.ReadUvarint(r)
+ if err != nil {
+ base.Fatalf("readVarint: %v", err)
+ }
+ return n
+}
+
+func (r *importReader) byte() byte {
+ x, err := r.ReadByte()
+ if err != nil {
+ base.Fatalf("declReader.ReadByte: %v", err)
+ }
+ return x
+}
+
+// Compiler-specific extensions.
+
+func (r *importReader) constExt(n *ir.Name) {
+ switch n.Type() {
+ case types.UntypedFloat:
+ n.SetVal(r.mprat(n.Val()))
+ case types.UntypedComplex:
+ v := n.Val()
+ re := r.mprat(constant.Real(v))
+ im := r.mprat(constant.Imag(v))
+ n.SetVal(makeComplex(re, im))
+ }
+}
+
+func (r *importReader) varExt(n *ir.Name) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
+}
+
+func (r *importReader) funcExt(n *ir.Name) {
+ r.linkname(n.Sym())
+ r.symIdx(n.Sym())
+
+ n.Func.ABI = obj.ABI(r.uint64())
+
+	// Make sure the //go:noinline pragma is imported (so stenciled functions
+	// have the same noinline status as the corresponding generic function).
+ n.Func.Pragma = ir.PragmaFlag(r.uint64())
+
+ // Escape analysis.
+ for _, fs := range &types.RecvsParams {
+ for _, f := range fs(n.Type()).FieldSlice() {
+ f.Note = r.string()
+ }
+ }
+
+ // Inline body.
+ if u := r.uint64(); u > 0 {
+ n.Func.Inl = &ir.Inline{
+ Cost: int32(u - 1),
+ CanDelayResults: r.bool(),
+ }
+ n.Func.Endlineno = r.pos()
+ }
+}
+
+func (r *importReader) methExt(m *types.Field) {
+ if r.bool() {
+ m.SetNointerface(true)
+ }
+ r.funcExt(m.Nname.(*ir.Name))
+}
+
+func (r *importReader) linkname(s *types.Sym) {
+ s.Linkname = r.string()
+}
+
+func (r *importReader) symIdx(s *types.Sym) {
+ lsym := s.Linksym()
+ idx := int32(r.int64())
+ if idx != -1 {
+ if s.Linkname != "" {
+ base.Fatalf("bad index for linknamed symbol: %v %d\n", lsym, idx)
+ }
+ lsym.SymIdx = idx
+ lsym.Set(obj.AttrIndexed, true)
+ }
+}
+
+func (r *importReader) typeExt(t *types.Type) {
+ t.SetNotInHeap(r.bool())
+ SetBaseTypeIndex(t, r.int64(), r.int64())
+}
+
+func SetBaseTypeIndex(t *types.Type, i, pi int64) {
+ if t.Obj() == nil {
+ base.Fatalf("SetBaseTypeIndex on non-defined type %v", t)
+ }
+ if i != -1 && pi != -1 {
+ typeSymIdx[t] = [2]int64{i, pi}
+ }
+}
+
+// Map imported type T to the index of type descriptor symbols of T and *T,
+// so we can use index to reference the symbol.
+// TODO(mdempsky): Store this information directly in the Type's Name.
+var typeSymIdx = make(map[*types.Type][2]int64)
+
+func BaseTypeIndex(t *types.Type) int64 {
+ tbase := t
+ if t.IsPtr() && t.Sym() == nil && t.Elem().Sym() != nil {
+ tbase = t.Elem()
+ }
+ i, ok := typeSymIdx[tbase]
+ if !ok {
+ return -1
+ }
+ if t != tbase {
+ return i[1]
+ }
+ return i[0]
+}
+
+func (r *importReader) doInline(fn *ir.Func) {
+ if len(fn.Inl.Body) != 0 {
+ base.Fatalf("%v already has inline body", fn)
+ }
+
+ //fmt.Printf("Importing %s\n", fn.Nname.Sym().Name)
+ r.funcBody(fn)
+
+ importlist = append(importlist, fn)
+
+ if base.Flag.E > 0 && base.Flag.LowerM > 2 {
+ if base.Flag.LowerM > 3 {
+ fmt.Printf("inl body for %v %v: %+v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
+ } else {
+ fmt.Printf("inl body for %v %v: %v\n", fn, fn.Type(), ir.Nodes(fn.Inl.Body))
+ }
+ }
+}
+
+// ----------------------------------------------------------------------------
+// Inlined function bodies
+
+// Approach: Read nodes and use them to create/declare the same data structures
+// as done originally by the (hidden) parser by closely following the parser's
+// original code. In other words, "parsing" the import data (which happens to
+// be encoded in binary rather than textual form) is the best way at the moment to
+// re-establish the syntax tree's invariants. At some future point we might be
+// able to avoid this round-about way and create the rewritten nodes directly,
+// possibly avoiding a lot of duplicate work (name resolution, type checking).
+//
+// Refined nodes (e.g., ODOTPTR as a refinement of OXDOT) are exported as their
+// unrefined nodes (since this is what the importer uses). The respective case
+// entries are unreachable in the importer.
+
+func (r *importReader) funcBody(fn *ir.Func) {
+ outerfn := r.curfn
+ r.curfn = fn
+
+ // Import local declarations.
+ fn.Inl.Dcl = r.readFuncDcls(fn)
+
+ // Import function body.
+ body := r.stmtList()
+ if body == nil {
+ // Make sure empty body is not interpreted as
+ // no inlineable body (see also parser.fnbody)
+ // (not doing so can cause significant performance
+ // degradation due to unnecessary calls to empty
+ // functions).
+ body = []ir.Node{}
+ }
+ if go117ExportTypes {
+ ir.VisitList(body, func(n ir.Node) {
+ n.SetTypecheck(1)
+ })
+ }
+ fn.Inl.Body = body
+
+ r.curfn = outerfn
+ if base.Flag.W >= 3 {
+ fmt.Printf("Imported for %v", fn)
+ ir.DumpList("", fn.Inl.Body)
+ }
+}
+
+func (r *importReader) readNames(fn *ir.Func) []*ir.Name {
+ dcls := make([]*ir.Name, r.int64())
+ for i := range dcls {
+ n := ir.NewDeclNameAt(r.pos(), ir.ONAME, r.localIdent())
+ n.Class = ir.PAUTO // overwritten below for parameters/results
+ n.Curfn = fn
+ n.SetType(r.typ())
+ dcls[i] = n
+ }
+ r.allDcls = append(r.allDcls, dcls...)
+ return dcls
+}
+
+func (r *importReader) readFuncDcls(fn *ir.Func) []*ir.Name {
+ dcls := r.readNames(fn)
+
+ // Fixup parameter classes and associate with their
+ // signature's type fields.
+ i := 0
+ fix := func(f *types.Field, class ir.Class) {
+ if class == ir.PPARAM && (f.Sym == nil || f.Sym.Name == "_") {
+ return
+ }
+ n := dcls[i]
+ n.Class = class
+ f.Nname = n
+ i++
+ }
+
+ typ := fn.Type()
+ if recv := typ.Recv(); recv != nil {
+ fix(recv, ir.PPARAM)
+ }
+ for _, f := range typ.Params().FieldSlice() {
+ fix(f, ir.PPARAM)
+ }
+ for _, f := range typ.Results().FieldSlice() {
+ fix(f, ir.PPARAMOUT)
+ }
+ return dcls
+}
+
+func (r *importReader) localName() *ir.Name {
+ i := r.int64()
+ if i == -1 {
+ return ir.BlankNode.(*ir.Name)
+ }
+ if i < 0 {
+ return r.allClosureVars[-i-2]
+ }
+ return r.allDcls[i]
+}
+
+func (r *importReader) stmtList() []ir.Node {
+ var list []ir.Node
+ for {
+ n := r.node()
+ if n == nil {
+ break
+ }
+ // OBLOCK nodes are not written to the import data directly,
+ // but the handling of ODCL calls liststmt, which creates one.
+ // Inline them into the statement list.
+ if n.Op() == ir.OBLOCK {
+ n := n.(*ir.BlockStmt)
+ list = append(list, n.List...)
+ continue
+ }
+ if len(list) > 0 {
+ // check for an optional label that can only immediately
+ // precede a for/range/select/switch statement.
+ if last := list[len(list)-1]; last.Op() == ir.OLABEL {
+ label := last.(*ir.LabelStmt).Label
+ switch n.Op() {
+ case ir.OFOR:
+ n.(*ir.ForStmt).Label = label
+ case ir.ORANGE:
+ n.(*ir.RangeStmt).Label = label
+ case ir.OSELECT:
+ n.(*ir.SelectStmt).Label = label
+ case ir.OSWITCH:
+ n.(*ir.SwitchStmt).Label = label
+ }
+ }
+ }
+ list = append(list, n)
+ }
+ return list
+}
+
+func (r *importReader) caseList(switchExpr ir.Node) []*ir.CaseClause {
+ namedTypeSwitch := isNamedTypeSwitch(switchExpr)
+
+ cases := make([]*ir.CaseClause, r.uint64())
+ for i := range cases {
+ cas := ir.NewCaseStmt(r.pos(), nil, nil)
+ cas.List = r.stmtList()
+ if namedTypeSwitch {
+ cas.Var = r.localName()
+ cas.Var.Defn = switchExpr
+ }
+ cas.Body = r.stmtList()
+ cases[i] = cas
+ }
+ return cases
+}
+
+func (r *importReader) commList() []*ir.CommClause {
+ cases := make([]*ir.CommClause, r.uint64())
+ for i := range cases {
+ pos := r.pos()
+ defaultCase := r.bool()
+ var comm ir.Node
+ if !defaultCase {
+ comm = r.node()
+ }
+ cases[i] = ir.NewCommStmt(pos, comm, r.stmtList())
+ }
+ return cases
+}
+
+func (r *importReader) exprList() []ir.Node {
+ var list []ir.Node
+ for {
+ n := r.expr()
+ if n == nil {
+ break
+ }
+ list = append(list, n)
+ }
+ return list
+}
+
+func (r *importReader) expr() ir.Node {
+ n := r.node()
+ if n != nil && n.Op() == ir.OBLOCK {
+ n := n.(*ir.BlockStmt)
+ base.Fatalf("unexpected block node: %v", n)
+ }
+ return n
+}
+
+// TODO(gri) split into expr and stmt
+func (r *importReader) node() ir.Node {
+ op := r.op()
+ switch op {
+ // expressions
+ // case OPAREN:
+ // unreachable - unpacked by exporter
+
+ case ir.ONIL:
+ pos := r.pos()
+ typ := r.typ()
+
+ n := ir.NewNilExpr(pos)
+ n.SetType(typ)
+ return n
+
+ case ir.OLITERAL:
+ pos := r.pos()
+ typ := r.typ()
+
+ n := ir.NewBasicLit(pos, r.value(typ))
+ n.SetType(typ)
+ return n
+
+ case ir.ONONAME:
+ isKey := r.bool()
+ n := r.qualifiedIdent()
+ if go117ExportTypes {
+ var n2 ir.Node = n
+ // Key ONONAME entries should not be resolved - they should
+ // stay as identifiers.
+ if !isKey {
+ n2 = Resolve(n)
+ }
+ typ := r.typ()
+ if n2.Type() == nil {
+ n2.SetType(typ)
+ }
+ return n2
+ }
+ return n
+
+ case ir.ONAME:
+ isBuiltin := r.bool()
+ if isBuiltin {
+ pkg := types.BuiltinPkg
+ if r.bool() {
+ pkg = types.UnsafePkg
+ }
+ return pkg.Lookup(r.string()).Def.(*ir.Name)
+ }
+ return r.localName()
+
+ // case OPACK, ONONAME:
+ // unreachable - should have been resolved by typechecking
+
+ case ir.OTYPE:
+ return ir.TypeNode(r.typ())
+
+ case ir.ODYNAMICTYPE:
+ n := ir.NewDynamicType(r.pos(), r.expr())
+ if r.bool() {
+ n.ITab = r.expr()
+ }
+ n.SetType(r.typ())
+ return n
+
+ case ir.OTYPESW:
+ pos := r.pos()
+ var tag *ir.Ident
+ if s := r.localIdent(); s != nil {
+ tag = ir.NewIdent(pos, s)
+ }
+ return ir.NewTypeSwitchGuard(pos, tag, r.expr())
+
+ // case OTARRAY, OTMAP, OTCHAN, OTSTRUCT, OTINTER, OTFUNC:
+ // unreachable - should have been resolved by typechecking
+
+ case ir.OCLOSURE:
+ //println("Importing CLOSURE")
+ pos := r.pos()
+ r.setPkg()
+ typ := r.signature(nil, nil)
+ r.setPkg()
+
+ // All the remaining code below is similar to (*noder).funcLit(), but
+ // with Dcls and ClosureVars lists already set up
+ fn := ir.NewClosureFunc(pos, true)
+ fn.Nname.SetType(typ)
+
+ cvars := make([]*ir.Name, r.int64())
+ for i := range cvars {
+ cvars[i] = ir.CaptureName(r.pos(), fn, r.localName().Canonical())
+ if go117ExportTypes && cvars[i].Defn == nil {
+ base.Fatalf("bad import of closure variable")
+ }
+ }
+ fn.ClosureVars = cvars
+ r.allClosureVars = append(r.allClosureVars, cvars...)
+
+ fn.Inl = &ir.Inline{}
+ // Read in the Dcls and Body of the closure after temporarily
+ // setting r.curfn to fn.
+ r.funcBody(fn)
+ fn.Dcl = fn.Inl.Dcl
+ fn.Body = fn.Inl.Body
+ if len(fn.Body) == 0 {
+ // An empty closure must be represented as a single empty
+ // block statement, else it will be dropped.
+ fn.Body = []ir.Node{ir.NewBlockStmt(src.NoXPos, nil)}
+ }
+ fn.Inl = nil
+
+ ir.FinishCaptureNames(pos, r.curfn, fn)
+
+ clo := fn.OClosure
+ if go117ExportTypes {
+ clo.SetType(typ)
+ }
+ return clo
+
+ case ir.OSTRUCTLIT:
+ if go117ExportTypes {
+ pos := r.pos()
+ typ := r.typ()
+ list := r.fieldList()
+ n := ir.NewCompLitExpr(pos, ir.OSTRUCTLIT, nil, list)
+ n.SetType(typ)
+ return n
+ }
+ return ir.NewCompLitExpr(r.pos(), ir.OCOMPLIT, ir.TypeNode(r.typ()), r.fieldList())
+
+ case ir.OCOMPLIT:
+ pos := r.pos()
+ t := r.typ()
+ n := ir.NewCompLitExpr(pos, ir.OCOMPLIT, ir.TypeNode(t), r.exprList())
+ n.SetType(t)
+ return n
+
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ if !go117ExportTypes {
+ // unreachable - mapped to OCOMPLIT by exporter
+ goto error
+ }
+ pos := r.pos()
+ typ := r.typ()
+ list := r.exprList()
+ n := ir.NewCompLitExpr(pos, op, ir.TypeNode(typ), list)
+ n.SetType(typ)
+ if op == ir.OSLICELIT {
+ n.Len = int64(r.uint64())
+ }
+ return n
+
+ case ir.OKEY:
+ return ir.NewKeyExpr(r.pos(), r.expr(), r.expr())
+
+ // case OSTRUCTKEY:
+ // unreachable - handled in case OSTRUCTLIT by elemList
+
+ case ir.OXDOT, ir.ODOT, ir.ODOTPTR, ir.ODOTINTER, ir.ODOTMETH, ir.OMETHVALUE, ir.OMETHEXPR:
+ // For !go117ExportTypes, we should only see OXDOT.
+ // For go117ExportTypes, we usually see all the other ops, but can see
+ // OXDOT for generic functions.
+ if op != ir.OXDOT && !go117ExportTypes {
+ goto error
+ }
+ pos := r.pos()
+ expr := r.expr()
+ sel := r.exoticSelector()
+ n := ir.NewSelectorExpr(pos, op, expr, sel)
+ if go117ExportTypes {
+ n.SetType(r.exoticType())
+ switch op {
+ case ir.OXDOT:
+ hasSelection := r.bool()
+ // We reconstruct n.Selection for method calls on
+ // generic types and method calls due to type param
+ // bounds. Otherwise, n.Selection is nil.
+ if hasSelection {
+ n1 := ir.NewSelectorExpr(pos, op, expr, sel)
+ AddImplicitDots(n1)
+ var m *types.Field
+ if n1.X.Type().IsTypeParam() {
+ genType := n1.X.Type().Bound()
+ m = Lookdot1(n1, sel, genType, genType.AllMethods(), 1)
+ } else {
+ genType := types.ReceiverBaseType(n1.X.Type())
+ if genType.IsInstantiatedGeneric() {
+ genType = genType.OrigType()
+ }
+ m = Lookdot1(n1, sel, genType, genType.Methods(), 1)
+ }
+ assert(m != nil)
+ n.Selection = m
+ }
+ case ir.ODOT, ir.ODOTPTR, ir.ODOTINTER:
+ n.Selection = r.exoticField()
+ case ir.OMETHEXPR:
+ n = typecheckMethodExpr(n).(*ir.SelectorExpr)
+ case ir.ODOTMETH, ir.OMETHVALUE:
+ // These require a Lookup to link to the correct declaration.
+ rcvrType := expr.Type()
+ typ := n.Type()
+ n.Selection = Lookdot(n, rcvrType, 1)
+ if op == ir.OMETHVALUE {
+ // Lookdot clobbers the opcode and type, undo that.
+ n.SetOp(op)
+ n.SetType(typ)
+ }
+ }
+ }
+ return n
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := ir.NewTypeAssertExpr(r.pos(), r.expr(), nil)
+ n.SetType(r.typ())
+ if go117ExportTypes {
+ n.SetOp(op)
+ }
+ return n
+
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := ir.NewDynamicTypeAssertExpr(r.pos(), op, r.expr(), r.expr())
+ n.SetType(r.typ())
+ return n
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := ir.NewIndexExpr(r.pos(), r.expr(), r.expr())
+ if go117ExportTypes {
+ n.SetOp(op)
+ n.SetType(r.exoticType())
+ if op == ir.OINDEXMAP {
+ n.Assigned = r.bool()
+ }
+ }
+ return n
+
+ case ir.OSLICE, ir.OSLICESTR, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR:
+ pos, x := r.pos(), r.expr()
+ low, high := r.exprsOrNil()
+ var max ir.Node
+ if op.IsSlice3() {
+ max = r.expr()
+ }
+ n := ir.NewSliceExpr(pos, op, x, low, high, max)
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OCONV, ir.OCONVIFACE, ir.OCONVIDATA, ir.OCONVNOP, ir.OBYTES2STR, ir.ORUNES2STR, ir.OSTR2BYTES, ir.OSTR2RUNES, ir.ORUNESTR, ir.OSLICE2ARRPTR:
+ if !go117ExportTypes && op != ir.OCONV {
+ // unreachable - mapped to OCONV case by exporter
+ goto error
+ }
+ return ir.NewConvExpr(r.pos(), op, r.typ(), r.expr())
+
+ case ir.OCOPY, ir.OCOMPLEX, ir.OREAL, ir.OIMAG, ir.OAPPEND, ir.OCAP, ir.OCLOSE, ir.ODELETE, ir.OLEN, ir.OMAKE, ir.ONEW, ir.OPANIC, ir.ORECOVER, ir.OPRINT, ir.OPRINTN, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ if go117ExportTypes {
+ switch op {
+ case ir.OCOPY, ir.OCOMPLEX, ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ n := ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
+ n.SetType(r.typ())
+ return n
+ case ir.OREAL, ir.OIMAG, ir.OCAP, ir.OCLOSE, ir.OLEN, ir.ONEW, ir.OPANIC:
+ n := ir.NewUnaryExpr(r.pos(), op, r.expr())
+ if op != ir.OPANIC {
+ n.SetType(r.typ())
+ }
+ return n
+ case ir.OAPPEND, ir.ODELETE, ir.ORECOVER, ir.OPRINT, ir.OPRINTN:
+ n := ir.NewCallExpr(r.pos(), op, nil, r.exprList())
+ if op == ir.OAPPEND {
+ n.IsDDD = r.bool()
+ }
+ if op == ir.OAPPEND || op == ir.ORECOVER {
+ n.SetType(r.typ())
+ }
+ return n
+ }
+ // ir.OMAKE
+ goto error
+ }
+ n := builtinCall(r.pos(), op)
+ n.Args = r.exprList()
+ if op == ir.OAPPEND {
+ n.IsDDD = r.bool()
+ }
+ return n
+
+ case ir.OCALL, ir.OCALLFUNC, ir.OCALLMETH, ir.OCALLINTER, ir.OGETG:
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewCallExpr(pos, ir.OCALL, r.expr(), r.exprList())
+ if go117ExportTypes {
+ n.SetOp(op)
+ }
+ n.SetInit(init)
+ n.IsDDD = r.bool()
+ if go117ExportTypes {
+ n.SetType(r.exoticType())
+ }
+ return n
+
+ case ir.OMAKEMAP, ir.OMAKECHAN, ir.OMAKESLICE:
+ if go117ExportTypes {
+ pos := r.pos()
+ typ := r.typ()
+ list := r.exprList()
+ var len_, cap_ ir.Node
+ if len(list) > 0 {
+ len_ = list[0]
+ }
+ if len(list) > 1 {
+ cap_ = list[1]
+ }
+ n := ir.NewMakeExpr(pos, op, len_, cap_)
+ n.SetType(typ)
+ return n
+ }
+ n := builtinCall(r.pos(), ir.OMAKE)
+ n.Args.Append(ir.TypeNode(r.typ()))
+ n.Args.Append(r.exprList()...)
+ return n
+
+ case ir.OLINKSYMOFFSET:
+ pos := r.pos()
+ name := r.string()
+ off := r.uint64()
+ typ := r.typ()
+ return ir.NewLinksymOffsetExpr(pos, Lookup(name).Linksym(), int64(off), typ)
+
+ // unary expressions
+ case ir.OPLUS, ir.ONEG, ir.OBITNOT, ir.ONOT, ir.ORECV, ir.OIDATA:
+ n := ir.NewUnaryExpr(r.pos(), op, r.expr())
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OADDR, ir.OPTRLIT:
+ if go117ExportTypes {
+ pos := r.pos()
+ expr := r.expr()
+ expr.SetTypecheck(1) // we do this for all nodes after importing, but do it now so markAddrOf can see it.
+ n := NodAddrAt(pos, expr)
+ n.SetOp(op)
+ n.SetType(r.typ())
+ return n
+ }
+ n := NodAddrAt(r.pos(), r.expr())
+ return n
+
+ case ir.ODEREF:
+ n := ir.NewStarExpr(r.pos(), r.expr())
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ // binary expressions
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT,
+ ir.OLSH, ir.OMOD, ir.OMUL, ir.ONE, ir.OOR, ir.ORSH, ir.OSUB, ir.OXOR, ir.OEFACE:
+ n := ir.NewBinaryExpr(r.pos(), op, r.expr(), r.expr())
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OANDAND, ir.OOROR:
+ n := ir.NewLogicalExpr(r.pos(), op, r.expr(), r.expr())
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OSEND:
+ return ir.NewSendStmt(r.pos(), r.expr(), r.expr())
+
+ case ir.OADDSTR:
+ pos := r.pos()
+ list := r.exprList()
+ if go117ExportTypes {
+ n := ir.NewAddStringExpr(pos, list)
+ n.SetType(r.typ())
+ return n
+ }
+ x := list[0]
+ for _, y := range list[1:] {
+ x = ir.NewBinaryExpr(pos, ir.OADD, x, y)
+ }
+ return x
+
+ // --------------------------------------------------------------------
+ // statements
+ case ir.ODCL:
+ var stmts ir.Nodes
+ n := r.localName()
+ stmts.Append(ir.NewDecl(n.Pos(), ir.ODCL, n))
+ stmts.Append(ir.NewAssignStmt(n.Pos(), n, nil))
+ return ir.NewBlockStmt(n.Pos(), stmts)
+
+ // case OASWB:
+ // unreachable - never exported
+
+ case ir.OAS:
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignStmt(pos, r.expr(), r.expr())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
+
+ case ir.OASOP:
+ n := ir.NewAssignOpStmt(r.pos(), r.op(), r.expr(), nil)
+ if !r.bool() {
+ n.Y = ir.NewInt(1)
+ n.IncDec = true
+ } else {
+ n.Y = r.expr()
+ }
+ return n
+
+ case ir.OAS2, ir.OAS2DOTTYPE, ir.OAS2FUNC, ir.OAS2MAPR, ir.OAS2RECV:
+ if !go117ExportTypes && op != ir.OAS2 {
+ // unreachable - mapped to case OAS2 by exporter
+ goto error
+ }
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignListStmt(pos, op, r.exprList(), r.exprList())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
+
+ case ir.ORETURN:
+ return ir.NewReturnStmt(r.pos(), r.exprList())
+
+ // case ORETJMP:
+	// unreachable - generated by compiler for trampoline routines (not exported)
+
+ case ir.OGO, ir.ODEFER:
+ return ir.NewGoDeferStmt(r.pos(), op, r.expr())
+
+ case ir.OIF:
+ pos, init := r.pos(), r.stmtList()
+ n := ir.NewIfStmt(pos, r.expr(), r.stmtList(), r.stmtList())
+ n.SetInit(init)
+ return n
+
+ case ir.OFOR:
+ pos, init := r.pos(), r.stmtList()
+ cond, post := r.exprsOrNil()
+ n := ir.NewForStmt(pos, nil, cond, post, r.stmtList())
+ n.SetInit(init)
+ return n
+
+ case ir.ORANGE:
+ pos, init := r.pos(), r.stmtList()
+ k, v := r.exprsOrNil()
+ n := ir.NewRangeStmt(pos, k, v, r.expr(), r.stmtList())
+ n.SetInit(init)
+ return n
+
+ case ir.OSELECT:
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewSelectStmt(pos, r.commList())
+ n.SetInit(init)
+ return n
+
+ case ir.OSWITCH:
+ pos := r.pos()
+ init := r.stmtList()
+ x, _ := r.exprsOrNil()
+ n := ir.NewSwitchStmt(pos, x, r.caseList(x))
+ n.SetInit(init)
+ return n
+
+ // case OCASE:
+ // handled by caseList
+
+ case ir.OFALL:
+ return ir.NewBranchStmt(r.pos(), ir.OFALL, nil)
+
+ // case OEMPTY:
+ // unreachable - not emitted by exporter
+
+ case ir.OBREAK, ir.OCONTINUE, ir.OGOTO:
+ pos := r.pos()
+ var sym *types.Sym
+ if label := r.string(); label != "" {
+ sym = Lookup(label)
+ }
+ return ir.NewBranchStmt(pos, op, sym)
+
+ case ir.OLABEL:
+ return ir.NewLabelStmt(r.pos(), Lookup(r.string()))
+
+ case ir.OEND:
+ return nil
+
+ case ir.OFUNCINST:
+ pos := r.pos()
+ x := r.expr()
+ ntargs := r.uint64()
+ var targs []ir.Node
+ if ntargs > 0 {
+ targs = make([]ir.Node, ntargs)
+ for i := range targs {
+ targs[i] = ir.TypeNode(r.typ())
+ }
+ }
+ n := ir.NewInstExpr(pos, ir.OFUNCINST, x, targs)
+ if go117ExportTypes {
+ n.SetType(r.typ())
+ }
+ return n
+
+ case ir.OSELRECV2:
+ pos := r.pos()
+ init := r.stmtList()
+ n := ir.NewAssignListStmt(pos, ir.OSELRECV2, r.exprList(), r.exprList())
+ n.SetInit(init)
+ n.Def = r.bool()
+ return n
+
+ default:
+ base.Fatalf("cannot import %v (%d) node\n"+
+ "\t==> please file an issue and assign to gri@", op, int(op))
+ panic("unreachable") // satisfy compiler
+ }
+error:
+ base.Fatalf("cannot import %v (%d) node\n"+
+ "\t==> please file an issue and assign to khr@", op, int(op))
+ panic("unreachable") // satisfy compiler
+}
+
+func (r *importReader) op() ir.Op {
+ if debug && r.uint64() != magic {
+ base.Fatalf("import stream has desynchronized")
+ }
+ return ir.Op(r.uint64())
+}
+
+func (r *importReader) fieldList() []ir.Node {
+ list := make([]ir.Node, r.uint64())
+ for i := range list {
+ list[i] = ir.NewStructKeyExpr(r.pos(), r.exoticField(), r.expr())
+ }
+ return list
+}
+
+func (r *importReader) exprsOrNil() (a, b ir.Node) {
+ ab := r.uint64()
+ if ab&1 != 0 {
+ a = r.expr()
+ }
+ if ab&2 != 0 {
+ b = r.node()
+ }
+ return
+}
+
+func builtinCall(pos src.XPos, op ir.Op) *ir.CallExpr {
+ if go117ExportTypes {
+ // These should all be encoded as direct ops, not OCALL.
+ base.Fatalf("builtinCall should not be invoked when types are included in import/export")
+ }
+ return ir.NewCallExpr(pos, ir.OCALL, ir.NewIdent(base.Pos, types.BuiltinPkg.Lookup(ir.OpNames[op])), nil)
+}
+
+// NewIncompleteNamedType returns a TFORW type t with name specified by sym, such
+// that t.nod and sym.Def are set correctly. If there are any RParams for the type,
+// they should be set soon after creating the TFORW type, before creating the
+// underlying type. That ensures that the HasTParam and HasShape flags will be set
+// properly, in case this type is part of some mutually recursive type.
+func NewIncompleteNamedType(pos src.XPos, sym *types.Sym) *types.Type {
+ name := ir.NewDeclNameAt(pos, ir.OTYPE, sym)
+ forw := types.NewNamed(name)
+ name.SetType(forw)
+ sym.Def = name
+ return forw
+}
+
+// Instantiate creates a new named type which is the instantiation of the base
+// named generic type, with the specified type args.
+func Instantiate(pos src.XPos, baseType *types.Type, targs []*types.Type) *types.Type {
+ baseSym := baseType.Sym()
+ if strings.Contains(baseSym.Name, "[") {
+ base.Fatalf("arg to Instantiate is not a base generic type")
+ }
+ name := InstTypeName(baseSym.Name, targs)
+ instSym := baseSym.Pkg.Lookup(name)
+ if instSym.Def != nil {
+ // May match existing type from previous import or
+ // types2-to-types1 conversion.
+ t := instSym.Def.Type()
+ if t.Kind() != types.TFORW {
+ return t
+ }
+ // Or, we have started creating this type in (*TSubster).Typ, but its
+ // underlying type was not completed yet, so we need to add this type
+ // to deferredInstStack, if not already there.
+ found := false
+ for _, t2 := range deferredInstStack {
+ if t2 == t {
+ found = true
+ break
+ }
+ }
+ if !found {
+ deferredInstStack = append(deferredInstStack, t)
+ }
+ return t
+ }
+
+ t := NewIncompleteNamedType(baseType.Pos(), instSym)
+ t.SetRParams(targs)
+ t.SetOrigType(baseType)
+
+ // baseType may still be TFORW or its methods may not be fully filled in
+ // (since we are in the middle of importing it). So, delay call to
+ // substInstType until we get back up to the top of the current top-most
+ // type import.
+ deferredInstStack = append(deferredInstStack, t)
+
+ return t
+}
+
+var deferredInstStack []*types.Type
+var deferInst int
+
+// deferDoInst defers substitution on instantiated types until we are at the
+// top-most defined type, so the base types are fully defined.
+func deferDoInst() {
+ deferInst++
+}
+
+func resumeDoInst() {
+ if deferInst == 1 {
+ for len(deferredInstStack) > 0 {
+ t := deferredInstStack[0]
+ deferredInstStack = deferredInstStack[1:]
+ substInstType(t, t.OrigType(), t.RParams())
+ }
+ }
+ deferInst--
+}
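+
+// Illustrative sketch of the intended call pattern (not part of the original
+// change): importers bracket type creation with deferDoInst/resumeDoInst so
+// that substitution only runs once the outermost type is fully defined:
+//
+//	deferDoInst()
+//	t := Instantiate(pos, baseType, targs) // queued on deferredInstStack
+//	resumeDoInst()                         // drains the stack via substInstType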
+
+// doInst creates a new instantiation type (which will be added to
+// deferredInstStack for completion later) for an incomplete type encountered
+// during a type substitution for an instantiation. This is needed for
+// instantiations of mutually recursive types.
+func doInst(t *types.Type) *types.Type {
+ assert(t.Kind() == types.TFORW)
+ return Instantiate(t.Pos(), t.OrigType(), t.RParams())
+}
+
+// substInstType completes the instantiation of a generic type by doing a
+// substitution on the underlying type itself and any methods. t is the
+// instantiation being created, baseType is the base generic type, and targs are
+// the type arguments that baseType is being instantiated with.
+func substInstType(t *types.Type, baseType *types.Type, targs []*types.Type) {
+ assert(t.Kind() == types.TFORW)
+ subst := Tsubster{
+ Tparams: baseType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t.SetUnderlying(subst.Typ(baseType.Underlying()))
+
+ newfields := make([]*types.Field, baseType.Methods().Len())
+ for i, f := range baseType.Methods().Slice() {
+ if !f.IsMethod() || types.IsInterfaceMethod(f.Type) {
+ // Do a normal substitution if this is a non-method (which
+ // means this must be an interface used as a constraint) or
+ // an interface method.
+ t2 := subst.Typ(f.Type)
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ continue
+ }
+ recvType := f.Type.Recv().Type
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ }
+ // Substitute in the method using the type params used in the
+ // method (not the type params in the definition of the generic type).
+ msubst := Tsubster{
+ Tparams: recvType.RParams(),
+ Targs: targs,
+ SubstForwFunc: doInst,
+ }
+ t2 := msubst.Typ(f.Type)
+ oldsym := f.Nname.Sym()
+ newsym := MakeFuncInstSym(oldsym, targs, true, true)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ ir.MarkFunc(nname)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ t.Methods().Set(newfields)
+ if !t.HasTParam() && !t.HasShape() && t.Kind() != types.TINTER && t.Methods().Len() > 0 {
+ // Generate all the methods for a new fully-instantiated,
+ // non-interface, non-shape type.
+ NeedInstType(t)
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/mkbuiltin.go b/src/cmd/compile/internal/typecheck/mkbuiltin.go
new file mode 100644
index 0000000..6dbd186
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/mkbuiltin.go
@@ -0,0 +1,249 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:build ignore
+// +build ignore
+
+// Generate builtin.go from builtin/runtime.go.
+
+package main
+
+import (
+ "bytes"
+ "flag"
+ "fmt"
+ "go/ast"
+ "go/format"
+ "go/parser"
+ "go/token"
+ "io"
+ "io/ioutil"
+ "log"
+ "os"
+ "path/filepath"
+ "strconv"
+ "strings"
+)
+
+var stdout = flag.Bool("stdout", false, "write to stdout instead of builtin.go")
+
+func main() {
+ flag.Parse()
+
+ var b bytes.Buffer
+ fmt.Fprintln(&b, "// Code generated by mkbuiltin.go. DO NOT EDIT.")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, "package typecheck")
+ fmt.Fprintln(&b)
+ fmt.Fprintln(&b, `import (`)
+ fmt.Fprintln(&b, ` "cmd/compile/internal/types"`)
+ fmt.Fprintln(&b, ` "cmd/internal/src"`)
+ fmt.Fprintln(&b, `)`)
+
+ mkbuiltin(&b, "runtime")
+
+ out, err := format.Source(b.Bytes())
+ if err != nil {
+ log.Fatal(err)
+ }
+ if *stdout {
+ _, err = os.Stdout.Write(out)
+ } else {
+ err = ioutil.WriteFile("builtin.go", out, 0666)
+ }
+ if err != nil {
+ log.Fatal(err)
+ }
+}
+
+func mkbuiltin(w io.Writer, name string) {
+ fset := token.NewFileSet()
+ f, err := parser.ParseFile(fset, filepath.Join("builtin", name+".go"), nil, 0)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ var interner typeInterner
+
+ fmt.Fprintf(w, "var %sDecls = [...]struct { name string; tag int; typ int }{\n", name)
+ for _, decl := range f.Decls {
+ switch decl := decl.(type) {
+ case *ast.FuncDecl:
+ if decl.Recv != nil {
+ log.Fatal("methods unsupported")
+ }
+ if decl.Body != nil {
+ log.Fatal("unexpected function body")
+ }
+ fmt.Fprintf(w, "{%q, funcTag, %d},\n", decl.Name.Name, interner.intern(decl.Type))
+ case *ast.GenDecl:
+ if decl.Tok == token.IMPORT {
+ if len(decl.Specs) != 1 || decl.Specs[0].(*ast.ImportSpec).Path.Value != "\"unsafe\"" {
+ log.Fatal("runtime cannot import other package")
+ }
+ continue
+ }
+ if decl.Tok != token.VAR {
+ log.Fatal("unhandled declaration kind", decl.Tok)
+ }
+ for _, spec := range decl.Specs {
+ spec := spec.(*ast.ValueSpec)
+ if len(spec.Values) != 0 {
+ log.Fatal("unexpected values")
+ }
+ typ := interner.intern(spec.Type)
+ for _, name := range spec.Names {
+ fmt.Fprintf(w, "{%q, varTag, %d},\n", name.Name, typ)
+ }
+ }
+ default:
+ log.Fatal("unhandled decl type", decl)
+ }
+ }
+ fmt.Fprintln(w, "}")
+
+ fmt.Fprintln(w, `
+// Not inlining this function removes a significant chunk of init code.
+//go:noinline
+func newSig(params, results []*types.Field) *types.Type {
+ return types.NewSignature(types.NoPkg, nil, nil, params, results)
+}
+
+func params(tlist ...*types.Type) []*types.Field {
+ flist := make([]*types.Field, len(tlist))
+ for i, typ := range tlist {
+ flist[i] = types.NewField(src.NoXPos, nil, typ)
+ }
+ return flist
+}`)
+
+ fmt.Fprintln(w)
+ fmt.Fprintf(w, "func %sTypes() []*types.Type {\n", name)
+ fmt.Fprintf(w, "var typs [%d]*types.Type\n", len(interner.typs))
+ for i, typ := range interner.typs {
+ fmt.Fprintf(w, "typs[%d] = %s\n", i, typ)
+ }
+ fmt.Fprintln(w, "return typs[:]")
+ fmt.Fprintln(w, "}")
+}
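+
+// Illustrative sketch of the generated output (abridged; the actual tags and
+// type indices are computed at generation time):
+//
+//	var runtimeDecls = [...]struct { name string; tag int; typ int }{
+//		{"newobject", funcTag, ...},
+//		...
+//	}
+//
+//	func runtimeTypes() []*types.Type { ... }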
+
+// typeInterner maps Go type expressions to compiler code that
+// constructs the denoted type. It recognizes and reuses common
+// subtype expressions.
+type typeInterner struct {
+ typs []string
+ hash map[string]int
+}
+
+func (i *typeInterner) intern(t ast.Expr) int {
+ x := i.mktype(t)
+ v, ok := i.hash[x]
+ if !ok {
+ v = len(i.typs)
+ if i.hash == nil {
+ i.hash = make(map[string]int)
+ }
+ i.hash[x] = v
+ i.typs = append(i.typs, x)
+ }
+ return v
+}
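+
+// Illustrative note (not in the original file): interning the same type
+// expression twice, e.g. *byte appearing in two signatures, yields the same
+// index, so the generated code constructs that type once and every later
+// reference reuses the shared typs[i] entry.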
+
+func (i *typeInterner) subtype(t ast.Expr) string {
+ return fmt.Sprintf("typs[%d]", i.intern(t))
+}
+
+func (i *typeInterner) mktype(t ast.Expr) string {
+ switch t := t.(type) {
+ case *ast.Ident:
+ switch t.Name {
+ case "byte":
+ return "types.ByteType"
+ case "rune":
+ return "types.RuneType"
+ }
+ return fmt.Sprintf("types.Types[types.T%s]", strings.ToUpper(t.Name))
+ case *ast.SelectorExpr:
+ if t.X.(*ast.Ident).Name != "unsafe" || t.Sel.Name != "Pointer" {
+ log.Fatalf("unhandled type: %#v", t)
+ }
+ return "types.Types[types.TUNSAFEPTR]"
+
+ case *ast.ArrayType:
+ if t.Len == nil {
+ return fmt.Sprintf("types.NewSlice(%s)", i.subtype(t.Elt))
+ }
+ return fmt.Sprintf("types.NewArray(%s, %d)", i.subtype(t.Elt), intconst(t.Len))
+ case *ast.ChanType:
+ dir := "types.Cboth"
+ switch t.Dir {
+ case ast.SEND:
+ dir = "types.Csend"
+ case ast.RECV:
+ dir = "types.Crecv"
+ }
+ return fmt.Sprintf("types.NewChan(%s, %s)", i.subtype(t.Value), dir)
+ case *ast.FuncType:
+ return fmt.Sprintf("newSig(%s, %s)", i.fields(t.Params, false), i.fields(t.Results, false))
+ case *ast.InterfaceType:
+ if len(t.Methods.List) != 0 {
+ log.Fatal("non-empty interfaces unsupported")
+ }
+ return "types.Types[types.TINTER]"
+ case *ast.MapType:
+ return fmt.Sprintf("types.NewMap(%s, %s)", i.subtype(t.Key), i.subtype(t.Value))
+ case *ast.StarExpr:
+ return fmt.Sprintf("types.NewPtr(%s)", i.subtype(t.X))
+ case *ast.StructType:
+ return fmt.Sprintf("types.NewStruct(types.NoPkg, %s)", i.fields(t.Fields, true))
+
+ default:
+ log.Fatalf("unhandled type: %#v", t)
+ panic("unreachable")
+ }
+}
+
+func (i *typeInterner) fields(fl *ast.FieldList, keepNames bool) string {
+ if fl == nil || len(fl.List) == 0 {
+ return "nil"
+ }
+
+ var res []string
+ for _, f := range fl.List {
+ typ := i.subtype(f.Type)
+ if len(f.Names) == 0 {
+ res = append(res, typ)
+ } else {
+ for _, name := range f.Names {
+ if keepNames {
+ res = append(res, fmt.Sprintf("types.NewField(src.NoXPos, Lookup(%q), %s)", name.Name, typ))
+ } else {
+ res = append(res, typ)
+ }
+ }
+ }
+ }
+
+ if keepNames {
+ return fmt.Sprintf("[]*types.Field{%s}", strings.Join(res, ", "))
+ }
+ return fmt.Sprintf("params(%s)", strings.Join(res, ", "))
+}
+
+func intconst(e ast.Expr) int64 {
+ switch e := e.(type) {
+ case *ast.BasicLit:
+ if e.Kind != token.INT {
+ log.Fatalf("expected INT, got %v", e.Kind)
+ }
+ x, err := strconv.ParseInt(e.Value, 0, 64)
+ if err != nil {
+ log.Fatal(err)
+ }
+ return x
+ default:
+ log.Fatalf("unhandled expr: %#v", e)
+ panic("unreachable")
+ }
+}
diff --git a/src/cmd/compile/internal/typecheck/stmt.go b/src/cmd/compile/internal/typecheck/stmt.go
new file mode 100644
index 0000000..9a02c17
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/stmt.go
@@ -0,0 +1,683 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func RangeExprType(t *types.Type) *types.Type {
+ if t.IsPtr() && t.Elem().IsArray() {
+ return t.Elem()
+ }
+ return t
+}
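+
+// For illustration (not part of the original comments): given 'var p *[4]string',
+// RangeExprType reports [4]string, so 'for i, s := range p' is checked against
+// the TARRAY case below and yields an int key and a string element.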
+
+func typecheckrangeExpr(n *ir.RangeStmt) {
+ n.X = Expr(n.X)
+ if n.X.Type() == nil {
+ return
+ }
+
+ t := RangeExprType(n.X.Type())
+ // delicate little dance. see tcAssignList
+ if n.Key != nil && !ir.DeclaredBy(n.Key, n) {
+ n.Key = AssignExpr(n.Key)
+ }
+ if n.Value != nil && !ir.DeclaredBy(n.Value, n) {
+ n.Value = AssignExpr(n.Value)
+ }
+
+ var tk, tv *types.Type
+ toomany := false
+ switch t.Kind() {
+ default:
+ base.ErrorfAt(n.Pos(), "cannot range over %L", n.X)
+ return
+
+ case types.TARRAY, types.TSLICE:
+ tk = types.Types[types.TINT]
+ tv = t.Elem()
+
+ case types.TMAP:
+ tk = t.Key()
+ tv = t.Elem()
+
+ case types.TCHAN:
+ if !t.ChanDir().CanRecv() {
+ base.ErrorfAt(n.Pos(), "invalid operation: range %v (receive from send-only type %v)", n.X, n.X.Type())
+ return
+ }
+
+ tk = t.Elem()
+ tv = nil
+ if n.Value != nil {
+ toomany = true
+ }
+
+ case types.TSTRING:
+ tk = types.Types[types.TINT]
+ tv = types.RuneType
+ }
+
+ if toomany {
+ base.ErrorfAt(n.Pos(), "too many variables in range")
+ }
+
+ do := func(nn ir.Node, t *types.Type) {
+ if nn != nil {
+ if ir.DeclaredBy(nn, n) {
+ nn.SetType(t)
+ } else if nn.Type() != nil {
+ if op, why := Assignop(t, nn.Type()); op == ir.OXXX {
+ base.ErrorfAt(n.Pos(), "cannot assign type %v to %L in range%s", t, nn, why)
+ }
+ }
+ checkassign(n, nn)
+ }
+ }
+ do(n.Key, tk)
+ do(n.Value, tv)
+}
+
+// type check assignment.
+// if this assignment is the definition of a var on the left side,
+// fill in the var's type.
+func tcAssign(n *ir.AssignStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcAssign", n)(nil)
+ }
+
+ if n.Y == nil {
+ n.X = AssignExpr(n.X)
+ return
+ }
+
+ lhs, rhs := []ir.Node{n.X}, []ir.Node{n.Y}
+ assign(n, lhs, rhs)
+ n.X, n.Y = lhs[0], rhs[0]
+
+ // TODO(mdempsky): This seems out of place.
+ if !ir.IsBlank(n.X) {
+ types.CheckSize(n.X.Type()) // ensure width is calculated for backend
+ }
+}
+
+func tcAssignList(n *ir.AssignListStmt) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("tcAssignList", n)(nil)
+ }
+
+ assign(n, n.Lhs, n.Rhs)
+}
+
+func assign(stmt ir.Node, lhs, rhs []ir.Node) {
+ // delicate little dance.
+ // the definition of lhs may refer to this assignment
+ // as its definition, in which case it will call tcAssign.
+ // in that case, do not call typecheck back, or it will cycle.
+ // if the variable has a type (ntype) then typechecking
+ // will not look at defn, so it is okay (and desirable,
+ // so that the conversion below happens).
+
+ checkLHS := func(i int, typ *types.Type) {
+ lhs[i] = Resolve(lhs[i])
+ if n := lhs[i]; typ != nil && ir.DeclaredBy(n, stmt) && n.Name().Ntype == nil {
+ if typ.Kind() != types.TNIL {
+ n.SetType(defaultType(typ))
+ } else {
+ base.Errorf("use of untyped nil")
+ }
+ }
+ if lhs[i].Typecheck() == 0 {
+ lhs[i] = AssignExpr(lhs[i])
+ }
+ checkassign(stmt, lhs[i])
+ }
+
+ assignType := func(i int, typ *types.Type) {
+ checkLHS(i, typ)
+ if typ != nil {
+ checkassignto(typ, lhs[i])
+ }
+ }
+
+ cr := len(rhs)
+ if len(rhs) == 1 {
+ rhs[0] = typecheck(rhs[0], ctxExpr|ctxMultiOK)
+ if rtyp := rhs[0].Type(); rtyp != nil && rtyp.IsFuncArgStruct() {
+ cr = rtyp.NumFields()
+ }
+ } else {
+ Exprs(rhs)
+ }
+
+ // x, ok = y
+assignOK:
+ for len(lhs) == 2 && cr == 1 {
+ stmt := stmt.(*ir.AssignListStmt)
+ r := rhs[0]
+
+ switch r.Op() {
+ case ir.OINDEXMAP:
+ stmt.SetOp(ir.OAS2MAPR)
+ case ir.ORECV:
+ stmt.SetOp(ir.OAS2RECV)
+ case ir.ODOTTYPE:
+ r := r.(*ir.TypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODOTTYPE2)
+ case ir.ODYNAMICDOTTYPE:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ stmt.SetOp(ir.OAS2DOTTYPE)
+ r.SetOp(ir.ODYNAMICDOTTYPE2)
+ default:
+ break assignOK
+ }
+
+ assignType(0, r.Type())
+ assignType(1, types.UntypedBool)
+ return
+ }
+
+ if len(lhs) != cr {
+ if r, ok := rhs[0].(*ir.CallExpr); ok && len(rhs) == 1 {
+ if r.Type() != nil {
+ base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v returns %d value%s", len(lhs), plural(len(lhs)), r.X, cr, plural(cr))
+ }
+ } else {
+ base.ErrorfAt(stmt.Pos(), "assignment mismatch: %d variable%s but %v value%s", len(lhs), plural(len(lhs)), len(rhs), plural(len(rhs)))
+ }
+
+ for i := range lhs {
+ checkLHS(i, nil)
+ }
+ return
+ }
+
+ // x,y,z = f()
+ if cr > len(rhs) {
+ stmt := stmt.(*ir.AssignListStmt)
+ stmt.SetOp(ir.OAS2FUNC)
+ r := rhs[0].(*ir.CallExpr)
+ rtyp := r.Type()
+
+ mismatched := false
+ failed := false
+ for i := range lhs {
+ result := rtyp.Field(i).Type
+ assignType(i, result)
+
+ if lhs[i].Type() == nil || result == nil {
+ failed = true
+ } else if lhs[i] != ir.BlankNode && !types.Identical(lhs[i].Type(), result) {
+ mismatched = true
+ }
+ }
+ if mismatched && !failed {
+ RewriteMultiValueCall(stmt, r)
+ }
+ return
+ }
+
+ for i, r := range rhs {
+ checkLHS(i, r.Type())
+ if lhs[i].Type() != nil {
+ rhs[i] = AssignConv(r, lhs[i].Type(), "assignment")
+ }
+ }
+}
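+
+// Illustrative examples of the rewrites performed above (not part of the
+// original comments): 'v, ok := m[k]' becomes OAS2MAPR, 'v, ok := <-ch'
+// becomes OAS2RECV, 'v, ok := x.(T)' becomes OAS2DOTTYPE, and 'a, b := f()'
+// becomes OAS2FUNC, with RewriteMultiValueCall used when the declared types
+// differ from (but are assignable from) the call's result types.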
+
+func plural(n int) string {
+ if n == 1 {
+ return ""
+ }
+ return "s"
+}
+
+// tcCheckNil typechecks an OCHECKNIL node.
+func tcCheckNil(n *ir.UnaryExpr) ir.Node {
+ n.X = Expr(n.X)
+ if !n.X.Type().IsPtrShaped() {
+ base.FatalfAt(n.Pos(), "%L is not pointer shaped", n.X)
+ }
+ return n
+}
+
+// tcFor typechecks an OFOR node.
+func tcFor(n *ir.ForStmt) ir.Node {
+ Stmts(n.Init())
+ n.Cond = Expr(n.Cond)
+ n.Cond = DefaultLit(n.Cond, nil)
+ if n.Cond != nil {
+ t := n.Cond.Type()
+ if t != nil && !t.IsBoolean() {
+ base.Errorf("non-bool %L used as for condition", n.Cond)
+ }
+ }
+ n.Post = Stmt(n.Post)
+ if n.Op() == ir.OFORUNTIL {
+ Stmts(n.Late)
+ }
+ Stmts(n.Body)
+ return n
+}
+
+func tcGoDefer(n *ir.GoDeferStmt) {
+ what := "defer"
+ if n.Op() == ir.OGO {
+ what = "go"
+ }
+
+ switch n.Call.Op() {
+ // ok
+ case ir.OCALLINTER,
+ ir.OCALLMETH,
+ ir.OCALLFUNC,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.ODELETE,
+ ir.OPANIC,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.ORECOVER:
+ return
+
+ case ir.OAPPEND,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKE,
+ ir.OMAKESLICE,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.ONEW,
+ ir.OREAL,
+ ir.OLITERAL: // conversion or unsafe.Alignof, Offsetof, Sizeof
+ if orig := ir.Orig(n.Call); orig.Op() == ir.OCONV {
+ break
+ }
+ base.ErrorfAt(n.Pos(), "%s discards result of %v", what, n.Call)
+ return
+ }
+
+ // type is broken or missing, most likely a method call on a broken type
+ // we will warn about the broken type elsewhere. no need to emit a potentially confusing error
+ if n.Call.Type() == nil || n.Call.Type().Broke() {
+ return
+ }
+
+ if !n.Diag() {
+ // The syntax made sure it was a call, so this must be
+ // a conversion.
+ n.SetDiag(true)
+ base.ErrorfAt(n.Pos(), "%s requires function call, not conversion", what)
+ }
+}
+
+// tcIf typechecks an OIF node.
+func tcIf(n *ir.IfStmt) ir.Node {
+ Stmts(n.Init())
+ n.Cond = Expr(n.Cond)
+ n.Cond = DefaultLit(n.Cond, nil)
+ if n.Cond != nil {
+ t := n.Cond.Type()
+ if t != nil && !t.IsBoolean() {
+ base.Errorf("non-bool %L used as if condition", n.Cond)
+ }
+ }
+ Stmts(n.Body)
+ Stmts(n.Else)
+ return n
+}
+
+// range
+func tcRange(n *ir.RangeStmt) {
+ // Typechecking order is important here:
+ // 0. first typecheck range expression (slice/map/chan),
+ // it is evaluated only once and so logically it is not part of the loop.
+ // 1. typecheck produced values,
+ // this part can declare new vars and so it must be typechecked before body,
+ // because body can contain a closure that captures the vars.
+ // 2. decldepth++ to denote loop body.
+ // 3. typecheck body.
+ // 4. decldepth--.
+ typecheckrangeExpr(n)
+
+ // second half of dance, the first half being typecheckrangeExpr
+ n.SetTypecheck(1)
+ if n.Key != nil && n.Key.Typecheck() == 0 {
+ n.Key = AssignExpr(n.Key)
+ }
+ if n.Value != nil && n.Value.Typecheck() == 0 {
+ n.Value = AssignExpr(n.Value)
+ }
+
+ Stmts(n.Body)
+}
+
+// tcReturn typechecks an ORETURN node.
+func tcReturn(n *ir.ReturnStmt) ir.Node {
+ typecheckargs(n)
+ if ir.CurFunc == nil {
+ base.Errorf("return outside function")
+ n.SetType(nil)
+ return n
+ }
+
+ if ir.HasNamedResults(ir.CurFunc) && len(n.Results) == 0 {
+ return n
+ }
+ typecheckaste(ir.ORETURN, nil, false, ir.CurFunc.Type().Results(), n.Results, func() string { return "return argument" })
+ return n
+}
+
+// select
+func tcSelect(sel *ir.SelectStmt) {
+ var def *ir.CommClause
+ lno := ir.SetPos(sel)
+ Stmts(sel.Init())
+ for _, ncase := range sel.Cases {
+ if ncase.Comm == nil {
+ // default
+ if def != nil {
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in select (first at %v)", ir.Line(def))
+ } else {
+ def = ncase
+ }
+ } else {
+ n := Stmt(ncase.Comm)
+ ncase.Comm = n
+ oselrecv2 := func(dst, recv ir.Node, def bool) {
+ selrecv := ir.NewAssignListStmt(n.Pos(), ir.OSELRECV2, []ir.Node{dst, ir.BlankNode}, []ir.Node{recv})
+ selrecv.Def = def
+ selrecv.SetTypecheck(1)
+ selrecv.SetInit(n.Init())
+ ncase.Comm = selrecv
+ }
+ switch n.Op() {
+ default:
+ pos := n.Pos()
+ if n.Op() == ir.ONAME {
+ // We don't have the right position for ONAME nodes (see #15459 and
+ // others). Using ncase.Pos for now as it will provide the correct
+ // line number (assuming the expression follows the "case" keyword
+ // on the same line). This matches the approach before 1.10.
+ pos = ncase.Pos()
+ }
+ base.ErrorfAt(pos, "select case must be receive, send or assign recv")
+
+ case ir.OAS:
+ // convert x = <-c into x, _ = <-c
+ // remove implicit conversions; the eventual assignment
+ // will reintroduce them.
+ n := n.(*ir.AssignStmt)
+ if r := n.Y; r.Op() == ir.OCONVNOP || r.Op() == ir.OCONVIFACE {
+ r := r.(*ir.ConvExpr)
+ if r.Implicit() {
+ n.Y = r.X
+ }
+ }
+ if n.Y.Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+ break
+ }
+ oselrecv2(n.X, n.Y, n.Def)
+
+ case ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ if n.Rhs[0].Op() != ir.ORECV {
+ base.ErrorfAt(n.Pos(), "select assignment must have receive on right hand side")
+ break
+ }
+ n.SetOp(ir.OSELRECV2)
+
+ case ir.ORECV:
+ // convert <-c into _, _ = <-c
+ n := n.(*ir.UnaryExpr)
+ oselrecv2(ir.BlankNode, n, false)
+
+ case ir.OSEND:
+ break
+ }
+ }
+
+ Stmts(ncase.Body)
+ }
+
+ base.Pos = lno
+}
+
+// tcSend typechecks an OSEND node.
+func tcSend(n *ir.SendStmt) ir.Node {
+ n.Chan = Expr(n.Chan)
+ n.Value = Expr(n.Value)
+ n.Chan = DefaultLit(n.Chan, nil)
+ t := n.Chan.Type()
+ if t == nil {
+ return n
+ }
+ if !t.IsChan() {
+ base.Errorf("invalid operation: %v (send to non-chan type %v)", n, t)
+ return n
+ }
+
+ if !t.ChanDir().CanSend() {
+ base.Errorf("invalid operation: %v (send to receive-only type %v)", n, t)
+ return n
+ }
+
+ n.Value = AssignConv(n.Value, t.Elem(), "send")
+ if n.Value.Type() == nil {
+ return n
+ }
+ return n
+}
+
+// tcSwitch typechecks a switch statement.
+func tcSwitch(n *ir.SwitchStmt) {
+ Stmts(n.Init())
+ if n.Tag != nil && n.Tag.Op() == ir.OTYPESW {
+ tcSwitchType(n)
+ } else {
+ tcSwitchExpr(n)
+ }
+}
+
+func tcSwitchExpr(n *ir.SwitchStmt) {
+ t := types.Types[types.TBOOL]
+ if n.Tag != nil {
+ n.Tag = Expr(n.Tag)
+ n.Tag = DefaultLit(n.Tag, nil)
+ t = n.Tag.Type()
+ }
+
+ var nilonly string
+ if t != nil {
+ switch {
+ case t.IsMap():
+ nilonly = "map"
+ case t.Kind() == types.TFUNC:
+ nilonly = "func"
+ case t.IsSlice():
+ nilonly = "slice"
+
+ case !types.IsComparable(t):
+ if t.IsStruct() {
+ base.ErrorfAt(n.Pos(), "cannot switch on %L (struct containing %v cannot be compared)", n.Tag, types.IncomparableField(t).Type)
+ } else {
+ base.ErrorfAt(n.Pos(), "cannot switch on %L", n.Tag)
+ }
+ t = nil
+ }
+ }
+
+ var defCase ir.Node
+ var cs constSet
+ for _, ncase := range n.Cases {
+ ls := ncase.List
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ ir.SetPos(ncase)
+ ls[i] = Expr(ls[i])
+ ls[i] = DefaultLit(ls[i], t)
+ n1 := ls[i]
+ if t == nil || n1.Type() == nil {
+ continue
+ }
+
+ if nilonly != "" && !ir.IsNil(n1) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (can only compare %s %v to nil)", n1, nilonly, n.Tag)
+ } else if t.IsInterface() && !n1.Type().IsInterface() && !types.IsComparable(n1.Type()) {
+ base.ErrorfAt(ncase.Pos(), "invalid case %L in switch (incomparable type)", n1)
+ } else {
+ op1, _ := Assignop(n1.Type(), t)
+ op2, _ := Assignop(t, n1.Type())
+ if op1 == ir.OXXX && op2 == ir.OXXX {
+ if n.Tag != nil {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch on %v (mismatched types %v and %v)", n1, n.Tag, n1.Type(), t)
+ } else {
+ base.ErrorfAt(ncase.Pos(), "invalid case %v in switch (mismatched types %v and bool)", n1, n1.Type())
+ }
+ }
+ }
+
+ // Don't check for duplicate bools. Although the spec allows it,
+ // (1) the compiler hasn't checked it in the past, so compatibility mandates it, and
+ // (2) it would disallow useful things like
+ // case GOARCH == "arm" && GOARM == "5":
+ // case GOARCH == "arm":
+ // which would both evaluate to false for non-ARM compiles.
+ if !n1.Type().IsBoolean() {
+ cs.add(ncase.Pos(), n1, "case", "switch")
+ }
+ }
+
+ Stmts(ncase.Body)
+ }
+}
+
+func tcSwitchType(n *ir.SwitchStmt) {
+ guard := n.Tag.(*ir.TypeSwitchGuard)
+ guard.X = Expr(guard.X)
+ t := guard.X.Type()
+ if t != nil && !t.IsInterface() {
+ base.ErrorfAt(n.Pos(), "cannot type switch on non-interface value %L", guard.X)
+ t = nil
+ }
+
+ // We don't actually declare the type switch's guarded
+ // declaration itself. So if there are no cases, we won't
+ // notice that it went unused.
+ if v := guard.Tag; v != nil && !ir.IsBlank(v) && len(n.Cases) == 0 {
+ base.ErrorfAt(v.Pos(), "%v declared but not used", v.Sym())
+ }
+
+ var defCase, nilCase ir.Node
+ var ts typeSet
+ for _, ncase := range n.Cases {
+ ls := ncase.List
+ if len(ls) == 0 { // default:
+ if defCase != nil {
+ base.ErrorfAt(ncase.Pos(), "multiple defaults in switch (first at %v)", ir.Line(defCase))
+ } else {
+ defCase = ncase
+ }
+ }
+
+ for i := range ls {
+ ls[i] = typecheck(ls[i], ctxExpr|ctxType)
+ n1 := ls[i]
+ if t == nil || n1.Type() == nil {
+ continue
+ }
+
+ var missing, have *types.Field
+ var ptr int
+ if ir.IsNil(n1) { // case nil:
+ if nilCase != nil {
+ base.ErrorfAt(ncase.Pos(), "multiple nil cases in type switch (first at %v)", ir.Line(nilCase))
+ } else {
+ nilCase = ncase
+ }
+ continue
+ }
+ if n1.Op() != ir.OTYPE {
+ base.ErrorfAt(ncase.Pos(), "%L is not a type", n1)
+ continue
+ }
+ if !n1.Type().IsInterface() && !implements(n1.Type(), t, &missing, &have, &ptr) && !missing.Broke() {
+ if have != nil && !have.Broke() {
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (wrong type for %v method)\n\thave %v%S\n\twant %v%S", guard.X, n1.Type(), missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (%v method has pointer receiver)", guard.X, n1.Type(), missing.Sym)
+ } else {
+ base.ErrorfAt(ncase.Pos(), "impossible type switch case: %L cannot have dynamic type %v"+
+ " (missing %v method)", guard.X, n1.Type(), missing.Sym)
+ }
+ continue
+ }
+
+ ts.add(ncase.Pos(), n1.Type())
+ }
+
+ if ncase.Var != nil {
+ // Assign the clause variable's type.
+ vt := t
+ if len(ls) == 1 {
+ if ls[0].Op() == ir.OTYPE {
+ vt = ls[0].Type()
+ } else if !ir.IsNil(ls[0]) {
+ // Invalid single-type case;
+ // mark variable as broken.
+ vt = nil
+ }
+ }
+
+ nvar := ncase.Var
+ nvar.SetType(vt)
+ if vt != nil {
+ nvar = AssignExpr(nvar).(*ir.Name)
+ } else {
+ // Clause variable is broken; prevent typechecking.
+ nvar.SetTypecheck(1)
+ nvar.SetWalkdef(1)
+ }
+ ncase.Var = nvar
+ }
+
+ Stmts(ncase.Body)
+ }
+}
+
+type typeSet struct {
+ m map[string]src.XPos
+}
+
+func (s *typeSet) add(pos src.XPos, typ *types.Type) {
+ if s.m == nil {
+ s.m = make(map[string]src.XPos)
+ }
+
+ ls := typ.LinkString()
+ if prev, ok := s.m[ls]; ok {
+ base.ErrorfAt(pos, "duplicate case %v in type switch\n\tprevious case at %s", typ, base.FmtPos(prev))
+ return
+ }
+ s.m[ls] = pos
+}
diff --git a/src/cmd/compile/internal/typecheck/subr.go b/src/cmd/compile/internal/typecheck/subr.go
new file mode 100644
index 0000000..7c48fb5
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/subr.go
@@ -0,0 +1,1584 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+func AssignConv(n ir.Node, t *types.Type, context string) ir.Node {
+ return assignconvfn(n, t, func() string { return context })
+}
+
+// DotImportRefs maps idents introduced by importDot back to the
+// ir.PkgName they were dot-imported through.
+var DotImportRefs map[*ir.Ident]*ir.PkgName
+
+// LookupNum looks up the symbol starting with prefix and ending with
+// the decimal n. If prefix is too long, LookupNum panics.
+func LookupNum(prefix string, n int) *types.Sym {
+ var buf [20]byte // plenty long enough for all current users
+ copy(buf[:], prefix)
+ b := strconv.AppendInt(buf[:len(prefix)], int64(n), 10)
+ return types.LocalPkg.LookupBytes(b)
+}
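+
+// Illustrative example (not part of the original comment): LookupNum(".anon", 2)
+// returns the local-package symbol named ".anon2", as used by NewFuncParams below.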
+
+// NewFuncParams, given a funarg struct list, returns a list of function args.
+func NewFuncParams(tl *types.Type, mustname bool) []*ir.Field {
+ var args []*ir.Field
+ gen := 0
+ for _, t := range tl.Fields().Slice() {
+ s := t.Sym
+ if mustname && (s == nil || s.Name == "_") {
+ // invent a name so that we can refer to it in the trampoline
+ s = LookupNum(".anon", gen)
+ gen++
+ } else if s != nil && s.Pkg != types.LocalPkg {
+ // TODO(mdempsky): Preserve original position, name, and package.
+ s = Lookup(s.Name)
+ }
+ a := ir.NewField(base.Pos, s, nil, t.Type)
+ a.Pos = t.Pos
+ a.IsDDD = t.IsDDD()
+ args = append(args, a)
+ }
+
+ return args
+}
+
+// NewName returns a new ONAME Node associated with symbol s.
+func NewName(s *types.Sym) *ir.Name {
+ n := ir.NewNameAt(base.Pos, s)
+ n.Curfn = ir.CurFunc
+ return n
+}
+
+// NodAddr returns a node representing &n at base.Pos.
+func NodAddr(n ir.Node) *ir.AddrExpr {
+ return NodAddrAt(base.Pos, n)
+}
+
+// NodAddrAt returns a node representing &n at position pos.
+func NodAddrAt(pos src.XPos, n ir.Node) *ir.AddrExpr {
+ n = markAddrOf(n)
+ return ir.NewAddrExpr(pos, n)
+}
+
+func markAddrOf(n ir.Node) ir.Node {
+ if IncrementalAddrtaken {
+ // We can only do incremental addrtaken computation when it is ok
+ // to typecheck the argument of the OADDR. That's only safe after the
+ // main typecheck has completed, and not loading the inlined body.
+ // The argument to OADDR needs to be typechecked because &x[i] takes
+ // the address of x if x is an array, but not if x is a slice.
+ // Note: OuterValue doesn't work correctly until n is typechecked.
+ n = typecheck(n, ctxExpr)
+ if x := ir.OuterValue(n); x.Op() == ir.ONAME {
+ x.Name().SetAddrtaken(true)
+ }
+ } else {
+ // Remember that we built an OADDR without computing the Addrtaken bit for
+ // its argument. We'll do that later in bulk using computeAddrtaken.
+ DirtyAddrtaken = true
+ }
+ return n
+}
+
+// If IncrementalAddrtaken is false, we do not compute Addrtaken for an OADDR Node
+// when it is built. The Addrtaken bits are set in bulk by computeAddrtaken.
+// If IncrementalAddrtaken is true, then when an OADDR Node is built the Addrtaken
+// field of its argument is updated immediately.
+var IncrementalAddrtaken = false
+
+// If DirtyAddrtaken is true, then there are OADDR whose corresponding arguments
+// have not yet been marked as Addrtaken.
+var DirtyAddrtaken = false
+
+func ComputeAddrtaken(top []ir.Node) {
+ for _, n := range top {
+ var doVisit func(n ir.Node)
+ doVisit = func(n ir.Node) {
+ if n.Op() == ir.OADDR {
+ if x := ir.OuterValue(n.(*ir.AddrExpr).X); x.Op() == ir.ONAME {
+ x.Name().SetAddrtaken(true)
+ if x.Name().IsClosureVar() {
+ // Mark the original variable as Addrtaken so that capturevars
+ // knows not to pass it by value.
+ x.Name().Defn.Name().SetAddrtaken(true)
+ }
+ }
+ }
+ if n.Op() == ir.OCLOSURE {
+ ir.VisitList(n.(*ir.ClosureExpr).Func.Body, doVisit)
+ }
+ }
+ ir.Visit(n, doVisit)
+ }
+}
+
+func NodNil() ir.Node {
+ n := ir.NewNilExpr(base.Pos)
+ n.SetType(types.Types[types.TNIL])
+ return n
+}
+
+// AddImplicitDots finds missing fields in obj.field that
+// will give the shortest unique addressing and
+// modifies the tree with missing field names.
+func AddImplicitDots(n *ir.SelectorExpr) *ir.SelectorExpr {
+ n.X = typecheck(n.X, ctxType|ctxExpr)
+ if n.X.Diag() {
+ n.SetDiag(true)
+ }
+ t := n.X.Type()
+ if t == nil {
+ return n
+ }
+
+ if n.X.Op() == ir.OTYPE {
+ return n
+ }
+
+ s := n.Sel
+ if s == nil {
+ return n
+ }
+
+ switch path, ambig := dotpath(s, t, nil, false); {
+ case path != nil:
+ // rebuild elided dots
+ for c := len(path) - 1; c >= 0; c-- {
+ dot := ir.NewSelectorExpr(n.Pos(), ir.ODOT, n.X, path[c].field.Sym)
+ dot.SetImplicit(true)
+ dot.SetType(path[c].field.Type)
+ n.X = dot
+ }
+ case ambig:
+ base.Errorf("ambiguous selector %v", n)
+ n.X = nil
+ }
+
+ return n
+}
+
+// CalcMethods calculates all the methods (including embedding) of a non-interface
+// type t.
+func CalcMethods(t *types.Type) {
+ if t == nil || t.AllMethods().Len() != 0 {
+ return
+ }
+
+ // mark top-level method symbols
+ // so that expand1 doesn't consider them.
+ for _, f := range t.Methods().Slice() {
+ f.Sym.SetUniq(true)
+ }
+
+ // generate all reachable methods
+ slist = slist[:0]
+ expand1(t, true)
+
+ // check each method to be uniquely reachable
+ var ms []*types.Field
+ for i, sl := range slist {
+ slist[i].field = nil
+ sl.field.Sym.SetUniq(false)
+
+ var f *types.Field
+ path, _ := dotpath(sl.field.Sym, t, &f, false)
+ if path == nil {
+ continue
+ }
+
+ // dotpath may have dug out arbitrary fields; we only want methods.
+ if !f.IsMethod() {
+ continue
+ }
+
+ // add it to the base type method list
+ f = f.Copy()
+ f.Embedded = 1 // needs a trampoline
+ for _, d := range path {
+ if d.field.Type.IsPtr() {
+ f.Embedded = 2
+ break
+ }
+ }
+ ms = append(ms, f)
+ }
+
+ for _, f := range t.Methods().Slice() {
+ f.Sym.SetUniq(false)
+ }
+
+ ms = append(ms, t.Methods().Slice()...)
+ sort.Sort(types.MethodsByName(ms))
+ t.SetAllMethods(ms)
+}
+
+// adddot1 returns the number of fields or methods named s at depth d in Type t.
+// If exactly one exists, it will be returned in *save (if save is not nil),
+// and dotlist will contain the path of embedded fields traversed to find it,
+// in reverse order. If none exist, more will indicate whether t contains any
+// embedded fields at depth d, so callers can decide whether to retry at
+// a greater depth.
+func adddot1(s *types.Sym, t *types.Type, d int, save **types.Field, ignorecase bool) (c int, more bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+ defer t.SetRecur(false)
+
+ var u *types.Type
+ d--
+ if d < 0 {
+ // We've reached our target depth. If t has any fields/methods
+ // named s, then we're done. Otherwise, we still need to check
+ // below for embedded fields.
+ c = lookdot0(s, t, save, ignorecase)
+ if c != 0 {
+ return c, false
+ }
+ }
+
+ u = t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+ if !u.IsStruct() && !u.IsInterface() {
+ return c, false
+ }
+
+ var fields *types.Fields
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields.Slice() {
+ if f.Embedded == 0 || f.Sym == nil {
+ continue
+ }
+ if d < 0 {
+ // Found an embedded field at target depth.
+ return c, true
+ }
+ a, more1 := adddot1(s, f.Type, d, save, ignorecase)
+ if a != 0 && c == 0 {
+ dotlist[d].field = f
+ }
+ c += a
+ if more1 {
+ more = true
+ }
+ }
+
+ return c, more
+}
+
+// dotlist is used by adddot1 to record the path of embedded fields
+// used to access a target field or method.
+// Must be non-nil so that dotpath returns a non-nil slice even if d is zero.
+var dotlist = make([]dlist, 10)
+
+// Convert node n for assignment to type t.
+func assignconvfn(n ir.Node, t *types.Type, context func() string) ir.Node {
+ if n == nil || n.Type() == nil || n.Type().Broke() {
+ return n
+ }
+
+ if t.Kind() == types.TBLANK && n.Type().Kind() == types.TNIL {
+ base.Errorf("use of untyped nil")
+ }
+
+ n = convlit1(n, t, false, context)
+ if n.Type() == nil {
+ return n
+ }
+ if t.Kind() == types.TBLANK {
+ return n
+ }
+
+ // Convert ideal bool from comparison to plain bool
+ // if the next step is non-bool (like interface{}).
+ if n.Type() == types.UntypedBool && !t.IsBoolean() {
+ if n.Op() == ir.ONAME || n.Op() == ir.OLITERAL {
+ r := ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+ r.SetType(types.Types[types.TBOOL])
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ n = r
+ }
+ }
+
+ if types.Identical(n.Type(), t) {
+ return n
+ }
+
+ op, why := Assignop(n.Type(), t)
+ if op == ir.OXXX {
+ base.Errorf("cannot use %L as type %v in %s%s", n, t, context(), why)
+ op = ir.OCONV
+ }
+
+ r := ir.NewConvExpr(base.Pos, op, t, n)
+ r.SetTypecheck(1)
+ r.SetImplicit(true)
+ return r
+}
+
+// Assignop reports whether a value of type src can be assigned to type dst.
+// If so, it returns the op code to use in the conversion.
+// If not, it returns OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it is the empty string.
+func Assignop(src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil || src.Kind() == types.TFORW || dst.Kind() == types.TFORW || src.Underlying() == nil || dst.Underlying() == nil {
+ return ir.OXXX, ""
+ }
+
+ // 1. src type is identical to dst.
+ if types.Identical(src, dst) {
+ return ir.OCONVNOP, ""
+ }
+ return Assignop1(src, dst)
+}
+
+func Assignop1(src, dst *types.Type) (ir.Op, string) {
+ // 2. src and dst have identical underlying types and
+ // a. either src or dst is not a named type, or
+ // b. both are empty interface types, or
+ // c. at least one is a gcshape type.
+ // For assignable but different non-empty interface types,
+ // we want to recompute the itab. Recomputing the itab ensures
+ // that itabs are unique (thus an interface with a compile-time
+ // type I has an itab with interface type I).
+ if types.Identical(src.Underlying(), dst.Underlying()) {
+ if src.IsEmptyInterface() {
+ // Conversion between two empty interfaces
+ // requires no code.
+ return ir.OCONVNOP, ""
+ }
+ if (src.Sym() == nil || dst.Sym() == nil) && !src.IsInterface() {
+ // Conversion between two types, at least one unnamed,
+ // needs no conversion. The exception is nonempty interfaces
+ // which need to have their itab updated.
+ return ir.OCONVNOP, ""
+ }
+ if src.IsShape() || dst.IsShape() {
+ // Conversion between a shape type and one of the types
+ // it represents also needs no conversion.
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 3. dst is an interface type and src implements dst.
+ if dst.IsInterface() && src.Kind() != types.TNIL {
+ var missing, have *types.Field
+ var ptr int
+ if src.IsShape() {
+ // Shape types implement things they have already
+ // been typechecked to implement, even if they
+ // don't have the methods for them.
+ return ir.OCONVIFACE, ""
+ }
+ if implements(src, dst, &missing, &have, &ptr) {
+ return ir.OCONVIFACE, ""
+ }
+
+ // we'll have complained about this method anyway, suppress spurious messages.
+ if have != nil && have.Sym == missing.Sym && (have.Type.Broke() || missing.Type.Broke()) {
+ return ir.OCONVIFACE, ""
+ }
+
+ var why string
+ if isptrto(src, types.TINTER) {
+ why = fmt.Sprintf(":\n\t%v is pointer to interface, not interface", src)
+ } else if have != nil && have.Sym == missing.Sym && have.Nointerface() {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method is marked 'nointerface')", src, dst, missing.Sym)
+ } else if have != nil && have.Sym == missing.Sym {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (wrong type for %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else if ptr != 0 {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (%v method has pointer receiver)", src, dst, missing.Sym)
+ } else if have != nil {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)\n"+
+ "\t\thave %v%S\n\t\twant %v%S", src, dst, missing.Sym, have.Sym, have.Type, missing.Sym, missing.Type)
+ } else {
+ why = fmt.Sprintf(":\n\t%v does not implement %v (missing %v method)", src, dst, missing.Sym)
+ }
+
+ return ir.OXXX, why
+ }
+
+ if isptrto(dst, types.TINTER) {
+ why := fmt.Sprintf(":\n\t%v is pointer to interface, not interface", dst)
+ return ir.OXXX, why
+ }
+
+ if src.IsInterface() && dst.Kind() != types.TBLANK {
+ var missing, have *types.Field
+ var ptr int
+ var why string
+ if implements(dst, src, &missing, &have, &ptr) {
+ why = ": need type assertion"
+ }
+ return ir.OXXX, why
+ }
+
+ // 4. src is a bidirectional channel value, dst is a channel type,
+ // src and dst have identical element types, and
+ // either src or dst is not a named type.
+ if src.IsChan() && src.ChanDir() == types.Cboth && dst.IsChan() {
+ if types.Identical(src.Elem(), dst.Elem()) && (src.Sym() == nil || dst.Sym() == nil) {
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 5. src is the predeclared identifier nil and dst is a nillable type.
+ if src.Kind() == types.TNIL {
+ switch dst.Kind() {
+ case types.TPTR,
+ types.TFUNC,
+ types.TMAP,
+ types.TCHAN,
+ types.TINTER,
+ types.TSLICE:
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 6. rule about untyped constants - already converted by DefaultLit.
+
+ // 7. Any typed value can be assigned to the blank identifier.
+ if dst.Kind() == types.TBLANK {
+ return ir.OCONVNOP, ""
+ }
+
+ return ir.OXXX, ""
+}
+
+// Convertop reports whether a value of type src can be converted to a value of type dst.
+// If so, it returns the op code to use in the conversion (possibly OCONVNOP).
+// If not, it returns OXXX. In this case, the string return parameter may
+// hold a reason why. In all other cases, it is the empty string.
+// srcConstant indicates whether the value of type src is a constant.
+func Convertop(srcConstant bool, src, dst *types.Type) (ir.Op, string) {
+ if src == dst {
+ return ir.OCONVNOP, ""
+ }
+ if src == nil || dst == nil {
+ return ir.OXXX, ""
+ }
+
+ // Conversions from regular to go:notinheap are not allowed
+ // (unless it's unsafe.Pointer). These are runtime-specific
+ // rules.
+ // (a) Disallow (*T) to (*U) where T is go:notinheap but U isn't.
+ if src.IsPtr() && dst.IsPtr() && dst.Elem().NotInHeap() && !src.Elem().NotInHeap() {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable), but %v is not", dst.Elem(), src.Elem())
+ return ir.OXXX, why
+ }
+ // (b) Disallow string to []T where T is go:notinheap.
+ if src.IsString() && dst.IsSlice() && dst.Elem().NotInHeap() && (dst.Elem().Kind() == types.ByteType.Kind() || dst.Elem().Kind() == types.RuneType.Kind()) {
+ why := fmt.Sprintf(":\n\t%v is incomplete (or unallocatable)", dst.Elem())
+ return ir.OXXX, why
+ }
+
+ // 1. src can be assigned to dst.
+ op, why := Assignop(src, dst)
+ if op != ir.OXXX {
+ return op, why
+ }
+
+ // The rules for interfaces are no different in conversions
+ // than assignments. If interfaces are involved, stop now
+ // with the good message from assignop.
+ // Otherwise clear the error.
+ if src.IsInterface() || dst.IsInterface() {
+ return ir.OXXX, why
+ }
+
+ // 2. Ignoring struct tags, src and dst have identical underlying types.
+ if types.IdenticalIgnoreTags(src.Underlying(), dst.Underlying()) {
+ return ir.OCONVNOP, ""
+ }
+
+ // 3. src and dst are unnamed pointer types and, ignoring struct tags,
+ // their base types have identical underlying types.
+ if src.IsPtr() && dst.IsPtr() && src.Sym() == nil && dst.Sym() == nil {
+ if types.IdenticalIgnoreTags(src.Elem().Underlying(), dst.Elem().Underlying()) {
+ return ir.OCONVNOP, ""
+ }
+ }
+
+ // 4. src and dst are both integer or floating point types.
+ if (src.IsInteger() || src.IsFloat()) && (dst.IsInteger() || dst.IsFloat()) {
+ if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+ return ir.OCONVNOP, ""
+ }
+ return ir.OCONV, ""
+ }
+
+ // 5. src and dst are both complex types.
+ if src.IsComplex() && dst.IsComplex() {
+ if types.SimType[src.Kind()] == types.SimType[dst.Kind()] {
+ return ir.OCONVNOP, ""
+ }
+ return ir.OCONV, ""
+ }
+
+ // Special case for constant conversions: any numeric
+ // conversion is potentially okay. We'll validate further
+ // within evconst. See #38117.
+ if srcConstant && (src.IsInteger() || src.IsFloat() || src.IsComplex()) && (dst.IsInteger() || dst.IsFloat() || dst.IsComplex()) {
+ return ir.OCONV, ""
+ }
+
+ // 6. src is an integer or has type []byte or []rune
+ // and dst is a string type.
+ if src.IsInteger() && dst.IsString() {
+ return ir.ORUNESTR, ""
+ }
+
+ if src.IsSlice() && dst.IsString() {
+ if src.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OBYTES2STR, ""
+ }
+ if src.Elem().Kind() == types.RuneType.Kind() {
+ return ir.ORUNES2STR, ""
+ }
+ }
+
+ // 7. src is a string and dst is []byte or []rune.
+ // String to slice.
+ if src.IsString() && dst.IsSlice() {
+ if dst.Elem().Kind() == types.ByteType.Kind() {
+ return ir.OSTR2BYTES, ""
+ }
+ if dst.Elem().Kind() == types.RuneType.Kind() {
+ return ir.OSTR2RUNES, ""
+ }
+ }
+
+ // 8. src is a pointer or uintptr and dst is unsafe.Pointer.
+ if (src.IsPtr() || src.IsUintptr()) && dst.IsUnsafePtr() {
+ return ir.OCONVNOP, ""
+ }
+
+ // 9. src is unsafe.Pointer and dst is a pointer or uintptr.
+ if src.IsUnsafePtr() && (dst.IsPtr() || dst.IsUintptr()) {
+ return ir.OCONVNOP, ""
+ }
+
+ // 10. src is map and dst is a pointer to corresponding hmap.
+ // This rule is needed for the implementation detail that
+ // go gc maps are implemented as a pointer to a hmap struct.
+ if src.Kind() == types.TMAP && dst.IsPtr() &&
+ src.MapType().Hmap == dst.Elem() {
+ return ir.OCONVNOP, ""
+ }
+
+ // 11. src is a slice and dst is a pointer-to-array.
+ // They must have same element type.
+ if src.IsSlice() && dst.IsPtr() && dst.Elem().IsArray() &&
+ types.Identical(src.Elem(), dst.Elem().Elem()) {
+ if !types.AllowsGoVersion(curpkg(), 1, 17) {
+ return ir.OXXX, ":\n\tconversion of slices to array pointers only supported as of -lang=go1.17"
+ }
+ return ir.OSLICE2ARRPTR, ""
+ }
+
+ return ir.OXXX, ""
+}
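+
+// Illustrative examples of the rules above (not part of the original
+// comments): []byte -> string uses OBYTES2STR (rule 6), string -> []rune uses
+// OSTR2RUNES (rule 7), and a slice-to-array-pointer conversion such as
+// []int -> *[4]int uses OSLICE2ARRPTR (rule 11, -lang=go1.17 or later).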
+
+// Code to resolve elided DOTs in embedded types.
+
+// A dlist stores a pointer to a TFIELD Type embedded within
+// a TSTRUCT or TINTER Type.
+type dlist struct {
+ field *types.Field
+}
+
+// dotpath computes the unique shortest explicit selector path to fully qualify
+// a selection expression x.f, where x is of type t and f is the symbol s.
+// If no such path exists, dotpath returns nil.
+// If there are multiple shortest paths to the same depth, ambig is true.
+func dotpath(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) (path []dlist, ambig bool) {
+ // The embedding of types within structs imposes a tree structure onto
+ // types: structs parent the types they embed, and types parent their
+ // fields or methods. Our goal here is to find the shortest path to
+ // a field or method named s in the subtree rooted at t. To accomplish
+ // that, we iteratively perform depth-first searches of increasing depth
+ // until we either find the named field/method or exhaust the tree.
+ for d := 0; ; d++ {
+ if d > len(dotlist) {
+ dotlist = append(dotlist, dlist{})
+ }
+ if c, more := adddot1(s, t, d, save, ignorecase); c == 1 {
+ return dotlist[:d], false
+ } else if c > 1 {
+ return nil, true
+ } else if !more {
+ return nil, false
+ }
+ }
+}
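+
+// Illustrative example (not part of the original comments): for
+//
+//	type Inner struct{ F int }
+//	type Outer struct{ Inner }
+//
+// dotpath finds F at depth 1 and returns the one-element path through the
+// embedded Inner field; AddImplicitDots uses such a path to rewrite x.F as
+// x.Inner.F.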
+
+func expand0(t *types.Type) {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsInterface() {
+ for _, f := range u.AllMethods().Slice() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, symlink{field: f})
+ }
+
+ return
+ }
+
+ u = types.ReceiverBaseType(t)
+ if u != nil {
+ for _, f := range u.Methods().Slice() {
+ if f.Sym.Uniq() {
+ continue
+ }
+ f.Sym.SetUniq(true)
+ slist = append(slist, symlink{field: f})
+ }
+ }
+}
+
+func expand1(t *types.Type, top bool) {
+ if t.Recur() {
+ return
+ }
+ t.SetRecur(true)
+
+ if !top {
+ expand0(t)
+ }
+
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ if u.IsStruct() || u.IsInterface() {
+ var fields *types.Fields
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields.Slice() {
+ if f.Embedded == 0 {
+ continue
+ }
+ if f.Sym == nil {
+ continue
+ }
+ expand1(f.Type, false)
+ }
+ }
+
+ t.SetRecur(false)
+}
+
+func ifacelookdot(s *types.Sym, t *types.Type, ignorecase bool) (m *types.Field, followptr bool) {
+ if t == nil {
+ return nil, false
+ }
+
+ path, ambig := dotpath(s, t, &m, ignorecase)
+ if path == nil {
+ if ambig {
+ base.Errorf("%v.%v is ambiguous", t, s)
+ }
+ return nil, false
+ }
+
+ for _, d := range path {
+ if d.field.Type.IsPtr() {
+ followptr = true
+ break
+ }
+ }
+
+ if !m.IsMethod() {
+ base.Errorf("%v.%v is a field, not a method", t, s)
+ return nil, followptr
+ }
+
+ return m, followptr
+}
+
+// implements reports whether t implements the interface iface. t can be
+// an interface, a type parameter, or a concrete type. If implements returns
+// false, it stores a method of iface that is not implemented in *m. If the
+// method name matches but the type is wrong, it additionally stores the type
+// of the method (on t) in *samename.
+func implements(t, iface *types.Type, m, samename **types.Field, ptr *int) bool {
+ t0 := t
+ if t == nil {
+ return false
+ }
+
+ if t.IsInterface() || t.IsTypeParam() {
+ if t.IsTypeParam() {
+ // If t is a simple type parameter T, its type and underlying type are the same.
+ // If t is a type definition 'type P[T any] T', its type is P[T] and its
+ // underlying type is T. Therefore we use 't.Underlying() != t' to distinguish them.
+ if t.Underlying() != t {
+ CalcMethods(t)
+ } else {
+ // A typeparam satisfies an interface if its type bound
+ // has all the methods of that interface.
+ t = t.Bound()
+ }
+ }
+ i := 0
+ tms := t.AllMethods().Slice()
+ for _, im := range iface.AllMethods().Slice() {
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename = nil
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ }
+
+ return true
+ }
+
+ t = types.ReceiverBaseType(t)
+ var tms []*types.Field
+ if t != nil {
+ CalcMethods(t)
+ tms = t.AllMethods().Slice()
+ }
+ i := 0
+ for _, im := range iface.AllMethods().Slice() {
+ if im.Broke() {
+ continue
+ }
+ for i < len(tms) && tms[i].Sym != im.Sym {
+ i++
+ }
+ if i == len(tms) {
+ *m = im
+ *samename, _ = ifacelookdot(im.Sym, t, true)
+ *ptr = 0
+ return false
+ }
+ tm := tms[i]
+ if tm.Nointerface() || !types.Identical(tm.Type, im.Type) {
+ *m = im
+ *samename = tm
+ *ptr = 0
+ return false
+ }
+ followptr := tm.Embedded == 2
+
+ // if pointer receiver in method,
+ // the method does not exist for value types.
+ rcvr := tm.Type.Recv().Type
+ if rcvr.IsPtr() && !t0.IsPtr() && !followptr && !types.IsInterfaceMethod(tm.Type) {
+ if false && base.Flag.LowerR != 0 {
+ base.Errorf("interface pointer mismatch")
+ }
+
+ *m = im
+ *samename = nil
+ *ptr = 1
+ return false
+ }
+ }
+
+ return true
+}
+
+func isptrto(t *types.Type, et types.Kind) bool {
+ if t == nil {
+ return false
+ }
+ if !t.IsPtr() {
+ return false
+ }
+ t = t.Elem()
+ if t == nil {
+ return false
+ }
+ if t.Kind() != et {
+ return false
+ }
+ return true
+}
+
+// lookdot0 returns the number of fields or methods named s associated
+// with Type t. If exactly one exists, it will be returned in *save
+// (if save is not nil).
+func lookdot0(s *types.Sym, t *types.Type, save **types.Field, ignorecase bool) int {
+ u := t
+ if u.IsPtr() {
+ u = u.Elem()
+ }
+
+ c := 0
+ if u.IsStruct() || u.IsInterface() {
+ var fields *types.Fields
+ if u.IsStruct() {
+ fields = u.Fields()
+ } else {
+ fields = u.AllMethods()
+ }
+ for _, f := range fields.Slice() {
+ if f.Sym == s || (ignorecase && f.IsMethod() && strings.EqualFold(f.Sym.Name, s.Name)) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ u = t
+ if t.Sym() != nil && t.IsPtr() && !t.Elem().IsPtr() {
+ // If t is a defined pointer type, then x.m is shorthand for (*x).m.
+ u = t.Elem()
+ }
+ u = types.ReceiverBaseType(u)
+ if u != nil {
+ for _, f := range u.Methods().Slice() {
+ if f.Embedded == 0 && (f.Sym == s || (ignorecase && strings.EqualFold(f.Sym.Name, s.Name))) {
+ if save != nil {
+ *save = f
+ }
+ c++
+ }
+ }
+ }
+
+ return c
+}
+
+var slist []symlink
+
+// Code to help generate trampoline functions for methods on embedded
+// types. These are approximately the same as the corresponding AddImplicitDots
+// routines except that they expect to be called with unique tasks and
+// they return the actual methods.
+
+type symlink struct {
+ field *types.Field
+}
+
+// TypesOf converts a list of nodes to a list
+// of types of those nodes.
+func TypesOf(x []ir.Node) []*types.Type {
+ r := make([]*types.Type, len(x))
+ for i, n := range x {
+ r[i] = n.Type()
+ }
+ return r
+}
+
+// addTargs writes out the targs to buffer b as a comma-separated list enclosed by
+// brackets.
+func addTargs(b *bytes.Buffer, targs []*types.Type) {
+ b.WriteByte('[')
+ for i, targ := range targs {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ // Make sure that type arguments (including type params) are
+ // uniquely specified. LinkString() eliminates all spaces
+ // and includes the package path (local package path is "" before
+ // linker substitution).
+ tstring := targ.LinkString()
+ b.WriteString(tstring)
+ }
+ b.WriteString("]")
+}
+
+// InstTypeName creates a name for an instantiated type, based on the name of the
+// generic type and the type args.
+func InstTypeName(name string, targs []*types.Type) string {
+ b := bytes.NewBufferString(name)
+ addTargs(b, targs)
+ return b.String()
+}
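+
+// Illustrative example (not from the original change): with type args int and
+// string, InstTypeName("Pair", targs) returns "Pair[int,string]"; named type
+// arguments appear with their package path via LinkString.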
+
+// makeInstName1 returns the name of the generic function instantiated with the
+// given types, which can have type params or shapes, or be concrete types. name is
+// the name of the generic function or method.
+func makeInstName1(name string, targs []*types.Type, hasBrackets bool) string {
+ b := bytes.NewBufferString("")
+ i := strings.Index(name, "[")
+ assert(hasBrackets == (i >= 0))
+ if i >= 0 {
+ b.WriteString(name[0:i])
+ } else {
+ b.WriteString(name)
+ }
+ addTargs(b, targs)
+ if i >= 0 {
+ i2 := strings.LastIndex(name[i:], "]")
+ assert(i2 >= 0)
+ b.WriteString(name[i+i2+1:])
+ }
+ return b.String()
+}
+
+// MakeFuncInstSym makes the unique sym for a stenciled generic function or method,
+// based on the name of the function gf and the targs. It replaces any
+// existing bracket type list in the name. MakeInstName asserts that gf has
+// brackets in its name if and only if hasBrackets is true.
+//
+// Names of declared generic functions have no brackets originally, so hasBrackets
+// should be false. Names of generic methods already have brackets, since the new
+// type parameter is specified in the generic type of the receiver (e.g. the
+// method func (v *value[T]) set(...) { ... } has the original name (*value[T]).set).
+//
+// The standard naming is something like: 'genFn[int,bool]' for functions and
+// '(*genType[int,bool]).methodName' for methods.
+//
+// isMethodNode specifies if the name of a method node is being generated (as opposed
+// to a name of an instantiation of generic function or name of the shape-based
+// function that helps implement a method of an instantiated type). For method nodes
+// on shape types, we prepend "nofunc.", because method nodes for shape types will
+// have no body, and we want to avoid a name conflict with the shape-based function
+// that helps implement the same method for fully-instantiated types. Function names
+// are also created at the end of (*Tsubster).typ1, so we append "nofunc" there as
+// well, as needed.
+func MakeFuncInstSym(gf *types.Sym, targs []*types.Type, isMethodNode, hasBrackets bool) *types.Sym {
+ nm := makeInstName1(gf.Name, targs, hasBrackets)
+ if targs[0].HasShape() && isMethodNode {
+ nm = "nofunc." + nm
+ }
+ return gf.Pkg.Lookup(nm)
+}
+
+func MakeDictSym(gf *types.Sym, targs []*types.Type, hasBrackets bool) *types.Sym {
+ for _, targ := range targs {
+ if targ.HasTParam() {
+ fmt.Printf("FUNCTION %s\n", gf.Name)
+ for _, targ := range targs {
+ fmt.Printf(" PARAM %+v\n", targ)
+ }
+ panic("dictionary should always have concrete type args")
+ }
+ }
+ name := makeInstName1(gf.Name, targs, hasBrackets)
+ name = fmt.Sprintf("%s.%s", objabi.GlobalDictPrefix, name)
+ return gf.Pkg.Lookup(name)
+}
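+
+// Illustrative example (gfSym is the assumed sym of a generic function F, intTyp
+// as above; assumes objabi.GlobalDictPrefix is ".dict"):
+//
+//	MakeDictSym(gfSym, []*types.Type{intTyp}, false) // sym named ".dict.F[int]"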
+
+func assert(p bool) {
+ base.Assert(p)
+}
+
+// List of newly fully-instantiated types that should have their methods generated.
+var instTypeList []*types.Type
+
+// NeedInstType adds a new fully-instantiated type to instTypeList.
+func NeedInstType(t *types.Type) {
+ instTypeList = append(instTypeList, t)
+}
+
+// GetInstTypeList returns the current contents of instTypeList.
+func GetInstTypeList() []*types.Type {
+ r := instTypeList
+ return r
+}
+
+// ClearInstTypeList clears the contents of instTypeList.
+func ClearInstTypeList() {
+ instTypeList = nil
+}
+
+// General type substituter, for replacing typeparams with type args.
+type Tsubster struct {
+ Tparams []*types.Type
+ Targs []*types.Type
+ // If non-nil, the substitution map from name nodes in the generic function to the
+ // name nodes in the new stenciled function.
+ Vars map[*ir.Name]*ir.Name
+ // If non-nil, function to substitute an incomplete (TFORW) type.
+ SubstForwFunc func(*types.Type) *types.Type
+}
+
+// Typ computes the type obtained by substituting any type parameter or shape in t
+// that appears in subst.Tparams with the corresponding type argument in subst.Targs.
+// If t contains no type parameters, the result is t; otherwise the result is a new
+// type. It deals with recursive types by using TFORW types and finding partially or
+// fully created types via sym.Def.
+func (ts *Tsubster) Typ(t *types.Type) *types.Type {
+ // Defer the CheckSize calls until we have a fully-defined
+ // (possibly-recursive) top-level type.
+ types.DeferCheckSize()
+ r := ts.typ1(t)
+ types.ResumeCheckSize()
+ return r
+}
+
+func (ts *Tsubster) typ1(t *types.Type) *types.Type {
+ if !t.HasTParam() && !t.HasShape() && t.Kind() != types.TFUNC {
+ // Note: function types need to be copied regardless, as the
+ // types of closures may contain declarations that need
+ // to be copied. See #45738.
+ return t
+ }
+
+ if t.IsTypeParam() || t.IsShape() {
+ for i, tp := range ts.Tparams {
+ if tp == t {
+ return ts.Targs[i]
+ }
+ }
+ // If t is a simple typeparam T, then t has the name/symbol 'T'
+ // and t.Underlying() == t.
+ //
+ // However, consider the type definition: 'type P[T any] T'. We
+ // might use this definition so we can have a variant of type T
+ // that we can add new methods to. Suppose t is a reference to
+ // P[T]. t has the name 'P[T]', but its kind is TTYPEPARAM,
+ // because P[T] is defined as T. If we look at t.Underlying(), it
+ // is different, because the name of t.Underlying() is 'T' rather
+ // than 'P[T]'. But the kind of t.Underlying() is also TTYPEPARAM.
+ // In this case, we do the needed recursive substitution in the
+ // case statement below.
+ if t.Underlying() == t {
+ // t is a simple typeparam that didn't match anything in the tparams list
+ return t
+ }
+ // t is a more complex typeparam (e.g. P[T], as above, whose
+ // definition is just T).
+ assert(t.Sym() != nil)
+ }
+
+ var newsym *types.Sym
+ var neededTargs []*types.Type
+ var targsChanged bool
+ var forw *types.Type
+
+ if t.Sym() != nil && (t.HasTParam() || t.HasShape()) {
+ // Need to test for t.HasTParam() again because of special TFUNC case above.
+ // Translate the type params for this type according to
+ // the tparam/targs mapping from subst.
+ neededTargs = make([]*types.Type, len(t.RParams()))
+ for i, rparam := range t.RParams() {
+ neededTargs[i] = ts.typ1(rparam)
+ if !types.IdenticalStrict(neededTargs[i], rparam) {
+ targsChanged = true
+ }
+ }
+ // For a named (defined) type, we have to change the name of the
+ // type as well. We do this first, so we can look up if we've
+ // already seen this type during this substitution or other
+ // definitions/substitutions.
+ genName := genericTypeName(t.Sym())
+ newsym = t.Sym().Pkg.Lookup(InstTypeName(genName, neededTargs))
+ if newsym.Def != nil {
+ // We've already created this instantiated defined type.
+ return newsym.Def.Type()
+ }
+
+ // In order to deal with recursive generic types, create a TFORW
+ // type initially and set the Def field of its sym, so it can be
+ // found if this type appears recursively within the type.
+ forw = NewIncompleteNamedType(t.Pos(), newsym)
+ //println("Creating new type by sub", newsym.Name, forw.HasTParam())
+ forw.SetRParams(neededTargs)
+ // Copy the OrigType from the re-instantiated type (which is the sym of
+ // the base generic type).
+ assert(t.OrigType() != nil)
+ forw.SetOrigType(t.OrigType())
+ }
+
+ var newt *types.Type
+
+ switch t.Kind() {
+ case types.TTYPEPARAM:
+ if t.Sym() == newsym && !targsChanged {
+ // The substitution did not change the type.
+ return t
+ }
+ // Substitute the underlying typeparam (e.g. T in P[T], see
+ // the example describing type P[T] above).
+ newt = ts.typ1(t.Underlying())
+ assert(newt != t)
+
+ case types.TARRAY:
+ elem := t.Elem()
+ newelem := ts.typ1(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewArray(newelem, t.NumElem())
+ }
+
+ case types.TPTR:
+ elem := t.Elem()
+ newelem := ts.typ1(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewPtr(newelem)
+ }
+
+ case types.TSLICE:
+ elem := t.Elem()
+ newelem := ts.typ1(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewSlice(newelem)
+ }
+
+ case types.TSTRUCT:
+ newt = ts.tstruct(t, targsChanged)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TFUNC:
+ newrecvs := ts.tstruct(t.Recvs(), false)
+ newparams := ts.tstruct(t.Params(), false)
+ newresults := ts.tstruct(t.Results(), false)
+ // Translate the tparams of a signature.
+ newtparams := ts.tstruct(t.TParams(), false)
+ if newrecvs != t.Recvs() || newparams != t.Params() ||
+ newresults != t.Results() || newtparams != t.TParams() || targsChanged {
+ // If any types have changed, then all the fields
+ // of recv, params, and results must be copied, because they have
+ // offset fields that are dependent, and so must have an
+ // independent copy for each new signature.
+ var newrecv *types.Field
+ if newrecvs.NumFields() > 0 {
+ if newrecvs == t.Recvs() {
+ newrecvs = ts.tstruct(t.Recvs(), true)
+ }
+ newrecv = newrecvs.Field(0)
+ }
+ if newparams == t.Params() {
+ newparams = ts.tstruct(t.Params(), true)
+ }
+ if newresults == t.Results() {
+ newresults = ts.tstruct(t.Results(), true)
+ }
+ var tparamfields []*types.Field
+ if newtparams.HasTParam() {
+ tparamfields = newtparams.FieldSlice()
+ } else {
+ // Completely remove the tparams from the resulting
+ // signature, if the tparams are now concrete types.
+ tparamfields = nil
+ }
+ newt = types.NewSignature(t.Pkg(), newrecv, tparamfields,
+ newparams.FieldSlice(), newresults.FieldSlice())
+ }
+
+ case types.TINTER:
+ newt = ts.tinter(t, targsChanged)
+ if newt == t {
+ newt = nil
+ }
+
+ case types.TMAP:
+ newkey := ts.typ1(t.Key())
+ newval := ts.typ1(t.Elem())
+ if newkey != t.Key() || newval != t.Elem() || targsChanged {
+ newt = types.NewMap(newkey, newval)
+ }
+
+ case types.TCHAN:
+ elem := t.Elem()
+ newelem := ts.typ1(elem)
+ if newelem != elem || targsChanged {
+ newt = types.NewChan(newelem, t.ChanDir())
+ }
+ case types.TFORW:
+ if ts.SubstForwFunc != nil {
+ return ts.SubstForwFunc(forw)
+ } else {
+ assert(false)
+ }
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64,
+ types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64,
+ types.TUINTPTR, types.TBOOL, types.TSTRING, types.TFLOAT32, types.TFLOAT64, types.TCOMPLEX64, types.TCOMPLEX128, types.TUNSAFEPTR:
+ newt = t.Underlying()
+ case types.TUNION:
+ nt := t.NumTerms()
+ newterms := make([]*types.Type, nt)
+ tildes := make([]bool, nt)
+ changed := false
+ for i := 0; i < nt; i++ {
+ term, tilde := t.Term(i)
+ tildes[i] = tilde
+ newterms[i] = ts.typ1(term)
+ if newterms[i] != term {
+ changed = true
+ }
+ }
+ if changed {
+ newt = types.NewUnion(newterms, tildes)
+ }
+ default:
+ panic(fmt.Sprintf("Bad type in (*TSubster).Typ: %v", t.Kind()))
+ }
+ if newt == nil {
+ // Even though there were typeparams in the type, there may be no
+ // change if this is a function type for a function call (which will
+ // have its own tparams/targs in the function instantiation).
+ return t
+ }
+
+ if forw != nil {
+ forw.SetUnderlying(newt)
+ newt = forw
+ }
+
+ if !newt.HasTParam() && !newt.IsFuncArgStruct() {
+ // Calculate the size of any new types created. These will be
+ // deferred until the top-level ts.Typ() or g.typ() (if this is
+ // called from g.fillinMethods()).
+ types.CheckSize(newt)
+ }
+
+ if t.Kind() != types.TINTER && t.Methods().Len() > 0 {
+ // Fill in the method info for the new type.
+ var newfields []*types.Field
+ newfields = make([]*types.Field, t.Methods().Len())
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.typ1(f.Type)
+ oldsym := f.Nname.Sym()
+
+ // Use the name of the substituted receiver to create the
+ // method name, since the receiver name may have many levels
+ // of nesting (brackets) with type names to be substituted.
+ recvType := t2.Recv().Type
+ var nm string
+ if recvType.IsPtr() {
+ recvType = recvType.Elem()
+ nm = "(*" + recvType.Sym().Name + ")." + f.Sym.Name
+ } else {
+ nm = recvType.Sym().Name + "." + f.Sym.Name
+ }
+ if recvType.RParams()[0].HasShape() {
+ // We add "nofunc" to methods of shape type to avoid
+ // conflict with the name of the shape-based helper
+ // function. See header comment of MakeFuncInstSym.
+ nm = "nofunc." + nm
+ }
+ newsym := oldsym.Pkg.Lookup(nm)
+ var nname *ir.Name
+ if newsym.Def != nil {
+ nname = newsym.Def.(*ir.Name)
+ } else {
+ nname = ir.NewNameAt(f.Pos, newsym)
+ nname.SetType(t2)
+ ir.MarkFunc(nname)
+ newsym.Def = nname
+ }
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Nname = nname
+ }
+ newt.Methods().Set(newfields)
+ if !newt.HasTParam() && !newt.HasShape() {
+ // Generate all the methods for a new fully-instantiated type.
+
+ NeedInstType(newt)
+ }
+ }
+ return newt
+}
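+
+// Illustrative example (assumed types): with ts.Tparams = [T] and
+// ts.Targs = [int], typ1 maps
+//
+//	[]T      -> []int
+//	map[T]*T -> map[int]*int
+//	Pair[T]  -> a new named type "Pair[int]" (built via a TFORW placeholder)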
+
+// tstruct substitutes type params in types of the fields of a structure type. For
+// each field, tstruct copies the Nname, and translates it if Nname is in
+// ts.vars. To always force the creation of a new (top-level) struct,
+// regardless of whether anything changed with the types or names of the struct's
+// fields, set force to true.
+func (ts *Tsubster) tstruct(t *types.Type, force bool) *types.Type {
+ if t.NumFields() == 0 {
+ if t.HasTParam() || t.HasShape() {
+ // For an empty struct, we need to return a new type, if
+ // substituting from a generic type or shape type, since it
+ // will change HasTParam/HasShape flags.
+ return types.NewStruct(t.Pkg(), nil)
+ }
+ return t
+ }
+ var newfields []*types.Field
+ if force {
+ newfields = make([]*types.Field, t.NumFields())
+ }
+ for i, f := range t.Fields().Slice() {
+ t2 := ts.typ1(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.NumFields())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Field(j)
+ }
+ }
+ if newfields != nil {
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ newfields[i].Embedded = f.Embedded
+ newfields[i].Note = f.Note
+ if f.IsDDD() {
+ newfields[i].SetIsDDD(true)
+ }
+ if f.Nointerface() {
+ newfields[i].SetNointerface(true)
+ }
+ if f.Nname != nil && ts.Vars != nil {
+ n := f.Nname.(*ir.Name)
+ v := ts.Vars[n]
+ if v != nil {
+ // This is the case where we are
+ // translating the type of the function we
+ // are substituting, so its dcls are in
+ // the subst.ts.vars table, and we want to
+ // change to reference the new dcl.
+ newfields[i].Nname = v
+ } else if ir.IsBlank(n) {
+ // Blank variable is not in the dcl list. Make a
+ // new one so it is not shared.
+ m := ir.NewNameAt(n.Pos(), ir.BlankNode.Sym())
+ m.SetType(n.Type())
+ m.SetTypecheck(1)
+ newfields[i].Nname = m
+ } else {
+ // This is the case where we are
+ // translating the type of a function
+ // reference inside the function we are
+ // substituting, so we leave the Nname
+ // value as is.
+ newfields[i].Nname = f.Nname
+ }
+ }
+ }
+ }
+ if newfields != nil {
+ news := types.NewStruct(t.Pkg(), newfields)
+ news.StructType().Funarg = t.StructType().Funarg
+ return news
+ }
+ return t
+
+}
+
+// tinter substitutes type params in types of the methods of an interface type.
+func (ts *Tsubster) tinter(t *types.Type, force bool) *types.Type {
+ if t.Methods().Len() == 0 {
+ if t.HasTParam() || t.HasShape() {
+ // For an empty interface, we need to return a new type, if
+ // substituting from a generic type or shape type, since it
+ // will change HasTParam/HasShape flags.
+ return types.NewInterface(t.Pkg(), nil, false)
+ }
+ return t
+ }
+ var newfields []*types.Field
+ if force {
+ newfields = make([]*types.Field, t.Methods().Len())
+ }
+ for i, f := range t.Methods().Slice() {
+ t2 := ts.typ1(f.Type)
+ if (t2 != f.Type || f.Nname != nil) && newfields == nil {
+ newfields = make([]*types.Field, t.Methods().Len())
+ for j := 0; j < i; j++ {
+ newfields[j] = t.Methods().Index(j)
+ }
+ }
+ if newfields != nil {
+ newfields[i] = types.NewField(f.Pos, f.Sym, t2)
+ }
+ }
+ if newfields != nil {
+ return types.NewInterface(t.Pkg(), newfields, t.IsImplicit())
+ }
+ return t
+}
+
+// genericTypeName returns the name of the base generic type for the type named by
+// sym. It simply returns the name obtained by removing everything after the
+// first bracket ("[").
+func genericTypeName(sym *types.Sym) string {
+ return sym.Name[0:strings.Index(sym.Name, "[")]
+}
+
+// getShapes appends the list of the shape types that are used within type t to
+// listp. The type traversal is simplified for two reasons: (1) we can always stop a
+// type traversal when t.HasShape() is false; and (2) shape types can't appear inside
+// a named type, except for the type args of a generic type. So, the traversal will
+// always stop before we have to deal with recursive types.
+func getShapes(t *types.Type, listp *[]*types.Type) {
+ if !t.HasShape() {
+ return
+ }
+ if t.IsShape() {
+ *listp = append(*listp, t)
+ return
+ }
+
+ if t.Sym() != nil {
+ // A named type can't have shapes in it, except for type args of a
+ // generic type. We will have to deal with this differently once we
+ // allow local types in generic functions (#47631).
+ for _, rparam := range t.RParams() {
+ getShapes(rparam, listp)
+ }
+ return
+ }
+
+ switch t.Kind() {
+ case types.TARRAY, types.TPTR, types.TSLICE, types.TCHAN:
+ getShapes(t.Elem(), listp)
+
+ case types.TSTRUCT:
+ for _, f := range t.FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TFUNC:
+ for _, f := range t.Recvs().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.Params().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.Results().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+ for _, f := range t.TParams().FieldSlice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TINTER:
+ for _, f := range t.Methods().Slice() {
+ getShapes(f.Type, listp)
+ }
+
+ case types.TMAP:
+ getShapes(t.Key(), listp)
+ getShapes(t.Elem(), listp)
+
+ default:
+ panic(fmt.Sprintf("Bad type in getShapes: %v", t.Kind()))
+ }
+
+}
+
+// Shapify takes a concrete type and a type param index, and returns a GCshape type that can
+// be used in place of the input type and still generate identical code.
+// No methods are added - all method calls directly on a shape should
+// be done by converting to an interface using the dictionary.
+//
+// For now, we only consider two types to have the same shape, if they have exactly
+// the same underlying type or they are both pointer types.
+//
+// tparam is the associated typeparam - it must be a TTYPEPARAM type. If there is a
+// structural type for the associated type param (not common), then a pointer type t
+// is mapped to its underlying type, rather than being merged with other pointers.
+//
+// Shape types are also distinguished by the index of the type in a type param/arg
+// list. We need to do this so we can distinguish and substitute properly for two
+// type params in the same function that have the same shape for a particular
+// instantiation.
+func Shapify(t *types.Type, index int, tparam *types.Type) *types.Type {
+ assert(!t.IsShape())
+ if t.HasShape() {
+ // We are sometimes dealing with types from a shape instantiation
+ // that were constructed from existing shape types, so t may
+ // sometimes have shape types inside it. In that case, we find all
+ // those shape types with getShapes() and replace them with their
+ // underlying type.
+ //
+ // If we don't do this, we may create extra unneeded shape types that
+ // have these other shape types embedded in them. This may lead to
+ // generating extra shape instantiations, and a mismatch between the
+ // instantiations that we used in generating dictionaries and the
+ // instantiations that are actually called (#51303).
+ list := []*types.Type{}
+ getShapes(t, &list)
+ list2 := make([]*types.Type, len(list))
+ for i, shape := range list {
+ list2[i] = shape.Underlying()
+ }
+ ts := Tsubster{
+ Tparams: list,
+ Targs: list2,
+ }
+ t = ts.Typ(t)
+ }
+ // Map all types with the same underlying type to the same shape.
+ u := t.Underlying()
+
+ // All pointers have the same shape.
+ // TODO: Make unsafe.Pointer the same shape as normal pointers.
+ // Note: pointers to arrays are special because of slice-to-array-pointer
+ // conversions. See issue 49295.
+ if u.Kind() == types.TPTR && u.Elem().Kind() != types.TARRAY &&
+ tparam.Bound().StructuralType() == nil && !u.Elem().NotInHeap() {
+ u = types.Types[types.TUINT8].PtrTo()
+ }
+
+ if shapeMap == nil {
+ shapeMap = map[int]map[*types.Type]*types.Type{}
+ }
+ submap := shapeMap[index]
+ if submap == nil {
+ submap = map[*types.Type]*types.Type{}
+ shapeMap[index] = submap
+ }
+ if s := submap[u]; s != nil {
+ return s
+ }
+
+ // LinkString specifies the type uniquely, but has no spaces.
+ nm := fmt.Sprintf("%s_%d", u.LinkString(), index)
+ sym := types.ShapePkg.Lookup(nm)
+ if sym.Def != nil {
+ // Use any existing type with the same name
+ submap[u] = sym.Def.Type()
+ return submap[u]
+ }
+ name := ir.NewDeclNameAt(u.Pos(), ir.OTYPE, sym)
+ s := types.NewNamed(name)
+ sym.Def = name
+ s.SetUnderlying(u)
+ s.SetIsShape(true)
+ s.SetHasShape(true)
+ types.CalcSize(s)
+ name.SetType(s)
+ name.SetTypecheck(1)
+ submap[u] = s
+ return s
+}
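+
+// Illustrative example (assumed types; no structural type on tparam's bound):
+// ordinary pointers collapse to *uint8, and the shape name is the underlying
+// type's LinkString plus the index:
+//
+//	Shapify(myIntTyp, 0, tparam)     // shape named "int_0" in types.ShapePkg
+//	Shapify(ptrStructTyp, 1, tparam) // shape named "*uint8_1"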
+
+var shapeMap map[int]map[*types.Type]*types.Type
diff --git a/src/cmd/compile/internal/typecheck/syms.go b/src/cmd/compile/internal/typecheck/syms.go
new file mode 100644
index 0000000..ed3aaec
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/syms.go
@@ -0,0 +1,103 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+)
+
+func LookupRuntime(name string) *ir.Name {
+ s := ir.Pkgs.Runtime.Lookup(name)
+ if s == nil || s.Def == nil {
+ base.Fatalf("LookupRuntime: can't find runtime.%s", name)
+ }
+ return ir.AsNode(s.Def).(*ir.Name)
+}
+
+// SubstArgTypes substitutes the given list of types for
+// successive occurrences of the "any" placeholder in the
+// type syntax expression n.Type.
+// The result of SubstArgTypes MUST be assigned back to old, e.g.
+// n.Left = SubstArgTypes(n.Left, t1, t2)
+func SubstArgTypes(old *ir.Name, types_ ...*types.Type) *ir.Name {
+ for _, t := range types_ {
+ types.CalcSize(t)
+ }
+ n := ir.NewNameAt(old.Pos(), old.Sym())
+ n.Class = old.Class
+ n.SetType(types.SubstAny(old.Type(), &types_))
+ n.Func = old.Func
+ if len(types_) > 0 {
+ base.Fatalf("SubstArgTypes: too many argument types")
+ }
+ return n
+}
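+
+// Illustrative sketch (hypothetical: fn is a runtime helper whose signature
+// contains exactly two "any" placeholders; keyTyp and elemTyp are assumed types):
+//
+//	fn = SubstArgTypes(fn, keyTyp, elemTyp) // one concrete type per "any"; result assigned back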
+
+// AutoLabel generates a new Name node for use with
+// an automatically generated label.
+// prefix is a short mnemonic (e.g. ".s" for switch)
+// to help with debugging.
+// It should begin with "." to avoid conflicts with
+// user labels.
+func AutoLabel(prefix string) *types.Sym {
+ if prefix[0] != '.' {
+ base.Fatalf("autolabel prefix must start with '.', have %q", prefix)
+ }
+ fn := ir.CurFunc
+ if ir.CurFunc == nil {
+ base.Fatalf("autolabel outside function")
+ }
+ n := fn.Label
+ fn.Label++
+ return LookupNum(prefix, int(n))
+}
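+
+// Illustrative example: inside a function whose label counter starts at 0,
+// successive calls return symbols with increasing numeric suffixes, roughly
+// ".s0", ".s1", and so on:
+//
+//	lab := AutoLabel(".s")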
+
+func Lookup(name string) *types.Sym {
+ return types.LocalPkg.Lookup(name)
+}
+
+// InitRuntime loads the definitions for the low-level runtime functions,
+// so that the compiler can generate calls to them,
+// but does not make them visible to user code.
+func InitRuntime() {
+ base.Timer.Start("fe", "loadsys")
+ types.Block = 1
+
+ typs := runtimeTypes()
+ for _, d := range &runtimeDecls {
+ sym := ir.Pkgs.Runtime.Lookup(d.name)
+ typ := typs[d.typ]
+ switch d.tag {
+ case funcTag:
+ importfunc(src.NoXPos, sym, typ)
+ case varTag:
+ importvar(src.NoXPos, sym, typ)
+ default:
+ base.Fatalf("unhandled declaration tag %v", d.tag)
+ }
+ }
+}
+
+// LookupRuntimeFunc looks up a Go function name in package runtime. This function
+// must follow the internal calling convention.
+func LookupRuntimeFunc(name string) *obj.LSym {
+ return LookupRuntimeABI(name, obj.ABIInternal)
+}
+
+// LookupRuntimeVar looks up a variable (or assembly function) name in package
+// runtime. If this is a function, it may have a special calling
+// convention.
+func LookupRuntimeVar(name string) *obj.LSym {
+ return LookupRuntimeABI(name, obj.ABI0)
+}
+
+// LookupRuntimeABI looks up a name in package runtime using the given ABI.
+func LookupRuntimeABI(name string, abi obj.ABI) *obj.LSym {
+ return base.PkgLinksym("runtime", name, abi)
+}
diff --git a/src/cmd/compile/internal/typecheck/target.go b/src/cmd/compile/internal/typecheck/target.go
new file mode 100644
index 0000000..018614d
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/target.go
@@ -0,0 +1,12 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+//go:generate go run mkbuiltin.go
+
+package typecheck
+
+import "cmd/compile/internal/ir"
+
+// Target is the package being compiled.
+var Target *ir.Package
diff --git a/src/cmd/compile/internal/typecheck/type.go b/src/cmd/compile/internal/typecheck/type.go
new file mode 100644
index 0000000..c4c1ef5
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/type.go
@@ -0,0 +1,188 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+)
+
+// tcArrayType typechecks an OTARRAY node.
+func tcArrayType(n *ir.ArrayType) ir.Node {
+ n.Elem = typecheckNtype(n.Elem)
+ if n.Elem.Type() == nil {
+ return n
+ }
+ if n.Len == nil { // [...]T
+ if !n.Diag() {
+ n.SetDiag(true)
+ base.Errorf("use of [...] array outside of array literal")
+ }
+ return n
+ }
+ n.Len = indexlit(Expr(n.Len))
+ size := n.Len
+ if ir.ConstType(size) != constant.Int {
+ switch {
+ case size.Type() == nil:
+ // Error already reported elsewhere.
+ case size.Type().IsInteger() && size.Op() != ir.OLITERAL:
+ base.Errorf("non-constant array bound %v", size)
+ default:
+ base.Errorf("invalid array bound %v", size)
+ }
+ return n
+ }
+
+ v := size.Val()
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("array bound is too large")
+ return n
+ }
+
+ if constant.Sign(v) < 0 {
+ base.Errorf("array bound must be non-negative")
+ return n
+ }
+
+ bound, _ := constant.Int64Val(v)
+ t := types.NewArray(n.Elem.Type(), bound)
+ n.SetOTYPE(t)
+ types.CheckSize(t)
+ return n
+}
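+
+// Illustrative examples of the source forms handled above:
+//
+//	var a [3]int   // ok: constant, non-negative bound that fits in int
+//	var b [n]int   // "non-constant array bound n" when n is not a constant
+//	var c [...]int // "use of [...] array outside of array literal"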
+
+// tcChanType typechecks an OTCHAN node.
+func tcChanType(n *ir.ChanType) ir.Node {
+ n.Elem = typecheckNtype(n.Elem)
+ l := n.Elem
+ if l.Type() == nil {
+ return n
+ }
+ if l.Type().NotInHeap() {
+ base.Errorf("chan of incomplete (or unallocatable) type not allowed")
+ }
+ n.SetOTYPE(types.NewChan(l.Type(), n.Dir))
+ return n
+}
+
+// tcFuncType typechecks an OTFUNC node.
+func tcFuncType(n *ir.FuncType) ir.Node {
+ misc := func(f *types.Field, nf *ir.Field) {
+ f.SetIsDDD(nf.IsDDD)
+ if nf.Decl != nil {
+ nf.Decl.SetType(f.Type)
+ f.Nname = nf.Decl
+ }
+ }
+
+ lno := base.Pos
+
+ var recv *types.Field
+ if n.Recv != nil {
+ recv = tcField(n.Recv, misc)
+ }
+
+ t := types.NewSignature(types.LocalPkg, recv, nil, tcFields(n.Params, misc), tcFields(n.Results, misc))
+ checkdupfields("argument", t.Recvs().FieldSlice(), t.Params().FieldSlice(), t.Results().FieldSlice())
+
+ base.Pos = lno
+
+ n.SetOTYPE(t)
+ return n
+}
+
+// tcInterfaceType typechecks an OTINTER node.
+func tcInterfaceType(n *ir.InterfaceType) ir.Node {
+ if len(n.Methods) == 0 {
+ n.SetOTYPE(types.Types[types.TINTER])
+ return n
+ }
+
+ lno := base.Pos
+ methods := tcFields(n.Methods, nil)
+ base.Pos = lno
+
+ n.SetOTYPE(types.NewInterface(types.LocalPkg, methods, false))
+ return n
+}
+
+// tcMapType typechecks an OTMAP node.
+func tcMapType(n *ir.MapType) ir.Node {
+ n.Key = typecheckNtype(n.Key)
+ n.Elem = typecheckNtype(n.Elem)
+ l := n.Key
+ r := n.Elem
+ if l.Type() == nil || r.Type() == nil {
+ return n
+ }
+ if l.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map key not allowed")
+ }
+ if r.Type().NotInHeap() {
+ base.Errorf("incomplete (or unallocatable) map value not allowed")
+ }
+ n.SetOTYPE(types.NewMap(l.Type(), r.Type()))
+ mapqueue = append(mapqueue, n) // check map keys when all types are settled
+ return n
+}
+
+// tcSliceType typechecks an OTSLICE node.
+func tcSliceType(n *ir.SliceType) ir.Node {
+ n.Elem = typecheckNtype(n.Elem)
+ if n.Elem.Type() == nil {
+ return n
+ }
+ t := types.NewSlice(n.Elem.Type())
+ n.SetOTYPE(t)
+ types.CheckSize(t)
+ return n
+}
+
+// tcStructType typechecks an OTSTRUCT node.
+func tcStructType(n *ir.StructType) ir.Node {
+ lno := base.Pos
+
+ fields := tcFields(n.Fields, func(f *types.Field, nf *ir.Field) {
+ if nf.Embedded {
+ checkembeddedtype(f.Type)
+ f.Embedded = 1
+ }
+ f.Note = nf.Note
+ })
+ checkdupfields("field", fields)
+
+ base.Pos = lno
+ n.SetOTYPE(types.NewStruct(types.LocalPkg, fields))
+ return n
+}
+
+// tcField typechecks a generic Field.
+// misc can be provided to handle specialized typechecking.
+func tcField(n *ir.Field, misc func(*types.Field, *ir.Field)) *types.Field {
+ base.Pos = n.Pos
+ if n.Ntype != nil {
+ n.Type = typecheckNtype(n.Ntype).Type()
+ n.Ntype = nil
+ }
+ f := types.NewField(n.Pos, n.Sym, n.Type)
+ if misc != nil {
+ misc(f, n)
+ }
+ return f
+}
+
+// tcFields typechecks a slice of generic Fields.
+// misc can be provided to handle specialized typechecking.
+func tcFields(l []*ir.Field, misc func(*types.Field, *ir.Field)) []*types.Field {
+ fields := make([]*types.Field, len(l))
+ for i, n := range l {
+ fields[i] = tcField(n, misc)
+ }
+ return fields
+}
diff --git a/src/cmd/compile/internal/typecheck/typecheck.go b/src/cmd/compile/internal/typecheck/typecheck.go
new file mode 100644
index 0000000..f6be298
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/typecheck.go
@@ -0,0 +1,2249 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// Function collecting autotmps generated during typechecking,
+// to be included in the package-level init function.
+var InitTodoFunc = ir.NewFunc(base.Pos)
+
+var inimport bool // set during import
+
+var TypecheckAllowed bool
+
+var (
+ NeedRuntimeType = func(*types.Type) {}
+)
+
+func AssignExpr(n ir.Node) ir.Node { return typecheck(n, ctxExpr|ctxAssign) }
+func Expr(n ir.Node) ir.Node { return typecheck(n, ctxExpr) }
+func Stmt(n ir.Node) ir.Node { return typecheck(n, ctxStmt) }
+
+func Exprs(exprs []ir.Node) { typecheckslice(exprs, ctxExpr) }
+func Stmts(stmts []ir.Node) { typecheckslice(stmts, ctxStmt) }
+
+func Call(pos src.XPos, callee ir.Node, args []ir.Node, dots bool) ir.Node {
+ call := ir.NewCallExpr(pos, ir.OCALL, callee, args)
+ call.IsDDD = dots
+ return typecheck(call, ctxStmt|ctxExpr)
+}
+
+func Callee(n ir.Node) ir.Node {
+ return typecheck(n, ctxExpr|ctxCallee)
+}
+
+func FuncBody(n *ir.Func) {
+ ir.CurFunc = n
+ errorsBefore := base.Errors()
+ Stmts(n.Body)
+ CheckUnused(n)
+ CheckReturn(n)
+ if ir.IsBlank(n.Nname) || base.Errors() > errorsBefore {
+ n.Body = nil // blank function or type errors; do not compile
+ }
+}
+
+var importlist []*ir.Func
+
+// AllImportedBodies reads in the bodies of all imported functions and typechecks
+// them, if needed.
+func AllImportedBodies() {
+ for _, n := range importlist {
+ if n.Inl != nil {
+ ImportedBody(n)
+ }
+ }
+}
+
+var traceIndent []byte
+
+func tracePrint(title string, n ir.Node) func(np *ir.Node) {
+ indent := traceIndent
+
+ // guard against nil
+ var pos, op string
+ var tc uint8
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s%s %p %s %v tc=%d\n", pos, indent, title, n, op, n, tc)
+ traceIndent = append(traceIndent, ". "...)
+
+ return func(np *ir.Node) {
+ traceIndent = traceIndent[:len(traceIndent)-2]
+
+ // if we have a result, use that
+ if np != nil {
+ n = *np
+ }
+
+ // guard against nil
+ // use outer pos, op so we don't get empty pos/op if n == nil (nicer output)
+ var tc uint8
+ var typ *types.Type
+ if n != nil {
+ pos = base.FmtPos(n.Pos())
+ op = n.Op().String()
+ tc = n.Typecheck()
+ typ = n.Type()
+ }
+
+ types.SkipSizeForTracing = true
+ defer func() { types.SkipSizeForTracing = false }()
+ fmt.Printf("%s: %s=> %p %s %v tc=%d type=%L\n", pos, indent, n, op, n, tc, typ)
+ }
+}
+
+const (
+ ctxStmt = 1 << iota // evaluated at statement level
+ ctxExpr // evaluated in value context
+ ctxType // evaluated in type context
+ ctxCallee // call-only expressions are ok
+ ctxMultiOK // multivalue function returns are ok
+ ctxAssign // assigning to expression
+)
+
+// type checks the whole tree of an expression.
+// calculates expression types.
+// evaluates compile time constants.
+// marks variables that escape the local frame.
+// rewrites n.Op to be more specific in some cases.
+
+var typecheckdefstack []*ir.Name
+
+// Resolve resolves an ONONAME node to a definition, if any. If n is not an ONONAME node,
+// Resolve returns n unchanged. If n is an ONONAME node and not in the same package,
+// then n.Sym() is resolved using import data. Otherwise, Resolve returns
+// n.Sym().Def. An ONONAME node can be created using ir.NewIdent(), so an imported
+// symbol can be resolved via Resolve(ir.NewIdent(src.NoXPos, sym)).
+func Resolve(n ir.Node) (res ir.Node) {
+ if n == nil || n.Op() != ir.ONONAME {
+ return n
+ }
+
+ // only trace if there's work to do
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("resolve", n)(&res)
+ }
+
+ if sym := n.Sym(); sym.Pkg != types.LocalPkg {
+ // We might have an ir.Ident from oldname or importDot.
+ if id, ok := n.(*ir.Ident); ok {
+ if pkgName := DotImportRefs[id]; pkgName != nil {
+ pkgName.Used = true
+ }
+ }
+
+ return expandDecl(n)
+ }
+
+ r := ir.AsNode(n.Sym().Def)
+ if r == nil {
+ return n
+ }
+
+ if r.Op() == ir.OIOTA {
+ if x := getIotaValue(); x >= 0 {
+ return ir.NewInt(x)
+ }
+ return n
+ }
+
+ return r
+}
+
+func typecheckslice(l []ir.Node, top int) {
+ for i := range l {
+ l[i] = typecheck(l[i], top)
+ }
+}
+
+var _typekind = []string{
+ types.TINT: "int",
+ types.TUINT: "uint",
+ types.TINT8: "int8",
+ types.TUINT8: "uint8",
+ types.TINT16: "int16",
+ types.TUINT16: "uint16",
+ types.TINT32: "int32",
+ types.TUINT32: "uint32",
+ types.TINT64: "int64",
+ types.TUINT64: "uint64",
+ types.TUINTPTR: "uintptr",
+ types.TCOMPLEX64: "complex64",
+ types.TCOMPLEX128: "complex128",
+ types.TFLOAT32: "float32",
+ types.TFLOAT64: "float64",
+ types.TBOOL: "bool",
+ types.TSTRING: "string",
+ types.TPTR: "pointer",
+ types.TUNSAFEPTR: "unsafe.Pointer",
+ types.TSTRUCT: "struct",
+ types.TINTER: "interface",
+ types.TCHAN: "chan",
+ types.TMAP: "map",
+ types.TARRAY: "array",
+ types.TSLICE: "slice",
+ types.TFUNC: "func",
+ types.TNIL: "nil",
+ types.TIDEAL: "untyped number",
+}
+
+func typekind(t *types.Type) string {
+ if t.IsUntyped() {
+ return fmt.Sprintf("%v", t)
+ }
+ et := t.Kind()
+ if int(et) < len(_typekind) {
+ s := _typekind[et]
+ if s != "" {
+ return s
+ }
+ }
+ return fmt.Sprintf("etype=%d", et)
+}
+
+func cycleFor(start ir.Node) []ir.Node {
+ // Find the start node in typecheck_tcstack.
+ // We know that it must exist because each time we mark
+ // a node with n.SetTypecheck(2) we push it on the stack,
+ // and each time we mark a node with n.SetTypecheck(1) we
+ // pop it from the stack. We hit a cycle when we encounter
+ // a node marked 2, in which case it must be on the stack.
+ i := len(typecheck_tcstack) - 1
+ for i > 0 && typecheck_tcstack[i] != start {
+ i--
+ }
+
+ // collect all nodes with same Op
+ var cycle []ir.Node
+ for _, n := range typecheck_tcstack[i:] {
+ if n.Op() == start.Op() {
+ cycle = append(cycle, n)
+ }
+ }
+
+ return cycle
+}
+
+func cycleTrace(cycle []ir.Node) string {
+ var s string
+ for i, n := range cycle {
+ s += fmt.Sprintf("\n\t%v: %v uses %v", ir.Line(n), n, cycle[(i+1)%len(cycle)])
+ }
+ return s
+}
+
+var typecheck_tcstack []ir.Node
+
+func Func(fn *ir.Func) {
+ new := Stmt(fn)
+ if new != fn {
+ base.Fatalf("typecheck changed func")
+ }
+}
+
+func typecheckNtype(n ir.Ntype) ir.Ntype {
+ return typecheck(n, ctxType).(ir.Ntype)
+}
+
+// typecheck type checks node n.
+// The result of typecheck MUST be assigned back to n, e.g.
+// n.Left = typecheck(n.Left, top)
+func typecheck(n ir.Node, top int) (res ir.Node) {
+ // cannot type check until all the source has been parsed
+ if !TypecheckAllowed {
+ base.Fatalf("early typecheck")
+ }
+
+ if n == nil {
+ return nil
+ }
+
+ // only trace if there's work to do
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheck", n)(&res)
+ }
+
+ lno := ir.SetPos(n)
+
+ // Skip over parens.
+ for n.Op() == ir.OPAREN {
+ n = n.(*ir.ParenExpr).X
+ }
+
+ // Resolve definition of name and value of iota lazily.
+ n = Resolve(n)
+
+ // Skip typecheck if already done.
+ // But re-typecheck ONAME/OTYPE/OLITERAL/OPACK node in case context has changed.
+ if n.Typecheck() == 1 || n.Typecheck() == 3 {
+ switch n.Op() {
+ case ir.ONAME, ir.OTYPE, ir.OLITERAL, ir.OPACK:
+ break
+
+ default:
+ base.Pos = lno
+ return n
+ }
+ }
+
+ if n.Typecheck() == 2 {
+ // Typechecking loop. Try printing a meaningful message,
+ // otherwise a stack trace of typechecking.
+ switch n.Op() {
+ // We can already diagnose variables used as types.
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if top&(ctxExpr|ctxType) == ctxType {
+ base.Errorf("%v is not a type", n)
+ }
+
+ case ir.OTYPE:
+ // Only report a type cycle if we are expecting a type.
+ // Otherwise let other code report an error.
+ if top&ctxType == ctxType {
+ // A cycle containing only alias types is an error
+ // since it would expand indefinitely when aliases
+ // are substituted.
+ cycle := cycleFor(n)
+ for _, n1 := range cycle {
+ if n1.Name() != nil && !n1.Name().Alias() {
+ // Cycle is ok. But if n is an alias type and doesn't
+ // have a type yet, we have a recursive type declaration
+ // with aliases that we can't handle properly yet.
+ // Report an error rather than crashing later.
+ if n.Name() != nil && n.Name().Alias() && n.Type() == nil {
+ base.Pos = n.Pos()
+ base.Fatalf("cannot handle alias type declaration (issue #25838): %v", n)
+ }
+ base.Pos = lno
+ return n
+ }
+ }
+ base.ErrorfAt(n.Pos(), "invalid recursive type alias %v%s", n, cycleTrace(cycle))
+ }
+
+ case ir.OLITERAL:
+ if top&(ctxExpr|ctxType) == ctxType {
+ base.Errorf("%v is not a type", n)
+ break
+ }
+ base.ErrorfAt(n.Pos(), "constant definition loop%s", cycleTrace(cycleFor(n)))
+ }
+
+ if base.Errors() == 0 {
+ var trace string
+ for i := len(typecheck_tcstack) - 1; i >= 0; i-- {
+ x := typecheck_tcstack[i]
+ trace += fmt.Sprintf("\n\t%v %v", ir.Line(x), x)
+ }
+ base.Errorf("typechecking loop involving %v%s", n, trace)
+ }
+
+ base.Pos = lno
+ return n
+ }
+
+ typecheck_tcstack = append(typecheck_tcstack, n)
+
+ n.SetTypecheck(2)
+ n = typecheck1(n, top)
+ n.SetTypecheck(1)
+
+ last := len(typecheck_tcstack) - 1
+ typecheck_tcstack[last] = nil
+ typecheck_tcstack = typecheck_tcstack[:last]
+
+ _, isExpr := n.(ir.Expr)
+ _, isStmt := n.(ir.Stmt)
+ isMulti := false
+ switch n.Op() {
+ case ir.OCALLFUNC, ir.OCALLINTER, ir.OCALLMETH:
+ n := n.(*ir.CallExpr)
+ if t := n.X.Type(); t != nil && t.Kind() == types.TFUNC {
+ nr := t.NumResults()
+ isMulti = nr > 1
+ if nr == 0 {
+ isExpr = false
+ }
+ }
+ case ir.OAPPEND:
+ // Must be used (and not BinaryExpr/UnaryExpr).
+ isStmt = false
+ case ir.OCLOSE, ir.ODELETE, ir.OPANIC, ir.OPRINT, ir.OPRINTN, ir.OVARKILL, ir.OVARLIVE:
+ // Must not be used.
+ isExpr = false
+ isStmt = true
+ case ir.OCOPY, ir.ORECOVER, ir.ORECV:
+ // Can be used or not.
+ isStmt = true
+ }
+
+ t := n.Type()
+ if t != nil && !t.IsFuncArgStruct() && n.Op() != ir.OTYPE {
+ switch t.Kind() {
+ case types.TFUNC, // might have TANY; wait until it's called
+ types.TANY, types.TFORW, types.TIDEAL, types.TNIL, types.TBLANK:
+ break
+
+ default:
+ types.CheckSize(t)
+ }
+ }
+ if t != nil {
+ n = EvalConst(n)
+ t = n.Type()
+ }
+
+ // TODO(rsc): Lots of the complexity here is because typecheck can
+ // see OTYPE, ONAME, and OLITERAL nodes multiple times.
+ // Once we make the IR a proper tree, we should be able to simplify
+ // this code a bit, especially the final case.
+ switch {
+ case top&(ctxStmt|ctxExpr) == ctxExpr && !isExpr && n.Op() != ir.OTYPE && !isMulti:
+ if !n.Diag() {
+ base.Errorf("%v used as value", n)
+ n.SetDiag(true)
+ }
+ if t != nil {
+ n.SetType(nil)
+ }
+
+ case top&ctxType == 0 && n.Op() == ir.OTYPE && t != nil:
+ if !n.Type().Broke() {
+ base.Errorf("type %v is not an expression", n.Type())
+ n.SetDiag(true)
+ }
+
+ case top&(ctxStmt|ctxExpr) == ctxStmt && !isStmt && t != nil:
+ if !n.Diag() {
+ base.Errorf("%v evaluated but not used", n)
+ n.SetDiag(true)
+ }
+ n.SetType(nil)
+
+ case top&(ctxType|ctxExpr) == ctxType && n.Op() != ir.OTYPE && n.Op() != ir.ONONAME && (t != nil || n.Op() == ir.ONAME):
+ base.Errorf("%v is not a type", n)
+ if t != nil {
+ if n.Op() == ir.ONAME {
+ t.SetBroke(true)
+ } else {
+ n.SetType(nil)
+ }
+ }
+
+ }
+
+ base.Pos = lno
+ return n
+}
+
+// indexlit implements typechecking of untyped values as
+// array/slice indexes. It is almost equivalent to DefaultLit
+// but also accepts untyped numeric values representable as
+// a value of type int (see also checkmake for comparison).
+// The result of indexlit MUST be assigned back to n, e.g.
+// n.Left = indexlit(n.Left)
+func indexlit(n ir.Node) ir.Node {
+ if n != nil && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ return DefaultLit(n, types.Types[types.TINT])
+ }
+ return n
+}
+
+// typecheck1 should ONLY be called from typecheck.
+func typecheck1(n ir.Node, top int) ir.Node {
+ if n, ok := n.(*ir.Name); ok {
+ typecheckdef(n)
+ }
+
+ switch n.Op() {
+ default:
+ ir.Dump("typecheck", n)
+ base.Fatalf("typecheck %v", n.Op())
+ panic("unreachable")
+
+ case ir.OLITERAL:
+ if n.Sym() == nil && n.Type() == nil {
+ if !n.Diag() {
+ base.Fatalf("literal missing type: %v", n)
+ }
+ }
+ return n
+
+ case ir.ONIL:
+ return n
+
+ // names
+ case ir.ONONAME:
+ if !n.Diag() {
+ // Note: adderrorname looks for this string and
+ // adds context about the outer expression
+ base.ErrorfAt(n.Pos(), "undefined: %v", n.Sym())
+ n.SetDiag(true)
+ }
+ n.SetType(nil)
+ return n
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.BuiltinOp != 0 {
+ if top&ctxCallee == 0 {
+ base.Errorf("use of builtin %v not in function call", n.Sym())
+ n.SetType(nil)
+ return n
+ }
+ return n
+ }
+ if top&ctxAssign == 0 {
+ // not a write to the variable
+ if ir.IsBlank(n) {
+ base.Errorf("cannot use _ as value")
+ n.SetType(nil)
+ return n
+ }
+ n.SetUsed(true)
+ }
+ return n
+
+ case ir.OLINKSYMOFFSET:
+ // type already set
+ return n
+
+ case ir.OPACK:
+ n := n.(*ir.PkgName)
+ base.Errorf("use of package %v without selector", n.Sym())
+ n.SetDiag(true)
+ return n
+
+ // types (ODEREF is with exprs)
+ case ir.OTYPE:
+ return n
+
+ case ir.OTSLICE:
+ n := n.(*ir.SliceType)
+ return tcSliceType(n)
+
+ case ir.OTARRAY:
+ n := n.(*ir.ArrayType)
+ return tcArrayType(n)
+
+ case ir.OTMAP:
+ n := n.(*ir.MapType)
+ return tcMapType(n)
+
+ case ir.OTCHAN:
+ n := n.(*ir.ChanType)
+ return tcChanType(n)
+
+ case ir.OTSTRUCT:
+ n := n.(*ir.StructType)
+ return tcStructType(n)
+
+ case ir.OTINTER:
+ n := n.(*ir.InterfaceType)
+ return tcInterfaceType(n)
+
+ case ir.OTFUNC:
+ n := n.(*ir.FuncType)
+ return tcFuncType(n)
+ // type or expr
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ return tcStar(n, top)
+
+ // x op= y
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ checkassign(n, n.X)
+ if n.IncDec && !okforarith[n.X.Type().Kind()] {
+ base.Errorf("invalid operation: %v (non-numeric type %v)", n, n.X.Type())
+ return n
+ }
+ switch n.AsOp {
+ case ir.OLSH, ir.ORSH:
+ n.X, n.Y, _ = tcShift(n, n.X, n.Y)
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+ n.X, n.Y, _ = tcArith(n, n.AsOp, n.X, n.Y)
+ default:
+ base.Fatalf("invalid assign op: %v", n.AsOp)
+ }
+ return n
+
+ // logical operators
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ n.SetType(nil)
+ return n
+ }
+ // For "x == x && len(s)", it's better to report that "len(s)" (type int)
+ // can't be used with "&&" than to report that "x == x" (type untyped bool)
+ // can't be converted to int (see issue #41500).
+ if !n.X.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.X.Type()))
+ n.SetType(nil)
+ return n
+ }
+ if !n.Y.Type().IsBoolean() {
+ base.Errorf("invalid operation: %v (operator %v not defined on %s)", n, n.Op(), typekind(n.Y.Type()))
+ n.SetType(nil)
+ return n
+ }
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ // shift operators
+ case ir.OLSH, ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcShift(n, n.X, n.Y)
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ // comparison operators
+ case ir.OEQ, ir.OGE, ir.OGT, ir.OLE, ir.OLT, ir.ONE:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ if t != nil {
+ n.X, n.Y = l, r
+ n.SetType(types.UntypedBool)
+ if con := EvalConst(n); con.Op() == ir.OLITERAL {
+ return con
+ }
+ n.X, n.Y = defaultlit2(l, r, true)
+ }
+ return n
+
+ // binary operators
+ case ir.OADD, ir.OAND, ir.OANDNOT, ir.ODIV, ir.OMOD, ir.OMUL, ir.OOR, ir.OSUB, ir.OXOR:
+ n := n.(*ir.BinaryExpr)
+ n.X, n.Y = Expr(n.X), Expr(n.Y)
+ l, r, t := tcArith(n, n.Op(), n.X, n.Y)
+ if t != nil && t.Kind() == types.TSTRING && n.Op() == ir.OADD {
+ // create or update OADDSTR node with list of strings in x + y + z + (w + v) + ...
+ var add *ir.AddStringExpr
+ if l.Op() == ir.OADDSTR {
+ add = l.(*ir.AddStringExpr)
+ add.SetPos(n.Pos())
+ } else {
+ add = ir.NewAddStringExpr(n.Pos(), []ir.Node{l})
+ }
+ if r.Op() == ir.OADDSTR {
+ r := r.(*ir.AddStringExpr)
+ add.List.Append(r.List.Take()...)
+ } else {
+ add.List.Append(r)
+ }
+ add.SetType(t)
+ return add
+ }
+ n.X, n.Y = l, r
+ n.SetType(t)
+ return n
+
+ case ir.OBITNOT, ir.ONEG, ir.ONOT, ir.OPLUS:
+ n := n.(*ir.UnaryExpr)
+ return tcUnaryArith(n)
+
+ // exprs
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ return tcAddr(n)
+
+ case ir.OCOMPLIT:
+ return tcCompLit(n.(*ir.CompLitExpr))
+
+ case ir.OXDOT, ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ return tcDot(n, top)
+
+ case ir.ODOTTYPE:
+ n := n.(*ir.TypeAssertExpr)
+ return tcDotType(n)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return tcIndex(n)
+
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ return tcRecv(n)
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ return tcSend(n)
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ return tcSliceHeader(n)
+
+ case ir.OMAKESLICECOPY:
+ n := n.(*ir.MakeExpr)
+ return tcMakeSliceCopy(n)
+
+ case ir.OSLICE, ir.OSLICE3:
+ n := n.(*ir.SliceExpr)
+ return tcSlice(n)
+
+ // call and call like
+ case ir.OCALL:
+ n := n.(*ir.CallExpr)
+ return tcCall(n, top)
+
+ case ir.OALIGNOF, ir.OOFFSETOF, ir.OSIZEOF:
+ n := n.(*ir.UnaryExpr)
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCAP, ir.OLEN:
+ n := n.(*ir.UnaryExpr)
+ return tcLenCap(n)
+
+ case ir.OREAL, ir.OIMAG:
+ n := n.(*ir.UnaryExpr)
+ return tcRealImag(n)
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ return tcComplex(n)
+
+ case ir.OCLOSE:
+ n := n.(*ir.UnaryExpr)
+ return tcClose(n)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ return tcDelete(n)
+
+ case ir.OAPPEND:
+ n := n.(*ir.CallExpr)
+ return tcAppend(n)
+
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ return tcCopy(n)
+
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return tcConv(n)
+
+ case ir.OMAKE:
+ n := n.(*ir.CallExpr)
+ return tcMake(n)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return tcNew(n)
+
+ case ir.OPRINT, ir.OPRINTN:
+ n := n.(*ir.CallExpr)
+ return tcPrint(n)
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ return tcPanic(n)
+
+ case ir.ORECOVER:
+ n := n.(*ir.CallExpr)
+ return tcRecover(n)
+
+ case ir.ORECOVERFP:
+ n := n.(*ir.CallExpr)
+ return tcRecoverFP(n)
+
+ case ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ return tcUnsafeAdd(n)
+
+ case ir.OUNSAFESLICE:
+ n := n.(*ir.BinaryExpr)
+ return tcUnsafeSlice(n)
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ return tcClosure(n, top)
+
+ case ir.OITAB:
+ n := n.(*ir.UnaryExpr)
+ return tcITab(n)
+
+ case ir.OIDATA:
+ // Whoever creates the OIDATA node must know a priori the concrete type at that moment,
+ // usually by just having checked the OITAB.
+ n := n.(*ir.UnaryExpr)
+ base.Fatalf("cannot typecheck interface data %v", n)
+ panic("unreachable")
+
+ case ir.OSPTR:
+ n := n.(*ir.UnaryExpr)
+ return tcSPtr(n)
+
+ case ir.OCFUNC:
+ n := n.(*ir.UnaryExpr)
+ n.X = Expr(n.X)
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OGETCALLERPC, ir.OGETCALLERSP:
+ n := n.(*ir.CallExpr)
+ if len(n.Args) != 0 {
+ base.FatalfAt(n.Pos(), "unexpected arguments: %v", n)
+ }
+ n.SetType(types.Types[types.TUINTPTR])
+ return n
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ n.X = Expr(n.X)
+ return n
+
+ // statements
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ tcAssign(n)
+
+ // Code that creates temps does not bother to set defn, so do it here.
+ if n.X.Op() == ir.ONAME && ir.IsAutoTmp(n.X) {
+ n.X.Name().Defn = n
+ }
+ return n
+
+ case ir.OAS2:
+ tcAssignList(n.(*ir.AssignListStmt))
+ return n
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.OGOTO,
+ ir.OFALL,
+ ir.OVARKILL,
+ ir.OVARLIVE:
+ return n
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ Stmts(n.List)
+ return n
+
+ case ir.OLABEL:
+ if n.Sym().IsBlank() {
+ // Empty identifier is valid but useless.
+ // Eliminate now to simplify life later.
+ // See issues 7538, 11589, 11593.
+ n = ir.NewBlockStmt(n.Pos(), nil)
+ }
+ return n
+
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr)
+ if !n.Call.Diag() {
+ tcGoDefer(n)
+ }
+ return n
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ return tcFor(n)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return tcIf(n)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ return tcReturn(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+ n.Call = typecheck(n.Call, ctxStmt|ctxExpr).(*ir.CallExpr)
+ return n
+
+ case ir.OCHECKNIL:
+ n := n.(*ir.UnaryExpr)
+ return tcCheckNil(n)
+
+ case ir.OSELECT:
+ tcSelect(n.(*ir.SelectStmt))
+ return n
+
+ case ir.OSWITCH:
+ tcSwitch(n.(*ir.SwitchStmt))
+ return n
+
+ case ir.ORANGE:
+ tcRange(n.(*ir.RangeStmt))
+ return n
+
+ case ir.OTYPESW:
+ n := n.(*ir.TypeSwitchGuard)
+ base.Errorf("use of .(type) outside type switch")
+ n.SetDiag(true)
+ return n
+
+ case ir.ODCLFUNC:
+ tcFunc(n.(*ir.Func))
+ return n
+
+ case ir.ODCLCONST:
+ n := n.(*ir.Decl)
+ n.X = Expr(n.X).(*ir.Name)
+ return n
+
+ case ir.ODCLTYPE:
+ n := n.(*ir.Decl)
+ n.X = typecheck(n.X, ctxType).(*ir.Name)
+ types.CheckSize(n.X.Type())
+ return n
+ }
+
+ // No return n here!
+ // Individual cases can type-assert n, introducing a new one.
+ // Each must execute its own return n.
+}
+
+func typecheckargs(n ir.InitNode) {
+ var list []ir.Node
+ switch n := n.(type) {
+ default:
+ base.Fatalf("typecheckargs %+v", n.Op())
+ case *ir.CallExpr:
+ list = n.Args
+ if n.IsDDD {
+ Exprs(list)
+ return
+ }
+ case *ir.ReturnStmt:
+ list = n.Results
+ }
+ if len(list) != 1 {
+ Exprs(list)
+ return
+ }
+
+ typecheckslice(list, ctxExpr|ctxMultiOK)
+ t := list[0].Type()
+ if t == nil || !t.IsFuncArgStruct() {
+ return
+ }
+
+ // Save n as n.Orig for fmt.go.
+ if ir.Orig(n) == n {
+ n.(ir.OrigNode).SetOrig(ir.SepCopy(n))
+ }
+
+ // Rewrite f(g()) into t1, t2, ... = g(); f(t1, t2, ...).
+ RewriteMultiValueCall(n, list[0])
+}
+
+// RewriteMultiValueCall rewrites multi-valued f() to use temporaries,
+// so the backend doesn't need to worry about tuple-valued expressions.
+func RewriteMultiValueCall(n ir.InitNode, call ir.Node) {
+ // If we're outside of function context, then this call will
+ // be executed during the generated init function. However,
+ // init.go hasn't yet created it. Instead, associate the
+ // temporary variables with InitTodoFunc for now, and init.go
+ // will reassociate them later when it's appropriate.
+ static := ir.CurFunc == nil
+ if static {
+ ir.CurFunc = InitTodoFunc
+ }
+
+ as := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, []ir.Node{call})
+ results := call.Type().FieldSlice()
+ list := make([]ir.Node, len(results))
+ for i, result := range results {
+ tmp := Temp(result.Type)
+ as.PtrInit().Append(ir.NewDecl(base.Pos, ir.ODCL, tmp))
+ as.Lhs.Append(tmp)
+ list[i] = tmp
+ }
+ if static {
+ ir.CurFunc = nil
+ }
+
+ n.PtrInit().Append(Stmt(as))
+
+ switch n := n.(type) {
+ default:
+ base.Fatalf("rewriteMultiValueCall %+v", n.Op())
+ case *ir.CallExpr:
+ n.Args = list
+ case *ir.ReturnStmt:
+ n.Results = list
+ case *ir.AssignListStmt:
+ if n.Op() != ir.OAS2FUNC {
+ base.Fatalf("rewriteMultiValueCall: invalid op %v", n.Op())
+ }
+ as.SetOp(ir.OAS2FUNC)
+ n.SetOp(ir.OAS2)
+ n.Rhs = make([]ir.Node, len(list))
+ for i, tmp := range list {
+ n.Rhs[i] = AssignConv(tmp, n.Lhs[i].Type(), "assignment")
+ }
+ }
+}
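+
+// Illustrative sketch: for a call f(g()) where g returns two results, the call
+// is rewritten roughly into
+//
+//	t1, t2 := g() // an OAS2 assignment placed in the init list
+//	f(t1, t2)
+//
+// so that every remaining expression yields at most one value.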
+
+func checksliceindex(l ir.Node, r ir.Node, tp *types.Type) bool {
+ t := r.Type()
+ if t == nil {
+ return false
+ }
+ if !t.IsInteger() {
+ base.Errorf("invalid slice index %v (type %v)", r, t)
+ return false
+ }
+
+ if r.Op() == ir.OLITERAL {
+ x := r.Val()
+ if constant.Sign(x) < 0 {
+ base.Errorf("invalid slice index %v (index must be non-negative)", r)
+ return false
+ } else if tp != nil && tp.NumElem() >= 0 && constant.Compare(x, token.GTR, constant.MakeInt64(tp.NumElem())) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-element array)", r, tp.NumElem())
+ return false
+ } else if ir.IsConst(l, constant.String) && constant.Compare(x, token.GTR, constant.MakeInt64(int64(len(ir.StringVal(l))))) {
+ base.Errorf("invalid slice index %v (out of bounds for %d-byte string)", r, len(ir.StringVal(l)))
+ return false
+ } else if ir.ConstOverflow(x, types.Types[types.TINT]) {
+ base.Errorf("invalid slice index %v (index too large)", r)
+ return false
+ }
+ }
+
+ return true
+}
+
+func checksliceconst(lo ir.Node, hi ir.Node) bool {
+ if lo != nil && hi != nil && lo.Op() == ir.OLITERAL && hi.Op() == ir.OLITERAL && constant.Compare(lo.Val(), token.GTR, hi.Val()) {
+ base.Errorf("invalid slice index: %v > %v", lo, hi)
+ return false
+ }
+
+ return true
+}
+
+// The result of implicitstar MUST be assigned back to n, e.g.
+// n.Left = implicitstar(n.Left)
+func implicitstar(n ir.Node) ir.Node {
+ // insert implicit * if needed for fixed array
+ t := n.Type()
+ if t == nil || !t.IsPtr() {
+ return n
+ }
+ t = t.Elem()
+ if t == nil {
+ return n
+ }
+ if !t.IsArray() {
+ return n
+ }
+ star := ir.NewStarExpr(base.Pos, n)
+ star.SetImplicit(true)
+ return Expr(star)
+}
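+
+// Illustrative example: if p has type *[3]int, an index expression p[i] becomes
+// (*p)[i] via the implicit StarExpr inserted here.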
+
+func needOneArg(n *ir.CallExpr, f string, args ...interface{}) (ir.Node, bool) {
+ if len(n.Args) == 0 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("missing argument to %s: %v", p, n)
+ return nil, false
+ }
+
+ if len(n.Args) > 1 {
+ p := fmt.Sprintf(f, args...)
+ base.Errorf("too many arguments to %s: %v", p, n)
+ return n.Args[0], false
+ }
+
+ return n.Args[0], true
+}
+
+func needTwoArgs(n *ir.CallExpr) (ir.Node, ir.Node, bool) {
+ if len(n.Args) != 2 {
+ if len(n.Args) < 2 {
+ base.Errorf("not enough arguments in call to %v", n)
+ } else {
+ base.Errorf("too many arguments in call to %v", n)
+ }
+ return nil, nil, false
+ }
+ return n.Args[0], n.Args[1], true
+}
+
+// Lookdot1 looks up the specified method s in the list fs of methods, returning
+// the matching field or nil. If dostrcmp is 0, it matches the symbols. If
+// dostrcmp is 1, it matches by name exactly. If dostrcmp is 2, it matches names
+// with case folding.
+func Lookdot1(errnode ir.Node, s *types.Sym, t *types.Type, fs *types.Fields, dostrcmp int) *types.Field {
+ var r *types.Field
+ for _, f := range fs.Slice() {
+ if dostrcmp != 0 && f.Sym.Name == s.Name {
+ return f
+ }
+ if dostrcmp == 2 && strings.EqualFold(f.Sym.Name, s.Name) {
+ return f
+ }
+ if f.Sym != s {
+ continue
+ }
+ if r != nil {
+ if errnode != nil {
+ base.Errorf("ambiguous selector %v", errnode)
+ } else if t.IsPtr() {
+ base.Errorf("ambiguous selector (%v).%v", t, s)
+ } else {
+ base.Errorf("ambiguous selector %v.%v", t, s)
+ }
+ break
+ }
+
+ r = f
+ }
+
+ return r
+}
+
+// typecheckMethodExpr checks selector expressions (ODOT) where the
+// base expression is a type expression (OTYPE).
+func typecheckMethodExpr(n *ir.SelectorExpr) (res ir.Node) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckMethodExpr", n)(&res)
+ }
+
+ t := n.X.Type()
+
+ // Compute the method set for t.
+ var ms *types.Fields
+ if t.IsInterface() {
+ ms = t.AllMethods()
+ } else {
+ mt := types.ReceiverBaseType(t)
+ if mt == nil {
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, n.Sel)
+ n.SetType(nil)
+ return n
+ }
+ CalcMethods(mt)
+ ms = mt.AllMethods()
+
+ // The method expression T.m requires a wrapper when T
+ // is different from m's declared receiver type. We
+ // normally generate these wrappers while writing out
+ // runtime type descriptors, which is always done for
+ // types declared at package scope. However, we need
+ // to make sure to generate wrappers for anonymous
+ // receiver types too.
+ if mt.Sym() == nil {
+ NeedRuntimeType(t)
+ }
+ }
+
+ s := n.Sel
+ m := Lookdot1(n, s, t, ms, 0)
+ if m == nil {
+ if Lookdot1(n, s, t, ms, 1) != nil {
+ base.Errorf("%v undefined (cannot refer to unexported method %v)", n, s)
+ } else if _, ambig := dotpath(s, t, nil, false); ambig {
+ base.Errorf("%v undefined (ambiguous selector)", n) // method or field
+ } else {
+ base.Errorf("%v undefined (type %v has no method %v)", n, t, s)
+ }
+ n.SetType(nil)
+ return n
+ }
+
+ if !types.IsMethodApplicable(t, m) {
+ base.Errorf("invalid method expression %v (needs pointer receiver: (*%v).%S)", n, t, s)
+ n.SetType(nil)
+ return n
+ }
+
+ n.SetOp(ir.OMETHEXPR)
+ n.Selection = m
+ n.SetType(NewMethodType(m.Type, n.X.Type()))
+ return n
+}
+
+func derefall(t *types.Type) *types.Type {
+ for t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ return t
+}
+
+// Lookdot looks up field or method n.Sel in the type t and returns the matching
+// field. It transforms the op of node n to ODOTINTER or ODOTMETH, if appropriate.
+// It also may add a StarExpr node to n.X as needed for access to non-pointer
+// methods. If dostrcmp is 0, it matches the field/method with the exact symbol
+// as n.Sel (appropriate for exported fields). If dostrcmp is 1, it matches by name
+// exactly. If dostrcmp is 2, it matches names with case folding.
+func Lookdot(n *ir.SelectorExpr, t *types.Type, dostrcmp int) *types.Field {
+ s := n.Sel
+
+ types.CalcSize(t)
+ var f1 *types.Field
+ if t.IsStruct() {
+ f1 = Lookdot1(n, s, t, t.Fields(), dostrcmp)
+ } else if t.IsInterface() {
+ f1 = Lookdot1(n, s, t, t.AllMethods(), dostrcmp)
+ }
+
+ var f2 *types.Field
+ if n.X.Type() == t || n.X.Type().Sym() == nil {
+ mt := types.ReceiverBaseType(t)
+ if mt != nil {
+ f2 = Lookdot1(n, s, mt, mt.Methods(), dostrcmp)
+ }
+ }
+
+ if f1 != nil {
+ if dostrcmp > 1 || f1.Broke() {
+ // Already in the process of diagnosing an error.
+ return f1
+ }
+ if f2 != nil {
+ base.Errorf("%v is both field and method", n.Sel)
+ }
+ if f1.Offset == types.BADWIDTH {
+ base.Fatalf("Lookdot badwidth t=%v, f1=%v@%p", t, f1, f1)
+ }
+ n.Selection = f1
+ n.SetType(f1.Type)
+ if t.IsInterface() {
+ if n.X.Type().IsPtr() {
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = Expr(star)
+ }
+
+ n.SetOp(ir.ODOTINTER)
+ }
+ return f1
+ }
+
+ if f2 != nil {
+ if dostrcmp > 1 {
+ // Already in the process of diagnosing an error.
+ return f2
+ }
+ orig := n.X
+ tt := n.X.Type()
+ types.CalcSize(tt)
+ rcvr := f2.Type.Recv().Type
+ if !types.Identical(rcvr, tt) {
+ if rcvr.IsPtr() && types.Identical(rcvr.Elem(), tt) {
+ checklvalue(n.X, "call pointer method on")
+ addr := NodAddr(n.X)
+ addr.SetImplicit(true)
+ n.X = typecheck(addr, ctxType|ctxExpr)
+ } else if tt.IsPtr() && (!rcvr.IsPtr() || rcvr.IsPtr() && rcvr.Elem().NotInHeap()) && types.Identical(tt.Elem(), rcvr) {
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = typecheck(star, ctxType|ctxExpr)
+ } else if tt.IsPtr() && tt.Elem().IsPtr() && types.Identical(derefall(tt), derefall(rcvr)) {
+ base.Errorf("calling method %v with receiver %L requires explicit dereference", n.Sel, n.X)
+ for tt.IsPtr() {
+ // Stop one level early for method with pointer receiver.
+ if rcvr.IsPtr() && !tt.Elem().IsPtr() {
+ break
+ }
+ star := ir.NewStarExpr(base.Pos, n.X)
+ star.SetImplicit(true)
+ n.X = typecheck(star, ctxType|ctxExpr)
+ tt = tt.Elem()
+ }
+ } else {
+ base.Fatalf("method mismatch: %v for %v", rcvr, tt)
+ }
+ }
+
+ // Check that we haven't implicitly dereferenced any defined pointer types.
+ for x := n.X; ; {
+ var inner ir.Node
+ implicit := false
+ switch x := x.(type) {
+ case *ir.AddrExpr:
+ inner, implicit = x.X, x.Implicit()
+ case *ir.SelectorExpr:
+ inner, implicit = x.X, x.Implicit()
+ case *ir.StarExpr:
+ inner, implicit = x.X, x.Implicit()
+ }
+ if !implicit {
+ break
+ }
+ if inner.Type().Sym() != nil && (x.Op() == ir.ODEREF || x.Op() == ir.ODOTPTR) {
+ // Found an implicit dereference of a defined pointer type.
+ // Restore n.X for better error message.
+ n.X = orig
+ return nil
+ }
+ x = inner
+ }
+
+ n.Selection = f2
+ n.SetType(f2.Type)
+ n.SetOp(ir.ODOTMETH)
+
+ return f2
+ }
+
+ return nil
+}
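// Illustrative sketch (hypothetical user-level Go): the implicit addressing
// Lookdot inserts so an addressable value can call a pointer method.
//
//	type Buf struct{ b []byte }
//	func (p *Buf) Reset() { p.b = p.b[:0] }
//
//	var v Buf
//	v.Reset()     // ok: n.X is rewritten to (&v) to match *Buf's receiver
//	Buf{}.Reset() // rejected: the literal is not addressable, so the receiver
//	              // address cannot be taken ("cannot call pointer method on ...")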
+
+func nokeys(l ir.Nodes) bool {
+ for _, n := range l {
+ if n.Op() == ir.OKEY || n.Op() == ir.OSTRUCTKEY {
+ return false
+ }
+ }
+ return true
+}
+
+func hasddd(t *types.Type) bool {
+ for _, tl := range t.Fields().Slice() {
+ if tl.IsDDD() {
+ return true
+ }
+ }
+
+ return false
+}
+
+// typecheck assignment: type list = expression list
+func typecheckaste(op ir.Op, call ir.Node, isddd bool, tstruct *types.Type, nl ir.Nodes, desc func() string) {
+ var t *types.Type
+ var i int
+
+ lno := base.Pos
+ defer func() { base.Pos = lno }()
+
+ if tstruct.Broke() {
+ return
+ }
+
+ var n ir.Node
+ if len(nl) == 1 {
+ n = nl[0]
+ }
+
+ n1 := tstruct.NumFields()
+ n2 := len(nl)
+ if !hasddd(tstruct) {
+ if isddd {
+ goto invalidddd
+ }
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ } else {
+ if !isddd {
+ if n2 < n1-1 {
+ goto notenough
+ }
+ } else {
+ if n2 > n1 {
+ goto toomany
+ }
+ if n2 < n1 {
+ goto notenough
+ }
+ }
+ }
+
+ i = 0
+ for _, tl := range tstruct.Fields().Slice() {
+ t = tl.Type
+ if tl.IsDDD() {
+ if isddd {
+ if i >= len(nl) {
+ goto notenough
+ }
+ if len(nl)-i > 1 {
+ goto toomany
+ }
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t, desc)
+ }
+ return
+ }
+
+ // TODO(mdempsky): Make into ... call with implicit slice.
+ for ; i < len(nl); i++ {
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t.Elem(), desc)
+ }
+ }
+ return
+ }
+
+ if i >= len(nl) {
+ goto notenough
+ }
+ n = nl[i]
+ ir.SetPos(n)
+ if n.Type() != nil {
+ nl[i] = assignconvfn(n, t, desc)
+ }
+ i++
+ }
+
+ if i < len(nl) {
+ goto toomany
+ }
+
+invalidddd:
+ if isddd {
+ if call != nil {
+ base.Errorf("invalid use of ... in call to %v", call)
+ } else {
+ base.Errorf("invalid use of ... in %v", op)
+ }
+ }
+ return
+
+notenough:
+ if n == nil || (!n.Diag() && n.Type() != nil) {
+ details := errorDetails(nl, tstruct, isddd)
+ if call != nil {
+ // call is the expression being called, not the overall call.
+ // Method expressions have the form T.M, and the compiler has
+ // rewritten those to ONAME nodes but left T in Left.
+ if call.Op() == ir.OMETHEXPR {
+ call := call.(*ir.SelectorExpr)
+ base.Errorf("not enough arguments in call to method expression %v%s", call, details)
+ } else {
+ base.Errorf("not enough arguments in call to %v%s", call, details)
+ }
+ } else {
+ base.Errorf("not enough arguments to %v%s", op, details)
+ }
+ if n != nil {
+ n.SetDiag(true)
+ }
+ }
+ return
+
+toomany:
+ details := errorDetails(nl, tstruct, isddd)
+ if call != nil {
+ base.Errorf("too many arguments in call to %v%s", call, details)
+ } else {
+ base.Errorf("too many arguments to %v%s", op, details)
+ }
+}
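// Illustrative sketch (hypothetical user-level Go): the argument-count cases
// typecheckaste distinguishes for a variadic parameter list.
//
//	func f(prefix string, rest ...int) {}
//
//	f("p")                  // ok: rest may be empty
//	f("p", 1, 2)            // ok: trailing args convert to rest's element type
//	f("p", []int{1, 2}...)  // ok: the isddd case, a single slice argument
//	f("p", 1, []int{2}...)  // rejected: too many arguments (isddd plus extras)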
+
+func errorDetails(nl ir.Nodes, tstruct *types.Type, isddd bool) string {
+	// Suppress the have/want signature details if:
+ //
+ // (1) We don't know any type at a call site (see #19012).
+ // (2) Any node has an unknown type.
+ // (3) Invalid type for variadic parameter (see #46957).
+ if tstruct == nil {
+ return "" // case 1
+ }
+
+ if isddd && !nl[len(nl)-1].Type().IsSlice() {
+ return "" // case 3
+ }
+
+ for _, n := range nl {
+ if n.Type() == nil {
+ return "" // case 2
+ }
+ }
+ return fmt.Sprintf("\n\thave %s\n\twant %v", fmtSignature(nl, isddd), tstruct)
+}
+
+// sigrepr returns a type's representation to the outside world, for use in
+// string representations of call and return signatures, e.g. in error
+// messages about wrong arguments to return.
+func sigrepr(t *types.Type, isddd bool) string {
+ switch t {
+ case types.UntypedString:
+ return "string"
+ case types.UntypedBool:
+ return "bool"
+ }
+
+ if t.Kind() == types.TIDEAL {
+ // "untyped number" is not commonly used
+ // outside of the compiler, so let's use "number".
+ // TODO(mdempsky): Revisit this.
+ return "number"
+ }
+
+ // Turn []T... argument to ...T for clearer error message.
+ if isddd {
+ if !t.IsSlice() {
+ base.Fatalf("bad type for ... argument: %v", t)
+ }
+ return "..." + t.Elem().String()
+ }
+ return t.String()
+}
+
+// fmtSignature returns the signature of the types at the call or return.
+func fmtSignature(nl ir.Nodes, isddd bool) string {
+ if len(nl) < 1 {
+ return "()"
+ }
+
+ var typeStrings []string
+ for i, n := range nl {
+ isdddArg := isddd && i == len(nl)-1
+ typeStrings = append(typeStrings, sigrepr(n.Type(), isdddArg))
+ }
+
+ return fmt.Sprintf("(%s)", strings.Join(typeStrings, ", "))
+}
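// Illustrative sketch (hypothetical call): the have/want detail produced by
// errorDetails and fmtSignature above.
//
//	func g(a int, b string) {}
//	g(1)
//	// not enough arguments in call to g
//	//	have (number)
//	//	want (int, string)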
+
+// fielddup reports a duplicate-field error if name has already been seen in
+// a struct literal; otherwise it records name in hash.
+func fielddup(name string, hash map[string]bool) {
+ if hash[name] {
+ base.Errorf("duplicate field name in struct literal: %s", name)
+ return
+ }
+ hash[name] = true
+}
+
+// iscomptype reports whether type t is a composite literal type.
+func iscomptype(t *types.Type) bool {
+ switch t.Kind() {
+ case types.TARRAY, types.TSLICE, types.TSTRUCT, types.TMAP:
+ return true
+ default:
+ return false
+ }
+}
+
+// pushtype adds elided type information for composite literals if
+// appropriate, and returns the resulting expression.
+func pushtype(nn ir.Node, t *types.Type) ir.Node {
+ if nn == nil || nn.Op() != ir.OCOMPLIT {
+ return nn
+ }
+ n := nn.(*ir.CompLitExpr)
+ if n.Ntype != nil {
+ return n
+ }
+
+ switch {
+ case iscomptype(t):
+ // For T, return T{...}.
+ n.Ntype = ir.TypeNode(t)
+
+ case t.IsPtr() && iscomptype(t.Elem()):
+ // For *T, return &T{...}.
+ n.Ntype = ir.TypeNode(t.Elem())
+
+ addr := NodAddrAt(n.Pos(), n)
+ addr.SetImplicit(true)
+ return addr
+ }
+ return n
+}
+
+// typecheckarraylit type-checks a sequence of slice/array literal elements.
+func typecheckarraylit(elemType *types.Type, bound int64, elts []ir.Node, ctx string) int64 {
+ // If there are key/value pairs, create a map to keep seen
+ // keys so we can check for duplicate indices.
+ var indices map[int64]bool
+ for _, elt := range elts {
+ if elt.Op() == ir.OKEY {
+ indices = make(map[int64]bool)
+ break
+ }
+ }
+
+ var key, length int64
+ for i, elt := range elts {
+ ir.SetPos(elt)
+ r := elts[i]
+ var kv *ir.KeyExpr
+ if elt.Op() == ir.OKEY {
+ elt := elt.(*ir.KeyExpr)
+ elt.Key = Expr(elt.Key)
+ key = IndexConst(elt.Key)
+ if key < 0 {
+ if !elt.Key.Diag() {
+ if key == -2 {
+ base.Errorf("index too large")
+ } else {
+ base.Errorf("index must be non-negative integer constant")
+ }
+ elt.Key.SetDiag(true)
+ }
+ key = -(1 << 30) // stay negative for a while
+ }
+ kv = elt
+ r = elt.Value
+ }
+
+ r = pushtype(r, elemType)
+ r = Expr(r)
+ r = AssignConv(r, elemType, ctx)
+ if kv != nil {
+ kv.Value = r
+ } else {
+ elts[i] = r
+ }
+
+ if key >= 0 {
+ if indices != nil {
+ if indices[key] {
+ base.Errorf("duplicate index in %s: %d", ctx, key)
+ } else {
+ indices[key] = true
+ }
+ }
+
+ if bound >= 0 && key >= bound {
+ base.Errorf("array index %d out of bounds [0:%d]", key, bound)
+ bound = -1
+ }
+ }
+
+ key++
+ if key > length {
+ length = key
+ }
+ }
+
+ return length
+}
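// Illustrative sketch (hypothetical user-level Go): index handling in
// typecheckarraylit.
//
//	_ = [...]string{0: "a", 2: "c"} // length 3; a key resets the running index
//	_ = [...]int{3: 1, 3: 2}        // rejected: "duplicate index in array literal: 3"
//	_ = [2]int{5: 1}                // rejected: "array index 5 out of bounds [0:2]"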
+
+// visible reports whether sym is exported or locally defined.
+func visible(sym *types.Sym) bool {
+ return sym != nil && (types.IsExported(sym.Name) || sym.Pkg == types.LocalPkg)
+}
+
+// nonexported reports whether sym is an unexported field.
+func nonexported(sym *types.Sym) bool {
+ return sym != nil && !types.IsExported(sym.Name)
+}
+
+func checklvalue(n ir.Node, verb string) {
+ if !ir.IsAddressable(n) {
+ base.Errorf("cannot %s %v", verb, n)
+ }
+}
+
+func checkassign(stmt ir.Node, n ir.Node) {
+ // have already complained about n being invalid
+ if n.Type() == nil {
+ if base.Errors() == 0 {
+ base.Fatalf("expected an error about %v", n)
+ }
+ return
+ }
+
+ if ir.IsAddressable(n) {
+ return
+ }
+ if n.Op() == ir.OINDEXMAP {
+ n := n.(*ir.IndexExpr)
+ n.Assigned = true
+ return
+ }
+
+ defer n.SetType(nil)
+ if n.Diag() {
+ return
+ }
+ switch {
+ case n.Op() == ir.ODOT && n.(*ir.SelectorExpr).X.Op() == ir.OINDEXMAP:
+ base.Errorf("cannot assign to struct field %v in map", n)
+ case (n.Op() == ir.OINDEX && n.(*ir.IndexExpr).X.Type().IsString()) || n.Op() == ir.OSLICESTR:
+ base.Errorf("cannot assign to %v (strings are immutable)", n)
+ case n.Op() == ir.OLITERAL && n.Sym() != nil && ir.IsConstNode(n):
+ base.Errorf("cannot assign to %v (declared const)", n)
+ default:
+ base.Errorf("cannot assign to %v", n)
+ }
+}
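// Illustrative sketch (hypothetical user-level Go): operands checkassign rejects.
//
//	m := map[string]struct{ n int }{}
//	m["k"].n = 1 // "cannot assign to struct field m["k"].n in map"
//	s := "go"
//	s[0] = 'G'   // "cannot assign to s[0] (strings are immutable)"
//	const c = 1
//	c = 2        // "cannot assign to c (declared const)"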
+
+func checkassignto(src *types.Type, dst ir.Node) {
+ // TODO(mdempsky): Handle all untyped types correctly.
+ if src == types.UntypedBool && dst.Type().IsBoolean() {
+ return
+ }
+
+ if op, why := Assignop(src, dst.Type()); op == ir.OXXX {
+ base.Errorf("cannot assign %v to %L in multiple assignment%s", src, dst, why)
+ return
+ }
+}
+
+// The result of stringtoruneslit MUST be assigned back to n, e.g.
+// n.Left = stringtoruneslit(n.Left)
+func stringtoruneslit(n *ir.ConvExpr) ir.Node {
+ if n.X.Op() != ir.OLITERAL || n.X.Val().Kind() != constant.String {
+ base.Fatalf("stringtoarraylit %v", n)
+ }
+
+ var l []ir.Node
+ i := 0
+ for _, r := range ir.StringVal(n.X) {
+ l = append(l, ir.NewKeyExpr(base.Pos, ir.NewInt(int64(i)), ir.NewInt(int64(r))))
+ i++
+ }
+
+ nn := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(n.Type()), nil)
+ nn.List = l
+ return Expr(nn)
+}
+
+var mapqueue []*ir.MapType
+
+func CheckMapKeys() {
+ for _, n := range mapqueue {
+ k := n.Type().MapType().Key
+ if !k.Broke() && !types.IsComparable(k) {
+ base.ErrorfAt(n.Pos(), "invalid map key type %v", k)
+ }
+ }
+ mapqueue = nil
+}
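// Illustrative sketch (hypothetical user-level Go): the deferred comparability
// check performed by CheckMapKeys once all types are known.
//
//	type Key []byte   // slices are not comparable
//	var m map[Key]int // rejected: "invalid map key type Key"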
+
+func typecheckdeftype(n *ir.Name) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckdeftype", n)(nil)
+ }
+
+ t := types.NewNamed(n)
+ if n.Curfn != nil {
+ t.SetVargen()
+ }
+
+ if n.Pragma()&ir.NotInHeap != 0 {
+ t.SetNotInHeap(true)
+ }
+
+ n.SetType(t)
+ n.SetTypecheck(1)
+ n.SetWalkdef(1)
+
+ types.DeferCheckSize()
+ errorsBefore := base.Errors()
+ n.Ntype = typecheckNtype(n.Ntype)
+ if underlying := n.Ntype.Type(); underlying != nil {
+ t.SetUnderlying(underlying)
+ } else {
+ n.SetDiag(true)
+ n.SetType(nil)
+ }
+ if t.Kind() == types.TFORW && base.Errors() > errorsBefore {
+ // Something went wrong during type-checking,
+ // but it was reported. Silence future errors.
+ t.SetBroke(true)
+ }
+ types.ResumeCheckSize()
+}
+
+func typecheckdef(n *ir.Name) {
+ if base.EnableTrace && base.Flag.LowerT {
+ defer tracePrint("typecheckdef", n)(nil)
+ }
+
+ if n.Walkdef() == 1 {
+ return
+ }
+
+ if n.Type() != nil { // builtin
+ // Mark as Walkdef so that if n.SetType(nil) is called later, we
+ // won't try walking again.
+ if got := n.Walkdef(); got != 0 {
+ base.Fatalf("unexpected walkdef: %v", got)
+ }
+ n.SetWalkdef(1)
+ return
+ }
+
+ lno := ir.SetPos(n)
+ typecheckdefstack = append(typecheckdefstack, n)
+ if n.Walkdef() == 2 {
+ base.FlushErrors()
+ fmt.Printf("typecheckdef loop:")
+ for i := len(typecheckdefstack) - 1; i >= 0; i-- {
+ n := typecheckdefstack[i]
+ fmt.Printf(" %v", n.Sym())
+ }
+ fmt.Printf("\n")
+ base.Fatalf("typecheckdef loop")
+ }
+
+ n.SetWalkdef(2)
+
+ switch n.Op() {
+ default:
+ base.Fatalf("typecheckdef %v", n.Op())
+
+ case ir.OLITERAL:
+ if n.Ntype != nil {
+ n.Ntype = typecheckNtype(n.Ntype)
+ n.SetType(n.Ntype.Type())
+ n.Ntype = nil
+ if n.Type() == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ }
+
+ e := n.Defn
+ n.Defn = nil
+ if e == nil {
+ ir.Dump("typecheckdef nil defn", n)
+ base.ErrorfAt(n.Pos(), "xxx")
+ }
+
+ e = Expr(e)
+ if e.Type() == nil {
+ goto ret
+ }
+ if !ir.IsConstNode(e) {
+ if !e.Diag() {
+ if e.Op() == ir.ONIL {
+ base.ErrorfAt(n.Pos(), "const initializer cannot be nil")
+ } else {
+ base.ErrorfAt(n.Pos(), "const initializer %v is not a constant", e)
+ }
+ e.SetDiag(true)
+ }
+ goto ret
+ }
+
+ t := n.Type()
+ if t != nil {
+ if !ir.OKForConst[t.Kind()] {
+ base.ErrorfAt(n.Pos(), "invalid constant type %v", t)
+ goto ret
+ }
+
+ if !e.Type().IsUntyped() && !types.Identical(t, e.Type()) {
+ base.ErrorfAt(n.Pos(), "cannot use %L as type %v in const initializer", e, t)
+ goto ret
+ }
+
+ e = convlit(e, t)
+ }
+
+ n.SetType(e.Type())
+ if n.Type() != nil {
+ n.SetVal(e.Val())
+ }
+
+ case ir.ONAME:
+ if n.Ntype != nil {
+ n.Ntype = typecheckNtype(n.Ntype)
+ n.SetType(n.Ntype.Type())
+ if n.Type() == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ }
+
+ if n.Type() != nil {
+ break
+ }
+ if n.Defn == nil {
+ if n.BuiltinOp != 0 { // like OPRINTN
+ break
+ }
+ if base.Errors() > 0 {
+ // Can have undefined variables in x := foo
+ // that make x have an n.name.Defn == nil.
+ // If there are other errors anyway, don't
+ // bother adding to the noise.
+ break
+ }
+
+ base.Fatalf("var without type, init: %v", n.Sym())
+ }
+
+ if n.Defn.Op() == ir.ONAME {
+ n.Defn = Expr(n.Defn)
+ n.SetType(n.Defn.Type())
+ break
+ }
+
+ n.Defn = Stmt(n.Defn) // fills in n.Type
+
+ case ir.OTYPE:
+ if n.Alias() {
+ // Type alias declaration: Simply use the rhs type - no need
+ // to create a new type.
+ // If we have a syntax error, name.Ntype may be nil.
+ if n.Ntype != nil {
+ n.Ntype = typecheckNtype(n.Ntype)
+ n.SetType(n.Ntype.Type())
+ if n.Type() == nil {
+ n.SetDiag(true)
+ goto ret
+ }
+ }
+ break
+ }
+
+ // regular type declaration
+ typecheckdeftype(n)
+ }
+
+ret:
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().IsUntyped() {
+ base.Fatalf("got %v for %v", n.Type(), n)
+ }
+ last := len(typecheckdefstack) - 1
+ if typecheckdefstack[last] != n {
+ base.Fatalf("typecheckdefstack mismatch")
+ }
+ typecheckdefstack[last] = nil
+ typecheckdefstack = typecheckdefstack[:last]
+
+ base.Pos = lno
+ n.SetWalkdef(1)
+}
+
+func checkmake(t *types.Type, arg string, np *ir.Node) bool {
+ n := *np
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer %s argument in make(%v) - %v", arg, t, n.Type())
+ return false
+ }
+
+ // Do range checks for constants before DefaultLit
+ // to avoid redundant "constant NNN overflows int" errors.
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative %s argument in make(%v)", arg, t)
+ return false
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("%s argument too large in make(%v)", arg, t)
+ return false
+ }
+ }
+
+ // DefaultLit is necessary for non-constants too: n might be 1.1<<k.
+ // TODO(gri) The length argument requirements for (array/slice) make
+ // are the same as for index expressions. Factor the code better;
+ // for instance, indexlit might be called here and incorporate some
+ // of the bounds checks done for make.
+ n = DefaultLit(n, types.Types[types.TINT])
+ *np = n
+
+ return true
+}
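// Illustrative sketch (hypothetical user-level Go): the constant range checks
// performed by checkmake before DefaultLit.
//
//	_ = make([]int, -1)    // rejected: "negative len argument in make([]int)"
//	_ = make([]int, 1<<63) // rejected: "len argument too large in make([]int)"
//	n := 3
//	_ = make([]int, n)     // non-constant: only the DefaultLit conversion applies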
+
+// checkunsafeslice is like checkmake but for unsafe.Slice.
+func checkunsafeslice(np *ir.Node) bool {
+ n := *np
+ if !n.Type().IsInteger() && n.Type().Kind() != types.TIDEAL {
+ base.Errorf("non-integer len argument in unsafe.Slice - %v", n.Type())
+ return false
+ }
+
+ // Do range checks for constants before DefaultLit
+ // to avoid redundant "constant NNN overflows int" errors.
+ if n.Op() == ir.OLITERAL {
+ v := toint(n.Val())
+ if constant.Sign(v) < 0 {
+ base.Errorf("negative len argument in unsafe.Slice")
+ return false
+ }
+ if ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("len argument too large in unsafe.Slice")
+ return false
+ }
+ }
+
+ // DefaultLit is necessary for non-constants too: n might be 1.1<<k.
+ n = DefaultLit(n, types.Types[types.TINT])
+ *np = n
+
+ return true
+}
+
+// markBreak marks control statements containing break statements with SetHasBreak(true).
+func markBreak(fn *ir.Func) {
+ var labels map[*types.Sym]ir.Node
+ var implicit ir.Node
+
+ var mark func(ir.Node) bool
+ mark = func(n ir.Node) bool {
+ switch n.Op() {
+ default:
+ ir.DoChildren(n, mark)
+
+ case ir.OBREAK:
+ n := n.(*ir.BranchStmt)
+ if n.Label == nil {
+ setHasBreak(implicit)
+ } else {
+ setHasBreak(labels[n.Label])
+ }
+
+ case ir.OFOR, ir.OFORUNTIL, ir.OSWITCH, ir.OSELECT, ir.ORANGE:
+ old := implicit
+ implicit = n
+ var sym *types.Sym
+ switch n := n.(type) {
+ case *ir.ForStmt:
+ sym = n.Label
+ case *ir.RangeStmt:
+ sym = n.Label
+ case *ir.SelectStmt:
+ sym = n.Label
+ case *ir.SwitchStmt:
+ sym = n.Label
+ }
+ if sym != nil {
+ if labels == nil {
+ // Map creation delayed until we need it - most functions don't.
+ labels = make(map[*types.Sym]ir.Node)
+ }
+ labels[sym] = n
+ }
+ ir.DoChildren(n, mark)
+ if sym != nil {
+ delete(labels, sym)
+ }
+ implicit = old
+ }
+ return false
+ }
+
+ mark(fn)
+}
+
+func controlLabel(n ir.Node) *types.Sym {
+ switch n := n.(type) {
+ default:
+ base.Fatalf("controlLabel %+v", n.Op())
+ return nil
+ case *ir.ForStmt:
+ return n.Label
+ case *ir.RangeStmt:
+ return n.Label
+ case *ir.SelectStmt:
+ return n.Label
+ case *ir.SwitchStmt:
+ return n.Label
+ }
+}
+
+func setHasBreak(n ir.Node) {
+ switch n := n.(type) {
+ default:
+ base.Fatalf("setHasBreak %+v", n.Op())
+ case nil:
+ // ignore
+ case *ir.ForStmt:
+ n.HasBreak = true
+ case *ir.RangeStmt:
+ n.HasBreak = true
+ case *ir.SelectStmt:
+ n.HasBreak = true
+ case *ir.SwitchStmt:
+ n.HasBreak = true
+ }
+}
+
+// isTermNodes reports whether the Nodes list ends with a terminating statement.
+func isTermNodes(l ir.Nodes) bool {
+ s := l
+ c := len(s)
+ if c == 0 {
+ return false
+ }
+ return isTermNode(s[c-1])
+}
+
+// isTermNode reports whether the node n, the last one in a
+// statement list, is a terminating statement.
+func isTermNode(n ir.Node) bool {
+ switch n.Op() {
+ // NOTE: OLABEL is treated as a separate statement,
+ // not a separate prefix, so skipping to the last statement
+ // in the block handles the labeled statement case by
+ // skipping over the label. No case OLABEL here.
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ return isTermNodes(n.List)
+
+ case ir.OGOTO, ir.ORETURN, ir.OTAILCALL, ir.OPANIC, ir.OFALL:
+ return true
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ if n.Cond != nil {
+ return false
+ }
+ if n.HasBreak {
+ return false
+ }
+ return true
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return isTermNodes(n.Body) && isTermNodes(n.Else)
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if n.HasBreak {
+ return false
+ }
+ def := false
+ for _, cas := range n.Cases {
+ if !isTermNodes(cas.Body) {
+ return false
+ }
+ if len(cas.List) == 0 { // default
+ def = true
+ }
+ }
+ return def
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ if n.HasBreak {
+ return false
+ }
+ for _, cas := range n.Cases {
+ if !isTermNodes(cas.Body) {
+ return false
+ }
+ }
+ return true
+ }
+
+ return false
+}
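// Illustrative sketch (hypothetical user-level Go): terminating statements as
// classified by isTermNode; CheckReturn below relies on this classification.
//
//	func a() int {
//		for {} // no condition and no break: terminating, so no return needed
//	}
//	func b() int {
//		for { break } // HasBreak: not terminating
//	} // rejected: "missing return at end of function"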
+
+// CheckUnused checks for any declared variables that weren't used.
+func CheckUnused(fn *ir.Func) {
+ // Only report unused variables if we haven't seen any type-checking
+ // errors yet.
+ if base.Errors() != 0 {
+ return
+ }
+
+ // Propagate the used flag for typeswitch variables up to the NONAME in its definition.
+ for _, ln := range fn.Dcl {
+ if ln.Op() == ir.ONAME && ln.Class == ir.PAUTO && ln.Used() {
+ if guard, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+ guard.Used = true
+ }
+ }
+ }
+
+ for _, ln := range fn.Dcl {
+ if ln.Op() != ir.ONAME || ln.Class != ir.PAUTO || ln.Used() {
+ continue
+ }
+ if defn, ok := ln.Defn.(*ir.TypeSwitchGuard); ok {
+ if defn.Used {
+ continue
+ }
+ base.ErrorfAt(defn.Tag.Pos(), "%v declared but not used", ln.Sym())
+ defn.Used = true // suppress repeats
+ } else {
+ base.ErrorfAt(ln.Pos(), "%v declared but not used", ln.Sym())
+ }
+ }
+}
+
+// CheckReturn makes sure that fn terminates appropriately.
+func CheckReturn(fn *ir.Func) {
+ if fn.Type() != nil && fn.Type().NumResults() != 0 && len(fn.Body) != 0 {
+ markBreak(fn)
+ if !isTermNodes(fn.Body) {
+ base.ErrorfAt(fn.Endlineno, "missing return at end of function")
+ }
+ }
+}
+
+// getIotaValue returns the current value for "iota",
+// or -1 if not within a ConstSpec.
+func getIotaValue() int64 {
+ if i := len(typecheckdefstack); i > 0 {
+ if x := typecheckdefstack[i-1]; x.Op() == ir.OLITERAL {
+ return x.Iota()
+ }
+ }
+
+ if ir.CurFunc != nil && ir.CurFunc.Iota >= 0 {
+ return ir.CurFunc.Iota
+ }
+
+ return -1
+}
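// Illustrative sketch (hypothetical user-level Go): the value getIotaValue
// reports while the constants of a ConstSpec are being type-checked.
//
//	const (
//		A = iota // 0
//		B        // 1
//		C        // 2
//	)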
+
+// curpkg returns the current package, based on ir.CurFunc.
+func curpkg() *types.Pkg {
+ fn := ir.CurFunc
+ if fn == nil {
+ // Initialization expressions for package-scope variables.
+ return types.LocalPkg
+ }
+ return fnpkg(fn.Nname)
+}
+
+func Conv(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
+ return n
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(t)
+ n = Expr(n)
+ return n
+}
+
+// ConvNop converts node n to type t using the OCONVNOP op
+// and typechecks the result with ctxExpr.
+func ConvNop(n ir.Node, t *types.Type) ir.Node {
+ if types.Identical(n.Type(), t) {
+ return n
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, n)
+ n.SetType(t)
+ n = Expr(n)
+ return n
+}
diff --git a/src/cmd/compile/internal/typecheck/universe.go b/src/cmd/compile/internal/typecheck/universe.go
new file mode 100644
index 0000000..0254d96
--- /dev/null
+++ b/src/cmd/compile/internal/typecheck/universe.go
@@ -0,0 +1,231 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typecheck
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+var (
+ okfor [ir.OEND][]bool
+ iscmp [ir.OEND]bool
+)
+
+var (
+ okforeq [types.NTYPE]bool
+ okforadd [types.NTYPE]bool
+ okforand [types.NTYPE]bool
+ okfornone [types.NTYPE]bool
+ okforbool [types.NTYPE]bool
+ okforcap [types.NTYPE]bool
+ okforlen [types.NTYPE]bool
+ okforarith [types.NTYPE]bool
+)
+
+var builtinFuncs = [...]struct {
+ name string
+ op ir.Op
+}{
+ {"append", ir.OAPPEND},
+ {"cap", ir.OCAP},
+ {"close", ir.OCLOSE},
+ {"complex", ir.OCOMPLEX},
+ {"copy", ir.OCOPY},
+ {"delete", ir.ODELETE},
+ {"imag", ir.OIMAG},
+ {"len", ir.OLEN},
+ {"make", ir.OMAKE},
+ {"new", ir.ONEW},
+ {"panic", ir.OPANIC},
+ {"print", ir.OPRINT},
+ {"println", ir.OPRINTN},
+ {"real", ir.OREAL},
+ {"recover", ir.ORECOVER},
+}
+
+var unsafeFuncs = [...]struct {
+ name string
+ op ir.Op
+}{
+ {"Add", ir.OUNSAFEADD},
+ {"Alignof", ir.OALIGNOF},
+ {"Offsetof", ir.OOFFSETOF},
+ {"Sizeof", ir.OSIZEOF},
+ {"Slice", ir.OUNSAFESLICE},
+}
+
+// InitUniverse initializes the universe block.
+func InitUniverse() {
+ types.InitTypes(func(sym *types.Sym, typ *types.Type) types.Object {
+ n := ir.NewDeclNameAt(src.NoXPos, ir.OTYPE, sym)
+ n.SetType(typ)
+ sym.Def = n
+ return n
+ })
+
+ for _, s := range &builtinFuncs {
+ s2 := types.BuiltinPkg.Lookup(s.name)
+ def := NewName(s2)
+ def.BuiltinOp = s.op
+ s2.Def = def
+ }
+
+ for _, s := range &unsafeFuncs {
+ s2 := types.UnsafePkg.Lookup(s.name)
+ def := NewName(s2)
+ def.BuiltinOp = s.op
+ s2.Def = def
+ }
+
+ s := types.BuiltinPkg.Lookup("true")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(true))
+
+ s = types.BuiltinPkg.Lookup("false")
+ s.Def = ir.NewConstAt(src.NoXPos, s, types.UntypedBool, constant.MakeBool(false))
+
+ s = Lookup("_")
+ types.BlankSym = s
+ s.Block = -100
+ s.Def = NewName(s)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+ ir.BlankNode = ir.AsNode(s.Def)
+ ir.BlankNode.SetTypecheck(1)
+
+ s = types.BuiltinPkg.Lookup("_")
+ s.Block = -100
+ s.Def = NewName(s)
+ ir.AsNode(s.Def).SetType(types.Types[types.TBLANK])
+
+ s = types.BuiltinPkg.Lookup("nil")
+ nnil := NodNil()
+ nnil.(*ir.NilExpr).SetSym(s)
+ s.Def = nnil
+
+ s = types.BuiltinPkg.Lookup("iota")
+ s.Def = ir.NewIota(base.Pos, s)
+
+ // initialize okfor
+ for et := types.Kind(0); et < types.NTYPE; et++ {
+ if types.IsInt[et] || et == types.TIDEAL {
+ okforeq[et] = true
+ types.IsOrdered[et] = true
+ okforarith[et] = true
+ okforadd[et] = true
+ okforand[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+
+ if types.IsFloat[et] {
+ okforeq[et] = true
+ types.IsOrdered[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+
+ if types.IsComplex[et] {
+ okforeq[et] = true
+ okforadd[et] = true
+ okforarith[et] = true
+ ir.OKForConst[et] = true
+ types.IsSimple[et] = true
+ }
+ }
+
+ types.IsSimple[types.TBOOL] = true
+
+ okforadd[types.TSTRING] = true
+
+ okforbool[types.TBOOL] = true
+
+ okforcap[types.TARRAY] = true
+ okforcap[types.TCHAN] = true
+ okforcap[types.TSLICE] = true
+
+ ir.OKForConst[types.TBOOL] = true
+ ir.OKForConst[types.TSTRING] = true
+
+ okforlen[types.TARRAY] = true
+ okforlen[types.TCHAN] = true
+ okforlen[types.TMAP] = true
+ okforlen[types.TSLICE] = true
+ okforlen[types.TSTRING] = true
+
+ okforeq[types.TPTR] = true
+ okforeq[types.TUNSAFEPTR] = true
+ okforeq[types.TINTER] = true
+ okforeq[types.TCHAN] = true
+ okforeq[types.TSTRING] = true
+ okforeq[types.TBOOL] = true
+ okforeq[types.TMAP] = true // nil only; refined in typecheck
+ okforeq[types.TFUNC] = true // nil only; refined in typecheck
+ okforeq[types.TSLICE] = true // nil only; refined in typecheck
+ okforeq[types.TARRAY] = true // only if element type is comparable; refined in typecheck
+ okforeq[types.TSTRUCT] = true // only if all struct fields are comparable; refined in typecheck
+
+ types.IsOrdered[types.TSTRING] = true
+
+ for i := range okfor {
+ okfor[i] = okfornone[:]
+ }
+
+ // binary
+ okfor[ir.OADD] = okforadd[:]
+ okfor[ir.OAND] = okforand[:]
+ okfor[ir.OANDAND] = okforbool[:]
+ okfor[ir.OANDNOT] = okforand[:]
+ okfor[ir.ODIV] = okforarith[:]
+ okfor[ir.OEQ] = okforeq[:]
+ okfor[ir.OGE] = types.IsOrdered[:]
+ okfor[ir.OGT] = types.IsOrdered[:]
+ okfor[ir.OLE] = types.IsOrdered[:]
+ okfor[ir.OLT] = types.IsOrdered[:]
+ okfor[ir.OMOD] = okforand[:]
+ okfor[ir.OMUL] = okforarith[:]
+ okfor[ir.ONE] = okforeq[:]
+ okfor[ir.OOR] = okforand[:]
+ okfor[ir.OOROR] = okforbool[:]
+ okfor[ir.OSUB] = okforarith[:]
+ okfor[ir.OXOR] = okforand[:]
+ okfor[ir.OLSH] = okforand[:]
+ okfor[ir.ORSH] = okforand[:]
+
+ // unary
+ okfor[ir.OBITNOT] = okforand[:]
+ okfor[ir.ONEG] = okforarith[:]
+ okfor[ir.ONOT] = okforbool[:]
+ okfor[ir.OPLUS] = okforarith[:]
+
+ // special
+ okfor[ir.OCAP] = okforcap[:]
+ okfor[ir.OLEN] = okforlen[:]
+}
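// Illustrative sketch (hypothetical user-level Go): what the okfor tables
// built above encode at the language level.
//
//	_ = "a" + "b"     // ok: okforadd includes TSTRING (concatenation)
//	_ = "a" - "b"     // rejected: OSUB uses okforarith, which excludes strings
//	_ = len([3]int{}) // ok: okforlen includes TARRAY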
+
+// DeclareUniverse makes the universe block visible within the current package.
+func DeclareUniverse() {
+	// Operationally, this is similar to a dot import of types.BuiltinPkg, except
+ // that we silently skip symbols that are already declared in the
+ // package block rather than emitting a redeclared symbol error.
+
+ for _, s := range types.BuiltinPkg.Syms {
+ if s.Def == nil {
+ continue
+ }
+ s1 := Lookup(s.Name)
+ if s1.Def != nil {
+ continue
+ }
+
+ s1.Def = s.Def
+ s1.Block = s.Block
+ }
+}
diff --git a/src/cmd/compile/internal/types/alg.go b/src/cmd/compile/internal/types/alg.go
new file mode 100644
index 0000000..f5675c6
--- /dev/null
+++ b/src/cmd/compile/internal/types/alg.go
@@ -0,0 +1,173 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import "cmd/compile/internal/base"
+
+// AlgKind describes the kind of algorithms used for comparing and
+// hashing a Type.
+type AlgKind int
+
+//go:generate stringer -type AlgKind -trimprefix A alg.go
+
+const (
+ // These values are known by runtime.
+ ANOEQ AlgKind = iota
+ AMEM0
+ AMEM8
+ AMEM16
+ AMEM32
+ AMEM64
+ AMEM128
+ ASTRING
+ AINTER
+ ANILINTER
+ AFLOAT32
+ AFLOAT64
+ ACPLX64
+ ACPLX128
+
+ // Type can be compared/hashed as regular memory.
+ AMEM AlgKind = 100
+
+ // Type needs special comparison/hashing functions.
+ ASPECIAL AlgKind = -1
+)
+
+// AlgType returns the AlgKind used for comparing and hashing Type t.
+// If it returns ANOEQ, it also returns the component type of t that
+// makes it incomparable.
+func AlgType(t *Type) (AlgKind, *Type) {
+ if t.Broke() {
+ return AMEM, nil
+ }
+ if t.Noalg() {
+ return ANOEQ, t
+ }
+
+ switch t.Kind() {
+ case TANY, TFORW:
+ // will be defined later.
+ return ANOEQ, t
+
+ case TINT8, TUINT8, TINT16, TUINT16,
+ TINT32, TUINT32, TINT64, TUINT64,
+ TINT, TUINT, TUINTPTR,
+ TBOOL, TPTR,
+ TCHAN, TUNSAFEPTR:
+ return AMEM, nil
+
+ case TFUNC, TMAP:
+ return ANOEQ, t
+
+ case TFLOAT32:
+ return AFLOAT32, nil
+
+ case TFLOAT64:
+ return AFLOAT64, nil
+
+ case TCOMPLEX64:
+ return ACPLX64, nil
+
+ case TCOMPLEX128:
+ return ACPLX128, nil
+
+ case TSTRING:
+ return ASTRING, nil
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ return ANILINTER, nil
+ }
+ return AINTER, nil
+
+ case TSLICE:
+ return ANOEQ, t
+
+ case TARRAY:
+ a, bad := AlgType(t.Elem())
+ switch a {
+ case AMEM:
+ return AMEM, nil
+ case ANOEQ:
+ return ANOEQ, bad
+ }
+
+ switch t.NumElem() {
+ case 0:
+ // We checked above that the element type is comparable.
+ return AMEM, nil
+ case 1:
+ // Single-element array is same as its lone element.
+ return a, nil
+ }
+
+ return ASPECIAL, nil
+
+ case TSTRUCT:
+ fields := t.FieldSlice()
+
+ // One-field struct is same as that one field alone.
+ if len(fields) == 1 && !fields[0].Sym.IsBlank() {
+ return AlgType(fields[0].Type)
+ }
+
+ ret := AMEM
+ for i, f := range fields {
+ // All fields must be comparable.
+ a, bad := AlgType(f.Type)
+ if a == ANOEQ {
+ return ANOEQ, bad
+ }
+
+ // Blank fields, padded fields, fields with non-memory
+ // equality need special compare.
+ if a != AMEM || f.Sym.IsBlank() || IsPaddedField(t, i) {
+ ret = ASPECIAL
+ }
+ }
+
+ return ret, nil
+ }
+
+ base.Fatalf("AlgType: unexpected type %v", t)
+ return 0, nil
+}
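// Illustrative sketch (hypothetical types): how AlgType classifies a few
// familiar shapes, following the switch above.
//
//	type P struct{ X, Y int } // AMEM: compared as plain memory
//	type Q struct{ S []int }  // ANOEQ: the slice field makes Q incomparable
//	type R [4]string          // ASPECIAL: comparable, but needs a generated routine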
+
+// TypeHasNoAlg reports whether t does not have any associated hash/eq
+// algorithms because t, or some component of t, is marked Noalg.
+func TypeHasNoAlg(t *Type) bool {
+ a, bad := AlgType(t)
+ return a == ANOEQ && bad.Noalg()
+}
+
+// IsComparable reports whether t is a comparable type.
+func IsComparable(t *Type) bool {
+ a, _ := AlgType(t)
+ return a != ANOEQ
+}
+
+// IncomparableField returns an incomparable Field of struct Type t, if any.
+func IncomparableField(t *Type) *Field {
+ for _, f := range t.FieldSlice() {
+ if !IsComparable(f.Type) {
+ return f
+ }
+ }
+ return nil
+}
+
+// IsPaddedField reports whether the i'th field of struct type t is followed
+// by padding.
+func IsPaddedField(t *Type, i int) bool {
+ if !t.IsStruct() {
+ base.Fatalf("IsPaddedField called non-struct %v", t)
+ }
+ end := t.width
+ if i+1 < t.NumFields() {
+ end = t.Field(i + 1).Offset
+ }
+ return t.Field(i).End() != end
+}
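// Illustrative sketch (hypothetical type; exact padding is target-dependent):
// on common 64-bit targets, field A below is followed by 7 bytes of padding,
// so IsPaddedField(T, 0) reports true.
//
//	type T struct {
//		A byte
//		B int64
//	}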
diff --git a/src/cmd/compile/internal/types/algkind_string.go b/src/cmd/compile/internal/types/algkind_string.go
new file mode 100644
index 0000000..a1b518e
--- /dev/null
+++ b/src/cmd/compile/internal/types/algkind_string.go
@@ -0,0 +1,48 @@
+// Code generated by "stringer -type AlgKind -trimprefix A alg.go"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[ANOEQ-0]
+ _ = x[AMEM0-1]
+ _ = x[AMEM8-2]
+ _ = x[AMEM16-3]
+ _ = x[AMEM32-4]
+ _ = x[AMEM64-5]
+ _ = x[AMEM128-6]
+ _ = x[ASTRING-7]
+ _ = x[AINTER-8]
+ _ = x[ANILINTER-9]
+ _ = x[AFLOAT32-10]
+ _ = x[AFLOAT64-11]
+ _ = x[ACPLX64-12]
+ _ = x[ACPLX128-13]
+ _ = x[AMEM-100]
+ _ = x[ASPECIAL - -1]
+}
+
+const (
+ _AlgKind_name_0 = "SPECIALNOEQMEM0MEM8MEM16MEM32MEM64MEM128STRINGINTERNILINTERFLOAT32FLOAT64CPLX64CPLX128"
+ _AlgKind_name_1 = "MEM"
+)
+
+var (
+ _AlgKind_index_0 = [...]uint8{0, 7, 11, 15, 19, 24, 29, 34, 40, 46, 51, 59, 66, 73, 79, 86}
+)
+
+func (i AlgKind) String() string {
+ switch {
+ case -1 <= i && i <= 13:
+ i -= -1
+ return _AlgKind_name_0[_AlgKind_index_0[i]:_AlgKind_index_0[i+1]]
+ case i == 100:
+ return _AlgKind_name_1
+ default:
+ return "AlgKind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+}
diff --git a/src/cmd/compile/internal/types/fmt.go b/src/cmd/compile/internal/types/fmt.go
new file mode 100644
index 0000000..c7d0623
--- /dev/null
+++ b/src/cmd/compile/internal/types/fmt.go
@@ -0,0 +1,776 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "crypto/md5"
+ "encoding/binary"
+ "fmt"
+ "go/constant"
+ "strconv"
+ "strings"
+ "sync"
+
+ "cmd/compile/internal/base"
+)
+
+// BuiltinPkg is a fake package that declares the universe block.
+var BuiltinPkg *Pkg
+
+// LocalPkg is the package being compiled.
+var LocalPkg *Pkg
+
+// UnsafePkg is package unsafe.
+var UnsafePkg *Pkg
+
+// BlankSym is the blank (_) symbol.
+var BlankSym *Sym
+
+// OrigSym returns the original symbol written by the user.
+func OrigSym(s *Sym) *Sym {
+ if s == nil {
+ return nil
+ }
+
+ if len(s.Name) > 1 && s.Name[0] == '~' {
+ switch s.Name[1] {
+ case 'r': // originally an unnamed result
+ return nil
+ case 'b': // originally the blank identifier _
+ // TODO(mdempsky): Does s.Pkg matter here?
+ return BlankSym
+ }
+ return s
+ }
+
+ if strings.HasPrefix(s.Name, ".anon") {
+ // originally an unnamed or _ name (see subr.go: NewFuncParams)
+ return nil
+ }
+
+ return s
+}
+
+// NumImport tracks how often a package with a given name is imported.
+// It is used to provide a better error message (by using the package
+// path to disambiguate) when a name shared by several imported packages
+// appears in an error message.
+var NumImport = make(map[string]int)
+
+// fmtMode represents the kind of printing being done.
+// The default is regular Go syntax (fmtGo).
+// fmtDebug is like fmtGo but for debugging dumps and prints the type kind too.
+// fmtTypeID and fmtTypeIDName are for generating various unique representations
+// of types used in hashes, the linker, and function/method instantiations.
+type fmtMode int
+
+const (
+ fmtGo fmtMode = iota
+ fmtDebug
+ fmtTypeID
+ fmtTypeIDName
+ fmtTypeIDHash
+)
+
+// Sym
+
+// Format implements formatting for a Sym.
+// The valid formats are:
+//
+// %v Go syntax: Name for symbols in the local package, PkgName.Name for imported symbols.
+// %+v Debug syntax: always include PkgName. prefix even for local names.
+// %S Short syntax: Name only, no matter what.
+//
+func (s *Sym) Format(f fmt.State, verb rune) {
+ mode := fmtGo
+ switch verb {
+ case 'v', 'S':
+ if verb == 'v' && f.Flag('+') {
+ mode = fmtDebug
+ }
+ fmt.Fprint(f, sconv(s, verb, mode))
+
+ default:
+ fmt.Fprintf(f, "%%!%c(*types.Sym=%p)", verb, s)
+ }
+}
+
+func (s *Sym) String() string {
+ return sconv(s, 0, fmtGo)
+}
+
+// See #16897 for details about performance implications
+// before changing the implementation of sconv.
+func sconv(s *Sym, verb rune, mode fmtMode) string {
+ if verb == 'L' {
+ panic("linksymfmt")
+ }
+
+ if s == nil {
+ return "<S>"
+ }
+
+ q := pkgqual(s.Pkg, verb, mode)
+ if q == "" {
+ return s.Name
+ }
+
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ buf.WriteString(q)
+ buf.WriteByte('.')
+ buf.WriteString(s.Name)
+ return InternString(buf.Bytes())
+}
+
+func sconv2(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+ if verb == 'L' {
+ panic("linksymfmt")
+ }
+ if s == nil {
+ b.WriteString("<S>")
+ return
+ }
+
+ symfmt(b, s, verb, mode)
+}
+
+func symfmt(b *bytes.Buffer, s *Sym, verb rune, mode fmtMode) {
+ name := s.Name
+ if q := pkgqual(s.Pkg, verb, mode); q != "" {
+ b.WriteString(q)
+ b.WriteByte('.')
+ switch mode {
+ case fmtTypeIDName:
+ // If name is a generic instantiation, it might have local package placeholders
+ // in it. Replace those placeholders with the package name. See issue 49547.
+ name = strings.Replace(name, LocalPkg.Prefix, q, -1)
+ case fmtTypeIDHash:
+ // If name is a generic instantiation, don't hash the instantiating types.
+ // This isn't great, but it is safe. If we hash the instantiating types, then
+ // we need to make sure they have just the package name. At this point, they
+ // either have "", or the whole package path, and it is hard to reconcile
+ // the two without depending on -p (which we might do someday).
+ // See issue 51250.
+ if i := strings.Index(name, "["); i >= 0 {
+ name = name[:i]
+ }
+ }
+ }
+ b.WriteString(name)
+}
+
+// pkgqual returns the qualifier that should be used for printing
+// symbols from the given package in the given mode.
+// If it returns the empty string, no qualification is needed.
+func pkgqual(pkg *Pkg, verb rune, mode fmtMode) string {
+ if verb != 'S' {
+ switch mode {
+ case fmtGo: // This is for the user
+ if pkg == BuiltinPkg || pkg == LocalPkg {
+ return ""
+ }
+
+			// If the name was used by multiple packages, display the full path.
+ if pkg.Name != "" && NumImport[pkg.Name] > 1 {
+ return strconv.Quote(pkg.Path)
+ }
+ return pkg.Name
+
+ case fmtDebug:
+ return pkg.Name
+
+ case fmtTypeIDName, fmtTypeIDHash:
+ // dcommontype, typehash
+ return pkg.Name
+
+ case fmtTypeID:
+ // (methodsym), typesym, weaksym
+ return pkg.Prefix
+ }
+ }
+
+ return ""
+}
+
+// Type
+
+var BasicTypeNames = []string{
+ TINT: "int",
+ TUINT: "uint",
+ TINT8: "int8",
+ TUINT8: "uint8",
+ TINT16: "int16",
+ TUINT16: "uint16",
+ TINT32: "int32",
+ TUINT32: "uint32",
+ TINT64: "int64",
+ TUINT64: "uint64",
+ TUINTPTR: "uintptr",
+ TFLOAT32: "float32",
+ TFLOAT64: "float64",
+ TCOMPLEX64: "complex64",
+ TCOMPLEX128: "complex128",
+ TBOOL: "bool",
+ TANY: "any",
+ TSTRING: "string",
+ TNIL: "nil",
+ TIDEAL: "untyped number",
+ TBLANK: "blank",
+}
+
+var fmtBufferPool = sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+}
+
+// Format implements formatting for a Type.
+// The valid formats are:
+//
+// %v Go syntax
+// %+v Debug syntax: Go syntax with a KIND- prefix for all but builtins.
+// %L Go syntax for underlying type if t is named
+// %S short Go syntax: drop leading "func" in function type
+// %-S special case for method receiver symbol
+//
+func (t *Type) Format(s fmt.State, verb rune) {
+ mode := fmtGo
+ switch verb {
+ case 'v', 'S', 'L':
+ if verb == 'v' && s.Flag('+') { // %+v is debug format
+ mode = fmtDebug
+ }
+ if verb == 'S' && s.Flag('-') { // %-S is special case for receiver - short typeid format
+ mode = fmtTypeID
+ }
+ fmt.Fprint(s, tconv(t, verb, mode))
+ default:
+ fmt.Fprintf(s, "%%!%c(*Type=%p)", verb, t)
+ }
+}
+
+// String returns the Go syntax for the type t.
+func (t *Type) String() string {
+ return tconv(t, 0, fmtGo)
+}
+
+// LinkString returns an unexpanded string description of t, suitable
+// for use in link symbols. "Unexpanded" here means that the
+// description uses `"".` to qualify identifiers from the current
+// package, and "expansion" refers to the renaming step performed by
+// the linker to replace these qualifiers with proper `path/to/pkg.`
+// qualifiers.
+//
+// After expansion, the description corresponds to type identity. That
+// is, for any pair of types t1 and t2, Identical(t1, t2) and
+// expand(t1.LinkString()) == expand(t2.LinkString()) report the same
+// value.
+//
+// Within a single compilation unit, LinkString always returns the
+// same unexpanded description for identical types. Thus it's safe to
+// use as a map key to implement a type-identity-keyed map. However,
+// make sure all LinkString calls used for this purpose happen within
+// the same compile process; the string keys are not stable across
+// multiple processes.
+func (t *Type) LinkString() string {
+ return tconv(t, 0, fmtTypeID)
+}
+
+// NameString generates a user-readable, mostly unique string
+// description of t. NameString always returns the same description
+// for identical types, even across compilation units.
+//
+// NameString qualifies identifiers by package name, so it has
+// collisions when different packages share the same names and
+// identifiers. It also does not distinguish function-scope defined
+// types from package-scoped defined types or from each other.
+func (t *Type) NameString() string {
+ return tconv(t, 0, fmtTypeIDName)
+}
+
+func tconv(t *Type, verb rune, mode fmtMode) string {
+ buf := fmtBufferPool.Get().(*bytes.Buffer)
+ buf.Reset()
+ defer fmtBufferPool.Put(buf)
+
+ tconv2(buf, t, verb, mode, nil)
+ return InternString(buf.Bytes())
+}
+
+// tconv2 writes a string representation of t to b.
+// flag and mode control exactly what is printed.
+// Any types x that are already in the visited map get printed as @%d where %d=visited[x].
+// See #16897 before changing the implementation of tconv.
+func tconv2(b *bytes.Buffer, t *Type, verb rune, mode fmtMode, visited map[*Type]int) {
+ if off, ok := visited[t]; ok {
+ // We've seen this type before, so we're trying to print it recursively.
+ // Print a reference to it instead.
+ fmt.Fprintf(b, "@%d", off)
+ return
+ }
+ if t == nil {
+ b.WriteString("<T>")
+ return
+ }
+ if t.Kind() == TSSA {
+ b.WriteString(t.extra.(string))
+ return
+ }
+ if t.Kind() == TTUPLE {
+ b.WriteString(t.FieldType(0).String())
+ b.WriteByte(',')
+ b.WriteString(t.FieldType(1).String())
+ return
+ }
+
+ if t.Kind() == TRESULTS {
+ tys := t.extra.(*Results).Types
+ for i, et := range tys {
+ if i > 0 {
+ b.WriteByte(',')
+ }
+ b.WriteString(et.String())
+ }
+ return
+ }
+
+ if t == AnyType || t == ByteType || t == RuneType {
+ // in %-T mode collapse predeclared aliases with their originals.
+ switch mode {
+ case fmtTypeIDName, fmtTypeIDHash, fmtTypeID:
+ t = Types[t.Kind()]
+ default:
+ sconv2(b, t.Sym(), 'S', mode)
+ return
+ }
+ }
+ if t == ErrorType {
+ b.WriteString("error")
+ return
+ }
+
+ // Unless the 'L' flag was specified, if the type has a name, just print that name.
+ if verb != 'L' && t.Sym() != nil && t != Types[t.Kind()] {
+ // Default to 'v' if verb is invalid.
+ if verb != 'S' {
+ verb = 'v'
+ }
+
+ // In unified IR, function-scope defined types will have a ·N
+ // suffix embedded directly in their Name. Trim this off for
+ // non-fmtTypeID modes.
+ sym := t.Sym()
+ if mode != fmtTypeID {
+ i := len(sym.Name)
+ for i > 0 && sym.Name[i-1] >= '0' && sym.Name[i-1] <= '9' {
+ i--
+ }
+ const dot = "·"
+ if i >= len(dot) && sym.Name[i-len(dot):i] == dot {
+ sym = &Sym{Pkg: sym.Pkg, Name: sym.Name[:i-len(dot)]}
+ }
+ }
+ sconv2(b, sym, verb, mode)
+
+ // TODO(mdempsky): Investigate including Vargen in fmtTypeIDName
+ // output too. It seems like it should, but that mode is currently
+ // used in string representation used by reflection, which is
+ // user-visible and doesn't expect this.
+ if mode == fmtTypeID && t.vargen != 0 {
+ fmt.Fprintf(b, "·%d", t.vargen)
+ }
+ return
+ }
+
+ if int(t.Kind()) < len(BasicTypeNames) && BasicTypeNames[t.Kind()] != "" {
+ var name string
+ switch t {
+ case UntypedBool:
+ name = "untyped bool"
+ case UntypedString:
+ name = "untyped string"
+ case UntypedInt:
+ name = "untyped int"
+ case UntypedRune:
+ name = "untyped rune"
+ case UntypedFloat:
+ name = "untyped float"
+ case UntypedComplex:
+ name = "untyped complex"
+ default:
+ name = BasicTypeNames[t.Kind()]
+ }
+ b.WriteString(name)
+ return
+ }
+
+ if mode == fmtDebug {
+ b.WriteString(t.Kind().String())
+ b.WriteByte('-')
+ tconv2(b, t, 'v', fmtGo, visited)
+ return
+ }
+
+ // At this point, we might call tconv2 recursively. Add the current type to the visited list so we don't
+ // try to print it recursively.
+ // We record the offset in the result buffer where the type's text starts. This offset serves as a reference
+ // point for any later references to the same type.
+ // Note that we remove the type from the visited map as soon as the recursive call is done.
+ // This prevents encoding types like map[*int]*int as map[*int]@4. (That encoding would work,
+ // but I'd like to use the @ notation only when strictly necessary.)
+ if visited == nil {
+ visited = map[*Type]int{}
+ }
+ visited[t] = b.Len()
+ defer delete(visited, t)
+
+ switch t.Kind() {
+ case TPTR:
+ b.WriteByte('*')
+ switch mode {
+ case fmtTypeID, fmtTypeIDName, fmtTypeIDHash:
+ if verb == 'S' {
+ tconv2(b, t.Elem(), 'S', mode, visited)
+ return
+ }
+ }
+ tconv2(b, t.Elem(), 'v', mode, visited)
+
+ case TARRAY:
+ b.WriteByte('[')
+ b.WriteString(strconv.FormatInt(t.NumElem(), 10))
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TSLICE:
+ b.WriteString("[]")
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TCHAN:
+ switch t.ChanDir() {
+ case Crecv:
+ b.WriteString("<-chan ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ case Csend:
+ b.WriteString("chan<- ")
+ tconv2(b, t.Elem(), 0, mode, visited)
+ default:
+ b.WriteString("chan ")
+ if t.Elem() != nil && t.Elem().IsChan() && t.Elem().Sym() == nil && t.Elem().ChanDir() == Crecv {
+ b.WriteByte('(')
+ tconv2(b, t.Elem(), 0, mode, visited)
+ b.WriteByte(')')
+ } else {
+ tconv2(b, t.Elem(), 0, mode, visited)
+ }
+ }
+
+ case TMAP:
+ b.WriteString("map[")
+ tconv2(b, t.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, t.Elem(), 0, mode, visited)
+
+ case TINTER:
+ if t.IsEmptyInterface() {
+ b.WriteString("interface {}")
+ break
+ }
+ b.WriteString("interface {")
+ for i, f := range t.AllMethods().Slice() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ switch {
+ case f.Sym == nil:
+ // Check first that a symbol is defined for this type.
+ // Wrong interface definitions may have types lacking a symbol.
+ break
+ case IsExported(f.Sym.Name):
+ sconv2(b, f.Sym, 'S', mode)
+ default:
+ if mode != fmtTypeIDName && mode != fmtTypeIDHash {
+ mode = fmtTypeID
+ }
+ sconv2(b, f.Sym, 'v', mode)
+ }
+ tconv2(b, f.Type, 'S', mode, visited)
+ }
+ if t.AllMethods().Len() != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+
+ case TFUNC:
+ if verb == 'S' {
+ // no leading func
+ } else {
+ if t.Recv() != nil {
+ b.WriteString("method")
+ tconv2(b, t.Recvs(), 0, mode, visited)
+ b.WriteByte(' ')
+ }
+ b.WriteString("func")
+ }
+ if t.NumTParams() > 0 {
+ tconv2(b, t.TParams(), 0, mode, visited)
+ }
+ tconv2(b, t.Params(), 0, mode, visited)
+
+ switch t.NumResults() {
+ case 0:
+ // nothing to do
+
+ case 1:
+ b.WriteByte(' ')
+ tconv2(b, t.Results().Field(0).Type, 0, mode, visited) // struct->field->field's type
+
+ default:
+ b.WriteByte(' ')
+ tconv2(b, t.Results(), 0, mode, visited)
+ }
+
+ case TSTRUCT:
+ if m := t.StructType().Map; m != nil {
+ mt := m.MapType()
+ // Format the bucket struct for map[x]y as map.bucket[x]y.
+ // This avoids a recursive print that generates very long names.
+ switch t {
+ case mt.Bucket:
+ b.WriteString("map.bucket[")
+ case mt.Hmap:
+ b.WriteString("map.hdr[")
+ case mt.Hiter:
+ b.WriteString("map.iter[")
+ default:
+ base.Fatalf("unknown internal map type")
+ }
+ tconv2(b, m.Key(), 0, mode, visited)
+ b.WriteByte(']')
+ tconv2(b, m.Elem(), 0, mode, visited)
+ break
+ }
+
+ if funarg := t.StructType().Funarg; funarg != FunargNone {
+ open, close := '(', ')'
+ if funarg == FunargTparams {
+ open, close = '[', ']'
+ }
+ b.WriteByte(byte(open))
+ fieldVerb := 'v'
+ switch mode {
+ case fmtTypeID, fmtTypeIDName, fmtTypeIDHash, fmtGo:
+ // no argument names on function signature, and no "noescape"/"nosplit" tags
+ fieldVerb = 'S'
+ }
+ for i, f := range t.Fields().Slice() {
+ if i != 0 {
+ b.WriteString(", ")
+ }
+ fldconv(b, f, fieldVerb, mode, visited, funarg)
+ }
+ b.WriteByte(byte(close))
+ } else {
+ b.WriteString("struct {")
+ for i, f := range t.Fields().Slice() {
+ if i != 0 {
+ b.WriteByte(';')
+ }
+ b.WriteByte(' ')
+ fldconv(b, f, 'L', mode, visited, funarg)
+ }
+ if t.NumFields() != 0 {
+ b.WriteByte(' ')
+ }
+ b.WriteByte('}')
+ }
+
+ case TFORW:
+ b.WriteString("undefined")
+ if t.Sym() != nil {
+ b.WriteByte(' ')
+ sconv2(b, t.Sym(), 'v', mode)
+ }
+
+ case TUNSAFEPTR:
+ b.WriteString("unsafe.Pointer")
+
+ case TTYPEPARAM:
+ if t.Sym() != nil {
+ sconv2(b, t.Sym(), 'v', mode)
+ } else {
+ b.WriteString("tp")
+ // Print out the pointer value for now to disambiguate type params
+ b.WriteString(fmt.Sprintf("%p", t))
+ }
+
+ case TUNION:
+ for i := 0; i < t.NumTerms(); i++ {
+ if i > 0 {
+ b.WriteString("|")
+ }
+ elem, tilde := t.Term(i)
+ if tilde {
+ b.WriteString("~")
+ }
+ tconv2(b, elem, 0, mode, visited)
+ }
+
+ case Txxx:
+ b.WriteString("Txxx")
+
+ default:
+ // Don't know how to handle - fall back to detailed prints
+ b.WriteString(t.Kind().String())
+ b.WriteString(" <")
+ sconv2(b, t.Sym(), 'v', mode)
+ b.WriteString(">")
+
+ }
+}
+
+func fldconv(b *bytes.Buffer, f *Field, verb rune, mode fmtMode, visited map[*Type]int, funarg Funarg) {
+ if f == nil {
+ b.WriteString("<T>")
+ return
+ }
+
+ var name string
+ nameSep := " "
+ if verb != 'S' {
+ s := f.Sym
+
+ // Take the name from the original.
+ if mode == fmtGo {
+ s = OrigSym(s)
+ }
+
+ // Using type aliases and embedded fields, it's possible to
+ // construct types that can't be directly represented as a
+ // type literal. For example, given "type Int = int" (#50190),
+ // it would be incorrect to format "struct{ Int }" as either
+ // "struct{ int }" or "struct{ Int int }", because those each
+ // represent other, distinct types.
+ //
+ // So for the purpose of LinkString (i.e., fmtTypeID), we use
+ // the non-standard syntax "struct{ Int = int }" to represent
+ // embedded fields that have been renamed through the use of
+ // type aliases.
+ if f.Embedded != 0 {
+ if mode == fmtTypeID {
+ nameSep = " = "
+
+ // Compute tsym, the symbol that would normally be used as
+ // the field name when embedding f.Type.
+ // TODO(mdempsky): Check for other occurrences of this logic
+ // and deduplicate.
+ typ := f.Type
+ if typ.IsPtr() {
+ base.Assertf(typ.Sym() == nil, "embedded pointer type has name: %L", typ)
+ typ = typ.Elem()
+ }
+ tsym := typ.Sym()
+
+ // If the field name matches the embedded type's name, then
+ // suppress printing of the field name. For example, format
+ // "struct{ T }" as simply that instead of "struct{ T = T }".
+ if tsym != nil && (s == tsym || IsExported(tsym.Name) && s.Name == tsym.Name) {
+ s = nil
+ }
+ } else {
+ // Suppress the field name for embedded fields for
+ // non-LinkString formats, to match historical behavior.
+ // TODO(mdempsky): Re-evaluate this.
+ s = nil
+ }
+ }
+
+ if s != nil {
+ if funarg != FunargNone {
+ name = fmt.Sprint(f.Nname)
+ } else if verb == 'L' {
+ name = s.Name
+ if name == ".F" {
+ name = "F" // Hack for toolstash -cmp.
+ }
+ if !IsExported(name) && mode != fmtTypeIDName && mode != fmtTypeIDHash {
+ name = sconv(s, 0, mode) // qualify non-exported names (used on structs, not on funarg)
+ }
+ } else {
+ name = sconv(s, 0, mode)
+ }
+ }
+ }
+
+ if name != "" {
+ b.WriteString(name)
+ b.WriteString(nameSep)
+ }
+
+ if f.IsDDD() {
+ var et *Type
+ if f.Type != nil {
+ et = f.Type.Elem()
+ }
+ b.WriteString("...")
+ tconv2(b, et, 0, mode, visited)
+ } else {
+ tconv2(b, f.Type, 0, mode, visited)
+ }
+
+ if verb != 'S' && funarg == FunargNone && f.Note != "" {
+ b.WriteString(" ")
+ b.WriteString(strconv.Quote(f.Note))
+ }
+}
+
+// Val
+
+func FmtConst(v constant.Value, sharp bool) string {
+ if !sharp && v.Kind() == constant.Complex {
+ real, imag := constant.Real(v), constant.Imag(v)
+
+ var re string
+ sre := constant.Sign(real)
+ if sre != 0 {
+ re = real.String()
+ }
+
+ var im string
+ sim := constant.Sign(imag)
+ if sim != 0 {
+ im = imag.String()
+ }
+
+ switch {
+ case sre == 0 && sim == 0:
+ return "0"
+ case sre == 0:
+ return im + "i"
+ case sim == 0:
+ return re
+ case sim < 0:
+ return fmt.Sprintf("(%s%si)", re, im)
+ default:
+ return fmt.Sprintf("(%s+%si)", re, im)
+ }
+ }
+
+ return v.String()
+}
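// Illustrative sketch: complex constant rendering by FmtConst with sharp=false,
// following the sign cases above.
//
//	3 + 4i -> "(3+4i)"
//	3 - 4i -> "(3-4i)"
//	0 + 2i -> "2i"
//	5 + 0i -> "5"
//	0 + 0i -> "0"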
+
+// TypeHash computes a hash value for type t to use in type switch statements.
+func TypeHash(t *Type) uint32 {
+ p := tconv(t, 0, fmtTypeIDHash)
+
+ // Using MD5 is overkill, but reduces accidental collisions.
+ h := md5.Sum([]byte(p))
+ return binary.LittleEndian.Uint32(h[:4])
+}
diff --git a/src/cmd/compile/internal/types/goversion.go b/src/cmd/compile/internal/types/goversion.go
new file mode 100644
index 0000000..1a324aa
--- /dev/null
+++ b/src/cmd/compile/internal/types/goversion.go
@@ -0,0 +1,94 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "fmt"
+ "internal/goversion"
+ "log"
+ "regexp"
+ "strconv"
+
+ "cmd/compile/internal/base"
+)
+
+// A lang is a language version broken into major and minor numbers.
+type lang struct {
+ major, minor int
+}
+
+// langWant is the desired language version set by the -lang flag.
+// If the -lang flag is not set, this is the zero value, meaning that
+// any language version is supported.
+var langWant lang
+
+// AllowsGoVersion reports whether a particular package
+// is allowed to use Go version major.minor.
+// We assume the imported packages have all been checked,
+// so we only have to check the local package against the -lang flag.
+func AllowsGoVersion(pkg *Pkg, major, minor int) bool {
+ if pkg == nil {
+ // TODO(mdempsky): Set Pkg for local types earlier.
+ pkg = LocalPkg
+ }
+ if pkg != LocalPkg {
+ // Assume imported packages passed type-checking.
+ return true
+ }
+ if langWant.major == 0 && langWant.minor == 0 {
+ return true
+ }
+ return langWant.major > major || (langWant.major == major && langWant.minor >= minor)
+}
+
+// ParseLangFlag verifies that the -lang flag holds a valid value, and
+// exits if not. It initializes data used by AllowsGoVersion.
+func ParseLangFlag() {
+ if base.Flag.Lang == "" {
+ return
+ }
+
+ var err error
+ langWant, err = parseLang(base.Flag.Lang)
+ if err != nil {
+ log.Fatalf("invalid value %q for -lang: %v", base.Flag.Lang, err)
+ }
+
+ if def := currentLang(); base.Flag.Lang != def {
+ defVers, err := parseLang(def)
+ if err != nil {
+ log.Fatalf("internal error parsing default lang %q: %v", def, err)
+ }
+ if langWant.major > defVers.major || (langWant.major == defVers.major && langWant.minor > defVers.minor) {
+ log.Fatalf("invalid value %q for -lang: max known version is %q", base.Flag.Lang, def)
+ }
+ }
+}
+
+// parseLang parses a -lang option into a lang.
+func parseLang(s string) (lang, error) {
+ matches := goVersionRE.FindStringSubmatch(s)
+ if matches == nil {
+ return lang{}, fmt.Errorf(`should be something like "go1.12"`)
+ }
+ major, err := strconv.Atoi(matches[1])
+ if err != nil {
+ return lang{}, err
+ }
+ minor, err := strconv.Atoi(matches[2])
+ if err != nil {
+ return lang{}, err
+ }
+ return lang{major: major, minor: minor}, nil
+}
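+
+// A few assumed inputs, for illustration:
+//
+//	parseLang("go1.12")  ->  lang{major: 1, minor: 12}
+//	parseLang("go1.0")   ->  lang{major: 1, minor: 0}
+//	parseLang("1.12")    ->  error (missing "go" prefix)
+//	parseLang("go1.012") ->  error (leading zero rejected by goVersionRE)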
+
+// currentLang returns the current language version.
+func currentLang() string {
+ return fmt.Sprintf("go1.%d", goversion.Version)
+}
+
+// goVersionRE is a regular expression that matches the valid
+// arguments to the -lang flag.
+var goVersionRE = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
diff --git a/src/cmd/compile/internal/types/identity.go b/src/cmd/compile/internal/types/identity.go
new file mode 100644
index 0000000..60a0f2e
--- /dev/null
+++ b/src/cmd/compile/internal/types/identity.go
@@ -0,0 +1,157 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+const (
+ identIgnoreTags = 1 << iota
+ identStrict
+)
+
+// Identical reports whether t1 and t2 are identical types, following the spec rules.
+// Receiver parameter types are ignored. Named (defined) types are only equal if they
+// are pointer-equal - i.e. there must be a unique types.Type for each specific named
+// type. Also, a type containing a shape type is considered identical to another type
+// (shape or not) if their underlying types are the same, or they are both pointers.
+func Identical(t1, t2 *Type) bool {
+ return identical(t1, t2, 0, nil)
+}
+
+// IdenticalIgnoreTags is like Identical, but it ignores struct tags
+// for struct identity.
+func IdenticalIgnoreTags(t1, t2 *Type) bool {
+ return identical(t1, t2, identIgnoreTags, nil)
+}
+
+// IdenticalStrict is like Identical, but matches types exactly, without the
+// exception for shapes.
+func IdenticalStrict(t1, t2 *Type) bool {
+ return identical(t1, t2, identStrict, nil)
+}
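+
+// Sketch of the difference between the three entry points (the struct types
+// here are assumed, not constructed in this file):
+//
+//	// t1 = struct { f int "a" }, t2 = struct { f int "b" }
+//	Identical(t1, t2)           == false // field tags differ
+//	IdenticalIgnoreTags(t1, t2) == true
+//
+// IdenticalStrict additionally refuses to treat a shape type as identical to
+// a non-shape type with the same underlying type.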
+
+type typePair struct {
+ t1 *Type
+ t2 *Type
+}
+
+func identical(t1, t2 *Type, flags int, assumedEqual map[typePair]struct{}) bool {
+ if t1 == t2 {
+ return true
+ }
+ if t1 == nil || t2 == nil || t1.kind != t2.kind || t1.Broke() || t2.Broke() {
+ return false
+ }
+ if t1.sym != nil || t2.sym != nil {
+ if flags&identStrict == 0 && (t1.HasShape() || t2.HasShape()) {
+ switch t1.kind {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64, TBOOL, TSTRING, TPTR, TUNSAFEPTR:
+ return true
+ }
+ // fall through to unnamed type comparison for complex types.
+ goto cont
+ }
+ // Special case: we keep byte/uint8 and rune/int32
+ // separate for error messages. Treat them as equal.
+ switch t1.kind {
+ case TUINT8:
+ return (t1 == Types[TUINT8] || t1 == ByteType) && (t2 == Types[TUINT8] || t2 == ByteType)
+ case TINT32:
+ return (t1 == Types[TINT32] || t1 == RuneType) && (t2 == Types[TINT32] || t2 == RuneType)
+ case TINTER:
+ // Make sure named any type matches any unnamed empty interface
+ // (but not a shape type, if identStrict).
+ isUnnamedEface := func(t *Type) bool { return t.IsEmptyInterface() && t.Sym() == nil }
+ if flags&identStrict != 0 {
+ return t1 == AnyType && isUnnamedEface(t2) && !t2.HasShape() || t2 == AnyType && isUnnamedEface(t1) && !t1.HasShape()
+ }
+ return t1 == AnyType && isUnnamedEface(t2) || t2 == AnyType && isUnnamedEface(t1)
+ default:
+ return false
+ }
+ }
+cont:
+
+ // Any cyclic type must go through a named type, and if one is
+ // named, it is only identical to the other if they are the
+ // same pointer (t1 == t2), so there's no chance of chasing
+ // cycles ad infinitum and thus no need for a depth counter.
+ if assumedEqual == nil {
+ assumedEqual = make(map[typePair]struct{})
+ } else if _, ok := assumedEqual[typePair{t1, t2}]; ok {
+ return true
+ }
+ assumedEqual[typePair{t1, t2}] = struct{}{}
+
+ switch t1.kind {
+ case TIDEAL:
+ // Historically, cmd/compile used a single "untyped
+ // number" type, so all untyped number types were
+ // identical. Match this behavior.
+ // TODO(mdempsky): Revisit this.
+ return true
+
+ case TINTER:
+ if t1.AllMethods().Len() != t2.AllMethods().Len() {
+ return false
+ }
+ for i, f1 := range t1.AllMethods().Slice() {
+ f2 := t2.AllMethods().Index(i)
+ if f1.Sym != f2.Sym || !identical(f1.Type, f2.Type, flags, assumedEqual) {
+ return false
+ }
+ }
+ return true
+
+ case TSTRUCT:
+ if t1.NumFields() != t2.NumFields() {
+ return false
+ }
+ for i, f1 := range t1.FieldSlice() {
+ f2 := t2.Field(i)
+ if f1.Sym != f2.Sym || f1.Embedded != f2.Embedded || !identical(f1.Type, f2.Type, flags, assumedEqual) {
+ return false
+ }
+ if (flags&identIgnoreTags) == 0 && f1.Note != f2.Note {
+ return false
+ }
+ }
+ return true
+
+ case TFUNC:
+ // Check parameters and result parameters for type equality.
+ // We intentionally ignore receiver parameters for type
+ // equality, because they're never relevant.
+ for _, f := range ParamsResults {
+ // Loop over fields in structs, ignoring argument names.
+ fs1, fs2 := f(t1).FieldSlice(), f(t2).FieldSlice()
+ if len(fs1) != len(fs2) {
+ return false
+ }
+ for i, f1 := range fs1 {
+ f2 := fs2[i]
+ if f1.IsDDD() != f2.IsDDD() || !identical(f1.Type, f2.Type, flags, assumedEqual) {
+ return false
+ }
+ }
+ }
+ return true
+
+ case TARRAY:
+ if t1.NumElem() != t2.NumElem() {
+ return false
+ }
+
+ case TCHAN:
+ if t1.ChanDir() != t2.ChanDir() {
+ return false
+ }
+
+ case TMAP:
+ if !identical(t1.Key(), t2.Key(), flags, assumedEqual) {
+ return false
+ }
+ }
+
+ return identical(t1.Elem(), t2.Elem(), flags, assumedEqual)
+}
diff --git a/src/cmd/compile/internal/types/kind_string.go b/src/cmd/compile/internal/types/kind_string.go
new file mode 100644
index 0000000..3e6a8bc
--- /dev/null
+++ b/src/cmd/compile/internal/types/kind_string.go
@@ -0,0 +1,62 @@
+// Code generated by "stringer -type Kind -trimprefix T type.go"; DO NOT EDIT.
+
+package types
+
+import "strconv"
+
+func _() {
+ // An "invalid array index" compiler error signifies that the constant values have changed.
+ // Re-run the stringer command to generate them again.
+ var x [1]struct{}
+ _ = x[Txxx-0]
+ _ = x[TINT8-1]
+ _ = x[TUINT8-2]
+ _ = x[TINT16-3]
+ _ = x[TUINT16-4]
+ _ = x[TINT32-5]
+ _ = x[TUINT32-6]
+ _ = x[TINT64-7]
+ _ = x[TUINT64-8]
+ _ = x[TINT-9]
+ _ = x[TUINT-10]
+ _ = x[TUINTPTR-11]
+ _ = x[TCOMPLEX64-12]
+ _ = x[TCOMPLEX128-13]
+ _ = x[TFLOAT32-14]
+ _ = x[TFLOAT64-15]
+ _ = x[TBOOL-16]
+ _ = x[TPTR-17]
+ _ = x[TFUNC-18]
+ _ = x[TSLICE-19]
+ _ = x[TARRAY-20]
+ _ = x[TSTRUCT-21]
+ _ = x[TCHAN-22]
+ _ = x[TMAP-23]
+ _ = x[TINTER-24]
+ _ = x[TFORW-25]
+ _ = x[TANY-26]
+ _ = x[TSTRING-27]
+ _ = x[TUNSAFEPTR-28]
+ _ = x[TTYPEPARAM-29]
+ _ = x[TUNION-30]
+ _ = x[TIDEAL-31]
+ _ = x[TNIL-32]
+ _ = x[TBLANK-33]
+ _ = x[TFUNCARGS-34]
+ _ = x[TCHANARGS-35]
+ _ = x[TSSA-36]
+ _ = x[TTUPLE-37]
+ _ = x[TRESULTS-38]
+ _ = x[NTYPE-39]
+}
+
+const _Kind_name = "xxxINT8UINT8INT16UINT16INT32UINT32INT64UINT64INTUINTUINTPTRCOMPLEX64COMPLEX128FLOAT32FLOAT64BOOLPTRFUNCSLICEARRAYSTRUCTCHANMAPINTERFORWANYSTRINGUNSAFEPTRTYPEPARAMUNIONIDEALNILBLANKFUNCARGSCHANARGSSSATUPLERESULTSNTYPE"
+
+var _Kind_index = [...]uint8{0, 3, 7, 12, 17, 23, 28, 34, 39, 45, 48, 52, 59, 68, 78, 85, 92, 96, 99, 103, 108, 113, 119, 123, 126, 131, 135, 138, 144, 153, 162, 167, 172, 175, 180, 188, 196, 199, 204, 211, 216}
+
+func (i Kind) String() string {
+ if i >= Kind(len(_Kind_index)-1) {
+ return "Kind(" + strconv.FormatInt(int64(i), 10) + ")"
+ }
+ return _Kind_name[_Kind_index[i]:_Kind_index[i+1]]
+}
diff --git a/src/cmd/compile/internal/types/pkg.go b/src/cmd/compile/internal/types/pkg.go
new file mode 100644
index 0000000..b159eb5
--- /dev/null
+++ b/src/cmd/compile/internal/types/pkg.go
@@ -0,0 +1,147 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/internal/obj"
+ "cmd/internal/objabi"
+ "fmt"
+ "sort"
+ "sync"
+)
+
+// pkgMap maps a package path to a package.
+var pkgMap = make(map[string]*Pkg)
+
+// MaxPkgHeight is a height greater than any likely package height.
+const MaxPkgHeight = 1e9
+
+type Pkg struct {
+ Path string // string literal used in import statement, e.g. "runtime/internal/sys"
+ Name string // package name, e.g. "sys"
+ Prefix string // escaped path for use in symbol table
+ Syms map[string]*Sym
+ Pathsym *obj.LSym
+
+ // Height is the package's height in the import graph. Leaf
+ // packages (i.e., packages with no imports) have height 0,
+ // and all other packages have height 1 plus the maximum
+ // height of their imported packages.
+ Height int
+
+ Direct bool // imported directly
+}
+
+// NewPkg returns a new Pkg for the given package path and name.
+// If the package already exists and name is not the empty string,
+// the existing package name and the provided name must match.
+func NewPkg(path, name string) *Pkg {
+ if p := pkgMap[path]; p != nil {
+ if name != "" && p.Name != name {
+ panic(fmt.Sprintf("conflicting package names %s and %s for path %q", p.Name, name, path))
+ }
+ return p
+ }
+
+ p := new(Pkg)
+ p.Path = path
+ p.Name = name
+ if path == "go.shape" {
+ // Don't escape "go.shape", since it's not needed (it's a builtin
+ // package), and we don't want escape codes showing up in shape type
+ // names, which also appear in names of function/method
+ // instantiations.
+ p.Prefix = path
+ } else {
+ p.Prefix = objabi.PathToPrefix(path)
+ }
+ p.Syms = make(map[string]*Sym)
+ pkgMap[path] = p
+
+ return p
+}
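+
+// For illustration (paths assumed): NewPkg("runtime/internal/sys", "sys")
+// registers the package and fills Prefix via objabi.PathToPrefix; a second
+// call with the same path returns the same *Pkg, and a call with the same
+// path but a different, non-empty name panics.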
+
+// ImportedPkgList returns the list of directly imported packages.
+// The list is sorted by package path.
+func ImportedPkgList() []*Pkg {
+ var list []*Pkg
+ for _, p := range pkgMap {
+ if p.Direct {
+ list = append(list, p)
+ }
+ }
+ sort.Sort(byPath(list))
+ return list
+}
+
+type byPath []*Pkg
+
+func (a byPath) Len() int { return len(a) }
+func (a byPath) Less(i, j int) bool { return a[i].Path < a[j].Path }
+func (a byPath) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+var nopkg = &Pkg{
+ Syms: make(map[string]*Sym),
+}
+
+func (pkg *Pkg) Lookup(name string) *Sym {
+ s, _ := pkg.LookupOK(name)
+ return s
+}
+
+// LookupOK looks up name in pkg and reports whether it previously existed.
+func (pkg *Pkg) LookupOK(name string) (s *Sym, existed bool) {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[name]; s != nil {
+ return s, true
+ }
+
+ s = &Sym{
+ Name: name,
+ Pkg: pkg,
+ }
+ pkg.Syms[name] = s
+ return s, false
+}
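+
+// Minimal sketch of the lookup behavior (package pkg and name assumed):
+//
+//	s1, existed1 := pkg.LookupOK("x") // existed1 == false, s1 freshly created
+//	s2, existed2 := pkg.LookupOK("x") // existed2 == true, s2 == s1
+//	s3 := pkg.Lookup("x")             // s3 == s1 as well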
+
+func (pkg *Pkg) LookupBytes(name []byte) *Sym {
+ // TODO(gri) remove this check in favor of specialized lookup
+ if pkg == nil {
+ pkg = nopkg
+ }
+ if s := pkg.Syms[string(name)]; s != nil {
+ return s
+ }
+ str := InternString(name)
+ return pkg.Lookup(str)
+}
+
+var (
+ internedStringsmu sync.Mutex // protects internedStrings
+ internedStrings = map[string]string{}
+)
+
+func InternString(b []byte) string {
+ internedStringsmu.Lock()
+ s, ok := internedStrings[string(b)] // string(b) here doesn't allocate
+ if !ok {
+ s = string(b)
+ internedStrings[s] = s
+ }
+ internedStringsmu.Unlock()
+ return s
+}
+
+// CleanroomDo invokes f in an environment with no preexisting packages.
+// For testing of import/export only.
+func CleanroomDo(f func()) {
+ saved := pkgMap
+ pkgMap = make(map[string]*Pkg)
+ f()
+ pkgMap = saved
+}
diff --git a/src/cmd/compile/internal/types/scope.go b/src/cmd/compile/internal/types/scope.go
new file mode 100644
index 0000000..d7c454f
--- /dev/null
+++ b/src/cmd/compile/internal/types/scope.go
@@ -0,0 +1,113 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+// Declaration stack & operations
+
+var blockgen int32 = 1 // max block number
+var Block int32 = 1 // current block number
+
+// A dsym stores a symbol's shadowed declaration so that it can be
+// restored once the block scope ends.
+type dsym struct {
+ sym *Sym // sym == nil indicates stack mark
+ def Object
+ block int32
+ lastlineno src.XPos // last declaration for diagnostic
+}
+
+// dclstack maintains a stack of shadowed symbol declarations so that
+// Popdcl can restore their declarations when a block scope ends.
+var dclstack []dsym
+
+// Pushdcl pushes the current declaration for symbol s (if any) so that
+// it can be shadowed by a new declaration within a nested block scope.
+func Pushdcl(s *Sym) {
+ dclstack = append(dclstack, dsym{
+ sym: s,
+ def: s.Def,
+ block: s.Block,
+ lastlineno: s.Lastlineno,
+ })
+}
+
+// Popdcl pops the innermost block scope and restores all symbol declarations
+// to their previous state.
+func Popdcl() {
+ for i := len(dclstack); i > 0; i-- {
+ d := &dclstack[i-1]
+ s := d.sym
+ if s == nil {
+ // pop stack mark
+ Block = d.block
+ dclstack = dclstack[:i-1]
+ return
+ }
+
+ s.Def = d.def
+ s.Block = d.block
+ s.Lastlineno = d.lastlineno
+
+ // Clear dead pointer fields.
+ d.sym = nil
+ d.def = nil
+ }
+ base.Fatalf("popdcl: no stack mark")
+}
+
+// Markdcl records the start of a new block scope for declarations.
+func Markdcl() {
+ dclstack = append(dclstack, dsym{
+ sym: nil, // stack mark
+ block: Block,
+ })
+ blockgen++
+ Block = blockgen
+}
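+
+// Sketch of the intended scope lifecycle (symbol s and its new definition
+// are assumed):
+//
+//	Markdcl()      // open a block scope (pushes a stack mark)
+//	Pushdcl(s)     // save s's current declaration before shadowing it
+//	s.Def = newDef // declare the shadowing object
+//	Popdcl()       // close the scope; s.Def, s.Block, s.Lastlineno restored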
+
+func isDclstackValid() bool {
+ for _, d := range dclstack {
+ if d.sym == nil {
+ return false
+ }
+ }
+ return true
+}
+
+// PkgDef returns the definition associated with s at package scope.
+func (s *Sym) PkgDef() Object {
+ return *s.pkgDefPtr()
+}
+
+// SetPkgDef sets the definition associated with s at package scope.
+func (s *Sym) SetPkgDef(n Object) {
+ *s.pkgDefPtr() = n
+}
+
+func (s *Sym) pkgDefPtr() *Object {
+ // Look for outermost saved declaration, which must be the
+ // package scope definition, if present.
+ for i := range dclstack {
+ d := &dclstack[i]
+ if s == d.sym {
+ return &d.def
+ }
+ }
+
+ // Otherwise, the declaration hasn't been shadowed within a
+ // function scope.
+ return &s.Def
+}
+
+func CheckDclstack() {
+ if !isDclstackValid() {
+ base.Fatalf("mark left on the dclstack")
+ }
+}
diff --git a/src/cmd/compile/internal/types/size.go b/src/cmd/compile/internal/types/size.go
new file mode 100644
index 0000000..0c3fa92
--- /dev/null
+++ b/src/cmd/compile/internal/types/size.go
@@ -0,0 +1,709 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+var PtrSize int
+
+var RegSize int
+
+// Slices in the runtime are represented by three components:
+//
+// type slice struct {
+// ptr unsafe.Pointer
+// len int
+// cap int
+// }
+//
+// Strings in the runtime are represented by two components:
+//
+// type string struct {
+// ptr unsafe.Pointer
+// len int
+// }
+//
+// These variables are the offsets of fields and sizes of these structs.
+var (
+ SlicePtrOffset int64
+ SliceLenOffset int64
+ SliceCapOffset int64
+
+ SliceSize int64
+ StringSize int64
+)
+
+var SkipSizeForTracing bool
+
+// typePos returns the position associated with t.
+// This is where t was declared or where it appeared as a type expression.
+func typePos(t *Type) src.XPos {
+ if pos := t.Pos(); pos.IsKnown() {
+ return pos
+ }
+ base.Fatalf("bad type: %v", t)
+ panic("unreachable")
+}
+
+// MaxWidth is the maximum size of a value on the target architecture.
+var MaxWidth int64
+
+// CalcSizeDisabled indicates whether it is safe
+// to calculate Types' widths and alignments. See CalcSize.
+var CalcSizeDisabled bool
+
+// machine size and rounding alignment are dictated by
+// the size of a pointer, set in betypeinit (see ../amd64/galign.go).
+var defercalc int
+
+func Rnd(o int64, r int64) int64 {
+ if r < 1 || r > 8 || r&(r-1) != 0 {
+ base.Fatalf("rnd %d", r)
+ }
+ return (o + r - 1) &^ (r - 1)
+}
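+
+// For example (values assumed): Rnd(10, 8) == 16 and Rnd(16, 8) == 16.
+// r must be a power of two between 1 and 8; otherwise Rnd calls Fatalf.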
+
+// expandiface computes the method set for interface type t by
+// expanding embedded interfaces.
+func expandiface(t *Type) {
+ seen := make(map[*Sym]*Field)
+ var methods []*Field
+
+ addMethod := func(m *Field, explicit bool) {
+ switch prev := seen[m.Sym]; {
+ case prev == nil:
+ seen[m.Sym] = m
+ case AllowsGoVersion(t.Pkg(), 1, 14) && !explicit && Identical(m.Type, prev.Type):
+ return
+ default:
+ base.ErrorfAt(m.Pos, "duplicate method %s", m.Sym.Name)
+ }
+ methods = append(methods, m)
+ }
+
+ {
+ methods := t.Methods().Slice()
+ sort.SliceStable(methods, func(i, j int) bool {
+ mi, mj := methods[i], methods[j]
+
+ // Sort embedded types by type name (if any).
+ if mi.Sym == nil && mj.Sym == nil {
+ return mi.Type.Sym().Less(mj.Type.Sym())
+ }
+
+ // Sort methods before embedded types.
+ if mi.Sym == nil || mj.Sym == nil {
+ return mi.Sym != nil
+ }
+
+ // Sort methods by symbol name.
+ return mi.Sym.Less(mj.Sym)
+ })
+ }
+
+ for _, m := range t.Methods().Slice() {
+ if m.Sym == nil {
+ continue
+ }
+
+ CheckSize(m.Type)
+ addMethod(m, true)
+ }
+
+ for _, m := range t.Methods().Slice() {
+ if m.Sym != nil || m.Type == nil {
+ continue
+ }
+
+ if m.Type.IsUnion() {
+ continue
+ }
+
+ // In 1.18, embedded types can be anything. In Go 1.17, we disallow
+ // embedding anything other than interfaces.
+ if !m.Type.IsInterface() {
+ if AllowsGoVersion(t.Pkg(), 1, 18) {
+ continue
+ }
+ base.ErrorfAt(m.Pos, "interface contains embedded non-interface, non-union %v", m.Type)
+ m.SetBroke(true)
+ t.SetBroke(true)
+ // Add to fields so that error messages
+ // include the broken embedded type when
+ // printing t.
+ // TODO(mdempsky): Revisit this.
+ methods = append(methods, m)
+ continue
+ }
+
+ // Embedded interface: duplicate all methods
+ // (including broken ones, if any) and add to t's
+ // method set.
+ for _, t1 := range m.Type.AllMethods().Slice() {
+ f := NewField(m.Pos, t1.Sym, t1.Type)
+ addMethod(f, false)
+
+ // Clear position after typechecking, for consistency with types2.
+ f.Pos = src.NoXPos
+ }
+
+ // Clear position after typechecking, for consistency with types2.
+ m.Pos = src.NoXPos
+ }
+
+ sort.Sort(MethodsByName(methods))
+
+ if int64(len(methods)) >= MaxWidth/int64(PtrSize) {
+ base.ErrorfAt(typePos(t), "interface too large")
+ }
+ for i, m := range methods {
+ m.Offset = int64(i) * int64(PtrSize)
+ }
+
+ t.SetAllMethods(methods)
+}
+
+func calcStructOffset(errtype *Type, t *Type, o int64, flag int) int64 {
+ // flag is 0 (receiver), 1 (actual struct), or RegSize (in/out parameters)
+ isStruct := flag == 1
+ starto := o
+ maxalign := int32(flag)
+ if maxalign < 1 {
+ maxalign = 1
+ }
+ lastzero := int64(0)
+ for _, f := range t.Fields().Slice() {
+ if f.Type == nil {
+ // broken field, just skip it so that other valid fields
+ // get a width.
+ continue
+ }
+
+ CalcSize(f.Type)
+ if int32(f.Type.align) > maxalign {
+ maxalign = int32(f.Type.align)
+ }
+ if f.Type.align > 0 {
+ o = Rnd(o, int64(f.Type.align))
+ }
+ if isStruct { // For receiver/args/results, do not set, it depends on ABI
+ f.Offset = o
+ }
+
+ w := f.Type.width
+ if w < 0 {
+ base.Fatalf("invalid width %d", f.Type.width)
+ }
+ if w == 0 {
+ lastzero = o
+ }
+ o += w
+ maxwidth := MaxWidth
+ // On 32-bit systems, reflect tables impose an additional constraint
+ // that each field start offset must fit in 31 bits.
+ if maxwidth < 1<<32 {
+ maxwidth = 1<<31 - 1
+ }
+ if o >= maxwidth {
+ base.ErrorfAt(typePos(errtype), "type %L too large", errtype)
+ o = 8 // small but nonzero
+ }
+ }
+
+ // For nonzero-sized structs which end in a zero-sized thing, we add
+ // an extra byte of padding to the type. This padding ensures that
+ // taking the address of the zero-sized thing can't manufacture a
+ // pointer to the next object in the heap. See issue 9401.
+ if flag == 1 && o > starto && o == lastzero {
+ o++
+ }
+
+ // final width is rounded
+ if flag != 0 {
+ o = Rnd(o, int64(maxalign))
+ }
+ t.align = uint8(maxalign)
+
+ // type width only includes back to first field's offset
+ t.width = o - starto
+
+ return o
+}
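+
+// Illustrative result on a 64-bit target (layout assumed, not asserted by a
+// test in this file): for struct { a int8; b int64 }, a is placed at offset
+// 0, b is rounded up to offset 8, and the struct gets width 16 and
+// alignment 8.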
+
+// findTypeLoop searches for an invalid type declaration loop involving
+// type t and reports whether one is found. If so, path contains the
+// loop.
+//
+// path points to a slice used for tracking the sequence of types
+// visited. Using a pointer to a slice allows the slice capacity to
+// grow and limit reallocations.
+func findTypeLoop(t *Type, path *[]*Type) bool {
+ // We implement a simple DFS loop-finding algorithm. This
+ // could be faster, but type cycles are rare.
+
+ if t.Sym() != nil {
+ // Declared type. Check for loops and otherwise
+ // recurse on the type expression used in the type
+ // declaration.
+
+ // Type imported from package, so it can't be part of
+ // a type loop (otherwise that package should have
+ // failed to compile).
+ if t.Sym().Pkg != LocalPkg {
+ return false
+ }
+
+ for i, x := range *path {
+ if x == t {
+ *path = (*path)[i:]
+ return true
+ }
+ }
+
+ *path = append(*path, t)
+ if findTypeLoop(t.Obj().(TypeObject).TypeDefn(), path) {
+ return true
+ }
+ *path = (*path)[:len(*path)-1]
+ } else {
+ // Anonymous type. Recurse on contained types.
+
+ switch t.Kind() {
+ case TARRAY:
+ if findTypeLoop(t.Elem(), path) {
+ return true
+ }
+ case TSTRUCT:
+ for _, f := range t.Fields().Slice() {
+ if findTypeLoop(f.Type, path) {
+ return true
+ }
+ }
+ case TINTER:
+ for _, m := range t.Methods().Slice() {
+ if m.Type.IsInterface() { // embedded interface
+ if findTypeLoop(m.Type, path) {
+ return true
+ }
+ }
+ }
+ }
+ }
+
+ return false
+}
+
+func reportTypeLoop(t *Type) {
+ if t.Broke() {
+ return
+ }
+
+ var l []*Type
+ if !findTypeLoop(t, &l) {
+ base.Fatalf("failed to find type loop for: %v", t)
+ }
+
+ // Rotate loop so that the earliest type declaration is first.
+ i := 0
+ for j, t := range l[1:] {
+ if typePos(t).Before(typePos(l[i])) {
+ i = j + 1
+ }
+ }
+ l = append(l[i:], l[:i]...)
+
+ var msg bytes.Buffer
+ fmt.Fprintf(&msg, "invalid recursive type %v\n", l[0])
+ for _, t := range l {
+ fmt.Fprintf(&msg, "\t%v: %v refers to\n", base.FmtPos(typePos(t)), t)
+ t.SetBroke(true)
+ }
+ fmt.Fprintf(&msg, "\t%v: %v", base.FmtPos(typePos(l[0])), l[0])
+ base.ErrorfAt(typePos(l[0]), msg.String())
+}
+
+// CalcSize calculates and stores the size and alignment for t.
+// If CalcSizeDisabled is set, and the size/alignment
+// have not already been calculated, it calls Fatal.
+// This is used to prevent data races in the back end.
+func CalcSize(t *Type) {
+ // Calling CalcSize when typecheck tracing enabled is not safe.
+ // See issue #33658.
+ if base.EnableTrace && SkipSizeForTracing {
+ return
+ }
+ if PtrSize == 0 {
+ // Assume this is a test.
+ return
+ }
+
+ if t == nil {
+ return
+ }
+
+ if t.width == -2 {
+ reportTypeLoop(t)
+ t.width = 0
+ t.align = 1
+ return
+ }
+
+ if t.widthCalculated() {
+ return
+ }
+
+ if CalcSizeDisabled {
+ if t.Broke() {
+ // break infinite recursion from Fatal call below
+ return
+ }
+ t.SetBroke(true)
+ base.Fatalf("width not calculated: %v", t)
+ }
+
+ // break infinite recursion if the broken recursive type
+ // is referenced again
+ if t.Broke() && t.width == 0 {
+ return
+ }
+
+ // defer CheckSize calls until after we're done
+ DeferCheckSize()
+
+ lno := base.Pos
+ if pos := t.Pos(); pos.IsKnown() {
+ base.Pos = pos
+ }
+
+ t.width = -2
+ t.align = 0 // 0 means use t.Width, below
+
+ et := t.Kind()
+ switch et {
+ case TFUNC, TCHAN, TMAP, TSTRING:
+ break
+
+ // SimType == 0 during bootstrap
+ default:
+ if SimType[t.Kind()] != 0 {
+ et = SimType[t.Kind()]
+ }
+ }
+
+ var w int64
+ switch et {
+ default:
+ base.Fatalf("CalcSize: unknown type: %v", t)
+
+ // compiler-specific stuff
+ case TINT8, TUINT8, TBOOL:
+ // bool is int8
+ w = 1
+
+ case TINT16, TUINT16:
+ w = 2
+
+ case TINT32, TUINT32, TFLOAT32:
+ w = 4
+
+ case TINT64, TUINT64, TFLOAT64:
+ w = 8
+ t.align = uint8(RegSize)
+
+ case TCOMPLEX64:
+ w = 8
+ t.align = 4
+
+ case TCOMPLEX128:
+ w = 16
+ t.align = uint8(RegSize)
+
+ case TPTR:
+ w = int64(PtrSize)
+ CheckSize(t.Elem())
+
+ case TUNSAFEPTR:
+ w = int64(PtrSize)
+
+ case TINTER: // implemented as 2 pointers
+ w = 2 * int64(PtrSize)
+ t.align = uint8(PtrSize)
+ expandiface(t)
+
+ case TUNION:
+ // Always part of an interface for now, so size/align don't matter.
+ // Pretend a union is represented like an interface.
+ w = 2 * int64(PtrSize)
+ t.align = uint8(PtrSize)
+
+ case TCHAN: // implemented as pointer
+ w = int64(PtrSize)
+
+ CheckSize(t.Elem())
+
+ // Make fake type to trigger channel element size check after
+ // any top-level recursive type has been completed.
+ t1 := NewChanArgs(t)
+ CheckSize(t1)
+
+ case TCHANARGS:
+ t1 := t.ChanArgs()
+ CalcSize(t1) // just in case
+ // Make sure size of t1.Elem() is calculated at this point. We can
+ // use CalcSize() here rather than CheckSize(), because the top-level
+ // (possibly recursive) type will have been calculated before the fake
+ // chanargs is handled.
+ CalcSize(t1.Elem())
+ if t1.Elem().width >= 1<<16 {
+ base.Errorf("channel element type too large (>64kB)")
+ }
+ w = 1 // anything will do
+
+ case TMAP: // implemented as pointer
+ w = int64(PtrSize)
+ CheckSize(t.Elem())
+ CheckSize(t.Key())
+
+ case TFORW: // should have been filled in
+ reportTypeLoop(t)
+ w = 1 // anything will do
+
+ case TANY:
+ // not a real type; should be replaced before use.
+ base.Fatalf("CalcSize any")
+
+ case TSTRING:
+ if StringSize == 0 {
+ base.Fatalf("early CalcSize string")
+ }
+ w = StringSize
+ t.align = uint8(PtrSize)
+
+ case TARRAY:
+ if t.Elem() == nil {
+ break
+ }
+
+ CalcSize(t.Elem())
+ if t.Elem().width != 0 {
+ cap := (uint64(MaxWidth) - 1) / uint64(t.Elem().width)
+ if uint64(t.NumElem()) > cap {
+ base.Errorf("type %L larger than address space", t)
+ }
+ }
+ w = t.NumElem() * t.Elem().width
+ t.align = t.Elem().align
+
+ case TSLICE:
+ if t.Elem() == nil {
+ break
+ }
+ w = SliceSize
+ CheckSize(t.Elem())
+ t.align = uint8(PtrSize)
+
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("CalcSize fn struct %v", t)
+ }
+ w = calcStructOffset(t, t, 0, 1)
+
+ // make fake type to check later to
+ // trigger function argument computation.
+ case TFUNC:
+ t1 := NewFuncArgs(t)
+ CheckSize(t1)
+ w = int64(PtrSize) // width of func type is pointer
+
+ // a function type's arguments are 3 concatenated structures
+ // (receiver, params, results); compute their widths as a side effect.
+ case TFUNCARGS:
+ t1 := t.FuncArgs()
+ w = calcStructOffset(t1, t1.Recvs(), 0, 0)
+ w = calcStructOffset(t1, t1.Params(), w, RegSize)
+ w = calcStructOffset(t1, t1.Results(), w, RegSize)
+ t1.extra.(*Func).Argwid = w
+ if w%int64(RegSize) != 0 {
+ base.Warn("bad type %v %d\n", t1, w)
+ }
+ t.align = 1
+
+ case TTYPEPARAM:
+ // TODO(danscales) - remove when we eliminate the need
+ // to do CalcSize in noder2 (which shouldn't be needed in the noder)
+ w = int64(PtrSize)
+ }
+
+ if PtrSize == 4 && w != int64(int32(w)) {
+ base.Errorf("type %v too large", t)
+ }
+
+ t.width = w
+ if t.align == 0 {
+ if w == 0 || w > 8 || w&(w-1) != 0 {
+ base.Fatalf("invalid alignment for %v", t)
+ }
+ t.align = uint8(w)
+ }
+
+ base.Pos = lno
+
+ ResumeCheckSize()
+}
+
+// CalcStructSize calculates the size of s,
+// filling in s.Width and s.Align,
+// even if size calculation is otherwise disabled.
+func CalcStructSize(s *Type) {
+ s.width = calcStructOffset(s, s, 0, 1) // sets align
+}
+
+// RecalcSize is like CalcSize, but recalculates t's size even if it
+// has already been calculated before. It does not recalculate other
+// types.
+func RecalcSize(t *Type) {
+ t.align = 0
+ CalcSize(t)
+}
+
+func (t *Type) widthCalculated() bool {
+ return t.align > 0
+}
+
+// when a type's width should be known, we call CheckSize
+// to compute it. during a declaration like
+//
+// type T *struct { next T }
+//
+// it is necessary to defer the calculation of the struct width
+// until after T has been initialized to be a pointer to that struct.
+// similarly, during import processing structs may be used
+// before their definition. in those situations, calling
+// DeferCheckSize() stops width calculations until
+// ResumeCheckSize() is called, at which point all the
+// CalcSizes that were deferred are executed.
+// CalcSize should only be called when the type's size
+// is needed immediately. CheckSize makes sure the
+// size is evaluated eventually.
+
+var deferredTypeStack []*Type
+
+func CheckSize(t *Type) {
+ if t == nil {
+ return
+ }
+
+ // function arg structs should not be checked
+ // outside of the enclosing function.
+ if t.IsFuncArgStruct() {
+ base.Fatalf("CheckSize %v", t)
+ }
+
+ if defercalc == 0 {
+ CalcSize(t)
+ return
+ }
+
+ // if type has not yet been pushed on deferredTypeStack yet, do it now
+ if !t.Deferwidth() {
+ t.SetDeferwidth(true)
+ deferredTypeStack = append(deferredTypeStack, t)
+ }
+}
+
+func DeferCheckSize() {
+ defercalc++
+}
+
+func ResumeCheckSize() {
+ if defercalc == 1 {
+ for len(deferredTypeStack) > 0 {
+ t := deferredTypeStack[len(deferredTypeStack)-1]
+ deferredTypeStack = deferredTypeStack[:len(deferredTypeStack)-1]
+ t.SetDeferwidth(false)
+ CalcSize(t)
+ }
+ }
+
+ defercalc--
+}
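+
+// Sketch of the deferral mechanism described above (type t assumed):
+//
+//	DeferCheckSize()
+//	CheckSize(t)      // t is queued on deferredTypeStack instead of sized now
+//	ResumeCheckSize() // outermost resume pops the stack and calls CalcSize(t)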
+
+// PtrDataSize returns the length in bytes of the prefix of t
+// containing pointer data. Anything after this offset is scalar data.
+//
+// PtrDataSize is only defined for actual Go types. It's an error to
+// use it on compiler-internal types (e.g., TSSA, TRESULTS).
+func PtrDataSize(t *Type) int64 {
+ switch t.Kind() {
+ case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+ TUINT32, TINT64, TUINT64, TINT, TUINT,
+ TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+ return 0
+
+ case TPTR:
+ if t.Elem().NotInHeap() {
+ return 0
+ }
+ return int64(PtrSize)
+
+ case TUNSAFEPTR, TFUNC, TCHAN, TMAP:
+ return int64(PtrSize)
+
+ case TSTRING:
+ // struct { byte *str; intgo len; }
+ return int64(PtrSize)
+
+ case TINTER:
+ // struct { Itab *tab; void *data; } or
+ // struct { Type *type; void *data; }
+ // Note: see comment in typebits.Set
+ return 2 * int64(PtrSize)
+
+ case TSLICE:
+ if t.Elem().NotInHeap() {
+ return 0
+ }
+ // struct { byte *array; uintgo len; uintgo cap; }
+ return int64(PtrSize)
+
+ case TARRAY:
+ if t.NumElem() == 0 {
+ return 0
+ }
+ // t.NumElem() > 0
+ size := PtrDataSize(t.Elem())
+ if size == 0 {
+ return 0
+ }
+ return (t.NumElem()-1)*t.Elem().Size() + size
+
+ case TSTRUCT:
+ // Find the last field that has pointers, if any.
+ fs := t.Fields().Slice()
+ for i := len(fs) - 1; i >= 0; i-- {
+ if size := PtrDataSize(fs[i].Type); size > 0 {
+ return fs[i].Offset + size
+ }
+ }
+ return 0
+
+ case TSSA:
+ if t != TypeInt128 {
+ base.Fatalf("PtrDataSize: unexpected ssa type %v", t)
+ }
+ return 0
+
+ default:
+ base.Fatalf("PtrDataSize: unexpected type, %v", t)
+ return 0
+ }
+}
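+
+// A few illustrative values on a 64-bit target (PtrSize == 8; the Go types
+// are assumed):
+//
+//	PtrDataSize(string)                  == 8  // pointer word only
+//	PtrDataSize(struct{ x int; p *int }) == 16 // p's offset (8) + 8
+//	PtrDataSize([4]string)               == 56 // 3*16 + 8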
diff --git a/src/cmd/compile/internal/types/sizeof_test.go b/src/cmd/compile/internal/types/sizeof_test.go
new file mode 100644
index 0000000..d37c173
--- /dev/null
+++ b/src/cmd/compile/internal/types/sizeof_test.go
@@ -0,0 +1,48 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "reflect"
+ "testing"
+ "unsafe"
+)
+
+// Assert that the sizes of important structures do not change unexpectedly.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = unsafe.Sizeof(uintptr(0)) == 8
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ {Sym{}, 44, 72},
+ {Type{}, 64, 112},
+ {Map{}, 20, 40},
+ {Forward{}, 20, 32},
+ {Func{}, 28, 48},
+ {Struct{}, 16, 32},
+ {Interface{}, 8, 16},
+ {Chan{}, 8, 16},
+ {Array{}, 12, 16},
+ {FuncArgs{}, 4, 8},
+ {ChanArgs{}, 4, 8},
+ {Ptr{}, 4, 8},
+ {Slice{}, 4, 8},
+ }
+
+ for _, tt := range tests {
+ want := tt._32bit
+ if _64bit {
+ want = tt._64bit
+ }
+ got := reflect.TypeOf(tt.val).Size()
+ if want != got {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", tt.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/sort.go b/src/cmd/compile/internal/types/sort.go
new file mode 100644
index 0000000..765c070
--- /dev/null
+++ b/src/cmd/compile/internal/types/sort.go
@@ -0,0 +1,19 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// MethodsByName sorts methods by name.
+type MethodsByName []*Field
+
+func (x MethodsByName) Len() int { return len(x) }
+func (x MethodsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x MethodsByName) Less(i, j int) bool { return x[i].Sym.Less(x[j].Sym) }
+
+// EmbeddedsByName sorts embedded types by name.
+type EmbeddedsByName []*Field
+
+func (x EmbeddedsByName) Len() int { return len(x) }
+func (x EmbeddedsByName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+func (x EmbeddedsByName) Less(i, j int) bool { return x[i].Type.Sym().Less(x[j].Type.Sym()) }
diff --git a/src/cmd/compile/internal/types/structuraltype.go b/src/cmd/compile/internal/types/structuraltype.go
new file mode 100644
index 0000000..ee1341b
--- /dev/null
+++ b/src/cmd/compile/internal/types/structuraltype.go
@@ -0,0 +1,191 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+// Implementation of structural type computation for types.
+
+// TODO: we would like to depend only on the types2 computation of structural type,
+// but we can only do that the next time we change the export format and export
+// structural type info along with each constraint type, since the compiler imports
+// types directly into types1 format.
+
+// A term describes elementary type sets:
+//
+// term{false, T} set of type T
+// term{true, T} set of types with underlying type T
+// term{} empty set (we specifically check for typ == nil)
+type term struct {
+ tilde bool
+ typ *Type
+}
+
+// StructuralType returns the structural type of an interface, or nil if it has no
+// structural type.
+func (t *Type) StructuralType() *Type {
+ sts, _ := specificTypes(t)
+ var su *Type
+ for _, st := range sts {
+ u := st.typ.Underlying()
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return nil
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ }
+ return su
+}
+
+// If x and y are identical, match returns x.
+// If x and y are identical channels but for their direction
+// and one of them is unrestricted, match returns the channel
+// with the restricted direction.
+// In all other cases, match returns nil.
+// x and y are assumed to be underlying types, hence are not named types.
+func match(x, y *Type) *Type {
+ if IdenticalStrict(x, y) {
+ return x
+ }
+
+ if x.IsChan() && y.IsChan() && IdenticalStrict(x.Elem(), y.Elem()) {
+ // We have channels that differ in direction only.
+ // If there's an unrestricted channel, select the restricted one.
+ // If both have the same direction, return x (either is fine).
+ switch {
+ case x.ChanDir().CanSend() && x.ChanDir().CanRecv():
+ return y
+ case y.ChanDir().CanSend() && y.ChanDir().CanRecv():
+ return x
+ }
+ }
+ return nil
+}
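+
+// For illustration (channel element types assumed):
+//
+//	match(int, int)              == int
+//	match(chan int, <-chan int)  == <-chan int // keep the restricted direction
+//	match(chan int, chan string) == nil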
+
+// specificTypes returns the list of specific types of an interface type or nil if
+// there are none. It also returns a flag that indicates, for an empty term list
+// result, whether it represents the empty set, or the infinite set of all types (in
+// both cases, there are no specific types).
+func specificTypes(t *Type) (list []term, inf bool) {
+ t.wantEtype(TINTER)
+
+ // We have infinite term list before processing any type elements
+ // (or if there are no type elements).
+ inf = true
+ for _, m := range t.Methods().Slice() {
+ var r2 []term
+ inf2 := false
+
+ switch {
+ case m.IsMethod():
+ inf2 = true
+
+ case m.Type.IsUnion():
+ nt := m.Type.NumTerms()
+ for i := 0; i < nt; i++ {
+ t, tilde := m.Type.Term(i)
+ if t.IsInterface() {
+ r3, r3inf := specificTypes(t)
+ if r3inf {
+ // Union with an infinite set of types is
+ // infinite, so skip remaining terms.
+ r2 = nil
+ inf2 = true
+ break
+ }
+ // Add the elements of r3 to r2.
+ for _, r3e := range r3 {
+ r2 = insertType(r2, r3e)
+ }
+ } else {
+ r2 = insertType(r2, term{tilde, t})
+ }
+ }
+
+ case m.Type.IsInterface():
+ r2, inf2 = specificTypes(m.Type)
+
+ default:
+ // m.Type is a single non-interface type, so r2 is just a
+ // one-element list, inf2 is false.
+ r2 = []term{{false, m.Type}}
+ }
+
+ if inf2 {
+ // If the current type element has infinite types,
+ // its intersection with r is just r, so skip this type element.
+ continue
+ }
+
+ if inf {
+ // If r is infinite, then the intersection of r and r2 is just r2.
+ list = r2
+ inf = false
+ continue
+ }
+
+ // r and r2 are finite, so intersect r and r2.
+ var r3 []term
+ for _, re := range list {
+ for _, r2e := range r2 {
+ if tm := intersect(re, r2e); tm.typ != nil {
+ r3 = append(r3, tm)
+ }
+ }
+ }
+ list = r3
+ }
+ return
+}
+
+// insertType adds tm to list and returns the result, merging tm with an
+// existing element (via union) if their type sets overlap.
+func insertType(list []term, tm term) []term {
+ for i, elt := range list {
+ if new := union(elt, tm); new.typ != nil {
+ // Replace existing elt with the union of elt and tm.
+ list[i] = new
+ return list
+ }
+ }
+ return append(list, tm)
+}
+
+// If x and y are disjoint, return term with nil typ (which means the union should
+// include both types). If x and y are not disjoint, return the single type which is
+// the union of x and y.
+func union(x, y term) term {
+ if disjoint(x, y) {
+ return term{false, nil}
+ }
+ if x.tilde || !y.tilde {
+ return x
+ }
+ return y
+}
+
+// intersect returns the intersection x ∩ y.
+func intersect(x, y term) term {
+ if disjoint(x, y) {
+ return term{false, nil}
+ }
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// disjoint reports whether x ∩ y == ∅.
+func disjoint(x, y term) bool {
+ ux := x.typ
+ if y.tilde {
+ ux = ux.Underlying()
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = uy.Underlying()
+ }
+ return !IdenticalStrict(ux, uy)
+}
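+
+// Illustrative term algebra (T is assumed to be a defined type whose
+// underlying type is int):
+//
+//	union(~int, T)     == ~int           // T's type set is contained in ~int's
+//	intersect(~int, T) == T
+//	union(int, T)      == term{typ: nil} // disjoint: keep both terms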
diff --git a/src/cmd/compile/internal/types/structuraltype_test.go b/src/cmd/compile/internal/types/structuraltype_test.go
new file mode 100644
index 0000000..fc34458
--- /dev/null
+++ b/src/cmd/compile/internal/types/structuraltype_test.go
@@ -0,0 +1,135 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test that StructuralType() calculates the correct value of structural type for
+// unusual cases.
+
+package types
+
+import (
+ "cmd/internal/src"
+ "testing"
+)
+
+type test struct {
+ typ *Type
+ structuralType *Type
+}
+
+func TestStructuralType(t *testing.T) {
+ // These are the few constants that need to be initialized in order to use
+ // the types package without using the typecheck package by calling
+ // typecheck.InitUniverse() (the normal way to initialize the types package).
+ PtrSize = 8
+ RegSize = 8
+ MaxWidth = 1 << 50
+
+ // type intType = int
+ intType := newType(TINT)
+ // type structf = struct { f int }
+ structf := NewStruct(nil, []*Field{
+ NewField(src.NoXPos, LocalPkg.Lookup("f"), intType),
+ })
+
+ // type Sf structf
+ Sf := newType(TFORW)
+ Sf.sym = LocalPkg.Lookup("Sf")
+ Sf.SetUnderlying(structf)
+
+ // type A int
+ A := newType(TFORW)
+ A.sym = LocalPkg.Lookup("A")
+ A.SetUnderlying(intType)
+
+ // type B int
+ B := newType(TFORW)
+ B.sym = LocalPkg.Lookup("B")
+ B.SetUnderlying(intType)
+
+ emptyInterface := NewInterface(BuiltinPkg, []*Field{}, false)
+ any := newType(TFORW)
+ any.sym = LocalPkg.Lookup("any")
+ any.SetUnderlying(emptyInterface)
+
+ // The tests marked NONE have no structural type; all the others have a
+ // structural type of structf - "struct { f int }"
+ tests := []*test{
+ {
+ // interface { struct { f int } }
+ embed(structf),
+ structf,
+ },
+ {
+ // interface { struct { f int }; any }
+ embed(structf, any),
+ structf,
+ },
+ {
+ // interface { Sf }
+ embed(Sf),
+ structf,
+ },
+ {
+ // interface { any | Sf }
+ embed(any, Sf),
+ structf,
+ },
+ {
+ // interface { struct { f int }; Sf } - NONE
+ embed(structf, Sf),
+ nil,
+ },
+ {
+ // interface { struct { f int } | ~struct { f int } }
+ embed(NewUnion([]*Type{structf, structf}, []bool{false, true})),
+ structf,
+ },
+ {
+ // interface { ~struct { f int } ; Sf }
+ embed(NewUnion([]*Type{structf}, []bool{true}), Sf),
+ structf,
+ },
+ {
+ // interface { struct { f int } ; Sf } - NONE
+ embed(NewUnion([]*Type{structf}, []bool{false}), Sf),
+ nil,
+ },
+ {
+ // interface { Sf | A; B | Sf}
+ embed(NewUnion([]*Type{Sf, A}, []bool{false, false}),
+ NewUnion([]*Type{B, Sf}, []bool{false, false})),
+ structf,
+ },
+ {
+ // interface { Sf | A; A | Sf } - NONE
+ embed(NewUnion([]*Type{Sf, A}, []bool{false, false}),
+ NewUnion([]*Type{A, Sf}, []bool{false, false})),
+ nil,
+ },
+ {
+ // interface { Sf | any } - NONE
+ embed(NewUnion([]*Type{Sf, any}, []bool{false, false})),
+ nil,
+ },
+ {
+ // interface { Sf | any; Sf }
+ embed(NewUnion([]*Type{Sf, any}, []bool{false, false}), Sf),
+ structf,
+ },
+ }
+ for _, tst := range tests {
+ if got, want := tst.typ.StructuralType(), tst.structuralType; got != want {
+ t.Errorf("StructuralType(%v) = %v, wanted %v",
+ tst.typ, got, want)
+ }
+ }
+}
+
+func embed(types ...*Type) *Type {
+ fields := make([]*Field, len(types))
+ for i, t := range types {
+ fields[i] = NewField(src.NoXPos, nil, t)
+ }
+ return NewInterface(LocalPkg, fields, false)
+}
diff --git a/src/cmd/compile/internal/types/sym.go b/src/cmd/compile/internal/types/sym.go
new file mode 100644
index 0000000..fb642f5
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym.go
@@ -0,0 +1,150 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/obj"
+ "cmd/internal/src"
+ "unicode"
+ "unicode/utf8"
+)
+
+// Sym represents an object name in a segmented (pkg, name) namespace.
+// Most commonly, this is a Go identifier naming an object declared within a package,
+// but Syms are also used to name internal synthesized objects.
+//
+// As an exception, field and method names that are exported use the Sym
+// associated with localpkg instead of the package that declared them. This
+// allows using Sym pointer equality to test for Go identifier uniqueness when
+// handling selector expressions.
+//
+// Ideally, Sym should be used for representing Go language constructs,
+// while cmd/internal/obj.LSym is used for representing emitted artifacts.
+//
+// NOTE: In practice, things can be messier than the description above
+// for various reasons (historical, convenience).
+type Sym struct {
+ Linkname string // link name
+
+ Pkg *Pkg
+ Name string // object name
+
+ // Def, Block, and Lastlineno are saved and restored by Pushdcl/Popdcl.
+
+ // The unique ONAME, OTYPE, OPACK, or OLITERAL node that this symbol is
+ // bound to within the current scope. (Most parts of the compiler should
+ // prefer passing the Node directly, rather than relying on this field.)
+ Def Object
+ Block int32 // blocknumber to catch redeclaration
+ Lastlineno src.XPos // last declaration for diagnostic
+
+ flags bitset8
+}
+
+const (
+ symOnExportList = 1 << iota // added to exportlist (no need to add again)
+ symUniq
+ symSiggen // type symbol has been generated
+ symAsm // on asmlist, for writing to -asmhdr
+ symFunc // function symbol
+)
+
+func (sym *Sym) OnExportList() bool { return sym.flags&symOnExportList != 0 }
+func (sym *Sym) Uniq() bool { return sym.flags&symUniq != 0 }
+func (sym *Sym) Siggen() bool { return sym.flags&symSiggen != 0 }
+func (sym *Sym) Asm() bool { return sym.flags&symAsm != 0 }
+func (sym *Sym) Func() bool { return sym.flags&symFunc != 0 }
+
+func (sym *Sym) SetOnExportList(b bool) { sym.flags.set(symOnExportList, b) }
+func (sym *Sym) SetUniq(b bool) { sym.flags.set(symUniq, b) }
+func (sym *Sym) SetSiggen(b bool) { sym.flags.set(symSiggen, b) }
+func (sym *Sym) SetAsm(b bool) { sym.flags.set(symAsm, b) }
+func (sym *Sym) SetFunc(b bool) { sym.flags.set(symFunc, b) }
+
+func (sym *Sym) IsBlank() bool {
+ return sym != nil && sym.Name == "_"
+}
+
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, reflectdata.TypeLinksym(t) instead
+// of reflectdata.TypeSym(t).Linksym().
+func (sym *Sym) Linksym() *obj.LSym {
+ abi := obj.ABI0
+ if sym.Func() {
+ abi = obj.ABIInternal
+ }
+ return sym.LinksymABI(abi)
+}
+
+// Deprecated: This method should not be used directly. Instead, use a
+// higher-level abstraction that directly returns the linker symbol
+// for a named object. For example, (*ir.Name).LinksymABI(abi) instead
+// of (*ir.Name).Sym().LinksymABI(abi).
+func (sym *Sym) LinksymABI(abi obj.ABI) *obj.LSym {
+ if sym == nil {
+ base.Fatalf("nil symbol")
+ }
+ if sym.Linkname != "" {
+ return base.Linkname(sym.Linkname, abi)
+ }
+ return base.PkgLinksym(sym.Pkg.Prefix, sym.Name, abi)
+}
+
+// Less reports whether symbol a is ordered before symbol b.
+//
+// Symbols are ordered exported before non-exported, then by name, and
+// finally (for non-exported symbols) by package height and path.
+//
+// Ordering by package height is necessary to establish a consistent
+// ordering for non-exported names with the same spelling but from
+// different packages. We don't necessarily know the path for the
+// package being compiled, but by definition it will have a height
+// greater than any other packages seen within the compilation unit.
+// For more background, see issue #24693.
+func (a *Sym) Less(b *Sym) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported symbols before non-exported.
+ ea := IsExported(a.Name)
+ eb := IsExported(b.Name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package
+ // height and path.
+ if a.Name != b.Name {
+ return a.Name < b.Name
+ }
+ if !ea {
+ if a.Pkg.Height != b.Pkg.Height {
+ return a.Pkg.Height < b.Pkg.Height
+ }
+ return a.Pkg.Path < b.Pkg.Path
+ }
+ return false
+}
+
+// IsExported reports whether name is an exported Go symbol (that is,
+// whether it begins with an upper-case letter).
+func IsExported(name string) bool {
+ if r := name[0]; r < utf8.RuneSelf {
+ return 'A' <= r && r <= 'Z'
+ }
+ r, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(r)
+}
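+
+// For example (names assumed): IsExported("Foo") and IsExported("Φ") report
+// true, while IsExported("foo") and IsExported("φ") report false.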
diff --git a/src/cmd/compile/internal/types/sym_test.go b/src/cmd/compile/internal/types/sym_test.go
new file mode 100644
index 0000000..94efd42
--- /dev/null
+++ b/src/cmd/compile/internal/types/sym_test.go
@@ -0,0 +1,59 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types_test
+
+import (
+ "cmd/compile/internal/types"
+ "reflect"
+ "sort"
+ "testing"
+)
+
+func TestSymLess(t *testing.T) {
+ var (
+ local = types.NewPkg("", "")
+ abc = types.NewPkg("abc", "")
+ uvw = types.NewPkg("uvw", "")
+ xyz = types.NewPkg("xyz", "")
+ gr = types.NewPkg("gr", "")
+ )
+
+ data := []*types.Sym{
+ abc.Lookup("b"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ uvw.Lookup("c"),
+ local.Lookup("C"),
+ gr.Lookup("φ"),
+ local.Lookup("Φ"),
+ xyz.Lookup("b"),
+ abc.Lookup("a"),
+ local.Lookup("B"),
+ }
+ want := []*types.Sym{
+ local.Lookup("B"),
+ local.Lookup("B"),
+ local.Lookup("C"),
+ local.Lookup("C"),
+ local.Lookup("Φ"),
+ abc.Lookup("a"),
+ abc.Lookup("b"),
+ xyz.Lookup("b"),
+ uvw.Lookup("c"),
+ gr.Lookup("φ"),
+ }
+ if len(data) != len(want) {
+ t.Fatal("want and data must match")
+ }
+ if reflect.DeepEqual(data, want) {
+ t.Fatal("data must be shuffled")
+ }
+ sort.Slice(data, func(i, j int) bool { return data[i].Less(data[j]) })
+ if !reflect.DeepEqual(data, want) {
+ t.Logf("want: %#v", want)
+ t.Logf("data: %#v", data)
+ t.Errorf("sorting failed")
+ }
+}
diff --git a/src/cmd/compile/internal/types/type.go b/src/cmd/compile/internal/types/type.go
new file mode 100644
index 0000000..1021bd9
--- /dev/null
+++ b/src/cmd/compile/internal/types/type.go
@@ -0,0 +1,2235 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+ "fmt"
+ "strings"
+ "sync"
+)
+
+// Object represents an ir.Node, but without needing to import cmd/compile/internal/ir,
+// which would cause an import cycle. The uses in other packages must type assert
+// values of type Object to ir.Node or a more specific type.
+type Object interface {
+ Pos() src.XPos
+ Sym() *Sym
+ Type() *Type
+}
+
+// A TypeObject is an Object representing a named type.
+type TypeObject interface {
+ Object
+ TypeDefn() *Type // for "type T Defn", returns Defn
+}
+
+//go:generate stringer -type Kind -trimprefix T type.go
+
+// Kind describes a kind of type.
+type Kind uint8
+
+const (
+ Txxx Kind = iota
+
+ TINT8
+ TUINT8
+ TINT16
+ TUINT16
+ TINT32
+ TUINT32
+ TINT64
+ TUINT64
+ TINT
+ TUINT
+ TUINTPTR
+
+ TCOMPLEX64
+ TCOMPLEX128
+
+ TFLOAT32
+ TFLOAT64
+
+ TBOOL
+
+ TPTR
+ TFUNC
+ TSLICE
+ TARRAY
+ TSTRUCT
+ TCHAN
+ TMAP
+ TINTER
+ TFORW
+ TANY
+ TSTRING
+ TUNSAFEPTR
+ TTYPEPARAM
+ TUNION
+
+ // pseudo-types for literals
+ TIDEAL // untyped numeric constants
+ TNIL
+ TBLANK
+
+ // pseudo-types used temporarily only during frame layout (CalcSize())
+ TFUNCARGS
+ TCHANARGS
+
+ // SSA backend types
+ TSSA // internal types used by SSA backend (flags, memory, etc.)
+ TTUPLE // a pair of types, used by SSA backend
+ TRESULTS // multiple types; the result of calling a function or method, with a memory at the end.
+
+ NTYPE
+)
+
+// ChanDir is whether a channel can send, receive, or both.
+type ChanDir uint8
+
+func (c ChanDir) CanRecv() bool { return c&Crecv != 0 }
+func (c ChanDir) CanSend() bool { return c&Csend != 0 }
+
+const (
+ // types of channel
+ // must match ../../../../reflect/type.go:/ChanDir
+ Crecv ChanDir = 1 << 0
+ Csend ChanDir = 1 << 1
+ Cboth ChanDir = Crecv | Csend
+)
+
+// Types stores pointers to predeclared named types.
+//
+// It also stores pointers to several special types:
+// - Types[TANY] is the placeholder "any" type recognized by SubstArgTypes.
+// - Types[TBLANK] represents the blank variable's type.
+// - Types[TINTER] is the canonical "interface{}" type.
+// - Types[TNIL] represents the predeclared "nil" value's type.
+// - Types[TUNSAFEPTR] is package unsafe's Pointer type.
+var Types [NTYPE]*Type
+
+var (
+ // Predeclared alias types. These are actually created as distinct
+ // defined types for better error messages, but are then specially
+ // treated as identical to their respective underlying types.
+ AnyType *Type
+ ByteType *Type
+ RuneType *Type
+
+ // Predeclared error interface type.
+ ErrorType *Type
+ // Predeclared comparable interface type.
+ ComparableType *Type
+
+ // Types to represent untyped string and boolean constants.
+ UntypedString = newType(TSTRING)
+ UntypedBool = newType(TBOOL)
+
+ // Types to represent untyped numeric constants.
+ UntypedInt = newType(TIDEAL)
+ UntypedRune = newType(TIDEAL)
+ UntypedFloat = newType(TIDEAL)
+ UntypedComplex = newType(TIDEAL)
+)
+
+// A Type represents a Go type.
+//
+// There may be multiple unnamed types with identical structure. However, there must
+// be a unique Type object for each unique named (defined) type. After noding, a
+// package-level type can be looked up by building its unique symbol sym (sym =
+// package.Lookup(name)) and checking sym.Def. If sym.Def is non-nil, the type
+// already exists at package scope and is available at sym.Def.(*ir.Name).Type().
+// Local types (which may have the same name as a package-level type) are
+// distinguished by the value of vargen.
+type Type struct {
+ // extra contains extra etype-specific fields.
+ // As an optimization, those etype-specific structs which contain exactly
+ // one pointer-shaped field are stored as values rather than pointers when possible.
+ //
+ // TMAP: *Map
+ // TFORW: *Forward
+ // TFUNC: *Func
+ // TSTRUCT: *Struct
+ // TINTER: *Interface
+ // TFUNCARGS: FuncArgs
+ // TCHANARGS: ChanArgs
+ // TCHAN: *Chan
+ // TPTR: Ptr
+ // TARRAY: *Array
+ // TSLICE: Slice
+ // TSSA: string
+ // TTYPEPARAM: *Typeparam
+ // TUNION: *Union
+ extra interface{}
+
+ // width is the width of this Type in bytes.
+ width int64 // valid if Align > 0
+
+ // list of base methods (excluding embedding)
+ methods Fields
+ // list of all methods (including embedding)
+ allMethods Fields
+
+ // canonical OTYPE node for a named type (should be an ir.Name node with same sym)
+ nod Object
+ // the underlying type (type literal or predeclared type) for a defined type
+ underlying *Type
+
+ // Cache of composite types, with this type being the element type.
+ cache struct {
+ ptr *Type // *T, or nil
+ slice *Type // []T, or nil
+ }
+
+ sym *Sym // symbol containing name, for named types
+ vargen int32 // unique name for OTYPE/ONAME
+
+ kind Kind // kind of type
+ align uint8 // the required alignment of this type, in bytes (0 means Width and Align have not yet been computed)
+
+ flags bitset8
+
+ // For defined (named) generic types, a pointer to the list of type params
+ // (in order) of this type that need to be instantiated. For instantiated
+ // generic types, this is the targs used to instantiate them. These targs
+ // may be typeparams (for re-instantiated types such as Value[T2]) or
+ // concrete types (for fully instantiated types such as Value[int]).
+ // rparams is only set for named types that are generic or are fully
+ // instantiated from a generic type, and is otherwise set to nil.
+ // TODO(danscales): choose a better name.
+ rparams *[]*Type
+
+ // For an instantiated generic type, the base generic type.
+ // This backpointer is useful, because the base type is the type that has
+ // the method bodies.
+ origType *Type
+}
+
+func (*Type) CanBeAnSSAAux() {}
+
+const (
+ typeNotInHeap = 1 << iota // type cannot be heap allocated
+ typeBroke // broken type definition
+ typeNoalg // suppress hash and eq algorithm generation
+ typeDeferwidth // width computation has been deferred and type is on deferredTypeStack
+ typeRecur
+ typeHasTParam // there is a typeparam somewhere in the type (generic function or type)
+ typeIsShape // represents a set of closely related types, for generics
+ typeHasShape // there is a shape somewhere in the type
+)
+
+func (t *Type) NotInHeap() bool { return t.flags&typeNotInHeap != 0 }
+func (t *Type) Broke() bool { return t.flags&typeBroke != 0 }
+func (t *Type) Noalg() bool { return t.flags&typeNoalg != 0 }
+func (t *Type) Deferwidth() bool { return t.flags&typeDeferwidth != 0 }
+func (t *Type) Recur() bool { return t.flags&typeRecur != 0 }
+func (t *Type) HasTParam() bool { return t.flags&typeHasTParam != 0 }
+func (t *Type) IsShape() bool { return t.flags&typeIsShape != 0 }
+func (t *Type) HasShape() bool { return t.flags&typeHasShape != 0 }
+
+func (t *Type) SetNotInHeap(b bool) { t.flags.set(typeNotInHeap, b) }
+func (t *Type) SetBroke(b bool) { t.flags.set(typeBroke, b) }
+func (t *Type) SetNoalg(b bool) { t.flags.set(typeNoalg, b) }
+func (t *Type) SetDeferwidth(b bool) { t.flags.set(typeDeferwidth, b) }
+func (t *Type) SetRecur(b bool) { t.flags.set(typeRecur, b) }
+
+// Generic types should never have alg functions.
+func (t *Type) SetHasTParam(b bool) { t.flags.set(typeHasTParam, b); t.flags.set(typeNoalg, b) }
+
+// Should always do SetHasShape(true) when doing SetIsShape(true).
+func (t *Type) SetIsShape(b bool) { t.flags.set(typeIsShape, b) }
+func (t *Type) SetHasShape(b bool) { t.flags.set(typeHasShape, b) }
+
+// Kind returns the kind of type t.
+func (t *Type) Kind() Kind { return t.kind }
+
+// Sym returns the name of type t.
+func (t *Type) Sym() *Sym { return t.sym }
+func (t *Type) SetSym(sym *Sym) { t.sym = sym }
+
+// OrigType returns the original generic type that t is an
+// instantiation of, if any.
+func (t *Type) OrigType() *Type { return t.origType }
+func (t *Type) SetOrigType(orig *Type) { t.origType = orig }
+
+// Underlying returns the underlying type of type t.
+func (t *Type) Underlying() *Type { return t.underlying }
+
+// SetNod associates t with syntax node n.
+func (t *Type) SetNod(n Object) {
+ // t.nod can be non-nil already
+ // in the case of shared *Types, like []byte or interface{}.
+ if t.nod == nil {
+ t.nod = n
+ }
+}
+
+// Pos returns a position associated with t, if any.
+// This should only be used for diagnostics.
+func (t *Type) Pos() src.XPos {
+ if t.nod != nil {
+ return t.nod.Pos()
+ }
+ return src.NoXPos
+}
+
+func (t *Type) RParams() []*Type {
+ if t.rparams == nil {
+ return nil
+ }
+ return *t.rparams
+}
+
+func (t *Type) SetRParams(rparams []*Type) {
+ if len(rparams) == 0 {
+ base.Fatalf("Setting nil or zero-length rparams")
+ }
+ t.rparams = &rparams
+ // HasTParam should be set if any rparam is or has a type param. This is
+ // to handle the case of a generic type which doesn't reference any of its
+ // type params (e.g. most commonly, an empty struct).
+ for _, rparam := range rparams {
+ if rparam.HasTParam() {
+ t.SetHasTParam(true)
+ break
+ }
+ if rparam.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
+ }
+}
+
+// IsBaseGeneric reports whether t is a base generic type, i.e. a generic type
+// that has not been reinstantiated with other type params or fully instantiated.
+func (t *Type) IsBaseGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") < 0
+}
+
+// IsInstantiatedGeneric reports whether t is a generic type that has been
+// reinstantiated with new typeparams (i.e. is not fully instantiated).
+func (t *Type) IsInstantiatedGeneric() bool {
+ return len(t.RParams()) > 0 && strings.Index(t.Sym().Name, "[") >= 0 &&
+ t.HasTParam()
+}
+
+// IsFullyInstantiated reports whether t is a fully instantiated generic type; i.e. an
+// instantiated generic type where all type arguments are non-generic or fully
+// instantiated generic types.
+func (t *Type) IsFullyInstantiated() bool {
+ return len(t.RParams()) > 0 && !t.HasTParam()
+}
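+
+// Using the Value example from the rparams comment above: the defined generic
+// type Value (whose rparams are its type params) is a base generic type, a
+// re-instantiation such as Value[T2] is an instantiated generic type that
+// still has type params, and Value[int] is a fully instantiated type.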
+
+// NoPkg is a nil *Pkg value for clarity.
+// It's intended for use when constructing types that aren't exported
+// and thus don't need to be associated with any package.
+var NoPkg *Pkg = nil
+
+// Pkg returns the package that t appeared in.
+//
+// Pkg is only defined for function, struct, and interface types
+// (i.e., types with named elements). This information isn't used by
+// cmd/compile itself, but we need to track it because it's exposed by
+// the go/types API.
+func (t *Type) Pkg() *Pkg {
+ switch t.kind {
+ case TFUNC:
+ return t.extra.(*Func).pkg
+ case TSTRUCT:
+ return t.extra.(*Struct).pkg
+ case TINTER:
+ return t.extra.(*Interface).pkg
+ default:
+ base.Fatalf("Pkg: unexpected kind: %v", t)
+ return nil
+ }
+}
+
+// Map contains Type fields specific to maps.
+type Map struct {
+ Key *Type // Key type
+ Elem *Type // Val (elem) type
+
+ Bucket *Type // internal struct type representing a hash bucket
+ Hmap *Type // internal struct type representing the Hmap (map header object)
+ Hiter *Type // internal struct type representing hash iterator state
+}
+
+// MapType returns t's extra map-specific fields.
+func (t *Type) MapType() *Map {
+ t.wantEtype(TMAP)
+ return t.extra.(*Map)
+}
+
+// Forward contains Type fields specific to forward types.
+type Forward struct {
+ Copyto []*Type // where to copy the eventual value to
+ Embedlineno src.XPos // first use of this type as an embedded type
+}
+
+// ForwardType returns t's extra forward-type-specific fields.
+func (t *Type) ForwardType() *Forward {
+ t.wantEtype(TFORW)
+ return t.extra.(*Forward)
+}
+
+// Func contains Type fields specific to func types.
+type Func struct {
+ Receiver *Type // function receiver
+ Results *Type // function results
+ Params *Type // function params
+ TParams *Type // type params of receiver (if method) or function
+
+ pkg *Pkg
+
+ // Argwid is the total width of the function receiver, params, and results.
+ // It gets calculated via a temporary TFUNCARGS type.
+	// Note that TFUNC's Width is PtrSize.
+ Argwid int64
+}
+
+// FuncType returns t's extra func-specific fields.
+func (t *Type) FuncType() *Func {
+ t.wantEtype(TFUNC)
+ return t.extra.(*Func)
+}
+
+// Struct contains Type fields specific to struct types.
+type Struct struct {
+ fields Fields
+ pkg *Pkg
+
+ // Maps have three associated internal structs (see struct MapType).
+ // Map links such structs back to their map type.
+ Map *Type
+
+ Funarg Funarg // type of function arguments for arg struct
+}
+
+// Funarg records the kind of function argument.
+type Funarg uint8
+
+const (
+ FunargNone Funarg = iota
+ FunargRcvr // receiver
+ FunargParams // input parameters
+ FunargResults // output results
+ FunargTparams // type params
+)
+
+// StructType returns t's extra struct-specific fields.
+func (t *Type) StructType() *Struct {
+ t.wantEtype(TSTRUCT)
+ return t.extra.(*Struct)
+}
+
+// Interface contains Type fields specific to interface types.
+type Interface struct {
+ pkg *Pkg
+ implicit bool
+}
+
+// Typeparam contains Type fields specific to typeparam types.
+type Typeparam struct {
+ index int // type parameter index in source order, starting at 0
+ bound *Type
+}
+
+// Union contains Type fields specific to union types.
+type Union struct {
+ terms []*Type
+ tildes []bool // whether terms[i] is of form ~T
+}
+
+// Ptr contains Type fields specific to pointer types.
+type Ptr struct {
+ Elem *Type // element type
+}
+
+// ChanArgs contains Type fields specific to TCHANARGS types.
+type ChanArgs struct {
+ T *Type // reference to a chan type whose elements need a width check
+}
+
+// FuncArgs contains Type fields specific to TFUNCARGS types.
+type FuncArgs struct {
+ T *Type // reference to a func type whose elements need a width check
+}
+
+// Chan contains Type fields specific to channel types.
+type Chan struct {
+ Elem *Type // element type
+ Dir ChanDir // channel direction
+}
+
+// ChanType returns t's extra channel-specific fields.
+func (t *Type) ChanType() *Chan {
+ t.wantEtype(TCHAN)
+ return t.extra.(*Chan)
+}
+
+type Tuple struct {
+ first *Type
+ second *Type
+ // Any tuple with a memory type must put that memory type second.
+}
+
+// Results are the output from calls that will be late-expanded.
+type Results struct {
+ Types []*Type // Last element is memory output from call.
+}
+
+// Array contains Type fields specific to array types.
+type Array struct {
+ Elem *Type // element type
+ Bound int64 // number of elements; <0 if unknown yet
+}
+
+// Slice contains Type fields specific to slice types.
+type Slice struct {
+ Elem *Type // element type
+}
+
+// A Field is a (Sym, Type) pairing along with some other information, and,
+// depending on the context, is used to represent:
+// - a field in a struct
+// - a method in an interface or associated with a named type
+// - a function parameter
+type Field struct {
+ flags bitset8
+
+ Embedded uint8 // embedded field
+
+ Pos src.XPos
+
+ // Name of field/method/parameter. Can be nil for interface fields embedded
+ // in interfaces and unnamed parameters.
+ Sym *Sym
+ Type *Type // field type
+ Note string // literal string annotation
+
+ // For fields that represent function parameters, Nname points to the
+ // associated ONAME Node. For fields that represent methods, Nname points to
+ // the function name node.
+ Nname Object
+
+ // Offset in bytes of this field or method within its enclosing struct
+ // or interface Type. Exception: if field is function receiver, arg or
+	// result, then this is BOGUS_FUNARG_OFFSET; package types does not know the ABI.
+ Offset int64
+}
+
+const (
+ fieldIsDDD = 1 << iota // field is ... argument
+ fieldBroke // broken field definition
+ fieldNointerface
+)
+
+func (f *Field) IsDDD() bool { return f.flags&fieldIsDDD != 0 }
+func (f *Field) Broke() bool { return f.flags&fieldBroke != 0 }
+func (f *Field) Nointerface() bool { return f.flags&fieldNointerface != 0 }
+
+func (f *Field) SetIsDDD(b bool) { f.flags.set(fieldIsDDD, b) }
+func (f *Field) SetBroke(b bool) { f.flags.set(fieldBroke, b) }
+func (f *Field) SetNointerface(b bool) { f.flags.set(fieldNointerface, b) }
+
+// End returns the offset of the first byte immediately after this field.
+func (f *Field) End() int64 {
+ return f.Offset + f.Type.width
+}
+
+// IsMethod reports whether f represents a method rather than a struct field.
+func (f *Field) IsMethod() bool {
+ return f.Type.kind == TFUNC && f.Type.Recv() != nil
+}
+
+// Fields is a pointer to a slice of *Field.
+// This saves space in Types that do not have fields or methods
+// compared to a simple slice of *Field.
+type Fields struct {
+ s *[]*Field
+}
+
+// Len returns the number of entries in f.
+func (f *Fields) Len() int {
+ if f.s == nil {
+ return 0
+ }
+ return len(*f.s)
+}
+
+// Slice returns the entries in f as a slice.
+// Changes to the slice entries will be reflected in f.
+func (f *Fields) Slice() []*Field {
+ if f.s == nil {
+ return nil
+ }
+ return *f.s
+}
+
+// Index returns the i'th element of Fields.
+// It panics if f does not have at least i+1 elements.
+func (f *Fields) Index(i int) *Field {
+ return (*f.s)[i]
+}
+
+// Set sets f to a slice.
+// This takes ownership of the slice.
+func (f *Fields) Set(s []*Field) {
+ if len(s) == 0 {
+ f.s = nil
+ } else {
+ // Copy s and take address of t rather than s to avoid
+ // allocation in the case where len(s) == 0.
+ t := s
+ f.s = &t
+ }
+}
+
+// Append appends entries to f.
+func (f *Fields) Append(s ...*Field) {
+ if f.s == nil {
+ f.s = new([]*Field)
+ }
+ *f.s = append(*f.s, s...)
+}
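+
+// Fields is typically populated all at once via Set (see Type.SetFields and
+// Type.SetInterface below) and read back with Len, Slice, or Index.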
+
+// newType returns a new Type of the specified kind.
+func newType(et Kind) *Type {
+ t := &Type{
+ kind: et,
+ width: BADWIDTH,
+ }
+ t.underlying = t
+ // TODO(josharian): lazily initialize some of these?
+ switch t.kind {
+ case TMAP:
+ t.extra = new(Map)
+ case TFORW:
+ t.extra = new(Forward)
+ case TFUNC:
+ t.extra = new(Func)
+ case TSTRUCT:
+ t.extra = new(Struct)
+ case TINTER:
+ t.extra = new(Interface)
+ case TPTR:
+ t.extra = Ptr{}
+ case TCHANARGS:
+ t.extra = ChanArgs{}
+ case TFUNCARGS:
+ t.extra = FuncArgs{}
+ case TCHAN:
+ t.extra = new(Chan)
+ case TTUPLE:
+ t.extra = new(Tuple)
+ case TRESULTS:
+ t.extra = new(Results)
+ case TTYPEPARAM:
+ t.extra = new(Typeparam)
+ case TUNION:
+ t.extra = new(Union)
+ }
+ return t
+}
+
+// NewArray returns a new fixed-length array Type.
+func NewArray(elem *Type, bound int64) *Type {
+ if bound < 0 {
+ base.Fatalf("NewArray: invalid bound %v", bound)
+ }
+ t := newType(TARRAY)
+ t.extra = &Array{Elem: elem, Bound: bound}
+ t.SetNotInHeap(elem.NotInHeap())
+ if elem.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewSlice returns the slice Type with element type elem.
+func NewSlice(elem *Type) *Type {
+ if t := elem.cache.slice; t != nil {
+ if t.Elem() != elem {
+ base.Fatalf("elem mismatch")
+ }
+ if elem.HasTParam() != t.HasTParam() || elem.HasShape() != t.HasShape() {
+ base.Fatalf("Incorrect HasTParam/HasShape flag for cached slice type")
+ }
+ return t
+ }
+
+ t := newType(TSLICE)
+ t.extra = Slice{Elem: elem}
+ elem.cache.slice = t
+ if elem.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewChan returns a new chan Type with direction dir.
+func NewChan(elem *Type, dir ChanDir) *Type {
+ t := newType(TCHAN)
+ ct := t.ChanType()
+ ct.Elem = elem
+ ct.Dir = dir
+ if elem.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+func NewTuple(t1, t2 *Type) *Type {
+ t := newType(TTUPLE)
+ t.extra.(*Tuple).first = t1
+ t.extra.(*Tuple).second = t2
+ if t1.HasTParam() || t2.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if t1.HasShape() || t2.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+func newResults(types []*Type) *Type {
+ t := newType(TRESULTS)
+ t.extra.(*Results).Types = types
+ return t
+}
+
+func NewResults(types []*Type) *Type {
+ if len(types) == 1 && types[0] == TypeMem {
+ return TypeResultMem
+ }
+ return newResults(types)
+}
+
+func newSSA(name string) *Type {
+ t := newType(TSSA)
+ t.extra = name
+ return t
+}
+
+// NewMap returns a new map Type with key type k and element (aka value) type v.
+func NewMap(k, v *Type) *Type {
+ t := newType(TMAP)
+ mt := t.MapType()
+ mt.Key = k
+ mt.Elem = v
+ if k.HasTParam() || v.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if k.HasShape() || v.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+// NewPtrCacheEnabled controls whether *T Types are cached in T.
+// Caching is disabled just before starting the backend.
+// This allows the backend to run concurrently.
+var NewPtrCacheEnabled = true
+
+// NewPtr returns the pointer type pointing to t.
+func NewPtr(elem *Type) *Type {
+ if elem == nil {
+ base.Fatalf("NewPtr: pointer to elem Type is nil")
+ }
+
+ if t := elem.cache.ptr; t != nil {
+ if t.Elem() != elem {
+ base.Fatalf("NewPtr: elem mismatch")
+ }
+ if elem.HasTParam() != t.HasTParam() || elem.HasShape() != t.HasShape() {
+ base.Fatalf("Incorrect HasTParam/HasShape flag for cached pointer type")
+ }
+ return t
+ }
+
+ t := newType(TPTR)
+ t.extra = Ptr{Elem: elem}
+ t.width = int64(PtrSize)
+ t.align = uint8(PtrSize)
+ if NewPtrCacheEnabled {
+ elem.cache.ptr = t
+ }
+ if elem.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if elem.HasShape() {
+ t.SetHasShape(true)
+ }
+ return t
+}
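+
+// The elem.cache.ptr entry recorded above is also what IsPtrElem (below) checks
+// to report whether a pointer type pointing to t has been constructed.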
+
+// NewChanArgs returns a new TCHANARGS type for channel type c.
+func NewChanArgs(c *Type) *Type {
+ t := newType(TCHANARGS)
+ t.extra = ChanArgs{T: c}
+ return t
+}
+
+// NewFuncArgs returns a new TFUNCARGS type for func type f.
+func NewFuncArgs(f *Type) *Type {
+ t := newType(TFUNCARGS)
+ t.extra = FuncArgs{T: f}
+ return t
+}
+
+func NewField(pos src.XPos, sym *Sym, typ *Type) *Field {
+ f := &Field{
+ Pos: pos,
+ Sym: sym,
+ Type: typ,
+ Offset: BADWIDTH,
+ }
+ if typ == nil {
+ f.SetBroke(true)
+ }
+ return f
+}
+
+// SubstAny walks t, replacing instances of the TANY placeholder type with
+// successive elements removed from types. It returns the substituted type.
+func SubstAny(t *Type, types *[]*Type) *Type {
+ if t == nil {
+ return nil
+ }
+
+ switch t.kind {
+ default:
+ // Leave the type unchanged.
+
+ case TANY:
+ if len(*types) == 0 {
+ base.Fatalf("SubstArgTypes: not enough argument types")
+ }
+ t = (*types)[0]
+ *types = (*types)[1:]
+
+ case TPTR:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra = Ptr{Elem: elem}
+ }
+
+ case TARRAY:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Array).Elem = elem
+ }
+
+ case TSLICE:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra = Slice{Elem: elem}
+ }
+
+ case TCHAN:
+ elem := SubstAny(t.Elem(), types)
+ if elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Chan).Elem = elem
+ }
+
+ case TMAP:
+ key := SubstAny(t.Key(), types)
+ elem := SubstAny(t.Elem(), types)
+ if key != t.Key() || elem != t.Elem() {
+ t = t.copy()
+ t.extra.(*Map).Key = key
+ t.extra.(*Map).Elem = elem
+ }
+
+ case TFUNC:
+ recvs := SubstAny(t.Recvs(), types)
+ params := SubstAny(t.Params(), types)
+ results := SubstAny(t.Results(), types)
+ if recvs != t.Recvs() || params != t.Params() || results != t.Results() {
+ t = t.copy()
+ t.FuncType().Receiver = recvs
+ t.FuncType().Results = results
+ t.FuncType().Params = params
+ }
+
+ case TSTRUCT:
+ // Make a copy of all fields, including ones whose type does not change.
+ // This prevents aliasing across functions, which can lead to later
+ // fields getting their Offset incorrectly overwritten.
+ fields := t.FieldSlice()
+ nfs := make([]*Field, len(fields))
+ for i, f := range fields {
+ nft := SubstAny(f.Type, types)
+ nfs[i] = f.Copy()
+ nfs[i].Type = nft
+ }
+ t = t.copy()
+ t.SetFields(nfs)
+ }
+
+ return t
+}
+
+// copy returns a shallow copy of the Type.
+func (t *Type) copy() *Type {
+ if t == nil {
+ return nil
+ }
+ nt := *t
+ // copy any *T Extra fields, to avoid aliasing
+ switch t.kind {
+ case TMAP:
+ x := *t.extra.(*Map)
+ nt.extra = &x
+ case TFORW:
+ x := *t.extra.(*Forward)
+ nt.extra = &x
+ case TFUNC:
+ x := *t.extra.(*Func)
+ nt.extra = &x
+ case TSTRUCT:
+ x := *t.extra.(*Struct)
+ nt.extra = &x
+ case TINTER:
+ x := *t.extra.(*Interface)
+ nt.extra = &x
+ case TCHAN:
+ x := *t.extra.(*Chan)
+ nt.extra = &x
+ case TARRAY:
+ x := *t.extra.(*Array)
+ nt.extra = &x
+ case TTYPEPARAM:
+ base.Fatalf("typeparam types cannot be copied")
+ case TTUPLE, TSSA, TRESULTS:
+ base.Fatalf("ssa types cannot be copied")
+ }
+ // TODO(mdempsky): Find out why this is necessary and explain.
+ if t.underlying == t {
+ nt.underlying = &nt
+ }
+ return &nt
+}
+
+func (f *Field) Copy() *Field {
+ nf := *f
+ return &nf
+}
+
+func (t *Type) wantEtype(et Kind) {
+ if t.kind != et {
+ base.Fatalf("want %v, but have %v", et, t)
+ }
+}
+
+func (t *Type) Recvs() *Type { return t.FuncType().Receiver }
+func (t *Type) TParams() *Type { return t.FuncType().TParams }
+func (t *Type) Params() *Type { return t.FuncType().Params }
+func (t *Type) Results() *Type { return t.FuncType().Results }
+
+func (t *Type) NumRecvs() int { return t.FuncType().Receiver.NumFields() }
+func (t *Type) NumTParams() int { return t.FuncType().TParams.NumFields() }
+func (t *Type) NumParams() int { return t.FuncType().Params.NumFields() }
+func (t *Type) NumResults() int { return t.FuncType().Results.NumFields() }
+
+// IsVariadic reports whether function type t is variadic.
+func (t *Type) IsVariadic() bool {
+ n := t.NumParams()
+ return n > 0 && t.Params().Field(n-1).IsDDD()
+}
+
+// Recv returns the receiver of function type t, if any.
+func (t *Type) Recv() *Field {
+ s := t.Recvs()
+ if s.NumFields() == 0 {
+ return nil
+ }
+ return s.Field(0)
+}
+
+// RecvsParamsResults stores the accessor functions for a function Type's
+// receiver, parameters, and result parameters, in that order.
+// It can be used to iterate over all of a function's parameter lists.
+var RecvsParamsResults = [3]func(*Type) *Type{
+ (*Type).Recvs, (*Type).Params, (*Type).Results,
+}
+
+// RecvsParams is like RecvsParamsResults, but omits result parameters.
+var RecvsParams = [2]func(*Type) *Type{
+ (*Type).Recvs, (*Type).Params,
+}
+
+// ParamsResults is like RecvsParamsResults, but omits receiver parameters.
+var ParamsResults = [2]func(*Type) *Type{
+ (*Type).Params, (*Type).Results,
+}
+
+// Key returns the key type of map type t.
+func (t *Type) Key() *Type {
+ t.wantEtype(TMAP)
+ return t.extra.(*Map).Key
+}
+
+// Elem returns the type of elements of t.
+// Usable with pointers, channels, arrays, slices, and maps.
+func (t *Type) Elem() *Type {
+ switch t.kind {
+ case TPTR:
+ return t.extra.(Ptr).Elem
+ case TARRAY:
+ return t.extra.(*Array).Elem
+ case TSLICE:
+ return t.extra.(Slice).Elem
+ case TCHAN:
+ return t.extra.(*Chan).Elem
+ case TMAP:
+ return t.extra.(*Map).Elem
+ }
+ base.Fatalf("Type.Elem %s", t.kind)
+ return nil
+}
+
+// ChanArgs returns the channel type for TCHANARGS type t.
+func (t *Type) ChanArgs() *Type {
+ t.wantEtype(TCHANARGS)
+ return t.extra.(ChanArgs).T
+}
+
+// FuncArgs returns the func type for TFUNCARGS type t.
+func (t *Type) FuncArgs() *Type {
+ t.wantEtype(TFUNCARGS)
+ return t.extra.(FuncArgs).T
+}
+
+// IsFuncArgStruct reports whether t is a struct representing function parameters or results.
+func (t *Type) IsFuncArgStruct() bool {
+ return t.kind == TSTRUCT && t.extra.(*Struct).Funarg != FunargNone
+}
+
+// Methods returns a pointer to the base methods (excluding embedding) for type t.
+// These can either be concrete methods (for non-interface types) or interface
+// methods (for interface types).
+func (t *Type) Methods() *Fields {
+ return &t.methods
+}
+
+// AllMethods returns a pointer to all the methods (including embedding) for type t.
+// For an interface type, this is the set of methods that are typically iterated
+// over. For non-interface types, AllMethods() only returns a valid result after
+// CalcMethods() has been called at least once.
+func (t *Type) AllMethods() *Fields {
+ if t.kind == TINTER {
+ // Calculate the full method set of an interface type on the fly
+ // now, if not done yet.
+ CalcSize(t)
+ }
+ return &t.allMethods
+}
+
+// SetAllMethods sets the set of all methods (including embedding) for type t.
+// Use this method instead of t.AllMethods().Set(), which might call CalcSize() on
+// an uninitialized interface type.
+func (t *Type) SetAllMethods(fs []*Field) {
+ t.allMethods.Set(fs)
+}
+
+// Fields returns the fields of struct type t.
+func (t *Type) Fields() *Fields {
+ t.wantEtype(TSTRUCT)
+ return &t.extra.(*Struct).fields
+}
+
+// Field returns the i'th field of struct type t.
+func (t *Type) Field(i int) *Field {
+ return t.Fields().Slice()[i]
+}
+
+// FieldSlice returns a slice containing all fields of struct type t.
+func (t *Type) FieldSlice() []*Field {
+ return t.Fields().Slice()
+}
+
+// SetFields sets struct type t's fields to fields.
+func (t *Type) SetFields(fields []*Field) {
+ // If we've calculated the width of t before,
+ // then some other type such as a function signature
+ // might now have the wrong type.
+ // Rather than try to track and invalidate those,
+ // enforce that SetFields cannot be called once
+ // t's width has been calculated.
+ if t.widthCalculated() {
+ base.Fatalf("SetFields of %v: width previously calculated", t)
+ }
+ t.wantEtype(TSTRUCT)
+ for _, f := range fields {
+ // If type T contains a field F with a go:notinheap
+ // type, then T must also be go:notinheap. Otherwise,
+ // you could heap allocate T and then get a pointer F,
+ // which would be a heap pointer to a go:notinheap
+ // type.
+ if f.Type != nil && f.Type.NotInHeap() {
+ t.SetNotInHeap(true)
+ break
+ }
+ }
+ t.Fields().Set(fields)
+}
+
+// SetInterface sets the base methods of an interface type t.
+func (t *Type) SetInterface(methods []*Field) {
+ t.wantEtype(TINTER)
+ t.Methods().Set(methods)
+}
+
+// ArgWidth returns the total aligned argument size for a function.
+// It includes the receiver, parameters, and results.
+func (t *Type) ArgWidth() int64 {
+ t.wantEtype(TFUNC)
+ return t.extra.(*Func).Argwid
+}
+
+func (t *Type) Size() int64 {
+ if t.kind == TSSA {
+ if t == TypeInt128 {
+ return 16
+ }
+ return 0
+ }
+ CalcSize(t)
+ return t.width
+}
+
+func (t *Type) Alignment() int64 {
+ CalcSize(t)
+ return int64(t.align)
+}
+
+func (t *Type) SimpleString() string {
+ return t.kind.String()
+}
+
+// Cmp is a comparison between values a and b.
+// -1 if a < b
+// 0 if a == b
+// 1 if a > b
+type Cmp int8
+
+const (
+ CMPlt = Cmp(-1)
+ CMPeq = Cmp(0)
+ CMPgt = Cmp(1)
+)
+
+// Compare compares types for purposes of the SSA back
+// end, returning a Cmp (one of CMPlt, CMPeq, CMPgt).
+// The answers are correct for an optimizer
+// or code generator, but not necessarily for typechecking.
+// The order chosen is arbitrary; only consistency and division
+// into equivalence classes (Types that compare CMPeq) matter.
+func (t *Type) Compare(x *Type) Cmp {
+ if x == t {
+ return CMPeq
+ }
+ return t.cmp(x)
+}
+
+func cmpForNe(x bool) Cmp {
+ if x {
+ return CMPlt
+ }
+ return CMPgt
+}
+
+func (r *Sym) cmpsym(s *Sym) Cmp {
+ if r == s {
+ return CMPeq
+ }
+ if r == nil {
+ return CMPlt
+ }
+ if s == nil {
+ return CMPgt
+ }
+ // Fast sort, not pretty sort
+ if len(r.Name) != len(s.Name) {
+ return cmpForNe(len(r.Name) < len(s.Name))
+ }
+ if r.Pkg != s.Pkg {
+ if len(r.Pkg.Prefix) != len(s.Pkg.Prefix) {
+ return cmpForNe(len(r.Pkg.Prefix) < len(s.Pkg.Prefix))
+ }
+ if r.Pkg.Prefix != s.Pkg.Prefix {
+ return cmpForNe(r.Pkg.Prefix < s.Pkg.Prefix)
+ }
+ }
+ if r.Name != s.Name {
+ return cmpForNe(r.Name < s.Name)
+ }
+ return CMPeq
+}
+
+// cmp compares two *Types t and x, returning CMPlt,
+// CMPeq, CMPgt as t<x, t==x, t>x, for an arbitrary
+// and optimizer-centric notion of comparison.
+// TODO(josharian): make this safe for recursive interface types
+// and use in signatlist sorting. See issue 19869.
+func (t *Type) cmp(x *Type) Cmp {
+ // This follows the structure of function identical in identity.go
+ // with two exceptions.
+ // 1. Symbols are compared more carefully because a <,=,> result is desired.
+ // 2. Maps are treated specially to avoid endless recursion -- maps
+ // contain an internal data type not expressible in Go source code.
+ if t == x {
+ return CMPeq
+ }
+ if t == nil {
+ return CMPlt
+ }
+ if x == nil {
+ return CMPgt
+ }
+
+ if t.kind != x.kind {
+ return cmpForNe(t.kind < x.kind)
+ }
+
+ if t.sym != nil || x.sym != nil {
+ // Special case: we keep byte and uint8 separate
+ // for error messages. Treat them as equal.
+ switch t.kind {
+ case TUINT8:
+ if (t == Types[TUINT8] || t == ByteType) && (x == Types[TUINT8] || x == ByteType) {
+ return CMPeq
+ }
+
+ case TINT32:
+ if (t == Types[RuneType.kind] || t == RuneType) && (x == Types[RuneType.kind] || x == RuneType) {
+ return CMPeq
+ }
+
+ case TINTER:
+ // Make sure named any type matches any empty interface.
+ if t == AnyType && x.IsEmptyInterface() || x == AnyType && t.IsEmptyInterface() {
+ return CMPeq
+ }
+ }
+ }
+
+ if c := t.sym.cmpsym(x.sym); c != CMPeq {
+ return c
+ }
+
+ if x.sym != nil {
+ // Syms non-nil, if vargens match then equal.
+ if t.vargen != x.vargen {
+ return cmpForNe(t.vargen < x.vargen)
+ }
+ return CMPeq
+ }
+ // both syms nil, look at structure below.
+
+ switch t.kind {
+ case TBOOL, TFLOAT32, TFLOAT64, TCOMPLEX64, TCOMPLEX128, TUNSAFEPTR, TUINTPTR,
+ TINT8, TINT16, TINT32, TINT64, TINT, TUINT8, TUINT16, TUINT32, TUINT64, TUINT:
+ return CMPeq
+
+ case TSSA:
+ tname := t.extra.(string)
+ xname := x.extra.(string)
+ // desire fast sorting, not pretty sorting.
+ if len(tname) == len(xname) {
+ if tname == xname {
+ return CMPeq
+ }
+ if tname < xname {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ if len(tname) > len(xname) {
+ return CMPgt
+ }
+ return CMPlt
+
+ case TTUPLE:
+ xtup := x.extra.(*Tuple)
+ ttup := t.extra.(*Tuple)
+ if c := ttup.first.Compare(xtup.first); c != CMPeq {
+ return c
+ }
+ return ttup.second.Compare(xtup.second)
+
+ case TRESULTS:
+ xResults := x.extra.(*Results)
+ tResults := t.extra.(*Results)
+ xl, tl := len(xResults.Types), len(tResults.Types)
+ if tl != xl {
+ if tl < xl {
+ return CMPlt
+ }
+ return CMPgt
+ }
+ for i := 0; i < tl; i++ {
+ if c := tResults.Types[i].Compare(xResults.Types[i]); c != CMPeq {
+ return c
+ }
+ }
+ return CMPeq
+
+ case TMAP:
+ if c := t.Key().cmp(x.Key()); c != CMPeq {
+ return c
+ }
+ return t.Elem().cmp(x.Elem())
+
+ case TPTR, TSLICE:
+ // No special cases for these, they are handled
+ // by the general code after the switch.
+
+ case TSTRUCT:
+ if t.StructType().Map == nil {
+ if x.StructType().Map != nil {
+ return CMPlt // nil < non-nil
+ }
+			// Both Maps are nil; fall through to the general field comparison below.
+ } else if x.StructType().Map == nil {
+ return CMPgt // nil > non-nil
+ } else if t.StructType().Map.MapType().Bucket == t {
+ // Both have non-nil Map
+ // Special case for Maps which include a recursive type where the recursion is not broken with a named type
+ if x.StructType().Map.MapType().Bucket != x {
+ return CMPlt // bucket maps are least
+ }
+ return t.StructType().Map.cmp(x.StructType().Map)
+ } else if x.StructType().Map.MapType().Bucket == x {
+ return CMPgt // bucket maps are least
+ } // If t != t.Map.Bucket, fall through to general case
+
+ tfs := t.FieldSlice()
+ xfs := x.FieldSlice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if t1.Embedded != x1.Embedded {
+ return cmpForNe(t1.Embedded < x1.Embedded)
+ }
+ if t1.Note != x1.Note {
+ return cmpForNe(t1.Note < x1.Note)
+ }
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TINTER:
+ tfs := t.AllMethods().Slice()
+ xfs := x.AllMethods().Slice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ t1, x1 := tfs[i], xfs[i]
+ if c := t1.Sym.cmpsym(x1.Sym); c != CMPeq {
+ return c
+ }
+ if c := t1.Type.cmp(x1.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ return CMPeq
+
+ case TFUNC:
+ for _, f := range RecvsParamsResults {
+ // Loop over fields in structs, ignoring argument names.
+ tfs := f(t).FieldSlice()
+ xfs := f(x).FieldSlice()
+ for i := 0; i < len(tfs) && i < len(xfs); i++ {
+ ta := tfs[i]
+ tb := xfs[i]
+ if ta.IsDDD() != tb.IsDDD() {
+ return cmpForNe(!ta.IsDDD())
+ }
+ if c := ta.Type.cmp(tb.Type); c != CMPeq {
+ return c
+ }
+ }
+ if len(tfs) != len(xfs) {
+ return cmpForNe(len(tfs) < len(xfs))
+ }
+ }
+ return CMPeq
+
+ case TARRAY:
+ if t.NumElem() != x.NumElem() {
+ return cmpForNe(t.NumElem() < x.NumElem())
+ }
+
+ case TCHAN:
+ if t.ChanDir() != x.ChanDir() {
+ return cmpForNe(t.ChanDir() < x.ChanDir())
+ }
+
+ default:
+ e := fmt.Sprintf("Do not know how to compare %v with %v", t, x)
+ panic(e)
+ }
+
+ // Common element type comparison for TARRAY, TCHAN, TPTR, and TSLICE.
+ return t.Elem().cmp(x.Elem())
+}
+
+// IsKind reports whether t is a Type of the specified kind.
+func (t *Type) IsKind(et Kind) bool {
+ return t != nil && t.kind == et
+}
+
+func (t *Type) IsBoolean() bool {
+ return t.kind == TBOOL
+}
+
+var unsignedEType = [...]Kind{
+ TINT8: TUINT8,
+ TUINT8: TUINT8,
+ TINT16: TUINT16,
+ TUINT16: TUINT16,
+ TINT32: TUINT32,
+ TUINT32: TUINT32,
+ TINT64: TUINT64,
+ TUINT64: TUINT64,
+ TINT: TUINT,
+ TUINT: TUINT,
+ TUINTPTR: TUINTPTR,
+}
+
+// ToUnsigned returns the unsigned equivalent of integer type t.
+func (t *Type) ToUnsigned() *Type {
+ if !t.IsInteger() {
+ base.Fatalf("unsignedType(%v)", t)
+ }
+ return Types[unsignedEType[t.kind]]
+}
+
+func (t *Type) IsInteger() bool {
+ switch t.kind {
+ case TINT8, TUINT8, TINT16, TUINT16, TINT32, TUINT32, TINT64, TUINT64, TINT, TUINT, TUINTPTR:
+ return true
+ }
+ return t == UntypedInt || t == UntypedRune
+}
+
+func (t *Type) IsSigned() bool {
+ switch t.kind {
+ case TINT8, TINT16, TINT32, TINT64, TINT:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsUnsigned() bool {
+ switch t.kind {
+ case TUINT8, TUINT16, TUINT32, TUINT64, TUINT, TUINTPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsFloat() bool {
+ return t.kind == TFLOAT32 || t.kind == TFLOAT64 || t == UntypedFloat
+}
+
+func (t *Type) IsComplex() bool {
+ return t.kind == TCOMPLEX64 || t.kind == TCOMPLEX128 || t == UntypedComplex
+}
+
+// IsPtr reports whether t is a regular Go pointer type.
+// This does not include unsafe.Pointer.
+func (t *Type) IsPtr() bool {
+ return t.kind == TPTR
+}
+
+// IsPtrElem reports whether t is the element of a pointer (to t).
+func (t *Type) IsPtrElem() bool {
+ return t.cache.ptr != nil
+}
+
+// IsUnsafePtr reports whether t is an unsafe pointer.
+func (t *Type) IsUnsafePtr() bool {
+ return t.kind == TUNSAFEPTR
+}
+
+// IsUintptr reports whether t is an uintptr.
+func (t *Type) IsUintptr() bool {
+ return t.kind == TUINTPTR
+}
+
+// IsPtrShaped reports whether t is represented by a single machine pointer.
+// In addition to regular Go pointer types, this includes map, channel, and
+// function types and unsafe.Pointer. It does not include array or struct types
+// that consist of a single pointer-shaped type.
+// TODO(mdempsky): Should it? See golang.org/issue/15028.
+func (t *Type) IsPtrShaped() bool {
+ return t.kind == TPTR || t.kind == TUNSAFEPTR ||
+ t.kind == TMAP || t.kind == TCHAN || t.kind == TFUNC
+}
+
+// HasNil reports whether the set of values determined by t includes nil.
+func (t *Type) HasNil() bool {
+ switch t.kind {
+ case TCHAN, TFUNC, TINTER, TMAP, TNIL, TPTR, TSLICE, TUNSAFEPTR:
+ return true
+ }
+ return false
+}
+
+func (t *Type) IsString() bool {
+ return t.kind == TSTRING
+}
+
+func (t *Type) IsMap() bool {
+ return t.kind == TMAP
+}
+
+func (t *Type) IsChan() bool {
+ return t.kind == TCHAN
+}
+
+func (t *Type) IsSlice() bool {
+ return t.kind == TSLICE
+}
+
+func (t *Type) IsArray() bool {
+ return t.kind == TARRAY
+}
+
+func (t *Type) IsStruct() bool {
+ return t.kind == TSTRUCT
+}
+
+func (t *Type) IsInterface() bool {
+ return t.kind == TINTER
+}
+
+func (t *Type) IsUnion() bool {
+ return t.kind == TUNION
+}
+
+func (t *Type) IsTypeParam() bool {
+ return t.kind == TTYPEPARAM
+}
+
+// IsEmptyInterface reports whether t is an empty interface type.
+func (t *Type) IsEmptyInterface() bool {
+ return t.IsInterface() && t.AllMethods().Len() == 0
+}
+
+// IsScalar reports whether 't' is a scalar Go type, e.g.
+// bool/int/float/complex. Note that struct and array types consisting
+// of a single scalar element are not considered scalar; likewise,
+// pointer types are not considered scalar.
+func (t *Type) IsScalar() bool {
+ switch t.kind {
+ case TBOOL, TINT8, TUINT8, TINT16, TUINT16, TINT32,
+ TUINT32, TINT64, TUINT64, TINT, TUINT,
+ TUINTPTR, TCOMPLEX64, TCOMPLEX128, TFLOAT32, TFLOAT64:
+ return true
+ }
+ return false
+}
+
+func (t *Type) PtrTo() *Type {
+ return NewPtr(t)
+}
+
+func (t *Type) NumFields() int {
+ if t.kind == TRESULTS {
+ return len(t.extra.(*Results).Types)
+ }
+ return t.Fields().Len()
+}
+func (t *Type) FieldType(i int) *Type {
+ if t.kind == TTUPLE {
+ switch i {
+ case 0:
+ return t.extra.(*Tuple).first
+ case 1:
+ return t.extra.(*Tuple).second
+ default:
+ panic("bad tuple index")
+ }
+ }
+ if t.kind == TRESULTS {
+ return t.extra.(*Results).Types[i]
+ }
+ return t.Field(i).Type
+}
+func (t *Type) FieldOff(i int) int64 {
+ return t.Field(i).Offset
+}
+func (t *Type) FieldName(i int) string {
+ return t.Field(i).Sym.Name
+}
+
+func (t *Type) NumElem() int64 {
+ t.wantEtype(TARRAY)
+ return t.extra.(*Array).Bound
+}
+
+type componentsIncludeBlankFields bool
+
+const (
+ IgnoreBlankFields componentsIncludeBlankFields = false
+ CountBlankFields componentsIncludeBlankFields = true
+)
+
+// NumComponents returns the number of primitive elements that compose t.
+// Struct and array types are flattened for the purpose of counting.
+// All other types (including string, slice, and interface types) count as one element.
+// If countBlank is IgnoreBlankFields, then blank struct fields
+// (and their comprised elements) are excluded from the count.
+// struct { x, y [3]int } has six components; [10]struct{ x, y string } has twenty.
+func (t *Type) NumComponents(countBlank componentsIncludeBlankFields) int64 {
+ switch t.kind {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("NumComponents func arg struct")
+ }
+ var n int64
+ for _, f := range t.FieldSlice() {
+ if countBlank == IgnoreBlankFields && f.Sym.IsBlank() {
+ continue
+ }
+ n += f.Type.NumComponents(countBlank)
+ }
+ return n
+ case TARRAY:
+ return t.NumElem() * t.Elem().NumComponents(countBlank)
+ }
+ return 1
+}
+
+// SoleComponent returns the only primitive component in t,
+// if there is exactly one. Otherwise, it returns nil.
+// Components are counted as in NumComponents, including blank fields.
+func (t *Type) SoleComponent() *Type {
+ switch t.kind {
+ case TSTRUCT:
+ if t.IsFuncArgStruct() {
+ base.Fatalf("SoleComponent func arg struct")
+ }
+ if t.NumFields() != 1 {
+ return nil
+ }
+ return t.Field(0).Type.SoleComponent()
+ case TARRAY:
+ if t.NumElem() != 1 {
+ return nil
+ }
+ return t.Elem().SoleComponent()
+ }
+ return t
+}
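+
+// For example, SoleComponent of struct{ x [1]int64 } is int64, while
+// SoleComponent of struct{ x, y int64 } is nil.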
+
+// ChanDir returns the direction of a channel type t.
+// The direction will be one of Crecv, Csend, or Cboth.
+func (t *Type) ChanDir() ChanDir {
+ t.wantEtype(TCHAN)
+ return t.extra.(*Chan).Dir
+}
+
+func (t *Type) IsMemory() bool {
+ if t == TypeMem || t.kind == TTUPLE && t.extra.(*Tuple).second == TypeMem {
+ return true
+ }
+ if t.kind == TRESULTS {
+ if types := t.extra.(*Results).Types; len(types) > 0 && types[len(types)-1] == TypeMem {
+ return true
+ }
+ }
+ return false
+}
+func (t *Type) IsFlags() bool { return t == TypeFlags }
+func (t *Type) IsVoid() bool { return t == TypeVoid }
+func (t *Type) IsTuple() bool { return t.kind == TTUPLE }
+func (t *Type) IsResults() bool { return t.kind == TRESULTS }
+
+// IsUntyped reports whether t is an untyped type.
+func (t *Type) IsUntyped() bool {
+ if t == nil {
+ return false
+ }
+ if t == UntypedString || t == UntypedBool {
+ return true
+ }
+ switch t.kind {
+ case TNIL, TIDEAL:
+ return true
+ }
+ return false
+}
+
+// HasPointers reports whether t contains a heap pointer.
+// Note that this function ignores pointers to go:notinheap types.
+func (t *Type) HasPointers() bool {
+ return PtrDataSize(t) > 0
+}
+
+var recvType *Type
+
+// FakeRecvType returns the singleton type used for interface method receivers.
+func FakeRecvType() *Type {
+ if recvType == nil {
+ recvType = NewPtr(newType(TSTRUCT))
+ }
+ return recvType
+}
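+
+// IsInterfaceMethod (below) identifies interface methods by checking for this
+// exact singleton receiver type.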
+
+func FakeRecv() *Field {
+ return NewField(src.NoXPos, nil, FakeRecvType())
+}
+
+var (
+ // TSSA types. HasPointers assumes these are pointer-free.
+ TypeInvalid = newSSA("invalid")
+ TypeMem = newSSA("mem")
+ TypeFlags = newSSA("flags")
+ TypeVoid = newSSA("void")
+ TypeInt128 = newSSA("int128")
+ TypeResultMem = newResults([]*Type{TypeMem})
+)
+
+func init() {
+ TypeInt128.width = 16
+ TypeInt128.align = 8
+}
+
+// NewNamed returns a new named type for the given type name. obj should be an
+// ir.Name. The new type is incomplete (marked as TFORW kind), and the underlying
+// type should be set later via SetUnderlying(). References to the type are
+// maintained until the type is filled in, so those references can be updated when
+// the type is complete.
+func NewNamed(obj TypeObject) *Type {
+ t := newType(TFORW)
+ t.sym = obj.Sym()
+ t.nod = obj
+ if t.sym.Pkg == ShapePkg {
+ t.SetIsShape(true)
+ t.SetHasShape(true)
+ }
+ return t
+}
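+
+// A named type is therefore built in two steps: NewNamed creates the TFORW
+// placeholder, and SetUnderlying (below) later fills in the actual kind, size,
+// and underlying type, updating any waiting types recorded in
+// ForwardType().Copyto.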
+
+// Obj returns the canonical type name node for a named type t, nil for an unnamed type.
+func (t *Type) Obj() Object {
+ if t.sym != nil {
+ return t.nod
+ }
+ return nil
+}
+
+// typeGen tracks the number of function-scoped defined types that
+// have been declared. It's used to generate unique linker symbols for
+// their runtime type descriptors.
+var typeGen int32
+
+// SetVargen assigns a unique generation number to type t, which must
+// be a defined type declared within function scope. The generation
+// number is used to distinguish it from other similarly spelled
+// defined types from the same package.
+//
+// TODO(mdempsky): Come up with a better solution.
+func (t *Type) SetVargen() {
+ base.Assertf(t.Sym() != nil, "SetVargen on anonymous type %v", t)
+ base.Assertf(t.vargen == 0, "type %v already has Vargen %v", t, t.vargen)
+
+ typeGen++
+ t.vargen = typeGen
+}
+
+// SetUnderlying sets the underlying type of an incomplete type (i.e. type whose kind
+// is currently TFORW). SetUnderlying automatically updates any types that were waiting
+// for this type to be completed.
+func (t *Type) SetUnderlying(underlying *Type) {
+ if underlying.kind == TFORW {
+		// This type isn't computed yet; when it is, update t.
+ underlying.ForwardType().Copyto = append(underlying.ForwardType().Copyto, t)
+ return
+ }
+
+ ft := t.ForwardType()
+
+ // TODO(mdempsky): Fix Type rekinding.
+ t.kind = underlying.kind
+ t.extra = underlying.extra
+ t.width = underlying.width
+ t.align = underlying.align
+ t.underlying = underlying.underlying
+
+ if underlying.NotInHeap() {
+ t.SetNotInHeap(true)
+ }
+ if underlying.Broke() {
+ t.SetBroke(true)
+ }
+ if underlying.HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if underlying.HasShape() {
+ t.SetHasShape(true)
+ }
+
+ // spec: "The declared type does not inherit any methods bound
+ // to the existing type, but the method set of an interface
+ // type [...] remains unchanged."
+ if t.IsInterface() {
+ t.methods = underlying.methods
+ t.allMethods = underlying.allMethods
+ }
+
+ // Update types waiting on this type.
+ for _, w := range ft.Copyto {
+ w.SetUnderlying(t)
+ }
+
+ // Double-check use of type as embedded type.
+ if ft.Embedlineno.IsKnown() {
+ if t.IsPtr() || t.IsUnsafePtr() {
+ base.ErrorfAt(ft.Embedlineno, "embedded type cannot be a pointer")
+ }
+ }
+}
+
+func fieldsHasTParam(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Type != nil && f.Type.HasTParam() {
+ return true
+ }
+ }
+ return false
+}
+
+func fieldsHasShape(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Type != nil && f.Type.HasShape() {
+ return true
+ }
+ }
+ return false
+}
+
+// newBasic returns a new basic type of the given kind.
+func newBasic(kind Kind, obj Object) *Type {
+ t := newType(kind)
+ t.sym = obj.Sym()
+ t.nod = obj
+ return t
+}
+
+// NewInterface returns a new interface for the given methods and
+// embedded types. Embedded types are specified as fields with no Sym.
+func NewInterface(pkg *Pkg, methods []*Field, implicit bool) *Type {
+ t := newType(TINTER)
+ t.SetInterface(methods)
+ for _, f := range methods {
+ // f.Type could be nil for a broken interface declaration
+ if f.Type != nil && f.Type.HasTParam() {
+ t.SetHasTParam(true)
+ break
+ }
+ if f.Type != nil && f.Type.HasShape() {
+ t.SetHasShape(true)
+ break
+ }
+ }
+ if anyBroke(methods) {
+ t.SetBroke(true)
+ }
+ t.extra.(*Interface).pkg = pkg
+ t.extra.(*Interface).implicit = implicit
+ return t
+}
+
+// NewTypeParam returns a new type param with the specified sym (package and name)
+// and specified index within the typeparam list.
+func NewTypeParam(sym *Sym, index int) *Type {
+ t := newType(TTYPEPARAM)
+ t.sym = sym
+ t.extra.(*Typeparam).index = index
+ t.SetHasTParam(true)
+ return t
+}
+
+// Index returns the index of the type param within its param list.
+func (t *Type) Index() int {
+ t.wantEtype(TTYPEPARAM)
+ return t.extra.(*Typeparam).index
+}
+
+// SetIndex sets the index of the type param within its param list.
+func (t *Type) SetIndex(i int) {
+ t.wantEtype(TTYPEPARAM)
+ t.extra.(*Typeparam).index = i
+}
+
+// SetBound sets the bound of a typeparam.
+func (t *Type) SetBound(bound *Type) {
+ t.wantEtype(TTYPEPARAM)
+ t.extra.(*Typeparam).bound = bound
+}
+
+// Bound returns the bound of a typeparam.
+func (t *Type) Bound() *Type {
+ t.wantEtype(TTYPEPARAM)
+ return t.extra.(*Typeparam).bound
+}
+
+// IsImplicit reports whether an interface is implicit (i.e. elided from a type
+// parameter constraint).
+func (t *Type) IsImplicit() bool {
+ t.wantEtype(TINTER)
+ return t.extra.(*Interface).implicit
+}
+
+// MarkImplicit marks the interface as implicit.
+func (t *Type) MarkImplicit() {
+ t.wantEtype(TINTER)
+ t.extra.(*Interface).implicit = true
+}
+
+// NewUnion returns a new union with the specified set of terms (types). If
+// tildes[i] is true, then terms[i] represents ~T, rather than just T.
+func NewUnion(terms []*Type, tildes []bool) *Type {
+ t := newType(TUNION)
+ if len(terms) != len(tildes) {
+ base.Fatalf("Mismatched terms and tildes for NewUnion")
+ }
+ t.extra.(*Union).terms = terms
+ t.extra.(*Union).tildes = tildes
+ nt := len(terms)
+ for i := 0; i < nt; i++ {
+ if terms[i].HasTParam() {
+ t.SetHasTParam(true)
+ }
+ if terms[i].HasShape() {
+ t.SetHasShape(true)
+ }
+ }
+ return t
+}
+
+// NumTerms returns the number of terms in a union type.
+func (t *Type) NumTerms() int {
+ t.wantEtype(TUNION)
+ return len(t.extra.(*Union).terms)
+}
+
+// Term returns the i'th term of a union type as (term, tilde). If tilde is true, term
+// represents ~T, rather than just T.
+func (t *Type) Term(i int) (*Type, bool) {
+ t.wantEtype(TUNION)
+ u := t.extra.(*Union)
+ return u.terms[i], u.tildes[i]
+}
+
+const BOGUS_FUNARG_OFFSET = -1000000000
+
+func unzeroFieldOffsets(f []*Field) {
+ for i := range f {
+ f[i].Offset = BOGUS_FUNARG_OFFSET // This will cause an explosion if it is not corrected
+ }
+}
+
+// NewSignature returns a new function type for the given receiver,
+// parameters, results, and type parameters, any of which may be nil.
+func NewSignature(pkg *Pkg, recv *Field, tparams, params, results []*Field) *Type {
+ var recvs []*Field
+ if recv != nil {
+ recvs = []*Field{recv}
+ }
+
+ t := newType(TFUNC)
+ ft := t.FuncType()
+
+ funargs := func(fields []*Field, funarg Funarg) *Type {
+ s := NewStruct(NoPkg, fields)
+ s.StructType().Funarg = funarg
+ if s.Broke() {
+ t.SetBroke(true)
+ }
+ return s
+ }
+
+ if recv != nil {
+ recv.Offset = BOGUS_FUNARG_OFFSET
+ }
+ unzeroFieldOffsets(params)
+ unzeroFieldOffsets(results)
+ ft.Receiver = funargs(recvs, FunargRcvr)
+ // TODO(danscales): just use nil here (save memory) if no tparams
+ ft.TParams = funargs(tparams, FunargTparams)
+ ft.Params = funargs(params, FunargParams)
+ ft.Results = funargs(results, FunargResults)
+ ft.pkg = pkg
+ if len(tparams) > 0 || fieldsHasTParam(recvs) || fieldsHasTParam(params) ||
+ fieldsHasTParam(results) {
+ t.SetHasTParam(true)
+ }
+ if fieldsHasShape(recvs) || fieldsHasShape(params) || fieldsHasShape(results) {
+ t.SetHasShape(true)
+ }
+
+ return t
+}
+
+// NewStruct returns a new struct with the given fields.
+func NewStruct(pkg *Pkg, fields []*Field) *Type {
+ t := newType(TSTRUCT)
+ t.SetFields(fields)
+ if anyBroke(fields) {
+ t.SetBroke(true)
+ }
+ t.extra.(*Struct).pkg = pkg
+ if fieldsHasTParam(fields) {
+ t.SetHasTParam(true)
+ }
+ if fieldsHasShape(fields) {
+ t.SetHasShape(true)
+ }
+ return t
+}
+
+func anyBroke(fields []*Field) bool {
+ for _, f := range fields {
+ if f.Broke() {
+ return true
+ }
+ }
+ return false
+}
+
+var (
+ IsInt [NTYPE]bool
+ IsFloat [NTYPE]bool
+ IsComplex [NTYPE]bool
+ IsSimple [NTYPE]bool
+)
+
+var IsOrdered [NTYPE]bool
+
+// IsReflexive reports whether t has a reflexive equality operator.
+// That is, if x==x for all x of type t.
+func IsReflexive(t *Type) bool {
+ switch t.Kind() {
+ case TBOOL,
+ TINT,
+ TUINT,
+ TINT8,
+ TUINT8,
+ TINT16,
+ TUINT16,
+ TINT32,
+ TUINT32,
+ TINT64,
+ TUINT64,
+ TUINTPTR,
+ TPTR,
+ TUNSAFEPTR,
+ TSTRING,
+ TCHAN:
+ return true
+
+ case TFLOAT32,
+ TFLOAT64,
+ TCOMPLEX64,
+ TCOMPLEX128,
+ TINTER:
+ return false
+
+ case TARRAY:
+ return IsReflexive(t.Elem())
+
+ case TSTRUCT:
+ for _, t1 := range t.Fields().Slice() {
+ if !IsReflexive(t1.Type) {
+ return false
+ }
+ }
+ return true
+
+ default:
+ base.Fatalf("bad type for map key: %v", t)
+ return false
+ }
+}
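+
+// Floating-point and complex kinds are not reflexive because NaN != NaN, and
+// interfaces are not reflexive because their dynamic values may themselves be
+// non-reflexive (e.g. a NaN stored in an interface).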
+
+// IsDirectIface reports whether t can be stored directly in an interface
+// word, i.e. whether its representation is a single pointer.
+func IsDirectIface(t *Type) bool {
+ if t.Broke() {
+ return false
+ }
+
+ switch t.Kind() {
+ case TPTR:
+ // Pointers to notinheap types must be stored indirectly. See issue 42076.
+ return !t.Elem().NotInHeap()
+ case TCHAN,
+ TMAP,
+ TFUNC,
+ TUNSAFEPTR:
+ return true
+
+ case TARRAY:
+ // Array of 1 direct iface type can be direct.
+ return t.NumElem() == 1 && IsDirectIface(t.Elem())
+
+ case TSTRUCT:
+ // Struct with 1 field of direct iface type can be direct.
+ return t.NumFields() == 1 && IsDirectIface(t.Field(0).Type)
+ }
+
+ return false
+}
+
+// IsInterfaceMethod reports whether f is the type of
+// an interface method. Such methods have the
+// special receiver type types.FakeRecvType().
+func IsInterfaceMethod(f *Type) bool {
+ return f.Recv().Type == FakeRecvType()
+}
+
+// IsMethodApplicable reports whether method m can be called on a
+// value of type t. This is necessary because we compute a single
+// method set for both T and *T, but some *T methods are not
+// applicable to T receivers.
+func IsMethodApplicable(t *Type, m *Field) bool {
+ return t.IsPtr() || !m.Type.Recv().Type.IsPtr() || IsInterfaceMethod(m.Type) || m.Embedded == 2
+}
+
+// IsRuntimePkg reports whether p is package runtime.
+func IsRuntimePkg(p *Pkg) bool {
+ if base.Flag.CompilingRuntime && p == LocalPkg {
+ return true
+ }
+ return p.Path == "runtime"
+}
+
+// IsReflectPkg reports whether p is package reflect.
+func IsReflectPkg(p *Pkg) bool {
+ if p == LocalPkg {
+ return base.Ctxt.Pkgpath == "reflect"
+ }
+ return p.Path == "reflect"
+}
+
+// ReceiverBaseType returns the underlying type, if any,
+// that owns methods with receiver parameter t.
+// The result is either a named type or an anonymous struct.
+func ReceiverBaseType(t *Type) *Type {
+ if t == nil {
+ return nil
+ }
+
+ // Strip away pointer if it's there.
+ if t.IsPtr() {
+ if t.Sym() != nil {
+ return nil
+ }
+ t = t.Elem()
+ if t == nil {
+ return nil
+ }
+ }
+
+ // Must be a named type or anonymous struct.
+ if t.Sym() == nil && !t.IsStruct() {
+ return nil
+ }
+
+ // Check types.
+ if IsSimple[t.Kind()] {
+ return t
+ }
+ switch t.Kind() {
+ case TARRAY, TCHAN, TFUNC, TMAP, TSLICE, TSTRING, TSTRUCT:
+ return t
+ }
+ return nil
+}
+
+func FloatForComplex(t *Type) *Type {
+ switch t.Kind() {
+ case TCOMPLEX64:
+ return Types[TFLOAT32]
+ case TCOMPLEX128:
+ return Types[TFLOAT64]
+ }
+ base.Fatalf("unexpected type: %v", t)
+ return nil
+}
+
+func ComplexForFloat(t *Type) *Type {
+ switch t.Kind() {
+ case TFLOAT32:
+ return Types[TCOMPLEX64]
+ case TFLOAT64:
+ return Types[TCOMPLEX128]
+ }
+ base.Fatalf("unexpected type: %v", t)
+ return nil
+}
+
+func TypeSym(t *Type) *Sym {
+ return TypeSymLookup(TypeSymName(t))
+}
+
+func TypeSymLookup(name string) *Sym {
+ typepkgmu.Lock()
+ s := typepkg.Lookup(name)
+ typepkgmu.Unlock()
+ return s
+}
+
+func TypeSymName(t *Type) string {
+ name := t.LinkString()
+ // Use a separate symbol name for Noalg types for #17752.
+ if TypeHasNoAlg(t) {
+ name = "noalg." + name
+ }
+ return name
+}
+
+// typepkg is a fake package for runtime type info (headers).
+// Don't access it directly; use TypeSymLookup above.
+var (
+ typepkgmu sync.Mutex // protects typepkg lookups
+ typepkg = NewPkg("type", "type")
+)
+
+var SimType [NTYPE]Kind
+
+// Fake package for shape types (see typecheck.Shapify()).
+var ShapePkg = NewPkg("go.shape", "go.shape")
diff --git a/src/cmd/compile/internal/types/type_test.go b/src/cmd/compile/internal/types/type_test.go
new file mode 100644
index 0000000..1fd05b3
--- /dev/null
+++ b/src/cmd/compile/internal/types/type_test.go
@@ -0,0 +1,27 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "testing"
+)
+
+func TestSSACompare(t *testing.T) {
+ a := []*Type{
+ TypeInvalid,
+ TypeMem,
+ TypeFlags,
+ TypeVoid,
+ TypeInt128,
+ }
+ for _, x := range a {
+ for _, y := range a {
+ c := x.Compare(y)
+ if x == y && c != CMPeq || x != y && c == CMPeq {
+ t.Errorf("%s compare %s == %d\n", x.extra, y.extra, c)
+ }
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types/universe.go b/src/cmd/compile/internal/types/universe.go
new file mode 100644
index 0000000..55ed7bd
--- /dev/null
+++ b/src/cmd/compile/internal/types/universe.go
@@ -0,0 +1,159 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/internal/src"
+)
+
+var basicTypes = [...]struct {
+ name string
+ etype Kind
+}{
+ {"int8", TINT8},
+ {"int16", TINT16},
+ {"int32", TINT32},
+ {"int64", TINT64},
+ {"uint8", TUINT8},
+ {"uint16", TUINT16},
+ {"uint32", TUINT32},
+ {"uint64", TUINT64},
+ {"float32", TFLOAT32},
+ {"float64", TFLOAT64},
+ {"complex64", TCOMPLEX64},
+ {"complex128", TCOMPLEX128},
+ {"bool", TBOOL},
+ {"string", TSTRING},
+}
+
+var typedefs = [...]struct {
+ name string
+ etype Kind
+ sameas32 Kind
+ sameas64 Kind
+}{
+ {"int", TINT, TINT32, TINT64},
+ {"uint", TUINT, TUINT32, TUINT64},
+ {"uintptr", TUINTPTR, TUINT32, TUINT64},
+}
+
+func InitTypes(defTypeName func(sym *Sym, typ *Type) Object) {
+ if PtrSize == 0 {
+ base.Fatalf("typeinit before betypeinit")
+ }
+
+ SlicePtrOffset = 0
+ SliceLenOffset = Rnd(SlicePtrOffset+int64(PtrSize), int64(PtrSize))
+ SliceCapOffset = Rnd(SliceLenOffset+int64(PtrSize), int64(PtrSize))
+ SliceSize = Rnd(SliceCapOffset+int64(PtrSize), int64(PtrSize))
+
+	// string is the same as a slice without the cap
+ StringSize = Rnd(SliceLenOffset+int64(PtrSize), int64(PtrSize))
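+	// For example, on a 64-bit target (PtrSize == 8) this yields slice field
+	// offsets 0, 8, and 16, SliceSize == 24, and StringSize == 16.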
+
+ for et := Kind(0); et < NTYPE; et++ {
+ SimType[et] = et
+ }
+
+ Types[TANY] = newType(TANY) // note: an old placeholder type, NOT the new builtin 'any' alias for interface{}
+ Types[TINTER] = NewInterface(LocalPkg, nil, false)
+ CheckSize(Types[TINTER])
+
+ defBasic := func(kind Kind, pkg *Pkg, name string) *Type {
+ typ := newType(kind)
+ obj := defTypeName(pkg.Lookup(name), typ)
+ typ.sym = obj.Sym()
+ typ.nod = obj
+ if kind != TANY {
+ CheckSize(typ)
+ }
+ return typ
+ }
+
+ for _, s := range &basicTypes {
+ Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name)
+ }
+
+ for _, s := range &typedefs {
+ sameas := s.sameas32
+ if PtrSize == 8 {
+ sameas = s.sameas64
+ }
+ SimType[s.etype] = sameas
+
+ Types[s.etype] = defBasic(s.etype, BuiltinPkg, s.name)
+ }
+
+ // We create separate byte and rune types for better error messages
+	// rather than just creating type alias *Sym's for the uint8 and
+	// int32 types. Hence, (ByteType|RuneType).Sym.isAlias() is false.
+ // TODO(gri) Should we get rid of this special case (at the cost
+ // of less informative error messages involving bytes and runes)?
+ // NOTE(rsc): No, the error message quality is important.
+ // (Alternatively, we could introduce an OTALIAS node representing
+ // type aliases, albeit at the cost of having to deal with it everywhere).
+ ByteType = defBasic(TUINT8, BuiltinPkg, "byte")
+ RuneType = defBasic(TINT32, BuiltinPkg, "rune")
+
+ // error type
+ DeferCheckSize()
+ ErrorType = defBasic(TFORW, BuiltinPkg, "error")
+ ErrorType.SetUnderlying(makeErrorInterface())
+ ResumeCheckSize()
+
+ // comparable type (interface)
+ DeferCheckSize()
+ ComparableType = defBasic(TFORW, BuiltinPkg, "comparable")
+ ComparableType.SetUnderlying(makeComparableInterface())
+ ResumeCheckSize()
+
+ // any type (interface)
+ DeferCheckSize()
+ AnyType = defBasic(TFORW, BuiltinPkg, "any")
+ AnyType.SetUnderlying(NewInterface(BuiltinPkg, []*Field{}, false))
+ ResumeCheckSize()
+
+ if base.Flag.G == 0 {
+ ComparableType.Sym().Def = nil
+ }
+
+ Types[TUNSAFEPTR] = defBasic(TUNSAFEPTR, UnsafePkg, "Pointer")
+
+ Types[TBLANK] = newType(TBLANK)
+ Types[TNIL] = newType(TNIL)
+
+ // simple aliases
+ SimType[TMAP] = TPTR
+ SimType[TCHAN] = TPTR
+ SimType[TFUNC] = TPTR
+ SimType[TUNSAFEPTR] = TPTR
+
+ for et := TINT8; et <= TUINT64; et++ {
+ IsInt[et] = true
+ }
+ IsInt[TINT] = true
+ IsInt[TUINT] = true
+ IsInt[TUINTPTR] = true
+
+ IsFloat[TFLOAT32] = true
+ IsFloat[TFLOAT64] = true
+
+ IsComplex[TCOMPLEX64] = true
+ IsComplex[TCOMPLEX128] = true
+}
+
+func makeErrorInterface() *Type {
+ sig := NewSignature(NoPkg, FakeRecv(), nil, nil, []*Field{
+ NewField(src.NoXPos, nil, Types[TSTRING]),
+ })
+ method := NewField(src.NoXPos, LocalPkg.Lookup("Error"), sig)
+ return NewInterface(NoPkg, []*Field{method}, false)
+}
+
+// makeComparableInterface makes the predefined "comparable" interface in the
+// built-in package. It has a unique name, but no methods.
+func makeComparableInterface() *Type {
+ return NewInterface(NoPkg, nil, false)
+}
diff --git a/src/cmd/compile/internal/types/utils.go b/src/cmd/compile/internal/types/utils.go
new file mode 100644
index 0000000..f9f629c
--- /dev/null
+++ b/src/cmd/compile/internal/types/utils.go
@@ -0,0 +1,17 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types
+
+const BADWIDTH = -1000000000
+
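+// bitset8 holds up to eight boolean flags. Type and Field pack their boolean
+// properties into a bitset8; for example, Type.SetNotInHeap(b) in type.go is
+// implemented as t.flags.set(typeNotInHeap, b).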
+type bitset8 uint8
+
+func (f *bitset8) set(mask uint8, b bool) {
+ if b {
+ *(*uint8)(f) |= mask
+ } else {
+ *(*uint8)(f) &^= mask
+ }
+}
diff --git a/src/cmd/compile/internal/types2/api.go b/src/cmd/compile/internal/types2/api.go
new file mode 100644
index 0000000..d864c96
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api.go
@@ -0,0 +1,486 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package types2 declares the data types and implements
+// the algorithms for type-checking of Go packages. Use
+// Config.Check to invoke the type checker for a package.
+// Alternatively, create a new type checker with NewChecker
+// and invoke it incrementally by calling Checker.Files.
+//
+// Type-checking consists of several interdependent phases:
+//
+// Name resolution maps each identifier (syntax.Name) in the program to the
+// language object (Object) it denotes.
+// Use Info.{Defs,Uses,Implicits} for the results of name resolution.
+//
+// Constant folding computes the exact constant value (constant.Value)
+// for every expression (syntax.Expr) that is a compile-time constant.
+// Use Info.Types[expr].Value for the results of constant folding.
+//
+// Type inference computes the type (Type) of every expression (syntax.Expr)
+// and checks for compliance with the language specification.
+// Use Info.Types[expr].Type for the results of type inference.
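+//
+// A minimal sketch of typical usage, assuming parsed files are available as a
+// []*syntax.File named files (see Config.Check and Info for the exact API):
+//
+//	var conf Config
+//	info := &Info{Types: make(map[syntax.Expr]TypeAndValue)}
+//	pkg, err := conf.Check("path/to/pkg", files, info)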
+//
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+)
+
+// An Error describes a type-checking error; it implements the error interface.
+// A "soft" error is an error that still permits a valid interpretation of a
+// package (such as "unused variable"); "hard" errors may lead to unpredictable
+// behavior if ignored.
+type Error struct {
+ Pos syntax.Pos // error position
+ Msg string // default error message, user-friendly
+ Full string // full error message, for debugging (may contain internal details)
+ Soft bool // if set, error is "soft"
+}
+
+// Error returns an error string formatted as follows:
+// filename:line:column: message
+func (err Error) Error() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Msg)
+}
+
+// FullError returns an error string like Error, but it may contain
+// type-checker internal details such as subscript indices for type
+// parameters and more. Useful for debugging.
+func (err Error) FullError() string {
+ return fmt.Sprintf("%s: %s", err.Pos, err.Full)
+}
+
+// An ArgumentError holds an error associated with an argument index.
+type ArgumentError struct {
+ Index int
+ Err error
+}
+
+func (e *ArgumentError) Error() string { return e.Err.Error() }
+func (e *ArgumentError) Unwrap() error { return e.Err }
+
+// An Importer resolves import paths to Packages.
+//
+// CAUTION: This interface does not support the import of locally
+// vendored packages. See https://golang.org/s/go15vendor.
+// If possible, external implementations should implement ImporterFrom.
+type Importer interface {
+ // Import returns the imported package for the given import path.
+ // The semantics are the same as for ImporterFrom.ImportFrom, except that
+ // dir and mode are ignored (since they are not present).
+ Import(path string) (*Package, error)
+}
+
+// ImportMode is reserved for future use.
+type ImportMode int
+
+// An ImporterFrom resolves import paths to packages; it
+// supports vendoring per https://golang.org/s/go15vendor.
+// Use go/importer to obtain an ImporterFrom implementation.
+type ImporterFrom interface {
+ // Importer is present for backward-compatibility. Calling
+ // Import(path) is the same as calling ImportFrom(path, "", 0);
+ // i.e., locally vendored packages may not be found.
+ // The types package does not call Import if an ImporterFrom
+ // is present.
+ Importer
+
+ // ImportFrom returns the imported package for the given import
+ // path when imported by a package file located in dir.
+ // If the import failed, besides returning an error, ImportFrom
+ // is encouraged to cache and return a package anyway, if one
+ // was created. This will reduce package inconsistencies and
+ // follow-on type checker errors due to the missing package.
+ // The mode value must be 0; it is reserved for future use.
+ // Two calls to ImportFrom with the same path and dir must
+ // return the same package.
+ ImportFrom(path, dir string, mode ImportMode) (*Package, error)
+}
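+
+// mapImporter is an editor's illustrative sketch, not part of the upstream file:
+// a minimal Importer backed by a map of already type-checked packages, in the
+// spirit of the testImporter helper used by this package's tests. It does not
+// support vendored packages (see the caution on Importer above).
+type mapImporter map[string]*Package
+
+func (m mapImporter) Import(path string) (*Package, error) {
+	if pkg, ok := m[path]; ok {
+		return pkg, nil
+	}
+	return nil, fmt.Errorf("package %q not found", path)
+}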
+
+// A Config specifies the configuration for type checking.
+// The zero value for Config is a ready-to-use default configuration.
+type Config struct {
+ // Context is the context used for resolving global identifiers. If nil, the
+ // type checker will initialize this field with a newly created context.
+ Context *Context
+
+ // GoVersion describes the accepted Go language version. The string
+ // must follow the format "go%d.%d" (e.g. "go1.12") or it must be
+ // empty; an empty string indicates the latest language version.
+ // If the format is invalid, invoking the type checker will cause a
+ // panic.
+ GoVersion string
+
+ // If IgnoreFuncBodies is set, function bodies are not
+ // type-checked.
+ IgnoreFuncBodies bool
+
+ // If FakeImportC is set, `import "C"` (for packages requiring Cgo)
+ // declares an empty "C" package and errors are omitted for qualified
+ // identifiers referring to package C (which won't find an object).
+ // This feature is intended for the standard library cmd/api tool.
+ //
+ // Caution: Effects may be unpredictable due to follow-on errors.
+ // Do not use casually!
+ FakeImportC bool
+
+ // If IgnoreLabels is set, correct label use is not checked.
+ // TODO(gri) Consolidate label checking and remove this flag.
+ IgnoreLabels bool
+
+ // If CompilerErrorMessages is set, errors are reported using
+ // cmd/compile error strings to match $GOROOT/test errors.
+ // TODO(gri) Consolidate error messages and remove this flag.
+ CompilerErrorMessages bool
+
+ // If go115UsesCgo is set, the type checker expects the
+ // _cgo_gotypes.go file generated by running cmd/cgo to be
+ // provided as a package source file. Qualified identifiers
+ // referring to package C will be resolved to cgo-provided
+ // declarations within _cgo_gotypes.go.
+ //
+ // It is an error to set both FakeImportC and go115UsesCgo.
+ go115UsesCgo bool
+
+ // If Trace is set, a debug trace is printed to stdout.
+ Trace bool
+
+ // If Error != nil, it is called with each error found
+ // during type checking; err has dynamic type Error.
+ // Secondary errors (for instance, to enumerate all types
+ // involved in an invalid recursive type declaration) have
+ // error strings that start with a '\t' character.
+ // If Error == nil, type-checking stops with the first
+ // error found.
+ Error func(err error)
+
+ // An importer is used to import packages referred to from
+ // import declarations.
+ // If the installed importer implements ImporterFrom, the type
+ // checker calls ImportFrom instead of Import.
+ // The type checker reports an error if an importer is needed
+ // but none was installed.
+ Importer Importer
+
+ // If Sizes != nil, it provides the sizing functions for package unsafe.
+ // Otherwise SizesFor("gc", "amd64") is used instead.
+ Sizes Sizes
+
+ // If DisableUnusedImportCheck is set, packages are not checked
+ // for unused imports.
+ DisableUnusedImportCheck bool
+}
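+
+// collectAllErrors is an editor's illustrative sketch, not part of the upstream
+// file. It shows how installing a Config.Error callback keeps type checking
+// going past the first error, and how the Error type above distinguishes soft
+// from hard errors.
+func collectAllErrors(path string, files []*syntax.File, imp Importer) (all, soft []Error) {
+	conf := Config{
+		Importer: imp,
+		Error: func(err error) {
+			if terr, ok := err.(Error); ok {
+				all = append(all, terr)
+				if terr.Soft {
+					soft = append(soft, terr)
+				}
+			}
+		},
+	}
+	conf.Check(path, files, nil) // the returned error is only the first one
+	return all, soft
+}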
+
+func srcimporter_setUsesCgo(conf *Config) {
+ conf.go115UsesCgo = true
+}
+
+// Info holds result type information for a type-checked package.
+// Only the information for which a map is provided is collected.
+// If the package has type errors, the collected information may
+// be incomplete.
+type Info struct {
+ // Types maps expressions to their types, and for constant
+ // expressions, also their values. Invalid expressions are
+ // omitted.
+ //
+ // For (possibly parenthesized) identifiers denoting built-in
+ // functions, the recorded signatures are call-site specific:
+ // if the call result is not a constant, the recorded type is
+ // an argument-specific signature. Otherwise, the recorded type
+ // is invalid.
+ //
+ // The Types map does not record the type of every identifier,
+ // only those that appear where an arbitrary expression is
+ // permitted. For instance, the identifier f in a selector
+ // expression x.f is found only in the Selections map, the
+ // identifier z in a variable declaration 'var z int' is found
+ // only in the Defs map, and identifiers denoting packages in
+ // qualified identifiers are collected in the Uses map.
+ Types map[syntax.Expr]TypeAndValue
+
+ // Instances maps identifiers denoting generic types or functions to their
+ // type arguments and instantiated type.
+ //
+ // For example, Instances will map the identifier for 'T' in the type
+ // instantiation T[int, string] to the type arguments [int, string] and
+ // resulting instantiated *Named type. Given a generic function
+ // func F[A any](A), Instances will map the identifier for 'F' in the call
+ // expression F(int(1)) to the inferred type arguments [int], and resulting
+ // instantiated *Signature.
+ //
+ // Invariant: Instantiating Uses[id].Type() with Instances[id].TypeArgs
+ // results in an equivalent of Instances[id].Type.
+ Instances map[*syntax.Name]Instance
+
+ // Defs maps identifiers to the objects they define (including
+ // package names, dots "." of dot-imports, and blank "_" identifiers).
+ // For identifiers that do not denote objects (e.g., the package name
+ // in package clauses, or symbolic variables t in t := x.(type) of
+ // type switch headers), the corresponding objects are nil.
+ //
+ // For an embedded field, Defs returns the field *Var it defines.
+ //
+ // Invariant: Defs[id] == nil || Defs[id].Pos() == id.Pos()
+ Defs map[*syntax.Name]Object
+
+ // Uses maps identifiers to the objects they denote.
+ //
+ // For an embedded field, Uses returns the *TypeName it denotes.
+ //
+ // Invariant: Uses[id].Pos() != id.Pos()
+ Uses map[*syntax.Name]Object
+
+ // Implicits maps nodes to their implicitly declared objects, if any.
+ // The following node and object types may appear:
+ //
+ // node declared object
+ //
+ // *syntax.ImportDecl *PkgName for imports without renames
+ // *syntax.CaseClause type-specific *Var for each type switch case clause (incl. default)
+ // *syntax.Field anonymous parameter *Var (incl. unnamed results)
+ //
+ Implicits map[syntax.Node]Object
+
+ // Selections maps selector expressions (excluding qualified identifiers)
+ // to their corresponding selections.
+ Selections map[*syntax.SelectorExpr]*Selection
+
+ // Scopes maps syntax.Nodes to the scopes they define. Package scopes are not
+ // associated with a specific node but with all files belonging to a package.
+ // Thus, the package scope can be found in the type-checked Package object.
+ // Scopes nest, with the Universe scope being the outermost scope, enclosing
+ // the package scope, which contains (one or more) file scopes, which enclose
+ // function scopes which in turn enclose statement and function literal scopes.
+ // Note that even though package-level functions are declared in the package
+ // scope, the function scopes are embedded in the file scope of the file
+ // containing the function declaration.
+ //
+ // The following node types may appear in Scopes:
+ //
+ // *syntax.File
+ // *syntax.FuncType
+ // *syntax.TypeDecl
+ // *syntax.BlockStmt
+ // *syntax.IfStmt
+ // *syntax.SwitchStmt
+ // *syntax.CaseClause
+ // *syntax.CommClause
+ // *syntax.ForStmt
+ //
+ Scopes map[syntax.Node]*Scope
+
+ // InitOrder is the list of package-level initializers in the order in which
+ // they must be executed. Initializers referring to variables related by an
+ // initialization dependency appear in topological order, the others appear
+ // in source order. Variables without an initialization expression do not
+ // appear in this list.
+ InitOrder []*Initializer
+}
+
+// TypeOf returns the type of expression e, or nil if not found.
+// Precondition: the Types, Uses and Defs maps are populated.
+//
+func (info *Info) TypeOf(e syntax.Expr) Type {
+ if t, ok := info.Types[e]; ok {
+ return t.Type
+ }
+ if id, _ := e.(*syntax.Name); id != nil {
+ if obj := info.ObjectOf(id); obj != nil {
+ return obj.Type()
+ }
+ }
+ return nil
+}
+
+// ObjectOf returns the object denoted by the specified id,
+// or nil if not found.
+//
+// If id is an embedded struct field, ObjectOf returns the field (*Var)
+// it defines, not the type (*TypeName) it uses.
+//
+// Precondition: the Uses and Defs maps are populated.
+//
+func (info *Info) ObjectOf(id *syntax.Name) Object {
+ if obj := info.Defs[id]; obj != nil {
+ return obj
+ }
+ return info.Uses[id]
+}
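+
+// typeOfIdent is an editor's illustrative sketch, not part of the upstream file.
+// It spells out why TypeOf above also answers bare identifiers: an identifier
+// that has no Types entry (for example, one that only defines an object) is
+// still resolved through ObjectOf via the Defs and Uses maps.
+func typeOfIdent(info *Info, id *syntax.Name) Type {
+	if tv, ok := info.Types[id]; ok {
+		return tv.Type // identifier used where an arbitrary expression is permitted
+	}
+	if obj := info.ObjectOf(id); obj != nil {
+		return obj.Type() // defining or referring identifier
+	}
+	return nil
+}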
+
+// TypeAndValue reports the type and value (for constants)
+// of the corresponding expression.
+type TypeAndValue struct {
+ mode operandMode
+ Type Type
+ Value constant.Value
+}
+
+// IsVoid reports whether the corresponding expression
+// is a function call without results.
+func (tv TypeAndValue) IsVoid() bool {
+ return tv.mode == novalue
+}
+
+// IsType reports whether the corresponding expression specifies a type.
+func (tv TypeAndValue) IsType() bool {
+ return tv.mode == typexpr
+}
+
+// IsBuiltin reports whether the corresponding expression denotes
+// a (possibly parenthesized) built-in function.
+func (tv TypeAndValue) IsBuiltin() bool {
+ return tv.mode == builtin
+}
+
+// IsValue reports whether the corresponding expression is a value.
+// Builtins are not considered values. Constant values have a non-
+// nil Value.
+func (tv TypeAndValue) IsValue() bool {
+ switch tv.mode {
+ case constant_, variable, mapindex, value, nilvalue, commaok, commaerr:
+ return true
+ }
+ return false
+}
+
+// IsNil reports whether the corresponding expression denotes the
+// predeclared value nil. Depending on context, it may have been
+// given a type different from UntypedNil.
+func (tv TypeAndValue) IsNil() bool {
+ return tv.mode == nilvalue
+}
+
+// Addressable reports whether the corresponding expression
+// is addressable (https://golang.org/ref/spec#Address_operators).
+func (tv TypeAndValue) Addressable() bool {
+ return tv.mode == variable
+}
+
+// Assignable reports whether the corresponding expression
+// is assignable to (provided a value of the right type).
+func (tv TypeAndValue) Assignable() bool {
+ return tv.mode == variable || tv.mode == mapindex
+}
+
+// HasOk reports whether the corresponding expression may be
+// used on the rhs of a comma-ok assignment.
+func (tv TypeAndValue) HasOk() bool {
+ return tv.mode == commaok || tv.mode == mapindex
+}
+
+// Instance reports the type arguments and instantiated type for type and
+// function instantiations. For type instantiations, Type will be of dynamic
+// type *Named. For function instantiations, Type will be of dynamic type
+// *Signature.
+type Instance struct {
+ TypeArgs *TypeList
+ Type Type
+}
+
+// An Initializer describes a package-level variable, or a list of variables in case
+// of a multi-valued initialization expression, and the corresponding initialization
+// expression.
+type Initializer struct {
+ Lhs []*Var // var Lhs = Rhs
+ Rhs syntax.Expr
+}
+
+func (init *Initializer) String() string {
+ var buf bytes.Buffer
+ for i, lhs := range init.Lhs {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(lhs.Name())
+ }
+ buf.WriteString(" = ")
+ syntax.Fprint(&buf, init.Rhs, syntax.ShortForm)
+ return buf.String()
+}
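+
+// printInitOrder is an editor's illustrative sketch, not part of the upstream
+// file. It shows how the InitOrder slice documented in Info is read back after
+// a successful Check; InitOrder is a slice, so unlike the result maps nothing
+// needs to be pre-allocated for it.
+func printInitOrder(path string, files []*syntax.File, imp Importer) error {
+	info := &Info{}
+	conf := Config{Importer: imp}
+	if _, err := conf.Check(path, files, info); err != nil {
+		return err
+	}
+	for _, init := range info.InitOrder {
+		fmt.Println(init) // uses Initializer.String, e.g. "x = 1" or "a, b = f()"
+	}
+	return nil
+}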
+
+// Check type-checks a package and returns the resulting package object and
+// the first error if any. Additionally, if info != nil, Check populates each
+// of the non-nil maps in the Info struct.
+//
+// The package is marked as complete if no errors occurred, otherwise it is
+// incomplete. See Config.Error for controlling behavior in the presence of
+// errors.
+//
+// The package is specified by a list of *syntax.Files and the package path
+// the package is identified with.
+// The clean path must not be empty or dot (".").
+func (conf *Config) Check(path string, files []*syntax.File, info *Info) (*Package, error) {
+ pkg := NewPackage(path, "")
+ return pkg, NewChecker(conf, pkg, info).Files(files)
+}
+
+// AssertableTo reports whether a value of type V can be asserted to have type T.
+//
+// The behavior of AssertableTo is undefined in two cases:
+// - if V is a generalized interface; i.e., an interface that may only be used
+// as a type constraint in Go code
+// - if T is an uninstantiated generic type
+func AssertableTo(V *Interface, T Type) bool {
+ // Checker.newAssertableTo suppresses errors for invalid types, so we need special
+ // handling here.
+ if T.Underlying() == Typ[Invalid] {
+ return false
+ }
+ return (*Checker)(nil).newAssertableTo(V, T) == nil
+}
+
+// AssignableTo reports whether a value of type V is assignable to a variable
+// of type T.
+//
+// The behavior of AssignableTo is undefined if V or T is an uninstantiated
+// generic type.
+func AssignableTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ ok, _ := x.assignableTo(nil, T, nil) // check not needed for non-constant x
+ return ok
+}
+
+// ConvertibleTo reports whether a value of type V is convertible to a value of
+// type T.
+//
+// The behavior of ConvertibleTo is undefined if V or T is an uninstantiated
+// generic type.
+func ConvertibleTo(V, T Type) bool {
+ x := operand{mode: value, typ: V}
+ return x.convertibleTo(nil, T, nil) // check not needed for non-constant x
+}
+
+// Implements reports whether type V implements interface T.
+//
+// The behavior of Implements is undefined if V is an uninstantiated generic
+// type.
+func Implements(V Type, T *Interface) bool {
+ if T.Empty() {
+ // All types (even Typ[Invalid]) implement the empty interface.
+ return true
+ }
+ // Checker.implements suppresses errors for invalid types, so we need special
+ // handling here.
+ if V.Underlying() == Typ[Invalid] {
+ return false
+ }
+ return (*Checker)(nil).implements(V, T) == nil
+}
+
+// Identical reports whether x and y are identical types.
+// Receivers of Signature types are ignored.
+func Identical(x, y Type) bool {
+ return identical(x, y, true, nil)
+}
+
+// IdenticalIgnoreTags reports whether x and y are identical types if tags are ignored.
+// Receivers of Signature types are ignored.
+func IdenticalIgnoreTags(x, y Type) bool {
+ return identical(x, y, false, nil)
+}
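+
+// Editor's note: an illustrative sketch, not part of the upstream file. The
+// package-level predicates above operate on Types, typically obtained from a
+// checked package's scope or from an Info map; pkg and the names V and T below
+// are assumptions.
+//
+//	V := pkg.Scope().Lookup("V").Type()
+//	T := pkg.Scope().Lookup("T").Type()
+//	fmt.Println(AssignableTo(V, T), ConvertibleTo(V, T), Identical(V, T))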
diff --git a/src/cmd/compile/internal/types2/api_test.go b/src/cmd/compile/internal/types2/api_test.go
new file mode 100644
index 0000000..df54f61
--- /dev/null
+++ b/src/cmd/compile/internal/types2/api_test.go
@@ -0,0 +1,2612 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ "internal/testenv"
+ "reflect"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+// brokenPkg is a source prefix for packages that are not expected to parse
+// or type-check cleanly. They are always parsed assuming that they contain
+// generic code.
+const brokenPkg = "package broken_"
+
+func parseSrc(path, src string) (*syntax.File, error) {
+ errh := func(error) {} // dummy error handler so that parsing continues in presence of errors
+ return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, syntax.AllowGenerics|syntax.AllowMethodTypeParams)
+}
+
+func pkgFor(path, source string, info *Info) (*Package, error) {
+ f, err := parseSrc(path, source)
+ if err != nil {
+ return nil, err
+ }
+ conf := Config{Importer: defaultImporter()}
+ return conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+}
+
+func mustTypecheck(t *testing.T, path, source string, info *Info) string {
+ pkg, err := pkgFor(path, source, info)
+ if err != nil {
+ name := path
+ if pkg != nil {
+ name = "package " + pkg.Name()
+ }
+ t.Fatalf("%s: didn't type-check (%s)", name, err)
+ }
+ return pkg.Name()
+}
+
+func mayTypecheck(t *testing.T, path, source string, info *Info) (string, error) {
+ f, err := parseSrc(path, source)
+ if f == nil { // ignore errors unless f is nil
+ t.Fatalf("%s: unable to parse: %s", path, err)
+ }
+ conf := Config{
+ Error: func(err error) {},
+ Importer: defaultImporter(),
+ }
+ pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+ return pkg.Name(), err
+}
+
+func TestValuesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string // constant expression
+ typ string // constant type
+ val string // constant value
+ }{
+ {`package a0; const _ = false`, `false`, `untyped bool`, `false`},
+ {`package a1; const _ = 0`, `0`, `untyped int`, `0`},
+ {`package a2; const _ = 'A'`, `'A'`, `untyped rune`, `65`},
+ {`package a3; const _ = 0.`, `0.`, `untyped float`, `0`},
+ {`package a4; const _ = 0i`, `0i`, `untyped complex`, `(0 + 0i)`},
+ {`package a5; const _ = "foo"`, `"foo"`, `untyped string`, `"foo"`},
+
+ {`package b0; var _ = false`, `false`, `bool`, `false`},
+ {`package b1; var _ = 0`, `0`, `int`, `0`},
+ {`package b2; var _ = 'A'`, `'A'`, `rune`, `65`},
+ {`package b3; var _ = 0.`, `0.`, `float64`, `0`},
+ {`package b4; var _ = 0i`, `0i`, `complex128`, `(0 + 0i)`},
+ {`package b5; var _ = "foo"`, `"foo"`, `string`, `"foo"`},
+
+ {`package c0a; var _ = bool(false)`, `false`, `bool`, `false`},
+ {`package c0b; var _ = bool(false)`, `bool(false)`, `bool`, `false`},
+ {`package c0c; type T bool; var _ = T(false)`, `T(false)`, `c0c.T`, `false`},
+
+ {`package c1a; var _ = int(0)`, `0`, `int`, `0`},
+ {`package c1b; var _ = int(0)`, `int(0)`, `int`, `0`},
+ {`package c1c; type T int; var _ = T(0)`, `T(0)`, `c1c.T`, `0`},
+
+ {`package c2a; var _ = rune('A')`, `'A'`, `rune`, `65`},
+ {`package c2b; var _ = rune('A')`, `rune('A')`, `rune`, `65`},
+ {`package c2c; type T rune; var _ = T('A')`, `T('A')`, `c2c.T`, `65`},
+
+ {`package c3a; var _ = float32(0.)`, `0.`, `float32`, `0`},
+ {`package c3b; var _ = float32(0.)`, `float32(0.)`, `float32`, `0`},
+ {`package c3c; type T float32; var _ = T(0.)`, `T(0.)`, `c3c.T`, `0`},
+
+ {`package c4a; var _ = complex64(0i)`, `0i`, `complex64`, `(0 + 0i)`},
+ {`package c4b; var _ = complex64(0i)`, `complex64(0i)`, `complex64`, `(0 + 0i)`},
+ {`package c4c; type T complex64; var _ = T(0i)`, `T(0i)`, `c4c.T`, `(0 + 0i)`},
+
+ {`package c5a; var _ = string("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package c5b; var _ = string("foo")`, `string("foo")`, `string`, `"foo"`},
+ {`package c5c; type T string; var _ = T("foo")`, `T("foo")`, `c5c.T`, `"foo"`},
+ {`package c5d; var _ = string(65)`, `65`, `untyped int`, `65`},
+ {`package c5e; var _ = string('A')`, `'A'`, `untyped rune`, `65`},
+ {`package c5f; type T string; var _ = T('A')`, `'A'`, `untyped rune`, `65`},
+
+ {`package d0; var _ = []byte("foo")`, `"foo"`, `string`, `"foo"`},
+ {`package d1; var _ = []byte(string("foo"))`, `"foo"`, `string`, `"foo"`},
+ {`package d2; var _ = []byte(string("foo"))`, `string("foo")`, `string`, `"foo"`},
+ {`package d3; type T []byte; var _ = T("foo")`, `"foo"`, `string`, `"foo"`},
+
+ {`package e0; const _ = float32( 1e-200)`, `float32(1e-200)`, `float32`, `0`},
+ {`package e1; const _ = float32(-1e-200)`, `float32(-1e-200)`, `float32`, `0`},
+ {`package e2; const _ = float64( 1e-2000)`, `float64(1e-2000)`, `float64`, `0`},
+ {`package e3; const _ = float64(-1e-2000)`, `float64(-1e-2000)`, `float64`, `0`},
+ {`package e4; const _ = complex64( 1e-200)`, `complex64(1e-200)`, `complex64`, `(0 + 0i)`},
+ {`package e5; const _ = complex64(-1e-200)`, `complex64(-1e-200)`, `complex64`, `(0 + 0i)`},
+ {`package e6; const _ = complex128( 1e-2000)`, `complex128(1e-2000)`, `complex128`, `(0 + 0i)`},
+ {`package e7; const _ = complex128(-1e-2000)`, `complex128(-1e-2000)`, `complex128`, `(0 + 0i)`},
+
+ {`package f0 ; var _ float32 = 1e-200`, `1e-200`, `float32`, `0`},
+ {`package f1 ; var _ float32 = -1e-200`, `-1e-200`, `float32`, `0`},
+ {`package f2a; var _ float64 = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3a; var _ float64 = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f2b; var _ = 1e-2000`, `1e-2000`, `float64`, `0`},
+ {`package f3b; var _ = -1e-2000`, `-1e-2000`, `float64`, `0`},
+ {`package f4 ; var _ complex64 = 1e-200 `, `1e-200`, `complex64`, `(0 + 0i)`},
+ {`package f5 ; var _ complex64 = -1e-200 `, `-1e-200`, `complex64`, `(0 + 0i)`},
+ {`package f6a; var _ complex128 = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f7a; var _ complex128 = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f6b; var _ = 1e-2000i`, `1e-2000i`, `complex128`, `(0 + 0i)`},
+ {`package f7b; var _ = -1e-2000i`, `-1e-2000i`, `complex128`, `(0 + 0i)`},
+
+ {`package g0; const (a = len([iota]int{}); b; c); const _ = c`, `c`, `int`, `2`}, // issue #22341
+ {`package g1; var(j int32; s int; n = 1.0<<s == j)`, `1.0`, `int32`, `1`}, // issue #48422
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ }
+ name := mustTypecheck(t, "ValuesInfo", test.src, &info)
+
+ // look for expression
+ var expr syntax.Expr
+ for e := range info.Types {
+ if syntax.String(e) == test.expr {
+ expr = e
+ break
+ }
+ }
+ if expr == nil {
+ t.Errorf("package %s: no expression found for %s", name, test.expr)
+ continue
+ }
+ tv := info.Types[expr]
+
+ // check that type is correct
+ if got := tv.Type.String(); got != test.typ {
+ t.Errorf("package %s: got type %s; want %s", name, got, test.typ)
+ continue
+ }
+
+ // if we have a constant, check that value is correct
+ if tv.Value != nil {
+ if got := tv.Value.ExactString(); got != test.val {
+ t.Errorf("package %s: got value %s; want %s", name, got, test.val)
+ }
+ } else {
+ if test.val != "" {
+ t.Errorf("package %s: no constant found; want %s", name, test.val)
+ }
+ }
+ }
+}
+
+func TestTypesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ expr string // expression
+ typ string // value type
+ }{
+ // single-valued expressions of untyped constants
+ {`package b0; var x interface{} = false`, `false`, `bool`},
+ {`package b1; var x interface{} = 0`, `0`, `int`},
+ {`package b2; var x interface{} = 0.`, `0.`, `float64`},
+ {`package b3; var x interface{} = 0i`, `0i`, `complex128`},
+ {`package b4; var x interface{} = "foo"`, `"foo"`, `string`},
+
+ // uses of nil
+ {`package n0; var _ *int = nil`, `nil`, `*int`},
+ {`package n1; var _ func() = nil`, `nil`, `func()`},
+ {`package n2; var _ []byte = nil`, `nil`, `[]byte`},
+ {`package n3; var _ map[int]int = nil`, `nil`, `map[int]int`},
+ {`package n4; var _ chan int = nil`, `nil`, `chan int`},
+ {`package n5a; var _ interface{} = (*int)(nil)`, `nil`, `*int`},
+ {`package n5b; var _ interface{m()} = nil`, `nil`, `interface{m()}`},
+ {`package n6; import "unsafe"; var _ unsafe.Pointer = nil`, `nil`, `unsafe.Pointer`},
+
+ {`package n10; var (x *int; _ = x == nil)`, `nil`, `*int`},
+ {`package n11; var (x func(); _ = x == nil)`, `nil`, `func()`},
+ {`package n12; var (x []byte; _ = x == nil)`, `nil`, `[]byte`},
+ {`package n13; var (x map[int]int; _ = x == nil)`, `nil`, `map[int]int`},
+ {`package n14; var (x chan int; _ = x == nil)`, `nil`, `chan int`},
+ {`package n15a; var (x interface{}; _ = x == (*int)(nil))`, `nil`, `*int`},
+ {`package n15b; var (x interface{m()}; _ = x == nil)`, `nil`, `interface{m()}`},
+ {`package n15; import "unsafe"; var (x unsafe.Pointer; _ = x == nil)`, `nil`, `unsafe.Pointer`},
+
+ {`package n20; var _ = (*int)(nil)`, `nil`, `*int`},
+ {`package n21; var _ = (func())(nil)`, `nil`, `func()`},
+ {`package n22; var _ = ([]byte)(nil)`, `nil`, `[]byte`},
+ {`package n23; var _ = (map[int]int)(nil)`, `nil`, `map[int]int`},
+ {`package n24; var _ = (chan int)(nil)`, `nil`, `chan int`},
+ {`package n25a; var _ = (interface{})((*int)(nil))`, `nil`, `*int`},
+ {`package n25b; var _ = (interface{m()})(nil)`, `nil`, `interface{m()}`},
+ {`package n26; import "unsafe"; var _ = unsafe.Pointer(nil)`, `nil`, `unsafe.Pointer`},
+
+ {`package n30; func f(*int) { f(nil) }`, `nil`, `*int`},
+ {`package n31; func f(func()) { f(nil) }`, `nil`, `func()`},
+ {`package n32; func f([]byte) { f(nil) }`, `nil`, `[]byte`},
+ {`package n33; func f(map[int]int) { f(nil) }`, `nil`, `map[int]int`},
+ {`package n34; func f(chan int) { f(nil) }`, `nil`, `chan int`},
+ {`package n35a; func f(interface{}) { f((*int)(nil)) }`, `nil`, `*int`},
+ {`package n35b; func f(interface{m()}) { f(nil) }`, `nil`, `interface{m()}`},
+ {`package n35; import "unsafe"; func f(unsafe.Pointer) { f(nil) }`, `nil`, `unsafe.Pointer`},
+
+ // comma-ok expressions
+ {`package p0; var x interface{}; var _, _ = x.(int)`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package p1; var x interface{}; func _() { _, _ = x.(int) }`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package p2a; type mybool bool; var m map[string]complex128; var b mybool; func _() { _, b = m["foo"] }`,
+ `m["foo"]`,
+ `(complex128, p2a.mybool)`,
+ },
+ {`package p2b; var m map[string]complex128; var b bool; func _() { _, b = m["foo"] }`,
+ `m["foo"]`,
+ `(complex128, bool)`,
+ },
+ {`package p3; var c chan string; var _, _ = <-c`,
+ `<-c`,
+ `(string, bool)`,
+ },
+
+ // issue 6796
+ {`package issue6796_a; var x interface{}; var _, _ = (x.(int))`,
+ `x.(int)`,
+ `(int, bool)`,
+ },
+ {`package issue6796_b; var c chan string; var _, _ = (<-c)`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_c; var c chan string; var _, _ = (<-c)`,
+ `<-c`,
+ `(string, bool)`,
+ },
+ {`package issue6796_d; var c chan string; var _, _ = ((<-c))`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+ {`package issue6796_e; func f(c chan string) { _, _ = ((<-c)) }`,
+ `(<-c)`,
+ `(string, bool)`,
+ },
+
+ // issue 7060
+ {`package issue7060_a; var ( m map[int]string; x, ok = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_b; var ( m map[int]string; x, ok interface{} = m[0] )`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_c; func f(x interface{}, ok bool, m map[int]string) { x, ok = m[0] }`,
+ `m[0]`,
+ `(string, bool)`,
+ },
+ {`package issue7060_d; var ( ch chan string; x, ok = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_e; var ( ch chan string; x, ok interface{} = <-ch )`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+ {`package issue7060_f; func f(x interface{}, ok bool, ch chan string) { x, ok = <-ch }`,
+ `<-ch`,
+ `(string, bool)`,
+ },
+
+ // issue 28277
+ {`package issue28277_a; func f(...int)`,
+ `...int`,
+ `[]int`,
+ },
+ {`package issue28277_b; func f(a, b int, c ...[]struct{})`,
+ `...[]struct{}`,
+ `[][]struct{}`,
+ },
+
+ // tests for broken code that doesn't parse or type-check
+ {brokenPkg + `x0; func _() { var x struct {f string}; x.f := 0 }`, `x.f`, `string`},
+ {brokenPkg + `x1; func _() { var z string; type x struct {f string}; y := &x{q: z}}`, `z`, `string`},
+ {brokenPkg + `x2; func _() { var a, b string; type x struct {f string}; z := &x{f: a, f: b,}}`, `b`, `string`},
+ {brokenPkg + `x3; var x = panic("");`, `panic`, `func(interface{})`},
+ {`package x4; func _() { panic("") }`, `panic`, `func(interface{})`},
+ {brokenPkg + `x5; func _() { var x map[string][...]int; x = map[string][...]int{"": {1,2,3}} }`, `x`, `map[string]invalid type`},
+
+ // parameterized functions
+ {`package p0; func f[T any](T) {}; var _ = f[int]`, `f`, `func[T any](T)`},
+ {`package p1; func f[T any](T) {}; var _ = f[int]`, `f[int]`, `func(int)`},
+ {`package p2; func f[T any](T) {}; func _() { f(42) }`, `f`, `func(int)`},
+ {`package p3; func f[T any](T) {}; func _() { f[int](42) }`, `f[int]`, `func(int)`},
+ {`package p4; func f[T any](T) {}; func _() { f[int](42) }`, `f`, `func[T any](T)`},
+ {`package p5; func f[T any](T) {}; func _() { f(42) }`, `f(42)`, `()`},
+
+ // type parameters
+ {`package t0; type t[] int; var _ t`, `t`, `t0.t`}, // t[] is a syntax error that is ignored in this test in favor of t
+ {`package t1; type t[P any] int; var _ t[int]`, `t`, `t1.t[P any]`},
+ {`package t2; type t[P interface{}] int; var _ t[int]`, `t`, `t2.t[P interface{}]`},
+ {`package t3; type t[P, Q interface{}] int; var _ t[int, int]`, `t`, `t3.t[P, Q interface{}]`},
+ {brokenPkg + `t4; type t[P, Q interface{ m() }] int; var _ t[int, int]`, `t`, `broken_t4.t[P, Q interface{m()}]`},
+
+ // instantiated types must be sanitized
+ {`package g0; type t[P any] int; var x struct{ f t[int] }; var _ = x.f`, `x.f`, `g0.t[int]`},
+
+ // issue 45096
+ {`package issue45096; func _[T interface{ ~int8 | ~int16 | ~int32 }](x T) { _ = x < 0 }`, `0`, `T`},
+
+ // issue 47895
+ {`package p; import "unsafe"; type S struct { f int }; var s S; var _ = unsafe.Offsetof(s.f)`, `s.f`, `int`},
+
+ // issue 50093
+ {`package u0a; func _[_ interface{int}]() {}`, `int`, `int`},
+ {`package u1a; func _[_ interface{~int}]() {}`, `~int`, `~int`},
+ {`package u2a; func _[_ interface{int|string}]() {}`, `int | string`, `int|string`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `int | string`, `int|string`},
+ {`package u3a; func _[_ interface{int|string|~bool}]() {}`, `~bool`, `~bool`},
+ {`package u3a; func _[_ interface{int|string|~float64|~bool}]() {}`, `int | string | ~float64`, `int|string|~float64`},
+
+ {`package u0b; func _[_ int]() {}`, `int`, `int`},
+ {`package u1b; func _[_ ~int]() {}`, `~int`, `~int`},
+ {`package u2b; func _[_ int|string]() {}`, `int | string`, `int|string`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `int | string`, `int|string`},
+ {`package u3b; func _[_ int|string|~bool]() {}`, `~bool`, `~bool`},
+ {`package u3b; func _[_ int|string|~float64|~bool]() {}`, `int | string | ~float64`, `int|string|~float64`},
+
+ {`package u0c; type _ interface{int}`, `int`, `int`},
+ {`package u1c; type _ interface{~int}`, `~int`, `~int`},
+ {`package u2c; type _ interface{int|string}`, `int | string`, `int|string`},
+ {`package u3c; type _ interface{int|string|~bool}`, `int | string | ~bool`, `int|string|~bool`},
+ {`package u3c; type _ interface{int|string|~bool}`, `int | string`, `int|string`},
+ {`package u3c; type _ interface{int|string|~bool}`, `~bool`, `~bool`},
+ {`package u3c; type _ interface{int|string|~float64|~bool}`, `int | string | ~float64`, `int|string|~float64`},
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ var name string
+ if strings.HasPrefix(test.src, brokenPkg) {
+ var err error
+ name, err = mayTypecheck(t, "TypesInfo", test.src, &info)
+ if err == nil {
+ t.Errorf("package %s: expected to fail but passed", name)
+ continue
+ }
+ } else {
+ name = mustTypecheck(t, "TypesInfo", test.src, &info)
+ }
+
+ // look for expression type
+ var typ Type
+ for e, tv := range info.Types {
+ if syntax.String(e) == test.expr {
+ typ = tv.Type
+ break
+ }
+ }
+ if typ == nil {
+ t.Errorf("package %s: no type found for %s", name, test.expr)
+ continue
+ }
+
+ // check that type is correct
+ if got := typ.String(); got != test.typ {
+ t.Errorf("package %s: got %s; want %s", name, got, test.typ)
+ }
+ }
+}
+
+func TestInstanceInfo(t *testing.T) {
+ const lib = `package lib
+
+func F[P any](P) {}
+
+type T[P any] []P
+`
+
+ type testInst struct {
+ name string
+ targs []string
+ typ string
+ }
+
+ var tests = []struct {
+ src string
+ instances []testInst // recorded instances in source order
+ }{
+ {`package p0; func f[T any](T) {}; func _() { f(42) }`,
+ []testInst{{`f`, []string{`int`}, `func(int)`}},
+ },
+ {`package p1; func f[T any](T) T { panic(0) }; func _() { f('@') }`,
+ []testInst{{`f`, []string{`rune`}, `func(rune) rune`}},
+ },
+ {`package p2; func f[T any](...T) T { panic(0) }; func _() { f(0i) }`,
+ []testInst{{`f`, []string{`complex128`}, `func(...complex128) complex128`}},
+ },
+ {`package p3; func f[A, B, C any](A, *B, []C) {}; func _() { f(1.2, new(string), []byte{}) }`,
+ []testInst{{`f`, []string{`float64`, `string`, `byte`}, `func(float64, *string, []byte)`}},
+ },
+ {`package p4; func f[A, B any](A, *B, ...[]B) {}; func _() { f(1.2, new(byte)) }`,
+ []testInst{{`f`, []string{`float64`, `byte`}, `func(float64, *byte, ...[]byte)`}},
+ },
+ // we don't know how to translate these but we can type-check them
+ {`package q0; type T struct{}; func (T) m[P any](P) {}; func _(x T) { x.m(42) }`,
+ []testInst{{`m`, []string{`int`}, `func(int)`}},
+ },
+ {`package q1; type T struct{}; func (T) m[P any](P) P { panic(0) }; func _(x T) { x.m(42) }`,
+ []testInst{{`m`, []string{`int`}, `func(int) int`}},
+ },
+ {`package q2; type T struct{}; func (T) m[P any](...P) P { panic(0) }; func _(x T) { x.m(42) }`,
+ []testInst{{`m`, []string{`int`}, `func(...int) int`}},
+ },
+ {`package q3; type T struct{}; func (T) m[A, B, C any](A, *B, []C) {}; func _(x T) { x.m(1.2, new(string), []byte{}) }`,
+ []testInst{{`m`, []string{`float64`, `string`, `byte`}, `func(float64, *string, []byte)`}},
+ },
+ {`package q4; type T struct{}; func (T) m[A, B any](A, *B, ...[]B) {}; func _(x T) { x.m(1.2, new(byte)) }`,
+ []testInst{{`m`, []string{`float64`, `byte`}, `func(float64, *byte, ...[]byte)`}},
+ },
+
+ {`package r0; type T[P1 any] struct{}; func (_ T[P2]) m[Q any](Q) {}; func _[P3 any](x T[P3]) { x.m(42) }`,
+ []testInst{
+ {`T`, []string{`P2`}, `struct{}`},
+ {`T`, []string{`P3`}, `struct{}`},
+ {`m`, []string{`int`}, `func(int)`},
+ },
+ },
+ // TODO(gri) record method type parameters in syntax.FuncType so we can check this
+ // {`package r1; type T interface{ m[P any](P) }; func _(x T) { x.m(4.2) }`,
+ // `x.m`,
+ // []string{`float64`},
+ // `func(float64)`,
+ // },
+
+ {`package s1; func f[T any, P interface{*T}](x T) {}; func _(x string) { f(x) }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func(x string)`}},
+ },
+ {`package s2; func f[T any, P interface{*T}](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{{`f`, []string{`int`, `*int`}, `func(x []int)`}},
+ },
+ {`package s3; type C[T any] interface{chan<- T}; func f[T any, P C[T]](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`f`, []string{`int`, `chan<- int`}, `func(x []int)`},
+ },
+ },
+ {`package s4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]](x []T) {}; func _(x []int) { f(x) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func(x []int)`},
+ },
+ },
+
+ {`package t1; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = f[string] }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func() string`}},
+ },
+ {`package t2; func f[T any, P interface{*T}]() T { panic(0) }; func _() { _ = (f[string]) }`,
+ []testInst{{`f`, []string{`string`, `*string`}, `func() string`}},
+ },
+ {`package t3; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = f[int] }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func() []int`},
+ },
+ },
+ {`package t4; type C[T any] interface{chan<- T}; func f[T any, P C[T], Q C[[]*P]]() []T { return nil }; func _() { _ = (f[int]) }`,
+ []testInst{
+ {`C`, []string{`T`}, `interface{chan<- T}`},
+ {`C`, []string{`[]*P`}, `interface{chan<- []*P}`},
+ {`f`, []string{`int`, `chan<- int`, `chan<- []*chan<- int`}, `func() []int`},
+ },
+ },
+ {`package i0; import "lib"; func _() { lib.F(42) }`,
+ []testInst{{`F`, []string{`int`}, `func(int)`}},
+ },
+
+ {`package duplfunc0; func f[T any](T) {}; func _() { f(42); f("foo"); f[int](3) }`,
+ []testInst{
+ {`f`, []string{`int`}, `func(int)`},
+ {`f`, []string{`string`}, `func(string)`},
+ {`f`, []string{`int`}, `func(int)`},
+ },
+ },
+ {`package duplfunc1; import "lib"; func _() { lib.F(42); lib.F("foo"); lib.F(3) }`,
+ []testInst{
+ {`F`, []string{`int`}, `func(int)`},
+ {`F`, []string{`string`}, `func(string)`},
+ {`F`, []string{`int`}, `func(int)`},
+ },
+ },
+
+ {`package type0; type T[P interface{~int}] struct{ x P }; var _ T[int]`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type1; type T[P interface{~int}] struct{ x P }; var _ (T[int])`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type2; type T[P interface{~int}] struct{ x P }; var _ T[(int)]`,
+ []testInst{{`T`, []string{`int`}, `struct{x int}`}},
+ },
+ {`package type3; type T[P1 interface{~[]P2}, P2 any] struct{ x P1; y P2 }; var _ T[[]int, int]`,
+ []testInst{{`T`, []string{`[]int`, `int`}, `struct{x []int; y int}`}},
+ },
+ {`package type4; import "lib"; var _ lib.T[int]`,
+ []testInst{{`T`, []string{`int`}, `[]int`}},
+ },
+
+ {`package dupltype0; type T[P interface{~int}] struct{ x P }; var x T[int]; var y T[int]`,
+ []testInst{
+ {`T`, []string{`int`}, `struct{x int}`},
+ {`T`, []string{`int`}, `struct{x int}`},
+ },
+ },
+ {`package dupltype1; type T[P ~int] struct{ x P }; func (r *T[Q]) add(z T[Q]) { r.x += z.x }`,
+ []testInst{
+ {`T`, []string{`Q`}, `struct{x Q}`},
+ {`T`, []string{`Q`}, `struct{x Q}`},
+ },
+ },
+ {`package dupltype1; import "lib"; var x lib.T[int]; var y lib.T[int]; var z lib.T[string]`,
+ []testInst{
+ {`T`, []string{`int`}, `[]int`},
+ {`T`, []string{`int`}, `[]int`},
+ {`T`, []string{`string`}, `[]string`},
+ },
+ },
+ }
+
+ for _, test := range tests {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ instMap := make(map[*syntax.Name]Instance)
+ useMap := make(map[*syntax.Name]Object)
+ makePkg := func(src string) *Package {
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, err := conf.Check("", []*syntax.File{f}, &Info{Instances: instMap, Uses: useMap})
+ if err != nil {
+ t.Fatal(err)
+ }
+ imports[pkg.Name()] = pkg
+ return pkg
+ }
+ makePkg(lib)
+ pkg := makePkg(test.src)
+
+ t.Run(pkg.Name(), func(t *testing.T) {
+ // Sort instances in source order for stability.
+ instances := sortedInstances(instMap)
+ if got, want := len(instances), len(test.instances); got != want {
+ t.Fatalf("got %d instances, want %d", got, want)
+ }
+
+ // Pairwise compare with the expected instances.
+ for ii, inst := range instances {
+ var targs []Type
+ for i := 0; i < inst.Inst.TypeArgs.Len(); i++ {
+ targs = append(targs, inst.Inst.TypeArgs.At(i))
+ }
+ typ := inst.Inst.Type
+
+ testInst := test.instances[ii]
+ if got := inst.Name.Value; got != testInst.name {
+ t.Fatalf("got name %s, want %s", got, testInst.name)
+ }
+
+ if len(targs) != len(testInst.targs) {
+ t.Fatalf("got %d type arguments; want %d", len(targs), len(testInst.targs))
+ }
+ for i, targ := range targs {
+ if got := targ.String(); got != testInst.targs[i] {
+ t.Errorf("type argument %d: got %s; want %s", i, got, testInst.targs[i])
+ }
+ }
+ if got := typ.Underlying().String(); got != testInst.typ {
+ t.Errorf("package %s: got %s; want %s", pkg.Name(), got, testInst.typ)
+ }
+
+ // Verify the invariant that re-instantiating the corresponding generic
+ // type with TypeArgs results in an identical instance.
+ ptype := useMap[inst.Name].Type()
+ lister, _ := ptype.(interface{ TypeParams() *TypeParamList })
+ if lister == nil || lister.TypeParams().Len() == 0 {
+ t.Fatalf("info.Types[%v] = %v, want parameterized type", inst.Name, ptype)
+ }
+ inst2, err := Instantiate(nil, ptype, targs, true)
+ if err != nil {
+ t.Errorf("Instantiate(%v, %v) failed: %v", ptype, targs, err)
+ }
+ if !Identical(inst.Inst.Type, inst2) {
+ t.Errorf("%v and %v are not identical", inst.Inst.Type, inst2)
+ }
+ }
+ })
+ }
+}
+
+type recordedInstance struct {
+ Name *syntax.Name
+ Inst Instance
+}
+
+func sortedInstances(m map[*syntax.Name]Instance) (instances []recordedInstance) {
+ for id, inst := range m {
+ instances = append(instances, recordedInstance{id, inst})
+ }
+ sort.Slice(instances, func(i, j int) bool {
+ return instances[i].Name.Pos().Cmp(instances[j].Name.Pos()) < 0
+ })
+ return instances
+}
+
+func TestDefsInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ obj string
+ want string
+ }{
+ {`package p0; const x = 42`, `x`, `const p0.x untyped int`},
+ {`package p1; const x int = 42`, `x`, `const p1.x int`},
+ {`package p2; var x int`, `x`, `var p2.x int`},
+ {`package p3; type x int`, `x`, `type p3.x int`},
+ {`package p4; func f()`, `f`, `func p4.f()`},
+ {`package p5; func f() int { x, _ := 1, 2; return x }`, `_`, `var _ int`},
+
+ // Tests using generics.
+ {`package g0; type x[T any] int`, `x`, `type g0.x[T any] int`},
+ {`package g1; func f[T any]() {}`, `f`, `func g1.f[T any]()`},
+ {`package g2; type x[T any] int; func (*x[_]) m() {}`, `m`, `func (*g2.x[_]).m()`},
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ name := mustTypecheck(t, "DefsInfo", test.src, &info)
+
+ // find object
+ var def Object
+ for id, obj := range info.Defs {
+ if id.Value == test.obj {
+ def = obj
+ break
+ }
+ }
+ if def == nil {
+ t.Errorf("package %s: %s not found", name, test.obj)
+ continue
+ }
+
+ if got := def.String(); got != test.want {
+ t.Errorf("package %s: got %s; want %s", name, got, test.want)
+ }
+ }
+}
+
+func TestUsesInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ obj string
+ want string
+ }{
+ {`package p0; func _() { _ = x }; const x = 42`, `x`, `const p0.x untyped int`},
+ {`package p1; func _() { _ = x }; const x int = 42`, `x`, `const p1.x int`},
+ {`package p2; func _() { _ = x }; var x int`, `x`, `var p2.x int`},
+ {`package p3; func _() { type _ x }; type x int`, `x`, `type p3.x int`},
+ {`package p4; func _() { _ = f }; func f()`, `f`, `func p4.f()`},
+
+ // Tests using generics.
+ {`package g0; func _[T any]() { _ = x }; const x = 42`, `x`, `const g0.x untyped int`},
+ {`package g1; func _[T any](x T) { }`, `T`, `type parameter T any`},
+ {`package g2; type N[A any] int; var _ N[int]`, `N`, `type g2.N[A any] int`},
+ {`package g3; type N[A any] int; func (N[_]) m() {}`, `N`, `type g3.N[A any] int`},
+
+ // Uses of fields are instantiated.
+ {`package s1; type N[A any] struct{ a A }; var f = N[int]{}.a`, `a`, `field a int`},
+ {`package s1; type N[A any] struct{ a A }; func (r N[B]) m(b B) { r.a = b }`, `a`, `field a B`},
+
+ // Uses of methods are uses of the instantiated method.
+ {`package m0; type N[A any] int; func (r N[B]) m() { r.n() }; func (N[C]) n() {}`, `n`, `func (m0.N[B]).n()`},
+ {`package m1; type N[A any] int; func (r N[B]) m() { }; var f = N[int].m`, `m`, `func (m1.N[int]).m()`},
+ {`package m2; func _[A any](v interface{ m() A }) { v.m() }`, `m`, `func (interface).m() A`},
+ {`package m3; func f[A any]() interface{ m() A } { return nil }; var _ = f[int]().m()`, `m`, `func (interface).m() int`},
+ {`package m4; type T[A any] func() interface{ m() A }; var x T[int]; var y = x().m`, `m`, `func (interface).m() int`},
+ {`package m5; type T[A any] interface{ m() A }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m5.T[B]).m() B`},
+ {`package m6; type T[A any] interface{ m() }; func _[B any](t T[B]) { t.m() }`, `m`, `func (m6.T[B]).m()`},
+ {`package m7; type T[A any] interface{ m() A }; func _(t T[int]) { t.m() }`, `m`, `func (m7.T[int]).m() int`},
+ {`package m8; type T[A any] interface{ m() }; func _(t T[int]) { t.m() }`, `m`, `func (m8.T[int]).m()`},
+ {`package m9; type T[A any] interface{ m() }; func _(t T[int]) { _ = t.m }`, `m`, `func (m9.T[int]).m()`},
+ {
+ `package m10; type E[A any] interface{ m() }; type T[B any] interface{ E[B]; n() }; func _(t T[int]) { t.m() }`,
+ `m`,
+ `func (m10.E[int]).m()`,
+ },
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Uses: make(map[*syntax.Name]Object),
+ }
+ name := mustTypecheck(t, "UsesInfo", test.src, &info)
+
+ // find object
+ var use Object
+ for id, obj := range info.Uses {
+ if id.Value == test.obj {
+ if use != nil {
+ panic(fmt.Sprintf("multiple uses of %q", id.Value))
+ }
+ use = obj
+ }
+ }
+ if use == nil {
+ t.Errorf("package %s: %s not found", name, test.obj)
+ continue
+ }
+
+ if got := use.String(); got != test.want {
+ t.Errorf("package %s: got %s; want %s", name, got, test.want)
+ }
+ }
+}
+
+func TestGenericMethodInfo(t *testing.T) {
+ src := `package p
+
+type N[A any] int
+
+func (r N[B]) m() { r.m(); r.n() }
+
+func (r *N[C]) n() { }
+`
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := Info{
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ }
+ var conf Config
+ pkg, err := conf.Check("p", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ N := pkg.Scope().Lookup("N").Type().(*Named)
+
+ // Find the generic methods stored on N.
+ gm, gn := N.Method(0), N.Method(1)
+ if gm.Name() == "n" {
+ gm, gn = gn, gm
+ }
+
+ // Collect objects from info.
+ var dm, dn *Func // the declared methods
+ var dmm, dmn *Func // the methods used in the body of m
+ for _, decl := range f.DeclList {
+ fdecl, ok := decl.(*syntax.FuncDecl)
+ if !ok {
+ continue
+ }
+ def := info.Defs[fdecl.Name].(*Func)
+ switch fdecl.Name.Value {
+ case "m":
+ dm = def
+ syntax.Inspect(fdecl.Body, func(n syntax.Node) bool {
+ if call, ok := n.(*syntax.CallExpr); ok {
+ sel := call.Fun.(*syntax.SelectorExpr)
+ use := info.Uses[sel.Sel].(*Func)
+ selection := info.Selections[sel]
+ if selection.Kind() != MethodVal {
+ t.Errorf("Selection kind = %v, want %v", selection.Kind(), MethodVal)
+ }
+ if selection.Obj() != use {
+ t.Errorf("info.Selections contains %v, want %v", selection.Obj(), use)
+ }
+ switch sel.Sel.Value {
+ case "m":
+ dmm = use
+ case "n":
+ dmn = use
+ }
+ }
+ return true
+ })
+ case "n":
+ dn = def
+ }
+ }
+
+ if gm != dm {
+ t.Errorf(`N.Method(...) returns %v for "m", but Info.Defs has %v`, gm, dm)
+ }
+ if gn != dn {
+ t.Errorf(`N.Method(...) returns %v for "m", but Info.Defs has %v`, gm, dm)
+ }
+ if dmm != dm {
+ t.Errorf(`Inside "m", r.m uses %v, want the defined func %v`, dmm, dm)
+ }
+ if dmn == dn {
+ t.Errorf(`Inside "m", r.n uses %v, want a func distinct from %v`, dmm, dm)
+ }
+}
+
+func TestImplicitsInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ want string
+ }{
+ {`package p2; import . "fmt"; var _ = Println`, ""}, // no Implicits entry
+ {`package p0; import local "fmt"; var _ = local.Println`, ""}, // no Implicits entry
+ {`package p1; import "fmt"; var _ = fmt.Println`, "importSpec: package fmt"},
+
+ {`package p3; func f(x interface{}) { switch x.(type) { case int: } }`, ""}, // no Implicits entry
+ {`package p4; func f(x interface{}) { switch t := x.(type) { case int: _ = t } }`, "caseClause: var t int"},
+ {`package p5; func f(x interface{}) { switch t := x.(type) { case int, uint: _ = t } }`, "caseClause: var t interface{}"},
+ {`package p6; func f(x interface{}) { switch t := x.(type) { default: _ = t } }`, "caseClause: var t interface{}"},
+
+ {`package p7; func f(x int) {}`, ""}, // no Implicits entry
+ {`package p8; func f(int) {}`, "field: var int"},
+ {`package p9; func f() (complex64) { return 0 }`, "field: var complex64"},
+ {`package p10; type T struct{}; func (*T) f() {}`, "field: var *p10.T"},
+
+ // Tests using generics.
+ {`package f0; func f[T any](x int) {}`, ""}, // no Implicits entry
+ {`package f1; func f[T any](int) {}`, "field: var int"},
+ {`package f2; func f[T any](T) {}`, "field: var T"},
+ {`package f3; func f[T any]() (complex64) { return 0 }`, "field: var complex64"},
+ {`package f4; func f[T any](t T) (T) { return t }`, "field: var T"},
+ {`package t0; type T[A any] struct{}; func (*T[_]) f() {}`, "field: var *t0.T[_]"},
+ {`package t1; type T[A any] struct{}; func _(x interface{}) { switch t := x.(type) { case T[int]: _ = t } }`, "caseClause: var t t1.T[int]"},
+ {`package t2; type T[A any] struct{}; func _[P any](x interface{}) { switch t := x.(type) { case T[P]: _ = t } }`, "caseClause: var t t2.T[P]"},
+ {`package t3; func _[P any](x interface{}) { switch t := x.(type) { case P: _ = t } }`, "caseClause: var t P"},
+ }
+
+ for _, test := range tests {
+ info := Info{
+ Implicits: make(map[syntax.Node]Object),
+ }
+ name := mustTypecheck(t, "ImplicitsInfo", test.src, &info)
+
+ // the test cases expect at most one Implicits entry
+ if len(info.Implicits) > 1 {
+ t.Errorf("package %s: %d Implicits entries found", name, len(info.Implicits))
+ continue
+ }
+
+ // extract Implicits entry, if any
+ var got string
+ for n, obj := range info.Implicits {
+ switch x := n.(type) {
+ case *syntax.ImportDecl:
+ got = "importSpec"
+ case *syntax.CaseClause:
+ got = "caseClause"
+ case *syntax.Field:
+ got = "field"
+ default:
+ t.Fatalf("package %s: unexpected %T", name, x)
+ }
+ got += ": " + obj.String()
+ }
+
+ // verify entry
+ if got != test.want {
+ t.Errorf("package %s: got %q; want %q", name, got, test.want)
+ }
+ }
+}
+
+func predString(tv TypeAndValue) string {
+ var buf bytes.Buffer
+ pred := func(b bool, s string) {
+ if b {
+ if buf.Len() > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(s)
+ }
+ }
+
+ pred(tv.IsVoid(), "void")
+ pred(tv.IsType(), "type")
+ pred(tv.IsBuiltin(), "builtin")
+ pred(tv.IsValue() && tv.Value != nil, "const")
+ pred(tv.IsValue() && tv.Value == nil, "value")
+ pred(tv.IsNil(), "nil")
+ pred(tv.Addressable(), "addressable")
+ pred(tv.Assignable(), "assignable")
+ pred(tv.HasOk(), "hasOk")
+
+ if buf.Len() == 0 {
+ return "invalid"
+ }
+ return buf.String()
+}
+
+func TestPredicatesInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ expr string
+ pred string
+ }{
+ // void
+ {`package n0; func f() { f() }`, `f()`, `void`},
+
+ // types
+ {`package t0; type _ int`, `int`, `type`},
+ {`package t1; type _ []int`, `[]int`, `type`},
+ {`package t2; type _ func()`, `func()`, `type`},
+ {`package t3; type _ func(int)`, `int`, `type`},
+ {`package t3; type _ func(...int)`, `...int`, `type`},
+
+ // built-ins
+ {`package b0; var _ = len("")`, `len`, `builtin`},
+ {`package b1; var _ = (len)("")`, `(len)`, `builtin`},
+
+ // constants
+ {`package c0; var _ = 42`, `42`, `const`},
+ {`package c1; var _ = "foo" + "bar"`, `"foo" + "bar"`, `const`},
+ {`package c2; const (i = 1i; _ = i)`, `i`, `const`},
+
+ // values
+ {`package v0; var (a, b int; _ = a + b)`, `a + b`, `value`},
+ {`package v1; var _ = &[]int{1}`, `[]int{…}`, `value`},
+ {`package v2; var _ = func(){}`, `func() {}`, `value`},
+ {`package v4; func f() { _ = f }`, `f`, `value`},
+ {`package v3; var _ *int = nil`, `nil`, `value, nil`},
+ {`package v3; var _ *int = (nil)`, `(nil)`, `value, nil`},
+
+ // addressable (and thus assignable) operands
+ {`package a0; var (x int; _ = x)`, `x`, `value, addressable, assignable`},
+ {`package a1; var (p *int; _ = *p)`, `*p`, `value, addressable, assignable`},
+ {`package a2; var (s []int; _ = s[0])`, `s[0]`, `value, addressable, assignable`},
+ {`package a3; var (s struct{f int}; _ = s.f)`, `s.f`, `value, addressable, assignable`},
+ {`package a4; var (a [10]int; _ = a[0])`, `a[0]`, `value, addressable, assignable`},
+ {`package a5; func _(x int) { _ = x }`, `x`, `value, addressable, assignable`},
+ {`package a6; func _()(x int) { _ = x; return }`, `x`, `value, addressable, assignable`},
+ {`package a7; type T int; func (x T) _() { _ = x }`, `x`, `value, addressable, assignable`},
+ // composite literals are not addressable
+
+ // assignable but not addressable values
+ {`package s0; var (m map[int]int; _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+ {`package s1; var (m map[int]int; _, _ = m[0])`, `m[0]`, `value, assignable, hasOk`},
+
+ // hasOk expressions
+ {`package k0; var (ch chan int; _ = <-ch)`, `<-ch`, `value, hasOk`},
+ {`package k1; var (ch chan int; _, _ = <-ch)`, `<-ch`, `value, hasOk`},
+
+ // missing entries
+ // - package names are collected in the Uses map
+ // - identifiers being declared are collected in the Defs map
+ {`package m0; import "os"; func _() { _ = os.Stdout }`, `os`, `<missing>`},
+ {`package m1; import p "os"; func _() { _ = p.Stdout }`, `p`, `<missing>`},
+ {`package m2; const c = 0`, `c`, `<missing>`},
+ {`package m3; type T int`, `T`, `<missing>`},
+ {`package m4; var v int`, `v`, `<missing>`},
+ {`package m5; func f() {}`, `f`, `<missing>`},
+ {`package m6; func _(x int) {}`, `x`, `<missing>`},
+ {`package m6; func _()(x int) { return }`, `x`, `<missing>`},
+ {`package m6; type T int; func (x T) _() {}`, `x`, `<missing>`},
+ }
+
+ for _, test := range tests {
+ info := Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ name := mustTypecheck(t, "PredicatesInfo", test.src, &info)
+
+ // look for expression predicates
+ got := "<missing>"
+ for e, tv := range info.Types {
+ //println(name, syntax.String(e))
+ if syntax.String(e) == test.expr {
+ got = predString(tv)
+ break
+ }
+ }
+
+ if got != test.pred {
+ t.Errorf("package %s: got %s; want %s", name, got, test.pred)
+ }
+ }
+}
+
+func TestScopesInfo(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests = []struct {
+ src string
+ scopes []string // list of scope descriptors of the form kind:varlist
+ }{
+ {`package p0`, []string{
+ "file:",
+ }},
+ {`package p1; import ( "fmt"; m "math"; _ "os" ); var ( _ = fmt.Println; _ = m.Pi )`, []string{
+ "file:fmt m",
+ }},
+ {`package p2; func _() {}`, []string{
+ "file:", "func:",
+ }},
+ {`package p3; func _(x, y int) {}`, []string{
+ "file:", "func:x y",
+ }},
+ {`package p4; func _(x, y int) { x, z := 1, 2; _ = z }`, []string{
+ "file:", "func:x y z", // redeclaration of x
+ }},
+ {`package p5; func _(x, y int) (u, _ int) { return }`, []string{
+ "file:", "func:u x y",
+ }},
+ {`package p6; func _() { { var x int; _ = x } }`, []string{
+ "file:", "func:", "block:x",
+ }},
+ {`package p7; func _() { if true {} }`, []string{
+ "file:", "func:", "if:", "block:",
+ }},
+ {`package p8; func _() { if x := 0; x < 0 { y := x; _ = y } }`, []string{
+ "file:", "func:", "if:x", "block:y",
+ }},
+ {`package p9; func _() { switch x := 0; x {} }`, []string{
+ "file:", "func:", "switch:x",
+ }},
+ {`package p10; func _() { switch x := 0; x { case 1: y := x; _ = y; default: }}`, []string{
+ "file:", "func:", "switch:x", "case:y", "case:",
+ }},
+ {`package p11; func _(t interface{}) { switch t.(type) {} }`, []string{
+ "file:", "func:t", "switch:",
+ }},
+ {`package p12; func _(t interface{}) { switch t := t; t.(type) {} }`, []string{
+ "file:", "func:t", "switch:t",
+ }},
+ {`package p13; func _(t interface{}) { switch x := t.(type) { case int: _ = x } }`, []string{
+ "file:", "func:t", "switch:", "case:x", // x implicitly declared
+ }},
+ {`package p14; func _() { select{} }`, []string{
+ "file:", "func:",
+ }},
+ {`package p15; func _(c chan int) { select{ case <-c: } }`, []string{
+ "file:", "func:c", "comm:",
+ }},
+ {`package p16; func _(c chan int) { select{ case i := <-c: x := i; _ = x} }`, []string{
+ "file:", "func:c", "comm:i x",
+ }},
+ {`package p17; func _() { for{} }`, []string{
+ "file:", "func:", "for:", "block:",
+ }},
+ {`package p18; func _(n int) { for i := 0; i < n; i++ { _ = i } }`, []string{
+ "file:", "func:n", "for:i", "block:",
+ }},
+ {`package p19; func _(a []int) { for i := range a { _ = i} }`, []string{
+ "file:", "func:a", "for:i", "block:",
+ }},
+ {`package p20; var s int; func _(a []int) { for i, x := range a { s += x; _ = i } }`, []string{
+ "file:", "func:a", "for:i x", "block:",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{Scopes: make(map[syntax.Node]*Scope)}
+ name := mustTypecheck(t, "ScopesInfo", test.src, &info)
+
+ // number of scopes must match
+ if len(info.Scopes) != len(test.scopes) {
+ t.Errorf("package %s: got %d scopes; want %d", name, len(info.Scopes), len(test.scopes))
+ }
+
+ // scope descriptions must match
+ for node, scope := range info.Scopes {
+ var kind string
+ switch node.(type) {
+ case *syntax.File:
+ kind = "file"
+ case *syntax.FuncType:
+ kind = "func"
+ case *syntax.BlockStmt:
+ kind = "block"
+ case *syntax.IfStmt:
+ kind = "if"
+ case *syntax.SwitchStmt:
+ kind = "switch"
+ case *syntax.SelectStmt:
+ kind = "select"
+ case *syntax.CaseClause:
+ kind = "case"
+ case *syntax.CommClause:
+ kind = "comm"
+ case *syntax.ForStmt:
+ kind = "for"
+ default:
+ kind = fmt.Sprintf("%T", node)
+ }
+
+ // look for matching scope description
+ desc := kind + ":" + strings.Join(scope.Names(), " ")
+ found := false
+ for _, d := range test.scopes {
+ if desc == d {
+ found = true
+ break
+ }
+ }
+ if !found {
+ t.Errorf("package %s: no matching scope found for %s", name, desc)
+ }
+ }
+ }
+}
+
+func TestInitOrderInfo(t *testing.T) {
+ var tests = []struct {
+ src string
+ inits []string
+ }{
+ {`package p0; var (x = 1; y = x)`, []string{
+ "x = 1", "y = x",
+ }},
+ {`package p1; var (a = 1; b = 2; c = 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p2; var (a, b, c = 1, 2, 3)`, []string{
+ "a = 1", "b = 2", "c = 3",
+ }},
+ {`package p3; var _ = f(); func f() int { return 1 }`, []string{
+ "_ = f()", // blank var
+ }},
+ {`package p4; var (a = 0; x = y; y = z; z = 0)`, []string{
+ "a = 0", "z = 0", "y = z", "x = y",
+ }},
+ {`package p5; var (a, _ = m[0]; m map[int]string)`, []string{
+ "a, _ = m[0]", // blank var
+ }},
+ {`package p6; var a, b = f(); func f() (_, _ int) { return z, z }; var z = 0`, []string{
+ "z = 0", "a, b = f()",
+ }},
+ {`package p7; var (a = func() int { return b }(); b = 1)`, []string{
+ "b = 1", "a = func() int {…}()",
+ }},
+ {`package p8; var (a, b = func() (_, _ int) { return c, c }(); c = 1)`, []string{
+ "c = 1", "a, b = func() (_, _ int) {…}()",
+ }},
+ {`package p9; type T struct{}; func (T) m() int { _ = y; return 0 }; var x, y = T.m, 1`, []string{
+ "y = 1", "x = T.m",
+ }},
+ {`package p10; var (d = c + b; a = 0; b = 0; c = 0)`, []string{
+ "a = 0", "b = 0", "c = 0", "d = c + b",
+ }},
+ {`package p11; var (a = e + c; b = d + c; c = 0; d = 0; e = 0)`, []string{
+ "c = 0", "d = 0", "b = d + c", "e = 0", "a = e + c",
+ }},
+ // emit an initializer for n:1 initializations only once (not for each node
+ // on the lhs which may appear in different order in the dependency graph)
+ {`package p12; var (a = x; b = 0; x, y = m[0]; m map[int]int)`, []string{
+ "b = 0", "x, y = m[0]", "a = x",
+ }},
+ // test case from spec section on package initialization
+ {`package p12
+
+ var (
+ a = c + b
+ b = f()
+ c = f()
+ d = 3
+ )
+
+ func f() int {
+ d++
+ return d
+ }`, []string{
+ "d = 3", "b = f()", "c = f()", "a = c + b",
+ }},
+ // test case for issue 7131
+ {`package main
+
+ var counter int
+ func next() int { counter++; return counter }
+
+ var _ = makeOrder()
+ func makeOrder() []int { return []int{f, b, d, e, c, a} }
+
+ var a = next()
+ var b, c = next(), next()
+ var d, e, f = next(), next(), next()
+ `, []string{
+ "a = next()", "b = next()", "c = next()", "d = next()", "e = next()", "f = next()", "_ = makeOrder()",
+ }},
+ // test case for issue 10709
+ {`package p13
+
+ var (
+ v = t.m()
+ t = makeT(0)
+ )
+
+ type T struct{}
+
+ func (T) m() int { return 0 }
+
+ func makeT(n int) T {
+ if n > 0 {
+ return makeT(n-1)
+ }
+ return T{}
+ }`, []string{
+ "t = makeT(0)", "v = t.m()",
+ }},
+ // test case for issue 10709: same as test before, but variable decls swapped
+ {`package p14
+
+ var (
+ t = makeT(0)
+ v = t.m()
+ )
+
+ type T struct{}
+
+ func (T) m() int { return 0 }
+
+ func makeT(n int) T {
+ if n > 0 {
+ return makeT(n-1)
+ }
+ return T{}
+ }`, []string{
+ "t = makeT(0)", "v = t.m()",
+ }},
+ // another candidate possibly causing problems with issue 10709
+ {`package p15
+
+ var y1 = f1()
+
+ func f1() int { return g1() }
+ func g1() int { f1(); return x1 }
+
+ var x1 = 0
+
+ var y2 = f2()
+
+ func f2() int { return g2() }
+ func g2() int { return x2 }
+
+ var x2 = 0`, []string{
+ "x1 = 0", "y1 = f1()", "x2 = 0", "y2 = f2()",
+ }},
+ }
+
+ for _, test := range tests {
+ info := Info{}
+ name := mustTypecheck(t, "InitOrderInfo", test.src, &info)
+
+ // number of initializers must match
+ if len(info.InitOrder) != len(test.inits) {
+ t.Errorf("package %s: got %d initializers; want %d", name, len(info.InitOrder), len(test.inits))
+ continue
+ }
+
+ // initializers must match
+ for i, want := range test.inits {
+ got := info.InitOrder[i].String()
+ if got != want {
+ t.Errorf("package %s, init %d: got %s; want %s", name, i, got, want)
+ continue
+ }
+ }
+ }
+}
+
+func TestMultiFileInitOrder(t *testing.T) {
+ mustParse := func(src string) *syntax.File {
+ f, err := parseSrc("main", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return f
+ }
+
+ fileA := mustParse(`package main; var a = 1`)
+ fileB := mustParse(`package main; var b = 2`)
+
+ // The initialization order must not depend on the parse
+ // order of the files, only on the presentation order to
+ // the type-checker.
+ for _, test := range []struct {
+ files []*syntax.File
+ want string
+ }{
+ {[]*syntax.File{fileA, fileB}, "[a = 1 b = 2]"},
+ {[]*syntax.File{fileB, fileA}, "[b = 2 a = 1]"},
+ } {
+ var info Info
+ if _, err := new(Config).Check("main", test.files, &info); err != nil {
+ t.Fatal(err)
+ }
+ if got := fmt.Sprint(info.InitOrder); got != test.want {
+ t.Fatalf("got %s; want %s", got, test.want)
+ }
+ }
+}
+
+func TestFiles(t *testing.T) {
+ var sources = []string{
+ "package p; type T struct{}; func (T) m1() {}",
+ "package p; func (T) m2() {}; var x interface{ m1(); m2() } = T{}",
+ "package p; func (T) m3() {}; var y interface{ m1(); m2(); m3() } = T{}",
+ "package p",
+ }
+
+ var conf Config
+ pkg := NewPackage("p", "p")
+ var info Info
+ check := NewChecker(&conf, pkg, &info)
+
+ for i, src := range sources {
+ filename := fmt.Sprintf("sources%d", i)
+ f, err := parseSrc(filename, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if err := check.Files([]*syntax.File{f}); err != nil {
+ t.Error(err)
+ }
+ }
+
+ // check InitOrder is [x y]
+ var vars []string
+ for _, init := range info.InitOrder {
+ for _, v := range init.Lhs {
+ vars = append(vars, v.Name())
+ }
+ }
+ if got, want := fmt.Sprint(vars), "[x y]"; got != want {
+ t.Errorf("InitOrder == %s, want %s", got, want)
+ }
+}
+
+type testImporter map[string]*Package
+
+func (m testImporter) Import(path string) (*Package, error) {
+ if pkg := m[path]; pkg != nil {
+ return pkg, nil
+ }
+ return nil, fmt.Errorf("package %q not found", path)
+}
+
+func TestSelection(t *testing.T) {
+ selections := make(map[*syntax.SelectorExpr]*Selection)
+
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ makePkg := func(path, src string) {
+ f, err := parseSrc(path+".go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, err := conf.Check(path, []*syntax.File{f}, &Info{Selections: selections})
+ if err != nil {
+ t.Fatal(err)
+ }
+ imports[path] = pkg
+ }
+
+ const libSrc = `
+package lib
+type T float64
+const C T = 3
+var V T
+func F() {}
+func (T) M() {}
+`
+ const mainSrc = `
+package main
+import "lib"
+
+type A struct {
+ *B
+ C
+}
+
+type B struct {
+ b int
+}
+
+func (B) f(int)
+
+type C struct {
+ c int
+}
+
+func (C) g()
+func (*C) h()
+
+func main() {
+ // qualified identifiers
+ var _ lib.T
+ _ = lib.C
+ _ = lib.F
+ _ = lib.V
+ _ = lib.T.M
+
+ // fields
+ _ = A{}.B
+ _ = new(A).B
+
+ _ = A{}.C
+ _ = new(A).C
+
+ _ = A{}.b
+ _ = new(A).b
+
+ _ = A{}.c
+ _ = new(A).c
+
+ // methods
+ _ = A{}.f
+ _ = new(A).f
+ _ = A{}.g
+ _ = new(A).g
+ _ = new(A).h
+
+ _ = B{}.f
+ _ = new(B).f
+
+ _ = C{}.g
+ _ = new(C).g
+ _ = new(C).h
+
+ // method expressions
+ _ = A.f
+ _ = (*A).f
+ _ = B.f
+ _ = (*B).f
+}`
+
+ wantOut := map[string][2]string{
+ "lib.T.M": {"method expr (lib.T) M(lib.T)", ".[0]"},
+
+ "A{}.B": {"field (main.A) B *main.B", ".[0]"},
+ "new(A).B": {"field (*main.A) B *main.B", "->[0]"},
+ "A{}.C": {"field (main.A) C main.C", ".[1]"},
+ "new(A).C": {"field (*main.A) C main.C", "->[1]"},
+ "A{}.b": {"field (main.A) b int", "->[0 0]"},
+ "new(A).b": {"field (*main.A) b int", "->[0 0]"},
+ "A{}.c": {"field (main.A) c int", ".[1 0]"},
+ "new(A).c": {"field (*main.A) c int", "->[1 0]"},
+
+ "A{}.f": {"method (main.A) f(int)", "->[0 0]"},
+ "new(A).f": {"method (*main.A) f(int)", "->[0 0]"},
+ "A{}.g": {"method (main.A) g()", ".[1 0]"},
+ "new(A).g": {"method (*main.A) g()", "->[1 0]"},
+ "new(A).h": {"method (*main.A) h()", "->[1 1]"}, // TODO(gri) should this report .[1 1] ?
+ "B{}.f": {"method (main.B) f(int)", ".[0]"},
+ "new(B).f": {"method (*main.B) f(int)", "->[0]"},
+ "C{}.g": {"method (main.C) g()", ".[0]"},
+ "new(C).g": {"method (*main.C) g()", "->[0]"},
+ "new(C).h": {"method (*main.C) h()", "->[1]"}, // TODO(gri) should this report .[1] ?
+
+ "A.f": {"method expr (main.A) f(main.A, int)", "->[0 0]"},
+ "(*A).f": {"method expr (*main.A) f(*main.A, int)", "->[0 0]"},
+ "B.f": {"method expr (main.B) f(main.B, int)", ".[0]"},
+ "(*B).f": {"method expr (*main.B) f(*main.B, int)", "->[0]"},
+ }
+
+ makePkg("lib", libSrc)
+ makePkg("main", mainSrc)
+
+ for e, sel := range selections {
+ _ = sel.String() // assertion: must not panic
+
+ start := indexFor(mainSrc, syntax.StartPos(e))
+ end := indexFor(mainSrc, syntax.EndPos(e))
+ segment := mainSrc[start:end] // (all SelectorExprs are in main, not lib)
+
+ direct := "."
+ if sel.Indirect() {
+ direct = "->"
+ }
+ got := [2]string{
+ sel.String(),
+ fmt.Sprintf("%s%v", direct, sel.Index()),
+ }
+ want := wantOut[segment]
+ if want != got {
+ t.Errorf("%s: got %q; want %q", segment, got, want)
+ }
+ delete(wantOut, segment)
+
+ // We must explicitly assert properties of the
+ // Signature's receiver since it doesn't participate
+ // in Identical() or String().
+ sig, _ := sel.Type().(*Signature)
+ if sel.Kind() == MethodVal {
+ got := sig.Recv().Type()
+ want := sel.Recv()
+ if !Identical(got, want) {
+ t.Errorf("%s: Recv() = %s, want %s", segment, got, want)
+ }
+ } else if sig != nil && sig.Recv() != nil {
+ t.Errorf("%s: signature has receiver %s", sig, sig.Recv().Type())
+ }
+ }
+ // Assert that all wantOut entries were used exactly once.
+ for segment := range wantOut {
+ t.Errorf("no syntax.Selection found with syntax %q", segment)
+ }
+}
+
+// indexFor returns the index into s corresponding to the position pos.
+func indexFor(s string, pos syntax.Pos) int {
+ i, line := 0, 1 // string index and corresponding line
+ target := int(pos.Line())
+ for line < target && i < len(s) {
+ if s[i] == '\n' {
+ line++
+ }
+ i++
+ }
+ return i + int(pos.Col()-1) // columns are 1-based
+}
+
+func TestIssue8518(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{
+ Error: func(err error) { t.Log(err) }, // don't exit after first error
+ Importer: imports,
+ }
+ makePkg := func(path, src string) {
+ f, err := parseSrc(path, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg, _ := conf.Check(path, []*syntax.File{f}, nil) // errors logged via conf.Error
+ imports[path] = pkg
+ }
+
+ const libSrc = `
+package a
+import "missing"
+const C1 = foo
+const C2 = missing.C
+`
+
+ const mainSrc = `
+package main
+import "a"
+var _ = a.C1
+var _ = a.C2
+`
+
+ makePkg("a", libSrc)
+ makePkg("main", mainSrc) // don't crash when type-checking this package
+}
+
+func TestLookupFieldOrMethodOnNil(t *testing.T) {
+ // LookupFieldOrMethod on a nil type is expected to produce a run-time panic.
+ defer func() {
+ const want = "LookupFieldOrMethod on nil type"
+ p := recover()
+ if s, ok := p.(string); !ok || s != want {
+ t.Fatalf("got %v, want %s", p, want)
+ }
+ }()
+ LookupFieldOrMethod(nil, false, nil, "")
+}
+
+func TestLookupFieldOrMethod(t *testing.T) {
+ // Test cases assume a lookup of the form a.f or x.f, where a stands for an
+	// addressable value, and x for a non-addressable value (both are declared as
+	// variables for ease of writing the test cases).
+ var tests = []struct {
+ src string
+ found bool
+ index []int
+ indirect bool
+ }{
+ // field lookups
+ {"var x T; type T struct{}", false, nil, false},
+ {"var x T; type T struct{ f int }", true, []int{0}, false},
+ {"var x T; type T struct{ a, b, f, c int }", true, []int{2}, false},
+
+ // field lookups on a generic type
+ {"var x T[int]; type T[P any] struct{}", false, nil, false},
+ {"var x T[int]; type T[P any] struct{ f P }", true, []int{0}, false},
+ {"var x T[int]; type T[P any] struct{ a, b, f, c P }", true, []int{2}, false},
+
+ // method lookups
+ {"var a T; type T struct{}; func (T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (T) f() {}", true, []int{0}, true},
+ {"var a T; type T struct{}; func (*T) f() {}", true, []int{0}, false},
+ {"var a *T; type T struct{}; func (*T) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false?
+
+ // method lookups on a generic type
+ {"var a T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, false},
+ {"var a *T[int]; type T[P any] struct{}; func (T[P]) f() {}", true, []int{0}, true},
+ {"var a T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, false},
+ {"var a *T[int]; type T[P any] struct{}; func (*T[P]) f() {}", true, []int{0}, true}, // TODO(gri) should this report indirect = false?
+
+ // collisions
+ {"type ( E1 struct{ f int }; E2 struct{ f int }; x struct{ E1; *E2 })", false, []int{1, 0}, false},
+ {"type ( E1 struct{ f int }; E2 struct{}; x struct{ E1; *E2 }); func (E2) f() {}", false, []int{1, 0}, false},
+
+ // collisions on a generic type
+ {"type ( E1[P any] struct{ f P }; E2[P any] struct{ f P }; x struct{ E1[int]; *E2[int] })", false, []int{1, 0}, false},
+ {"type ( E1[P any] struct{ f P }; E2[P any] struct{}; x struct{ E1[int]; *E2[int] }); func (E2[P]) f() {}", false, []int{1, 0}, false},
+
+ // outside methodset
+ // (*T).f method exists, but value of type T is not addressable
+ {"var x T; type T struct{}; func (*T) f() {}", false, nil, true},
+
+ // outside method set of a generic type
+ {"var x T[int]; type T[P any] struct{}; func (*T[P]) f() {}", false, nil, true},
+
+ // recursive generic types; see golang/go#52715
+ {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (N[P]) f() {}", true, []int{0, 0}, true},
+ {"var a T[int]; type ( T[P any] struct { *N[P] }; N[P any] struct { *T[P] } ); func (T[P]) f() {}", true, []int{0}, false},
+ }
+
+ for _, test := range tests {
+ pkg, err := pkgFor("test", "package p;"+test.src, nil)
+ if err != nil {
+ t.Errorf("%s: incorrect test case: %s", test.src, err)
+ continue
+ }
+
+ obj := pkg.Scope().Lookup("a")
+ if obj == nil {
+ if obj = pkg.Scope().Lookup("x"); obj == nil {
+ t.Errorf("%s: incorrect test case - no object a or x", test.src)
+ continue
+ }
+ }
+
+ f, index, indirect := LookupFieldOrMethod(obj.Type(), obj.Name() == "a", pkg, "f")
+ if (f != nil) != test.found {
+ if f == nil {
+ t.Errorf("%s: got no object; want one", test.src)
+ } else {
+ t.Errorf("%s: got object = %v; want none", test.src, f)
+ }
+ }
+ if !sameSlice(index, test.index) {
+ t.Errorf("%s: got index = %v; want %v", test.src, index, test.index)
+ }
+ if indirect != test.indirect {
+ t.Errorf("%s: got indirect = %v; want %v", test.src, indirect, test.indirect)
+ }
+ }
+}
+
+// Test for golang/go#52715
+func TestLookupFieldOrMethod_RecursiveGeneric(t *testing.T) {
+ const src = `
+package pkg
+
+type Tree[T any] struct {
+ *Node[T]
+}
+
+func (*Tree[R]) N(r R) R { return r }
+
+type Node[T any] struct {
+ *Tree[T]
+}
+
+type Instance = *Tree[int]
+`
+
+ f, err := parseSrc("foo.go", src)
+ if err != nil {
+ panic(err)
+ }
+ pkg := NewPackage("pkg", f.PkgName.Value)
+ if err := NewChecker(nil, pkg, nil).Files([]*syntax.File{f}); err != nil {
+ panic(err)
+ }
+
+ T := pkg.Scope().Lookup("Instance").Type()
+ _, _, _ = LookupFieldOrMethod(T, false, pkg, "M") // verify that LookupFieldOrMethod terminates
+}
+
+func sameSlice(a, b []int) bool {
+ if len(a) != len(b) {
+ return false
+ }
+ for i, x := range a {
+ if x != b[i] {
+ return false
+ }
+ }
+ return true
+}
+
+// TestScopeLookupParent ensures that (*Scope).LookupParent returns
+// the correct result at various positions within the source.
+func TestScopeLookupParent(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ var info Info
+ makePkg := func(path, src string) {
+ f, err := parseSrc(path, src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ imports[path], err = conf.Check(path, []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+ }
+
+ makePkg("lib", "package lib; var X int")
+ // Each /*name=kind:line*/ comment makes the test look up the
+ // name at that point and checks that it resolves to a decl of
+ // the specified kind and line number. "undef" means undefined.
+ mainSrc := `
+/*lib=pkgname:5*/ /*X=var:1*/ /*Pi=const:8*/ /*T=typename:9*/ /*Y=var:10*/ /*F=func:12*/
+package main
+
+import "lib"
+import . "lib"
+
+const Pi = 3.1415
+type T struct{}
+var Y, _ = lib.X, X
+
+func F(){
+ const pi, e = 3.1415, /*pi=undef*/ 2.71828 /*pi=const:13*/ /*e=const:13*/
+ type /*t=undef*/ t /*t=typename:14*/ *t
+ print(Y) /*Y=var:10*/
+ x, Y := Y, /*x=undef*/ /*Y=var:10*/ Pi /*x=var:16*/ /*Y=var:16*/ ; _ = x; _ = Y
+ var F = /*F=func:12*/ F /*F=var:17*/ ; _ = F
+
+ var a []int
+ for i, x := range a /*i=undef*/ /*x=var:16*/ { _ = i; _ = x }
+
+ var i interface{}
+ switch y := i.(type) { /*y=undef*/
+ case /*y=undef*/ int /*y=var:23*/ :
+ case float32, /*y=undef*/ float64 /*y=var:23*/ :
+ default /*y=var:23*/:
+ println(y)
+ }
+ /*y=undef*/
+
+ switch int := i.(type) {
+ case /*int=typename:0*/ int /*int=var:31*/ :
+ println(int)
+ default /*int=var:31*/ :
+ }
+}
+/*main=undef*/
+`
+
+ info.Uses = make(map[*syntax.Name]Object)
+ makePkg("main", mainSrc)
+ mainScope := imports["main"].Scope()
+
+ rx := regexp.MustCompile(`^/\*(\w*)=([\w:]*)\*/$`)
+
+ base := syntax.NewFileBase("main")
+ syntax.CommentsDo(strings.NewReader(mainSrc), func(line, col uint, text string) {
+ pos := syntax.MakePos(base, line, col)
+
+ // Syntax errors are not comments.
+ if text[0] != '/' {
+ t.Errorf("%s: %s", pos, text)
+ return
+ }
+
+ // Parse the assertion in the comment.
+ m := rx.FindStringSubmatch(text)
+ if m == nil {
+ t.Errorf("%s: bad comment: %s", pos, text)
+ return
+ }
+ name, want := m[1], m[2]
+
+ // Look up the name in the innermost enclosing scope.
+ inner := mainScope.Innermost(pos)
+ if inner == nil {
+ t.Errorf("%s: at %s: can't find innermost scope", pos, text)
+ return
+ }
+ got := "undef"
+ if _, obj := inner.LookupParent(name, pos); obj != nil {
+ kind := strings.ToLower(strings.TrimPrefix(reflect.TypeOf(obj).String(), "*types2."))
+ got = fmt.Sprintf("%s:%d", kind, obj.Pos().Line())
+ }
+ if got != want {
+ t.Errorf("%s: at %s: %s resolved to %s, want %s", pos, text, name, got, want)
+ }
+ })
+
+ // Check that for each referring identifier,
+ // a lookup of its name on the innermost
+ // enclosing scope returns the correct object.
+
+ for id, wantObj := range info.Uses {
+ inner := mainScope.Innermost(id.Pos())
+ if inner == nil {
+ t.Errorf("%s: can't find innermost scope enclosing %q", id.Pos(), id.Value)
+ continue
+ }
+
+ // Exclude selectors and qualified identifiers---lexical
+ // refs only. (Ideally, we'd see if the AST parent is a
+ // SelectorExpr, but that requires PathEnclosingInterval
+ // from golang.org/x/tools/go/ast/astutil.)
+ if id.Value == "X" {
+ continue
+ }
+
+ _, gotObj := inner.LookupParent(id.Value, id.Pos())
+ if gotObj != wantObj {
+ t.Errorf("%s: got %v, want %v", id.Pos(), gotObj, wantObj)
+ continue
+ }
+ }
+}
+
+var nopos syntax.Pos
+
+// newDefined creates a new defined type named T with the given underlying type.
+func newDefined(underlying Type) *Named {
+ tname := NewTypeName(nopos, nil, "T", nil)
+ return NewNamed(tname, underlying, nil)
+}
+
+func TestConvertibleTo(t *testing.T) {
+ for _, test := range []struct {
+ v, t Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], Typ[Float32], true},
+ {Typ[Int], Typ[String], true},
+ {newDefined(Typ[Int]), Typ[Int], true},
+ {newDefined(new(Struct)), new(Struct), true},
+ {newDefined(Typ[Int]), new(Struct), false},
+ {Typ[UntypedInt], Typ[Int], true},
+ {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Int], 10)), true},
+ {NewSlice(Typ[Int]), NewArray(Typ[Int], 10), false},
+ {NewSlice(Typ[Int]), NewPointer(NewArray(Typ[Uint], 10)), false},
+ // Untyped string values are not permitted by the spec, so the behavior below is undefined.
+ {Typ[UntypedString], Typ[String], true},
+ } {
+ if got := ConvertibleTo(test.v, test.t); got != test.want {
+ t.Errorf("ConvertibleTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want)
+ }
+ }
+}
+
+func TestAssignableTo(t *testing.T) {
+ for _, test := range []struct {
+ v, t Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], Typ[Float32], false},
+ {newDefined(Typ[Int]), Typ[Int], false},
+ {newDefined(new(Struct)), new(Struct), true},
+ {Typ[UntypedBool], Typ[Bool], true},
+ {Typ[UntypedString], Typ[Bool], false},
+ // Neither untyped string nor untyped numeric assignments arise during
+ // normal type checking, so the below behavior is technically undefined by
+ // the spec.
+ {Typ[UntypedString], Typ[String], true},
+ {Typ[UntypedInt], Typ[Int], true},
+ } {
+ if got := AssignableTo(test.v, test.t); got != test.want {
+ t.Errorf("AssignableTo(%v, %v) = %t, want %t", test.v, test.t, got, test.want)
+ }
+ }
+}
+
+func TestIdentical(t *testing.T) {
+ // For each test, we compare the types of objects X and Y in the source.
+ tests := []struct {
+ src string
+ want bool
+ }{
+ // Basic types.
+ {"var X int; var Y int", true},
+ {"var X int; var Y string", false},
+
+ // TODO: add more tests for complex types.
+
+ // Named types.
+ {"type X int; type Y int", false},
+
+ // Aliases.
+ {"type X = int; type Y = int", true},
+
+ // Functions.
+ {`func X(int) string { return "" }; func Y(int) string { return "" }`, true},
+ {`func X() string { return "" }; func Y(int) string { return "" }`, false},
+ {`func X(int) string { return "" }; func Y(int) {}`, false},
+
+ // Generic functions. Type parameters should be considered identical modulo
+ // renaming. See also issue #49722.
+ {`func X[P ~int](){}; func Y[Q ~int]() {}`, true},
+ {`func X[P1 any, P2 ~*P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, true},
+ {`func X[P1 any, P2 ~[]P1](){}; func Y[Q1 any, Q2 ~*Q1]() {}`, false},
+ {`func X[P ~int](P){}; func Y[Q ~int](Q) {}`, true},
+ {`func X[P ~string](P){}; func Y[Q ~int](Q) {}`, false},
+ {`func X[P ~int]([]P){}; func Y[Q ~int]([]Q) {}`, true},
+ }
+
+ for _, test := range tests {
+ pkg, err := pkgFor("test", "package p;"+test.src, nil)
+ if err != nil {
+ t.Errorf("%s: incorrect test case: %s", test.src, err)
+ continue
+ }
+ X := pkg.Scope().Lookup("X")
+ Y := pkg.Scope().Lookup("Y")
+ if X == nil || Y == nil {
+ t.Fatal("test must declare both X and Y")
+ }
+ if got := Identical(X.Type(), Y.Type()); got != test.want {
+ t.Errorf("Identical(%s, %s) = %t, want %t", X.Type(), Y.Type(), got, test.want)
+ }
+ }
+}
+
+func TestIdentical_issue15173(t *testing.T) {
+ // Identical should allow nil arguments and be symmetric.
+ for _, test := range []struct {
+ x, y Type
+ want bool
+ }{
+ {Typ[Int], Typ[Int], true},
+ {Typ[Int], nil, false},
+ {nil, Typ[Int], false},
+ {nil, nil, true},
+ } {
+ if got := Identical(test.x, test.y); got != test.want {
+ t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got)
+ }
+ }
+}
+
+func TestIdenticalUnions(t *testing.T) {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ myInt := NewNamed(tname, Typ[Int], nil)
+ tmap := map[string]*Term{
+ "int": NewTerm(false, Typ[Int]),
+ "~int": NewTerm(true, Typ[Int]),
+ "string": NewTerm(false, Typ[String]),
+ "~string": NewTerm(true, Typ[String]),
+ "myInt": NewTerm(false, myInt),
+ }
+ makeUnion := func(s string) *Union {
+ parts := strings.Split(s, "|")
+ var terms []*Term
+ for _, p := range parts {
+ term := tmap[p]
+ if term == nil {
+ t.Fatalf("missing term %q", p)
+ }
+ terms = append(terms, term)
+ }
+ return NewUnion(terms)
+ }
+ for _, test := range []struct {
+ x, y string
+ want bool
+ }{
+ // These tests are just sanity checks. The tests for type sets and
+ // interfaces provide much more test coverage.
+ {"int|~int", "~int", true},
+ {"myInt|~int", "~int", true},
+ {"int|string", "string|int", true},
+ {"int|int|string", "string|int", true},
+ {"myInt|string", "int|string", false},
+ } {
+ x := makeUnion(test.x)
+ y := makeUnion(test.y)
+ if got := Identical(x, y); got != test.want {
+ t.Errorf("Identical(%v, %v) = %t", test.x, test.y, got)
+ }
+ }
+}
+
+func TestIssue15305(t *testing.T) {
+ const src = "package p; func f() int16; var _ = f(undef)"
+ f, err := parseSrc("issue15305.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{
+ Error: func(err error) {}, // allow errors
+ }
+ info := &Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ }
+ conf.Check("p", []*syntax.File{f}, info) // ignore result
+ for e, tv := range info.Types {
+ if _, ok := e.(*syntax.CallExpr); ok {
+ if tv.Type != Typ[Int16] {
+ t.Errorf("CallExpr has type %v, want int16", tv.Type)
+ }
+ return
+ }
+ }
+ t.Errorf("CallExpr has no type")
+}
+
+// TestCompositeLitTypes verifies that Info.Types registers the correct
+// types for composite literal expressions and composite literal type
+// expressions.
+func TestCompositeLitTypes(t *testing.T) {
+ for _, test := range []struct {
+ lit, typ string
+ }{
+ {`[16]byte{}`, `[16]byte`},
+ {`[...]byte{}`, `[0]byte`}, // test for issue #14092
+ {`[...]int{1, 2, 3}`, `[3]int`}, // test for issue #14092
+ {`[...]int{90: 0, 98: 1, 2}`, `[100]int`}, // test for issue #14092
+ {`[]int{}`, `[]int`},
+ {`map[string]bool{"foo": true}`, `map[string]bool`},
+ {`struct{}{}`, `struct{}`},
+ {`struct{x, y int; z complex128}{}`, `struct{x int; y int; z complex128}`},
+ } {
+ f, err := parseSrc(test.lit, "package p; var _ = "+test.lit)
+ if err != nil {
+ t.Fatalf("%s: %v", test.lit, err)
+ }
+
+ info := &Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ }
+ if _, err = new(Config).Check("p", []*syntax.File{f}, info); err != nil {
+ t.Fatalf("%s: %v", test.lit, err)
+ }
+
+ cmptype := func(x syntax.Expr, want string) {
+ tv, ok := info.Types[x]
+ if !ok {
+ t.Errorf("%s: no Types entry found", test.lit)
+ return
+ }
+ if tv.Type == nil {
+ t.Errorf("%s: type is nil", test.lit)
+ return
+ }
+ if got := tv.Type.String(); got != want {
+ t.Errorf("%s: got %v, want %s", test.lit, got, want)
+ }
+ }
+
+ // test type of composite literal expression
+ rhs := f.DeclList[0].(*syntax.VarDecl).Values
+ cmptype(rhs, test.typ)
+
+ // test type of composite literal type expression
+ cmptype(rhs.(*syntax.CompositeLit).Type, test.typ)
+ }
+}
+
+// TestObjectParents verifies that objects have parent scopes or not
+// as specified by the Object interface.
+func TestObjectParents(t *testing.T) {
+ const src = `
+package p
+
+const C = 0
+
+type T1 struct {
+ a, b int
+ T2
+}
+
+type T2 interface {
+ im1()
+ im2()
+}
+
+func (T1) m1() {}
+func (*T1) m2() {}
+
+func f(x int) { y := x; print(y) }
+`
+
+ f, err := parseSrc("src", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ info := &Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ if _, err = new(Config).Check("p", []*syntax.File{f}, info); err != nil {
+ t.Fatal(err)
+ }
+
+ for ident, obj := range info.Defs {
+ if obj == nil {
+ // only package names and implicit vars have a nil object
+ // (in this test we only need to handle the package name)
+ if ident.Value != "p" {
+ t.Errorf("%v has nil object", ident)
+ }
+ continue
+ }
+
+ // struct fields, type-associated and interface methods
+ // have no parent scope
+ wantParent := true
+ switch obj := obj.(type) {
+ case *Var:
+ if obj.IsField() {
+ wantParent = false
+ }
+ case *Func:
+ if obj.Type().(*Signature).Recv() != nil { // method
+ wantParent = false
+ }
+ }
+
+ gotParent := obj.Parent() != nil
+ switch {
+ case gotParent && !wantParent:
+ t.Errorf("%v: want no parent, got %s", ident, obj.Parent())
+ case !gotParent && wantParent:
+ t.Errorf("%v: no parent found", ident)
+ }
+ }
+}
+
+// TestFailedImport tests that we don't get follow-on errors
+// elsewhere in a package due to failing to import a package.
+func TestFailedImport(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src = `
+package p
+
+import foo "go/types/thisdirectorymustnotexistotherwisethistestmayfail/foo" // should only see an error here
+
+const c = foo.C
+type T = foo.T
+var v T = c
+func f(x T) T { return foo.F(x) }
+`
+ f, err := parseSrc("src", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ files := []*syntax.File{f}
+
+ // type-check using all possible importers
+ for _, compiler := range []string{"gc", "gccgo", "source"} {
+ errcount := 0
+ conf := Config{
+ Error: func(err error) {
+ // we should only see the import error
+ if errcount > 0 || !strings.Contains(err.Error(), "could not import") {
+ t.Errorf("for %s importer, got unexpected error: %v", compiler, err)
+ }
+ errcount++
+ },
+ //Importer: importer.For(compiler, nil),
+ }
+
+ info := &Info{
+ Uses: make(map[*syntax.Name]Object),
+ }
+ pkg, _ := conf.Check("p", files, info)
+ if pkg == nil {
+ t.Errorf("for %s importer, type-checking failed to return a package", compiler)
+ continue
+ }
+
+ imports := pkg.Imports()
+ if len(imports) != 1 {
+ t.Errorf("for %s importer, got %d imports, want 1", compiler, len(imports))
+ continue
+ }
+ imp := imports[0]
+ if imp.Name() != "foo" {
+ t.Errorf(`for %s importer, got %q, want "foo"`, compiler, imp.Name())
+ continue
+ }
+
+ // verify that all uses of foo refer to the imported package foo (imp)
+ for ident, obj := range info.Uses {
+ if ident.Value == "foo" {
+ if obj, ok := obj.(*PkgName); ok {
+ if obj.Imported() != imp {
+ t.Errorf("%s resolved to %v; want %v", ident.Value, obj.Imported(), imp)
+ }
+ } else {
+ t.Errorf("%s resolved to %v; want package name", ident.Value, obj)
+ }
+ }
+ }
+ }
+}
+
+func TestInstantiate(t *testing.T) {
+	// eventually we'd like more tests, but this is a start
+ const src = "package p; type T[P any] *T[P]"
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type T should have one type parameter
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+ if n := T.TypeParams().Len(); n != 1 {
+ t.Fatalf("expected 1 type parameter; found %d", n)
+ }
+
+ // instantiation should succeed (no endless recursion)
+ // even with a nil *Checker
+ res, err := Instantiate(nil, T, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // instantiated type should point to itself
+ if p := res.Underlying().(*Pointer).Elem(); p != res {
+ t.Fatalf("unexpected result type: %s points to %s", res, p)
+ }
+}
+
+func TestInstantiateErrors(t *testing.T) {
+ tests := []struct {
+ src string // by convention, T must be the type being instantiated
+ targs []Type
+ wantAt int // -1 indicates no error
+ }{
+ {"type T[P interface{~string}] int", []Type{Typ[Int]}, 0},
+ {"type T[P1 interface{int}, P2 interface{~string}] int", []Type{Typ[Int], Typ[Int]}, 1},
+ {"type T[P1 any, P2 interface{~[]P1}] int", []Type{Typ[Int], NewSlice(Typ[String])}, 1},
+ {"type T[P1 interface{~[]P2}, P2 any] int", []Type{NewSlice(Typ[String]), Typ[Int]}, 0},
+ }
+
+ for _, test := range tests {
+ src := "package p; " + test.src
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ T := pkg.Scope().Lookup("T").Type().(*Named)
+
+ _, err = Instantiate(nil, T, test.targs, true)
+ if err == nil {
+ t.Fatalf("Instantiate(%v, %v) returned nil error, want non-nil", T, test.targs)
+ }
+
+ var argErr *ArgumentError
+ if !errors.As(err, &argErr) {
+ t.Fatalf("Instantiate(%v, %v): error is not an *ArgumentError", T, test.targs)
+ }
+
+ if argErr.Index != test.wantAt {
+			t.Errorf("Instantiate(%v, %v): error at index %d, want index %d", T, test.targs, argErr.Index, test.wantAt)
+ }
+ }
+}
+
+func TestArgumentErrorUnwrapping(t *testing.T) {
+ var err error = &ArgumentError{
+ Index: 1,
+ Err: Error{Msg: "test"},
+ }
+ var e Error
+ if !errors.As(err, &e) {
+ t.Fatalf("error %v does not wrap types.Error", err)
+ }
+ if e.Msg != "test" {
+ t.Errorf("e.Msg = %q, want %q", e.Msg, "test")
+ }
+}
+
+func TestInstanceIdentity(t *testing.T) {
+ imports := make(testImporter)
+ conf := Config{Importer: imports}
+ makePkg := func(src string) {
+ f, err := parseSrc("", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ name := f.PkgName.Value
+ pkg, err := conf.Check(name, []*syntax.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ imports[name] = pkg
+ }
+ makePkg(`package lib; type T[P any] struct{}`)
+ makePkg(`package a; import "lib"; var A lib.T[int]`)
+ makePkg(`package b; import "lib"; var B lib.T[int]`)
+ a := imports["a"].Scope().Lookup("A")
+ b := imports["b"].Scope().Lookup("B")
+ if !Identical(a.Type(), b.Type()) {
+ t.Errorf("mismatching types: a.A: %s, b.B: %s", a.Type(), b.Type())
+ }
+}
+
+// TestInstantiatedObjects verifies properties of instantiated objects.
+func TestInstantiatedObjects(t *testing.T) {
+ const src = `
+package p
+
+type T[P any] struct {
+ field P
+}
+
+func (recv *T[Q]) concreteMethod() {}
+
+type FT[P any] func(ftp P) (ftrp P)
+
+func F[P any](fp P) (frp P){ return }
+
+type I[P any] interface {
+ interfaceMethod(P)
+}
+
+var (
+ t T[int]
+ ft FT[int]
+ f = F[int]
+ i I[int]
+)
+`
+ info := &Info{
+ Defs: make(map[*syntax.Name]Object),
+ }
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{}
+ pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ lookup := func(name string) Type { return pkg.Scope().Lookup(name).Type() }
+ tests := []struct {
+ ident string
+ obj Object
+ }{
+ {"field", lookup("t").Underlying().(*Struct).Field(0)},
+ {"concreteMethod", lookup("t").(*Named).Method(0)},
+ {"recv", lookup("t").(*Named).Method(0).Type().(*Signature).Recv()},
+ {"ftp", lookup("ft").Underlying().(*Signature).Params().At(0)},
+ {"ftrp", lookup("ft").Underlying().(*Signature).Results().At(0)},
+ {"fp", lookup("f").(*Signature).Params().At(0)},
+ {"frp", lookup("f").(*Signature).Results().At(0)},
+ {"interfaceMethod", lookup("i").Underlying().(*Interface).Method(0)},
+ }
+
+ // Collect all identifiers by name.
+ idents := make(map[string][]*syntax.Name)
+ syntax.Inspect(f, func(n syntax.Node) bool {
+ if id, ok := n.(*syntax.Name); ok {
+ idents[id.Value] = append(idents[id.Value], id)
+ }
+ return true
+ })
+
+ for _, test := range tests {
+ test := test
+ t.Run(test.ident, func(t *testing.T) {
+ if got := len(idents[test.ident]); got != 1 {
+ t.Fatalf("found %d identifiers named %s, want 1", got, test.ident)
+ }
+ ident := idents[test.ident][0]
+ def := info.Defs[ident]
+ if def == test.obj {
+ t.Fatalf("info.Defs[%s] contains the test object", test.ident)
+ }
+ if def.Pkg() != test.obj.Pkg() {
+ t.Errorf("Pkg() = %v, want %v", def.Pkg(), test.obj.Pkg())
+ }
+ if def.Name() != test.obj.Name() {
+ t.Errorf("Name() = %v, want %v", def.Name(), test.obj.Name())
+ }
+ if def.Pos() != test.obj.Pos() {
+ t.Errorf("Pos() = %v, want %v", def.Pos(), test.obj.Pos())
+ }
+ if def.Parent() != test.obj.Parent() {
+ t.Fatalf("Parent() = %v, want %v", def.Parent(), test.obj.Parent())
+ }
+ if def.Exported() != test.obj.Exported() {
+ t.Fatalf("Exported() = %v, want %v", def.Exported(), test.obj.Exported())
+ }
+ if def.Id() != test.obj.Id() {
+ t.Fatalf("Id() = %v, want %v", def.Id(), test.obj.Id())
+ }
+ // String and Type are expected to differ.
+ })
+ }
+}
+
+func TestImplements(t *testing.T) {
+ const src = `
+package p
+
+type EmptyIface interface{}
+
+type I interface {
+ m()
+}
+
+type C interface {
+ m()
+ ~int
+}
+
+type Integer interface{
+ int8 | int16 | int32 | int64
+}
+
+type EmptyTypeSet interface{
+ Integer
+ ~string
+}
+
+type N1 int
+func (N1) m() {}
+
+type N2 int
+func (*N2) m() {}
+
+type N3 int
+func (N3) m(int) {}
+
+type N4 string
+func (N4) m()
+
+type Bad Bad // invalid type
+`
+
+ f, err := parseSrc("p.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ conf := Config{Error: func(error) {}}
+ pkg, _ := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+
+ lookup := func(tname string) Type { return pkg.Scope().Lookup(tname).Type() }
+ var (
+ EmptyIface = lookup("EmptyIface").Underlying().(*Interface)
+ I = lookup("I").(*Named)
+ II = I.Underlying().(*Interface)
+ C = lookup("C").(*Named)
+ CI = C.Underlying().(*Interface)
+ Integer = lookup("Integer").Underlying().(*Interface)
+ EmptyTypeSet = lookup("EmptyTypeSet").Underlying().(*Interface)
+ N1 = lookup("N1")
+ N1p = NewPointer(N1)
+ N2 = lookup("N2")
+ N2p = NewPointer(N2)
+ N3 = lookup("N3")
+ N4 = lookup("N4")
+ Bad = lookup("Bad")
+ )
+
+ tests := []struct {
+ V Type
+ T *Interface
+ want bool
+ }{
+ {I, II, true},
+ {I, CI, false},
+ {C, II, true},
+ {C, CI, true},
+ {Typ[Int8], Integer, true},
+ {Typ[Int64], Integer, true},
+ {Typ[String], Integer, false},
+ {EmptyTypeSet, II, true},
+ {EmptyTypeSet, EmptyTypeSet, true},
+ {Typ[Int], EmptyTypeSet, false},
+ {N1, II, true},
+ {N1, CI, true},
+ {N1p, II, true},
+ {N1p, CI, false},
+ {N2, II, false},
+ {N2, CI, false},
+ {N2p, II, true},
+ {N2p, CI, false},
+ {N3, II, false},
+ {N3, CI, false},
+ {N4, II, true},
+ {N4, CI, false},
+ {Bad, II, false},
+ {Bad, CI, false},
+ {Bad, EmptyIface, true},
+ }
+
+ for _, test := range tests {
+ if got := Implements(test.V, test.T); got != test.want {
+ t.Errorf("Implements(%s, %s) = %t, want %t", test.V, test.T, got, test.want)
+ }
+
+ // The type assertion x.(T) is valid if T is an interface or if T implements the type of x.
+ // The assertion is never valid if T is a bad type.
+ V := test.T
+ T := test.V
+ want := false
+ if _, ok := T.Underlying().(*Interface); (ok || Implements(T, V)) && T != Bad {
+ want = true
+ }
+ if got := AssertableTo(V, T); got != want {
+ t.Errorf("AssertableTo(%s, %s) = %t, want %t", V, T, got, want)
+ }
+ }
+}
+
+func TestMissingMethodAlternative(t *testing.T) {
+ const src = `
+package p
+type T interface {
+ m()
+}
+
+type V0 struct{}
+func (V0) m() {}
+
+type V1 struct{}
+
+type V2 struct{}
+func (V2) m() int
+
+type V3 struct{}
+func (*V3) m()
+
+type V4 struct{}
+func (V4) M()
+`
+
+ pkg, err := pkgFor("p.go", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ T := pkg.Scope().Lookup("T").Type().Underlying().(*Interface)
+ lookup := func(name string) (*Func, bool) {
+ return MissingMethod(pkg.Scope().Lookup(name).Type(), T, true)
+ }
+
+ // V0 has method m with correct signature. Should not report wrongType.
+ method, wrongType := lookup("V0")
+ if method != nil || wrongType {
+ t.Fatalf("V0: got method = %v, wrongType = %v", method, wrongType)
+ }
+
+ checkMissingMethod := func(tname string, reportWrongType bool) {
+ method, wrongType := lookup(tname)
+ if method == nil || method.Name() != "m" || wrongType != reportWrongType {
+ t.Fatalf("%s: got method = %v, wrongType = %v", tname, method, wrongType)
+ }
+ }
+
+ // V1 has no method m. Should not report wrongType.
+ checkMissingMethod("V1", false)
+
+ // V2 has method m with wrong signature type (ignoring receiver). Should report wrongType.
+ checkMissingMethod("V2", true)
+
+ // V3 has no method m but it exists on *V3. Should report wrongType.
+ checkMissingMethod("V3", true)
+
+ // V4 has no method m but has M. Should not report wrongType.
+ checkMissingMethod("V4", false)
+}
diff --git a/src/cmd/compile/internal/types2/array.go b/src/cmd/compile/internal/types2/array.go
new file mode 100644
index 0000000..502d49b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/array.go
@@ -0,0 +1,25 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// An Array represents an array type.
+type Array struct {
+ len int64
+ elem Type
+}
+
+// NewArray returns a new array type for the given element type and length.
+// A negative length indicates an unknown length.
+func NewArray(elem Type, len int64) *Array { return &Array{len: len, elem: elem} }
+
+// Len returns the length of array a.
+// A negative result indicates an unknown length.
+func (a *Array) Len() int64 { return a.len }
+
+// Elem returns element type of array a.
+func (a *Array) Elem() Type { return a.elem }
+
+func (a *Array) Underlying() Type { return a }
+func (a *Array) String() string { return TypeString(a, nil) }
diff --git a/src/cmd/compile/internal/types2/assignments.go b/src/cmd/compile/internal/types2/assignments.go
new file mode 100644
index 0000000..d88b037
--- /dev/null
+++ b/src/cmd/compile/internal/types2/assignments.go
@@ -0,0 +1,535 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements initialization and assignment checks.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "strings"
+)
+
+// assignment reports whether x can be assigned to a variable of type T,
+// if necessary by attempting to convert untyped values to the appropriate
+// type. context describes the context in which the assignment takes place.
+// Use T == nil to indicate assignment to an untyped blank identifier.
+// x.mode is set to invalid if the assignment failed.
+func (check *Checker) assignment(x *operand, T Type, context string) {
+ check.singleValue(x)
+
+ switch x.mode {
+ case invalid:
+ return // error reported before
+ case constant_, variable, mapindex, value, nilvalue, commaok, commaerr:
+ // ok
+ default:
+ // we may get here because of other problems (issue #39634, crash 12)
+ check.errorf(x, "cannot assign %s to %s in %s", x, T, context)
+ return
+ }
+
+ if isUntyped(x.typ) {
+ target := T
+ // spec: "If an untyped constant is assigned to a variable of interface
+ // type or the blank identifier, the constant is first converted to type
+ // bool, rune, int, float64, complex128 or string respectively, depending
+ // on whether the value is a boolean, rune, integer, floating-point,
+ // complex, or string constant."
+ if x.isNil() {
+ if T == nil {
+ check.errorf(x, "use of untyped nil in %s", context)
+ x.mode = invalid
+ return
+ }
+ } else if T == nil || IsInterface(T) && !isTypeParam(T) {
+ target = Default(x.typ)
+ }
+ newType, val, code := check.implicitTypeAndValue(x, target)
+ if code != 0 {
+ msg := check.sprintf("cannot use %s as %s value in %s", x, target, context)
+ switch code {
+ case _TruncatedFloat:
+ msg += " (truncated)"
+ case _NumericOverflow:
+ msg += " (overflows)"
+ }
+ check.error(x, msg)
+ x.mode = invalid
+ return
+ }
+ if val != nil {
+ x.val = val
+ check.updateExprVal(x.expr, val)
+ }
+ if newType != x.typ {
+ x.typ = newType
+ check.updateExprType(x.expr, newType, false)
+ }
+ }
+ // x.typ is typed
+
+ // A generic (non-instantiated) function value cannot be assigned to a variable.
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ check.errorf(x, "cannot use generic function %s without instantiation in %s", x, context)
+ }
+
+ // spec: "If a left-hand side is the blank identifier, any typed or
+ // non-constant value except for the predeclared identifier nil may
+ // be assigned to it."
+ if T == nil {
+ return
+ }
+
+ reason := ""
+ if ok, _ := x.assignableTo(check, T, &reason); !ok {
+ if check.conf.CompilerErrorMessages {
+ if reason != "" {
+ check.errorf(x, "cannot use %s as type %s in %s:\n\t%s", x, T, context, reason)
+ } else {
+ check.errorf(x, "cannot use %s as type %s in %s", x, T, context)
+ }
+ } else {
+ if reason != "" {
+ check.errorf(x, "cannot use %s as %s value in %s: %s", x, T, context, reason)
+ } else {
+ check.errorf(x, "cannot use %s as %s value in %s", x, T, context)
+ }
+ }
+ x.mode = invalid
+ }
+}
+
+func (check *Checker) initConst(lhs *Const, x *operand) {
+ if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ // rhs must be a constant
+ if x.mode != constant_ {
+ check.errorf(x, "%s is not constant", x)
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ return
+ }
+ assert(isConstType(x.typ))
+
+ // If the lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ lhs.typ = x.typ
+ }
+
+ check.assignment(x, lhs.typ, "constant declaration")
+ if x.mode == invalid {
+ return
+ }
+
+ lhs.val = x.val
+}
+
+func (check *Checker) initVar(lhs *Var, x *operand, context string) Type {
+ if x.mode == invalid || x.typ == Typ[Invalid] || lhs.typ == Typ[Invalid] {
+ if lhs.typ == nil {
+ lhs.typ = Typ[Invalid]
+ }
+ // Note: This was reverted in go/types (https://golang.org/cl/292751).
+ // TODO(gri): decide what to do (also affects test/run.go exclusion list)
+ lhs.used = true // avoid follow-on "declared but not used" errors
+ return nil
+ }
+
+ // If the lhs doesn't have a type yet, use the type of x.
+ if lhs.typ == nil {
+ typ := x.typ
+ if isUntyped(typ) {
+ // convert untyped types to default types
+ if typ == Typ[UntypedNil] {
+ check.errorf(x, "use of untyped nil in %s", context)
+ lhs.typ = Typ[Invalid]
+ return nil
+ }
+ typ = Default(typ)
+ }
+ lhs.typ = typ
+ }
+
+ check.assignment(x, lhs.typ, context)
+ if x.mode == invalid {
+ lhs.used = true // avoid follow-on "declared but not used" errors
+ return nil
+ }
+
+ return x.typ
+}
+
+func (check *Checker) assignVar(lhs syntax.Expr, x *operand) Type {
+ if x.mode == invalid || x.typ == Typ[Invalid] {
+ check.use(lhs)
+ return nil
+ }
+
+ // Determine if the lhs is a (possibly parenthesized) identifier.
+ ident, _ := unparen(lhs).(*syntax.Name)
+
+ // Don't evaluate lhs if it is the blank identifier.
+ if ident != nil && ident.Value == "_" {
+ check.recordDef(ident, nil)
+ check.assignment(x, nil, "assignment to _ identifier")
+ if x.mode == invalid {
+ return nil
+ }
+ return x.typ
+ }
+
+ // If the lhs is an identifier denoting a variable v, this assignment
+ // is not a 'use' of v. Remember current value of v.used and restore
+ // after evaluating the lhs via check.expr.
+ var v *Var
+ var v_used bool
+ if ident != nil {
+ if obj := check.lookup(ident.Value); obj != nil {
+ // It's ok to mark non-local variables, but ignore variables
+ // from other packages to avoid potential race conditions with
+ // dot-imported variables.
+ if w, _ := obj.(*Var); w != nil && w.pkg == check.pkg {
+ v = w
+ v_used = v.used
+ }
+ }
+ }
+
+ var z operand
+ check.expr(&z, lhs)
+ if v != nil {
+ v.used = v_used // restore v.used
+ }
+
+ if z.mode == invalid || z.typ == Typ[Invalid] {
+ return nil
+ }
+
+ // spec: "Each left-hand side operand must be addressable, a map index
+ // expression, or the blank identifier. Operands may be parenthesized."
+ switch z.mode {
+ case invalid:
+ return nil
+ case variable, mapindex:
+ // ok
+ default:
+ if sel, ok := z.expr.(*syntax.SelectorExpr); ok {
+ var op operand
+ check.expr(&op, sel.X)
+ if op.mode == mapindex {
+ check.errorf(&z, "cannot assign to struct field %s in map", syntax.String(z.expr))
+ return nil
+ }
+ }
+ check.errorf(&z, "cannot assign to %s", &z)
+ return nil
+ }
+
+ check.assignment(x, z.typ, "assignment")
+ if x.mode == invalid {
+ return nil
+ }
+
+ return x.typ
+}
+
+// operandTypes returns the list of types for the given operands.
+func operandTypes(list []*operand) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// varTypes returns the list of types for the given variables.
+func varTypes(list []*Var) (res []Type) {
+ for _, x := range list {
+ res = append(res, x.typ)
+ }
+ return res
+}
+
+// typesSummary returns a string of the form "(t1, t2, ...)" where the
+// ti's are user-friendly string representations for the given types.
+// If variadic is set and the last type is a slice, its string is of
+// the form "...E" where E is the slice's element type.
+func (check *Checker) typesSummary(list []Type, variadic bool) string {
+ var res []string
+ for i, t := range list {
+ var s string
+ switch {
+ case t == nil:
+ fallthrough // should not happen but be cautious
+ case t == Typ[Invalid]:
+ s = "<T>"
+ case isUntyped(t):
+ if isNumeric(t) {
+ // Do not imply a specific type requirement:
+ // "have number, want float64" is better than
+ // "have untyped int, want float64" or
+ // "have int, want float64".
+ s = "number"
+ } else {
+ // If we don't have a number, omit the "untyped" qualifier
+ // for compactness.
+ s = strings.Replace(t.(*Basic).name, "untyped ", "", -1)
+ }
+ case variadic && i == len(list)-1:
+ s = check.sprintf("...%s", t.(*Slice).elem)
+ }
+ if s == "" {
+ s = check.sprintf("%s", t)
+ }
+ res = append(res, s)
+ }
+ return "(" + strings.Join(res, ", ") + ")"
+}
+
+func measure(x int, unit string) string {
+ if x != 1 {
+ unit += "s"
+ }
+ return fmt.Sprintf("%d %s", x, unit)
+}
+
+func (check *Checker) assignError(rhs []syntax.Expr, nvars, nvals int) {
+ vars := measure(nvars, "variable")
+ vals := measure(nvals, "value")
+ rhs0 := rhs[0]
+
+ if len(rhs) == 1 {
+ if call, _ := unparen(rhs0).(*syntax.CallExpr); call != nil {
+ check.errorf(rhs0, "assignment mismatch: %s but %s returns %s", vars, call.Fun, vals)
+ return
+ }
+ }
+ check.errorf(rhs0, "assignment mismatch: %s but %s", vars, vals)
+}
+
+// If returnStmt != nil, initVars is called to type-check the assignment
+// of return expressions, and returnStmt is the return statement.
+func (check *Checker) initVars(lhs []*Var, orig_rhs []syntax.Expr, returnStmt syntax.Stmt) {
+ rhs, commaOk := check.exprList(orig_rhs, len(lhs) == 2 && returnStmt == nil)
+
+ if len(lhs) != len(rhs) {
+ // invalidate lhs
+ for _, obj := range lhs {
+ obj.used = true // avoid declared but not used errors
+ if obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+ }
+ // don't report an error if we already reported one
+ for _, x := range rhs {
+ if x.mode == invalid {
+ return
+ }
+ }
+ if returnStmt != nil {
+ var at poser = returnStmt
+ qualifier := "not enough"
+ if len(rhs) > len(lhs) {
+ at = rhs[len(lhs)].expr // report at first extra value
+ qualifier = "too many"
+ } else if len(rhs) > 0 {
+ at = rhs[len(rhs)-1].expr // report at last value
+ }
+ var err error_
+ err.errorf(at, "%s return values", qualifier)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(rhs), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(lhs), false))
+ check.report(&err)
+ return
+ }
+ if check.conf.CompilerErrorMessages {
+ check.assignError(orig_rhs, len(lhs), len(rhs))
+ } else {
+ check.errorf(rhs[0], "cannot initialize %d variables with %d values", len(lhs), len(rhs))
+ }
+ return
+ }
+
+ context := "assignment"
+ if returnStmt != nil {
+ context = "return statement"
+ }
+
+ if commaOk {
+ var a [2]Type
+ for i := range a {
+ a[i] = check.initVar(lhs[i], rhs[i], context)
+ }
+ check.recordCommaOkTypes(orig_rhs[0], a)
+ return
+ }
+
+ ok := true
+ for i, lhs := range lhs {
+ if check.initVar(lhs, rhs[i], context) == nil {
+ ok = false
+ }
+ }
+
+ // avoid follow-on "declared but not used" errors if any initialization failed
+ if !ok {
+ for _, lhs := range lhs {
+ lhs.used = true
+ }
+ }
+}
+
+func (check *Checker) assignVars(lhs, orig_rhs []syntax.Expr) {
+ rhs, commaOk := check.exprList(orig_rhs, len(lhs) == 2)
+
+ if len(lhs) != len(rhs) {
+ check.use(lhs...)
+ // don't report an error if we already reported one
+ for _, x := range rhs {
+ if x.mode == invalid {
+ return
+ }
+ }
+ if check.conf.CompilerErrorMessages {
+ check.assignError(orig_rhs, len(lhs), len(rhs))
+ } else {
+ check.errorf(rhs[0], "cannot assign %d values to %d variables", len(rhs), len(lhs))
+ }
+ return
+ }
+
+ if commaOk {
+ var a [2]Type
+ for i := range a {
+ a[i] = check.assignVar(lhs[i], rhs[i])
+ }
+ check.recordCommaOkTypes(orig_rhs[0], a)
+ return
+ }
+
+ ok := true
+ for i, lhs := range lhs {
+ if check.assignVar(lhs, rhs[i]) == nil {
+ ok = false
+ }
+ }
+
+ // avoid follow-on "declared but not used" errors if any assignment failed
+ if !ok {
+ // don't call check.use to avoid re-evaluation of the lhs expressions
+ for _, lhs := range lhs {
+ if name, _ := unparen(lhs).(*syntax.Name); name != nil {
+ if obj := check.lookup(name.Value); obj != nil {
+ // see comment in assignVar
+ if v, _ := obj.(*Var); v != nil && v.pkg == check.pkg {
+ v.used = true
+ }
+ }
+ }
+ }
+ }
+}
+
+// unpackExpr unpacks a *syntax.ListExpr into a list of syntax.Expr.
+// Helper introduced for the go/types -> types2 port.
+// TODO(gri) Should find a more efficient solution that doesn't
+// require introduction of a new slice for simple
+// expressions.
+func unpackExpr(x syntax.Expr) []syntax.Expr {
+ if x, _ := x.(*syntax.ListExpr); x != nil {
+ return x.ElemList
+ }
+ if x != nil {
+ return []syntax.Expr{x}
+ }
+ return nil
+}
+
+func (check *Checker) shortVarDecl(pos syntax.Pos, lhs, rhs []syntax.Expr) {
+ top := len(check.delayed)
+ scope := check.scope
+
+ // collect lhs variables
+ seen := make(map[string]bool, len(lhs))
+ lhsVars := make([]*Var, len(lhs))
+ newVars := make([]*Var, 0, len(lhs))
+ hasErr := false
+ for i, lhs := range lhs {
+ ident, _ := lhs.(*syntax.Name)
+ if ident == nil {
+ check.use(lhs)
+ check.errorf(lhs, "non-name %s on left side of :=", lhs)
+ hasErr = true
+ continue
+ }
+
+ name := ident.Value
+ if name != "_" {
+ if seen[name] {
+ check.errorf(lhs, "%s repeated on left side of :=", lhs)
+ hasErr = true
+ continue
+ }
+ seen[name] = true
+ }
+
+ // Use the correct obj if the ident is redeclared. The
+ // variable's scope starts after the declaration; so we
+ // must use Scope.Lookup here and call Scope.Insert
+ // (via check.declare) later.
+ if alt := scope.Lookup(name); alt != nil {
+ check.recordUse(ident, alt)
+ // redeclared object must be a variable
+ if obj, _ := alt.(*Var); obj != nil {
+ lhsVars[i] = obj
+ } else {
+ check.errorf(lhs, "cannot assign to %s", lhs)
+ hasErr = true
+ }
+ continue
+ }
+
+ // declare new variable
+ obj := NewVar(ident.Pos(), check.pkg, name, nil)
+ lhsVars[i] = obj
+ if name != "_" {
+ newVars = append(newVars, obj)
+ }
+ check.recordDef(ident, obj)
+ }
+
+ // create dummy variables where the lhs is invalid
+ for i, obj := range lhsVars {
+ if obj == nil {
+ lhsVars[i] = NewVar(lhs[i].Pos(), check.pkg, "_", nil)
+ }
+ }
+
+ check.initVars(lhsVars, rhs, nil)
+
+ // process function literals in rhs expressions before scope changes
+ check.processDelayed(top)
+
+ if len(newVars) == 0 && !hasErr {
+ check.softErrorf(pos, "no new variables on left side of :=")
+ return
+ }
+
+ // declare new variables
+ // spec: "The scope of a constant or variable identifier declared inside
+ // a function begins at the end of the ConstSpec or VarSpec (ShortVarDecl
+ // for short variable declarations) and ends at the end of the innermost
+ // containing block."
+ scopePos := syntax.EndPos(rhs[len(rhs)-1])
+ for _, obj := range newVars {
+ check.declare(scope, nil, obj, scopePos) // id = nil: recordDef already called
+ }
+}
diff --git a/src/cmd/compile/internal/types2/basic.go b/src/cmd/compile/internal/types2/basic.go
new file mode 100644
index 0000000..2fd973c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/basic.go
@@ -0,0 +1,82 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// BasicKind describes the kind of basic type.
+type BasicKind int
+
+const (
+ Invalid BasicKind = iota // type is invalid
+
+ // predeclared types
+ Bool
+ Int
+ Int8
+ Int16
+ Int32
+ Int64
+ Uint
+ Uint8
+ Uint16
+ Uint32
+ Uint64
+ Uintptr
+ Float32
+ Float64
+ Complex64
+ Complex128
+ String
+ UnsafePointer
+
+ // types for untyped values
+ UntypedBool
+ UntypedInt
+ UntypedRune
+ UntypedFloat
+ UntypedComplex
+ UntypedString
+ UntypedNil
+
+ // aliases
+ Byte = Uint8
+ Rune = Int32
+)
+
+// BasicInfo is a set of flags describing properties of a basic type.
+type BasicInfo int
+
+// Properties of basic types.
+const (
+ IsBoolean BasicInfo = 1 << iota
+ IsInteger
+ IsUnsigned
+ IsFloat
+ IsComplex
+ IsString
+ IsUntyped
+
+ IsOrdered = IsInteger | IsFloat | IsString
+ IsNumeric = IsInteger | IsFloat | IsComplex
+ IsConstType = IsBoolean | IsNumeric | IsString
+)
+
+// A Basic represents a basic type.
+type Basic struct {
+ kind BasicKind
+ info BasicInfo
+ name string
+}
+
+// Kind returns the kind of basic type b.
+func (b *Basic) Kind() BasicKind { return b.kind }
+
+// Info returns information about properties of basic type b.
+func (b *Basic) Info() BasicInfo { return b.info }
+
+// Name returns the name of basic type b.
+func (b *Basic) Name() string { return b.name }
+
+func (b *Basic) Underlying() Type { return b }
+func (b *Basic) String() string { return TypeString(b, nil) }
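
types2 mirrors the exported go/types API, so the BasicKind/BasicInfo flags declared above can be exercised through the importable go/types package. A small sketch of the usual bit-flag membership test, assuming only the standard library:

package main

import (
	"fmt"
	"go/types"
)

func main() {
	// Typ is indexed by BasicKind; Info returns the BasicInfo flag set.
	for _, b := range []*types.Basic{
		types.Typ[types.Int],
		types.Typ[types.Float64],
		types.Typ[types.String],
		types.Typ[types.UntypedBool],
	} {
		info := b.Info()
		fmt.Printf("%-12s numeric=%-5v ordered=%-5v untyped=%v\n",
			b.Name(),
			info&types.IsNumeric != 0, // IsInteger|IsFloat|IsComplex
			info&types.IsOrdered != 0, // IsInteger|IsFloat|IsString
			info&types.IsUntyped != 0,
		)
	}
}
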
diff --git a/src/cmd/compile/internal/types2/builtins.go b/src/cmd/compile/internal/types2/builtins.go
new file mode 100644
index 0000000..428897c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/builtins.go
@@ -0,0 +1,891 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of builtin function calls.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ "go/token"
+)
+
+// builtin type-checks a call to the built-in specified by id and
+// reports whether the call is valid, with *x holding the result;
+// but x.expr is not set. If the call is invalid, the result is
+// false, and *x is undefined.
+//
+func (check *Checker) builtin(x *operand, call *syntax.CallExpr, id builtinId) (_ bool) {
+ // append is the only built-in that permits the use of ... for the last argument
+ bin := predeclaredFuncs[id]
+ if call.HasDots && id != _Append {
+ //check.errorf(call.Ellipsis, invalidOp + "invalid use of ... with built-in %s", bin.name)
+ check.errorf(call, invalidOp+"invalid use of ... with built-in %s", bin.name)
+ check.use(call.ArgList...)
+ return
+ }
+
+ // For len(x) and cap(x) we need to know if x contains any function calls or
+ // receive operations. Save/restore current setting and set hasCallOrRecv to
+ // false for the evaluation of x so that we can check it afterwards.
+ // Note: We must do this _before_ calling exprList because exprList evaluates
+ // all arguments.
+ if id == _Len || id == _Cap {
+ defer func(b bool) {
+ check.hasCallOrRecv = b
+ }(check.hasCallOrRecv)
+ check.hasCallOrRecv = false
+ }
+
+ // determine actual arguments
+ var arg func(*operand, int) // TODO(gri) remove use of arg getter in favor of using xlist directly
+ nargs := len(call.ArgList)
+ switch id {
+ default:
+ // make argument getter
+ xlist, _ := check.exprList(call.ArgList, false)
+ arg = func(x *operand, i int) { *x = *xlist[i] }
+ nargs = len(xlist)
+ // evaluate first argument, if present
+ if nargs > 0 {
+ arg(x, 0)
+ if x.mode == invalid {
+ return
+ }
+ }
+ case _Make, _New, _Offsetof, _Trace:
+ // arguments require special handling
+ }
+
+ // check argument count
+ {
+ msg := ""
+ if nargs < bin.nargs {
+ msg = "not enough"
+ } else if !bin.variadic && nargs > bin.nargs {
+ msg = "too many"
+ }
+ if msg != "" {
+ check.errorf(call, invalidOp+"%s arguments for %v (expected %d, found %d)", msg, call, bin.nargs, nargs)
+ return
+ }
+ }
+
+ switch id {
+ case _Append:
+ // append(s S, x ...T) S, where T is the element type of S
+ // spec: "The variadic function append appends zero or more values x to s of type
+ // S, which must be a slice type, and returns the resulting slice, also of type S.
+ // The values x are passed to a parameter of type ...T where T is the element type
+ // of S and the respective parameter passing rules apply."
+ S := x.typ
+ var T Type
+ if s, _ := coreType(S).(*Slice); s != nil {
+ T = s.elem
+ } else {
+ var cause string
+ switch {
+ case x.isNil():
+ cause = "have untyped nil"
+ case isTypeParam(S):
+ if u := coreType(S); u != nil {
+ cause = check.sprintf("%s has core type %s", x, u)
+ } else {
+ cause = check.sprintf("%s has no core type", x)
+ }
+ default:
+ cause = check.sprintf("have %s", x)
+ }
+ // don't use invalidArg prefix here as it would repeat "argument" in the error message
+ check.errorf(x, "first argument to append must be a slice; %s", cause)
+ return
+ }
+
+ // remember arguments that have been evaluated already
+ alist := []operand{*x}
+
+ // spec: "As a special case, append also accepts a first argument assignable
+ // to type []byte with a second argument of string type followed by ... .
+ // This form appends the bytes of the string."
+ if nargs == 2 && call.HasDots {
+ if ok, _ := x.assignableTo(check, NewSlice(universeByte), nil); ok {
+ arg(x, 1)
+ if x.mode == invalid {
+ return
+ }
+ if t := coreString(x.typ); t != nil && isString(t) {
+ if check.Types != nil {
+ sig := makeSig(S, S, x.typ)
+ sig.variadic = true
+ check.recordBuiltinType(call.Fun, sig)
+ }
+ x.mode = value
+ x.typ = S
+ break
+ }
+ alist = append(alist, *x)
+ // fallthrough
+ }
+ }
+
+ // check general case by creating custom signature
+ sig := makeSig(S, S, NewSlice(T)) // []T required for variadic signature
+ sig.variadic = true
+ var xlist []*operand
+ // convert []operand to []*operand
+ for i := range alist {
+ xlist = append(xlist, &alist[i])
+ }
+ for i := len(alist); i < nargs; i++ {
+ var x operand
+ arg(&x, i)
+ xlist = append(xlist, &x)
+ }
+ check.arguments(call, sig, nil, xlist, nil) // discard result (we know the result type)
+ // ok to continue even if check.arguments reported errors
+
+ x.mode = value
+ x.typ = S
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, sig)
+ }
+
+ case _Cap, _Len:
+ // cap(x)
+ // len(x)
+ mode := invalid
+ var val constant.Value
+ switch t := arrayPtrDeref(under(x.typ)).(type) {
+ case *Basic:
+ if isString(t) && id == _Len {
+ if x.mode == constant_ {
+ mode = constant_
+ val = constant.MakeInt64(int64(len(constant.StringVal(x.val))))
+ } else {
+ mode = value
+ }
+ }
+
+ case *Array:
+ mode = value
+ // spec: "The expressions len(s) and cap(s) are constants
+ // if the type of s is an array or pointer to an array and
+ // the expression s does not contain channel receives or
+ // function calls; in this case s is not evaluated."
+ if !check.hasCallOrRecv {
+ mode = constant_
+ if t.len >= 0 {
+ val = constant.MakeInt64(t.len)
+ } else {
+ val = constant.MakeUnknown()
+ }
+ }
+
+ case *Slice, *Chan:
+ mode = value
+
+ case *Map:
+ if id == _Len {
+ mode = value
+ }
+
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
+ if t.typeSet().underIs(func(t Type) bool {
+ switch t := arrayPtrDeref(t).(type) {
+ case *Basic:
+ if isString(t) && id == _Len {
+ return true
+ }
+ case *Array, *Slice, *Chan:
+ return true
+ case *Map:
+ if id == _Len {
+ return true
+ }
+ }
+ return false
+ }) {
+ mode = value
+ }
+ }
+
+ if mode == invalid && under(x.typ) != Typ[Invalid] {
+ check.errorf(x, invalidArg+"%s for %s", x, bin.name)
+ return
+ }
+
+ // record the signature before changing x.typ
+ if check.Types != nil && mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ))
+ }
+
+ x.mode = mode
+ x.typ = Typ[Int]
+ x.val = val
+
+ case _Close:
+ // close(c)
+ if !underIs(x.typ, func(u Type) bool {
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(x, invalidOp+"cannot close non-channel %s", x)
+ return false
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(x, invalidOp+"cannot close receive-only channel %s", x)
+ return false
+ }
+ return true
+ }) {
+ return
+ }
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, x.typ))
+ }
+
+ case _Complex:
+ // complex(x, y floatT) complexT
+ var y operand
+ arg(&y, 1)
+ if y.mode == invalid {
+ return
+ }
+
+ // convert or check untyped arguments
+ d := 0
+ if isUntyped(x.typ) {
+ d |= 1
+ }
+ if isUntyped(y.typ) {
+ d |= 2
+ }
+ switch d {
+ case 0:
+ // x and y are typed => nothing to do
+ case 1:
+ // only x is untyped => convert to type of y
+ check.convertUntyped(x, y.typ)
+ case 2:
+ // only y is untyped => convert to type of x
+ check.convertUntyped(&y, x.typ)
+ case 3:
+ // x and y are untyped =>
+ // 1) if both are constants, convert them to untyped
+ // floating-point numbers if possible,
+ // 2) if one of them is not constant (possible because
+ // it contains a shift that is yet untyped), convert
+ // both of them to float64 since they must have the
+ // same type to succeed (this will result in an error
+ // because shifts of floats are not permitted)
+ if x.mode == constant_ && y.mode == constant_ {
+ toFloat := func(x *operand) {
+ if isNumeric(x.typ) && constant.Sign(constant.Imag(x.val)) == 0 {
+ x.typ = Typ[UntypedFloat]
+ }
+ }
+ toFloat(x)
+ toFloat(&y)
+ } else {
+ check.convertUntyped(x, Typ[Float64])
+ check.convertUntyped(&y, Typ[Float64])
+ // x and y should be invalid now, but be conservative
+ // and check below
+ }
+ }
+ if x.mode == invalid || y.mode == invalid {
+ return
+ }
+
+ // both argument types must be identical
+ if !Identical(x.typ, y.typ) {
+ check.errorf(x, invalidOp+"%v (mismatched types %s and %s)", call, x.typ, y.typ)
+ return
+ }
+
+ // the argument types must be of floating-point type
+ // (applyTypeFunc never calls f with a type parameter)
+ f := func(typ Type) Type {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
+ switch t.kind {
+ case Float32:
+ return Typ[Complex64]
+ case Float64:
+ return Typ[Complex128]
+ case UntypedFloat:
+ return Typ[UntypedComplex]
+ }
+ }
+ return nil
+ }
+ resTyp := check.applyTypeFunc(f, x, id)
+ if resTyp == nil {
+ check.errorf(x, invalidArg+"arguments have type %s, expected floating-point", x.typ)
+ return
+ }
+
+ // if both arguments are constants, the result is a constant
+ if x.mode == constant_ && y.mode == constant_ {
+ x.val = constant.BinaryOp(constant.ToFloat(x.val), token.ADD, constant.MakeImag(constant.ToFloat(y.val)))
+ } else {
+ x.mode = value
+ }
+
+ if check.Types != nil && x.mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ, x.typ))
+ }
+
+ x.typ = resTyp
+
+ case _Copy:
+ // copy(x, y []T) int
+ dst, _ := coreType(x.typ).(*Slice)
+
+ var y operand
+ arg(&y, 1)
+ if y.mode == invalid {
+ return
+ }
+ src0 := coreString(y.typ)
+ if src0 != nil && isString(src0) {
+ src0 = NewSlice(universeByte)
+ }
+ src, _ := src0.(*Slice)
+
+ if dst == nil || src == nil {
+ check.errorf(x, invalidArg+"copy expects slice arguments; found %s and %s", x, &y)
+ return
+ }
+
+ if !Identical(dst.elem, src.elem) {
+ check.errorf(x, invalidArg+"arguments to copy %s and %s have different element types %s and %s", x, &y, dst.elem, src.elem)
+ return
+ }
+
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Int], x.typ, y.typ))
+ }
+ x.mode = value
+ x.typ = Typ[Int]
+
+ case _Delete:
+ // delete(map_, key)
+ // map_ must be a map type or a type parameter describing map types.
+ // The key cannot be a type parameter for now.
+ map_ := x.typ
+ var key Type
+ if !underIs(map_, func(u Type) bool {
+ map_, _ := u.(*Map)
+ if map_ == nil {
+ check.errorf(x, invalidArg+"%s is not a map", x)
+ return false
+ }
+ if key != nil && !Identical(map_.key, key) {
+ check.errorf(x, invalidArg+"maps of %s must have identical key types", x)
+ return false
+ }
+ key = map_.key
+ return true
+ }) {
+ return
+ }
+
+ arg(x, 1) // k
+ if x.mode == invalid {
+ return
+ }
+
+ check.assignment(x, key, "argument to delete")
+ if x.mode == invalid {
+ return
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, map_, key))
+ }
+
+ case _Imag, _Real:
+ // imag(complexT) floatT
+ // real(complexT) floatT
+
+ // convert or check untyped argument
+ if isUntyped(x.typ) {
+ if x.mode == constant_ {
+ // an untyped constant number can always be considered
+ // as a complex constant
+ if isNumeric(x.typ) {
+ x.typ = Typ[UntypedComplex]
+ }
+ } else {
+ // an untyped non-constant argument may appear if
+ // it contains a (yet untyped non-constant) shift
+ // expression: convert it to complex128 which will
+ // result in an error (shift of complex value)
+ check.convertUntyped(x, Typ[Complex128])
+ // x should be invalid now, but be conservative and check
+ if x.mode == invalid {
+ return
+ }
+ }
+ }
+
+ // the argument must be of complex type
+ // (applyTypeFunc never calls f with a type parameter)
+ f := func(typ Type) Type {
+ assert(!isTypeParam(typ))
+ if t, _ := under(typ).(*Basic); t != nil {
+ switch t.kind {
+ case Complex64:
+ return Typ[Float32]
+ case Complex128:
+ return Typ[Float64]
+ case UntypedComplex:
+ return Typ[UntypedFloat]
+ }
+ }
+ return nil
+ }
+ resTyp := check.applyTypeFunc(f, x, id)
+ if resTyp == nil {
+ check.errorf(x, invalidArg+"argument has type %s, expected complex type", x.typ)
+ return
+ }
+
+ // if the argument is a constant, the result is a constant
+ if x.mode == constant_ {
+ if id == _Real {
+ x.val = constant.Real(x.val)
+ } else {
+ x.val = constant.Imag(x.val)
+ }
+ } else {
+ x.mode = value
+ }
+
+ if check.Types != nil && x.mode != constant_ {
+ check.recordBuiltinType(call.Fun, makeSig(resTyp, x.typ))
+ }
+
+ x.typ = resTyp
+
+ case _Make:
+ // make(T, n)
+ // make(T, n, m)
+ // (no argument evaluated yet)
+ arg0 := call.ArgList[0]
+ T := check.varType(arg0)
+ if T == Typ[Invalid] {
+ return
+ }
+
+ var min int // minimum number of arguments
+ switch coreType(T).(type) {
+ case *Slice:
+ min = 2
+ case *Map, *Chan:
+ min = 1
+ case nil:
+ check.errorf(arg0, invalidArg+"cannot make %s: no core type", arg0)
+ return
+ default:
+ check.errorf(arg0, invalidArg+"cannot make %s; type must be slice, map, or channel", arg0)
+ return
+ }
+ if nargs < min || min+1 < nargs {
+ check.errorf(call, invalidOp+"%v expects %d or %d arguments; found %d", call, min, min+1, nargs)
+ return
+ }
+
+ types := []Type{T}
+ var sizes []int64 // constant integer arguments, if any
+ for _, arg := range call.ArgList[1:] {
+ typ, size := check.index(arg, -1) // ok to continue with typ == Typ[Invalid]
+ types = append(types, typ)
+ if size >= 0 {
+ sizes = append(sizes, size)
+ }
+ }
+ if len(sizes) == 2 && sizes[0] > sizes[1] {
+ check.error(call.ArgList[1], invalidArg+"length and capacity swapped")
+ // safe to continue
+ }
+ x.mode = value
+ x.typ = T
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, types...))
+ }
+
+ case _New:
+ // new(T)
+ // (no argument evaluated yet)
+ T := check.varType(call.ArgList[0])
+ if T == Typ[Invalid] {
+ return
+ }
+
+ x.mode = value
+ x.typ = &Pointer{base: T}
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, T))
+ }
+
+ case _Panic:
+ // panic(x)
+ // record panic call if inside a function with result parameters
+ // (for use in Checker.isTerminating)
+ if check.sig != nil && check.sig.results.Len() > 0 {
+ // function has result parameters
+ p := check.isPanic
+ if p == nil {
+ // allocate lazily
+ p = make(map[*syntax.CallExpr]bool)
+ check.isPanic = p
+ }
+ p[call] = true
+ }
+
+ check.assignment(x, &emptyInterface, "argument to panic")
+ if x.mode == invalid {
+ return
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, &emptyInterface))
+ }
+
+ case _Print, _Println:
+ // print(x, y, ...)
+ // println(x, y, ...)
+ var params []Type
+ if nargs > 0 {
+ params = make([]Type, nargs)
+ for i := 0; i < nargs; i++ {
+ if i > 0 {
+ arg(x, i) // first argument already evaluated
+ }
+ check.assignment(x, nil, "argument to "+predeclaredFuncs[id].name)
+ if x.mode == invalid {
+ // TODO(gri) "use" all arguments?
+ return
+ }
+ params[i] = x.typ
+ }
+ }
+
+ x.mode = novalue
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(nil, params...))
+ }
+
+ case _Recover:
+ // recover() interface{}
+ x.mode = value
+ x.typ = &emptyInterface
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ))
+ }
+
+ case _Add:
+ // unsafe.Add(ptr unsafe.Pointer, len IntegerType) unsafe.Pointer
+ if !check.allowVersion(check.pkg, 1, 17) {
+ check.versionErrorf(call.Fun, "go1.17", "unsafe.Add")
+ return
+ }
+
+ check.assignment(x, Typ[UnsafePointer], "argument to unsafe.Add")
+ if x.mode == invalid {
+ return
+ }
+
+ var y operand
+ arg(&y, 1)
+ if !check.isValidIndex(&y, "length", true) {
+ return
+ }
+
+ x.mode = value
+ x.typ = Typ[UnsafePointer]
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, x.typ, y.typ))
+ }
+
+ case _Alignof:
+ // unsafe.Alignof(x T) uintptr
+ check.assignment(x, nil, "argument to unsafe.Alignof")
+ if x.mode == invalid {
+ return
+ }
+
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.alignof(x.typ))
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Offsetof:
+ // unsafe.Offsetof(x T) uintptr, where x must be a selector
+ // (no argument evaluated yet)
+ arg0 := call.ArgList[0]
+ selx, _ := unparen(arg0).(*syntax.SelectorExpr)
+ if selx == nil {
+ check.errorf(arg0, invalidArg+"%s is not a selector expression", arg0)
+ check.use(arg0)
+ return
+ }
+
+ check.expr(x, selx.X)
+ if x.mode == invalid {
+ return
+ }
+
+ base := derefStructPtr(x.typ)
+ sel := selx.Sel.Value
+ obj, index, indirect := LookupFieldOrMethod(base, false, check.pkg, sel)
+ switch obj.(type) {
+ case nil:
+ check.errorf(x, invalidArg+"%s has no single field %s", base, sel)
+ return
+ case *Func:
+ // TODO(gri) Using derefStructPtr may result in methods being found
+ // that don't actually exist. An error either way, but the error
+ // message is confusing. See: https://play.golang.org/p/al75v23kUy ,
+ // but go/types reports: "invalid argument: x.m is a method value".
+ check.errorf(arg0, invalidArg+"%s is a method value", arg0)
+ return
+ }
+ if indirect {
+ check.errorf(x, invalidArg+"field %s is embedded via a pointer in %s", sel, base)
+ return
+ }
+
+ // TODO(gri) Should we pass x.typ instead of base (and have indirect report if derefStructPtr indirected)?
+ check.recordSelection(selx, FieldVal, base, obj, index, false)
+
+ // record the selector expression (was bug - issue #47895)
+ {
+ mode := value
+ if x.mode == variable || indirect {
+ mode = variable
+ }
+ check.record(&operand{mode, selx, obj.Type(), nil, 0})
+ }
+
+ // The field offset is considered a variable even if the field is declared before
+ // the part of the struct which is variable-sized. This makes both the rules
+ // simpler and also permits (or at least doesn't prevent) a compiler from re-
+ // arranging struct fields if it wanted to.
+ if hasVarSize(base) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], obj.Type()))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.offsetof(base, index))
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Sizeof:
+ // unsafe.Sizeof(x T) uintptr
+ check.assignment(x, nil, "argument to unsafe.Sizeof")
+ if x.mode == invalid {
+ return
+ }
+
+ if hasVarSize(x.typ) {
+ x.mode = value
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(Typ[Uintptr], x.typ))
+ }
+ } else {
+ x.mode = constant_
+ x.val = constant.MakeInt64(check.conf.sizeof(x.typ))
+ // result is constant - no need to record signature
+ }
+ x.typ = Typ[Uintptr]
+
+ case _Slice:
+ // unsafe.Slice(ptr *T, len IntegerType) []T
+ if !check.allowVersion(check.pkg, 1, 17) {
+ check.versionErrorf(call.Fun, "go1.17", "unsafe.Slice")
+ return
+ }
+
+ typ, _ := under(x.typ).(*Pointer)
+ if typ == nil {
+ check.errorf(x, invalidArg+"%s is not a pointer", x)
+ return
+ }
+
+ var y operand
+ arg(&y, 1)
+ if !check.isValidIndex(&y, "length", false) {
+ return
+ }
+
+ x.mode = value
+ x.typ = NewSlice(typ.base)
+ if check.Types != nil {
+ check.recordBuiltinType(call.Fun, makeSig(x.typ, typ, y.typ))
+ }
+
+ case _Assert:
+ // assert(pred) causes a typechecker error if pred is false.
+ // The result of assert is the value of pred if there is no error.
+ // Note: assert is only available in self-test mode.
+ if x.mode != constant_ || !isBoolean(x.typ) {
+ check.errorf(x, invalidArg+"%s is not a boolean constant", x)
+ return
+ }
+ if x.val.Kind() != constant.Bool {
+ check.errorf(x, "internal error: value of %s should be a boolean constant", x)
+ return
+ }
+ if !constant.BoolVal(x.val) {
+ check.errorf(call, "%v failed", call)
+ // compile-time assertion failure - safe to continue
+ }
+ // result is constant - no need to record signature
+
+ case _Trace:
+ // trace(x, y, z, ...) dumps the positions, expressions, and
+ // values of its arguments. The result of trace is the value
+ // of the first argument.
+ // Note: trace is only available in self-test mode.
+ // (no argument evaluated yet)
+ if nargs == 0 {
+ check.dump("%v: trace() without arguments", posFor(call))
+ x.mode = novalue
+ break
+ }
+ var t operand
+ x1 := x
+ for _, arg := range call.ArgList {
+ check.rawExpr(x1, arg, nil, false) // permit trace for types, e.g.: new(trace(T))
+ check.dump("%v: %s", posFor(x1), x1)
+ x1 = &t // use incoming x only for first argument
+ }
+ // trace is only available in test mode - no need to record signature
+
+ default:
+ unreachable()
+ }
+
+ return true
+}
+
+// hasVarSize reports whether the size of type t is variable due to type parameters.
+func hasVarSize(t Type) bool {
+ switch u := under(t).(type) {
+ case *Array:
+ return hasVarSize(u.elem)
+ case *Struct:
+ for _, f := range u.fields {
+ if hasVarSize(f.typ) {
+ return true
+ }
+ }
+ case *Interface:
+ return isTypeParam(t)
+ case *Named, *Union:
+ unreachable()
+ }
+ return false
+}
+
+// applyTypeFunc applies f to x. If x is a type parameter,
+// the result is a type parameter constrained by a new
+// interface bound. The type bounds for that interface
+// are computed by applying f to each of the type bounds
+// of x. If any of these applications of f return nil,
+// applyTypeFunc returns nil.
+// If x is not a type parameter, the result is f(x).
+func (check *Checker) applyTypeFunc(f func(Type) Type, x *operand, id builtinId) Type {
+ if tp, _ := x.typ.(*TypeParam); tp != nil {
+ // Test if t satisfies the requirements for the argument
+ // type and collect possible result types at the same time.
+ var terms []*Term
+ if !tp.is(func(t *term) bool {
+ if t == nil {
+ return false
+ }
+ if r := f(t.typ); r != nil {
+ terms = append(terms, NewTerm(t.tilde, r))
+ return true
+ }
+ return false
+ }) {
+ return nil
+ }
+
+ // We can type-check this fine but we're introducing a synthetic
+ // type parameter for the result. It's not clear what the API
+ // implications are here. Report an error for 1.18 but continue
+ // type-checking.
+ check.softErrorf(x, "%s not supported as argument to %s for go1.18 (see issue #50937)", x, predeclaredFuncs[id].name)
+
+ // Construct a suitable new type parameter for the result type.
+ // The type parameter is placed in the current package so export/import
+ // works as expected.
+ tpar := NewTypeName(nopos, check.pkg, tp.obj.name, nil)
+ ptyp := check.newTypeParam(tpar, NewInterfaceType(nil, []Type{NewUnion(terms)})) // assigns type to tpar as a side-effect
+ ptyp.index = tp.index
+
+ return ptyp
+ }
+
+ return f(x.typ)
+}
+
+// makeSig makes a signature for the given argument and result types.
+// Default types are used for untyped arguments, and res may be nil.
+func makeSig(res Type, args ...Type) *Signature {
+ list := make([]*Var, len(args))
+ for i, param := range args {
+ list[i] = NewVar(nopos, nil, "", Default(param))
+ }
+ params := NewTuple(list...)
+ var result *Tuple
+ if res != nil {
+ assert(!isUntyped(res))
+ result = NewTuple(NewVar(nopos, nil, "", res))
+ }
+ return &Signature{params: params, results: result}
+}
+
+// arrayPtrDeref returns A if typ is of the form *A and A is an array;
+// otherwise it returns typ.
+func arrayPtrDeref(typ Type) Type {
+ if p, ok := typ.(*Pointer); ok {
+ if a, _ := under(p.base).(*Array); a != nil {
+ return a
+ }
+ }
+ return typ
+}
+
+// unparen returns e with any enclosing parentheses stripped.
+func unparen(e syntax.Expr) syntax.Expr {
+ for {
+ p, ok := e.(*syntax.ParenExpr)
+ if !ok {
+ return e
+ }
+ e = p.X
+ }
+}
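
Two of the rules implemented above, shown as ordinary user code (a sketch, nothing beyond the language itself): the append special case for a []byte destination with a string second argument followed by ..., and make's check that a constant length does not exceed a constant capacity.

package main

import "fmt"

func main() {
	// Special case handled in the _Append branch: a []byte destination with
	// a string second argument followed by ... appends the string's bytes.
	b := []byte("go")
	b = append(b, "pher"...)

	// _Make accepts min or min+1 size arguments for slices and rejects
	// constant sizes where length > capacity ("length and capacity swapped").
	s := make([]int, 2, 5)
	// bad := make([]int, 5, 2) // rejected at compile time

	fmt.Println(string(b), len(s), cap(s))
}
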
diff --git a/src/cmd/compile/internal/types2/builtins_test.go b/src/cmd/compile/internal/types2/builtins_test.go
new file mode 100644
index 0000000..e07a779
--- /dev/null
+++ b/src/cmd/compile/internal/types2/builtins_test.go
@@ -0,0 +1,241 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var builtinCalls = []struct {
+ name, src, sig string
+}{
+ {"append", `var s []int; _ = append(s)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = append(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []int; _ = (append)(s, 0)`, `func([]int, ...int) []int`},
+ {"append", `var s []byte; _ = ((append))(s, 0)`, `func([]byte, ...byte) []byte`},
+ {"append", `var s []byte; _ = append(s, "foo"...)`, `func([]byte, string...) []byte`},
+ {"append", `type T []byte; var s T; var str string; _ = append(s, str...)`, `func(p.T, string...) p.T`},
+ {"append", `type T []byte; type U string; var s T; var str U; _ = append(s, str...)`, `func(p.T, p.U...) p.T`},
+
+ {"cap", `var s [10]int; _ = cap(s)`, `invalid type`}, // constant
+ {"cap", `var s [10]int; _ = cap(&s)`, `invalid type`}, // constant
+ {"cap", `var s []int64; _ = cap(s)`, `func([]int64) int`},
+ {"cap", `var c chan<-bool; _ = cap(c)`, `func(chan<- bool) int`},
+ {"cap", `type S []byte; var s S; _ = cap(s)`, `func(p.S) int`},
+ {"cap", `var s P; _ = cap(s)`, `func(P) int`},
+
+ {"len", `_ = len("foo")`, `invalid type`}, // constant
+ {"len", `var s string; _ = len(s)`, `func(string) int`},
+ {"len", `var s [10]int; _ = len(s)`, `invalid type`}, // constant
+ {"len", `var s [10]int; _ = len(&s)`, `invalid type`}, // constant
+ {"len", `var s []int64; _ = len(s)`, `func([]int64) int`},
+ {"len", `var c chan<-bool; _ = len(c)`, `func(chan<- bool) int`},
+ {"len", `var m map[string]float32; _ = len(m)`, `func(map[string]float32) int`},
+ {"len", `type S []byte; var s S; _ = len(s)`, `func(p.S) int`},
+ {"len", `var s P; _ = len(s)`, `func(P) int`},
+
+ {"close", `var c chan int; close(c)`, `func(chan int)`},
+ {"close", `var c chan<- chan string; close(c)`, `func(chan<- chan string)`},
+
+ {"complex", `_ = complex(1, 0)`, `invalid type`}, // constant
+ {"complex", `var re float32; _ = complex(re, 1.0)`, `func(float32, float32) complex64`},
+ {"complex", `var im float64; _ = complex(1, im)`, `func(float64, float64) complex128`},
+ {"complex", `type F32 float32; var re, im F32; _ = complex(re, im)`, `func(p.F32, p.F32) complex64`},
+ {"complex", `type F64 float64; var re, im F64; _ = complex(re, im)`, `func(p.F64, p.F64) complex128`},
+
+ {"copy", `var src, dst []byte; copy(dst, src)`, `func([]byte, []byte) int`},
+ {"copy", `type T [][]int; var src, dst T; _ = copy(dst, src)`, `func(p.T, p.T) int`},
+ {"copy", `var src string; var dst []byte; copy(dst, src)`, `func([]byte, string) int`},
+ {"copy", `type T string; type U []byte; var src T; var dst U; copy(dst, src)`, `func(p.U, p.T) int`},
+ {"copy", `var dst []byte; copy(dst, "hello")`, `func([]byte, string) int`},
+
+ {"delete", `var m map[string]bool; delete(m, "foo")`, `func(map[string]bool, string)`},
+ {"delete", `type (K string; V int); var m map[K]V; delete(m, "foo")`, `func(map[p.K]p.V, p.K)`},
+
+ {"imag", `_ = imag(1i)`, `invalid type`}, // constant
+ {"imag", `var c complex64; _ = imag(c)`, `func(complex64) float32`},
+ {"imag", `var c complex128; _ = imag(c)`, `func(complex128) float64`},
+ {"imag", `type C64 complex64; var c C64; _ = imag(c)`, `func(p.C64) float32`},
+ {"imag", `type C128 complex128; var c C128; _ = imag(c)`, `func(p.C128) float64`},
+
+ {"real", `_ = real(1i)`, `invalid type`}, // constant
+ {"real", `var c complex64; _ = real(c)`, `func(complex64) float32`},
+ {"real", `var c complex128; _ = real(c)`, `func(complex128) float64`},
+ {"real", `type C64 complex64; var c C64; _ = real(c)`, `func(p.C64) float32`},
+ {"real", `type C128 complex128; var c C128; _ = real(c)`, `func(p.C128) float64`},
+
+ {"make", `_ = make([]int, 10)`, `func([]int, int) []int`},
+ {"make", `type T []byte; _ = make(T, 10, 20)`, `func(p.T, int, int) p.T`},
+
+ // issue #37349
+ {"make", ` _ = make([]int, 0 )`, `func([]int, int) []int`},
+ {"make", `var l int; _ = make([]int, l )`, `func([]int, int) []int`},
+ {"make", ` _ = make([]int, 0, 0)`, `func([]int, int, int) []int`},
+ {"make", `var l int; _ = make([]int, l, 0)`, `func([]int, int, int) []int`},
+ {"make", `var c int; _ = make([]int, 0, c)`, `func([]int, int, int) []int`},
+ {"make", `var l, c int; _ = make([]int, l, c)`, `func([]int, int, int) []int`},
+
+ // issue #37393
+ {"make", ` _ = make([]int , 0 )`, `func([]int, int) []int`},
+ {"make", `var l byte ; _ = make([]int8 , l )`, `func([]int8, byte) []int8`},
+ {"make", ` _ = make([]int16 , 0, 0)`, `func([]int16, int, int) []int16`},
+ {"make", `var l int16; _ = make([]string , l, 0)`, `func([]string, int16, int) []string`},
+ {"make", `var c int32; _ = make([]float64 , 0, c)`, `func([]float64, int, int32) []float64`},
+ {"make", `var l, c uint ; _ = make([]complex128, l, c)`, `func([]complex128, uint, uint) []complex128`},
+
+ // issue #45667
+ {"make", `const l uint = 1; _ = make([]int, l)`, `func([]int, uint) []int`},
+
+ {"new", `_ = new(int)`, `func(int) *int`},
+ {"new", `type T struct{}; _ = new(T)`, `func(p.T) *p.T`},
+
+ {"panic", `panic(0)`, `func(interface{})`},
+ {"panic", `panic("foo")`, `func(interface{})`},
+
+ {"print", `print()`, `func()`},
+ {"print", `print(0)`, `func(int)`},
+ {"print", `print(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"println", `println()`, `func()`},
+ {"println", `println(0)`, `func(int)`},
+ {"println", `println(1, 2.0, "foo", true)`, `func(int, float64, string, bool)`},
+
+ {"recover", `recover()`, `func() interface{}`},
+ {"recover", `_ = recover()`, `func() interface{}`},
+
+ {"Add", `var p unsafe.Pointer; _ = unsafe.Add(p, -1.0)`, `func(unsafe.Pointer, int) unsafe.Pointer`},
+ {"Add", `var p unsafe.Pointer; var n uintptr; _ = unsafe.Add(p, n)`, `func(unsafe.Pointer, uintptr) unsafe.Pointer`},
+ {"Add", `_ = unsafe.Add(nil, 0)`, `func(unsafe.Pointer, int) unsafe.Pointer`},
+
+ {"Alignof", `_ = unsafe.Alignof(0)`, `invalid type`}, // constant
+ {"Alignof", `var x struct{}; _ = unsafe.Alignof(x)`, `invalid type`}, // constant
+ {"Alignof", `var x P; _ = unsafe.Alignof(x)`, `func(P) uintptr`},
+
+ {"Offsetof", `var x struct{f bool}; _ = unsafe.Offsetof(x.f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f bool}; _ = unsafe.Offsetof((&x).f)`, `invalid type`}, // constant
+ {"Offsetof", `var x struct{_ int; f P}; _ = unsafe.Offsetof((&x).f)`, `func(P) uintptr`},
+
+ {"Sizeof", `_ = unsafe.Sizeof(0)`, `invalid type`}, // constant
+ {"Sizeof", `var x struct{}; _ = unsafe.Sizeof(x)`, `invalid type`}, // constant
+ {"Sizeof", `var x P; _ = unsafe.Sizeof(x)`, `func(P) uintptr`},
+
+ {"Slice", `var p *int; _ = unsafe.Slice(p, 1)`, `func(*int, int) []int`},
+ {"Slice", `var p *byte; var n uintptr; _ = unsafe.Slice(p, n)`, `func(*byte, uintptr) []byte`},
+
+ {"assert", `assert(true)`, `invalid type`}, // constant
+ {"assert", `type B bool; const pred B = 1 < 2; assert(pred)`, `invalid type`}, // constant
+
+ // no tests for trace since it produces output as a side-effect
+}
+
+func TestBuiltinSignatures(t *testing.T) {
+ DefPredeclaredTestFuncs()
+
+ seen := map[string]bool{"trace": true} // no test for trace built-in; add it manually
+ for _, call := range builtinCalls {
+ testBuiltinSignature(t, call.name, call.src, call.sig)
+ seen[call.name] = true
+ }
+
+ // make sure we didn't miss one
+ for _, name := range Universe.Names() {
+ if _, ok := Universe.Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for %s", name)
+ }
+ }
+ for _, name := range Unsafe.Scope().Names() {
+ if _, ok := Unsafe.Scope().Lookup(name).(*Builtin); ok && !seen[name] {
+ t.Errorf("missing test for unsafe.%s", name)
+ }
+ }
+}
+
+func parseGenericSrc(path, src string) (*syntax.File, error) {
+ errh := func(error) {} // dummy error handler so that parsing continues in presence of errors
+ return syntax.Parse(syntax.NewFileBase(path), strings.NewReader(src), errh, nil, syntax.AllowGenerics)
+}
+
+func testBuiltinSignature(t *testing.T, name, src0, want string) {
+ src := fmt.Sprintf(`package p; import "unsafe"; type _ unsafe.Pointer /* use unsafe */; func _[P ~[]byte]() { %s }`, src0)
+ f, err := parseGenericSrc("", src)
+ if err != nil {
+ t.Errorf("%s: %s", src0, err)
+ return
+ }
+
+ conf := Config{Importer: defaultImporter()}
+ uses := make(map[*syntax.Name]Object)
+ types := make(map[syntax.Expr]TypeAndValue)
+ _, err = conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Uses: uses, Types: types})
+ if err != nil {
+ t.Errorf("%s: %s", src0, err)
+ return
+ }
+
+ // find called function
+ n := 0
+ var fun syntax.Expr
+ for x := range types {
+ if call, _ := x.(*syntax.CallExpr); call != nil {
+ fun = call.Fun
+ n++
+ }
+ }
+ if n != 1 {
+ t.Errorf("%s: got %d CallExprs; want 1", src0, n)
+ return
+ }
+
+ // check recorded types for fun and descendants (may be parenthesized)
+ for {
+ // the recorded type for the built-in must match the wanted signature
+ typ := types[fun].Type
+ if typ == nil {
+ t.Errorf("%s: no type recorded for %s", src0, syntax.String(fun))
+ return
+ }
+ if got := typ.String(); got != want {
+ t.Errorf("%s: got type %s; want %s", src0, got, want)
+ return
+ }
+
+ // called function must be a (possibly parenthesized, qualified)
+ // identifier denoting the expected built-in
+ switch p := fun.(type) {
+ case *syntax.Name:
+ obj := uses[p]
+ if obj == nil {
+ t.Errorf("%s: no object found for %s", src0, p.Value)
+ return
+ }
+ bin, _ := obj.(*Builtin)
+ if bin == nil {
+ t.Errorf("%s: %s does not denote a built-in", src0, p.Value)
+ return
+ }
+ if bin.Name() != name {
+ t.Errorf("%s: got built-in %s; want %s", src0, bin.Name(), name)
+ return
+ }
+ return // we're done
+
+ case *syntax.ParenExpr:
+ fun = p.X // unpack
+
+ case *syntax.SelectorExpr:
+ // built-in from package unsafe - ignore details
+ return // we're done
+
+ default:
+ t.Errorf("%s: invalid function call", src0)
+ return
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/call.go b/src/cmd/compile/internal/types2/call.go
new file mode 100644
index 0000000..6cc30a7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/call.go
@@ -0,0 +1,701 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of call and selector expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strings"
+ "unicode"
+)
+
+// funcInst type-checks a function instantiation inst and returns the result in x.
+// The operand x must be the evaluation of inst.X and its type must be a signature.
+func (check *Checker) funcInst(x *operand, inst *syntax.IndexExpr) {
+ if !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(inst.Pos(), "go1.18", "function instantiation")
+ }
+
+ xlist := unpackExpr(inst.Index)
+ targs := check.typeList(xlist)
+ if targs == nil {
+ x.mode = invalid
+ x.expr = inst
+ return
+ }
+ assert(len(targs) == len(xlist))
+
+ // check number of type arguments (got) vs number of type parameters (want)
+ sig := x.typ.(*Signature)
+ got, want := len(targs), sig.TypeParams().Len()
+ if !useConstraintTypeInference && got != want || got > want {
+ check.errorf(xlist[got-1], "got %d type arguments but want %d", got, want)
+ x.mode = invalid
+ x.expr = inst
+ return
+ }
+
+ if got < want {
+ targs = check.infer(inst.Pos(), sig.TypeParams().list(), targs, nil, nil)
+ if targs == nil {
+ // error was already reported
+ x.mode = invalid
+ x.expr = inst
+ return
+ }
+ got = len(targs)
+ }
+ assert(got == want)
+
+ // instantiate function signature
+ res := check.instantiateSignature(x.Pos(), sig, targs, xlist)
+ assert(res.TypeParams().Len() == 0) // signature is not generic anymore
+ check.recordInstance(inst.X, targs, res)
+ x.typ = res
+ x.mode = value
+ x.expr = inst
+}
+
+func (check *Checker) instantiateSignature(pos syntax.Pos, typ *Signature, targs []Type, xlist []syntax.Expr) (res *Signature) {
+ assert(check != nil)
+ assert(len(targs) == typ.TypeParams().Len())
+
+ if check.conf.Trace {
+ check.trace(pos, "-- instantiating %s with %s", typ, targs)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s (under = %s)", res, res.Underlying())
+ }()
+ }
+
+ inst := check.instance(pos, typ, targs, check.bestContext(nil)).(*Signature)
+ assert(len(xlist) <= len(targs))
+
+ // verify instantiation lazily (was issue #50450)
+ check.later(func() {
+ tparams := typ.TypeParams().list()
+ if i, err := check.verify(pos, tparams, targs); err != nil {
+ // best position for error reporting
+ pos := pos
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ check.softErrorf(pos, "%s", err)
+ } else {
+ check.mono.recordInstance(check.pkg, pos, tparams, targs, xlist)
+ }
+ })
+
+ return inst
+}
+
+func (check *Checker) callExpr(x *operand, call *syntax.CallExpr) exprKind {
+ var inst *syntax.IndexExpr // function instantiation, if any
+ if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
+ if check.indexExpr(x, iexpr) {
+ // Delay function instantiation to argument checking,
+ // where we combine type and value arguments for type
+ // inference.
+ assert(x.mode == value)
+ inst = iexpr
+ }
+ x.expr = iexpr
+ check.record(x)
+ } else {
+ check.exprOrType(x, call.Fun, true)
+ }
+ // x.typ may be generic
+
+ switch x.mode {
+ case invalid:
+ check.use(call.ArgList...)
+ x.expr = call
+ return statement
+
+ case typexpr:
+ // conversion
+ check.nonGeneric(x)
+ if x.mode == invalid {
+ return conversion
+ }
+ T := x.typ
+ x.mode = invalid
+ switch n := len(call.ArgList); n {
+ case 0:
+ check.errorf(call, "missing argument in conversion to %s", T)
+ case 1:
+ check.expr(x, call.ArgList[0])
+ if x.mode != invalid {
+ if t, _ := under(T).(*Interface); t != nil && !isTypeParam(T) {
+ if !t.IsMethodSet() {
+ check.errorf(call, "cannot use interface %s in conversion (contains specific type constraints or is comparable)", T)
+ break
+ }
+ }
+ if call.HasDots {
+ check.errorf(call.ArgList[0], "invalid use of ... in type conversion to %s", T)
+ break
+ }
+ check.conversion(x, T)
+ }
+ default:
+ check.use(call.ArgList...)
+ check.errorf(call.ArgList[n-1], "too many arguments in conversion to %s", T)
+ }
+ x.expr = call
+ return conversion
+
+ case builtin:
+ // no need to check for non-genericity here
+ id := x.id
+ if !check.builtin(x, call, id) {
+ x.mode = invalid
+ }
+ x.expr = call
+ // a non-constant result implies a function call
+ if x.mode != invalid && x.mode != constant_ {
+ check.hasCallOrRecv = true
+ }
+ return predeclaredFuncs[id].kind
+ }
+
+ // ordinary function/method call
+ // signature may be generic
+ cgocall := x.mode == cgofunc
+
+ // a type parameter may be "called" if all types have the same signature
+ sig, _ := coreType(x.typ).(*Signature)
+ if sig == nil {
+ check.errorf(x, invalidOp+"cannot call non-function %s", x)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+
+ // evaluate type arguments, if any
+ var xlist []syntax.Expr
+ var targs []Type
+ if inst != nil {
+ xlist = unpackExpr(inst.Index)
+ targs = check.typeList(xlist)
+ if targs == nil {
+ check.use(call.ArgList...)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+ assert(len(targs) == len(xlist))
+
+ // check number of type arguments (got) vs number of type parameters (want)
+ got, want := len(targs), sig.TypeParams().Len()
+ if got > want {
+ check.errorf(xlist[want], "got %d type arguments but want %d", got, want)
+ check.use(call.ArgList...)
+ x.mode = invalid
+ x.expr = call
+ return statement
+ }
+ }
+
+ // evaluate arguments
+ args, _ := check.exprList(call.ArgList, false)
+ isGeneric := sig.TypeParams().Len() > 0
+ sig = check.arguments(call, sig, targs, args, xlist)
+
+ if isGeneric && sig.TypeParams().Len() == 0 {
+ // update the recorded type of call.Fun to its instantiated type
+ check.recordTypeAndValue(call.Fun, value, sig, nil)
+ }
+
+ // determine result
+ switch sig.results.Len() {
+ case 0:
+ x.mode = novalue
+ case 1:
+ if cgocall {
+ x.mode = commaerr
+ } else {
+ x.mode = value
+ }
+ x.typ = sig.results.vars[0].typ // unpack tuple
+ default:
+ x.mode = value
+ x.typ = sig.results
+ }
+ x.expr = call
+ check.hasCallOrRecv = true
+
+ // if type inference failed, a parametrized result must be invalidated
+ // (operands cannot have a parametrized type)
+ if x.mode == value && sig.TypeParams().Len() > 0 && isParameterized(sig.TypeParams().list(), x.typ) {
+ x.mode = invalid
+ }
+
+ return statement
+}
+
+func (check *Checker) exprList(elist []syntax.Expr, allowCommaOk bool) (xlist []*operand, commaOk bool) {
+ switch len(elist) {
+ case 0:
+ // nothing to do
+
+ case 1:
+ // single (possibly comma-ok) value, or function returning multiple values
+ e := elist[0]
+ var x operand
+ check.multiExpr(&x, e)
+ if t, ok := x.typ.(*Tuple); ok && x.mode != invalid {
+ // multiple values
+ xlist = make([]*operand, t.Len())
+ for i, v := range t.vars {
+ xlist[i] = &operand{mode: value, expr: e, typ: v.typ}
+ }
+ break
+ }
+
+ // exactly one (possibly invalid or comma-ok) value
+ xlist = []*operand{&x}
+ if allowCommaOk && (x.mode == mapindex || x.mode == commaok || x.mode == commaerr) {
+ x.mode = value
+ xlist = append(xlist, &operand{mode: value, expr: e, typ: Typ[UntypedBool]})
+ commaOk = true
+ }
+
+ default:
+ // multiple (possibly invalid) values
+ xlist = make([]*operand, len(elist))
+ for i, e := range elist {
+ var x operand
+ check.expr(&x, e)
+ xlist[i] = &x
+ }
+ }
+
+ return
+}
+
+// xlist is the list of type argument expressions supplied in the source code.
+func (check *Checker) arguments(call *syntax.CallExpr, sig *Signature, targs []Type, args []*operand, xlist []syntax.Expr) (rsig *Signature) {
+ rsig = sig
+
+ // TODO(gri) try to eliminate this extra verification loop
+ for _, a := range args {
+ switch a.mode {
+ case typexpr:
+ check.errorf(a, "%s used as value", a)
+ return
+ case invalid:
+ return
+ }
+ }
+
+ // Function call argument/parameter count requirements
+ //
+ // | standard call | dotdotdot call |
+ // --------------+------------------+----------------+
+ // standard func | nargs == npars | invalid |
+ // --------------+------------------+----------------+
+ // variadic func | nargs >= npars-1 | nargs == npars |
+ // --------------+------------------+----------------+
+
+ nargs := len(args)
+ npars := sig.params.Len()
+ ddd := call.HasDots
+
+ // set up parameters
+ sigParams := sig.params // adjusted for variadic functions (may be nil for empty parameter lists!)
+ adjusted := false // indicates if sigParams is different from t.params
+ if sig.variadic {
+ if ddd {
+ // variadic_func(a, b, c...)
+ if len(call.ArgList) == 1 && nargs > 1 {
+ // f()... is not permitted if f() is multi-valued
+ //check.errorf(call.Ellipsis, "cannot use ... with %d-valued %s", nargs, call.ArgList[0])
+ check.errorf(call, "cannot use ... with %d-valued %s", nargs, call.ArgList[0])
+ return
+ }
+ } else {
+ // variadic_func(a, b, c)
+ if nargs >= npars-1 {
+ // Create custom parameters for arguments: keep
+ // the first npars-1 parameters and add one for
+ // each argument mapping to the ... parameter.
+ vars := make([]*Var, npars-1) // npars > 0 for variadic functions
+ copy(vars, sig.params.vars)
+ last := sig.params.vars[npars-1]
+ typ := last.typ.(*Slice).elem
+ for len(vars) < nargs {
+ vars = append(vars, NewParam(last.pos, last.pkg, last.name, typ))
+ }
+ sigParams = NewTuple(vars...) // possibly nil!
+ adjusted = true
+ npars = nargs
+ } else {
+ // nargs < npars-1
+ npars-- // for correct error message below
+ }
+ }
+ } else {
+ if ddd {
+ // standard_func(a, b, c...)
+ //check.errorf(call.Ellipsis, "cannot use ... in call to non-variadic %s", call.Fun)
+ check.errorf(call, "cannot use ... in call to non-variadic %s", call.Fun)
+ return
+ }
+ // standard_func(a, b, c)
+ }
+
+ // check argument count
+ if nargs != npars {
+ var at poser = call
+ qualifier := "not enough"
+ if nargs > npars {
+ at = args[npars].expr // report at first extra argument
+ qualifier = "too many"
+ } else if nargs > 0 {
+ at = args[nargs-1].expr // report at last argument
+ }
+ // take care of empty parameter lists represented by nil tuples
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ var err error_
+ err.errorf(at, "%s arguments in call to %s", qualifier, call.Fun)
+ err.errorf(nopos, "have %s", check.typesSummary(operandTypes(args), false))
+ err.errorf(nopos, "want %s", check.typesSummary(varTypes(params), sig.variadic))
+ check.report(&err)
+ return
+ }
+
+ // infer type arguments and instantiate signature if necessary
+ if sig.TypeParams().Len() > 0 {
+ if !check.allowVersion(check.pkg, 1, 18) {
+ if iexpr, _ := call.Fun.(*syntax.IndexExpr); iexpr != nil {
+ check.versionErrorf(iexpr.Pos(), "go1.18", "function instantiation")
+ } else {
+ check.versionErrorf(call.Pos(), "go1.18", "implicit function instantiation")
+ }
+ }
+ targs := check.infer(call.Pos(), sig.TypeParams().list(), targs, sigParams, args)
+ if targs == nil {
+ return // error already reported
+ }
+
+ // compute result signature
+ rsig = check.instantiateSignature(call.Pos(), sig, targs, xlist)
+ assert(rsig.TypeParams().Len() == 0) // signature is not generic anymore
+ check.recordInstance(call.Fun, targs, rsig)
+
+ // Optimization: Only if the parameter list was adjusted do we
+ // need to compute it from the adjusted list; otherwise we can
+ // simply use the result signature's parameter list.
+ if adjusted {
+ sigParams = check.subst(call.Pos(), sigParams, makeSubstMap(sig.TypeParams().list(), targs), nil).(*Tuple)
+ } else {
+ sigParams = rsig.params
+ }
+ }
+
+ // check arguments
+ if len(args) > 0 {
+ context := check.sprintf("argument to %s", call.Fun)
+ for i, a := range args {
+ check.assignment(a, sigParams.vars[i].typ, context)
+ }
+ }
+
+ return
+}
+
+var cgoPrefixes = [...]string{
+ "_Ciconst_",
+ "_Cfconst_",
+ "_Csconst_",
+ "_Ctype_",
+ "_Cvar_", // actually a pointer to the var
+ "_Cfpvar_fp_",
+ "_Cfunc_",
+ "_Cmacro_", // function to evaluate the expanded expression
+}
+
+func (check *Checker) selector(x *operand, e *syntax.SelectorExpr, def *Named) {
+ // these must be declared before the "goto Error" statements
+ var (
+ obj Object
+ index []int
+ indirect bool
+ )
+
+ sel := e.Sel.Value
+ // If the identifier refers to a package, handle everything here
+ // so we don't need a "package" mode for operands: package names
+ // can only appear in qualified identifiers which are mapped to
+ // selector expressions.
+ if ident, ok := e.X.(*syntax.Name); ok {
+ obj := check.lookup(ident.Value)
+ if pname, _ := obj.(*PkgName); pname != nil {
+ assert(pname.pkg == check.pkg)
+ check.recordUse(ident, pname)
+ pname.used = true
+ pkg := pname.imported
+
+ var exp Object
+ funcMode := value
+ if pkg.cgo {
+ // cgo special cases C.malloc: it's
+ // rewritten to _CMalloc and does not
+ // support two-result calls.
+ if sel == "malloc" {
+ sel = "_CMalloc"
+ } else {
+ funcMode = cgofunc
+ }
+ for _, prefix := range cgoPrefixes {
+ // cgo objects are part of the current package (in file
+ // _cgo_gotypes.go). Use regular lookup.
+ _, exp = check.scope.LookupParent(prefix+sel, check.pos)
+ if exp != nil {
+ break
+ }
+ }
+ if exp == nil {
+ check.errorf(e.Sel, "%s not declared by package C", sel)
+ goto Error
+ }
+ check.objDecl(exp, nil)
+ } else {
+ exp = pkg.scope.Lookup(sel)
+ if exp == nil {
+ if !pkg.fake {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(e.Sel, "undefined: %s.%s", pkg.name, sel)
+ } else {
+ check.errorf(e.Sel, "%s not declared by package %s", sel, pkg.name)
+ }
+ }
+ goto Error
+ }
+ if !exp.Exported() {
+ check.errorf(e.Sel, "%s not exported by package %s", sel, pkg.name)
+ // ok to continue
+ }
+ }
+ check.recordUse(e.Sel, exp)
+
+ // Simplified version of the code for *syntax.Names:
+ // - imported objects are always fully initialized
+ switch exp := exp.(type) {
+ case *Const:
+ assert(exp.Val() != nil)
+ x.mode = constant_
+ x.typ = exp.typ
+ x.val = exp.val
+ case *TypeName:
+ x.mode = typexpr
+ x.typ = exp.typ
+ case *Var:
+ x.mode = variable
+ x.typ = exp.typ
+ if pkg.cgo && strings.HasPrefix(exp.name, "_Cvar_") {
+ x.typ = x.typ.(*Pointer).base
+ }
+ case *Func:
+ x.mode = funcMode
+ x.typ = exp.typ
+ if pkg.cgo && strings.HasPrefix(exp.name, "_Cmacro_") {
+ x.mode = value
+ x.typ = x.typ.(*Signature).results.vars[0].typ
+ }
+ case *Builtin:
+ x.mode = builtin
+ x.typ = exp.typ
+ x.id = exp.id
+ default:
+ check.dump("%v: unexpected object %v", posFor(e.Sel), exp)
+ unreachable()
+ }
+ x.expr = e
+ return
+ }
+ }
+
+ check.exprOrType(x, e.X, false)
+ switch x.mode {
+ case typexpr:
+ // don't crash for "type T T.x" (was issue #51509)
+ if def != nil && x.typ == def {
+ check.cycleError([]Object{def.obj})
+ goto Error
+ }
+ case builtin:
+ check.errorf(e.Pos(), "cannot select on %s", x)
+ goto Error
+ case invalid:
+ goto Error
+ }
+
+ obj, index, indirect = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, sel)
+ if obj == nil {
+ // Don't report another error if the underlying type was invalid (issue #49541).
+ if under(x.typ) == Typ[Invalid] {
+ goto Error
+ }
+
+ if index != nil {
+ // TODO(gri) should provide actual type where the conflict happens
+ check.errorf(e.Sel, "ambiguous selector %s.%s", x.expr, sel)
+ goto Error
+ }
+
+ if indirect {
+ check.errorf(e.Sel, "cannot call pointer method %s on %s", sel, x.typ)
+ goto Error
+ }
+
+ var why string
+ if isInterfacePtr(x.typ) {
+ why = check.interfacePtrError(x.typ)
+ } else {
+ why = check.sprintf("type %s has no field or method %s", x.typ, sel)
+ // Check if capitalization of sel matters and provide better error message in that case.
+ // TODO(gri) This code only looks at the first character but LookupFieldOrMethod has an
+ // (internal) mechanism for case-insensitive lookup. Should use that instead.
+ if len(sel) > 0 {
+ var changeCase string
+ if r := rune(sel[0]); unicode.IsUpper(r) {
+ changeCase = string(unicode.ToLower(r)) + sel[1:]
+ } else {
+ changeCase = string(unicode.ToUpper(r)) + sel[1:]
+ }
+ if obj, _, _ = LookupFieldOrMethod(x.typ, x.mode == variable, check.pkg, changeCase); obj != nil {
+ why += ", but does have " + changeCase
+ }
+ }
+ }
+ check.errorf(e.Sel, "%s.%s undefined (%s)", x.expr, sel, why)
+ goto Error
+ }
+
+ // methods may not have a fully set up signature yet
+ if m, _ := obj.(*Func); m != nil {
+ check.objDecl(m, nil)
+ }
+
+ if x.mode == typexpr {
+ // method expression
+ m, _ := obj.(*Func)
+ if m == nil {
+ // TODO(gri) should check if capitalization of sel matters and provide better error message in that case
+ check.errorf(e.Sel, "%s.%s undefined (type %s has no method %s)", x.expr, sel, x.typ, sel)
+ goto Error
+ }
+
+ check.recordSelection(e, MethodExpr, x.typ, m, index, indirect)
+
+ sig := m.typ.(*Signature)
+ if sig.recv == nil {
+ check.error(e, "illegal cycle in method declaration")
+ goto Error
+ }
+
+ // The receiver type becomes the type of the first function
+ // argument of the method expression's function type.
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ // Be consistent about named/unnamed parameters. This is not needed
+ // for type-checking, but the newly constructed signature may appear
+ // in an error message and then have mixed named/unnamed parameters.
+ // (An alternative would be to not print parameter names in errors,
+ // but it's useful to see them; this is cheap and method expressions
+ // are rare.)
+ name := ""
+ if len(params) > 0 && params[0].name != "" {
+ // name needed
+ name = sig.recv.name
+ if name == "" {
+ name = "_"
+ }
+ }
+ params = append([]*Var{NewVar(sig.recv.pos, sig.recv.pkg, name, x.typ)}, params...)
+ x.mode = value
+ x.typ = &Signature{
+ tparams: sig.tparams,
+ params: NewTuple(params...),
+ results: sig.results,
+ variadic: sig.variadic,
+ }
+
+ check.addDeclDep(m)
+
+ } else {
+ // regular selector
+ switch obj := obj.(type) {
+ case *Var:
+ check.recordSelection(e, FieldVal, x.typ, obj, index, indirect)
+ if x.mode == variable || indirect {
+ x.mode = variable
+ } else {
+ x.mode = value
+ }
+ x.typ = obj.typ
+
+ case *Func:
+ // TODO(gri) If we needed to take into account the receiver's
+ // addressability, should we report the type &(x.typ) instead?
+ check.recordSelection(e, MethodVal, x.typ, obj, index, indirect)
+
+ x.mode = value
+
+ // remove receiver
+ sig := *obj.typ.(*Signature)
+ sig.recv = nil
+ x.typ = &sig
+
+ check.addDeclDep(obj)
+
+ default:
+ unreachable()
+ }
+ }
+
+ // everything went well
+ x.expr = e
+ return
+
+Error:
+ x.mode = invalid
+ x.expr = e
+}
+
+// use type-checks each argument.
+// Useful to make sure expressions are evaluated
+// (and variables are "used") in the presence of other errors.
+// The arguments may be nil.
+// TODO(gri) make this accept a []syntax.Expr and use an unpack function when we have a ListExpr?
+func (check *Checker) use(arg ...syntax.Expr) {
+ var x operand
+ for _, e := range arg {
+ switch n := e.(type) {
+ case nil:
+ // some AST fields may be nil (e.g., elements of syntax.SliceExpr.Index)
+ // TODO(gri) can those fields really make it here?
+ continue
+ case *syntax.Name:
+ // don't report an error evaluating blank
+ if n.Value == "_" {
+ continue
+ }
+ case *syntax.ListExpr:
+ check.use(n.ElemList...)
+ continue
+ }
+ check.rawExpr(&x, e, nil, false)
+ }
+}
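
The method-expression branch of selector above builds a signature whose first parameter is the receiver type. A plain-Go illustration (the type and method names are hypothetical, introduced only for this sketch):

package main

import "fmt"

type Point struct{ X, Y int }

func (p Point) Scale(f int) Point { return Point{p.X * f, p.Y * f} }

func main() {
	// Point.Scale is a method expression: the receiver becomes the
	// first parameter, so its type is func(Point, int) Point.
	scale := Point.Scale
	fmt.Printf("%T\n", scale) // func(main.Point, int) main.Point
	fmt.Println(scale(Point{1, 2}, 3))
}
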
diff --git a/src/cmd/compile/internal/types2/chan.go b/src/cmd/compile/internal/types2/chan.go
new file mode 100644
index 0000000..77650df
--- /dev/null
+++ b/src/cmd/compile/internal/types2/chan.go
@@ -0,0 +1,35 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Chan represents a channel type.
+type Chan struct {
+ dir ChanDir
+ elem Type
+}
+
+// A ChanDir value indicates a channel direction.
+type ChanDir int
+
+// The direction of a channel is indicated by one of these constants.
+const (
+ SendRecv ChanDir = iota
+ SendOnly
+ RecvOnly
+)
+
+// NewChan returns a new channel type for the given direction and element type.
+func NewChan(dir ChanDir, elem Type) *Chan {
+ return &Chan{dir: dir, elem: elem}
+}
+
+// Dir returns the direction of channel c.
+func (c *Chan) Dir() ChanDir { return c.dir }
+
+// Elem returns the element type of channel c.
+func (c *Chan) Elem() Type { return c.elem }
+
+func (c *Chan) Underlying() Type { return c }
+func (c *Chan) String() string { return TypeString(c, nil) }
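
The three ChanDir values defined above correspond to the directional channel types of the language; a short sketch (plain Go, no assumptions beyond the spec) of how a bidirectional channel converts to each direction:

package main

import "fmt"

func main() {
	c := make(chan int, 1) // SendRecv: bidirectional

	var send chan<- int = c // SendOnly view of c
	var recv <-chan int = c // RecvOnly view of c

	send <- 42
	fmt.Println(<-recv)
}
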
diff --git a/src/cmd/compile/internal/types2/check.go b/src/cmd/compile/internal/types2/check.go
new file mode 100644
index 0000000..4ec6a7b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/check.go
@@ -0,0 +1,566 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the Check function, which drives type-checking.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+ "go/constant"
+)
+
+var nopos syntax.Pos
+
+// debugging/development support
+const debug = false // leave on during development
+
+// exprInfo stores information about an untyped expression.
+type exprInfo struct {
+ isLhs bool // expression is lhs operand of a shift with delayed type-check
+ mode operandMode
+ typ *Basic
+ val constant.Value // constant value; or nil (if not a constant)
+}
+
+// An environment represents the environment within which an object is
+// type-checked.
+type environment struct {
+ decl *declInfo // package-level declaration whose init expression/function body is checked
+ scope *Scope // top-most scope for lookups
+ pos syntax.Pos // if valid, identifiers are looked up as if at position pos (used by Eval)
+ iota constant.Value // value of iota in a constant declaration; nil otherwise
+ errpos syntax.Pos // if valid, identifier position of a constant with inherited initializer
+ inTParamList bool // set if inside a type parameter list
+ sig *Signature // function signature if inside a function; nil otherwise
+ isPanic map[*syntax.CallExpr]bool // set of panic call expressions (used for termination check)
+ hasLabel bool // set if a function makes use of labels (only ~1% of functions); unused outside functions
+ hasCallOrRecv bool // set if an expression contains a function call or channel receive operation
+}
+
+// lookup looks up name in the current environment and returns the matching object, or nil.
+func (env *environment) lookup(name string) Object {
+ _, obj := env.scope.LookupParent(name, env.pos)
+ return obj
+}
+
+// An importKey identifies an imported package by import path and source directory
+// (directory containing the file containing the import). In practice, the directory
+// may always be the same, or may not matter. Given an (import path, directory), an
+// importer must always return the same package (but given two different import paths,
+// an importer may still return the same package by mapping them to the same package
+// paths).
+type importKey struct {
+ path, dir string
+}
+
+// A dotImportKey describes a dot-imported object in the given scope.
+type dotImportKey struct {
+ scope *Scope
+ name string
+}
+
+// An action describes a (delayed) action.
+type action struct {
+ f func() // action to be executed
+ desc *actionDesc // action description; may be nil, requires debug to be set
+}
+
+// If debug is set, describef sets a printf-formatted description for action a.
+// Otherwise, it is a no-op.
+func (a *action) describef(pos poser, format string, args ...interface{}) {
+ if debug {
+ a.desc = &actionDesc{pos, format, args}
+ }
+}
+
+// An actionDesc provides information on an action.
+// For debugging only.
+type actionDesc struct {
+ pos poser
+ format string
+ args []interface{}
+}
+
+// A Checker maintains the state of the type checker.
+// It must be created with NewChecker.
+type Checker struct {
+ // package information
+ // (initialized by NewChecker, valid for the life-time of checker)
+ conf *Config
+ ctxt *Context // context for de-duplicating instances
+ pkg *Package
+ *Info
+ version version // accepted language version
+ nextID uint64 // unique Id for type parameters (first valid Id is 1)
+ objMap map[Object]*declInfo // maps package-level objects and (non-interface) methods to declaration info
+ impMap map[importKey]*Package // maps (import path, source directory) to (complete or fake) package
+ infoMap map[*Named]typeInfo // maps named types to their associated type info (for cycle detection)
+
+ // pkgPathMap maps package names to the set of distinct import paths we've
+ // seen for that name, anywhere in the import graph. It is used for
+ // disambiguating package names in error messages.
+ //
+ // pkgPathMap is allocated lazily, so that we don't pay the price of building
+ // it on the happy path. seenPkgMap tracks the packages that we've already
+ // walked.
+ pkgPathMap map[string]map[string]bool
+ seenPkgMap map[*Package]bool
+
+ // information collected during type-checking of a set of package files
+ // (initialized by Files, valid only for the duration of check.Files;
+ // maps and lists are allocated on demand)
+ files []*syntax.File // list of package files
+ imports []*PkgName // list of imported packages
+ dotImportMap map[dotImportKey]*PkgName // maps dot-imported objects to the package they were dot-imported through
+ recvTParamMap map[*syntax.Name]*TypeParam // maps blank receiver type parameters to their type
+ brokenAliases map[*TypeName]bool // set of aliases with broken (not yet determined) types
+ unionTypeSets map[*Union]*_TypeSet // computed type sets for union types
+ mono monoGraph // graph for detecting non-monomorphizable instantiation loops
+
+ firstErr error // first error encountered
+ methods map[*TypeName][]*Func // maps package scope type names to associated non-blank (non-interface) methods
+ untyped map[syntax.Expr]exprInfo // map of expressions without final type
+ delayed []action // stack of delayed action segments; segments are processed in FIFO order
+ objPath []Object // path of object dependencies during type inference (for cycle reporting)
+ cleaners []cleaner // list of types that may need a final cleanup at the end of type-checking
+
+ // environment within which the current object is type-checked (valid only
+ // for the duration of type-checking a specific object)
+ environment
+
+ // debugging
+ indent int // indentation for tracing
+}
+
+// addDeclDep adds the dependency edge (check.decl -> to) if check.decl exists
+func (check *Checker) addDeclDep(to Object) {
+ from := check.decl
+ if from == nil {
+ return // not in a package-level init expression
+ }
+ if _, found := check.objMap[to]; !found {
+ return // to is not a package-level object
+ }
+ from.addDep(to)
+}
+
+// brokenAlias records that alias doesn't have a determined type yet.
+// It also sets alias.typ to Typ[Invalid].
+func (check *Checker) brokenAlias(alias *TypeName) {
+ if check.brokenAliases == nil {
+ check.brokenAliases = make(map[*TypeName]bool)
+ }
+ check.brokenAliases[alias] = true
+ alias.typ = Typ[Invalid]
+}
+
+// validAlias records that alias has the valid type typ (possibly Typ[Invalid]).
+func (check *Checker) validAlias(alias *TypeName, typ Type) {
+ delete(check.brokenAliases, alias)
+ alias.typ = typ
+}
+
+// isBrokenAlias reports whether alias doesn't have a determined type yet.
+func (check *Checker) isBrokenAlias(alias *TypeName) bool {
+ return alias.typ == Typ[Invalid] && check.brokenAliases[alias]
+}
+
+func (check *Checker) rememberUntyped(e syntax.Expr, lhs bool, mode operandMode, typ *Basic, val constant.Value) {
+ m := check.untyped
+ if m == nil {
+ m = make(map[syntax.Expr]exprInfo)
+ check.untyped = m
+ }
+ m[e] = exprInfo{lhs, mode, typ, val}
+}
+
+// later pushes f on to the stack of actions that will be processed later;
+// either at the end of the current statement, or in case of a local constant
+// or variable declaration, before the constant or variable is in scope
+// (so that f still sees the scope before any new declarations).
+// later returns the pushed action so one can provide a description
+// via action.describef for debugging, if desired.
+func (check *Checker) later(f func()) *action {
+ i := len(check.delayed)
+ check.delayed = append(check.delayed, action{f: f})
+ return &check.delayed[i]
+}
+
+// push pushes obj onto the object path and returns its index in the path.
+func (check *Checker) push(obj Object) int {
+ check.objPath = append(check.objPath, obj)
+ return len(check.objPath) - 1
+}
+
+// pop pops and returns the topmost object from the object path.
+func (check *Checker) pop() Object {
+ i := len(check.objPath) - 1
+ obj := check.objPath[i]
+ check.objPath[i] = nil
+ check.objPath = check.objPath[:i]
+ return obj
+}
+
+type cleaner interface {
+ cleanup()
+}
+
+// needsCleanup records objects/types that implement the cleanup method
+// which will be called at the end of type-checking.
+func (check *Checker) needsCleanup(c cleaner) {
+ check.cleaners = append(check.cleaners, c)
+}
+
+// NewChecker returns a new Checker instance for a given package.
+// Package files may be added incrementally via checker.Files.
+func NewChecker(conf *Config, pkg *Package, info *Info) *Checker {
+ // make sure we have a configuration
+ if conf == nil {
+ conf = new(Config)
+ }
+
+ // make sure we have an info struct
+ if info == nil {
+ info = new(Info)
+ }
+
+ version, err := parseGoVersion(conf.GoVersion)
+ if err != nil {
+ panic(fmt.Sprintf("invalid Go version %q (%v)", conf.GoVersion, err))
+ }
+
+ return &Checker{
+ conf: conf,
+ ctxt: conf.Context,
+ pkg: pkg,
+ Info: info,
+ version: version,
+ objMap: make(map[Object]*declInfo),
+ impMap: make(map[importKey]*Package),
+ infoMap: make(map[*Named]typeInfo),
+ }
+}
+
+// initFiles initializes the files-specific portion of checker.
+// The provided files must all belong to the same package.
+func (check *Checker) initFiles(files []*syntax.File) {
+ // start with a clean slate (check.Files may be called multiple times)
+ check.files = nil
+ check.imports = nil
+ check.dotImportMap = nil
+
+ check.firstErr = nil
+ check.methods = nil
+ check.untyped = nil
+ check.delayed = nil
+ check.objPath = nil
+ check.cleaners = nil
+
+ // determine package name and collect valid files
+ pkg := check.pkg
+ for _, file := range files {
+ switch name := file.PkgName.Value; pkg.name {
+ case "":
+ if name != "_" {
+ pkg.name = name
+ } else {
+ check.error(file.PkgName, "invalid package name _")
+ }
+ fallthrough
+
+ case name:
+ check.files = append(check.files, file)
+
+ default:
+ check.errorf(file, "package %s; expected %s", name, pkg.name)
+ // ignore this file
+ }
+ }
+}
+
+// A bailout panic is used for early termination.
+type bailout struct{}
+
+func (check *Checker) handleBailout(err *error) {
+ switch p := recover().(type) {
+ case nil, bailout:
+ // normal return or early exit
+ *err = check.firstErr
+ default:
+ // re-panic
+ panic(p)
+ }
+}
+
+// Files checks the provided files as part of the checker's package.
+func (check *Checker) Files(files []*syntax.File) error { return check.checkFiles(files) }
+
+var errBadCgo = errors.New("cannot use FakeImportC and go115UsesCgo together")
+
+func (check *Checker) checkFiles(files []*syntax.File) (err error) {
+ if check.conf.FakeImportC && check.conf.go115UsesCgo {
+ return errBadCgo
+ }
+
+ defer check.handleBailout(&err)
+
+ print := func(msg string) {
+ if check.conf.Trace {
+ fmt.Println()
+ fmt.Println(msg)
+ }
+ }
+
+ print("== initFiles ==")
+ check.initFiles(files)
+
+ print("== collectObjects ==")
+ check.collectObjects()
+
+ print("== packageObjects ==")
+ check.packageObjects()
+
+ print("== processDelayed ==")
+ check.processDelayed(0) // incl. all functions
+
+ print("== cleanup ==")
+ check.cleanup()
+
+ print("== initOrder ==")
+ check.initOrder()
+
+ if !check.conf.DisableUnusedImportCheck {
+ print("== unusedImports ==")
+ check.unusedImports()
+ }
+
+ print("== recordUntyped ==")
+ check.recordUntyped()
+
+ if check.firstErr == nil {
+ // TODO(mdempsky): Ensure monomorph is safe when errors exist.
+ check.monomorph()
+ }
+
+ check.pkg.complete = true
+
+ // no longer needed - release memory
+ check.imports = nil
+ check.dotImportMap = nil
+ check.pkgPathMap = nil
+ check.seenPkgMap = nil
+ check.recvTParamMap = nil
+ check.brokenAliases = nil
+ check.unionTypeSets = nil
+ check.ctxt = nil
+
+ // TODO(gri) There's more memory we should release at this point.
+
+ return
+}
+
+// processDelayed processes all delayed actions pushed after top.
+func (check *Checker) processDelayed(top int) {
+ // If each delayed action pushes a new action, the
+ // stack will continue to grow during this loop.
+ // However, only the processing of functions (which
+ // are themselves handled in a delayed fashion) may
+ // add more actions (such as nested functions), so
+ // this is a sufficiently bounded process.
+ for i := top; i < len(check.delayed); i++ {
+ a := &check.delayed[i]
+ if check.conf.Trace && a.desc != nil {
+ fmt.Println()
+ check.trace(a.desc.pos.Pos(), "-- "+a.desc.format, a.desc.args...)
+ }
+ a.f() // may append to check.delayed
+ }
+ assert(top <= len(check.delayed)) // stack must not have shrunk
+ check.delayed = check.delayed[:top]
+}
+
+// cleanup runs cleanup for all collected cleaners.
+func (check *Checker) cleanup() {
+ // Don't use a range clause since Named.cleanup may add more cleaners.
+ for i := 0; i < len(check.cleaners); i++ {
+ check.cleaners[i].cleanup()
+ }
+ check.cleaners = nil
+}
+
+func (check *Checker) record(x *operand) {
+ // convert x into a user-friendly set of values
+ // TODO(gri) this code can be simplified
+ var typ Type
+ var val constant.Value
+ switch x.mode {
+ case invalid:
+ typ = Typ[Invalid]
+ case novalue:
+ typ = (*Tuple)(nil)
+ case constant_:
+ typ = x.typ
+ val = x.val
+ default:
+ typ = x.typ
+ }
+ assert(x.expr != nil && typ != nil)
+
+ if isUntyped(typ) {
+ // delay type and value recording until we know the type
+ // or until the end of type checking
+ check.rememberUntyped(x.expr, false, x.mode, typ.(*Basic), val)
+ } else {
+ check.recordTypeAndValue(x.expr, x.mode, typ, val)
+ }
+}
+
+func (check *Checker) recordUntyped() {
+ if !debug && check.Types == nil {
+ return // nothing to do
+ }
+
+ for x, info := range check.untyped {
+ if debug && isTyped(info.typ) {
+ check.dump("%v: %s (type %s) is typed", posFor(x), x, info.typ)
+ unreachable()
+ }
+ check.recordTypeAndValue(x, info.mode, info.typ, info.val)
+ }
+}
+
+func (check *Checker) recordTypeAndValue(x syntax.Expr, mode operandMode, typ Type, val constant.Value) {
+ assert(x != nil)
+ assert(typ != nil)
+ if mode == invalid {
+ return // omit
+ }
+ if mode == constant_ {
+ assert(val != nil)
+ // We check allBasic(typ, IsConstType) here as constant expressions may be
+ // recorded as type parameters.
+ assert(typ == Typ[Invalid] || allBasic(typ, IsConstType))
+ }
+ if m := check.Types; m != nil {
+ m[x] = TypeAndValue{mode, typ, val}
+ }
+}
+
+func (check *Checker) recordBuiltinType(f syntax.Expr, sig *Signature) {
+ // f must be a (possibly parenthesized, possibly qualified)
+ // identifier denoting a built-in (including unsafe's non-constant
+ // functions Add and Slice): record the signature for f and possible
+ // children.
+ for {
+ check.recordTypeAndValue(f, builtin, sig, nil)
+ switch p := f.(type) {
+ case *syntax.Name, *syntax.SelectorExpr:
+ return // we're done
+ case *syntax.ParenExpr:
+ f = p.X
+ default:
+ unreachable()
+ }
+ }
+}
+
+func (check *Checker) recordCommaOkTypes(x syntax.Expr, a [2]Type) {
+ assert(x != nil)
+ if a[0] == nil || a[1] == nil {
+ return
+ }
+ assert(isTyped(a[0]) && isTyped(a[1]) && (isBoolean(a[1]) || a[1] == universeError))
+ if m := check.Types; m != nil {
+ for {
+ tv := m[x]
+ assert(tv.Type != nil) // should have been recorded already
+ pos := x.Pos()
+ tv.Type = NewTuple(
+ NewVar(pos, check.pkg, "", a[0]),
+ NewVar(pos, check.pkg, "", a[1]),
+ )
+ m[x] = tv
+ // if x is a parenthesized expression (p.X), update p.X
+ p, _ := x.(*syntax.ParenExpr)
+ if p == nil {
+ break
+ }
+ x = p.X
+ }
+ }
+}
+
+// recordInstance records instantiation information into check.Info, if the
+// Instances map is non-nil. The given expr must be an ident, selector, or
+// index (list) expr with ident or selector operand.
+//
+// TODO(rfindley): the expr parameter is fragile. See if we can access the
+// instantiated identifier in some other way.
+func (check *Checker) recordInstance(expr syntax.Expr, targs []Type, typ Type) {
+ ident := instantiatedIdent(expr)
+ assert(ident != nil)
+ assert(typ != nil)
+ if m := check.Instances; m != nil {
+ m[ident] = Instance{newTypeList(targs), typ}
+ }
+}
+
+func instantiatedIdent(expr syntax.Expr) *syntax.Name {
+ var selOrIdent syntax.Expr
+ switch e := expr.(type) {
+ case *syntax.IndexExpr:
+ selOrIdent = e.X
+ case *syntax.SelectorExpr, *syntax.Name:
+ selOrIdent = e
+ }
+ switch x := selOrIdent.(type) {
+ case *syntax.Name:
+ return x
+ case *syntax.SelectorExpr:
+ return x.Sel
+ }
+ panic("instantiated ident not found")
+}
+
+func (check *Checker) recordDef(id *syntax.Name, obj Object) {
+ assert(id != nil)
+ if m := check.Defs; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordUse(id *syntax.Name, obj Object) {
+ assert(id != nil)
+ assert(obj != nil)
+ if m := check.Uses; m != nil {
+ m[id] = obj
+ }
+}
+
+func (check *Checker) recordImplicit(node syntax.Node, obj Object) {
+ assert(node != nil)
+ assert(obj != nil)
+ if m := check.Implicits; m != nil {
+ m[node] = obj
+ }
+}
+
+func (check *Checker) recordSelection(x *syntax.SelectorExpr, kind SelectionKind, recv Type, obj Object, index []int, indirect bool) {
+ assert(obj != nil && (recv == nil || len(index) > 0))
+ check.recordUse(x.Sel, obj)
+ if m := check.Selections; m != nil {
+ m[x] = &Selection{kind, recv, obj, index, indirect}
+ }
+}
+
+func (check *Checker) recordScope(node syntax.Node, scope *Scope) {
+ assert(node != nil)
+ assert(scope != nil)
+ if m := check.Scopes; m != nil {
+ m[node] = scope
+ }
+}
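A minimal sketch of the NewChecker/Files flow that checkFiles drives above, assuming the mirrored go/types API (it takes an extra *token.FileSet but runs the same phases: object collection, delayed actions, init order, unused imports):

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p; const answer = 6 * 7`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}

	conf := &types.Config{Importer: importer.Default()}
	info := &types.Info{Defs: make(map[*ast.Ident]types.Object)}
	pkg := types.NewPackage("p", "p")

	// NewChecker followed by Files corresponds to check.checkFiles above.
	if err := types.NewChecker(conf, fset, pkg, info).Files([]*ast.File{f}); err != nil {
		panic(err)
	}
	fmt.Println(pkg.Complete())               // true
	fmt.Println(pkg.Scope().Lookup("answer")) // the declared constant
}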
diff --git a/src/cmd/compile/internal/types2/check_test.go b/src/cmd/compile/internal/types2/check_test.go
new file mode 100644
index 0000000..7efa512
--- /dev/null
+++ b/src/cmd/compile/internal/types2/check_test.go
@@ -0,0 +1,345 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements a typechecker test harness. The packages specified
+// in tests are typechecked. Error messages reported by the typechecker are
+// compared against the error messages expected in the test files.
+//
+// Expected errors are indicated in the test files by putting a comment
+// of the form /* ERROR "rx" */ immediately following an offending token.
+// The harness will verify that an error matching the regular expression
+// rx is reported at that source position. Consecutive comments may be
+// used to indicate multiple errors for the same token position.
+//
+// For instance, the following test file indicates that a "not declared"
+// error should be reported for the undeclared variable x:
+//
+// package p
+// func f() {
+// _ = x /* ERROR "not declared" */ + 1
+// }
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "flag"
+ "internal/buildcfg"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "regexp"
+ "sort"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var (
+ haltOnError = flag.Bool("halt", false, "halt on error")
+ verifyErrors = flag.Bool("verify", false, "verify errors (rather than list them) in TestManual")
+ goVersion = flag.String("lang", "", "Go language version (e.g. \"go1.12\")")
+)
+
+func parseFiles(t *testing.T, filenames []string, mode syntax.Mode) ([]*syntax.File, []error) {
+ var files []*syntax.File
+ var errlist []error
+ errh := func(err error) { errlist = append(errlist, err) }
+ for _, filename := range filenames {
+ file, err := syntax.ParseFile(filename, errh, nil, mode)
+ if file == nil {
+ t.Fatalf("%s: %s", filename, err)
+ }
+ files = append(files, file)
+ }
+ return files, errlist
+}
+
+func unpackError(err error) syntax.Error {
+ switch err := err.(type) {
+ case syntax.Error:
+ return err
+ case Error:
+ return syntax.Error{Pos: err.Pos, Msg: err.Msg}
+ default:
+ return syntax.Error{Msg: err.Error()}
+ }
+}
+
+// delta returns the absolute difference between x and y.
+func delta(x, y uint) uint {
+ switch {
+ case x < y:
+ return y - x
+ case x > y:
+ return x - y
+ default:
+ return 0
+ }
+}
+
+// goVersionRx matches a Go version string using '_', e.g. "go1_12".
+var goVersionRx = regexp.MustCompile(`^go[1-9][0-9]*_(0|[1-9][0-9]*)$`)
+
+// asGoVersion returns a regular Go language version string
+// if s is a Go version string using '_' rather than '.' to
+// separate the major and minor version numbers (e.g. "go1_12").
+// Otherwise it returns the empty string.
+func asGoVersion(s string) string {
+ if goVersionRx.MatchString(s) {
+ return strings.Replace(s, "_", ".", 1)
+ }
+ return ""
+}
+
+// excludedForUnifiedBuild lists files that cannot be tested
+// when using the unified build's export data.
+// TODO(gri) enable as soon as the unified build supports this.
+var excludedForUnifiedBuild = map[string]bool{
+ "issue47818.go2": true,
+ "issue49705.go2": true,
+}
+
+func testFiles(t *testing.T, filenames []string, colDelta uint, manual bool) {
+ if len(filenames) == 0 {
+ t.Fatal("no source files")
+ }
+
+ if buildcfg.Experiment.Unified {
+ for _, f := range filenames {
+ if excludedForUnifiedBuild[filepath.Base(f)] {
+ t.Logf("%s cannot be tested with unified build - skipped", f)
+ return
+ }
+ }
+ }
+
+ var mode syntax.Mode
+ if strings.HasSuffix(filenames[0], ".go2") || manual {
+ mode |= syntax.AllowGenerics | syntax.AllowMethodTypeParams
+ }
+ // parse files and collect parser errors
+ files, errlist := parseFiles(t, filenames, mode)
+
+ pkgName := "<no package>"
+ if len(files) > 0 {
+ pkgName = files[0].PkgName.Value
+ }
+
+ // if no Go version is given, consider the package name
+ goVersion := *goVersion
+ if goVersion == "" {
+ goVersion = asGoVersion(pkgName)
+ }
+
+ listErrors := manual && !*verifyErrors
+ if listErrors && len(errlist) > 0 {
+ t.Errorf("--- %s:", pkgName)
+ for _, err := range errlist {
+ t.Error(err)
+ }
+ }
+
+ // typecheck and collect typechecker errors
+ var conf Config
+ conf.GoVersion = goVersion
+ // special case for importC.src
+ if len(filenames) == 1 && strings.HasSuffix(filenames[0], "importC.src") {
+ conf.FakeImportC = true
+ }
+ conf.Trace = manual && testing.Verbose()
+ conf.Importer = defaultImporter()
+ conf.Error = func(err error) {
+ if *haltOnError {
+ defer panic(err)
+ }
+ if listErrors {
+ t.Error(err)
+ return
+ }
+ errlist = append(errlist, err)
+ }
+ conf.Check(pkgName, files, nil)
+
+ if listErrors {
+ return
+ }
+
+ // sort errlist in source order
+ sort.Slice(errlist, func(i, j int) bool {
+ pi := unpackError(errlist[i]).Pos
+ pj := unpackError(errlist[j]).Pos
+ return pi.Cmp(pj) < 0
+ })
+
+ // collect expected errors
+ errmap := make(map[string]map[uint][]syntax.Error)
+ for _, filename := range filenames {
+ f, err := os.Open(filename)
+ if err != nil {
+ t.Error(err)
+ continue
+ }
+ if m := syntax.ErrorMap(f); len(m) > 0 {
+ errmap[filename] = m
+ }
+ f.Close()
+ }
+
+ // match against found errors
+ for _, err := range errlist {
+ got := unpackError(err)
+
+ // find list of errors for the respective error line
+ filename := got.Pos.Base().Filename()
+ filemap := errmap[filename]
+ line := got.Pos.Line()
+ var list []syntax.Error
+ if filemap != nil {
+ list = filemap[line]
+ }
+ // list may be nil
+
+ // one of errors in list should match the current error
+ index := -1 // list index of matching message, if any
+ for i, want := range list {
+ rx, err := regexp.Compile(want.Msg)
+ if err != nil {
+ t.Errorf("%s:%d:%d: %v", filename, line, want.Pos.Col(), err)
+ continue
+ }
+ if rx.MatchString(got.Msg) {
+ index = i
+ break
+ }
+ }
+ if index < 0 {
+ t.Errorf("%s: no error expected: %q", got.Pos, got.Msg)
+ continue
+ }
+
+ // column position must be within expected colDelta
+ want := list[index]
+ if delta(got.Pos.Col(), want.Pos.Col()) > colDelta {
+ t.Errorf("%s: got col = %d; want %d", got.Pos, got.Pos.Col(), want.Pos.Col())
+ }
+
+ // eliminate from list
+ if n := len(list) - 1; n > 0 {
+ // not the last entry - slide entries down (don't reorder)
+ copy(list[index:], list[index+1:])
+ filemap[line] = list[:n]
+ } else {
+ // last entry - remove list from filemap
+ delete(filemap, line)
+ }
+
+ // if filemap is empty, eliminate from errmap
+ if len(filemap) == 0 {
+ delete(errmap, filename)
+ }
+ }
+
+ // there should be no expected errors left
+ if len(errmap) > 0 {
+ t.Errorf("--- %s: unreported errors:", pkgName)
+ for filename, filemap := range errmap {
+ for line, list := range filemap {
+ for _, err := range list {
+ t.Errorf("%s:%d:%d: %s", filename, line, err.Pos.Col(), err.Msg)
+ }
+ }
+ }
+ }
+}
+
+// TestManual is for manual testing of a package - either provided
+// as a list of filenames belonging to the package, or a directory
+// name containing the package files - after the test arguments
+// (and a separating "--"). For instance, to test the package made
+// of the files foo.go and bar.go, use:
+//
+// go test -run Manual -- foo.go bar.go
+//
+// If no source arguments are provided, the file testdata/manual.go2
+// is used instead.
+// Provide the -verify flag to verify errors against ERROR comments
+// in the input files rather than having a list of errors reported.
+// The accepted Go language version can be controlled with the -lang
+// flag.
+func TestManual(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ filenames := flag.Args()
+ if len(filenames) == 0 {
+ filenames = []string{filepath.FromSlash("testdata/manual.go2")}
+ }
+
+ info, err := os.Stat(filenames[0])
+ if err != nil {
+ t.Fatalf("TestManual: %v", err)
+ }
+
+ DefPredeclaredTestFuncs()
+ if info.IsDir() {
+ if len(filenames) > 1 {
+ t.Fatal("TestManual: must have only one directory argument")
+ }
+ testDir(t, filenames[0], 0, true)
+ } else {
+ testFiles(t, filenames, 0, true)
+ }
+}
+
+// TODO(gri) go/types has extra TestLongConstants and TestIndexRepresentability tests
+
+func TestCheck(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/check", 55, false) } // TODO(gri) narrow column tolerance
+func TestSpec(t *testing.T) { DefPredeclaredTestFuncs(); testDirFiles(t, "testdata/spec", 0, false) }
+func TestExamples(t *testing.T) { testDirFiles(t, "testdata/examples", 0, false) }
+func TestFixedbugs(t *testing.T) {
+ DefPredeclaredTestFuncs()
+ testDirFiles(t, "testdata/fixedbugs", 0, false)
+}
+
+func testDirFiles(t *testing.T, dir string, colDelta uint, manual bool) {
+ testenv.MustHaveGoBuild(t)
+ dir = filepath.FromSlash(dir)
+
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ for _, fi := range fis {
+ path := filepath.Join(dir, fi.Name())
+
+ // If fi is a directory, its files make up a single package.
+ if fi.IsDir() {
+ testDir(t, path, colDelta, manual)
+ } else {
+ t.Run(filepath.Base(path), func(t *testing.T) {
+ testFiles(t, []string{path}, colDelta, manual)
+ })
+ }
+ }
+}
+
+func testDir(t *testing.T, dir string, colDelta uint, manual bool) {
+ fis, err := os.ReadDir(dir)
+ if err != nil {
+ t.Error(err)
+ return
+ }
+
+ var filenames []string
+ for _, fi := range fis {
+ filenames = append(filenames, filepath.Join(dir, fi.Name()))
+ }
+
+ t.Run(filepath.Base(dir), func(t *testing.T) {
+ testFiles(t, filenames, colDelta, manual)
+ })
+}
diff --git a/src/cmd/compile/internal/types2/compilersupport.go b/src/cmd/compile/internal/types2/compilersupport.go
new file mode 100644
index 0000000..33dd8e8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/compilersupport.go
@@ -0,0 +1,30 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Helper functions exported for the compiler.
+// Do not use internally.
+
+package types2
+
+// If t is a pointer, AsPointer returns that type, otherwise it returns nil.
+func AsPointer(t Type) *Pointer {
+ u, _ := t.Underlying().(*Pointer)
+ return u
+}
+
+// If t is a signature, AsSignature returns that type, otherwise it returns nil.
+func AsSignature(t Type) *Signature {
+ u, _ := t.Underlying().(*Signature)
+ return u
+}
+
+// If typ is a type parameter, CoreType returns the single underlying
+// type of all types in the corresponding type constraint if it exists, or
+// nil otherwise. If the type set contains only unrestricted and restricted
+// channel types (with identical element types), the single underlying type
+// is the restricted channel type if the restrictions are always the same.
+// If typ is not a type parameter, CoreType returns the underlying type.
+func CoreType(t Type) Type {
+ return coreType(t)
+}
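An ordinary-Go illustration of the core-type notion that CoreType exposes (language-level code, not a call into the internal API): for a constraint whose type set has a single underlying type, that underlying type is the core type and drives operations such as indexing.

package p

// Ints has the single underlying type []int in its type set,
// so []int is the core type of any type parameter constrained by it.
type Ints interface{ ~[]int }

// Indexing p is permitted because the core type of P is a slice.
func first[P Ints](p P) int { return p[0] }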
diff --git a/src/cmd/compile/internal/types2/context.go b/src/cmd/compile/internal/types2/context.go
new file mode 100644
index 0000000..2d790fe
--- /dev/null
+++ b/src/cmd/compile/internal/types2/context.go
@@ -0,0 +1,124 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "strconv"
+ "strings"
+ "sync"
+)
+
+// A Context is an opaque type checking context. It may be used to share
+// identical type instances across type-checked packages or calls to
+// Instantiate. Contexts are safe for concurrent use.
+//
+// The use of a shared context does not guarantee that identical instances are
+// deduplicated in all cases.
+type Context struct {
+ mu sync.Mutex
+ typeMap map[string][]ctxtEntry // type hash -> instances entries
+ nextID int // next unique ID
+ originIDs map[Type]int // origin type -> unique ID
+}
+
+type ctxtEntry struct {
+ orig Type
+ targs []Type
+ instance Type // = orig[targs]
+}
+
+// NewContext creates a new Context.
+func NewContext() *Context {
+ return &Context{
+ typeMap: make(map[string][]ctxtEntry),
+ originIDs: make(map[Type]int),
+ }
+}
+
+// instanceHash returns a string representation of typ instantiated with targs.
+// The hash should be a perfect hash, though out of caution the type checker
+// does not assume this. The result is guaranteed to not contain blanks.
+func (ctxt *Context) instanceHash(orig Type, targs []Type) string {
+ assert(ctxt != nil)
+ assert(orig != nil)
+ var buf bytes.Buffer
+
+ h := newTypeHasher(&buf, ctxt)
+ h.string(strconv.Itoa(ctxt.getID(orig)))
+ // Because we've already written the unique origin ID, this call to h.typ is
+ // unnecessary, but we leave it for hash readability. It can be removed later
+ // if performance is an issue.
+ h.typ(orig)
+ if len(targs) > 0 {
+ // TODO(rfindley): consider asserting on isGeneric(typ) here, if and when
+ // isGeneric handles *Signature types.
+ h.typeList(targs)
+ }
+
+ return strings.Replace(buf.String(), " ", "#", -1) // ReplaceAll is not available in Go1.4
+}
+
+// lookup returns an existing instantiation of orig with targs, if it exists.
+// Otherwise, it returns nil.
+func (ctxt *Context) lookup(h string, orig Type, targs []Type) Type {
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+
+ for _, e := range ctxt.typeMap[h] {
+ if identicalInstance(orig, targs, e.orig, e.targs) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("non-identical instances: (orig: %s, targs: %v) and %s", orig, targs, e.instance))
+ }
+ }
+
+ return nil
+}
+
+// update de-duplicates inst against previously seen types with the hash h.
+// If an identical type is found with the type hash h, the previously seen
+// type is returned. Otherwise, inst is returned and recorded in the Context
+// for the hash h.
+func (ctxt *Context) update(h string, orig Type, targs []Type, inst Type) Type {
+ assert(inst != nil)
+
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+
+ for _, e := range ctxt.typeMap[h] {
+ if inst == nil || Identical(inst, e.instance) {
+ return e.instance
+ }
+ if debug {
+ // Panic during development to surface any imperfections in our hash.
+ panic(fmt.Sprintf("%s and %s are not identical", inst, e.instance))
+ }
+ }
+
+ ctxt.typeMap[h] = append(ctxt.typeMap[h], ctxtEntry{
+ orig: orig,
+ targs: targs,
+ instance: inst,
+ })
+
+ return inst
+}
+
+// getID returns a unique ID for the type t.
+func (ctxt *Context) getID(t Type) int {
+ ctxt.mu.Lock()
+ defer ctxt.mu.Unlock()
+ id, ok := ctxt.originIDs[t]
+ if !ok {
+ id = ctxt.nextID
+ ctxt.originIDs[t] = id
+ ctxt.nextID++
+ }
+ return id
+}
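A minimal sketch of sharing a Context across instantiations, assuming the mirrored go/types API, which exports NewContext and Instantiate with the same de-duplicating behavior:

package main

import (
	"fmt"
	"go/ast"
	"go/importer"
	"go/parser"
	"go/token"
	"go/types"
)

func main() {
	const src = `package p; type List[T any] struct{ next *List[T]; val T }`

	fset := token.NewFileSet()
	f, err := parser.ParseFile(fset, "p.go", src, 0)
	if err != nil {
		panic(err)
	}
	conf := types.Config{Importer: importer.Default()}
	pkg, err := conf.Check("p", fset, []*ast.File{f}, nil)
	if err != nil {
		panic(err)
	}
	list := pkg.Scope().Lookup("List").Type()

	// Instantiating through a shared Context lets identical instances be reused.
	ctxt := types.NewContext()
	a, _ := types.Instantiate(ctxt, list, []types.Type{types.Typ[types.Int]}, true)
	b, _ := types.Instantiate(ctxt, list, []types.Type{types.Typ[types.Int]}, true)
	fmt.Println(types.Identical(a, b)) // true; with a shared context they are normally the same instance
}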
diff --git a/src/cmd/compile/internal/types2/context_test.go b/src/cmd/compile/internal/types2/context_test.go
new file mode 100644
index 0000000..aa649b1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/context_test.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "testing"
+)
+
+func TestContextHashCollisions(t *testing.T) {
+ if debug {
+ t.Skip("hash collisions are expected, and would fail debug assertions")
+ }
+ // Unit test the de-duplication fall-back logic in Context.
+ //
+ // We can't test this via Instantiate because this is only a fall-back in
+ // case our hash is imperfect.
+ //
+ // These lookups and updates use reasonable looking types in an attempt to
+ // make them robust to internal type assertions, but could equally well use
+ // arbitrary types.
+
+ // Create some distinct origin types. nullaryP and nullaryQ have no
+ // parameters and are identical (but have different type parameter names).
+ // unaryP has a parameter.
+ var nullaryP, nullaryQ, unaryP Type
+ {
+ // type nullaryP = func[P any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ nullaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type nullaryQ = func[Q any]()
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "Q", nil), &emptyInterface)
+ nullaryQ = NewSignatureType(nil, nil, []*TypeParam{tparam}, nil, nil, false)
+ }
+ {
+ // type unaryP = func[P any](_ P)
+ tparam := NewTypeParam(NewTypeName(nopos, nil, "P", nil), &emptyInterface)
+ params := NewTuple(NewVar(nopos, nil, "_", tparam))
+ unaryP = NewSignatureType(nil, nil, []*TypeParam{tparam}, params, nil, false)
+ }
+
+ ctxt := NewContext()
+
+ // Update the context with an instantiation of nullaryP.
+ inst := NewSignatureType(nil, nil, nil, nil, nil, false)
+ if got := ctxt.update("", nullaryP, []Type{Typ[Int]}, inst); got != inst {
+ t.Error("bad")
+ }
+
+ // unaryP is not identical to nullaryP, so we should not get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", unaryP, []Type{Typ[Int]}); got != nil {
+ t.Error("bad")
+ }
+
+ // nullaryQ is identical to nullaryP, so we *should* get inst when
+ // instantiated with identical type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[Int]}); got != inst {
+ t.Error("bad")
+ }
+
+ // ...but verify we don't get inst with different type arguments.
+ if got := ctxt.lookup("", nullaryQ, []Type{Typ[String]}); got != nil {
+ t.Error("bad")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/conversions.go b/src/cmd/compile/internal/types2/conversions.go
new file mode 100644
index 0000000..08b3cbf
--- /dev/null
+++ b/src/cmd/compile/internal/types2/conversions.go
@@ -0,0 +1,299 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of conversions.
+
+package types2
+
+import (
+ "fmt"
+ "go/constant"
+ "unicode"
+)
+
+// Conversion type-checks the conversion T(x).
+// The result is in x.
+func (check *Checker) conversion(x *operand, T Type) {
+ constArg := x.mode == constant_
+
+ constConvertibleTo := func(T Type, val *constant.Value) bool {
+ switch t, _ := under(T).(*Basic); {
+ case t == nil:
+ // nothing to do
+ case representableConst(x.val, check, t, val):
+ return true
+ case isInteger(x.typ) && isString(t):
+ codepoint := unicode.ReplacementChar
+ if i, ok := constant.Uint64Val(x.val); ok && i <= unicode.MaxRune {
+ codepoint = rune(i)
+ }
+ if val != nil {
+ *val = constant.MakeString(string(codepoint))
+ }
+ return true
+ }
+ return false
+ }
+
+ var ok bool
+ var cause string
+ switch {
+ case constArg && isConstType(T):
+ // constant conversion
+ ok = constConvertibleTo(T, &x.val)
+ case constArg && isTypeParam(T):
+ // x is convertible to T if it is convertible
+ // to each specific type in the type set of T.
+ // If T's type set is empty, or if it doesn't
+ // have specific types, constant x cannot be
+ // converted.
+ ok = T.(*TypeParam).underIs(func(u Type) bool {
+ // u is nil if there are no specific type terms
+ if u == nil {
+ cause = check.sprintf("%s does not contain specific types", T)
+ return false
+ }
+ if isString(x.typ) && isBytesOrRunes(u) {
+ return true
+ }
+ if !constConvertibleTo(u, nil) {
+ cause = check.sprintf("cannot convert %s to %s (in %s)", x, u, T)
+ return false
+ }
+ return true
+ })
+ x.mode = value // type parameters are not constants
+ case x.convertibleTo(check, T, &cause):
+ // non-constant conversion
+ ok = true
+ x.mode = value
+ }
+
+ if !ok {
+ var err error_
+ if check.conf.CompilerErrorMessages {
+ if cause != "" {
+ // Add colon at end of line if we have a following cause.
+ err.errorf(x, "cannot convert %s to type %s:", x, T)
+ err.errorf(nopos, cause)
+ } else {
+ err.errorf(x, "cannot convert %s to type %s", x, T)
+ }
+ } else {
+ err.errorf(x, "cannot convert %s to %s", x, T)
+ if cause != "" {
+ err.errorf(nopos, cause)
+ }
+ }
+ check.report(&err)
+ x.mode = invalid
+ return
+ }
+
+ // The conversion argument types are final. For untyped values the
+ // conversion provides the type, per the spec: "A constant may be
+ // given a type explicitly by a constant declaration or conversion,...".
+ if isUntyped(x.typ) {
+ final := T
+ // - For conversions to interfaces, except for untyped nil arguments,
+ // use the argument's default type.
+ // - For conversions of untyped constants to non-constant types, also
+ // use the default type (e.g., []byte("foo") should report string
+ // not []byte as type for the constant "foo").
+ // - For constant integer to string conversions, keep the argument type.
+ // (See also the TODO below.)
+ if x.typ == Typ[UntypedNil] {
+ // ok
+ } else if IsInterface(T) && !isTypeParam(T) || constArg && !isConstType(T) {
+ final = Default(x.typ)
+ } else if x.mode == constant_ && isInteger(x.typ) && allString(T) {
+ final = x.typ
+ }
+ check.updateExprType(x.expr, final, true)
+ }
+
+ x.typ = T
+}
+
+// TODO(gri) convertibleTo checks if T(x) is valid. It assumes that the type
+// of x is fully known, but that's not the case for say string(1<<s + 1.0):
+// Here, the type of 1<<s + 1.0 will be UntypedFloat which will lead to the
+// (correct!) refusal of the conversion. But the reported error is essentially
+// "cannot convert untyped float value to string", yet the correct error (per
+// the spec) is that we cannot shift a floating-point value: 1 in 1<<s should
+// be converted to UntypedFloat because of the addition of 1.0. Fixing this
+// is tricky because we'd have to run updateExprType on the argument first.
+// (Issue #21982.)
+
+// convertibleTo reports whether T(x) is valid. In the failure case, *cause
+// may be set to the cause for the failure.
+// The check parameter may be nil if convertibleTo is invoked through an
+// exported API call, i.e., when all methods have been type-checked.
+func (x *operand) convertibleTo(check *Checker, T Type, cause *string) bool {
+ // "x is assignable to T"
+ if ok, _ := x.assignableTo(check, T, cause); ok {
+ return true
+ }
+
+ // "V and T have identical underlying types if tags are ignored
+ // and V and T are not type parameters"
+ V := x.typ
+ Vu := under(V)
+ Tu := under(T)
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
+ if IdenticalIgnoreTags(Vu, Tu) && Vp == nil && Tp == nil {
+ return true
+ }
+
+ // "V and T are unnamed pointer types and their pointer base types
+ // have identical underlying types if tags are ignored
+ // and their pointer base types are not type parameters"
+ if V, ok := V.(*Pointer); ok {
+ if T, ok := T.(*Pointer); ok {
+ if IdenticalIgnoreTags(under(V.base), under(T.base)) && !isTypeParam(V.base) && !isTypeParam(T.base) {
+ return true
+ }
+ }
+ }
+
+ // "V and T are both integer or floating point types"
+ if isIntegerOrFloat(Vu) && isIntegerOrFloat(Tu) {
+ return true
+ }
+
+ // "V and T are both complex types"
+ if isComplex(Vu) && isComplex(Tu) {
+ return true
+ }
+
+ // "V is an integer or a slice of bytes or runes and T is a string type"
+ if (isInteger(Vu) || isBytesOrRunes(Vu)) && isString(Tu) {
+ return true
+ }
+
+ // "V is a string and T is a slice of bytes or runes"
+ if isString(Vu) && isBytesOrRunes(Tu) {
+ return true
+ }
+
+ // package unsafe:
+ // "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+ if (isPointer(Vu) || isUintptr(Vu)) && isUnsafePointer(Tu) {
+ return true
+ }
+ // "and vice versa"
+ if isUnsafePointer(Vu) && (isPointer(Tu) || isUintptr(Tu)) {
+ return true
+ }
+
+ // "V a slice, T is a pointer-to-array type,
+ // and the slice and array types have identical element types."
+ if s, _ := Vu.(*Slice); s != nil {
+ if p, _ := Tu.(*Pointer); p != nil {
+ if a, _ := under(p.Elem()).(*Array); a != nil {
+ if Identical(s.Elem(), a.Elem()) {
+ if check == nil || check.allowVersion(check.pkg, 1, 17) {
+ return true
+ }
+ // check != nil
+ if cause != nil {
+ *cause = "conversion of slices to array pointers requires go1.17 or later"
+ if check.conf.CompilerErrorMessages {
+ *cause += fmt.Sprintf(" (-lang was set to %s; check go.mod)", check.conf.GoVersion)
+ }
+ }
+ return false
+ }
+ }
+ }
+ }
+
+ // optimization: if we don't have type parameters, we're done
+ if Vp == nil && Tp == nil {
+ return false
+ }
+
+ errorf := func(format string, args ...interface{}) {
+ if check != nil && cause != nil {
+ msg := check.sprintf(format, args...)
+ if *cause != "" {
+ msg += "\n\t" + *cause
+ }
+ *cause = msg
+ }
+ }
+
+ // generic cases with specific type terms
+ // (generic operands cannot be constants, so we can ignore x.val)
+ switch {
+ case Vp != nil && Tp != nil:
+ x := *x // don't clobber outer x
+ return Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ return Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ if !x.convertibleTo(check, T.typ, cause) {
+ errorf("cannot convert %s (in %s) to %s (in %s)", V.typ, Vp, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ })
+ case Vp != nil:
+ x := *x // don't clobber outer x
+ return Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ if !x.convertibleTo(check, T, cause) {
+ errorf("cannot convert %s (in %s) to %s", V.typ, Vp, T)
+ return false
+ }
+ return true
+ })
+ case Tp != nil:
+ return Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ if !x.convertibleTo(check, T.typ, cause) {
+ errorf("cannot convert %s to %s (in %s)", x.typ, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ }
+
+ return false
+}
+
+func isUintptr(typ Type) bool {
+ t, _ := under(typ).(*Basic)
+ return t != nil && t.kind == Uintptr
+}
+
+func isUnsafePointer(typ Type) bool {
+ t, _ := under(typ).(*Basic)
+ return t != nil && t.kind == UnsafePointer
+}
+
+func isPointer(typ Type) bool {
+ _, ok := under(typ).(*Pointer)
+ return ok
+}
+
+func isBytesOrRunes(typ Type) bool {
+ if s, _ := under(typ).(*Slice); s != nil {
+ t, _ := under(s.elem).(*Basic)
+ return t != nil && (t.kind == Byte || t.kind == Rune)
+ }
+ return false
+}
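A few language-level examples of conversions accepted by the rules above (plain Go code, not calls into the checker); the last one is the slice-to-array-pointer case that convertibleTo gates on go1.17:

package p

func conversions() {
	_ = string(rune(65)) // integer (rune) to string: "A"
	b := []byte("hello") // string to byte slice
	_ = string(b)        // and back to string

	s := []int{1, 2, 3}
	_ = (*[3]int)(s) // slice to array pointer, requires go1.17 or later
}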
diff --git a/src/cmd/compile/internal/types2/decl.go b/src/cmd/compile/internal/types2/decl.go
new file mode 100644
index 0000000..fc03155
--- /dev/null
+++ b/src/cmd/compile/internal/types2/decl.go
@@ -0,0 +1,880 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+)
+
+func (err *error_) recordAltDecl(obj Object) {
+ if pos := obj.Pos(); pos.IsKnown() {
+ // We use "other" rather than "previous" here because
+ // the first declaration seen may not be textually
+ // earlier in the source.
+ err.errorf(pos, "other declaration of %s", obj.Name())
+ }
+}
+
+func (check *Checker) declare(scope *Scope, id *syntax.Name, obj Object, pos syntax.Pos) {
+ // spec: "The blank identifier, represented by the underscore
+ // character _, may be used in a declaration like any other
+ // identifier but the declaration does not introduce a new
+ // binding."
+ if obj.Name() != "_" {
+ if alt := scope.Insert(obj); alt != nil {
+ var err error_
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return
+ }
+ obj.setScopePos(pos)
+ }
+ if id != nil {
+ check.recordDef(id, obj)
+ }
+}
+
+// pathString returns a string of the form a->b-> ... ->g for a path [a, b, ... g].
+func pathString(path []Object) string {
+ var s string
+ for i, p := range path {
+ if i > 0 {
+ s += "->"
+ }
+ s += p.Name()
+ }
+ return s
+}
+
+// objDecl type-checks the declaration of obj in its respective (file) environment.
+// For the meaning of def, see Checker.definedType, in typexpr.go.
+func (check *Checker) objDecl(obj Object, def *Named) {
+ if check.conf.Trace && obj.Type() == nil {
+ if check.indent == 0 {
+ fmt.Println() // empty line between top-level objects for readability
+ }
+ check.trace(obj.Pos(), "-- checking %s (%s, objPath = %s)", obj, obj.color(), pathString(check.objPath))
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(obj.Pos(), "=> %s (%s)", obj, obj.color())
+ }()
+ }
+
+ // Checking the declaration of obj means inferring its type
+ // (and possibly its value, for constants).
+ // An object's type (and thus the object) may be in one of
+ // three states which are expressed by colors:
+ //
+ // - an object whose type is not yet known is painted white (initial color)
+ // - an object whose type is in the process of being inferred is painted grey
+ // - an object whose type is fully inferred is painted black
+ //
+ // During type inference, an object's color changes from white to grey
+ // to black (pre-declared objects are painted black from the start).
+ // A black object (i.e., its type) can only depend on (refer to) other black
+ // ones. White and grey objects may depend on white and black objects.
+ // A dependency on a grey object indicates a cycle which may or may not be
+ // valid.
+ //
+ // When objects turn grey, they are pushed on the object path (a stack);
+ // they are popped again when they turn black. Thus, if a grey object (a
+ // cycle) is encountered, it is on the object path, and all the objects
+ // it depends on are the remaining objects on that path. Color encoding
+ // is such that the color value of a grey object indicates the index of
+ // that object in the object path.
+
+ // During type-checking, white objects may be assigned a type without
+ // traversing through objDecl; e.g., when initializing constants and
+ // variables. Update the colors of those objects here (rather than
+ // everywhere where we set the type) to satisfy the color invariants.
+ if obj.color() == white && obj.Type() != nil {
+ obj.setColor(black)
+ return
+ }
+
+ switch obj.color() {
+ case white:
+ assert(obj.Type() == nil)
+ // All color values other than white and black are considered grey.
+ // Because black and white are < grey, all values >= grey are grey.
+ // Use those values to encode the object's index into the object path.
+ obj.setColor(grey + color(check.push(obj)))
+ defer func() {
+ check.pop().setColor(black)
+ }()
+
+ case black:
+ assert(obj.Type() != nil)
+ return
+
+ default:
+ // Color values other than white or black are considered grey.
+ fallthrough
+
+ case grey:
+ // We have a (possibly invalid) cycle.
+ // In the existing code, this is marked by a non-nil type
+ // for the object except for constants and variables whose
+ // type may be non-nil (known), or nil if it depends on the
+ // not-yet known initialization value.
+ // In the former case, set the type to Typ[Invalid] because
+ // we have an initialization cycle. The cycle error will be
+ // reported later, when determining initialization order.
+ // TODO(gri) Report cycle here and simplify initialization
+ // order code.
+ switch obj := obj.(type) {
+ case *Const:
+ if !check.validCycle(obj) || obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+
+ case *Var:
+ if !check.validCycle(obj) || obj.typ == nil {
+ obj.typ = Typ[Invalid]
+ }
+
+ case *TypeName:
+ if !check.validCycle(obj) {
+ // break cycle
+ // (without this, calling underlying()
+ // below may lead to an endless loop
+ // if we have a cycle for a defined
+ // (*Named) type)
+ obj.typ = Typ[Invalid]
+ }
+
+ case *Func:
+ if !check.validCycle(obj) {
+ // Don't set obj.typ to Typ[Invalid] here
+ // because plenty of code type-asserts that
+ // functions have a *Signature type. Grey
+ // functions have their type set to an empty
+ // signature which makes it impossible to
+ // initialize a variable with the function.
+ }
+
+ default:
+ unreachable()
+ }
+ assert(obj.Type() != nil)
+ return
+ }
+
+ d := check.objMap[obj]
+ if d == nil {
+ check.dump("%v: %s should have been declared", obj.Pos(), obj)
+ unreachable()
+ }
+
+ // save/restore current environment and set up object environment
+ defer func(env environment) {
+ check.environment = env
+ }(check.environment)
+ check.environment = environment{
+ scope: d.file,
+ }
+
+ // Const and var declarations must not have initialization
+ // cycles. We track them by remembering the current declaration
+ // in check.decl. Initialization expressions depending on other
+ // consts, vars, or functions, add dependencies to the current
+ // check.decl.
+ switch obj := obj.(type) {
+ case *Const:
+ check.decl = d // new package-level const decl
+ check.constDecl(obj, d.vtyp, d.init, d.inherited)
+ case *Var:
+ check.decl = d // new package-level var decl
+ check.varDecl(obj, d.lhs, d.vtyp, d.init)
+ case *TypeName:
+ // invalid recursive types are detected via path
+ check.typeDecl(obj, d.tdecl, def)
+ check.collectMethods(obj) // methods can only be added to top-level types
+ case *Func:
+ // functions may be recursive - no need to track dependencies
+ check.funcDecl(obj, d)
+ default:
+ unreachable()
+ }
+}
+
+// validCycle reports whether the cycle starting with obj is valid and
+// reports an error if it is not.
+func (check *Checker) validCycle(obj Object) (valid bool) {
+ // The object map contains the package scope objects and the non-interface methods.
+ if debug {
+ info := check.objMap[obj]
+ inObjMap := info != nil && (info.fdecl == nil || info.fdecl.Recv == nil) // exclude methods
+ isPkgObj := obj.Parent() == check.pkg.scope
+ if isPkgObj != inObjMap {
+ check.dump("%v: inconsistent object map for %s (isPkgObj = %v, inObjMap = %v)", obj.Pos(), obj, isPkgObj, inObjMap)
+ unreachable()
+ }
+ }
+
+ // Count cycle objects.
+ assert(obj.color() >= grey)
+ start := obj.color() - grey // index of obj in objPath
+ cycle := check.objPath[start:]
+ tparCycle := false // if set, the cycle is through a type parameter list
+ nval := 0 // number of (constant or variable) values in the cycle; valid if !generic
+ ndef := 0 // number of type definitions in the cycle; valid if !generic
+loop:
+ for _, obj := range cycle {
+ switch obj := obj.(type) {
+ case *Const, *Var:
+ nval++
+ case *TypeName:
+ // If we reach a generic type that is part of a cycle
+ // and we are in a type parameter list, we have a cycle
+ // through a type parameter list, which is invalid.
+ if check.inTParamList && isGeneric(obj.typ) {
+ tparCycle = true
+ break loop
+ }
+
+ // Determine if the type name is an alias or not. For
+ // package-level objects, use the object map which
+ // provides syntactic information (which doesn't rely
+ // on the order in which the objects are set up). For
+ // local objects, we can rely on the order, so use
+ // the object's predicate.
+ // TODO(gri) It would be less fragile to always access
+ // the syntactic information. We should consider storing
+ // this information explicitly in the object.
+ var alias bool
+ if d := check.objMap[obj]; d != nil {
+ alias = d.tdecl.Alias // package-level object
+ } else {
+ alias = obj.IsAlias() // function local object
+ }
+ if !alias {
+ ndef++
+ }
+ case *Func:
+ // ignored for now
+ default:
+ unreachable()
+ }
+ }
+
+ if check.conf.Trace {
+ check.trace(obj.Pos(), "## cycle detected: objPath = %s->%s (len = %d)", pathString(cycle), obj.Name(), len(cycle))
+ if tparCycle {
+ check.trace(obj.Pos(), "## cycle contains: generic type in a type parameter list")
+ } else {
+ check.trace(obj.Pos(), "## cycle contains: %d values, %d type definitions", nval, ndef)
+ }
+ defer func() {
+ if valid {
+ check.trace(obj.Pos(), "=> cycle is valid")
+ } else {
+ check.trace(obj.Pos(), "=> error: cycle is invalid")
+ }
+ }()
+ }
+
+ if !tparCycle {
+ // A cycle involving only constants and variables is invalid but we
+ // ignore them here because they are reported via the initialization
+ // cycle check.
+ if nval == len(cycle) {
+ return true
+ }
+
+ // A cycle involving only types (and possibly functions) must have at least
+ // one type definition to be permitted: If there is no type definition, we
+ // have a sequence of alias type names which will expand ad infinitum.
+ if nval == 0 && ndef > 0 {
+ return true
+ }
+ }
+
+ check.cycleError(cycle)
+ return false
+}
+
+// cycleError reports a declaration cycle starting with
+// the object in cycle that is "first" in the source.
+func (check *Checker) cycleError(cycle []Object) {
+ // TODO(gri) Should we start with the last (rather than the first) object in the cycle
+ // since that is the earliest point in the source where we start seeing the
+ // cycle? That would be more consistent with other error messages.
+ i := firstInSrc(cycle)
+ obj := cycle[i]
+ // If obj is a type alias, mark it as valid (not broken) in order to avoid follow-on errors.
+ tname, _ := obj.(*TypeName)
+ if tname != nil && tname.IsAlias() {
+ check.validAlias(tname, Typ[Invalid])
+ }
+ var err error_
+ if tname != nil && check.conf.CompilerErrorMessages {
+ err.errorf(obj, "invalid recursive type %s", obj.Name())
+ } else {
+ err.errorf(obj, "illegal cycle in declaration of %s", obj.Name())
+ }
+ for range cycle {
+ err.errorf(obj, "%s refers to", obj.Name())
+ i++
+ if i >= len(cycle) {
+ i = 0
+ }
+ obj = cycle[i]
+ }
+ err.errorf(obj, "%s", obj.Name())
+ check.report(&err)
+}
+
+// firstInSrc reports the index of the object with the "smallest"
+// source position in path. path must not be empty.
+func firstInSrc(path []Object) int {
+ fst, pos := 0, path[0].Pos()
+ for i, t := range path[1:] {
+ if t.Pos().Cmp(pos) < 0 {
+ fst, pos = i+1, t.Pos()
+ }
+ }
+ return fst
+}
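A small illustration of a declaration cycle that cycleError reports: two package-level aliases refer to each other with no type definition to terminate the expansion, so the cycle is invalid and is rejected.

package p

type A = B // illegal cycle in declaration of A: A refers to B refers to A
type B = A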
+
+func (check *Checker) constDecl(obj *Const, typ, init syntax.Expr, inherited bool) {
+ assert(obj.typ == nil)
+
+ // use the correct value of iota and errpos
+ defer func(iota constant.Value, errpos syntax.Pos) {
+ check.iota = iota
+ check.errpos = errpos
+ }(check.iota, check.errpos)
+ check.iota = obj.val
+ check.errpos = nopos
+
+ // provide valid constant value under all circumstances
+ obj.val = constant.MakeUnknown()
+
+ // determine type, if any
+ if typ != nil {
+ t := check.typ(typ)
+ if !isConstType(t) {
+ // don't report an error if the type is an invalid C (defined) type
+ // (issue #22090)
+ if under(t) != Typ[Invalid] {
+ check.errorf(typ, "invalid constant type %s", t)
+ }
+ obj.typ = Typ[Invalid]
+ return
+ }
+ obj.typ = t
+ }
+
+ // check initialization
+ var x operand
+ if init != nil {
+ if inherited {
+ // The initialization expression is inherited from a previous
+ // constant declaration, and (error) positions refer to that
+ // expression and not the current constant declaration. Use
+ // the constant identifier position for any errors during
+ // init expression evaluation since that is all we have
+ // (see issues #42991, #42992).
+ check.errpos = obj.pos
+ }
+ check.expr(&x, init)
+ }
+ check.initConst(obj, &x)
+}
+
+func (check *Checker) varDecl(obj *Var, lhs []*Var, typ, init syntax.Expr) {
+ assert(obj.typ == nil)
+
+ // If we have undefined variable types due to errors,
+ // mark variables as used to avoid follow-on errors.
+ // Matches compiler behavior.
+ defer func() {
+ if obj.typ == Typ[Invalid] {
+ obj.used = true
+ }
+ for _, lhs := range lhs {
+ if lhs.typ == Typ[Invalid] {
+ lhs.used = true
+ }
+ }
+ }()
+
+ // determine type, if any
+ if typ != nil {
+ obj.typ = check.varType(typ)
+ // We cannot spread the type to all lhs variables if there
+ // is more than one since that would mark them as checked
+ // (see Checker.objDecl) and the assignment of init exprs,
+ // if any, would not be checked.
+ //
+ // TODO(gri) If we have no init expr, we should distribute
+ // a given type; otherwise we need to re-evaluate the type
+ // expr for each lhs variable, leading to duplicate work.
+ }
+
+ // check initialization
+ if init == nil {
+ if typ == nil {
+ // error reported before by arityMatch
+ obj.typ = Typ[Invalid]
+ }
+ return
+ }
+
+ if lhs == nil || len(lhs) == 1 {
+ assert(lhs == nil || lhs[0] == obj)
+ var x operand
+ check.expr(&x, init)
+ check.initVar(obj, &x, "variable declaration")
+ return
+ }
+
+ if debug {
+ // obj must be one of lhs
+ found := false
+ for _, lhs := range lhs {
+ if obj == lhs {
+ found = true
+ break
+ }
+ }
+ if !found {
+ panic("inconsistent lhs")
+ }
+ }
+
+ // We have multiple variables on the lhs and one init expr.
+ // Make sure all variables have been given the same type if
+ // one was specified, otherwise they assume the type of the
+ // init expression values (was issue #15755).
+ if typ != nil {
+ for _, lhs := range lhs {
+ lhs.typ = obj.typ
+ }
+ }
+
+ check.initVars(lhs, []syntax.Expr{init}, nil)
+}
+
+// isImportedConstraint reports whether typ is an imported type constraint.
+func (check *Checker) isImportedConstraint(typ Type) bool {
+ named, _ := typ.(*Named)
+ if named == nil || named.obj.pkg == check.pkg || named.obj.pkg == nil {
+ return false
+ }
+ u, _ := named.under().(*Interface)
+ return u != nil && !u.IsMethodSet()
+}
+
+func (check *Checker) typeDecl(obj *TypeName, tdecl *syntax.TypeDecl, def *Named) {
+ assert(obj.typ == nil)
+
+ var rhs Type
+ check.later(func() {
+ if t, _ := obj.typ.(*Named); t != nil { // type may be invalid
+ check.validType(t)
+ }
+ // If typ is local, an error was already reported where typ is specified/defined.
+ if check.isImportedConstraint(rhs) && !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(tdecl.Type, "go1.18", "using type constraint %s", rhs)
+ }
+ }).describef(obj, "validType(%s)", obj.Name())
+
+ alias := tdecl.Alias
+ if alias && tdecl.TParamList != nil {
+ // The parser will ensure this but we may still get an invalid AST.
+ // Complain and continue as regular type definition.
+ check.error(tdecl, "generic type cannot be alias")
+ alias = false
+ }
+
+ // alias declaration
+ if alias {
+ if !check.allowVersion(check.pkg, 1, 9) {
+ check.versionErrorf(tdecl, "go1.9", "type aliases")
+ }
+
+ check.brokenAlias(obj)
+ rhs = check.varType(tdecl.Type)
+ check.validAlias(obj, rhs)
+ return
+ }
+
+ // type definition or generic type declaration
+ named := check.newNamed(obj, nil, nil, nil, nil)
+ def.setUnderlying(named)
+
+ if tdecl.TParamList != nil {
+ check.openScope(tdecl, "type parameters")
+ defer check.closeScope()
+ check.collectTypeParams(&named.tparams, tdecl.TParamList)
+ }
+
+ // determine underlying type of named
+ rhs = check.definedType(tdecl.Type, named)
+ assert(rhs != nil)
+ named.fromRHS = rhs
+
+ // If the underlying was not set while type-checking the right-hand side, it
+ // is invalid and an error should have been reported elsewhere.
+ if named.underlying == nil {
+ named.underlying = Typ[Invalid]
+ }
+
+ // Disallow a lone type parameter as the RHS of a type declaration (issue #45639).
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if the RHS is a lone type parameter, we will
+ // use its underlying type (like we do for any RHS in a type declaration), and its
+ // underlying type is an interface and the type declaration is well defined.
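+ // For example, "type T[P any] P" is rejected here.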
+ if isTypeParam(rhs) {
+ check.error(tdecl.Type, "cannot use a type parameter as RHS in type declaration")
+ named.underlying = Typ[Invalid]
+ }
+}
+
+func (check *Checker) collectTypeParams(dst **TypeParamList, list []*syntax.Field) {
+ tparams := make([]*TypeParam, len(list))
+
+ // Declare type parameters up-front.
+ // The scope of type parameters starts at the beginning of the type parameter
+ // list (so we can have mutually recursive parameterized type bounds).
+ for i, f := range list {
+ tparams[i] = check.declareTypeParam(f.Name)
+ }
+
+ // Set the type parameters before collecting the type constraints because
+ // the parameterized type may be used by the constraints (issue #47887).
+ // Example: type T[P T[P]] interface{}
+ *dst = bindTParams(tparams)
+
+ // Signal to cycle detection that we are in a type parameter list.
+ // We can only be inside one type parameter list at any given time:
+ // function closures may appear inside a type parameter list but they
+ // cannot be generic, and their bodies are processed in delayed and
+ // sequential fashion. Note that with each new declaration, we save
+ // the existing environment and restore it when done; thus inTParamList
+ // is true exactly when we are in a specific type parameter list.
+ assert(!check.inTParamList)
+ check.inTParamList = true
+ defer func() {
+ check.inTParamList = false
+ }()
+
+ // Keep track of bounds for later validation.
+ var bound Type
+ for i, f := range list {
+ // Optimization: Re-use the previous type bound if it hasn't changed.
+ // This also preserves the grouped output of type parameter lists
+ // when printing type strings.
+ if i == 0 || f.Type != list[i-1].Type {
+ bound = check.bound(f.Type)
+ if isTypeParam(bound) {
+ // We may be able to allow this since it is now well-defined what
+ // the underlying type and thus type set of a type parameter is.
+ // But we may need some additional form of cycle detection within
+ // type parameter lists.
+ check.error(f.Type, "cannot use a type parameter as constraint")
+ bound = Typ[Invalid]
+ }
+ }
+ tparams[i].bound = bound
+ }
+}
+
+func (check *Checker) bound(x syntax.Expr) Type {
+ // A type set literal of the form ~T and A|B may only appear as constraint;
+ // embed it in an implicit interface so that only interface type-checking
+ // needs to take care of such type expressions.
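+ // For example, a constraint written as ~int | string is treated as
+ // if it were interface{ ~int | string }.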
+ if op, _ := x.(*syntax.Operation); op != nil && (op.Op == syntax.Tilde || op.Op == syntax.Or) {
+ t := check.typ(&syntax.InterfaceType{MethodList: []*syntax.Field{{Type: x}}})
+ // mark t as implicit interface if all went well
+ if t, _ := t.(*Interface); t != nil {
+ t.implicit = true
+ }
+ return t
+ }
+ return check.typ(x)
+}
+
+func (check *Checker) declareTypeParam(name *syntax.Name) *TypeParam {
+ // Use Typ[Invalid] for the type constraint to ensure that a type
+ // is present even if the actual constraint has not been assigned
+ // yet.
+ // TODO(gri) Need to systematically review all uses of type parameter
+ // constraints to make sure we don't rely on them if they
+ // are not properly set yet.
+ tname := NewTypeName(name.Pos(), check.pkg, name.Value, nil)
+ tpar := check.newTypeParam(tname, Typ[Invalid]) // assigns type to tname as a side-effect
+ check.declare(check.scope, name, tname, check.scope.pos) // TODO(gri) check scope position
+ return tpar
+}
+
+func (check *Checker) collectMethods(obj *TypeName) {
+ // get associated methods
+ // (Checker.collectObjects only collects methods with non-blank names;
+ // Checker.resolveBaseTypeName ensures that obj is not an alias name
+ // if it has attached methods.)
+ methods := check.methods[obj]
+ if methods == nil {
+ return
+ }
+ delete(check.methods, obj)
+ assert(!check.objMap[obj].tdecl.Alias) // don't use TypeName.IsAlias (requires fully set up object)
+
+ // use an objset to check for name conflicts
+ var mset objset
+
+ // spec: "If the base type is a struct type, the non-blank method
+ // and field names must be distinct."
+ base, _ := obj.typ.(*Named) // shouldn't fail but be conservative
+ if base != nil {
+ assert(base.targs.Len() == 0) // collectMethods should not be called on an instantiated type
+
+ // See issue #52529: we must delay the expansion of underlying here, as
+ // base may not be fully set-up.
+ check.later(func() {
+ check.checkFieldUniqueness(base)
+ }).describef(obj, "verifying field uniqueness for %v", base)
+
+ // Checker.Files may be called multiple times; additional package files
+ // may add methods to already type-checked types. Add pre-existing methods
+ // so that we can detect redeclarations.
+ for i := 0; i < base.methods.Len(); i++ {
+ m := base.methods.At(i, nil)
+ assert(m.name != "_")
+ assert(mset.insert(m) == nil)
+ }
+ }
+
+ // add valid methods
+ for _, m := range methods {
+ // spec: "For a base type, the non-blank names of methods bound
+ // to it must be unique."
+ assert(m.name != "_")
+ if alt := mset.insert(m); alt != nil {
+ var err error_
+ if check.conf.CompilerErrorMessages {
+ err.errorf(m.pos, "%s.%s redeclared in this block", obj.Name(), m.name)
+ } else {
+ err.errorf(m.pos, "method %s already declared for %s", m.name, obj)
+ }
+ err.recordAltDecl(alt)
+ check.report(&err)
+ continue
+ }
+
+ if base != nil {
+ base.resolve(nil) // TODO(mdempsky): Probably unnecessary.
+ base.AddMethod(m)
+ }
+ }
+}
+
+func (check *Checker) checkFieldUniqueness(base *Named) {
+ if t, _ := base.under().(*Struct); t != nil {
+ var mset objset
+ for i := 0; i < base.methods.Len(); i++ {
+ m := base.methods.At(i, nil)
+ assert(m.name != "_")
+ assert(mset.insert(m) == nil)
+ }
+
+ // Check that any non-blank field names of base are distinct from its
+ // method names.
+ for _, fld := range t.fields {
+ if fld.name != "_" {
+ if alt := mset.insert(fld); alt != nil {
+ // Struct fields should already be unique, so we should only
+ // encounter an alternate via collision with a method name.
+ _ = alt.(*Func)
+
+ // For historical consistency, we report the primary error on the
+ // method, and the alt decl on the field.
+ var err error_
+ err.errorf(alt, "field and method with the same name %s", fld.name)
+ err.recordAltDecl(fld)
+ check.report(&err)
+ }
+ }
+ }
+ }
+}
+
+func (check *Checker) funcDecl(obj *Func, decl *declInfo) {
+ assert(obj.typ == nil)
+
+ // func declarations cannot use iota
+ assert(check.iota == nil)
+
+ sig := new(Signature)
+ obj.typ = sig // guard against cycles
+
+ // Avoid cycle error when referring to method while type-checking the signature.
+ // This avoids a nuisance in the best case (non-parameterized receiver type) and
+ // since the method is not a type, we get an error. If we have a parameterized
+ // receiver type, instantiating the receiver type leads to the instantiation of
+ // its methods, and we don't want a cycle error in that case.
+ // TODO(gri) review if this is correct and/or whether we still need this?
+ saved := obj.color_
+ obj.color_ = black
+ fdecl := decl.fdecl
+ check.funcType(sig, fdecl.Recv, fdecl.TParamList, fdecl.Type)
+ obj.color_ = saved
+
+ if len(fdecl.TParamList) > 0 && fdecl.Body == nil {
+ check.softErrorf(fdecl, "parameterized function is missing function body")
+ }
+
+ // function body must be type-checked after global declarations
+ // (functions implemented elsewhere have no body)
+ if !check.conf.IgnoreFuncBodies && fdecl.Body != nil {
+ check.later(func() {
+ check.funcBody(decl, obj.name, sig, fdecl.Body, nil)
+ })
+ }
+}
+
+func (check *Checker) declStmt(list []syntax.Decl) {
+ pkg := check.pkg
+
+ first := -1 // index of first ConstDecl in the current group, or -1
+ var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil
+ for index, decl := range list {
+ if _, ok := decl.(*syntax.ConstDecl); !ok {
+ first = -1 // we're not in a constant declaration
+ }
+
+ switch s := decl.(type) {
+ case *syntax.ConstDecl:
+ top := len(check.delayed)
+
+ // iota is the index of the current constDecl within the group
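+ // For example, in
+ //	const (
+ //		a = iota // first ConstDecl in the group: iota == 0
+ //		b        // second ConstDecl in the group: iota == 1
+ //	)
+ // b's inherited initializer is evaluated with iota == 1.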
+ if first < 0 || s.Group == nil || list[index-1].(*syntax.ConstDecl).Group != s.Group {
+ first = index
+ last = nil
+ }
+ iota := constant.MakeInt64(int64(index - first))
+
+ // determine which initialization expressions to use
+ inherited := true
+ switch {
+ case s.Type != nil || s.Values != nil:
+ last = s
+ inherited = false
+ case last == nil:
+ last = new(syntax.ConstDecl) // make sure last exists
+ inherited = false
+ }
+
+ // declare all constants
+ lhs := make([]*Const, len(s.NameList))
+ values := unpackExpr(last.Values)
+ for i, name := range s.NameList {
+ obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
+ lhs[i] = obj
+
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+
+ check.constDecl(obj, last.Type, init, inherited)
+ }
+
+ // Constants must always have init values.
+ check.arity(s.Pos(), s.NameList, values, true, inherited)
+
+ // process function literals in init expressions before scope changes
+ check.processDelayed(top)
+
+ // spec: "The scope of a constant or variable identifier declared
+ // inside a function begins at the end of the ConstSpec or VarSpec
+ // (ShortVarDecl for short variable declarations) and ends at the
+ // end of the innermost containing block."
+ scopePos := syntax.EndPos(s)
+ for i, name := range s.NameList {
+ check.declare(check.scope, name, lhs[i], scopePos)
+ }
+
+ case *syntax.VarDecl:
+ top := len(check.delayed)
+
+ lhs0 := make([]*Var, len(s.NameList))
+ for i, name := range s.NameList {
+ lhs0[i] = NewVar(name.Pos(), pkg, name.Value, nil)
+ }
+
+ // initialize all variables
+ values := unpackExpr(s.Values)
+ for i, obj := range lhs0 {
+ var lhs []*Var
+ var init syntax.Expr
+ switch len(values) {
+ case len(s.NameList):
+ // lhs and rhs match
+ init = values[i]
+ case 1:
+ // rhs is expected to be a multi-valued expression
+ lhs = lhs0
+ init = values[0]
+ default:
+ if i < len(values) {
+ init = values[i]
+ }
+ }
+ check.varDecl(obj, lhs, s.Type, init)
+ if len(values) == 1 {
+ // If we have a single lhs variable we are done either way.
+ // If we have a single rhs expression, it must be a multi-
+ // valued expression, in which case handling the first lhs
+ // variable will cause all lhs variables to have a type
+ // assigned, and we are done as well.
+ if debug {
+ for _, obj := range lhs0 {
+ assert(obj.typ != nil)
+ }
+ }
+ break
+ }
+ }
+
+ // If we have no type, we must have values.
+ if s.Type == nil || values != nil {
+ check.arity(s.Pos(), s.NameList, values, false, false)
+ }
+
+ // process function literals in init expressions before scope changes
+ check.processDelayed(top)
+
+ // declare all variables
+ // (only at this point are the variable scopes (parents) set)
+ scopePos := syntax.EndPos(s) // see constant declarations
+ for i, name := range s.NameList {
+ // see constant declarations
+ check.declare(check.scope, name, lhs0[i], scopePos)
+ }
+
+ case *syntax.TypeDecl:
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil)
+ // spec: "The scope of a type identifier declared inside a function
+ // begins at the identifier in the TypeSpec and ends at the end of
+ // the innermost containing block."
+ scopePos := s.Name.Pos()
+ check.declare(check.scope, s.Name, obj, scopePos)
+ // mark and unmark type before calling typeDecl; its type is still nil (see Checker.objDecl)
+ obj.setColor(grey + color(check.push(obj)))
+ check.typeDecl(obj, s, nil)
+ check.pop().setColor(black)
+
+ default:
+ check.errorf(s, invalidAST+"unknown syntax.Decl node %T", s)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/errorcalls_test.go b/src/cmd/compile/internal/types2/errorcalls_test.go
new file mode 100644
index 0000000..80b05f9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errorcalls_test.go
@@ -0,0 +1,49 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "testing"
+)
+
+// TestErrorCalls makes sure that check.errorf calls have at
+// least 3 arguments (otherwise we should be using check.error).
+func TestErrorCalls(t *testing.T) {
+ files, err := pkgFiles(".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for _, file := range files {
+ syntax.Crawl(file, func(n syntax.Node) bool {
+ call, _ := n.(*syntax.CallExpr)
+ if call == nil {
+ return false
+ }
+ selx, _ := call.Fun.(*syntax.SelectorExpr)
+ if selx == nil {
+ return false
+ }
+ if !(isName(selx.X, "check") && isName(selx.Sel, "errorf")) {
+ return false
+ }
+ // check.errorf calls should have more than 2 arguments:
+ // position, format string, and arguments to format
+ if n := len(call.ArgList); n <= 2 {
+ t.Errorf("%s: got %d arguments, want > 2", call.Pos(), n)
+ return true
+ }
+ return false
+ })
+ }
+}
+
+func isName(n syntax.Node, name string) bool {
+ if n, ok := n.(*syntax.Name); ok {
+ return n.Value == name
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/errors.go b/src/cmd/compile/internal/types2/errors.go
new file mode 100644
index 0000000..422f520
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errors.go
@@ -0,0 +1,308 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various error reporters.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "strconv"
+ "strings"
+)
+
+func unimplemented() {
+ panic("unimplemented")
+}
+
+func assert(p bool) {
+ if !p {
+ panic("assertion failed")
+ }
+}
+
+func unreachable() {
+ panic("unreachable")
+}
+
+// An error_ represents a type-checking error.
+// To report an error_, call Checker.report.
+type error_ struct {
+ desc []errorDesc
+ soft bool // TODO(gri) eventually determine this from an error code
+}
+
+// An errorDesc describes part of a type-checking error.
+type errorDesc struct {
+ pos syntax.Pos
+ format string
+ args []interface{}
+}
+
+func (err *error_) empty() bool {
+ return err.desc == nil
+}
+
+func (err *error_) pos() syntax.Pos {
+ if err.empty() {
+ return nopos
+ }
+ return err.desc[0].pos
+}
+
+func (err *error_) msg(qf Qualifier) string {
+ if err.empty() {
+ return "no error"
+ }
+ var buf bytes.Buffer
+ for i := range err.desc {
+ p := &err.desc[i]
+ if i > 0 {
+ fmt.Fprint(&buf, "\n\t")
+ if p.pos.IsKnown() {
+ fmt.Fprintf(&buf, "%s: ", p.pos)
+ }
+ }
+ buf.WriteString(sprintf(qf, false, p.format, p.args...))
+ }
+ return buf.String()
+}
+
+// String is for testing.
+func (err *error_) String() string {
+ if err.empty() {
+ return "no error"
+ }
+ return fmt.Sprintf("%s: %s", err.pos(), err.msg(nil))
+}
+
+// errorf adds formatted error information to err.
+// It may be called multiple times to provide additional information.
+func (err *error_) errorf(at poser, format string, args ...interface{}) {
+ err.desc = append(err.desc, errorDesc{posFor(at), format, args})
+}
+
+func sprintf(qf Qualifier, debug bool, format string, args ...interface{}) string {
+ for i, arg := range args {
+ switch a := arg.(type) {
+ case nil:
+ arg = "<nil>"
+ case operand:
+ panic("got operand instead of *operand")
+ case *operand:
+ arg = operandString(a, qf)
+ case syntax.Pos:
+ arg = a.String()
+ case syntax.Expr:
+ arg = syntax.String(a)
+ case []syntax.Expr:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(syntax.String(x))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ case Object:
+ arg = ObjectString(a, qf)
+ case Type:
+ arg = typeString(a, qf, debug)
+ case []Type:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(typeString(x, qf, debug))
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ case []*TypeParam:
+ var buf bytes.Buffer
+ buf.WriteByte('[')
+ for i, x := range a {
+ if i > 0 {
+ buf.WriteString(", ")
+ }
+ buf.WriteString(typeString(x, qf, debug)) // use typeString so we get subscripts when debugging
+ }
+ buf.WriteByte(']')
+ arg = buf.String()
+ }
+ args[i] = arg
+ }
+ return fmt.Sprintf(format, args...)
+}
+
+func (check *Checker) qualifier(pkg *Package) string {
+ // Qualify the package unless it's the package being type-checked.
+ if pkg != check.pkg {
+ if check.pkgPathMap == nil {
+ check.pkgPathMap = make(map[string]map[string]bool)
+ check.seenPkgMap = make(map[*Package]bool)
+ check.markImports(check.pkg)
+ }
+ // If the same package name was used by multiple packages, display the full path.
+ if len(check.pkgPathMap[pkg.name]) > 1 {
+ return strconv.Quote(pkg.path)
+ }
+ return pkg.name
+ }
+ return ""
+}
+
+// markImports recursively walks pkg and its imports, to record unique import
+// paths in pkgPathMap.
+func (check *Checker) markImports(pkg *Package) {
+ if check.seenPkgMap[pkg] {
+ return
+ }
+ check.seenPkgMap[pkg] = true
+
+ forName, ok := check.pkgPathMap[pkg.name]
+ if !ok {
+ forName = make(map[string]bool)
+ check.pkgPathMap[pkg.name] = forName
+ }
+ forName[pkg.path] = true
+
+ for _, imp := range pkg.imports {
+ check.markImports(imp)
+ }
+}
+
+// check may be nil.
+func (check *Checker) sprintf(format string, args ...interface{}) string {
+ var qf Qualifier
+ if check != nil {
+ qf = check.qualifier
+ }
+ return sprintf(qf, false, format, args...)
+}
+
+func (check *Checker) report(err *error_) {
+ if err.empty() {
+ panic("no error to report")
+ }
+ check.err(err.pos(), err.msg(check.qualifier), err.soft)
+}
+
+func (check *Checker) trace(pos syntax.Pos, format string, args ...interface{}) {
+ fmt.Printf("%s:\t%s%s\n",
+ pos,
+ strings.Repeat(". ", check.indent),
+ sprintf(check.qualifier, true, format, args...),
+ )
+}
+
+// dump is only needed for debugging
+func (check *Checker) dump(format string, args ...interface{}) {
+ fmt.Println(sprintf(check.qualifier, true, format, args...))
+}
+
+func (check *Checker) err(at poser, msg string, soft bool) {
+ // Cheap trick: Don't report errors with messages containing
+ // "invalid operand" or "invalid type" as those tend to be
+ // follow-on errors which don't add useful information. Only
+ // exclude them if these strings are not at the beginning,
+ // and only if we have at least one error already reported.
+ if check.firstErr != nil && (strings.Index(msg, "invalid operand") > 0 || strings.Index(msg, "invalid type") > 0) {
+ return
+ }
+
+ pos := posFor(at)
+
+ // If we are encountering an error while evaluating an inherited
+ // constant initialization expression, pos is the position in
+ // the original expression, and not of the currently declared
+ // constant identifier. Use the provided errpos instead.
+ // TODO(gri) We may also want to augment the error message and
+ // refer to the position (pos) in the original expression.
+ if check.errpos.IsKnown() {
+ assert(check.iota != nil)
+ pos = check.errpos
+ }
+
+ err := Error{pos, stripAnnotations(msg), msg, soft}
+ if check.firstErr == nil {
+ check.firstErr = err
+ }
+
+ if check.conf.Trace {
+ check.trace(pos, "ERROR: %s", msg)
+ }
+
+ f := check.conf.Error
+ if f == nil {
+ panic(bailout{}) // report only first error
+ }
+ f(err)
+}
+
+const (
+ invalidAST = "invalid AST: "
+ invalidArg = "invalid argument: "
+ invalidOp = "invalid operation: "
+)
+
+type poser interface {
+ Pos() syntax.Pos
+}
+
+func (check *Checker) error(at poser, msg string) {
+ check.err(at, msg, false)
+}
+
+func (check *Checker) errorf(at poser, format string, args ...interface{}) {
+ check.err(at, check.sprintf(format, args...), false)
+}
+
+func (check *Checker) softErrorf(at poser, format string, args ...interface{}) {
+ check.err(at, check.sprintf(format, args...), true)
+}
+
+func (check *Checker) versionErrorf(at poser, goVersion string, format string, args ...interface{}) {
+ msg := check.sprintf(format, args...)
+ if check.conf.CompilerErrorMessages {
+ msg = fmt.Sprintf("%s requires %s or later (-lang was set to %s; check go.mod)", msg, goVersion, check.conf.GoVersion)
+ } else {
+ msg = fmt.Sprintf("%s requires %s or later", msg, goVersion)
+ }
+ check.err(at, msg, true)
+}
+
+// posFor reports the left (= start) position of at.
+func posFor(at poser) syntax.Pos {
+ switch x := at.(type) {
+ case *operand:
+ if x.expr != nil {
+ return syntax.StartPos(x.expr)
+ }
+ case syntax.Node:
+ return syntax.StartPos(x)
+ }
+ return at.Pos()
+}
+
+// stripAnnotations removes internal (type) annotations from s.
+func stripAnnotations(s string) string {
+ // Would like to use strings.Builder but it's not available in Go 1.4.
+ var b bytes.Buffer
+ for _, r := range s {
+ // strip #'s and subscript digits
+ if r < '₀' || '₀'+10 <= r { // '₀' == U+2080
+ b.WriteRune(r)
+ }
+ }
+ if b.Len() < len(s) {
+ return b.String()
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/types2/errors_test.go b/src/cmd/compile/internal/types2/errors_test.go
new file mode 100644
index 0000000..ac73ca4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/errors_test.go
@@ -0,0 +1,44 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "testing"
+
+func TestError(t *testing.T) {
+ var err error_
+ want := "no error"
+ if got := err.String(); got != want {
+ t.Errorf("empty error: got %q, want %q", got, want)
+ }
+
+ want = "<unknown position>: foo 42"
+ err.errorf(nopos, "foo %d", 42)
+ if got := err.String(); got != want {
+ t.Errorf("simple error: got %q, want %q", got, want)
+ }
+
+ want = "<unknown position>: foo 42\n\tbar 43"
+ err.errorf(nopos, "bar %d", 43)
+ if got := err.String(); got != want {
+ t.Errorf("simple error: got %q, want %q", got, want)
+ }
+}
+
+func TestStripAnnotations(t *testing.T) {
+ for _, test := range []struct {
+ in, want string
+ }{
+ {"", ""},
+ {" ", " "},
+ {"foo", "foo"},
+ {"foo₀", "foo"},
+ {"foo(T₀)", "foo(T)"},
+ } {
+ got := stripAnnotations(test.in)
+ if got != test.want {
+ t.Errorf("%q: got %q; want %q", test.in, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/example_test.go b/src/cmd/compile/internal/types2/example_test.go
new file mode 100644
index 0000000..4edaad5
--- /dev/null
+++ b/src/cmd/compile/internal/types2/example_test.go
@@ -0,0 +1,269 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Only run where builders (build.golang.org) have
+// access to compiled packages for import.
+//
+//go:build !arm && !arm64
+// +build !arm,!arm64
+
+package types2_test
+
+// This file shows examples of basic usage of the go/types API.
+//
+// To locate a Go package, use (*go/build.Context).Import.
+// To load, parse, and type-check a complete Go program
+// from source, use golang.org/x/tools/go/loader.
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "fmt"
+ "log"
+ "regexp"
+ "sort"
+ "strings"
+)
+
+// ExampleScope prints the tree of Scopes of a package created from a
+// set of parsed files.
+func ExampleScope() {
+ // Parse the source files for a package.
+ var files []*syntax.File
+ for _, file := range []struct{ name, input string }{
+ {"main.go", `
+package main
+import "fmt"
+func main() {
+ freezing := FToC(-18)
+ fmt.Println(freezing, Boiling) }
+`},
+ {"celsius.go", `
+package main
+import "fmt"
+type Celsius float64
+func (c Celsius) String() string { return fmt.Sprintf("%g°C", c) }
+func FToC(f float64) Celsius { return Celsius(f - 32 / 9 * 5) }
+const Boiling Celsius = 100
+func Unused() { {}; {{ var x int; _ = x }} } // make sure empty block scopes get printed
+`},
+ } {
+ f, err := parseSrc(file.name, file.input)
+ if err != nil {
+ log.Fatal(err)
+ }
+ files = append(files, f)
+ }
+
+ // Type-check a package consisting of these files.
+ // Type information for the imported "fmt" package
+ // comes from $GOROOT/pkg/$GOOS_$GOARCH/fmt.a.
+ conf := types2.Config{Importer: defaultImporter()}
+ pkg, err := conf.Check("temperature", files, nil)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Print the tree of scopes.
+ // For determinism, we redact addresses.
+ var buf bytes.Buffer
+ pkg.Scope().WriteTo(&buf, 0, true)
+ rx := regexp.MustCompile(` 0x[a-fA-F0-9]*`)
+ fmt.Println(rx.ReplaceAllString(buf.String(), ""))
+
+ // Output:
+ // package "temperature" scope {
+ // . const temperature.Boiling temperature.Celsius
+ // . type temperature.Celsius float64
+ // . func temperature.FToC(f float64) temperature.Celsius
+ // . func temperature.Unused()
+ // . func temperature.main()
+ // . main.go scope {
+ // . . package fmt
+ // . . function scope {
+ // . . . var freezing temperature.Celsius
+ // . . }
+ // . }
+ // . celsius.go scope {
+ // . . package fmt
+ // . . function scope {
+ // . . . var c temperature.Celsius
+ // . . }
+ // . . function scope {
+ // . . . var f float64
+ // . . }
+ // . . function scope {
+ // . . . block scope {
+ // . . . }
+ // . . . block scope {
+ // . . . . block scope {
+ // . . . . . var x int
+ // . . . . }
+ // . . . }
+ // . . }
+ // . }
+ // }
+}
+
+// ExampleInfo prints various facts recorded by the type checker in a
+// types2.Info struct: definitions of and references to each named object,
+// and the type, value, and mode of every expression in the package.
+func ExampleInfo() {
+ // Parse a single source file.
+ const input = `
+package fib
+
+type S string
+
+var a, b, c = len(b), S(c), "hello"
+
+func fib(x int) int {
+ if x < 2 {
+ return x
+ }
+ return fib(x-1) - fib(x-2)
+}`
+ f, err := parseSrc("fib.go", input)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Type-check the package.
+ // We create an empty map for each kind of input
+ // we're interested in, and Check populates them.
+ info := types2.Info{
+ Types: make(map[syntax.Expr]types2.TypeAndValue),
+ Defs: make(map[*syntax.Name]types2.Object),
+ Uses: make(map[*syntax.Name]types2.Object),
+ }
+ var conf types2.Config
+ pkg, err := conf.Check("fib", []*syntax.File{f}, &info)
+ if err != nil {
+ log.Fatal(err)
+ }
+
+ // Print package-level variables in initialization order.
+ fmt.Printf("InitOrder: %v\n\n", info.InitOrder)
+
+ // For each named object, print the line and
+ // column of its definition and each of its uses.
+ fmt.Println("Defs and Uses of each named object:")
+ usesByObj := make(map[types2.Object][]string)
+ for id, obj := range info.Uses {
+ posn := id.Pos()
+ lineCol := fmt.Sprintf("%d:%d", posn.Line(), posn.Col())
+ usesByObj[obj] = append(usesByObj[obj], lineCol)
+ }
+ var items []string
+ for obj, uses := range usesByObj {
+ sort.Strings(uses)
+ item := fmt.Sprintf("%s:\n defined at %s\n used at %s",
+ types2.ObjectString(obj, types2.RelativeTo(pkg)),
+ obj.Pos(),
+ strings.Join(uses, ", "))
+ items = append(items, item)
+ }
+ sort.Strings(items) // sort by line:col, in effect
+ fmt.Println(strings.Join(items, "\n"))
+ fmt.Println()
+
+ // TODO(gri) Enable once positions are updated/verified
+ // fmt.Println("Types and Values of each expression:")
+ // items = nil
+ // for expr, tv := range info.Types {
+ // var buf bytes.Buffer
+ // posn := expr.Pos()
+ // tvstr := tv.Type.String()
+ // if tv.Value != nil {
+ // tvstr += " = " + tv.Value.String()
+ // }
+ // // line:col | expr | mode : type = value
+ // fmt.Fprintf(&buf, "%2d:%2d | %-19s | %-7s : %s",
+ // posn.Line(), posn.Col(), types2.ExprString(expr),
+ // mode(tv), tvstr)
+ // items = append(items, buf.String())
+ // }
+ // sort.Strings(items)
+ // fmt.Println(strings.Join(items, "\n"))
+
+ // Output:
+ // InitOrder: [c = "hello" b = S(c) a = len(b)]
+ //
+ // Defs and Uses of each named object:
+ // builtin len:
+ // defined at <unknown position>
+ // used at 6:15
+ // func fib(x int) int:
+ // defined at fib.go:8:6
+ // used at 12:20, 12:9
+ // type S string:
+ // defined at fib.go:4:6
+ // used at 6:23
+ // type int:
+ // defined at <unknown position>
+ // used at 8:12, 8:17
+ // type string:
+ // defined at <unknown position>
+ // used at 4:8
+ // var b S:
+ // defined at fib.go:6:8
+ // used at 6:19
+ // var c string:
+ // defined at fib.go:6:11
+ // used at 6:25
+ // var x int:
+ // defined at fib.go:8:10
+ // used at 10:10, 12:13, 12:24, 9:5
+}
+
+// TODO(gri) Enable once positions are updated/verified
+// Types and Values of each expression:
+// 4: 8 | string | type : string
+// 6:15 | len | builtin : func(string) int
+// 6:15 | len(b) | value : int
+// 6:19 | b | var : fib.S
+// 6:23 | S | type : fib.S
+// 6:23 | S(c) | value : fib.S
+// 6:25 | c | var : string
+// 6:29 | "hello" | value : string = "hello"
+// 8:12 | int | type : int
+// 8:17 | int | type : int
+// 9: 5 | x | var : int
+// 9: 5 | x < 2 | value : untyped bool
+// 9: 9 | 2 | value : int = 2
+// 10:10 | x | var : int
+// 12: 9 | fib | value : func(x int) int
+// 12: 9 | fib(x - 1) | value : int
+// 12: 9 | fib(x - 1) - fib(x - 2) | value : int
+// 12:13 | x | var : int
+// 12:13 | x - 1 | value : int
+// 12:15 | 1 | value : int = 1
+// 12:20 | fib | value : func(x int) int
+// 12:20 | fib(x - 2) | value : int
+// 12:24 | x | var : int
+// 12:24 | x - 2 | value : int
+// 12:26 | 2 | value : int = 2
+
+func mode(tv types2.TypeAndValue) string {
+ switch {
+ case tv.IsVoid():
+ return "void"
+ case tv.IsType():
+ return "type"
+ case tv.IsBuiltin():
+ return "builtin"
+ case tv.IsNil():
+ return "nil"
+ case tv.Assignable():
+ if tv.Addressable() {
+ return "var"
+ }
+ return "mapindex"
+ case tv.IsValue():
+ return "value"
+ default:
+ return "unknown"
+ }
+}
diff --git a/src/cmd/compile/internal/types2/expr.go b/src/cmd/compile/internal/types2/expr.go
new file mode 100644
index 0000000..05cf1d0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/expr.go
@@ -0,0 +1,1863 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "go/token"
+ "math"
+)
+
+/*
+Basic algorithm:
+
+Expressions are checked recursively, top down. Expression checker functions
+are generally of the form:
+
+ func f(x *operand, e *syntax.Expr, ...)
+
+where e is the expression to be checked, and x is the result of the check.
+The check performed by f may fail in which case x.mode == invalid, and
+related error messages will have been issued by f.
+
+If a hint argument is present, it is the composite literal element type
+of an outer composite literal; it is used to type-check composite literal
+elements that have no explicit type specification in the source
+(e.g.: []T{{...}, {...}}, the hint is the type T in this case).
+
+All expressions are checked via rawExpr, which dispatches according
+to expression kind. Upon returning, rawExpr is recording the types and
+constant values for all expressions that have an untyped type (those types
+may change on the way up in the expression tree). Usually these are constants,
+but the results of comparisons or non-constant shifts of untyped constants
+may also be untyped, but not constant.
+
+Untyped expressions may eventually become fully typed (i.e., not untyped),
+typically when the value is assigned to a variable, or is used otherwise.
+The updateExprType method is used to record this final type and update
+the recorded types: the type-checked expression tree is again traversed down,
+and the new type is propagated as needed. Untyped constant expression values
+that become fully typed must now be representable by the full type (constant
+sub-expression trees are left alone except for their roots). This mechanism
+ensures that a client sees the actual (run-time) type an untyped value would
+have. It also permits type-checking of lhs shift operands "as if the shift
+were not present": when updateExprType visits an untyped lhs shift operand
+and assigns it its final type, that type must be an integer type, and a
+constant lhs must be representable as an integer.
+
+When an expression gets its final type, either on the way out from rawExpr,
+on the way down in updateExprType, or at the end of the type checker run,
+the type (and constant value, if any) is recorded via Info.Types, if present.
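+
+For example, in
+
+ var x float32 = 1 << 3
+
+the untyped constant expression 1 << 3 is initially given the untyped int
+type; only when it is used to initialize x does updateExprType record the
+final type float32 (and the value 8) for it.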
+*/
+
+type opPredicates map[syntax.Operator]func(Type) bool
+
+var unaryOpPredicates opPredicates
+
+func init() {
+ // Setting unaryOpPredicates in init avoids declaration cycles.
+ unaryOpPredicates = opPredicates{
+ syntax.Add: allNumeric,
+ syntax.Sub: allNumeric,
+ syntax.Xor: allInteger,
+ syntax.Not: allBoolean,
+ }
+}
+
+func (check *Checker) op(m opPredicates, x *operand, op syntax.Operator) bool {
+ if pred := m[op]; pred != nil {
+ if !pred(x.typ) {
+ check.errorf(x, invalidOp+"operator %s not defined on %s", op, x)
+ return false
+ }
+ } else {
+ check.errorf(x, invalidAST+"unknown operator %s", op)
+ return false
+ }
+ return true
+}
+
+// overflow checks that the constant x is representable by its type.
+// For untyped constants, it checks that the value doesn't become
+// arbitrarily large.
+func (check *Checker) overflow(x *operand) {
+ assert(x.mode == constant_)
+
+ // If the corresponding expression is an operation, use the
+ // operator position rather than the start of the expression
+ // as error position.
+ pos := syntax.StartPos(x.expr)
+ what := "" // operator description, if any
+ if op, _ := x.expr.(*syntax.Operation); op != nil {
+ pos = op.Pos()
+ what = opName(op)
+ }
+
+ if x.val.Kind() == constant.Unknown {
+ // TODO(gri) We should report exactly what went wrong. At the
+ // moment we don't have the (go/constant) API for that.
+ // See also TODO in go/constant/value.go.
+ check.error(pos, "constant result is not representable")
+ return
+ }
+
+ // Typed constants must be representable in
+ // their type after each constant operation.
+ // x.typ cannot be a type parameter (type
+ // parameters cannot be constant types).
+ if isTyped(x.typ) {
+ check.representable(x, under(x.typ).(*Basic))
+ return
+ }
+
+ // Untyped integer values must not grow arbitrarily.
+ const prec = 512 // 512 is the constant precision
+ if x.val.Kind() == constant.Int && constant.BitLen(x.val) > prec {
+ check.errorf(pos, "constant %s overflow", what)
+ x.val = constant.MakeUnknown()
+ }
+}
+
+// opName returns the name of an operation, or the empty string.
+// Only operations that might overflow are handled.
+func opName(e *syntax.Operation) string {
+ op := int(e.Op)
+ if e.Y == nil {
+ if op < len(op2str1) {
+ return op2str1[op]
+ }
+ } else {
+ if op < len(op2str2) {
+ return op2str2[op]
+ }
+ }
+ return ""
+}
+
+var op2str1 = [...]string{
+ syntax.Xor: "bitwise complement",
+}
+
+// This is only used for operations that may cause overflow.
+var op2str2 = [...]string{
+ syntax.Add: "addition",
+ syntax.Sub: "subtraction",
+ syntax.Xor: "bitwise XOR",
+ syntax.Mul: "multiplication",
+ syntax.Shl: "shift",
+}
+
+// If typ is a type parameter, underIs returns the result of typ.underIs(f).
+// Otherwise, underIs returns the result of f(under(typ)).
+func underIs(typ Type, f func(Type) bool) bool {
+ if tpar, _ := typ.(*TypeParam); tpar != nil {
+ return tpar.underIs(f)
+ }
+ return f(under(typ))
+}
+
+func (check *Checker) unary(x *operand, e *syntax.Operation) {
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ return
+ }
+
+ switch e.Op {
+ case syntax.And:
+ // spec: "As an exception to the addressability
+ // requirement x may also be a composite literal."
+ if _, ok := unparen(e.X).(*syntax.CompositeLit); !ok && x.mode != variable {
+ check.errorf(x, invalidOp+"cannot take address of %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = value
+ x.typ = &Pointer{base: x.typ}
+ return
+
+ case syntax.Recv:
+ u := coreType(x.typ)
+ if u == nil {
+ check.errorf(x, invalidOp+"cannot receive from %s: no core type", x)
+ x.mode = invalid
+ return
+ }
+ ch, _ := u.(*Chan)
+ if ch == nil {
+ check.errorf(x, invalidOp+"cannot receive from non-channel %s", x)
+ x.mode = invalid
+ return
+ }
+ if ch.dir == SendOnly {
+ check.errorf(x, invalidOp+"cannot receive from send-only channel %s", x)
+ x.mode = invalid
+ return
+ }
+ x.mode = commaok
+ x.typ = ch.elem
+ check.hasCallOrRecv = true
+ return
+ }
+
+ if !check.op(unaryOpPredicates, x, e.Op) {
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant_ {
+ if x.val.Kind() == constant.Unknown {
+ // nothing to do (and don't cause an error below in the overflow check)
+ return
+ }
+ var prec uint
+ if isUnsigned(x.typ) {
+ prec = uint(check.conf.sizeof(x.typ) * 8)
+ }
+ x.val = constant.UnaryOp(op2tok[e.Op], x.val, prec)
+ x.expr = e
+ check.overflow(x)
+ return
+ }
+
+ x.mode = value
+ // x.typ remains unchanged
+}
+
+func isShift(op syntax.Operator) bool {
+ return op == syntax.Shl || op == syntax.Shr
+}
+
+func isComparison(op syntax.Operator) bool {
+ // Note: operator tokens are not ordered in a way that would make this easier.
+ switch op {
+ case syntax.Eql, syntax.Neq, syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq:
+ return true
+ }
+ return false
+}
+
+func fitsFloat32(x constant.Value) bool {
+ f32, _ := constant.Float32Val(x)
+ f := float64(f32)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat32(x constant.Value) constant.Value {
+ f32, _ := constant.Float32Val(x)
+ f := float64(f32)
+ if !math.IsInf(f, 0) {
+ return constant.MakeFloat64(f)
+ }
+ return nil
+}
+
+func fitsFloat64(x constant.Value) bool {
+ f, _ := constant.Float64Val(x)
+ return !math.IsInf(f, 0)
+}
+
+func roundFloat64(x constant.Value) constant.Value {
+ f, _ := constant.Float64Val(x)
+ if !math.IsInf(f, 0) {
+ return constant.MakeFloat64(f)
+ }
+ return nil
+}
+
+// representableConst reports whether x can be represented as
+// value of the given basic type and for the configuration
+// provided (only needed for int/uint sizes).
+//
+// If rounded != nil, *rounded is set to the rounded value of x for
+// representable floating-point and complex values, and to an Int
+// value for integer values; it is left alone otherwise.
+// It is ok to provide the address of the first argument for rounded.
+//
+// The check parameter may be nil if representableConst is invoked
+// (indirectly) through an exported API call (AssignableTo, ConvertibleTo)
+// because we don't need the Checker's config for those calls.
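+//
+// For example, the untyped integer constant 300 is representable as an
+// int16 but not as an int8 or uint8.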
+func representableConst(x constant.Value, check *Checker, typ *Basic, rounded *constant.Value) bool {
+ if x.Kind() == constant.Unknown {
+ return true // avoid follow-up errors
+ }
+
+ var conf *Config
+ if check != nil {
+ conf = check.conf
+ }
+
+ switch {
+ case isInteger(typ):
+ x := constant.ToInt(x)
+ if x.Kind() != constant.Int {
+ return false
+ }
+ if rounded != nil {
+ *rounded = x
+ }
+ if x, ok := constant.Int64Val(x); ok {
+ switch typ.kind {
+ case Int:
+ var s = uint(conf.sizeof(typ)) * 8
+ return int64(-1)<<(s-1) <= x && x <= int64(1)<<(s-1)-1
+ case Int8:
+ const s = 8
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int16:
+ const s = 16
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int32:
+ const s = 32
+ return -1<<(s-1) <= x && x <= 1<<(s-1)-1
+ case Int64, UntypedInt:
+ return true
+ case Uint, Uintptr:
+ if s := uint(conf.sizeof(typ)) * 8; s < 64 {
+ return 0 <= x && x <= int64(1)<<s-1
+ }
+ return 0 <= x
+ case Uint8:
+ const s = 8
+ return 0 <= x && x <= 1<<s-1
+ case Uint16:
+ const s = 16
+ return 0 <= x && x <= 1<<s-1
+ case Uint32:
+ const s = 32
+ return 0 <= x && x <= 1<<s-1
+ case Uint64:
+ return 0 <= x
+ default:
+ unreachable()
+ }
+ }
+ // x does not fit into int64
+ switch n := constant.BitLen(x); typ.kind {
+ case Uint, Uintptr:
+ var s = uint(conf.sizeof(typ)) * 8
+ return constant.Sign(x) >= 0 && n <= int(s)
+ case Uint64:
+ return constant.Sign(x) >= 0 && n <= 64
+ case UntypedInt:
+ return true
+ }
+
+ case isFloat(typ):
+ x := constant.ToFloat(x)
+ if x.Kind() != constant.Float {
+ return false
+ }
+ switch typ.kind {
+ case Float32:
+ if rounded == nil {
+ return fitsFloat32(x)
+ }
+ r := roundFloat32(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case Float64:
+ if rounded == nil {
+ return fitsFloat64(x)
+ }
+ r := roundFloat64(x)
+ if r != nil {
+ *rounded = r
+ return true
+ }
+ case UntypedFloat:
+ return true
+ default:
+ unreachable()
+ }
+
+ case isComplex(typ):
+ x := constant.ToComplex(x)
+ if x.Kind() != constant.Complex {
+ return false
+ }
+ switch typ.kind {
+ case Complex64:
+ if rounded == nil {
+ return fitsFloat32(constant.Real(x)) && fitsFloat32(constant.Imag(x))
+ }
+ re := roundFloat32(constant.Real(x))
+ im := roundFloat32(constant.Imag(x))
+ if re != nil && im != nil {
+ *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ return true
+ }
+ case Complex128:
+ if rounded == nil {
+ return fitsFloat64(constant.Real(x)) && fitsFloat64(constant.Imag(x))
+ }
+ re := roundFloat64(constant.Real(x))
+ im := roundFloat64(constant.Imag(x))
+ if re != nil && im != nil {
+ *rounded = constant.BinaryOp(re, token.ADD, constant.MakeImag(im))
+ return true
+ }
+ case UntypedComplex:
+ return true
+ default:
+ unreachable()
+ }
+
+ case isString(typ):
+ return x.Kind() == constant.String
+
+ case isBoolean(typ):
+ return x.Kind() == constant.Bool
+ }
+
+ return false
+}
+
+// An errorCode is a (constant) value uniquely identifying a specific error.
+type errorCode int
+
+// The following error codes are "borrowed" from go/types, which has codes
+// for all errors. Here we list the few codes currently needed by the various
+// conversion checking functions.
+// Eventually we will switch to reporting codes for all errors, using
+// an error code table shared between types2 and go/types.
+const (
+ _ = errorCode(iota)
+ _TruncatedFloat
+ _NumericOverflow
+ _InvalidConstVal
+ _InvalidUntypedConversion
+
+ // The following error codes are only returned by operand.assignableTo
+ // and none of its callers use the error. Still, we keep returning the
+ // error codes to make the transition to reporting error codes all the
+ // time easier in the future.
+ _IncompatibleAssign
+ _InvalidIfaceAssign
+ _InvalidChanAssign
+)
+
+// representable checks that a constant operand is representable in the given
+// basic type.
+func (check *Checker) representable(x *operand, typ *Basic) {
+ v, code := check.representation(x, typ)
+ if code != 0 {
+ check.invalidConversion(code, x, typ)
+ x.mode = invalid
+ return
+ }
+ assert(v != nil)
+ x.val = v
+}
+
+// representation returns the representation of the constant operand x as the
+// basic type typ.
+//
+// If no such representation is possible, it returns a non-zero error code.
+func (check *Checker) representation(x *operand, typ *Basic) (constant.Value, errorCode) {
+ assert(x.mode == constant_)
+ v := x.val
+ if !representableConst(x.val, check, typ, &v) {
+ if isNumeric(x.typ) && isNumeric(typ) {
+ // numeric conversion : error msg
+ //
+ // integer -> integer : overflows
+ // integer -> float : overflows (actually not possible)
+ // float -> integer : truncated
+ // float -> float : overflows
+ //
+ if !isInteger(x.typ) && isInteger(typ) {
+ return nil, _TruncatedFloat
+ } else {
+ return nil, _NumericOverflow
+ }
+ }
+ return nil, _InvalidConstVal
+ }
+ return v, 0
+}
+
+func (check *Checker) invalidConversion(code errorCode, x *operand, target Type) {
+ msg := "cannot convert %s to %s"
+ switch code {
+ case _TruncatedFloat:
+ msg = "%s truncated to %s"
+ case _NumericOverflow:
+ msg = "%s overflows %s"
+ }
+ check.errorf(x, msg, x, target)
+}
+
+// updateExprType updates the type of x to typ and invokes itself
+// recursively for the operands of x, depending on expression kind.
+// If typ is still untyped and not the final type, updateExprType
+// only updates the recorded untyped type for x and possibly its
+// operands. Otherwise (i.e., typ is not an untyped type anymore,
+// or it is the final type for x), the type and value are recorded.
+// Also, if x is a constant, it must be representable as a value of typ,
+// and if x is the (formerly untyped) lhs operand of a non-constant
+// shift, it must be an integer value.
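+//
+// For example, with a variable s of type uint, "var n int = 1.0 << s" is
+// valid (the untyped lhs 1.0 assumes the integer type int), while
+// "f := 1.0 << s" is not (the lhs would assume type float64).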
+func (check *Checker) updateExprType(x syntax.Expr, typ Type, final bool) {
+ check.updateExprType0(nil, x, typ, final)
+}
+
+func (check *Checker) updateExprType0(parent, x syntax.Expr, typ Type, final bool) {
+ old, found := check.untyped[x]
+ if !found {
+ return // nothing to do
+ }
+
+ // update operands of x if necessary
+ switch x := x.(type) {
+ case *syntax.BadExpr,
+ *syntax.FuncLit,
+ *syntax.CompositeLit,
+ *syntax.IndexExpr,
+ *syntax.SliceExpr,
+ *syntax.AssertExpr,
+ *syntax.ListExpr,
+ //*syntax.StarExpr,
+ *syntax.KeyValueExpr,
+ *syntax.ArrayType,
+ *syntax.StructType,
+ *syntax.FuncType,
+ *syntax.InterfaceType,
+ *syntax.MapType,
+ *syntax.ChanType:
+ // These expressions are never untyped - nothing to do.
+ // The respective sub-expressions got their final types
+ // upon assignment or use.
+ if debug {
+ check.dump("%v: found old type(%s): %s (new: %s)", posFor(x), x, old.typ, typ)
+ unreachable()
+ }
+ return
+
+ case *syntax.CallExpr:
+ // Resulting in an untyped constant (e.g., built-in complex).
+ // The respective calls take care of calling updateExprType
+ // for the arguments if necessary.
+
+ case *syntax.Name, *syntax.BasicLit, *syntax.SelectorExpr:
+ // An identifier denoting a constant, a constant literal,
+ // or a qualified identifier (imported untyped constant).
+ // No operands to take care of.
+
+ case *syntax.ParenExpr:
+ check.updateExprType0(x, x.X, typ, final)
+
+ // case *syntax.UnaryExpr:
+ // // If x is a constant, the operands were constants.
+ // // The operands don't need to be updated since they
+ // // never get "materialized" into a typed value. If
+ // // left in the untyped map, they will be processed
+ // // at the end of the type check.
+ // if old.val != nil {
+ // break
+ // }
+ // check.updateExprType0(x, x.X, typ, final)
+
+ case *syntax.Operation:
+ if x.Y == nil {
+ // unary expression
+ if x.Op == syntax.Mul {
+ // see commented out code for StarExpr above
+ // TODO(gri) needs cleanup
+ if debug {
+ unimplemented()
+ }
+ return
+ }
+ // If x is a constant, the operands were constants.
+ // The operands don't need to be updated since they
+ // never get "materialized" into a typed value. If
+ // left in the untyped map, they will be processed
+ // at the end of the type check.
+ if old.val != nil {
+ break
+ }
+ check.updateExprType0(x, x.X, typ, final)
+ break
+ }
+
+ // binary expression
+ if old.val != nil {
+ break // see comment for unary expressions
+ }
+ if isComparison(x.Op) {
+ // The result type is independent of operand types
+ // and the operand types must have final types.
+ } else if isShift(x.Op) {
+ // The result type depends only on lhs operand.
+ // The rhs type was updated when checking the shift.
+ check.updateExprType0(x, x.X, typ, final)
+ } else {
+ // The operand types match the result type.
+ check.updateExprType0(x, x.X, typ, final)
+ check.updateExprType0(x, x.Y, typ, final)
+ }
+
+ default:
+ unreachable()
+ }
+
+ // If the new type is not final and still untyped, just
+ // update the recorded type.
+ if !final && isUntyped(typ) {
+ old.typ = under(typ).(*Basic)
+ check.untyped[x] = old
+ return
+ }
+
+ // Otherwise we have the final (typed or untyped type).
+ // Remove it from the map of yet untyped expressions.
+ delete(check.untyped, x)
+
+ if old.isLhs {
+ // If x is the lhs of a shift, its final type must be integer.
+ // We already know from the shift check that it is representable
+ // as an integer if it is a constant.
+ if !allInteger(typ) {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(x, invalidOp+"%s (shift of type %s)", parent, typ)
+ } else {
+ check.errorf(x, invalidOp+"shifted operand %s (type %s) must be integer", x, typ)
+ }
+ return
+ }
+ // Even if we have an integer, if the value is a constant we
+ // still must check that it is representable as the specific
+ // int type requested (was issue #22969). Fall through here.
+ }
+ if old.val != nil {
+ // If x is a constant, it must be representable as a value of typ.
+ c := operand{old.mode, x, old.typ, old.val, 0}
+ check.convertUntyped(&c, typ)
+ if c.mode == invalid {
+ return
+ }
+ }
+
+ // Everything's fine, record final type and value for x.
+ check.recordTypeAndValue(x, old.mode, typ, old.val)
+}
+
+// updateExprVal updates the value of x to val.
+func (check *Checker) updateExprVal(x syntax.Expr, val constant.Value) {
+ if info, ok := check.untyped[x]; ok {
+ info.val = val
+ check.untyped[x] = info
+ }
+}
+
+// convertUntyped attempts to set the type of an untyped value to the target type.
+func (check *Checker) convertUntyped(x *operand, target Type) {
+ newType, val, code := check.implicitTypeAndValue(x, target)
+ if code != 0 {
+ t := target
+ if !isTypeParam(target) {
+ t = safeUnderlying(target)
+ }
+ check.invalidConversion(code, x, t)
+ x.mode = invalid
+ return
+ }
+ if val != nil {
+ x.val = val
+ check.updateExprVal(x.expr, val)
+ }
+ if newType != x.typ {
+ x.typ = newType
+ check.updateExprType(x.expr, newType, false)
+ }
+}
+
+// implicitTypeAndValue returns the implicit type of x when used in a context
+// where the target type is expected. If no such implicit conversion is
+// possible, it returns a nil Type and non-zero error code.
+//
+// If x is a constant operand, the returned constant.Value will be the
+// representation of x in this context.
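+//
+// For example, the untyped constant 1 used where a float64 value is
+// expected has implicit type float64; untyped nil used where an int is
+// expected has no implicit type, and a non-zero error code is returned.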
+func (check *Checker) implicitTypeAndValue(x *operand, target Type) (Type, constant.Value, errorCode) {
+ if x.mode == invalid || isTyped(x.typ) || target == Typ[Invalid] {
+ return x.typ, nil, 0
+ }
+
+ if isUntyped(target) {
+ // both x and target are untyped
+ xkind := x.typ.(*Basic).kind
+ tkind := target.(*Basic).kind
+ if isNumeric(x.typ) && isNumeric(target) {
+ if xkind < tkind {
+ return target, nil, 0
+ }
+ } else if xkind != tkind {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ return x.typ, nil, 0
+ }
+
+ if x.isNil() {
+ assert(isUntyped(x.typ))
+ if hasNil(target) {
+ return target, nil, 0
+ }
+ return nil, nil, _InvalidUntypedConversion
+ }
+
+ switch u := under(target).(type) {
+ case *Basic:
+ if x.mode == constant_ {
+ v, code := check.representation(x, u)
+ if code != 0 {
+ return nil, nil, code
+ }
+ return target, v, code
+ }
+ // Non-constant untyped values may appear as the
+ // result of comparisons (untyped bool), intermediate
+ // (delayed-checked) rhs operands of shifts, and as
+ // the value nil.
+ switch x.typ.(*Basic).kind {
+ case UntypedBool:
+ if !isBoolean(target) {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ case UntypedInt, UntypedRune, UntypedFloat, UntypedComplex:
+ if !isNumeric(target) {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ case UntypedString:
+ // Non-constant untyped string values are not permitted by the spec and
+ // should not occur during normal typechecking passes, but this path is
+ // reachable via the AssignableTo API.
+ if !isString(target) {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ default:
+ return nil, nil, _InvalidUntypedConversion
+ }
+ case *Interface:
+ if isTypeParam(target) {
+ if !u.typeSet().underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ t, _, _ := check.implicitTypeAndValue(x, u)
+ return t != nil
+ }) {
+ return nil, nil, _InvalidUntypedConversion
+ }
+ break
+ }
+ // Update operand types to the default type rather than the target
+ // (interface) type: values must have concrete dynamic types.
+ // Untyped nil was handled upfront.
+ if !u.Empty() {
+ return nil, nil, _InvalidUntypedConversion // cannot assign untyped values to non-empty interfaces
+ }
+ return Default(x.typ), nil, 0 // default type for nil is nil
+ default:
+ return nil, nil, _InvalidUntypedConversion
+ }
+ return target, nil, 0
+}
+
+// If switchCase is true, the operator op is ignored.
+func (check *Checker) comparison(x, y *operand, op syntax.Operator, switchCase bool) {
+ if switchCase {
+ op = syntax.Eql
+ }
+
+ errOp := x // operand for which error is reported, if any
+ cause := "" // specific error cause, if any
+
+ // spec: "In any comparison, the first operand must be assignable
+ // to the type of the second operand, or vice versa."
+ ok, _ := x.assignableTo(check, y.typ, nil)
+ if !ok {
+ ok, _ = y.assignableTo(check, x.typ, nil)
+ }
+ if !ok {
+ // Report the error on the 2nd operand since we only
+ // know after seeing the 2nd operand whether we have
+ // a type mismatch.
+ errOp = y
+ // For now, if we're not running the compiler, use the
+ // position of x to minimize changes to existing tests.
+ if !check.conf.CompilerErrorMessages {
+ errOp = x
+ }
+ cause = check.sprintf("mismatched types %s and %s", x.typ, y.typ)
+ goto Error
+ }
+
+ // check if comparison is defined for operands
+ switch op {
+ case syntax.Eql, syntax.Neq:
+ // spec: "The equality operators == and != apply to operands that are comparable."
+ switch {
+ case x.isNil() || y.isNil():
+ // Comparison against nil requires that the other operand type has nil.
+ typ := x.typ
+ if x.isNil() {
+ typ = y.typ
+ }
+ if !hasNil(typ) {
+ // This case should only be possible for "nil == nil".
+ // Report the error on the 2nd operand since we only
+ // know after seeing the 2nd operand whether we have
+ // an invalid comparison.
+ errOp = y
+ goto Error
+ }
+
+ case !Comparable(x.typ):
+ errOp = x
+ cause = check.incomparableCause(x.typ)
+ goto Error
+
+ case !Comparable(y.typ):
+ errOp = y
+ cause = check.incomparableCause(y.typ)
+ goto Error
+ }
+
+ case syntax.Lss, syntax.Leq, syntax.Gtr, syntax.Geq:
+ // spec: "The ordering operators <, <=, >, and >= apply to operands that are ordered."
+ switch {
+ case !allOrdered(x.typ):
+ errOp = x
+ goto Error
+ case !allOrdered(y.typ):
+ errOp = y
+ goto Error
+ }
+
+ default:
+ unreachable()
+ }
+
+ // comparison is ok
+ if x.mode == constant_ && y.mode == constant_ {
+ x.val = constant.MakeBool(constant.Compare(x.val, op2tok[op], y.val))
+ // The operands are never materialized; no need to update
+ // their types.
+ } else {
+ x.mode = value
+ // The operands have now their final types, which at run-
+ // time will be materialized. Update the expression trees.
+ // If the current types are untyped, the materialized type
+ // is the respective default type.
+ check.updateExprType(x.expr, Default(x.typ), true)
+ check.updateExprType(y.expr, Default(y.typ), true)
+ }
+
+ // spec: "Comparison operators compare two operands and yield
+ // an untyped boolean value."
+ x.typ = Typ[UntypedBool]
+ return
+
+Error:
+ // We have an offending operand errOp and possibly an error cause.
+ if cause == "" {
+ if isTypeParam(x.typ) || isTypeParam(y.typ) {
+ // TODO(gri) should report the specific type causing the problem, if any
+ if !isTypeParam(x.typ) {
+ errOp = y
+ }
+ cause = check.sprintf("type parameter %s is not comparable with %s", errOp.typ, op)
+ } else {
+ cause = check.sprintf("operator %s not defined on %s", op, check.kindString(errOp.typ)) // catch-all
+ }
+ }
+ if switchCase {
+ check.errorf(x, "invalid case %s in switch on %s (%s)", x.expr, y.expr, cause) // error position always at 1st operand
+ } else {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(errOp, invalidOp+"%s %s %s (%s)", x.expr, op, y.expr, cause)
+ } else {
+ check.errorf(errOp, invalidOp+"cannot compare %s %s %s (%s)", x.expr, op, y.expr, cause)
+ }
+ }
+ x.mode = invalid
+}
+
+// incomparableCause returns a more specific cause why typ is not comparable.
+// If there is no more specific cause, the result is "".
+func (check *Checker) incomparableCause(typ Type) string {
+ switch under(typ).(type) {
+ case *Slice, *Signature, *Map:
+ return check.kindString(typ) + " can only be compared to nil"
+ }
+ // see if we can extract a more specific error
+ var cause string
+ comparable(typ, true, nil, func(format string, args ...interface{}) {
+ cause = check.sprintf(format, args...)
+ })
+ return cause
+}
+
+// kindString returns the type kind as a string.
+func (check *Checker) kindString(typ Type) string {
+ switch under(typ).(type) {
+ case *Array:
+ return "array"
+ case *Slice:
+ return "slice"
+ case *Struct:
+ return "struct"
+ case *Pointer:
+ return "pointer"
+ case *Signature:
+ return "func"
+ case *Interface:
+ if isTypeParam(typ) {
+ return check.sprintf("type parameter %s", typ)
+ }
+ return "interface"
+ case *Map:
+ return "map"
+ case *Chan:
+ return "chan"
+ default:
+ return check.sprintf("%s", typ) // catch-all
+ }
+}
+
+// If e != nil, it must be the shift expression; it may be nil for non-constant shifts.
+func (check *Checker) shift(x, y *operand, e syntax.Expr, op syntax.Operator) {
+ // TODO(gri) This function seems overly complex. Revisit.
+
+ var xval constant.Value
+ if x.mode == constant_ {
+ xval = constant.ToInt(x.val)
+ }
+
+ if allInteger(x.typ) || isUntyped(x.typ) && xval != nil && xval.Kind() == constant.Int {
+ // The lhs is of integer type or an untyped constant representable
+ // as an integer. Nothing to do.
+ } else {
+ // shift has no chance
+ check.errorf(x, invalidOp+"shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ // spec: "The right operand in a shift expression must have integer type
+ // or be an untyped constant representable by a value of type uint."
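+	// For example, "x << 2" and "x << n" with n of type uint are valid, while
+	// "x << 1.5" or a negative constant shift count is not.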
+
+ // Provide a good error message for negative shift counts.
+ if y.mode == constant_ {
+ yval := constant.ToInt(y.val) // consider -1, 1.0, but not -1.1
+ if yval.Kind() == constant.Int && constant.Sign(yval) < 0 {
+ check.errorf(y, invalidOp+"negative shift count %s", y)
+ x.mode = invalid
+ return
+ }
+ }
+
+ // Caution: Check for isUntyped first because isInteger includes untyped
+ // integers (was bug #43697).
+ if isUntyped(y.typ) {
+ check.convertUntyped(y, Typ[Uint])
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ } else if !allInteger(y.typ) {
+ check.errorf(y, invalidOp+"shift count %s must be integer", y)
+ x.mode = invalid
+ return
+ } else if !allUnsigned(y.typ) && !check.allowVersion(check.pkg, 1, 13) {
+ check.versionErrorf(y, "go1.13", invalidOp+"signed shift count %s", y)
+ x.mode = invalid
+ return
+ }
+
+ if x.mode == constant_ {
+ if y.mode == constant_ {
+ // if either x or y has an unknown value, the result is unknown
+ if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+ x.val = constant.MakeUnknown()
+ // ensure the correct type - see comment below
+ if !isInteger(x.typ) {
+ x.typ = Typ[UntypedInt]
+ }
+ return
+ }
+ // rhs must be within reasonable bounds in constant shifts
+ const shiftBound = 1023 - 1 + 52 // so we can express smallestFloat64 (see issue #44057)
+ s, ok := constant.Uint64Val(y.val)
+ if !ok || s > shiftBound {
+ check.errorf(y, invalidOp+"invalid shift count %s", y)
+ x.mode = invalid
+ return
+ }
+ // The lhs is representable as an integer but may not be an integer
+ // (e.g., 2.0, an untyped float) - this can only happen for untyped
+ // non-integer numeric constants. Correct the type so that the shift
+ // result is of integer type.
+ if !isInteger(x.typ) {
+ x.typ = Typ[UntypedInt]
+ }
+ // x is a constant so xval != nil and it must be of Int kind.
+ x.val = constant.Shift(xval, op2tok[op], uint(s))
+ x.expr = e
+ check.overflow(x)
+ return
+ }
+
+ // non-constant shift with constant lhs
+ if isUntyped(x.typ) {
+ // spec: "If the left operand of a non-constant shift
+ // expression is an untyped constant, the type of the
+ // constant is what it would be if the shift expression
+ // were replaced by its left operand alone.".
+ //
+ // Delay operand checking until we know the final type
+ // by marking the lhs expression as lhs shift operand.
+ //
+ // Usually (in correct programs), the lhs expression
+ // is in the untyped map. However, it is possible to
+ // create incorrect programs where the same expression
+ // is evaluated twice (via a declaration cycle) such
+ // that the lhs expression type is determined in the
+ // first round and thus deleted from the map, and then
+ // not found in the second round (double insertion of
+ // the same expr node still just leads to one entry for
+ // that node, and it can only be deleted once).
+ // Be cautious and check for presence of entry.
+ // Example: var e, f = int(1<<""[f]) // issue 11347
+ if info, found := check.untyped[x.expr]; found {
+ info.isLhs = true
+ check.untyped[x.expr] = info
+ }
+ // keep x's type
+ x.mode = value
+ return
+ }
+ }
+
+ // non-constant shift - lhs must be an integer
+ if !allInteger(x.typ) {
+ check.errorf(x, invalidOp+"shifted operand %s must be integer", x)
+ x.mode = invalid
+ return
+ }
+
+ x.mode = value
+}
+
+var binaryOpPredicates opPredicates
+
+func init() {
+ // Setting binaryOpPredicates in init avoids declaration cycles.
+ binaryOpPredicates = opPredicates{
+ syntax.Add: allNumericOrString,
+ syntax.Sub: allNumeric,
+ syntax.Mul: allNumeric,
+ syntax.Div: allNumeric,
+ syntax.Rem: allInteger,
+
+ syntax.And: allInteger,
+ syntax.Or: allInteger,
+ syntax.Xor: allInteger,
+ syntax.AndNot: allInteger,
+
+ syntax.AndAnd: allBoolean,
+ syntax.OrOr: allBoolean,
+ }
+}
+
+// If e != nil, it must be the binary expression; it may be nil for non-constant expressions
+// (when invoked for an assignment operation where the binary expression is implicit).
+func (check *Checker) binary(x *operand, e syntax.Expr, lhs, rhs syntax.Expr, op syntax.Operator) {
+ var y operand
+
+ check.expr(x, lhs)
+ check.expr(&y, rhs)
+
+ if x.mode == invalid {
+ return
+ }
+ if y.mode == invalid {
+ x.mode = invalid
+ x.expr = y.expr
+ return
+ }
+
+ if isShift(op) {
+ check.shift(x, &y, e, op)
+ return
+ }
+
+ // TODO(gri) make canMix more efficient - called for each binary operation
+ canMix := func(x, y *operand) bool {
+ if IsInterface(x.typ) && !isTypeParam(x.typ) || IsInterface(y.typ) && !isTypeParam(y.typ) {
+ return true
+ }
+ if allBoolean(x.typ) != allBoolean(y.typ) {
+ return false
+ }
+ if allString(x.typ) != allString(y.typ) {
+ return false
+ }
+ if x.isNil() && !hasNil(y.typ) {
+ return false
+ }
+ if y.isNil() && !hasNil(x.typ) {
+ return false
+ }
+ return true
+ }
+ if canMix(x, &y) {
+ check.convertUntyped(x, y.typ)
+ if x.mode == invalid {
+ return
+ }
+ check.convertUntyped(&y, x.typ)
+ if y.mode == invalid {
+ x.mode = invalid
+ return
+ }
+ }
+
+ if isComparison(op) {
+ check.comparison(x, &y, op, false)
+ return
+ }
+
+ if !Identical(x.typ, y.typ) {
+ // only report an error if we have valid types
+ // (otherwise we had an error reported elsewhere already)
+ if x.typ != Typ[Invalid] && y.typ != Typ[Invalid] {
+ if e != nil {
+ check.errorf(x, invalidOp+"%s (mismatched types %s and %s)", e, x.typ, y.typ)
+ } else {
+ check.errorf(x, invalidOp+"%s %s= %s (mismatched types %s and %s)", lhs, op, rhs, x.typ, y.typ)
+ }
+ }
+ x.mode = invalid
+ return
+ }
+
+ if !check.op(binaryOpPredicates, x, op) {
+ x.mode = invalid
+ return
+ }
+
+ if op == syntax.Div || op == syntax.Rem {
+ // check for zero divisor
+ if (x.mode == constant_ || allInteger(x.typ)) && y.mode == constant_ && constant.Sign(y.val) == 0 {
+ check.error(&y, invalidOp+"division by zero")
+ x.mode = invalid
+ return
+ }
+
+ // check for divisor underflow in complex division (see issue 20227)
+ if x.mode == constant_ && y.mode == constant_ && isComplex(x.typ) {
+ re, im := constant.Real(y.val), constant.Imag(y.val)
+ re2, im2 := constant.BinaryOp(re, token.MUL, re), constant.BinaryOp(im, token.MUL, im)
+ if constant.Sign(re2) == 0 && constant.Sign(im2) == 0 {
+ check.error(&y, invalidOp+"division by zero")
+ x.mode = invalid
+ return
+ }
+ }
+ }
+
+ if x.mode == constant_ && y.mode == constant_ {
+ // if either x or y has an unknown value, the result is unknown
+ if x.val.Kind() == constant.Unknown || y.val.Kind() == constant.Unknown {
+ x.val = constant.MakeUnknown()
+ // x.typ is unchanged
+ return
+ }
+ // force integer division for integer operands
+ tok := op2tok[op]
+ if op == syntax.Div && isInteger(x.typ) {
+ tok = token.QUO_ASSIGN
+ }
+ x.val = constant.BinaryOp(x.val, tok, y.val)
+ x.expr = e
+ check.overflow(x)
+ return
+ }
+
+ x.mode = value
+ // x.typ is unchanged
+}
+
+// exprKind describes the kind of an expression; the kind
+// determines if an expression is valid in 'statement context'.
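+// For example, a call f() or a receive operation <-ch may stand alone as a
+// statement, while a conversion T(x) or a comparison a == b may not.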
+type exprKind int
+
+const (
+ conversion exprKind = iota
+ expression
+ statement
+)
+
+// rawExpr typechecks expression e and initializes x with the expression
+// value or type. If an error occurred, x.mode is set to invalid.
+// If hint != nil, it is the type of a composite literal element.
+// If allowGeneric is set, the operand type may be an uninstantiated
+// parameterized type or function value.
+//
+func (check *Checker) rawExpr(x *operand, e syntax.Expr, hint Type, allowGeneric bool) exprKind {
+ if check.conf.Trace {
+ check.trace(e.Pos(), "expr %s", e)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(e.Pos(), "=> %s", x)
+ }()
+ }
+
+ kind := check.exprInternal(x, e, hint)
+
+ if !allowGeneric {
+ check.nonGeneric(x)
+ }
+
+ check.record(x)
+
+ return kind
+}
+
+// If x is a generic function or type, nonGeneric reports an error and invalidates x.mode and x.typ.
+// Otherwise it leaves x alone.
+func (check *Checker) nonGeneric(x *operand) {
+ if x.mode == invalid || x.mode == novalue {
+ return
+ }
+ var what string
+ switch t := x.typ.(type) {
+ case *Named:
+ if isGeneric(t) {
+ what = "type"
+ }
+ case *Signature:
+ if t.tparams != nil {
+ what = "function"
+ }
+ }
+ if what != "" {
+ check.errorf(x.expr, "cannot use generic %s %s without instantiation", what, x.expr)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+ }
+}
+
+// exprInternal contains the core of type checking of expressions.
+// Must only be called by rawExpr.
+//
+func (check *Checker) exprInternal(x *operand, e syntax.Expr, hint Type) exprKind {
+ // make sure x has a valid state in case of bailout
+ // (was issue 5770)
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+
+ switch e := e.(type) {
+ case nil:
+ unreachable()
+
+ case *syntax.BadExpr:
+ goto Error // error was reported before
+
+ case *syntax.Name:
+ check.ident(x, e, nil, false)
+
+ case *syntax.DotsType:
+ // dots are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.error(e, "invalid use of '...'")
+ goto Error
+
+ case *syntax.BasicLit:
+ if e.Bad {
+ goto Error // error reported during parsing
+ }
+ switch e.Kind {
+ case syntax.IntLit, syntax.FloatLit, syntax.ImagLit:
+ check.langCompat(e)
+ // The max. mantissa precision for untyped numeric values
+ // is 512 bits, or 4048 bits for each of the two integer
+ // parts of a fraction for floating-point numbers that are
+ // represented accurately in the go/constant package.
+ // Constant literals that are longer than this many bits
+ // are not meaningful; and excessively long constants may
+ // consume a lot of space and time for a useless conversion.
+ // Cap constant length with a generous upper limit that also
+ // allows for separators between all digits.
+ const limit = 10000
+ if len(e.Value) > limit {
+ check.errorf(e, "excessively long constant: %s... (%d chars)", e.Value[:10], len(e.Value))
+ goto Error
+ }
+ }
+ x.setConst(e.Kind, e.Value)
+ if x.mode == invalid {
+ // The parser already establishes syntactic correctness.
+ // If we reach here it's because of number under-/overflow.
+ // TODO(gri) setConst (and in turn the go/constant package)
+ // should return an error describing the issue.
+ check.errorf(e, "malformed constant: %s", e.Value)
+ goto Error
+ }
+
+ case *syntax.FuncLit:
+ if sig, ok := check.typ(e.Type).(*Signature); ok {
+ if !check.conf.IgnoreFuncBodies && e.Body != nil {
+ // Anonymous functions are considered part of the
+ // init expression/func declaration which contains
+ // them: use existing package-level declaration info.
+ decl := check.decl // capture for use in closure below
+ iota := check.iota // capture for use in closure below (#22345)
+ // Don't type-check right away because the function may
+ // be part of a type definition to which the function
+ // body refers. Instead, type-check as soon as possible,
+ // but before the enclosing scope contents changes (#22992).
+ check.later(func() {
+ check.funcBody(decl, "<function literal>", sig, e.Body, iota)
+ })
+ }
+ x.mode = value
+ x.typ = sig
+ } else {
+ check.errorf(e, invalidAST+"invalid function literal %v", e)
+ goto Error
+ }
+
+ case *syntax.CompositeLit:
+ var typ, base Type
+
+ switch {
+ case e.Type != nil:
+ // composite literal type present - use it
+ // [...]T array types may only appear with composite literals.
+ // Check for them here so we don't have to handle ... in general.
+ if atyp, _ := e.Type.(*syntax.ArrayType); atyp != nil && atyp.Len == nil {
+ // We have an "open" [...]T array type.
+ // Create a new ArrayType with unknown length (-1)
+ // and finish setting it up after analyzing the literal.
+ typ = &Array{len: -1, elem: check.varType(atyp.Elem)}
+ base = typ
+ break
+ }
+ typ = check.typ(e.Type)
+ base = typ
+
+ case hint != nil:
+ // no composite literal type present - use hint (element type of enclosing type)
+ typ = hint
+ base, _ = deref(coreType(typ)) // *T implies &T{}
+ if base == nil {
+ check.errorf(e, "invalid composite literal element type %s: no core type", typ)
+ goto Error
+ }
+
+ default:
+ // TODO(gri) provide better error messages depending on context
+ check.error(e, "missing type in composite literal")
+ goto Error
+ }
+
+ switch utyp := coreType(base).(type) {
+ case *Struct:
+ // Prevent crash if the struct referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.fields == nil {
+ check.error(e, "illegal cycle in type declaration")
+ goto Error
+ }
+ if len(e.ElemList) == 0 {
+ break
+ }
+ fields := utyp.fields
+ if _, ok := e.ElemList[0].(*syntax.KeyValueExpr); ok {
+ // all elements must have keys
+ visited := make([]bool, len(fields))
+ for _, e := range e.ElemList {
+ kv, _ := e.(*syntax.KeyValueExpr)
+ if kv == nil {
+ check.error(e, "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ key, _ := kv.Key.(*syntax.Name)
+ // do all possible checks early (before exiting due to errors)
+ // so we don't drop information on the floor
+ check.expr(x, kv.Value)
+ if key == nil {
+ check.errorf(kv, "invalid field name %s in struct literal", kv.Key)
+ continue
+ }
+ i := fieldIndex(utyp.fields, check.pkg, key.Value)
+ if i < 0 {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(kv.Key, "unknown field '%s' in struct literal of type %s", key.Value, base)
+ } else {
+ check.errorf(kv.Key, "unknown field %s in struct literal", key.Value)
+ }
+ continue
+ }
+ fld := fields[i]
+ check.recordUse(key, fld)
+ etyp := fld.typ
+ check.assignment(x, etyp, "struct literal")
+ // 0 <= i < len(fields)
+ if visited[i] {
+ check.errorf(kv, "duplicate field name %s in struct literal", key.Value)
+ continue
+ }
+ visited[i] = true
+ }
+ } else {
+				// no element may have a key
+ for i, e := range e.ElemList {
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ check.error(kv, "mixture of field:value and value elements in struct literal")
+ continue
+ }
+ check.expr(x, e)
+ if i >= len(fields) {
+ check.error(x, "too many values in struct literal")
+ break // cannot continue
+ }
+ // i < len(fields)
+ fld := fields[i]
+ if !fld.Exported() && fld.pkg != check.pkg {
+ check.errorf(x, "implicit assignment to unexported field %s in %s literal", fld.name, typ)
+ continue
+ }
+ etyp := fld.typ
+ check.assignment(x, etyp, "struct literal")
+ }
+ if len(e.ElemList) < len(fields) {
+ check.error(e.Rbrace, "too few values in struct literal")
+ // ok to continue
+ }
+ }
+
+ case *Array:
+ // Prevent crash if the array referred to is not yet set up. Was issue #18643.
+ // This is a stop-gap solution. Should use Checker.objPath to report entire
+ // path starting with earliest declaration in the source. TODO(gri) fix this.
+ if utyp.elem == nil {
+ check.error(e, "illegal cycle in type declaration")
+ goto Error
+ }
+ n := check.indexedElts(e.ElemList, utyp.elem, utyp.len)
+ // If we have an array of unknown length (usually [...]T arrays, but also
+ // arrays [n]T where n is invalid) set the length now that we know it and
+ // record the type for the array (usually done by check.typ which is not
+ // called for [...]T). We handle [...]T arrays and arrays with invalid
+ // length the same here because it makes sense to "guess" the length for
+ // the latter if we have a composite literal; e.g. for [n]int{1, 2, 3}
+ // where n is invalid for some reason, it seems fair to assume it should
+			// be 3 (see also Checker.arrayLength and issue #27346).
+ if utyp.len < 0 {
+ utyp.len = n
+ // e.Type is missing if we have a composite literal element
+ // that is itself a composite literal with omitted type. In
+ // that case there is nothing to record (there is no type in
+ // the source at that point).
+ if e.Type != nil {
+ check.recordTypeAndValue(e.Type, typexpr, utyp, nil)
+ }
+ }
+
+ case *Slice:
+ // Prevent crash if the slice referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.elem == nil {
+ check.error(e, "illegal cycle in type declaration")
+ goto Error
+ }
+ check.indexedElts(e.ElemList, utyp.elem, -1)
+
+ case *Map:
+ // Prevent crash if the map referred to is not yet set up.
+ // See analogous comment for *Array.
+ if utyp.key == nil || utyp.elem == nil {
+ check.error(e, "illegal cycle in type declaration")
+ goto Error
+ }
+ visited := make(map[interface{}][]Type, len(e.ElemList))
+ for _, e := range e.ElemList {
+ kv, _ := e.(*syntax.KeyValueExpr)
+ if kv == nil {
+ check.error(e, "missing key in map literal")
+ continue
+ }
+ check.exprWithHint(x, kv.Key, utyp.key)
+ check.assignment(x, utyp.key, "map literal")
+ if x.mode == invalid {
+ continue
+ }
+ if x.mode == constant_ {
+ duplicate := false
+ // if the key is of interface type, the type is also significant when checking for duplicates
+ xkey := keyVal(x.val)
+ if IsInterface(utyp.key) {
+ for _, vtyp := range visited[xkey] {
+ if Identical(vtyp, x.typ) {
+ duplicate = true
+ break
+ }
+ }
+ visited[xkey] = append(visited[xkey], x.typ)
+ } else {
+ _, duplicate = visited[xkey]
+ visited[xkey] = nil
+ }
+ if duplicate {
+ check.errorf(x, "duplicate key %s in map literal", x.val)
+ continue
+ }
+ }
+ check.exprWithHint(x, kv.Value, utyp.elem)
+ check.assignment(x, utyp.elem, "map literal")
+ }
+
+ default:
+ // when "using" all elements unpack KeyValueExpr
+ // explicitly because check.use doesn't accept them
+ for _, e := range e.ElemList {
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ // Ideally, we should also "use" kv.Key but we can't know
+ // if it's an externally defined struct key or not. Going
+ // forward anyway can lead to other errors. Give up instead.
+ e = kv.Value
+ }
+ check.use(e)
+ }
+ // if utyp is invalid, an error was reported before
+ if utyp != Typ[Invalid] {
+ check.errorf(e, "invalid composite literal type %s", typ)
+ goto Error
+ }
+ }
+
+ x.mode = value
+ x.typ = typ
+
+ case *syntax.ParenExpr:
+ kind := check.rawExpr(x, e.X, nil, false)
+ x.expr = e
+ return kind
+
+ case *syntax.SelectorExpr:
+ check.selector(x, e, nil)
+
+ case *syntax.IndexExpr:
+ if check.indexExpr(x, e) {
+ check.funcInst(x, e)
+ }
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.SliceExpr:
+ check.sliceExpr(x, e)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.AssertExpr:
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ goto Error
+ }
+ // TODO(gri) we may want to permit type assertions on type parameter values at some point
+ if isTypeParam(x.typ) {
+ check.errorf(x, invalidOp+"cannot use type assertion on type parameter value %s", x)
+ goto Error
+ }
+ if _, ok := under(x.typ).(*Interface); !ok {
+ check.errorf(x, invalidOp+"%s is not an interface", x)
+ goto Error
+ }
+ // x.(type) expressions are encoded via TypeSwitchGuards
+ if e.Type == nil {
+ check.error(e, invalidAST+"invalid use of AssertExpr")
+ goto Error
+ }
+ T := check.varType(e.Type)
+ if T == Typ[Invalid] {
+ goto Error
+ }
+ check.typeAssertion(e, x, T, false)
+ x.mode = commaok
+ x.typ = T
+
+ case *syntax.TypeSwitchGuard:
+ // x.(type) expressions are handled explicitly in type switches
+ check.error(e, invalidAST+"use of .(type) outside type switch")
+ goto Error
+
+ case *syntax.CallExpr:
+ return check.callExpr(x, e)
+
+ case *syntax.ListExpr:
+ // catch-all for unexpected expression lists
+ check.error(e, "unexpected list of expressions")
+ goto Error
+
+ // case *syntax.UnaryExpr:
+ // check.expr(x, e.X)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+ // check.unary(x, e, e.Op)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+ // if e.Op == token.ARROW {
+ // x.expr = e
+ // return statement // receive operations may appear in statement context
+ // }
+
+ // case *syntax.BinaryExpr:
+ // check.binary(x, e, e.X, e.Y, e.Op)
+ // if x.mode == invalid {
+ // goto Error
+ // }
+
+ case *syntax.Operation:
+ if e.Y == nil {
+ // unary expression
+ if e.Op == syntax.Mul {
+ // pointer indirection
+ check.exprOrType(x, e.X, false)
+ switch x.mode {
+ case invalid:
+ goto Error
+ case typexpr:
+ check.validVarType(e.X, x.typ)
+ x.typ = &Pointer{base: x.typ}
+ default:
+ var base Type
+ if !underIs(x.typ, func(u Type) bool {
+ p, _ := u.(*Pointer)
+ if p == nil {
+ check.errorf(x, invalidOp+"cannot indirect %s", x)
+ return false
+ }
+ if base != nil && !Identical(p.base, base) {
+ check.errorf(x, invalidOp+"pointers of %s must have identical base types", x)
+ return false
+ }
+ base = p.base
+ return true
+ }) {
+ goto Error
+ }
+ x.mode = variable
+ x.typ = base
+ }
+ break
+ }
+
+ check.unary(x, e)
+ if x.mode == invalid {
+ goto Error
+ }
+ if e.Op == syntax.Recv {
+ x.expr = e
+ return statement // receive operations may appear in statement context
+ }
+ break
+ }
+
+ // binary expression
+ check.binary(x, e, e.X, e.Y, e.Op)
+ if x.mode == invalid {
+ goto Error
+ }
+
+ case *syntax.KeyValueExpr:
+ // key:value expressions are handled in composite literals
+ check.error(e, invalidAST+"no key:value expected")
+ goto Error
+
+ case *syntax.ArrayType, *syntax.SliceType, *syntax.StructType, *syntax.FuncType,
+ *syntax.InterfaceType, *syntax.MapType, *syntax.ChanType:
+ x.mode = typexpr
+ x.typ = check.typ(e)
+ // Note: rawExpr (caller of exprInternal) will call check.recordTypeAndValue
+ // even though check.typ has already called it. This is fine as both
+ // times the same expression and type are recorded. It is also not a
+ // performance issue because we only reach here for composite literal
+ // types, which are comparatively rare.
+
+ default:
+ panic(fmt.Sprintf("%s: unknown expression type %T", posFor(e), e))
+ }
+
+ // everything went well
+ x.expr = e
+ return expression
+
+Error:
+ x.mode = invalid
+ x.expr = e
+ return statement // avoid follow-up errors
+}
+
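+// keyVal returns a Go value (bool, string, int64, uint64, float64, complex128,
+// or the constant itself) representing the constant x; it is used as the map
+// key when detecting duplicate keys in map literal elements above.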
+func keyVal(x constant.Value) interface{} {
+ switch x.Kind() {
+ case constant.Bool:
+ return constant.BoolVal(x)
+ case constant.String:
+ return constant.StringVal(x)
+ case constant.Int:
+ if v, ok := constant.Int64Val(x); ok {
+ return v
+ }
+ if v, ok := constant.Uint64Val(x); ok {
+ return v
+ }
+ case constant.Float:
+ v, _ := constant.Float64Val(x)
+ return v
+ case constant.Complex:
+ r, _ := constant.Float64Val(constant.Real(x))
+ i, _ := constant.Float64Val(constant.Imag(x))
+ return complex(r, i)
+ }
+ return x
+}
+
+// typeAssertion checks x.(T). The type of x must be an interface.
+func (check *Checker) typeAssertion(e syntax.Expr, x *operand, T Type, typeSwitch bool) {
+ method, alt := check.assertableTo(under(x.typ).(*Interface), T)
+ if method == nil {
+ return // success
+ }
+
+ cause := check.missingMethodReason(T, x.typ, method, alt)
+
+ if typeSwitch {
+ check.errorf(e, "impossible type switch case: %s\n\t%s cannot have dynamic type %s %s", e, x, T, cause)
+ return
+ }
+
+ check.errorf(e, "impossible type assertion: %s\n\t%s does not implement %s %s", e, T, x.typ, cause)
+}
+
+// expr typechecks expression e and initializes x with the expression value.
+// The result must be a single value.
+// If an error occurred, x.mode is set to invalid.
+//
+func (check *Checker) expr(x *operand, e syntax.Expr) {
+ check.rawExpr(x, e, nil, false)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+ check.singleValue(x)
+}
+
+// multiExpr is like expr but the result may also be a multi-value.
+func (check *Checker) multiExpr(x *operand, e syntax.Expr) {
+ check.rawExpr(x, e, nil, false)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+}
+
+// exprWithHint typechecks expression e and initializes x with the expression value;
+// hint is the type of a composite literal element.
+// If an error occurred, x.mode is set to invalid.
+//
+func (check *Checker) exprWithHint(x *operand, e syntax.Expr, hint Type) {
+ assert(hint != nil)
+ check.rawExpr(x, e, hint, false)
+ check.exclude(x, 1<<novalue|1<<builtin|1<<typexpr)
+ check.singleValue(x)
+}
+
+// exprOrType typechecks expression or type e and initializes x with the expression value or type.
+// If allowGeneric is set, the operand type may be an uninstantiated parameterized type or function
+// value.
+// If an error occurred, x.mode is set to invalid.
+//
+func (check *Checker) exprOrType(x *operand, e syntax.Expr, allowGeneric bool) {
+ check.rawExpr(x, e, nil, allowGeneric)
+ check.exclude(x, 1<<novalue)
+ check.singleValue(x)
+}
+
+// exclude reports an error if x.mode is in modeset and sets x.mode to invalid.
+// The modeset may contain any of 1<<novalue, 1<<builtin, 1<<typexpr.
+func (check *Checker) exclude(x *operand, modeset uint) {
+ if modeset&(1<<x.mode) != 0 {
+ var msg string
+ switch x.mode {
+ case novalue:
+ if modeset&(1<<typexpr) != 0 {
+ msg = "%s used as value"
+ } else {
+ msg = "%s used as value or type"
+ }
+ case builtin:
+ msg = "%s must be called"
+ case typexpr:
+ msg = "%s is not an expression"
+ default:
+ unreachable()
+ }
+ check.errorf(x, msg, x)
+ x.mode = invalid
+ }
+}
+
+// singleValue reports an error if x describes a tuple and sets x.mode to invalid.
+func (check *Checker) singleValue(x *operand) {
+ if x.mode == value {
+ // tuple types are never named - no need for underlying type below
+ if t, ok := x.typ.(*Tuple); ok {
+ assert(t.Len() != 1)
+ if check.conf.CompilerErrorMessages {
+ check.errorf(x, "multiple-value %s in single-value context", x)
+ } else {
+ check.errorf(x, "%d-valued %s where single value is expected", t.Len(), x)
+ }
+ x.mode = invalid
+ }
+ }
+}
+
+// op2tok translates syntax.Operators into token.Tokens.
+var op2tok = [...]token.Token{
+ syntax.Def: token.ILLEGAL,
+ syntax.Not: token.NOT,
+ syntax.Recv: token.ILLEGAL,
+
+ syntax.OrOr: token.LOR,
+ syntax.AndAnd: token.LAND,
+
+ syntax.Eql: token.EQL,
+ syntax.Neq: token.NEQ,
+ syntax.Lss: token.LSS,
+ syntax.Leq: token.LEQ,
+ syntax.Gtr: token.GTR,
+ syntax.Geq: token.GEQ,
+
+ syntax.Add: token.ADD,
+ syntax.Sub: token.SUB,
+ syntax.Or: token.OR,
+ syntax.Xor: token.XOR,
+
+ syntax.Mul: token.MUL,
+ syntax.Div: token.QUO,
+ syntax.Rem: token.REM,
+ syntax.And: token.AND,
+ syntax.AndNot: token.AND_NOT,
+ syntax.Shl: token.SHL,
+ syntax.Shr: token.SHR,
+}
diff --git a/src/cmd/compile/internal/types2/gccgosizes.go b/src/cmd/compile/internal/types2/gccgosizes.go
new file mode 100644
index 0000000..05aba53
--- /dev/null
+++ b/src/cmd/compile/internal/types2/gccgosizes.go
@@ -0,0 +1,40 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This is a copy of the file generated during the gccgo build process.
+// Last update 2019-01-22.
+
+package types2
+
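+// Each entry lists {WordSize, MaxAlign} in bytes for the named architecture.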
+var gccgoArchSizes = map[string]*StdSizes{
+ "386": {4, 4},
+ "alpha": {8, 8},
+ "amd64": {8, 8},
+ "amd64p32": {4, 8},
+ "arm": {4, 8},
+ "armbe": {4, 8},
+ "arm64": {8, 8},
+ "arm64be": {8, 8},
+ "ia64": {8, 8},
+ "m68k": {4, 2},
+ "mips": {4, 8},
+ "mipsle": {4, 8},
+ "mips64": {8, 8},
+ "mips64le": {8, 8},
+ "mips64p32": {4, 8},
+ "mips64p32le": {4, 8},
+ "nios2": {4, 8},
+ "ppc": {4, 8},
+ "ppc64": {8, 8},
+ "ppc64le": {8, 8},
+ "riscv": {4, 8},
+ "riscv64": {8, 8},
+ "s390": {4, 8},
+ "s390x": {8, 8},
+ "sh": {4, 8},
+ "shbe": {4, 8},
+ "sparc": {4, 8},
+ "sparc64": {8, 8},
+ "wasm": {8, 8},
+}
diff --git a/src/cmd/compile/internal/types2/hilbert_test.go b/src/cmd/compile/internal/types2/hilbert_test.go
new file mode 100644
index 0000000..03fea4f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/hilbert_test.go
@@ -0,0 +1,218 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "flag"
+ "fmt"
+ "io/ioutil"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+var (
+ H = flag.Int("H", 5, "Hilbert matrix size")
+ out = flag.String("out", "", "write generated program to out")
+)
+
+func TestHilbert(t *testing.T) {
+ // generate source
+ src := program(*H, *out)
+ if *out != "" {
+ ioutil.WriteFile(*out, src, 0666)
+ return
+ }
+
+ // parse source
+ f, err := syntax.Parse(syntax.NewFileBase("hilbert.go"), bytes.NewReader(src), nil, nil, 0)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // type-check file
+ DefPredeclaredTestFuncs() // define assert built-in
+ conf := Config{Importer: defaultImporter()}
+ _, err = conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
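+// program returns the source of a generated test program declaring an n-by-n
+// Hilbert matrix, its inverse, and their product as untyped constants.
+// To inspect the generated source, write it to a file, for example:
+//
+//	go test -run=Hilbert -H=5 -out=hilbert.go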
+func program(n int, out string) []byte {
+ var g gen
+
+ g.p(`// Code generated by: go test -run=Hilbert -H=%d -out=%q. DO NOT EDIT.
+
+// +`+`build ignore
+
+// This program tests arbitrary precision constant arithmetic
+// by generating the constant elements of a Hilbert matrix H,
+// its inverse I, and the product P = H*I. The product should
+// be the identity matrix.
+package main
+
+func main() {
+ if !ok {
+ printProduct()
+ return
+ }
+ println("PASS")
+}
+
+`, n, out)
+ g.hilbert(n)
+ g.inverse(n)
+ g.product(n)
+ g.verify(n)
+ g.printProduct(n)
+ g.binomials(2*n - 1)
+ g.factorials(2*n - 1)
+
+ return g.Bytes()
+}
+
+type gen struct {
+ bytes.Buffer
+}
+
+func (g *gen) p(format string, args ...interface{}) {
+ fmt.Fprintf(&g.Buffer, format, args...)
+}
+
+func (g *gen) hilbert(n int) {
+ g.p(`// Hilbert matrix, n = %d
+const (
+`, n)
+ for i := 0; i < n; i++ {
+ g.p("\t")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("h%d_%d", i, j)
+ }
+ if i == 0 {
+ g.p(" = ")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("1.0/(iota + %d)", j+1)
+ }
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) inverse(n int) {
+ g.p(`// Inverse Hilbert matrix
+const (
+`)
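+	// Each generated constant i<i>_<j> follows the standard closed form for an
+	// inverse Hilbert matrix entry (0-based indices):
+	//	(-1)^(i+j) * (i+j+1) * C(n+i, n-j-1) * C(n+j, n-i-1) * C(i+j, i)^2
+	// expressed via the binomial constants b<j>_<k> = C(j, k) emitted by
+	// g.binomials below.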
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ s := "+"
+ if (i+j)&1 != 0 {
+ s = "-"
+ }
+ g.p("\ti%d_%d = %s%d * b%d_%d * b%d_%d * b%d_%d * b%d_%d\n",
+ i, j, s, i+j+1, n+i, n-j-1, n+j, n-i-1, i+j, i, i+j, i)
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) product(n int) {
+ g.p(`// Product matrix
+const (
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ g.p("\tp%d_%d = ", i, j)
+ for k := 0; k < n; k++ {
+ if k > 0 {
+ g.p(" + ")
+ }
+ g.p("h%d_%d*i%d_%d", i, k, k, j)
+ }
+ g.p("\n")
+ }
+ g.p("\n")
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) verify(n int) {
+ g.p(`// Verify that product is the identity matrix
+const ok =
+`)
+ for i := 0; i < n; i++ {
+ for j := 0; j < n; j++ {
+ if j == 0 {
+ g.p("\t")
+ } else {
+ g.p(" && ")
+ }
+ v := 0
+ if i == j {
+ v = 1
+ }
+ g.p("p%d_%d == %d", i, j, v)
+ }
+ g.p(" &&\n")
+ }
+ g.p("\ttrue\n\n")
+
+ // verify ok at type-check time
+ if *out == "" {
+ g.p("const _ = assert(ok)\n\n")
+ }
+}
+
+func (g *gen) printProduct(n int) {
+ g.p("func printProduct() {\n")
+ for i := 0; i < n; i++ {
+ g.p("\tprintln(")
+ for j := 0; j < n; j++ {
+ if j > 0 {
+ g.p(", ")
+ }
+ g.p("p%d_%d", i, j)
+ }
+ g.p(")\n")
+ }
+ g.p("}\n\n")
+}
+
+func (g *gen) binomials(n int) {
+ g.p(`// Binomials
+const (
+`)
+ for j := 0; j <= n; j++ {
+ if j > 0 {
+ g.p("\n")
+ }
+ for k := 0; k <= j; k++ {
+ g.p("\tb%d_%d = f%d / (f%d*f%d)\n", j, k, j, k, j-k)
+ }
+ }
+ g.p(")\n\n")
+}
+
+func (g *gen) factorials(n int) {
+ g.p(`// Factorials
+const (
+ f0 = 1
+ f1 = 1
+`)
+ for i := 2; i <= n; i++ {
+ g.p("\tf%d = f%d * %d\n", i, i-1, i)
+ }
+ g.p(")\n\n")
+}
diff --git a/src/cmd/compile/internal/types2/importer_test.go b/src/cmd/compile/internal/types2/importer_test.go
new file mode 100644
index 0000000..6b9b500
--- /dev/null
+++ b/src/cmd/compile/internal/types2/importer_test.go
@@ -0,0 +1,35 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements the (temporary) plumbing to get importing to work.
+
+package types2_test
+
+import (
+ gcimporter "cmd/compile/internal/importer"
+ "cmd/compile/internal/types2"
+ "io"
+)
+
+func defaultImporter() types2.Importer {
+ return &gcimports{
+ packages: make(map[string]*types2.Package),
+ }
+}
+
+type gcimports struct {
+ packages map[string]*types2.Package
+ lookup func(path string) (io.ReadCloser, error)
+}
+
+func (m *gcimports) Import(path string) (*types2.Package, error) {
+ return m.ImportFrom(path, "" /* no vendoring */, 0)
+}
+
+func (m *gcimports) ImportFrom(path, srcDir string, mode types2.ImportMode) (*types2.Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+ return gcimporter.Import(m.packages, path, srcDir, m.lookup)
+}
diff --git a/src/cmd/compile/internal/types2/index.go b/src/cmd/compile/internal/types2/index.go
new file mode 100644
index 0000000..61009c1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/index.go
@@ -0,0 +1,466 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of index/slice expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+)
+
+// If e is a valid function instantiation, indexExpr returns true.
+// In that case x represents the uninstantiated function value and
+// it is the caller's responsibility to instantiate the function.
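+// For example, for f[int] where f is a generic function, indexExpr reports
+// true and leaves x as the uninstantiated f; for an ordinary index expression
+// such as a[i] on a slice, it typechecks the index and reports false.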
+func (check *Checker) indexExpr(x *operand, e *syntax.IndexExpr) (isFuncInst bool) {
+ check.exprOrType(x, e.X, true)
+ // x may be generic
+
+ switch x.mode {
+ case invalid:
+ check.use(e.Index)
+ return false
+
+ case typexpr:
+ // type instantiation
+ x.mode = invalid
+ // TODO(gri) here we re-evaluate e.X - try to avoid this
+ x.typ = check.varType(e)
+ if x.typ != Typ[Invalid] {
+ x.mode = typexpr
+ }
+ return false
+
+ case value:
+ if sig, _ := under(x.typ).(*Signature); sig != nil && sig.TypeParams().Len() > 0 {
+ // function instantiation
+ return true
+ }
+ }
+
+ // x should not be generic at this point, but be safe and check
+ check.nonGeneric(x)
+ if x.mode == invalid {
+ return false
+ }
+
+ // ordinary index expression
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch typ := under(x.typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ valid = true
+ if x.mode == constant_ {
+ length = int64(len(constant.StringVal(x.val)))
+ }
+ // an indexed string always yields a byte value
+ // (not a constant) even if the string and the
+ // index are constant
+ x.mode = value
+ x.typ = universeByte // use 'byte' name
+ }
+
+ case *Array:
+ valid = true
+ length = typ.len
+ if x.mode != variable {
+ x.mode = value
+ }
+ x.typ = typ.elem
+
+ case *Pointer:
+ if typ, _ := under(typ.base).(*Array); typ != nil {
+ valid = true
+ length = typ.len
+ x.mode = variable
+ x.typ = typ.elem
+ }
+
+ case *Slice:
+ valid = true
+ x.mode = variable
+ x.typ = typ.elem
+
+ case *Map:
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+ var key operand
+ check.expr(&key, index)
+ check.assignment(&key, typ.key, "map index")
+ // ok to continue even if indexing failed - map element type is known
+ x.mode = mapindex
+ x.typ = typ.elem
+ x.expr = e
+ return false
+
+ case *Interface:
+ if !isTypeParam(x.typ) {
+ break
+ }
+ // TODO(gri) report detailed failure cause for better error messages
+ var key, elem Type // key != nil: we must have all maps
+ mode := variable // non-maps result mode
+ // TODO(gri) factor out closure and use it for non-typeparam cases as well
+ if typ.typeSet().underIs(func(u Type) bool {
+ l := int64(-1) // valid if >= 0
+ var k, e Type // k is only set for maps
+ switch t := u.(type) {
+ case *Basic:
+ if isString(t) {
+ e = universeByte
+ mode = value
+ }
+ case *Array:
+ l = t.len
+ e = t.elem
+ if x.mode != variable {
+ mode = value
+ }
+ case *Pointer:
+ if t, _ := under(t.base).(*Array); t != nil {
+ l = t.len
+ e = t.elem
+ }
+ case *Slice:
+ e = t.elem
+ case *Map:
+ k = t.key
+ e = t.elem
+ }
+ if e == nil {
+ return false
+ }
+ if elem == nil {
+ // first type
+ length = l
+ key, elem = k, e
+ return true
+ }
+ // all map keys must be identical (incl. all nil)
+ // (that is, we cannot mix maps with other types)
+ if !Identical(key, k) {
+ return false
+ }
+ // all element types must be identical
+ if !Identical(elem, e) {
+ return false
+ }
+ // track the minimal length for arrays, if any
+ if l >= 0 && l < length {
+ length = l
+ }
+ return true
+ }) {
+ // For maps, the index expression must be assignable to the map key type.
+ if key != nil {
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+ var k operand
+ check.expr(&k, index)
+ check.assignment(&k, key, "map index")
+ // ok to continue even if indexing failed - map element type is known
+ x.mode = mapindex
+ x.typ = elem
+ x.expr = e
+ return false
+ }
+
+ // no maps
+ valid = true
+ x.mode = mode
+ x.typ = elem
+ }
+ }
+
+ if !valid {
+ check.errorf(e.Pos(), invalidOp+"cannot index %s", x)
+ x.mode = invalid
+ return false
+ }
+
+ index := check.singleIndex(e)
+ if index == nil {
+ x.mode = invalid
+ return false
+ }
+
+ // In pathological (invalid) cases (e.g.: type T1 [][[]T1{}[0][0]]T0)
+ // the element type may be accessed before it's set. Make sure we have
+ // a valid type.
+ if x.typ == nil {
+ x.typ = Typ[Invalid]
+ }
+
+ check.index(index, length)
+ return false
+}
+
+func (check *Checker) sliceExpr(x *operand, e *syntax.SliceExpr) {
+ check.expr(x, e.X)
+ if x.mode == invalid {
+ check.use(e.Index[:]...)
+ return
+ }
+
+ valid := false
+ length := int64(-1) // valid if >= 0
+ switch u := coreString(x.typ).(type) {
+ case nil:
+ check.errorf(x, invalidOp+"cannot slice %s: %s has no core type", x, x.typ)
+ x.mode = invalid
+ return
+
+ case *Basic:
+ if isString(u) {
+ if e.Full {
+ at := e.Index[2]
+ if at == nil {
+ at = e // e.Index[2] should be present but be careful
+ }
+ check.error(at, invalidOp+"3-index slice of string")
+ x.mode = invalid
+ return
+ }
+ valid = true
+ if x.mode == constant_ {
+ length = int64(len(constant.StringVal(x.val)))
+ }
+ // spec: "For untyped string operands the result
+ // is a non-constant value of type string."
+ if isUntyped(x.typ) {
+ x.typ = Typ[String]
+ }
+ }
+
+ case *Array:
+ valid = true
+ length = u.len
+ if x.mode != variable {
+ check.errorf(x, invalidOp+"%s (slice of unaddressable value)", x)
+ x.mode = invalid
+ return
+ }
+ x.typ = &Slice{elem: u.elem}
+
+ case *Pointer:
+ if u, _ := under(u.base).(*Array); u != nil {
+ valid = true
+ length = u.len
+ x.typ = &Slice{elem: u.elem}
+ }
+
+ case *Slice:
+ valid = true
+ // x.typ doesn't change
+ }
+
+ if !valid {
+ check.errorf(x, invalidOp+"cannot slice %s", x)
+ x.mode = invalid
+ return
+ }
+
+ x.mode = value
+
+ // spec: "Only the first index may be omitted; it defaults to 0."
+ if e.Full && (e.Index[1] == nil || e.Index[2] == nil) {
+ check.error(e, invalidAST+"2nd and 3rd index required in 3-index slice")
+ x.mode = invalid
+ return
+ }
+
+ // check indices
+ var ind [3]int64
+ for i, expr := range e.Index {
+ x := int64(-1)
+ switch {
+ case expr != nil:
+ // The "capacity" is only known statically for strings, arrays,
+ // and pointers to arrays, and it is the same as the length for
+ // those types.
+ max := int64(-1)
+ if length >= 0 {
+ max = length + 1
+ }
+ if _, v := check.index(expr, max); v >= 0 {
+ x = v
+ }
+ case i == 0:
+ // default is 0 for the first index
+ x = 0
+ case length >= 0:
+ // default is length (== capacity) otherwise
+ x = length
+ }
+ ind[i] = x
+ }
+
+ // constant indices must be in range
+ // (check.index already checks that existing indices >= 0)
+L:
+ for i, x := range ind[:len(ind)-1] {
+ if x > 0 {
+ for j, y := range ind[i+1:] {
+ if y >= 0 && y < x {
+ // The value y corresponds to the expression e.Index[i+1+j].
+ // Because y >= 0, it must have been set from the expression
+ // when checking indices and thus e.Index[i+1+j] is not nil.
+ check.errorf(e.Index[i+1+j], "invalid slice indices: %d < %d", y, x)
+ break L // only report one error, ok to continue
+ }
+ }
+ }
+ }
+}
+
+// singleIndex returns the (single) index from the index expression e.
+// If the index is missing, or if there are multiple indices, an error
+// is reported and the result is nil.
+func (check *Checker) singleIndex(e *syntax.IndexExpr) syntax.Expr {
+ index := e.Index
+ if index == nil {
+ check.errorf(e, invalidAST+"missing index for %s", e.X)
+ return nil
+ }
+ if l, _ := index.(*syntax.ListExpr); l != nil {
+ if n := len(l.ElemList); n <= 1 {
+ check.errorf(e, invalidAST+"invalid use of ListExpr for index expression %v with %d indices", e, n)
+ return nil
+ }
+ // len(l.ElemList) > 1
+ check.error(l.ElemList[1], invalidOp+"more than one index")
+ index = l.ElemList[0] // continue with first index
+ }
+ return index
+}
+
+// index checks an index expression for validity.
+// If max >= 0, it is the upper bound for index.
+// If the result typ is != Typ[Invalid], index is valid and typ is its (possibly named) integer type.
+// If the result val >= 0, index is valid and val is its constant int value.
+func (check *Checker) index(index syntax.Expr, max int64) (typ Type, val int64) {
+ typ = Typ[Invalid]
+ val = -1
+
+ var x operand
+ check.expr(&x, index)
+ if !check.isValidIndex(&x, "index", false) {
+ return
+ }
+
+ if x.mode != constant_ {
+ return x.typ, -1
+ }
+
+ if x.val.Kind() == constant.Unknown {
+ return
+ }
+
+ v, ok := constant.Int64Val(x.val)
+ assert(ok)
+ if max >= 0 && v >= max {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(&x, invalidArg+"array index %s out of bounds [0:%d]", x.val.String(), max)
+ } else {
+ check.errorf(&x, invalidArg+"index %s is out of bounds", &x)
+ }
+ return
+ }
+
+ // 0 <= v [ && v < max ]
+ return x.typ, v
+}
+
+// isValidIndex checks whether operand x satisfies the criteria for integer
+// index values. If allowNegative is set, a constant operand may be negative.
+// If the operand is not valid, an error is reported (using what as context)
+// and the result is false.
+func (check *Checker) isValidIndex(x *operand, what string, allowNegative bool) bool {
+ if x.mode == invalid {
+ return false
+ }
+
+ // spec: "a constant index that is untyped is given type int"
+ check.convertUntyped(x, Typ[Int])
+ if x.mode == invalid {
+ return false
+ }
+
+ // spec: "the index x must be of integer type or an untyped constant"
+ if !allInteger(x.typ) {
+ check.errorf(x, invalidArg+"%s %s must be integer", what, x)
+ return false
+ }
+
+ if x.mode == constant_ {
+ // spec: "a constant index must be non-negative ..."
+ if !allowNegative && constant.Sign(x.val) < 0 {
+ check.errorf(x, invalidArg+"%s %s must not be negative", what, x)
+ return false
+ }
+
+ // spec: "... and representable by a value of type int"
+ if !representableConst(x.val, check, Typ[Int], &x.val) {
+ check.errorf(x, invalidArg+"%s %s overflows int", what, x)
+ return false
+ }
+ }
+
+ return true
+}
+
+// indexedElts checks the elements (elts) of an array or slice composite literal
+// against the literal's element type (typ), and the element indices against
+// the literal length if known (length >= 0). It returns the length of the
+// literal (maximum index value + 1).
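+// For example, [...]int{5: 1, 2} yields length 7 (explicit index 5 followed by
+// implicit index 6).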
+func (check *Checker) indexedElts(elts []syntax.Expr, typ Type, length int64) int64 {
+ visited := make(map[int64]bool, len(elts))
+ var index, max int64
+ for _, e := range elts {
+ // determine and check index
+ validIndex := false
+ eval := e
+ if kv, _ := e.(*syntax.KeyValueExpr); kv != nil {
+ if typ, i := check.index(kv.Key, length); typ != Typ[Invalid] {
+ if i >= 0 {
+ index = i
+ validIndex = true
+ } else {
+ check.errorf(e, "index %s must be integer constant", kv.Key)
+ }
+ }
+ eval = kv.Value
+ } else if length >= 0 && index >= length {
+ check.errorf(e, "index %d is out of bounds (>= %d)", index, length)
+ } else {
+ validIndex = true
+ }
+
+ // if we have a valid index, check for duplicate entries
+ if validIndex {
+ if visited[index] {
+ check.errorf(e, "duplicate index %d in array or slice literal", index)
+ }
+ visited[index] = true
+ }
+ index++
+ if index > max {
+ max = index
+ }
+
+ // check element against composite literal element type
+ var x operand
+ check.exprWithHint(&x, eval, typ)
+ check.assignment(&x, typ, "array or slice literal")
+ }
+ return max
+}
diff --git a/src/cmd/compile/internal/types2/infer.go b/src/cmd/compile/internal/types2/infer.go
new file mode 100644
index 0000000..e131077
--- /dev/null
+++ b/src/cmd/compile/internal/types2/infer.go
@@ -0,0 +1,788 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type parameter inference.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+const useConstraintTypeInference = true
+
+// infer attempts to infer the complete set of type arguments for generic function instantiation/call
+// based on the given type parameters tparams, type arguments targs, function parameters params, and
+// function arguments args, if any. There must be at least one type parameter, no more type arguments
+// than type parameters, and params and args must match in number (incl. zero).
+// If successful, infer returns the complete list of type arguments, one for each type parameter.
+// Otherwise the result is nil and appropriate errors will be reported.
+//
+// Inference proceeds as follows:
+//
+// Starting with given type arguments
+// 1) apply FTI (function type inference) with typed arguments,
+// 2) apply CTI (constraint type inference),
+// 3) apply FTI with untyped function arguments,
+// 4) apply CTI.
+//
+// The process stops as soon as all type arguments are known or an error occurs.
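+//
+// For example (informally), given
+//
+//	func sum[S ~[]E, E any](s S) E
+//
+// the call sum([]int{1, 2, 3}) unifies S with []int via function type inference
+// (step 1), and constraint type inference then infers E = int from S's
+// constraint ~[]E (step 2); there are no untyped arguments, so steps 3 and 4
+// add nothing and inference succeeds with S = []int and E = int.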
+func (check *Checker) infer(pos syntax.Pos, tparams []*TypeParam, targs []Type, params *Tuple, args []*operand) (result []Type) {
+ if debug {
+ defer func() {
+ assert(result == nil || len(result) == len(tparams))
+ for _, targ := range result {
+ assert(targ != nil)
+ }
+ //check.dump("### inferred targs = %s", result)
+ }()
+ }
+
+ if traceInference {
+ check.dump("-- inferA %s%s ➞ %s", tparams, params, targs)
+ defer func() {
+ check.dump("=> inferA %s ➞ %s", tparams, result)
+ }()
+ }
+
+ // There must be at least one type parameter, and no more type arguments than type parameters.
+ n := len(tparams)
+ assert(n > 0 && len(targs) <= n)
+
+ // Function parameters and arguments must match in number.
+ assert(params.Len() == len(args))
+
+ // If we already have all type arguments, we're done.
+ if len(targs) == n {
+ return targs
+ }
+ // len(targs) < n
+
+ const enableTparamRenaming = true
+ if enableTparamRenaming {
+ // For the purpose of type inference we must differentiate type parameters
+ // occurring in explicit type or value function arguments from the type
+ // parameters we are solving for via unification, because they may be the
+ // same in self-recursive calls. For example:
+ //
+ // func f[P *Q, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+		// In this example, the fact that the P used in the instantiation f[P] has
+ // the same pointer identity as the P we are trying to solve for via
+ // unification is coincidental: there is nothing special about recursive
+ // calls that should cause them to conflate the identity of type arguments
+ // with type parameters. To put it another way: any such self-recursive
+ // call is equivalent to a mutually recursive call, which does not run into
+ // any problems of type parameter identity. For example, the following code
+ // is equivalent to the code above.
+ //
+ // func f[P interface{*Q}, Q any](p P, q Q) {
+ // f2(p)
+ // }
+ //
+ // func f2[P interface{*Q}, Q any](p P, q Q) {
+ // f(p)
+ // }
+ //
+ // We can turn the first example into the second example by renaming type
+ // parameters in the original signature to give them a new identity. As an
+ // optimization, we do this only for self-recursive calls.
+
+ // We can detect if we are in a self-recursive call by comparing the
+ // identity of the first type parameter in the current function with the
+ // first type parameter in tparams. This works because type parameters are
+ // unique to their type parameter list.
+ selfRecursive := check.sig != nil && check.sig.tparams.Len() > 0 && tparams[0] == check.sig.tparams.At(0)
+
+ if selfRecursive {
+ // In self-recursive inference, rename the type parameters with new type
+ // parameters that are the same but for their pointer identity.
+ tparams2 := make([]*TypeParam, len(tparams))
+ for i, tparam := range tparams {
+ tname := NewTypeName(tparam.Obj().Pos(), tparam.Obj().Pkg(), tparam.Obj().Name(), nil)
+ tparams2[i] = NewTypeParam(tname, nil)
+ tparams2[i].index = tparam.index // == i
+ }
+
+ renameMap := makeRenameMap(tparams, tparams2)
+ for i, tparam := range tparams {
+ tparams2[i].bound = check.subst(pos, tparam.bound, renameMap, nil)
+ }
+
+ tparams = tparams2
+ params = check.subst(pos, params, renameMap, nil).(*Tuple)
+ }
+ }
+
+	// If we have two or more arguments, we may have arguments with named and unnamed types.
+	// If that is the case, permute params and args such that the arguments with named
+ // types are first in the list. This doesn't affect type inference if all types are taken
+ // as is. But when we have inexact unification enabled (as is the case for function type
+ // inference), when a named type is unified with an unnamed type, unification proceeds
+ // with the underlying type of the named type because otherwise unification would fail
+ // right away. This leads to an asymmetry in type inference: in cases where arguments of
+ // named and unnamed types are passed to parameters with identical type, different types
+ // (named vs underlying) may be inferred depending on the order of the arguments.
+ // By ensuring that named types are seen first, order dependence is avoided and unification
+ // succeeds where it can.
+ //
+ // This code is disabled for now pending decision whether we want to address cases like
+ // these and make the spec on type inference more complicated (see issue #43056).
+ const enableArgSorting = false
+ if m := len(args); m >= 2 && enableArgSorting {
+ // Determine indices of arguments with named and unnamed types.
+ var named, unnamed []int
+ for i, arg := range args {
+ if hasName(arg.typ) {
+ named = append(named, i)
+ } else {
+ unnamed = append(unnamed, i)
+ }
+ }
+
+ // If we have named and unnamed types, move the arguments with
+ // named types first. Update the parameter list accordingly.
+ // Make copies so as not to clobber the incoming slices.
+ if len(named) != 0 && len(unnamed) != 0 {
+ params2 := make([]*Var, m)
+ args2 := make([]*operand, m)
+ i := 0
+ for _, j := range named {
+ params2[i] = params.At(j)
+ args2[i] = args[j]
+ i++
+ }
+ for _, j := range unnamed {
+ params2[i] = params.At(j)
+ args2[i] = args[j]
+ i++
+ }
+ params = NewTuple(params2...)
+ args = args2
+ }
+ }
+
+ // --- 1 ---
+ // Continue with the type arguments we have. Avoid matching generic
+ // parameters that already have type arguments against function arguments:
+ // It may fail because matching uses type identity while parameter passing
+ // uses assignment rules. Instantiate the parameter list with the type
+ // arguments we have, and continue with that parameter list.
+
+ // First, make sure we have a "full" list of type arguments, some of which
+ // may be nil (unknown). Make a copy so as to not clobber the incoming slice.
+ if len(targs) < n {
+ targs2 := make([]Type, n)
+ copy(targs2, targs)
+ targs = targs2
+ }
+ // len(targs) == n
+
+ // Substitute type arguments for their respective type parameters in params,
+ // if any. Note that nil targs entries are ignored by check.subst.
+ // TODO(gri) Can we avoid this (we're setting known type arguments below,
+ // but that doesn't impact the isParameterized check for now).
+ if params.Len() > 0 {
+ smap := makeSubstMap(tparams, targs)
+ params = check.subst(nopos, params, smap, nil).(*Tuple)
+ }
+
+ // Unify parameter and argument types for generic parameters with typed arguments
+ // and collect the indices of generic parameters with untyped arguments.
+ // Terminology: generic parameter = function parameter with a type-parameterized type
+ u := newUnifier(false)
+ u.x.init(tparams)
+
+ // Set the type arguments which we know already.
+ for i, targ := range targs {
+ if targ != nil {
+ u.x.set(i, targ)
+ }
+ }
+
+ errorf := func(kind string, tpar, targ Type, arg *operand) {
+ // provide a better error message if we can
+ targs, index := u.x.types()
+ if index == 0 {
+ // The first type parameter couldn't be inferred.
+ // If none of them could be inferred, don't try
+ // to provide the inferred type in the error msg.
+ allFailed := true
+ for _, targ := range targs {
+ if targ != nil {
+ allFailed = false
+ break
+ }
+ }
+ if allFailed {
+ check.errorf(arg, "%s %s of %s does not match %s (cannot infer %s)", kind, targ, arg.expr, tpar, typeParamsString(tparams))
+ return
+ }
+ }
+ smap := makeSubstMap(tparams, targs)
+ inferred := check.subst(arg.Pos(), tpar, smap, nil)
+ if inferred != tpar {
+ check.errorf(arg, "%s %s of %s does not match inferred type %s for %s", kind, targ, arg.expr, inferred, tpar)
+ } else {
+ check.errorf(arg, "%s %s of %s does not match %s", kind, targ, arg.expr, tpar)
+ }
+ }
+
+ // indices of the generic parameters with untyped arguments - save for later
+ var indices []int
+ for i, arg := range args {
+ par := params.At(i)
+ // If we permit bidirectional unification, this conditional code needs to be
+ // executed even if par.typ is not parameterized since the argument may be a
+ // generic function (for which we want to infer its type arguments).
+ if isParameterized(tparams, par.typ) {
+ if arg.mode == invalid {
+ // An error was reported earlier. Ignore this targ
+ // and continue, we may still be able to infer all
+ // targs resulting in fewer follow-on errors.
+ continue
+ }
+ if targ := arg.typ; isTyped(targ) {
+ // If we permit bidirectional unification, and targ is
+ // a generic function, we need to initialize u.y with
+ // the respective type parameters of targ.
+ if !u.unify(par.typ, targ) {
+ errorf("type", par.typ, targ, arg)
+ return nil
+ }
+ } else if _, ok := par.typ.(*TypeParam); ok {
+ // Since default types are all basic (i.e., non-composite) types, an
+ // untyped argument will never match a composite parameter type; the
+ // only parameter type it can possibly match against is a *TypeParam.
+ // Thus, for untyped arguments we only need to look at parameter types
+ // that are single type parameters.
+ indices = append(indices, i)
+ }
+ }
+ }
+
+ // If we've got all type arguments, we're done.
+ var index int
+ targs, index = u.x.types()
+ if index < 0 {
+ return targs
+ }
+
+ // --- 2 ---
+ // See how far we get with constraint type inference.
+ // Note that even if we don't have any type arguments, constraint type inference
+ // may produce results for constraints that explicitly specify a type.
+ if useConstraintTypeInference {
+ targs, index = check.inferB(pos, tparams, targs)
+ if targs == nil || index < 0 {
+ return targs
+ }
+ }
+
+ // --- 3 ---
+ // Use any untyped arguments to infer additional type arguments.
+ // Some generic parameters with untyped arguments may have been given
+ // a type by now, we can ignore them.
+ for _, i := range indices {
+ tpar := params.At(i).typ.(*TypeParam) // is type parameter by construction of indices
+ // Only consider untyped arguments for which the corresponding type
+ // parameter doesn't have an inferred type yet.
+ if targs[tpar.index] == nil {
+ arg := args[i]
+ targ := Default(arg.typ)
+ // The default type for an untyped nil is untyped nil. We must not
+ // infer an untyped nil type as type parameter type. Ignore untyped
+ // nil by making sure all default argument types are typed.
+ if isTyped(targ) && !u.unify(tpar, targ) {
+ errorf("default type", tpar, targ, arg)
+ return nil
+ }
+ }
+ }
+
+ // If we've got all type arguments, we're done.
+ targs, index = u.x.types()
+ if index < 0 {
+ return targs
+ }
+
+ // --- 4 ---
+ // Again, follow up with constraint type inference.
+ if useConstraintTypeInference {
+ targs, index = check.inferB(pos, tparams, targs)
+ if targs == nil || index < 0 {
+ return targs
+ }
+ }
+
+ // At least one type argument couldn't be inferred.
+ assert(targs != nil && index >= 0 && targs[index] == nil)
+ tpar := tparams[index]
+ check.errorf(pos, "cannot infer %s (%s)", tpar.obj.name, tpar.obj.pos)
+ return nil
+}
+
+// typeParamsString produces a string of the type parameter names
+// in list suitable for human consumption.
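+// For instance, a hypothetical parameter list [P, Q, R] yields "P, Q, and R".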
+func typeParamsString(list []*TypeParam) string {
+ // common cases
+ n := len(list)
+ switch n {
+ case 0:
+ return ""
+ case 1:
+ return list[0].obj.name
+ case 2:
+ return list[0].obj.name + " and " + list[1].obj.name
+ }
+
+ // general case (n > 2)
+ // Would like to use strings.Builder but it's not available in Go 1.4.
+ var b bytes.Buffer
+ for i, tname := range list[:n-1] {
+ if i > 0 {
+ b.WriteString(", ")
+ }
+ b.WriteString(tname.obj.name)
+ }
+ b.WriteString(", and ")
+ b.WriteString(list[n-1].obj.name)
+ return b.String()
+}
+
+// isParameterized reports whether typ contains any of the type parameters of tparams.
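+// For example, assuming tparams is [P], a type such as map[string][]P is
+// parameterized, while map[string]int is not.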
+func isParameterized(tparams []*TypeParam, typ Type) bool {
+ w := tpWalker{
+ seen: make(map[Type]bool),
+ tparams: tparams,
+ }
+ return w.isParameterized(typ)
+}
+
+type tpWalker struct {
+ seen map[Type]bool
+ tparams []*TypeParam
+}
+
+func (w *tpWalker) isParameterized(typ Type) (res bool) {
+ // detect cycles
+ if x, ok := w.seen[typ]; ok {
+ return x
+ }
+ w.seen[typ] = false
+ defer func() {
+ w.seen[typ] = res
+ }()
+
+ switch t := typ.(type) {
+ case nil, *Basic: // TODO(gri) should nil be handled here?
+ break
+
+ case *Array:
+ return w.isParameterized(t.elem)
+
+ case *Slice:
+ return w.isParameterized(t.elem)
+
+ case *Struct:
+ for _, fld := range t.fields {
+ if w.isParameterized(fld.typ) {
+ return true
+ }
+ }
+
+ case *Pointer:
+ return w.isParameterized(t.base)
+
+ case *Tuple:
+ n := t.Len()
+ for i := 0; i < n; i++ {
+ if w.isParameterized(t.At(i).typ) {
+ return true
+ }
+ }
+
+ case *Signature:
+ // t.tparams may not be nil if we are looking at a signature
+ // of a generic function type (or an interface method) that is
+ // part of the type we're testing. We don't care about these type
+ // parameters.
+		// Similarly, the receiver of a method may declare (rather than
+		// use) type parameters; we don't care about those either.
+ // Thus, we only need to look at the input and result parameters.
+ return w.isParameterized(t.params) || w.isParameterized(t.results)
+
+ case *Interface:
+ tset := t.typeSet()
+ for _, m := range tset.methods {
+ if w.isParameterized(m.typ) {
+ return true
+ }
+ }
+ return tset.is(func(t *term) bool {
+ return t != nil && w.isParameterized(t.typ)
+ })
+
+ case *Map:
+ return w.isParameterized(t.key) || w.isParameterized(t.elem)
+
+ case *Chan:
+ return w.isParameterized(t.elem)
+
+ case *Named:
+ return w.isParameterizedTypeList(t.targs.list())
+
+ case *TypeParam:
+ // t must be one of w.tparams
+ return tparamIndex(w.tparams, t) >= 0
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
+
+func (w *tpWalker) isParameterizedTypeList(list []Type) bool {
+ for _, t := range list {
+ if w.isParameterized(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// inferB returns the list of actual type arguments inferred from the type parameters'
+// bounds and an initial set of type arguments. If type inference is impossible because
+// unification fails, an error is reported, the resulting types
+// list is nil, and index is 0.
+// Otherwise, types is the list of inferred type arguments, and index is the index of the
+// first type argument in that list that couldn't be inferred (and thus is nil). If all
+// type arguments were inferred successfully, index is < 0. The number of type arguments
+// provided may be less than the number of type parameters, but there must be at least one.
+func (check *Checker) inferB(pos syntax.Pos, tparams []*TypeParam, targs []Type) (types []Type, index int) {
+ assert(len(tparams) >= len(targs) && len(targs) > 0)
+
+ if traceInference {
+ check.dump("-- inferB %s ➞ %s", tparams, targs)
+ defer func() {
+ check.dump("=> inferB %s ➞ %s", tparams, types)
+ }()
+ }
+
+ // Setup bidirectional unification between constraints
+ // and the corresponding type arguments (which may be nil!).
+ u := newUnifier(false)
+ u.x.init(tparams)
+ u.y = u.x // type parameters between LHS and RHS of unification are identical
+
+ // Set the type arguments which we know already.
+ for i, targ := range targs {
+ if targ != nil {
+ u.x.set(i, targ)
+ }
+ }
+
+ // Repeatedly apply constraint type inference as long as
+ // there are still unknown type arguments and progress is
+ // being made.
+ //
+ // This is an O(n^2) algorithm where n is the number of
+ // type parameters: if there is progress (and iteration
+ // continues), at least one type argument is inferred
+ // per iteration and we have a doubly nested loop.
+ // In practice this is not a problem because the number
+ // of type parameters tends to be very small (< 5 or so).
+ // (It should be possible for unification to efficiently
+ // signal newly inferred type arguments; then the loops
+ // here could handle the respective type parameters only,
+ // but that will come at a cost of extra complexity which
+ // may not be worth it.)
+ for n := u.x.unknowns(); n > 0; {
+ nn := n
+
+ for i, tpar := range tparams {
+ // If there is a core term (i.e., a core type with tilde information)
+ // unify the type parameter with the core type.
+ if core, single := coreTerm(tpar); core != nil {
+ // A type parameter can be unified with its core type in two cases.
+ tx := u.x.at(i)
+ switch {
+ case tx != nil:
+ // The corresponding type argument tx is known.
+ // In this case, if the core type has a tilde, the type argument's underlying
+ // type must match the core type, otherwise the type argument and the core type
+ // must match.
+ // If tx is an external type parameter, don't consider its underlying type
+ // (which is an interface). Core type unification will attempt to unify against
+ // core.typ.
+					// Note also that even with inexact unification we cannot omit the under
+ // call here because it's possible that both tx and core.typ are named types,
+ // with under(tx) being a (named) basic type matching core.typ. Such cases do
+ // not match with inexact unification.
+ if core.tilde && !isTypeParam(tx) {
+ tx = under(tx)
+ }
+ if !u.unify(tx, core.typ) {
+ // TODO(gri) improve error message by providing the type arguments
+ // which we know already
+ // Don't use term.String() as it always qualifies types, even if they
+ // are in the current package.
+ tilde := ""
+ if core.tilde {
+ tilde = "~"
+ }
+ check.errorf(pos, "%s does not match %s%s", tpar, tilde, core.typ)
+ return nil, 0
+ }
+
+ case single && !core.tilde:
+ // The corresponding type argument tx is unknown and there's a single
+ // specific type and no tilde.
+ // In this case the type argument must be that single type; set it.
+ u.x.set(i, core.typ)
+
+ default:
+ // Unification is not possible and no progress was made.
+ continue
+ }
+
+ // The number of known type arguments may have changed.
+ nn = u.x.unknowns()
+ if nn == 0 {
+ break // all type arguments are known
+ }
+ }
+ }
+
+ assert(nn <= n)
+ if nn == n {
+ break // no progress
+ }
+ n = nn
+ }
+
+ // u.x.types() now contains the incoming type arguments plus any additional type
+ // arguments which were inferred from core terms. The newly inferred non-nil
+ // entries may still contain references to other type parameters.
+ // For instance, for [A any, B interface{ []C }, C interface{ *A }], if A == int
+ // was given, unification produced the type list [int, []C, *A]. We eliminate the
+ // remaining type parameters by substituting the type parameters in this type list
+ // until nothing changes anymore.
+ types, _ = u.x.types()
+ if debug {
+ for i, targ := range targs {
+ assert(targ == nil || types[i] == targ)
+ }
+ }
+
+ // The data structure of each (provided or inferred) type represents a graph, where
+	// each node corresponds to a type and each (directed) edge points to a component
+ // type. The substitution process described above repeatedly replaces type parameter
+ // nodes in these graphs with the graphs of the types the type parameters stand for,
+ // which creates a new (possibly bigger) graph for each type.
+ // The substitution process will not stop if the replacement graph for a type parameter
+ // also contains that type parameter.
+ // For instance, for [A interface{ *A }], without any type argument provided for A,
+ // unification produces the type list [*A]. Substituting A in *A with the value for
+ // A will lead to infinite expansion by producing [**A], [****A], [********A], etc.,
+ // because the graph A -> *A has a cycle through A.
+ // Generally, cycles may occur across multiple type parameters and inferred types
+ // (for instance, consider [P interface{ *Q }, Q interface{ func(P) }]).
+ // We eliminate cycles by walking the graphs for all type parameters. If a cycle
+	// through a type parameter is detected, cycleFinder nils out the respective type
+ // which kills the cycle; this also means that the respective type could not be
+ // inferred.
+ //
+ // TODO(gri) If useful, we could report the respective cycle as an error. We don't
+ // do this now because type inference will fail anyway, and furthermore,
+ // constraints with cycles of this kind cannot currently be satisfied by
+	//           any user-supplied type. But should that change, reporting an error
+ // would be wrong.
+ w := cycleFinder{tparams, types, make(map[Type]bool)}
+ for _, t := range tparams {
+ w.typ(t) // t != nil
+ }
+
+ // dirty tracks the indices of all types that may still contain type parameters.
+ // We know that nil type entries and entries corresponding to provided (non-nil)
+ // type arguments are clean, so exclude them from the start.
+ var dirty []int
+ for i, typ := range types {
+ if typ != nil && (i >= len(targs) || targs[i] == nil) {
+ dirty = append(dirty, i)
+ }
+ }
+
+ for len(dirty) > 0 {
+ // TODO(gri) Instead of creating a new substMap for each iteration,
+ // provide an update operation for substMaps and only change when
+ // needed. Optimization.
+ smap := makeSubstMap(tparams, types)
+ n := 0
+ for _, index := range dirty {
+ t0 := types[index]
+ if t1 := check.subst(nopos, t0, smap, nil); t1 != t0 {
+ types[index] = t1
+ dirty[n] = index
+ n++
+ }
+ }
+ dirty = dirty[:n]
+ }
+
+ // Once nothing changes anymore, we may still have type parameters left;
+ // e.g., a constraint with core type *P may match a type parameter Q but
+ // we don't have any type arguments to fill in for *P or Q (issue #45548).
+ // Don't let such inferences escape, instead nil them out.
+ for i, typ := range types {
+ if typ != nil && isParameterized(tparams, typ) {
+ types[i] = nil
+ }
+ }
+
+ // update index
+ index = -1
+ for i, typ := range types {
+ if typ == nil {
+ index = i
+ break
+ }
+ }
+
+ return
+}
+
+// If the type parameter has a single specific type S, coreTerm returns (S, true).
+// Otherwise, if tpar has a core type T, it returns a term corresponding to that
+// core type and false. In that case, if any term of tpar has a tilde, the core
+// term has a tilde. In all other cases coreTerm returns (nil, false).
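+// For instance, for a hypothetical type parameter constrained by interface{ int },
+// coreTerm returns the single term int together with true.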
+func coreTerm(tpar *TypeParam) (*term, bool) {
+ n := 0
+ var single *term // valid if n == 1
+ var tilde bool
+ tpar.is(func(t *term) bool {
+ if t == nil {
+ assert(n == 0)
+ return false // no terms
+ }
+ n++
+ single = t
+ if t.tilde {
+ tilde = true
+ }
+ return true
+ })
+ if n == 1 {
+ if debug {
+ assert(debug && under(single.typ) == coreType(tpar))
+ }
+ return single, true
+ }
+ if typ := coreType(tpar); typ != nil {
+ // A core type is always an underlying type.
+ // If any term of tpar has a tilde, we don't
+ // have a precise core type and we must return
+ // a tilde as well.
+ return &term{tilde, typ}, false
+ }
+ return nil, false
+}
+
+type cycleFinder struct {
+ tparams []*TypeParam
+ types []Type
+ seen map[Type]bool
+}
+
+func (w *cycleFinder) typ(typ Type) {
+ if w.seen[typ] {
+ // We have seen typ before. If it is one of the type parameters
+ // in tparams, iterative substitution will lead to infinite expansion.
+ // Nil out the corresponding type which effectively kills the cycle.
+ if tpar, _ := typ.(*TypeParam); tpar != nil {
+ if i := tparamIndex(w.tparams, tpar); i >= 0 {
+ // cycle through tpar
+ w.types[i] = nil
+ }
+ }
+ // If we don't have one of our type parameters, the cycle is due
+ // to an ordinary recursive type and we can just stop walking it.
+ return
+ }
+ w.seen[typ] = true
+ defer delete(w.seen, typ)
+
+ switch t := typ.(type) {
+ case *Basic:
+ // nothing to do
+
+ case *Array:
+ w.typ(t.elem)
+
+ case *Slice:
+ w.typ(t.elem)
+
+ case *Struct:
+ w.varList(t.fields)
+
+ case *Pointer:
+ w.typ(t.base)
+
+ // case *Tuple:
+ // This case should not occur because tuples only appear
+ // in signatures where they are handled explicitly.
+
+ case *Signature:
+ if t.params != nil {
+ w.varList(t.params.vars)
+ }
+ if t.results != nil {
+ w.varList(t.results.vars)
+ }
+
+ case *Union:
+ for _, t := range t.terms {
+ w.typ(t.typ)
+ }
+
+ case *Interface:
+ for _, m := range t.methods {
+ w.typ(m.typ)
+ }
+ for _, t := range t.embeddeds {
+ w.typ(t)
+ }
+
+ case *Map:
+ w.typ(t.key)
+ w.typ(t.elem)
+
+ case *Chan:
+ w.typ(t.elem)
+
+ case *Named:
+ for _, tpar := range t.TypeArgs().list() {
+ w.typ(tpar)
+ }
+
+ case *TypeParam:
+ if i := tparamIndex(w.tparams, t); i >= 0 && w.types[i] != nil {
+ w.typ(w.types[i])
+ }
+
+ default:
+ panic(fmt.Sprintf("unexpected %T", typ))
+ }
+}
+
+func (w *cycleFinder) varList(list []*Var) {
+ for _, v := range list {
+ w.typ(v.typ)
+ }
+}
diff --git a/src/cmd/compile/internal/types2/initorder.go b/src/cmd/compile/internal/types2/initorder.go
new file mode 100644
index 0000000..cf6110b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/initorder.go
@@ -0,0 +1,323 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "container/heap"
+ "fmt"
+ "sort"
+)
+
+// initOrder computes the Info.InitOrder for package variables.
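+// For example, given hypothetical declarations var a = b and var b = 1,
+// b has no dependencies and therefore precedes a in the computed order.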
+func (check *Checker) initOrder() {
+ // An InitOrder may already have been computed if a package is
+ // built from several calls to (*Checker).Files. Clear it.
+ check.Info.InitOrder = check.Info.InitOrder[:0]
+
+ // Compute the object dependency graph and initialize
+ // a priority queue with the list of graph nodes.
+ pq := nodeQueue(dependencyGraph(check.objMap))
+ heap.Init(&pq)
+
+ const debug = false
+ if debug {
+ fmt.Printf("Computing initialization order for %s\n\n", check.pkg)
+ fmt.Println("Object dependency graph:")
+ for obj, d := range check.objMap {
+ // only print objects that may appear in the dependency graph
+ if obj, _ := obj.(dependency); obj != nil {
+ if len(d.deps) > 0 {
+ fmt.Printf("\t%s depends on\n", obj.Name())
+ for dep := range d.deps {
+ fmt.Printf("\t\t%s\n", dep.Name())
+ }
+ } else {
+ fmt.Printf("\t%s has no dependencies\n", obj.Name())
+ }
+ }
+ }
+ fmt.Println()
+
+ fmt.Println("Transposed object dependency graph (functions eliminated):")
+ for _, n := range pq {
+ fmt.Printf("\t%s depends on %d nodes\n", n.obj.Name(), n.ndeps)
+ for p := range n.pred {
+ fmt.Printf("\t\t%s is dependent\n", p.obj.Name())
+ }
+ }
+ fmt.Println()
+
+ fmt.Println("Processing nodes:")
+ }
+
+ // Determine initialization order by removing the highest priority node
+ // (the one with the fewest dependencies) and its edges from the graph,
+ // repeatedly, until there are no nodes left.
+ // In a valid Go program, those nodes always have zero dependencies (after
+ // removing all incoming dependencies), otherwise there are initialization
+ // cycles.
+ emitted := make(map[*declInfo]bool)
+ for len(pq) > 0 {
+ // get the next node
+ n := heap.Pop(&pq).(*graphNode)
+
+ if debug {
+ fmt.Printf("\t%s (src pos %d) depends on %d nodes now\n",
+ n.obj.Name(), n.obj.order(), n.ndeps)
+ }
+
+ // if n still depends on other nodes, we have a cycle
+ if n.ndeps > 0 {
+ cycle := findPath(check.objMap, n.obj, n.obj, make(map[Object]bool))
+ // If n.obj is not part of the cycle (e.g., n.obj->b->c->d->c),
+ // cycle will be nil. Don't report anything in that case since
+ // the cycle is reported when the algorithm gets to an object
+ // in the cycle.
+ // Furthermore, once an object in the cycle is encountered,
+ // the cycle will be broken (dependency count will be reduced
+ // below), and so the remaining nodes in the cycle don't trigger
+ // another error (unless they are part of multiple cycles).
+ if cycle != nil {
+ check.reportCycle(cycle)
+ }
+ // Ok to continue, but the variable initialization order
+ // will be incorrect at this point since it assumes no
+ // cycle errors.
+ }
+
+ // reduce dependency count of all dependent nodes
+ // and update priority queue
+ for p := range n.pred {
+ p.ndeps--
+ heap.Fix(&pq, p.index)
+ }
+
+ // record the init order for variables with initializers only
+ v, _ := n.obj.(*Var)
+ info := check.objMap[v]
+ if v == nil || !info.hasInitializer() {
+ continue
+ }
+
+ // n:1 variable declarations such as: a, b = f()
+ // introduce a node for each lhs variable (here: a, b);
+ // but they all have the same initializer - emit only
+ // one, for the first variable seen
+ if emitted[info] {
+ continue // initializer already emitted, if any
+ }
+ emitted[info] = true
+
+ infoLhs := info.lhs // possibly nil (see declInfo.lhs field comment)
+ if infoLhs == nil {
+ infoLhs = []*Var{v}
+ }
+ init := &Initializer{infoLhs, info.init}
+ check.Info.InitOrder = append(check.Info.InitOrder, init)
+ }
+
+ if debug {
+ fmt.Println()
+ fmt.Println("Initialization order:")
+ for _, init := range check.Info.InitOrder {
+ fmt.Printf("\t%s\n", init)
+ }
+ fmt.Println()
+ }
+}
+
+// findPath returns the (reversed) list of objects []Object{to, ... from}
+// such that there is a path of object dependencies from 'from' to 'to'.
+// If there is no such path, the result is nil.
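+// For instance, for a hypothetical dependency chain a -> b -> c -> a,
+// findPath(objMap, a, a, seen) returns [a, c, b].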
+func findPath(objMap map[Object]*declInfo, from, to Object, seen map[Object]bool) []Object {
+ if seen[from] {
+ return nil
+ }
+ seen[from] = true
+
+ for d := range objMap[from].deps {
+ if d == to {
+ return []Object{d}
+ }
+ if P := findPath(objMap, d, to, seen); P != nil {
+ return append(P, d)
+ }
+ }
+
+ return nil
+}
+
+// reportCycle reports an error for the given cycle.
+func (check *Checker) reportCycle(cycle []Object) {
+ obj := cycle[0]
+ var err error_
+ if check.conf.CompilerErrorMessages {
+ err.errorf(obj, "initialization loop for %s", obj.Name())
+ } else {
+ err.errorf(obj, "initialization cycle for %s", obj.Name())
+ }
+ // subtle loop: print cycle[i] for i = 0, n-1, n-2, ... 1 for len(cycle) = n
+ for i := len(cycle) - 1; i >= 0; i-- {
+ err.errorf(obj, "%s refers to", obj.Name())
+ obj = cycle[i]
+ }
+ // print cycle[0] again to close the cycle
+ err.errorf(obj, "%s", obj.Name())
+ check.report(&err)
+}
+
+// ----------------------------------------------------------------------------
+// Object dependency graph
+
+// A dependency is an object that may be a dependency in an initialization
+// expression. Only constants, variables, and functions can be dependencies.
+// Constants are here because constant expression cycles are reported during
+// initialization order computation.
+type dependency interface {
+ Object
+ isDependency()
+}
+
+// A graphNode represents a node in the object dependency graph.
+// Each node p in n.pred represents an edge p->n, and each node
+// s in n.succ represents an edge n->s; with a->b indicating that
+// a depends on b.
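+//
+// For a hypothetical declaration var a = b, the node for a records the node
+// for b in succ (a depends on b), and b records a in pred.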
+type graphNode struct {
+ obj dependency // object represented by this node
+ pred, succ nodeSet // consumers and dependencies of this node (lazily initialized)
+ index int // node index in graph slice/priority queue
+ ndeps int // number of outstanding dependencies before this object can be initialized
+}
+
+// cost returns the cost of removing this node, which involves copying each
+// predecessor to each successor (and vice-versa).
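+// Function nodes with cost 0 (no predecessors or no successors) are removed first.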
+func (n *graphNode) cost() int {
+ return len(n.pred) * len(n.succ)
+}
+
+type nodeSet map[*graphNode]bool
+
+func (s *nodeSet) add(p *graphNode) {
+ if *s == nil {
+ *s = make(nodeSet)
+ }
+ (*s)[p] = true
+}
+
+// dependencyGraph computes the object dependency graph from the given objMap,
+// with any function nodes removed. The resulting graph contains only constants
+// and variables.
+func dependencyGraph(objMap map[Object]*declInfo) []*graphNode {
+ // M is the dependency (Object) -> graphNode mapping
+ M := make(map[dependency]*graphNode)
+ for obj := range objMap {
+ // only consider nodes that may be an initialization dependency
+ if obj, _ := obj.(dependency); obj != nil {
+ M[obj] = &graphNode{obj: obj}
+ }
+ }
+
+ // compute edges for graph M
+ // (We need to include all nodes, even isolated ones, because they still need
+ // to be scheduled for initialization in correct order relative to other nodes.)
+ for obj, n := range M {
+		// for each dependency obj -> d, create the edges n->d (n.succ) and d->n (d.pred)
+ for d := range objMap[obj].deps {
+ // only consider nodes that may be an initialization dependency
+ if d, _ := d.(dependency); d != nil {
+ d := M[d]
+ n.succ.add(d)
+ d.pred.add(n)
+ }
+ }
+ }
+
+ var G, funcG []*graphNode // separate non-functions and functions
+ for _, n := range M {
+ if _, ok := n.obj.(*Func); ok {
+ funcG = append(funcG, n)
+ } else {
+ G = append(G, n)
+ }
+ }
+
+ // remove function nodes and collect remaining graph nodes in G
+ // (Mutually recursive functions may introduce cycles among themselves
+ // which are permitted. Yet such cycles may incorrectly inflate the dependency
+ // count for variables which in turn may not get scheduled for initialization
+ // in correct order.)
+ //
+ // Note that because we recursively copy predecessors and successors
+ // throughout the function graph, the cost of removing a function at
+ // position X is proportional to cost * (len(funcG)-X). Therefore, we should
+ // remove high-cost functions last.
+ sort.Slice(funcG, func(i, j int) bool {
+ return funcG[i].cost() < funcG[j].cost()
+ })
+ for _, n := range funcG {
+ // connect each predecessor p of n with each successor s
+ // and drop the function node (don't collect it in G)
+ for p := range n.pred {
+ // ignore self-cycles
+ if p != n {
+ // Each successor s of n becomes a successor of p, and
+ // each predecessor p of n becomes a predecessor of s.
+ for s := range n.succ {
+ // ignore self-cycles
+ if s != n {
+ p.succ.add(s)
+ s.pred.add(p)
+ }
+ }
+ delete(p.succ, n) // remove edge to n
+ }
+ }
+ for s := range n.succ {
+ delete(s.pred, n) // remove edge to n
+ }
+ }
+
+ // fill in index and ndeps fields
+ for i, n := range G {
+ n.index = i
+ n.ndeps = len(n.succ)
+ }
+
+ return G
+}
+
+// ----------------------------------------------------------------------------
+// Priority queue
+
+// nodeQueue implements the container/heap interface;
+// a nodeQueue may be used as a priority queue.
+type nodeQueue []*graphNode
+
+func (a nodeQueue) Len() int { return len(a) }
+
+func (a nodeQueue) Swap(i, j int) {
+ x, y := a[i], a[j]
+ a[i], a[j] = y, x
+ x.index, y.index = j, i
+}
+
+func (a nodeQueue) Less(i, j int) bool {
+ x, y := a[i], a[j]
+ // nodes are prioritized by number of incoming dependencies (1st key)
+ // and source order (2nd key)
+ return x.ndeps < y.ndeps || x.ndeps == y.ndeps && x.obj.order() < y.obj.order()
+}
+
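+// Push is never called by initOrder: the queue is fully populated before
+// heap.Init, and afterwards it only shrinks via heap.Pop and is reordered
+// via heap.Fix.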
+func (a *nodeQueue) Push(x interface{}) {
+ panic("unreachable")
+}
+
+func (a *nodeQueue) Pop() interface{} {
+ n := len(*a)
+ x := (*a)[n-1]
+ x.index = -1 // for safety
+ *a = (*a)[:n-1]
+ return x
+}
diff --git a/src/cmd/compile/internal/types2/instantiate.go b/src/cmd/compile/internal/types2/instantiate.go
new file mode 100644
index 0000000..2dc82d8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/instantiate.go
@@ -0,0 +1,255 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements instantiation of generic types
+// through substitution of type parameters by type arguments.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "errors"
+ "fmt"
+)
+
+// Instantiate instantiates the type orig with the given type arguments targs.
+// orig must be a *Named or a *Signature type. If there is no error, the
+// resulting Type is an instantiated type of the same kind (either a *Named or
+// a *Signature). Methods attached to a *Named type are also instantiated, and
+// associated with a new *Func that has the same position as the original
+// method, but nil function scope.
+//
+// If ctxt is non-nil, it may be used to de-duplicate the instance against
+// previous instances with the same identity. As a special case, generic
+// *Signature origin types are only considered identical if they are pointer
+// equivalent, so that instantiating distinct (but possibly identical)
+// signatures will yield different instances. The use of a shared context does
+// not guarantee that identical instances are deduplicated in all cases.
+//
+// If validate is set, Instantiate verifies that the number of type arguments
+// and parameters match, and that the type arguments satisfy their
+// corresponding type constraints. If verification fails, the resulting error
+// may wrap an *ArgumentError indicating which type argument did not satisfy
+// its corresponding type parameter constraint, and why.
+//
+// If validate is not set, Instantiate does not verify the type argument count
+// or whether the type arguments satisfy their constraints. Instantiate is
+// guaranteed to not return an error, but may panic. Specifically, for
+// *Signature types, Instantiate will panic immediately if the type argument
+// count is incorrect; for *Named types, a panic may occur later inside the
+// *Named API.
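+//
+// A minimal usage sketch (assuming a generic *Named type T with a single
+// type parameter):
+//
+//	inst, err := Instantiate(NewContext(), T, []Type{Typ[Int]}, true)
+//	if err != nil {
+//		// instantiation failed, e.g. the type argument did not satisfy its constraint
+//	}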
+func Instantiate(ctxt *Context, orig Type, targs []Type, validate bool) (Type, error) {
+ if validate {
+ var tparams []*TypeParam
+ switch t := orig.(type) {
+ case *Named:
+ tparams = t.TypeParams().list()
+ case *Signature:
+ tparams = t.TypeParams().list()
+ }
+ if len(targs) != len(tparams) {
+ return nil, fmt.Errorf("got %d type arguments but %s has %d type parameters", len(targs), orig, len(tparams))
+ }
+ if i, err := (*Checker)(nil).verify(nopos, tparams, targs); err != nil {
+ return nil, &ArgumentError{i, err}
+ }
+ }
+
+ inst := (*Checker)(nil).instance(nopos, orig, targs, ctxt)
+ return inst, nil
+}
+
+// instance creates a type or function instance using the given original type
+// orig and arguments targs. For Named types the resulting instance will be
+// unexpanded.
+func (check *Checker) instance(pos syntax.Pos, orig Type, targs []Type, ctxt *Context) (res Type) {
+ var h string
+ if ctxt != nil {
+ h = ctxt.instanceHash(orig, targs)
+		// orig may already have been instantiated with identical type arguments. In
+ // that case, re-use the existing instance.
+ if inst := ctxt.lookup(h, orig, targs); inst != nil {
+ return inst
+ }
+ }
+
+ switch orig := orig.(type) {
+ case *Named:
+ tname := NewTypeName(pos, orig.obj.pkg, orig.obj.name, nil)
+ named := check.newNamed(tname, orig, nil, nil, nil) // underlying, tparams, and methods are set when named is resolved
+ named.targs = newTypeList(targs)
+ named.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
+ return expandNamed(ctxt, n, pos)
+ }
+ res = named
+
+ case *Signature:
+ tparams := orig.TypeParams()
+ if !check.validateTArgLen(pos, tparams.Len(), len(targs)) {
+ return Typ[Invalid]
+ }
+ if tparams.Len() == 0 {
+ return orig // nothing to do (minor optimization)
+ }
+ sig := check.subst(pos, orig, makeSubstMap(tparams.list(), targs), ctxt).(*Signature)
+ // If the signature doesn't use its type parameters, subst
+ // will not make a copy. In that case, make a copy now (so
+ // we can set tparams to nil w/o causing side-effects).
+ if sig == orig {
+ copy := *sig
+ sig = &copy
+ }
+ // After instantiating a generic signature, it is not generic
+ // anymore; we need to set tparams to nil.
+ sig.tparams = nil
+ res = sig
+ default:
+ // only types and functions can be generic
+ panic(fmt.Sprintf("%v: cannot instantiate %v", pos, orig))
+ }
+
+ if ctxt != nil {
+ // It's possible that we've lost a race to add named to the context.
+ // In this case, use whichever instance is recorded in the context.
+ res = ctxt.update(h, orig, targs, res)
+ }
+
+ return res
+}
+
+// validateTArgLen verifies that the length of targs and tparams matches,
+// reporting an error if not. If validation fails and check is nil,
+// validateTArgLen panics.
+func (check *Checker) validateTArgLen(pos syntax.Pos, ntparams, ntargs int) bool {
+ if ntargs != ntparams {
+ // TODO(gri) provide better error message
+ if check != nil {
+ check.errorf(pos, "got %d arguments but %d type parameters", ntargs, ntparams)
+ return false
+ }
+ panic(fmt.Sprintf("%v: got %d arguments but %d type parameters", pos, ntargs, ntparams))
+ }
+ return true
+}
+
+func (check *Checker) verify(pos syntax.Pos, tparams []*TypeParam, targs []Type) (int, error) {
+ smap := makeSubstMap(tparams, targs)
+ for i, tpar := range tparams {
+ // Ensure that we have a (possibly implicit) interface as type bound (issue #51048).
+ tpar.iface()
+ // The type parameter bound is parameterized with the same type parameters
+ // as the instantiated type; before we can use it for bounds checking we
+ // need to instantiate it with the type arguments with which we instantiated
+ // the parameterized type.
+ bound := check.subst(pos, tpar.bound, smap, nil)
+ if err := check.implements(targs[i], bound); err != nil {
+ return i, err
+ }
+ }
+ return -1, nil
+}
+
+// implements checks if V implements T and reports an error if it doesn't.
+// The receiver may be nil if implements is called through an exported
+// API call such as AssignableTo.
+func (check *Checker) implements(V, T Type) error {
+ Vu := under(V)
+ Tu := under(T)
+ if Vu == Typ[Invalid] || Tu == Typ[Invalid] {
+ return nil // avoid follow-on errors
+ }
+ if p, _ := Vu.(*Pointer); p != nil && under(p.base) == Typ[Invalid] {
+ return nil // avoid follow-on errors (see issue #49541 for an example)
+ }
+
+ errorf := func(format string, args ...interface{}) error {
+ return errors.New(check.sprintf(format, args...))
+ }
+
+ Ti, _ := Tu.(*Interface)
+ if Ti == nil {
+ var cause string
+ if isInterfacePtr(Tu) {
+ cause = check.sprintf("type %s is pointer to interface, not interface", T)
+ } else {
+ cause = check.sprintf("%s is not an interface", T)
+ }
+ return errorf("%s does not implement %s (%s)", V, T, cause)
+ }
+
+ // Every type satisfies the empty interface.
+ if Ti.Empty() {
+ return nil
+ }
+ // T is not the empty interface (i.e., the type set of T is restricted)
+
+ // An interface V with an empty type set satisfies any interface.
+ // (The empty set is a subset of any set.)
+ Vi, _ := Vu.(*Interface)
+ if Vi != nil && Vi.typeSet().IsEmpty() {
+ return nil
+ }
+ // type set of V is not empty
+
+ // No type with non-empty type set satisfies the empty type set.
+ if Ti.typeSet().IsEmpty() {
+ return errorf("cannot implement %s (empty type set)", T)
+ }
+
+ // V must implement T's methods, if any.
+ if m, wrong := check.missingMethod(V, Ti, true); m != nil /* !Implements(V, Ti) */ {
+ return errorf("%s does not implement %s %s", V, T, check.missingMethodReason(V, T, m, wrong))
+ }
+
+ // If T is comparable, V must be comparable.
+ // Remember as a pending error and report only if we don't have a more specific error.
+ var pending error
+ if Ti.IsComparable() && !comparable(V, false, nil, nil) {
+ pending = errorf("%s does not implement comparable", V)
+ }
+
+ // V must also be in the set of types of T, if any.
+ // Constraints with empty type sets were already excluded above.
+ if !Ti.typeSet().hasTerms() {
+ return pending // nothing to do
+ }
+
+ // If V is itself an interface, each of its possible types must be in the set
+ // of T types (i.e., the V type set must be a subset of the T type set).
+ // Interfaces V with empty type sets were already excluded above.
+ if Vi != nil {
+ if !Vi.typeSet().subsetOf(Ti.typeSet()) {
+ // TODO(gri) report which type is missing
+ return errorf("%s does not implement %s", V, T)
+ }
+ return pending
+ }
+
+ // Otherwise, V's type must be included in the iface type set.
+ var alt Type
+ if Ti.typeSet().is(func(t *term) bool {
+ if !t.includes(V) {
+ // If V ∉ t.typ but V ∈ ~t.typ then remember this type
+ // so we can suggest it as an alternative in the error
+ // message.
+ if alt == nil && !t.tilde && Identical(t.typ, under(t.typ)) {
+ tt := *t
+ tt.tilde = true
+ if tt.includes(V) {
+ alt = t.typ
+ }
+ }
+ return true
+ }
+ return false
+ }) {
+ if alt != nil {
+ return errorf("%s does not implement %s (possibly missing ~ for %s in constraint %s)", V, T, alt, T)
+ } else {
+ return errorf("%s does not implement %s", V, T)
+ }
+ }
+
+ return pending
+}
diff --git a/src/cmd/compile/internal/types2/instantiate_test.go b/src/cmd/compile/internal/types2/instantiate_test.go
new file mode 100644
index 0000000..591b467
--- /dev/null
+++ b/src/cmd/compile/internal/types2/instantiate_test.go
@@ -0,0 +1,247 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ . "cmd/compile/internal/types2"
+ "strings"
+ "testing"
+)
+
+func TestInstantiateEquality(t *testing.T) {
+ emptySignature := NewSignatureType(nil, nil, nil, nil, nil, false)
+ tests := []struct {
+ src string
+ name1 string
+ targs1 []Type
+ name2 string
+ targs2 []Type
+ wantEqual bool
+ }{
+ {
+ "package basictype; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differenttypeargs; type T[P any] int",
+ "T", []Type{Typ[Int]},
+ "T", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package typeslice; type T[P any] int",
+ "T", []Type{NewSlice(Typ[Int])},
+ "T", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ // interface{interface{...}} is equivalent to interface{...}
+ "package equivalentinterfaces; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ "T", []Type{
+ NewInterfaceType(
+ nil,
+ []Type{
+ NewInterfaceType([]*Func{NewFunc(nopos, nil, "M", emptySignature)}, nil),
+ },
+ ),
+ },
+ true,
+ },
+ {
+ // int|string is equivalent to string|int
+ "package equivalenttypesets; type T[P any] int",
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[Int]), NewTerm(false, Typ[String])}),
+ }),
+ },
+ "T", []Type{
+ NewInterfaceType(nil, []Type{
+ NewUnion([]*Term{NewTerm(false, Typ[String]), NewTerm(false, Typ[Int])}),
+ }),
+ },
+ true,
+ },
+ {
+ "package basicfunc; func F[P any]() {}",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package funcslice; func F[P any]() {}",
+ "F", []Type{NewSlice(Typ[Int])},
+ "F", []Type{NewSlice(Typ[Int])},
+ true,
+ },
+ {
+ "package funcwithparams; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[Int]},
+ true,
+ },
+ {
+ "package differentfuncargs; func F[P any](x string) float64 { return 0 }",
+ "F", []Type{Typ[Int]},
+ "F", []Type{Typ[String]},
+ false,
+ },
+ {
+ "package funcequality; func F1[P any](x int) {}; func F2[Q any](x int) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
+ {
+ "package funcsymmetry; func F1[P any](x P) {}; func F2[Q any](x Q) {}",
+ "F1", []Type{Typ[Int]},
+ "F2", []Type{Typ[Int]},
+ false,
+ },
+ }
+
+ for _, test := range tests {
+ pkg, err := pkgFor(".", test.src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ t.Run(pkg.Name(), func(t *testing.T) {
+ ctxt := NewContext()
+
+ T1 := pkg.Scope().Lookup(test.name1).Type()
+ res1, err := Instantiate(ctxt, T1, test.targs1, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ T2 := pkg.Scope().Lookup(test.name2).Type()
+ res2, err := Instantiate(ctxt, T2, test.targs2, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ if gotEqual := res1 == res2; gotEqual != test.wantEqual {
+ t.Errorf("%s == %s: %t, want %t", res1, res2, gotEqual, test.wantEqual)
+ }
+ })
+ }
+}
+
+func TestInstantiateNonEquality(t *testing.T) {
+ const src = "package p; type T[P any] int"
+ pkg1, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ pkg2, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ // We consider T1 and T2 to be distinct types, so their instances should not
+ // be deduplicated by the context.
+ T1 := pkg1.Scope().Lookup("T").Type().(*Named)
+ T2 := pkg2.Scope().Lookup("T").Type().(*Named)
+ ctxt := NewContext()
+ res1, err := Instantiate(ctxt, T1, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ res2, err := Instantiate(ctxt, T2, []Type{Typ[Int]}, false)
+ if err != nil {
+ t.Fatal(err)
+ }
+ if res1 == res2 {
+ t.Errorf("instance from pkg1 (%s) is pointer-equivalent to instance from pkg2 (%s)", res1, res2)
+ }
+ if Identical(res1, res2) {
+ t.Errorf("instance from pkg1 (%s) is identical to instance from pkg2 (%s)", res1, res2)
+ }
+}
+
+func TestMethodInstantiation(t *testing.T) {
+ const prefix = `package p
+
+type T[P any] struct{}
+
+var X T[int]
+
+`
+ tests := []struct {
+ decl string
+ want string
+ }{
+ {"func (r T[P]) m() P", "func (T[int]).m() int"},
+ {"func (r T[P]) m(P)", "func (T[int]).m(int)"},
+ {"func (r *T[P]) m(P)", "func (*T[int]).m(int)"},
+ {"func (r T[P]) m() T[P]", "func (T[int]).m() T[int]"},
+ {"func (r T[P]) m(T[P])", "func (T[int]).m(T[int])"},
+ {"func (r T[P]) m(T[P], P, string)", "func (T[int]).m(T[int], int, string)"},
+ {"func (r T[P]) m(T[P], T[string], T[int])", "func (T[int]).m(T[int], T[string], T[int])"},
+ }
+
+ for _, test := range tests {
+ src := prefix + test.decl
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ typ := NewPointer(pkg.Scope().Lookup("X").Type())
+ obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m")
+ m, _ := obj.(*Func)
+ if m == nil {
+ t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want func m`, typ, obj)
+ }
+ if got := ObjectString(m, RelativeTo(pkg)); got != test.want {
+ t.Errorf("instantiated %q, want %q", got, test.want)
+ }
+ }
+}
+
+func TestImmutableSignatures(t *testing.T) {
+ const src = `package p
+
+type T[P any] struct{}
+
+func (T[P]) m() {}
+
+var _ T[int]
+`
+ pkg, err := pkgFor(".", src, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+ typ := pkg.Scope().Lookup("T").Type().(*Named)
+ obj, _, _ := LookupFieldOrMethod(typ, false, pkg, "m")
+ if obj == nil {
+ t.Fatalf(`LookupFieldOrMethod(%s, "m") = %v, want func m`, typ, obj)
+ }
+
+ // Verify that the original method is not mutated by instantiating T (this
+ // bug manifested when subst did not return a new signature).
+ want := "func (T[P]).m()"
+ if got := stripAnnotations(ObjectString(obj, RelativeTo(pkg))); got != want {
+ t.Errorf("instantiated %q, want %q", got, want)
+ }
+}
+
+// Copied from errors.go.
+func stripAnnotations(s string) string {
+ var b strings.Builder
+ for _, r := range s {
+		// strip subscript digits ('₀' through '₉')
+ if r < '₀' || '₀'+10 <= r { // '₀' == U+2080
+ b.WriteRune(r)
+ }
+ }
+ if b.Len() < len(s) {
+ return b.String()
+ }
+ return s
+}
diff --git a/src/cmd/compile/internal/types2/interface.go b/src/cmd/compile/internal/types2/interface.go
new file mode 100644
index 0000000..75597ab
--- /dev/null
+++ b/src/cmd/compile/internal/types2/interface.go
@@ -0,0 +1,185 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// An Interface represents an interface type.
+type Interface struct {
+ check *Checker // for error reporting; nil once type set is computed
+ obj *TypeName // corresponding declared object; or nil (for better error messages)
+ methods []*Func // ordered list of explicitly declared methods
+ embeddeds []Type // ordered list of explicitly embedded elements
+ embedPos *[]syntax.Pos // positions of embedded elements; or nil (for error messages) - use pointer to save space
+ implicit bool // interface is wrapper for type set literal (non-interface T, ~T, or A|B)
+ complete bool // indicates that all fields (except for tset) are set up
+
+ tset *_TypeSet // type set described by this interface, computed lazily
+}
+
+// typeSet returns the type set for interface t.
+func (t *Interface) typeSet() *_TypeSet { return computeInterfaceTypeSet(t.check, nopos, t) }
+
+// emptyInterface represents the empty interface
+var emptyInterface = Interface{complete: true, tset: &topTypeSet}
+
+// NewInterfaceType returns a new interface for the given methods and embedded types.
+// NewInterfaceType takes ownership of the provided methods and may modify their types
+// by setting missing receivers.
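+//
+// A minimal sketch (assuming an existing method object m, for example one
+// created with NewFunc):
+//
+//	iface := NewInterfaceType([]*Func{m}, nil)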
+func NewInterfaceType(methods []*Func, embeddeds []Type) *Interface {
+ if len(methods) == 0 && len(embeddeds) == 0 {
+ return &emptyInterface
+ }
+
+ // set method receivers if necessary
+ typ := (*Checker)(nil).newInterface()
+ for _, m := range methods {
+ if sig := m.typ.(*Signature); sig.recv == nil {
+ sig.recv = NewVar(m.pos, m.pkg, "", typ)
+ }
+ }
+
+ // sort for API stability
+ sortMethods(methods)
+
+ typ.methods = methods
+ typ.embeddeds = embeddeds
+ typ.complete = true
+
+ return typ
+}
+
+// check may be nil
+func (check *Checker) newInterface() *Interface {
+ typ := &Interface{check: check}
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+// MarkImplicit marks the interface t as implicit, meaning this interface
+// corresponds to a constraint literal such as ~T or A|B without explicit
+// interface embedding. MarkImplicit should be called before any concurrent use
+// of implicit interfaces.
+func (t *Interface) MarkImplicit() {
+ t.implicit = true
+}
+
+// NumExplicitMethods returns the number of explicitly declared methods of interface t.
+func (t *Interface) NumExplicitMethods() int { return len(t.methods) }
+
+// ExplicitMethod returns the i'th explicitly declared method of interface t for 0 <= i < t.NumExplicitMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) ExplicitMethod(i int) *Func { return t.methods[i] }
+
+// NumEmbeddeds returns the number of embedded types in interface t.
+func (t *Interface) NumEmbeddeds() int { return len(t.embeddeds) }
+
+// EmbeddedType returns the i'th embedded type of interface t for 0 <= i < t.NumEmbeddeds().
+func (t *Interface) EmbeddedType(i int) Type { return t.embeddeds[i] }
+
+// NumMethods returns the total number of methods of interface t.
+func (t *Interface) NumMethods() int { return t.typeSet().NumMethods() }
+
+// Method returns the i'th method of interface t for 0 <= i < t.NumMethods().
+// The methods are ordered by their unique Id.
+func (t *Interface) Method(i int) *Func { return t.typeSet().Method(i) }
+
+// Empty reports whether t is the empty interface.
+func (t *Interface) Empty() bool { return t.typeSet().IsAll() }
+
+// IsComparable reports whether each type in interface t's type set is comparable.
+func (t *Interface) IsComparable() bool { return t.typeSet().IsComparable(nil) }
+
+// IsMethodSet reports whether the interface t is fully described by its method set.
+func (t *Interface) IsMethodSet() bool { return t.typeSet().IsMethodSet() }
+
+// IsImplicit reports whether the interface t is a wrapper for a type set literal.
+func (t *Interface) IsImplicit() bool { return t.implicit }
+
+func (t *Interface) Underlying() Type { return t }
+func (t *Interface) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (t *Interface) cleanup() {
+ t.check = nil
+ t.embedPos = nil
+}
+
+func (check *Checker) interfaceType(ityp *Interface, iface *syntax.InterfaceType, def *Named) {
+ addEmbedded := func(pos syntax.Pos, typ Type) {
+ ityp.embeddeds = append(ityp.embeddeds, typ)
+ if ityp.embedPos == nil {
+ ityp.embedPos = new([]syntax.Pos)
+ }
+ *ityp.embedPos = append(*ityp.embedPos, pos)
+ }
+
+ for _, f := range iface.MethodList {
+ if f.Name == nil {
+ addEmbedded(posFor(f.Type), parseUnion(check, f.Type))
+ continue
+ }
+ // f.Name != nil
+
+ // We have a method with name f.Name.
+ name := f.Name.Value
+ if name == "_" {
+ if check.conf.CompilerErrorMessages {
+ check.error(f.Name, "methods must have a unique non-blank name")
+ } else {
+ check.error(f.Name, "invalid method name _")
+ }
+ continue // ignore
+ }
+
+ typ := check.typ(f.Type)
+ sig, _ := typ.(*Signature)
+ if sig == nil {
+ if typ != Typ[Invalid] {
+ check.errorf(f.Type, invalidAST+"%s is not a method signature", typ)
+ }
+ continue // ignore
+ }
+
+ // use named receiver type if available (for better error messages)
+ var recvTyp Type = ityp
+ if def != nil {
+ recvTyp = def
+ }
+ sig.recv = NewVar(f.Name.Pos(), check.pkg, "", recvTyp)
+
+ m := NewFunc(f.Name.Pos(), check.pkg, name, sig)
+ check.recordDef(f.Name, m)
+ ityp.methods = append(ityp.methods, m)
+ }
+
+ // All methods and embedded elements for this interface are collected;
+ // i.e., this interface may be used in a type set computation.
+ ityp.complete = true
+
+ if len(ityp.methods) == 0 && len(ityp.embeddeds) == 0 {
+ // empty interface
+ ityp.tset = &topTypeSet
+ return
+ }
+
+ // sort for API stability
+ // (don't sort embeddeds: they must correspond to *embedPos entries)
+ sortMethods(ityp.methods)
+
+ // Compute type set as soon as possible to report any errors.
+ // Subsequent uses of type sets will use this computed type
+ // set and won't need to pass in a *Checker.
+ check.later(func() {
+ computeInterfaceTypeSet(check, iface.Pos(), ityp)
+ }).describef(iface, "compute type set for %s", ityp)
+}
diff --git a/src/cmd/compile/internal/types2/issues_test.go b/src/cmd/compile/internal/types2/issues_test.go
new file mode 100644
index 0000000..fc4aeb1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/issues_test.go
@@ -0,0 +1,676 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements tests for various issues.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "internal/testenv"
+ "sort"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+func mustParse(t *testing.T, src string) *syntax.File {
+ f, err := parseSrc("", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ return f
+}
+
+func TestIssue5770(t *testing.T) {
+ f := mustParse(t, `package p; type S struct{T}`)
+ var conf Config
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // do not crash
+ want := "undeclared name: T"
+ if err == nil || !strings.Contains(err.Error(), want) {
+ t.Errorf("got: %v; want: %s", err, want)
+ }
+}
+
+func TestIssue5849(t *testing.T) {
+ src := `
+package p
+var (
+ s uint
+ _ = uint8(8)
+ _ = uint16(16) << s
+ _ = uint32(32 << s)
+ _ = uint64(64 << s + s)
+ _ = (interface{})("foo")
+ _ = (interface{})(nil)
+)`
+ f := mustParse(t, src)
+
+ var conf Config
+ types := make(map[syntax.Expr]TypeAndValue)
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Types: types})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ for x, tv := range types {
+ var want Type
+ switch x := x.(type) {
+ case *syntax.BasicLit:
+ switch x.Value {
+ case `8`:
+ want = Typ[Uint8]
+ case `16`:
+ want = Typ[Uint16]
+ case `32`:
+ want = Typ[Uint32]
+ case `64`:
+ want = Typ[Uint] // because of "+ s", s is of type uint
+ case `"foo"`:
+ want = Typ[String]
+ }
+ case *syntax.Name:
+ if x.Value == "nil" {
+ want = NewInterfaceType(nil, nil) // interface{} (for now, go/types types this as "untyped nil")
+ }
+ }
+ if want != nil && !Identical(tv.Type, want) {
+ t.Errorf("got %s; want %s", tv.Type, want)
+ }
+ }
+}
+
+func TestIssue6413(t *testing.T) {
+ src := `
+package p
+func f() int {
+ defer f()
+ go f()
+ return 0
+}
+`
+ f := mustParse(t, src)
+
+ var conf Config
+ types := make(map[syntax.Expr]TypeAndValue)
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Types: types})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ want := Typ[Int]
+ n := 0
+ for x, tv := range types {
+ if _, ok := x.(*syntax.CallExpr); ok {
+ if tv.Type != want {
+ t.Errorf("%s: got %s; want %s", x.Pos(), tv.Type, want)
+ }
+ n++
+ }
+ }
+
+ if n != 2 {
+ t.Errorf("got %d CallExprs; want 2", n)
+ }
+}
+
+func TestIssue7245(t *testing.T) {
+ src := `
+package p
+func (T) m() (res bool) { return }
+type T struct{} // receiver type after method declaration
+`
+ f := mustParse(t, src)
+
+ var conf Config
+ defs := make(map[*syntax.Name]Object)
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Defs: defs})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ m := f.DeclList[0].(*syntax.FuncDecl)
+ res1 := defs[m.Name].(*Func).Type().(*Signature).Results().At(0)
+ res2 := defs[m.Type.ResultList[0].Name].(*Var)
+
+ if res1 != res2 {
+ t.Errorf("got %s (%p) != %s (%p)", res1, res2, res1, res2)
+ }
+}
+
+// This tests that uses of existing vars on the LHS of an assignment
+// are Uses, not Defs; and also that the (illegal) use of a non-var on
+// the LHS of an assignment is a Use nonetheless.
+func TestIssue7827(t *testing.T) {
+ const src = `
+package p
+func _() {
+ const w = 1 // defs w
+ x, y := 2, 3 // defs x, y
+ w, x, z := 4, 5, 6 // uses w, x, defs z; error: cannot assign to w
+ _, _, _ = x, y, z // uses x, y, z
+}
+`
+ f := mustParse(t, src)
+
+ const want = `L3 defs func p._()
+L4 defs const w untyped int
+L5 defs var x int
+L5 defs var y int
+L6 defs var z int
+L6 uses const w untyped int
+L6 uses var x int
+L7 uses var x int
+L7 uses var y int
+L7 uses var z int`
+
+ // don't abort at the first error
+ conf := Config{Error: func(err error) { t.Log(err) }}
+ defs := make(map[*syntax.Name]Object)
+ uses := make(map[*syntax.Name]Object)
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, &Info{Defs: defs, Uses: uses})
+ if s := fmt.Sprint(err); !strings.HasSuffix(s, "cannot assign to w") {
+ t.Errorf("Check: unexpected error: %s", s)
+ }
+
+ var facts []string
+ for id, obj := range defs {
+ if obj != nil {
+ fact := fmt.Sprintf("L%d defs %s", id.Pos().Line(), obj)
+ facts = append(facts, fact)
+ }
+ }
+ for id, obj := range uses {
+ fact := fmt.Sprintf("L%d uses %s", id.Pos().Line(), obj)
+ facts = append(facts, fact)
+ }
+ sort.Strings(facts)
+
+ got := strings.Join(facts, "\n")
+ if got != want {
+ t.Errorf("Unexpected defs/uses\ngot:\n%s\nwant:\n%s", got, want)
+ }
+}
+
+// This tests that the package associated with the types2.Object.Pkg method
+// is the type's package independent of the order in which the imports are
+// listed in the sources src1, src2 below.
+// The actual issue is in go/internal/gcimporter which has a corresponding
+// test; we leave this test here to verify correct behavior at the go/types
+// level.
+func TestIssue13898(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ const src0 = `
+package main
+
+import "go/types"
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ // like src0, but also imports go/importer
+ const src1 = `
+package main
+
+import (
+ "go/types"
+ _ "go/importer"
+)
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ // like src1 but with different import order
+ // (used to fail with this issue)
+ const src2 = `
+package main
+
+import (
+ _ "go/importer"
+ "go/types"
+)
+
+func main() {
+ var info types.Info
+ for _, obj := range info.Uses {
+ _ = obj.Pkg()
+ }
+}
+`
+ f := func(test, src string) {
+ f := mustParse(t, src)
+ conf := Config{Importer: defaultImporter()}
+ info := Info{Uses: make(map[*syntax.Name]Object)}
+ _, err := conf.Check("main", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ var pkg *Package
+ count := 0
+ for id, obj := range info.Uses {
+ if id.Value == "Pkg" {
+ pkg = obj.Pkg()
+ count++
+ }
+ }
+ if count != 1 {
+ t.Fatalf("%s: got %d entries named Pkg; want 1", test, count)
+ }
+ if pkg.Name() != "types" {
+			t.Fatalf("%s: got %v; want package types", test, pkg)
+ }
+ }
+
+ f("src0", src0)
+ f("src1", src1)
+ f("src2", src2)
+}
+
+func TestIssue22525(t *testing.T) {
+ f := mustParse(t, `package p; func f() { var a, b, c, d, e int }`)
+
+ got := "\n"
+ conf := Config{Error: func(err error) { got += err.Error() + "\n" }}
+ conf.Check(f.PkgName.Value, []*syntax.File{f}, nil) // do not crash
+ want := `
+:1:27: a declared but not used
+:1:30: b declared but not used
+:1:33: c declared but not used
+:1:36: d declared but not used
+:1:39: e declared but not used
+`
+ if got != want {
+ t.Errorf("got: %swant: %s", got, want)
+ }
+}
+
+func TestIssue25627(t *testing.T) {
+ const prefix = `package p; import "unsafe"; type P *struct{}; type I interface{}; type T `
+ // The src strings (without prefix) are constructed such that the number of semicolons
+ // plus one corresponds to the number of fields expected in the respective struct.
+ for _, src := range []string{
+ `struct { x Missing }`,
+ `struct { Missing }`,
+ `struct { *Missing }`,
+ `struct { unsafe.Pointer }`,
+ `struct { P }`,
+ `struct { *I }`,
+ `struct { a int; b Missing; *Missing }`,
+ } {
+ f := mustParse(t, prefix+src)
+
+ conf := Config{Importer: defaultImporter(), Error: func(err error) {}}
+ info := &Info{Types: make(map[syntax.Expr]TypeAndValue)}
+ _, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, info)
+ if err != nil {
+ if _, ok := err.(Error); !ok {
+ t.Fatal(err)
+ }
+ }
+
+ syntax.Crawl(f, func(n syntax.Node) bool {
+ if decl, _ := n.(*syntax.TypeDecl); decl != nil {
+ if tv, ok := info.Types[decl.Type]; ok && decl.Name.Value == "T" {
+ want := strings.Count(src, ";") + 1
+ if got := tv.Type.(*Struct).NumFields(); got != want {
+ t.Errorf("%s: got %d fields; want %d", src, got, want)
+ }
+ }
+ }
+ return false
+ })
+ }
+}
+
+func TestIssue28005(t *testing.T) {
+ // method names must match defining interface name for this test
+ // (see last comment in this function)
+ sources := [...]string{
+ "package p; type A interface{ A() }",
+ "package p; type B interface{ B() }",
+ "package p; type X interface{ A; B }",
+ }
+
+ // compute original file ASTs
+ var orig [len(sources)]*syntax.File
+ for i, src := range sources {
+ orig[i] = mustParse(t, src)
+ }
+
+ // run the test for all order permutations of the incoming files
+ for _, perm := range [][len(sources)]int{
+ {0, 1, 2},
+ {0, 2, 1},
+ {1, 0, 2},
+ {1, 2, 0},
+ {2, 0, 1},
+ {2, 1, 0},
+ } {
+ // create file order permutation
+ files := make([]*syntax.File, len(sources))
+ for i := range perm {
+ files[i] = orig[perm[i]]
+ }
+
+ // type-check package with given file order permutation
+ var conf Config
+ info := &Info{Defs: make(map[*syntax.Name]Object)}
+ _, err := conf.Check("", files, info)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // look for interface object X
+ var obj Object
+ for name, def := range info.Defs {
+ if name.Value == "X" {
+ obj = def
+ break
+ }
+ }
+ if obj == nil {
+ t.Fatal("object X not found")
+ }
+ iface := obj.Type().Underlying().(*Interface) // object X must be an interface
+
+ // Each iface method m is embedded; and m's receiver base type name
+ // must match the method's name per the choice in the source file.
+ for i := 0; i < iface.NumMethods(); i++ {
+ m := iface.Method(i)
+ recvName := m.Type().(*Signature).Recv().Type().(*Named).Obj().Name()
+ if recvName != m.Name() {
+ t.Errorf("perm %v: got recv %s; want %s", perm, recvName, m.Name())
+ }
+ }
+ }
+}
+
+func TestIssue28282(t *testing.T) {
+ // create type interface { error }
+ et := Universe.Lookup("error").Type()
+ it := NewInterfaceType(nil, []Type{et})
+ // verify that after completing the interface, the embedded method remains unchanged
+ // (interfaces are "completed" lazily now, so the completion happens implicitly when
+ // accessing Method(0))
+ want := et.Underlying().(*Interface).Method(0)
+ got := it.Method(0)
+ if got != want {
+ t.Fatalf("%s.Method(0): got %q (%p); want %q (%p)", it, got, got, want, want)
+ }
+ // verify that lookup finds the same method in both interfaces (redundant check)
+ obj, _, _ := LookupFieldOrMethod(et, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", et, obj, obj, want, want)
+ }
+ obj, _, _ = LookupFieldOrMethod(it, false, nil, "Error")
+ if obj != want {
+ t.Fatalf("%s.Lookup: got %q (%p); want %q (%p)", it, obj, obj, want, want)
+ }
+}
+
+func TestIssue29029(t *testing.T) {
+ f1 := mustParse(t, `package p; type A interface { M() }`)
+ f2 := mustParse(t, `package p; var B interface { A }`)
+
+ // printInfo prints the *Func definitions recorded in info, one *Func per line.
+ printInfo := func(info *Info) string {
+ var buf bytes.Buffer
+ for _, obj := range info.Defs {
+ if fn, ok := obj.(*Func); ok {
+ fmt.Fprintln(&buf, fn)
+ }
+ }
+ return buf.String()
+ }
+
+ // The *Func (method) definitions for package p must be the same
+ // independent of whether f1 and f2 are type-checked together, or
+ // incrementally.
+
+ // type-check together
+ var conf Config
+ info := &Info{Defs: make(map[*syntax.Name]Object)}
+ check := NewChecker(&conf, NewPackage("", "p"), info)
+ if err := check.Files([]*syntax.File{f1, f2}); err != nil {
+ t.Fatal(err)
+ }
+ want := printInfo(info)
+
+ // type-check incrementally
+ info = &Info{Defs: make(map[*syntax.Name]Object)}
+ check = NewChecker(&conf, NewPackage("", "p"), info)
+ if err := check.Files([]*syntax.File{f1}); err != nil {
+ t.Fatal(err)
+ }
+ if err := check.Files([]*syntax.File{f2}); err != nil {
+ t.Fatal(err)
+ }
+ got := printInfo(info)
+
+ if got != want {
+ t.Errorf("\ngot : %swant: %s", got, want)
+ }
+}
+
+func TestIssue34151(t *testing.T) {
+ const asrc = `package a; type I interface{ M() }; type T struct { F interface { I } }`
+ const bsrc = `package b; import "a"; type T struct { F interface { a.I } }; var _ = a.T(T{})`
+
+ a, err := pkgFor("a", asrc, nil)
+ if err != nil {
+ t.Fatalf("package %s failed to typecheck: %v", a.Name(), err)
+ }
+
+ bast := mustParse(t, bsrc)
+ conf := Config{Importer: importHelper{pkg: a}}
+ b, err := conf.Check(bast.PkgName.Value, []*syntax.File{bast}, nil)
+ if err != nil {
+ t.Errorf("package %s failed to typecheck: %v", b.Name(), err)
+ }
+}
+
+type importHelper struct {
+ pkg *Package
+ fallback Importer
+}
+
+func (h importHelper) Import(path string) (*Package, error) {
+ if path == h.pkg.Path() {
+ return h.pkg, nil
+ }
+ if h.fallback == nil {
+ return nil, fmt.Errorf("got package path %q; want %q", path, h.pkg.Path())
+ }
+ return h.fallback.Import(path)
+}
+
+// TestIssue34921 verifies that we don't update an imported type's underlying
+// type when resolving an underlying type. Specifically, when determining the
+// underlying type of b.T (which is the underlying type of a.T, which is int)
+// we must not set the underlying type of a.T again since that would lead to
+// a race condition if package b is imported elsewhere, in a package that is
+// concurrently type-checked.
+func TestIssue34921(t *testing.T) {
+ defer func() {
+ if r := recover(); r != nil {
+ t.Error(r)
+ }
+ }()
+
+ var sources = []string{
+ `package a; type T int`,
+ `package b; import "a"; type T a.T`,
+ }
+
+ var pkg *Package
+ for _, src := range sources {
+ f := mustParse(t, src)
+ conf := Config{Importer: importHelper{pkg: pkg}}
+ res, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+ if err != nil {
+ t.Errorf("%q failed to typecheck: %v", src, err)
+ }
+ pkg = res // res is imported by the next package in this test
+ }
+}
+
+func TestIssue43088(t *testing.T) {
+ // type T1 struct {
+ // _ T2
+ // }
+ //
+ // type T2 struct {
+ // _ struct {
+ // _ T2
+ // }
+ // }
+ n1 := NewTypeName(syntax.Pos{}, nil, "T1", nil)
+ T1 := NewNamed(n1, nil, nil)
+ n2 := NewTypeName(syntax.Pos{}, nil, "T2", nil)
+ T2 := NewNamed(n2, nil, nil)
+ s1 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", T2, false)}, nil)
+ T1.SetUnderlying(s1)
+ s2 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", T2, false)}, nil)
+ s3 := NewStruct([]*Var{NewField(syntax.Pos{}, nil, "_", s2, false)}, nil)
+ T2.SetUnderlying(s3)
+
+ // These calls must terminate (no endless recursion).
+ Comparable(T1)
+ Comparable(T2)
+}
+
+func TestIssue44515(t *testing.T) {
+ typ := Unsafe.Scope().Lookup("Pointer").Type()
+
+ got := TypeString(typ, nil)
+ want := "unsafe.Pointer"
+ if got != want {
+ t.Errorf("got %q; want %q", got, want)
+ }
+
+ qf := func(pkg *Package) string {
+ if pkg == Unsafe {
+ return "foo"
+ }
+ return ""
+ }
+ got = TypeString(typ, qf)
+ want = "foo.Pointer"
+ if got != want {
+ t.Errorf("got %q; want %q", got, want)
+ }
+}
+
+func TestIssue43124(t *testing.T) {
+ // All involved packages have the same name (template). Error messages should
+ // disambiguate between text/template and html/template by printing the full
+ // path.
+ const (
+ asrc = `package a; import "text/template"; func F(template.Template) {}; func G(int) {}`
+ bsrc = `package b; import ("a"; "html/template"); func _() { a.F(template.Template{}) }`
+ csrc = `package c; import ("a"; "html/template"); func _() { a.G(template.Template{}) }`
+ )
+
+ a, err := pkgFor("a", asrc, nil)
+ if err != nil {
+ t.Fatalf("package a failed to typecheck: %v", err)
+ }
+ conf := Config{Importer: importHelper{pkg: a, fallback: defaultImporter()}}
+
+ // Packages should be fully qualified when there is ambiguity within the
+ // error string itself.
+ bast := mustParse(t, bsrc)
+ _, err = conf.Check(bast.PkgName.Value, []*syntax.File{bast}, nil)
+ if err == nil {
+ t.Fatal("package b had no errors")
+ }
+ if !strings.Contains(err.Error(), "text/template") || !strings.Contains(err.Error(), "html/template") {
+ t.Errorf("type checking error for b does not disambiguate package template: %q", err)
+ }
+
+ // ...and also when there is any ambiguity in reachable packages.
+ cast := mustParse(t, csrc)
+ _, err = conf.Check(cast.PkgName.Value, []*syntax.File{cast}, nil)
+ if err == nil {
+ t.Fatal("package c had no errors")
+ }
+ if !strings.Contains(err.Error(), "html/template") {
+ t.Errorf("type checking error for c does not disambiguate package template: %q", err)
+ }
+}
+
+func TestIssue50646(t *testing.T) {
+ anyType := Universe.Lookup("any").Type()
+ comparableType := Universe.Lookup("comparable").Type()
+
+ if !Comparable(anyType) {
+ t.Errorf("any is not a comparable type")
+ }
+ if !Comparable(comparableType) {
+ t.Errorf("comparable is not a comparable type")
+ }
+
+ if Implements(anyType, comparableType.Underlying().(*Interface)) {
+ t.Errorf("any implements comparable")
+ }
+ if !Implements(comparableType, anyType.(*Interface)) {
+ t.Errorf("comparable does not implement any")
+ }
+
+ if AssignableTo(anyType, comparableType) {
+ t.Errorf("any assignable to comparable")
+ }
+ if !AssignableTo(comparableType, anyType) {
+ t.Errorf("comparable not assignable to any")
+ }
+}
+
+func TestIssue55030(t *testing.T) {
+ // makeSig makes the signature func(typ...)
+ makeSig := func(typ Type) {
+ par := NewVar(nopos, nil, "", typ)
+ params := NewTuple(par)
+ NewSignatureType(nil, nil, nil, params, nil, true)
+ }
+
+ // makeSig must not panic for the following (example) types:
+ // []int
+ makeSig(NewSlice(Typ[Int]))
+
+ // string
+ makeSig(Typ[String])
+
+ // P where P's core type is string
+ {
+ P := NewTypeName(nopos, nil, "P", nil) // [P string]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{Typ[String]})))
+ }
+
+ // P where P's core type is an (unnamed) slice
+ {
+ P := NewTypeName(nopos, nil, "P", nil) // [P []int]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{NewSlice(Typ[Int])})))
+ }
+
+ // P where P's core type is bytestring (i.e., string or []byte)
+ {
+ t1 := NewTerm(true, Typ[String]) // ~string
+ t2 := NewTerm(false, NewSlice(Typ[Byte])) // []byte
+ u := NewUnion([]*Term{t1, t2}) // ~string | []byte
+ P := NewTypeName(nopos, nil, "P", nil) // [P ~string | []byte]
+ makeSig(NewTypeParam(P, NewInterfaceType(nil, []Type{u})))
+ }
+}
diff --git a/src/cmd/compile/internal/types2/labels.go b/src/cmd/compile/internal/types2/labels.go
new file mode 100644
index 0000000..6f02e2f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/labels.go
@@ -0,0 +1,263 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+// labels checks correct label use in body.
+func (check *Checker) labels(body *syntax.BlockStmt) {
+ // set of all labels in this body
+ all := NewScope(nil, body.Pos(), syntax.EndPos(body), "label")
+
+ fwdJumps := check.blockBranches(all, nil, nil, body.List)
+
+ // If there are any forward jumps left, no label was found for
+ // the corresponding goto statements. Either those labels were
+ // never defined, or they are inside blocks and not reachable
+ // for the respective gotos.
+ for _, jmp := range fwdJumps {
+ var msg string
+ name := jmp.Label.Value
+ if alt := all.Lookup(name); alt != nil {
+ msg = "goto %s jumps into block"
+ alt.(*Label).used = true // avoid another error
+ } else {
+ msg = "label %s not declared"
+ }
+ check.errorf(jmp.Label, msg, name)
+ }
+
+ // spec: "It is illegal to define a label that is never used."
+ for name, obj := range all.elems {
+ obj = resolve(name, obj)
+ if lbl := obj.(*Label); !lbl.used {
+ check.softErrorf(lbl.pos, "label %s declared but not used", lbl.name)
+ }
+ }
+}
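+
+// As a rough illustration of the two errors reported above, a hypothetical
+// function body like
+//
+//	func _() {
+//		goto L // "goto L jumps into block": L exists, but only inside the nested block
+//		{
+//		L:
+//			println()
+//		}
+//	M: // "label M declared but not used"
+//		println()
+//	}
+//
+// would be expected to leave the forward jump to L unresolved by
+// blockBranches, and to flag M as declared but never referenced.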
+
+// A block tracks label declarations in a block and its enclosing blocks.
+type block struct {
+ parent *block // enclosing block
+ lstmt *syntax.LabeledStmt // labeled statement to which this block belongs, or nil
+ labels map[string]*syntax.LabeledStmt // allocated lazily
+}
+
+// insert records a new label declaration for the current block.
+// The label must not have been declared before in any block.
+func (b *block) insert(s *syntax.LabeledStmt) {
+ name := s.Label.Value
+ if debug {
+ assert(b.gotoTarget(name) == nil)
+ }
+ labels := b.labels
+ if labels == nil {
+ labels = make(map[string]*syntax.LabeledStmt)
+ b.labels = labels
+ }
+ labels[name] = s
+}
+
+// gotoTarget returns the labeled statement in the current
+// or an enclosing block with the given label name, or nil.
+func (b *block) gotoTarget(name string) *syntax.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.labels[name]; t != nil {
+ return t
+ }
+ }
+ return nil
+}
+
+// enclosingTarget returns the innermost enclosing labeled
+// statement with the given label name, or nil.
+func (b *block) enclosingTarget(name string) *syntax.LabeledStmt {
+ for s := b; s != nil; s = s.parent {
+ if t := s.lstmt; t != nil && t.Label.Value == name {
+ return t
+ }
+ }
+ return nil
+}
+
+// blockBranches processes a block's statement list and returns the set of outgoing forward jumps.
+// all is the scope of all declared labels, parent the set of labels declared in the immediately
+// enclosing block, and lstmt is the labeled statement this block is associated with (or nil).
+func (check *Checker) blockBranches(all *Scope, parent *block, lstmt *syntax.LabeledStmt, list []syntax.Stmt) []*syntax.BranchStmt {
+ b := &block{parent, lstmt, nil}
+
+ var (
+ varDeclPos syntax.Pos
+ fwdJumps, badJumps []*syntax.BranchStmt
+ )
+
+ // All forward jumps jumping over a variable declaration are possibly
+ // invalid (they may still jump out of the block and be ok).
+ // recordVarDecl records them for the given position.
+ recordVarDecl := func(pos syntax.Pos) {
+ varDeclPos = pos
+ badJumps = append(badJumps[:0], fwdJumps...) // copy fwdJumps to badJumps
+ }
+
+ jumpsOverVarDecl := func(jmp *syntax.BranchStmt) bool {
+ if varDeclPos.IsKnown() {
+ for _, bad := range badJumps {
+ if jmp == bad {
+ return true
+ }
+ }
+ }
+ return false
+ }
+
+ var stmtBranches func(syntax.Stmt)
+ stmtBranches = func(s syntax.Stmt) {
+ switch s := s.(type) {
+ case *syntax.DeclStmt:
+ for _, d := range s.DeclList {
+ if d, _ := d.(*syntax.VarDecl); d != nil {
+ recordVarDecl(d.Pos())
+ }
+ }
+
+ case *syntax.LabeledStmt:
+ // declare non-blank label
+ if name := s.Label.Value; name != "_" {
+ lbl := NewLabel(s.Label.Pos(), check.pkg, name)
+ if alt := all.Insert(lbl); alt != nil {
+ var err error_
+ err.soft = true
+ err.errorf(lbl.pos, "label %s already declared", name)
+ err.recordAltDecl(alt)
+ check.report(&err)
+ // ok to continue
+ } else {
+ b.insert(s)
+ check.recordDef(s.Label, lbl)
+ }
+ // resolve matching forward jumps and remove them from fwdJumps
+ i := 0
+ for _, jmp := range fwdJumps {
+ if jmp.Label.Value == name {
+ // match
+ lbl.used = true
+ check.recordUse(jmp.Label, lbl)
+ if jumpsOverVarDecl(jmp) {
+ check.softErrorf(
+ jmp.Label,
+ "goto %s jumps over variable declaration at line %d",
+ name,
+ varDeclPos.Line(),
+ )
+ // ok to continue
+ }
+ } else {
+ // no match - record new forward jump
+ fwdJumps[i] = jmp
+ i++
+ }
+ }
+ fwdJumps = fwdJumps[:i]
+ lstmt = s
+ }
+ stmtBranches(s.Stmt)
+
+ case *syntax.BranchStmt:
+ if s.Label == nil {
+ return // checked in 1st pass (check.stmt)
+ }
+
+ // determine and validate target
+ name := s.Label.Value
+ switch s.Tok {
+ case syntax.Break:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for", "switch", or "select" statement, and that is the one
+ // whose execution terminates."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *syntax.SwitchStmt, *syntax.SelectStmt, *syntax.ForStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label, "invalid break label %s", name)
+ return
+ }
+
+ case syntax.Continue:
+ // spec: "If there is a label, it must be that of an enclosing
+ // "for" statement, and that is the one whose execution advances."
+ valid := false
+ if t := b.enclosingTarget(name); t != nil {
+ switch t.Stmt.(type) {
+ case *syntax.ForStmt:
+ valid = true
+ }
+ }
+ if !valid {
+ check.errorf(s.Label, "invalid continue label %s", name)
+ return
+ }
+
+ case syntax.Goto:
+ if b.gotoTarget(name) == nil {
+ // label may be declared later - add branch to forward jumps
+ fwdJumps = append(fwdJumps, s)
+ return
+ }
+
+ default:
+ check.errorf(s, invalidAST+"branch statement: %s %s", s.Tok, name)
+ return
+ }
+
+ // record label use
+ obj := all.Lookup(name)
+ obj.(*Label).used = true
+ check.recordUse(s.Label, obj)
+
+ case *syntax.AssignStmt:
+ if s.Op == syntax.Def {
+ recordVarDecl(s.Pos())
+ }
+
+ case *syntax.BlockStmt:
+ // Unresolved forward jumps inside the nested block
+ // become forward jumps in the current block.
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, lstmt, s.List)...)
+
+ case *syntax.IfStmt:
+ stmtBranches(s.Then)
+ if s.Else != nil {
+ stmtBranches(s.Else)
+ }
+
+ case *syntax.SwitchStmt:
+ b := &block{b, lstmt, nil}
+ for _, s := range s.Body {
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...)
+ }
+
+ case *syntax.SelectStmt:
+ b := &block{b, lstmt, nil}
+ for _, s := range s.Body {
+ fwdJumps = append(fwdJumps, check.blockBranches(all, b, nil, s.Body)...)
+ }
+
+ case *syntax.ForStmt:
+ stmtBranches(s.Body)
+ }
+ }
+
+ for _, s := range list {
+ stmtBranches(s)
+ }
+
+ return fwdJumps
+}
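+
+// For illustration, in a body like
+//
+//	goto L
+//	var x int
+//	L:
+//		println(x)
+//
+// the branch to L is first recorded as a forward jump, the var declaration
+// marks it as a bad jump via recordVarDecl, and resolving L afterwards would
+// be expected to trigger the "goto L jumps over variable declaration" soft error.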
diff --git a/src/cmd/compile/internal/types2/lookup.go b/src/cmd/compile/internal/types2/lookup.go
new file mode 100644
index 0000000..482b6bd
--- /dev/null
+++ b/src/cmd/compile/internal/types2/lookup.go
@@ -0,0 +1,521 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements various field and method lookup functions.
+
+package types2
+
+import (
+ "bytes"
+ "strings"
+)
+
+// Internal use of LookupFieldOrMethod: If the obj result is a method
+// associated with a concrete (non-interface) type, the method's signature
+// may not be fully set up. Call Checker.objDecl(obj, nil) before accessing
+// the method's type.
+
+// LookupFieldOrMethod looks up a field or method with given package and name
+// in T and returns the corresponding *Var or *Func, an index sequence, and a
+// bool indicating if there were any pointer indirections on the path to the
+// field or method. If addressable is set, T is the type of an addressable
+// variable (only matters for method lookups). T must not be nil.
+//
+// The last index entry is the field or method index in the (possibly embedded)
+// type where the entry was found, either:
+//
+// 1. the list of declared methods of a named type; or
+// 2. the list of all methods (method set) of an interface type; or
+// 3. the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the embedded struct fields
+// traversed to get to the found entry, starting at depth 0.
+//
+// If no entry is found, a nil object is returned. In this case, the returned
+// index and indirect values have the following meaning:
+//
+// - If index != nil, the index sequence points to an ambiguous entry
+// (the same name appeared more than once at the same embedding level).
+//
+// - If indirect is set, a method with a pointer receiver type was found
+// but there was no pointer on the path from the actual receiver type to
+// the method's formal receiver base type, nor was the receiver addressable.
+func LookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string) (obj Object, index []int, indirect bool) {
+ if T == nil {
+ panic("LookupFieldOrMethod on nil type")
+ }
+
+ // Methods cannot be associated to a named pointer type.
+ // (spec: "The type denoted by T is called the receiver base type;
+ // it must not be a pointer or interface type and it must be declared
+ // in the same package as the method.").
+ // Thus, if we have a named pointer type, proceed with the underlying
+ // pointer type but discard the result if it is a method since we would
+ // not have found it for T (see also issue 8590).
+ if t, _ := T.(*Named); t != nil {
+ if p, _ := t.Underlying().(*Pointer); p != nil {
+ obj, index, indirect = lookupFieldOrMethod(p, false, pkg, name, false)
+ if _, ok := obj.(*Func); ok {
+ return nil, nil, false
+ }
+ return
+ }
+ }
+
+ obj, index, indirect = lookupFieldOrMethod(T, addressable, pkg, name, false)
+
+ // If we didn't find anything and if we have a type parameter with a core type,
+ // see if there is a matching field (but not a method, those need to be declared
+ // explicitly in the constraint). If the constraint is a named pointer type (see
+ // above), we are ok here because only fields are accepted as results.
+ const enableTParamFieldLookup = false // see issue #51576
+ if enableTParamFieldLookup && obj == nil && isTypeParam(T) {
+ if t := coreType(T); t != nil {
+ obj, index, indirect = lookupFieldOrMethod(t, addressable, pkg, name, false)
+ if _, ok := obj.(*Var); !ok {
+ obj, index, indirect = nil, nil, false // accept fields (variables) only
+ }
+ }
+ }
+ return
+}
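+
+// For example, given the (hypothetical) declarations
+//
+//	type S struct{ x int }
+//	type T struct{ S }
+//
+// LookupFieldOrMethod(T, false, pkg, "x") would be expected to return the
+// field x of S with index sequence [0, 0]: 0 for the embedded field S within
+// T, then 0 for the field x within S. indirect is false since no pointer was
+// traversed on the path to the field.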
+
+// lookupFieldOrMethod should only be called by LookupFieldOrMethod and missingMethod.
+// If foldCase is true, the lookup for methods will include looking for any method
+// which is equal to 'name' with case folding (used for giving helpful error messages).
+//
+// The resulting object may not be fully type-checked.
+func lookupFieldOrMethod(T Type, addressable bool, pkg *Package, name string, foldCase bool) (obj Object, index []int, indirect bool) {
+ // WARNING: The code in this function is extremely subtle - do not modify casually!
+
+ if name == "_" {
+ return // blank fields/methods are never found
+ }
+
+ typ, isPtr := deref(T)
+
+ // *typ where typ is an interface (incl. a type parameter) has no methods.
+ if isPtr {
+ if _, ok := under(typ).(*Interface); ok {
+ return
+ }
+ }
+
+ // Start with typ as single entry at shallowest depth.
+ current := []embeddedType{{typ, nil, isPtr, false}}
+
+ // seen tracks named types that we have seen already, allocated lazily.
+ // Used to avoid endless searches in case of recursive types.
+ //
+ // We must use a lookup on identity rather than a simple map[*Named]bool as
+ // instantiated types may be identical but not equal.
+ var seen instanceLookup
+
+ // search current depth
+ for len(current) > 0 {
+ var next []embeddedType // embedded types found at current depth
+
+ // look for (pkg, name) in all types at current depth
+ for _, e := range current {
+ typ := e.typ
+
+ // If we have a named type, we may have associated methods.
+ // Look for those first.
+ if named, _ := typ.(*Named); named != nil {
+ if alt := seen.lookup(named); alt != nil {
+ // We have seen this type before, at a more shallow depth
+ // (note that multiples of this type at the current depth
+ // were consolidated before). The type at that depth shadows
+ // this same type at the current depth, so we can ignore
+ // this one.
+ continue
+ }
+ seen.add(named)
+
+ // look for a matching attached method
+ named.resolve(nil)
+ if i, m := named.lookupMethod(pkg, name, foldCase); m != nil {
+ // potential match
+ // caution: method may not have a proper signature yet
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ continue // we can't have a matching field or interface method
+ }
+ }
+
+ switch t := under(typ).(type) {
+ case *Struct:
+ // look for a matching field and collect embedded types
+ for i, f := range t.fields {
+ if f.sameId(pkg, name) {
+ assert(f.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = f
+ indirect = e.indirect
+ continue // we can't have a matching interface method
+ }
+ // Collect embedded struct fields for searching the next
+ // lower depth, but only if we have not seen a match yet
+ // (if we have a match it is either the desired field or
+ // we have a name collision on the same depth; in either
+ // case we don't need to look further).
+ // Embedded fields are always of the form T or *T where
+ // T is a type name. If e.typ appeared multiple times at
+ // this depth, f.typ appears multiple times at the next
+ // depth.
+ if obj == nil && f.embedded {
+ typ, isPtr := deref(f.typ)
+ // TODO(gri) optimization: ignore types that can't
+ // have fields or methods (only Named, Struct, and
+ // Interface types need to be considered).
+ next = append(next, embeddedType{typ, concat(e.index, i), e.indirect || isPtr, e.multiples})
+ }
+ }
+
+ case *Interface:
+ // look for a matching method (interface may be a type parameter)
+ if i, m := t.typeSet().LookupMethod(pkg, name, foldCase); m != nil {
+ assert(m.typ != nil)
+ index = concat(e.index, i)
+ if obj != nil || e.multiples {
+ return nil, index, false // collision
+ }
+ obj = m
+ indirect = e.indirect
+ }
+ }
+ }
+
+ if obj != nil {
+ // found a potential match
+ // spec: "A method call x.m() is valid if the method set of (the type of) x
+ // contains m and the argument list can be assigned to the parameter
+ // list of m. If x is addressable and &x's method set contains m, x.m()
+ // is shorthand for (&x).m()".
+ if f, _ := obj.(*Func); f != nil {
+ // determine if method has a pointer receiver
+ if f.hasPtrRecv() && !indirect && !addressable {
+ return nil, nil, true // pointer/addressable receiver required
+ }
+ }
+ return
+ }
+
+ current = consolidateMultiples(next)
+ }
+
+ return nil, nil, false // not found
+}
+
+// embeddedType represents an embedded type
+type embeddedType struct {
+ typ Type
+ index []int // embedded field indices, starting with index at depth 0
+ indirect bool // if set, there was a pointer indirection on the path to this field
+ multiples bool // if set, typ appears multiple times at this depth
+}
+
+// consolidateMultiples collects multiple list entries with the same type
+// into a single entry marked as containing multiples. The result is the
+// consolidated list.
+func consolidateMultiples(list []embeddedType) []embeddedType {
+ if len(list) <= 1 {
+ return list // at most one entry - nothing to do
+ }
+
+ n := 0 // number of entries w/ unique type
+ prev := make(map[Type]int) // index at which type was previously seen
+ for _, e := range list {
+ if i, found := lookupType(prev, e.typ); found {
+ list[i].multiples = true
+ // ignore this entry
+ } else {
+ prev[e.typ] = n
+ list[n] = e
+ n++
+ }
+ }
+ return list[:n]
+}
+
+func lookupType(m map[Type]int, typ Type) (int, bool) {
+ // fast path: maybe the types are equal
+ if i, found := m[typ]; found {
+ return i, true
+ }
+
+ for t, i := range m {
+ if Identical(t, typ) {
+ return i, true
+ }
+ }
+
+ return 0, false
+}
+
+type instanceLookup struct {
+ m map[*Named][]*Named
+}
+
+func (l *instanceLookup) lookup(inst *Named) *Named {
+ for _, t := range l.m[inst.Origin()] {
+ if Identical(inst, t) {
+ return t
+ }
+ }
+ return nil
+}
+
+func (l *instanceLookup) add(inst *Named) {
+ if l.m == nil {
+ l.m = make(map[*Named][]*Named)
+ }
+ insts := l.m[inst.Origin()]
+ l.m[inst.Origin()] = append(insts, inst)
+}
+
+// MissingMethod returns (nil, false) if V implements T, otherwise it
+// returns a missing method required by T and whether it is missing or
+// just has the wrong type.
+//
+// For non-interface types V, or if static is set, V implements T if all
+// methods of T are present in V. Otherwise (V is an interface and static
+// is not set), MissingMethod only checks that methods of T which are also
+// present in V have matching types (e.g., for a type assertion x.(T) where
+// x is of interface type V).
+func MissingMethod(V Type, T *Interface, static bool) (method *Func, wrongType bool) {
+ m, alt := (*Checker)(nil).missingMethod(V, T, static)
+ // Only report a wrong type if the alternative method has the same name as m.
+ return m, alt != nil && alt.name == m.name // alt != nil implies m != nil
+}
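+
+// For instance, with hypothetical declarations (all in the same package)
+//
+//	type V struct{}
+//	func (V) m(int) {}
+//	type T interface{ m(string) }
+//
+// MissingMethod(V, T, true) would be expected to return T's method m with
+// wrongType == true: V does have a method named m, but its signature does
+// not match the one required by T.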
+
+// missingMethod is like MissingMethod but accepts a *Checker as receiver.
+// The receiver may be nil if missingMethod is invoked through an exported
+// API call (such as MissingMethod), i.e., when all methods have been type-
+// checked.
+//
+// If a method is missing on T but is found on *T, or if a method is found
+// on T when looked up with case-folding, this alternative method is returned
+// as the second result.
+func (check *Checker) missingMethod(V Type, T *Interface, static bool) (method, alt *Func) {
+ if T.NumMethods() == 0 {
+ return
+ }
+
+ // V is an interface
+ if u, _ := under(V).(*Interface); u != nil {
+ tset := u.typeSet()
+ for _, m := range T.typeSet().methods {
+ _, f := tset.LookupMethod(m.pkg, m.name, false)
+
+ if f == nil {
+ if !static {
+ continue
+ }
+ return m, nil
+ }
+
+ if !Identical(f.typ, m.typ) {
+ return m, f
+ }
+ }
+
+ return
+ }
+
+ // V is not an interface
+ for _, m := range T.typeSet().methods {
+ // TODO(gri) should this be calling LookupFieldOrMethod instead (and why not)?
+ obj, _, _ := lookupFieldOrMethod(V, false, m.pkg, m.name, false)
+
+ // check if m is on *V, or on V with case-folding
+ found := obj != nil
+ if !found {
+ // TODO(gri) Instead of NewPointer(V) below, can we just set the "addressable" argument?
+ obj, _, _ = lookupFieldOrMethod(NewPointer(V), false, m.pkg, m.name, false)
+ if obj == nil {
+ obj, _, _ = lookupFieldOrMethod(V, false, m.pkg, m.name, true /* fold case */)
+ }
+ }
+
+ // we must have a method (not a struct field)
+ f, _ := obj.(*Func)
+ if f == nil {
+ return m, nil
+ }
+
+ // methods may not have a fully set up signature yet
+ if check != nil {
+ check.objDecl(f, nil)
+ }
+
+ if !found || !Identical(f.typ, m.typ) {
+ return m, f
+ }
+ }
+
+ return
+}
+
+// missingMethodReason returns a string giving the detailed reason for a missing method m,
+// where m is missing from V, but required by T. It puts the reason in parentheses,
+// and may include more have/want info after that. If non-nil, alt is a relevant
+// method that matches in some way. It may have the correct name, but wrong type, or
+// it may have a pointer receiver, or it may have the correct name except wrong case.
+// check may be nil.
+func (check *Checker) missingMethodReason(V, T Type, m, alt *Func) string {
+ var mname string
+ if check != nil && check.conf.CompilerErrorMessages {
+ mname = m.Name() + " method"
+ } else {
+ mname = "method " + m.Name()
+ }
+
+ if alt != nil {
+ if m.Name() != alt.Name() {
+ return check.sprintf("(missing %s)\n\t\thave %s\n\t\twant %s",
+ mname, check.funcString(alt), check.funcString(m))
+ }
+
+ if Identical(m.typ, alt.typ) {
+ return check.sprintf("(%s has pointer receiver)", mname)
+ }
+
+ return check.sprintf("(wrong type for %s)\n\t\thave %s\n\t\twant %s",
+ mname, check.funcString(alt), check.funcString(m))
+ }
+
+ if isInterfacePtr(V) {
+ return "(" + check.interfacePtrError(V) + ")"
+ }
+
+ if isInterfacePtr(T) {
+ return "(" + check.interfacePtrError(T) + ")"
+ }
+
+ return check.sprintf("(missing %s)", mname)
+}
+
+func isInterfacePtr(T Type) bool {
+ p, _ := under(T).(*Pointer)
+ return p != nil && IsInterface(p.base)
+}
+
+// check may be nil.
+func (check *Checker) interfacePtrError(T Type) string {
+ assert(isInterfacePtr(T))
+ if p, _ := under(T).(*Pointer); isTypeParam(p.base) {
+ return check.sprintf("type %s is pointer to type parameter, not type parameter", T)
+ }
+ return check.sprintf("type %s is pointer to interface, not interface", T)
+}
+
+// funcString returns a string of the form name + signature for f.
+// check may be nil.
+func (check *Checker) funcString(f *Func) string {
+ buf := bytes.NewBufferString(f.name)
+ var qf Qualifier
+ if check != nil {
+ qf = check.qualifier
+ }
+ WriteSignature(buf, f.typ.(*Signature), qf)
+ return buf.String()
+}
+
+// assertableTo reports whether a value of type V can be asserted to have type T.
+// It returns (nil, false) as an affirmative answer. Otherwise it returns a missing
+// method required by V and whether it is missing or just has the wrong type.
+// The receiver may be nil if assertableTo is invoked through an exported API call
+// (such as AssertableTo), i.e., when all methods have been type-checked.
+// TODO(gri) replace calls to this function with calls to newAssertableTo.
+func (check *Checker) assertableTo(V *Interface, T Type) (method, wrongType *Func) {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return
+ }
+ // TODO(gri) fix this for generalized interfaces
+ return check.missingMethod(T, V, false)
+}
+
+// newAssertableTo reports whether a value of type V can be asserted to have type T.
+// It also implements behavior for interfaces that currently are only permitted
+// in constraint position (we have not yet defined that behavior in the spec).
+func (check *Checker) newAssertableTo(V *Interface, T Type) error {
+ // no static check is required if T is an interface
+ // spec: "If T is an interface type, x.(T) asserts that the
+ // dynamic type of x implements the interface T."
+ if IsInterface(T) {
+ return nil
+ }
+ return check.implements(T, V)
+}
+
+// deref dereferences typ if it is a *Pointer and returns its base and true.
+// Otherwise it returns (typ, false).
+func deref(typ Type) (Type, bool) {
+ if p, _ := typ.(*Pointer); p != nil {
+ // p.base should never be nil, but be conservative
+ if p.base == nil {
+ if debug {
+ panic("pointer with nil base type (possibly due to an invalid cyclic declaration)")
+ }
+ return Typ[Invalid], true
+ }
+ return p.base, true
+ }
+ return typ, false
+}
+
+// derefStructPtr dereferences typ if it is a (named or unnamed) pointer to a
+// (named or unnamed) struct and returns its base. Otherwise it returns typ.
+func derefStructPtr(typ Type) Type {
+ if p, _ := under(typ).(*Pointer); p != nil {
+ if _, ok := under(p.base).(*Struct); ok {
+ return p.base
+ }
+ }
+ return typ
+}
+
+// concat returns the result of concatenating list and i.
+// The result does not share its underlying array with list.
+func concat(list []int, i int) []int {
+ var t []int
+ t = append(t, list...)
+ return append(t, i)
+}
+
+// fieldIndex returns the index for the field with matching package and name, or a value < 0.
+func fieldIndex(fields []*Var, pkg *Package, name string) int {
+ if name != "_" {
+ for i, f := range fields {
+ if f.sameId(pkg, name) {
+ return i
+ }
+ }
+ }
+ return -1
+}
+
+// lookupMethod returns the index of, and the method with, a matching package and name, or (-1, nil).
+// If foldCase is true, method names are considered equal if they are equal with case folding.
+func lookupMethod(methods []*Func, pkg *Package, name string, foldCase bool) (int, *Func) {
+ if name != "_" {
+ for i, m := range methods {
+ if (m.name == name || foldCase && strings.EqualFold(m.name, name)) && m.sameId(pkg, m.name) {
+ return i, m
+ }
+ }
+ }
+ return -1, nil
+}
diff --git a/src/cmd/compile/internal/types2/map.go b/src/cmd/compile/internal/types2/map.go
new file mode 100644
index 0000000..0d3464c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/map.go
@@ -0,0 +1,24 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Map represents a map type.
+type Map struct {
+ key, elem Type
+}
+
+// NewMap returns a new map for the given key and element types.
+func NewMap(key, elem Type) *Map {
+ return &Map{key: key, elem: elem}
+}
+
+// Key returns the key type of map m.
+func (m *Map) Key() Type { return m.key }
+
+// Elem returns the element type of map m.
+func (m *Map) Elem() Type { return m.elem }
+
+func (t *Map) Underlying() Type { return t }
+func (t *Map) String() string { return TypeString(t, nil) }
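+
+// For example, NewMap(Typ[String], Typ[Int]) constructs the type map[string]int,
+// with Key() returning Typ[String], Elem() returning Typ[Int], and String()
+// rendering as "map[string]int".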
diff --git a/src/cmd/compile/internal/types2/methodlist.go b/src/cmd/compile/internal/types2/methodlist.go
new file mode 100644
index 0000000..cd6c06c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/methodlist.go
@@ -0,0 +1,79 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "sync"
+
+// methodList holds a list of methods that may be lazily resolved by a provided
+// resolution method.
+type methodList struct {
+ methods []*Func
+
+ // guards synchronizes the instantiation of lazy methods. For lazy method
+ // lists, guards is non-nil and of the length passed to newLazyMethodList.
+ // For non-lazy method lists, guards is nil.
+ guards *[]sync.Once
+}
+
+// newMethodList creates a non-lazy method list holding the given methods.
+func newMethodList(methods []*Func) *methodList {
+ return &methodList{methods: methods}
+}
+
+// newLazyMethodList creates a lazy method list of the given length. Methods
+// may be resolved lazily for a given index by providing a resolver function.
+func newLazyMethodList(length int) *methodList {
+ guards := make([]sync.Once, length)
+ return &methodList{
+ methods: make([]*Func, length),
+ guards: &guards,
+ }
+}
+
+// isLazy reports whether the receiver is a lazy method list.
+func (l *methodList) isLazy() bool {
+ return l != nil && l.guards != nil
+}
+
+// Add appends a method to the method list if not already present. Add
+// panics if the receiver is lazy.
+func (l *methodList) Add(m *Func) {
+ assert(!l.isLazy())
+ if i, _ := lookupMethod(l.methods, m.pkg, m.name, false); i < 0 {
+ l.methods = append(l.methods, m)
+ }
+}
+
+// Lookup looks up the method identified by pkg and name in the receiver.
+// Lookup panics if the receiver is lazy. If foldCase is true, method names
+// are considered equal if they are equal with case folding.
+func (l *methodList) Lookup(pkg *Package, name string, foldCase bool) (int, *Func) {
+ assert(!l.isLazy())
+ if l == nil {
+ return -1, nil
+ }
+ return lookupMethod(l.methods, pkg, name, foldCase)
+}
+
+// Len returns the length of the method list.
+func (l *methodList) Len() int {
+ if l == nil {
+ return 0
+ }
+ return len(l.methods)
+}
+
+// At returns the i'th method of the method list. At panics if i is out of
+// bounds, or if the receiver is lazy and resolve is nil.
+func (l *methodList) At(i int, resolve func() *Func) *Func {
+ if !l.isLazy() {
+ return l.methods[i]
+ }
+ assert(resolve != nil)
+ (*l.guards)[i].Do(func() {
+ l.methods[i] = resolve()
+ })
+ return l.methods[i]
+}
diff --git a/src/cmd/compile/internal/types2/methodlist_test.go b/src/cmd/compile/internal/types2/methodlist_test.go
new file mode 100644
index 0000000..7a183ac
--- /dev/null
+++ b/src/cmd/compile/internal/types2/methodlist_test.go
@@ -0,0 +1,40 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "testing"
+)
+
+func TestLazyMethodList(t *testing.T) {
+ l := newLazyMethodList(2)
+
+ if got := l.Len(); got != 2 {
+ t.Fatalf("Len() = %d, want 2", got)
+ }
+
+ f0 := NewFunc(nopos, nil, "f0", nil)
+ f1 := NewFunc(nopos, nil, "f1", nil)
+
+ // Verify that methodList.At is idempotent, by calling it repeatedly with a
+ // resolve func that returns different pointer values (f0 or f1).
+ steps := []struct {
+ index int
+ resolve *Func // the *Func returned by the resolver
+ want *Func // the actual *Func returned by methodList.At
+ }{
+ {0, f0, f0},
+ {0, f1, f0},
+ {1, f1, f1},
+ {1, f0, f1},
+ }
+
+ for i, step := range steps {
+ got := l.At(step.index, func() *Func { return step.resolve })
+ if got != step.want {
+ t.Errorf("step %d: At(%d, ...) = %s, want %s", i, step.index, got.Name(), step.want.Name())
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/mono.go b/src/cmd/compile/internal/types2/mono.go
new file mode 100644
index 0000000..7bd79f4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/mono.go
@@ -0,0 +1,337 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+// This file implements a check to validate that a Go package doesn't
+// have unbounded recursive instantiation, which is not compatible
+// with compilers using static instantiation (such as
+// monomorphization).
+//
+// It implements a sort of "type flow" analysis by detecting which
+// type parameters are instantiated with other type parameters (or
+// types derived thereof). A package cannot be statically instantiated
+// if the graph has any cycles involving at least one derived type.
+//
+// Concretely, we construct a directed, weighted graph. Vertices are
+// used to represent type parameters as well as some defined
+// types. Edges are used to represent how types depend on each other:
+//
+// * Everywhere a type-parameterized function or type is instantiated,
+// we add edges to each type parameter from the vertices (if any)
+// representing each type parameter or defined type referenced by
+// the type argument. If the type argument is just the referenced
+// type itself, then the edge has weight 0, otherwise 1.
+//
+// * For every defined type declared within a type-parameterized
+// function or method, we add an edge of weight 1 to the defined
+// type from each ambient type parameter.
+//
+// For example, given:
+//
+// func f[A, B any]() {
+// type T int
+// f[T, map[A]B]()
+// }
+//
+// we construct vertices representing types A, B, and T. Because of
+// declaration "type T int", we construct edges T<-A and T<-B with
+// weight 1; and because of instantiation "f[T, map[A]B]" we construct
+// edges A<-T with weight 0, and B<-A and B<-B with weight 1.
+//
+// Finally, we look for any positive-weight cycles. Zero-weight cycles
+// are allowed because static instantiation will reach a fixed point.
+
+type monoGraph struct {
+ vertices []monoVertex
+ edges []monoEdge
+
+ // canon maps method receiver type parameters to their respective
+ // receiver type's type parameters.
+ canon map[*TypeParam]*TypeParam
+
+ // nameIdx maps a defined type or (canonical) type parameter to its
+ // vertex index.
+ nameIdx map[*TypeName]int
+}
+
+type monoVertex struct {
+ weight int // weight of heaviest known path to this vertex
+ pre int // previous edge (if any) in the above path
+ len int // length of the above path
+
+ // obj is the defined type or type parameter represented by this
+ // vertex.
+ obj *TypeName
+}
+
+type monoEdge struct {
+ dst, src int
+ weight int
+
+ pos syntax.Pos
+ typ Type
+}
+
+func (check *Checker) monomorph() {
+ // We detect unbounded instantiation cycles using a variant of
+ // Bellman-Ford's algorithm. Namely, instead of always running |V|
+ // iterations, we run until we either reach a fixed point or we've
+ // found a path of length |V|. This allows us to terminate earlier
+ // when there are no cycles, which should be the common case.
+
+ again := true
+ for again {
+ again = false
+
+ for i, edge := range check.mono.edges {
+ src := &check.mono.vertices[edge.src]
+ dst := &check.mono.vertices[edge.dst]
+
+ // N.B., we're looking for the greatest weight paths, unlike
+ // typical Bellman-Ford.
+ w := src.weight + edge.weight
+ if w <= dst.weight {
+ continue
+ }
+
+ dst.pre = i
+ dst.len = src.len + 1
+ if dst.len == len(check.mono.vertices) {
+ check.reportInstanceLoop(edge.dst)
+ return
+ }
+
+ dst.weight = w
+ again = true
+ }
+ }
+}
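+
+// For the example in the file comment above, the edge B<-B has weight 1, so
+// each relaxation pass would increase B's weight and path length; once the
+// path length reaches the number of vertices, reportInstanceLoop is expected
+// to be called to diagnose the instantiation cycle.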
+
+func (check *Checker) reportInstanceLoop(v int) {
+ var stack []int
+ seen := make([]bool, len(check.mono.vertices))
+
+ // We have a path that contains a cycle and ends at v, but v may
+ // only be reachable from the cycle, not on the cycle itself. We
+ // start by walking backwards along the path until we find a vertex
+ // that appears twice.
+ for !seen[v] {
+ stack = append(stack, v)
+ seen[v] = true
+ v = check.mono.edges[check.mono.vertices[v].pre].src
+ }
+
+ // Trim any vertices we visited before visiting v the first
+ // time. Since v is the first vertex we found within the cycle, any
+ // vertices we visited earlier cannot be part of the cycle.
+ for stack[0] != v {
+ stack = stack[1:]
+ }
+
+ // TODO(mdempsky): Pivot stack so we report the cycle from the top?
+
+ var err error_
+ obj0 := check.mono.vertices[v].obj
+ err.errorf(obj0, "instantiation cycle:")
+
+ qf := RelativeTo(check.pkg)
+ for _, v := range stack {
+ edge := check.mono.edges[check.mono.vertices[v].pre]
+ obj := check.mono.vertices[edge.dst].obj
+
+ switch obj.Type().(type) {
+ default:
+ panic("unexpected type")
+ case *Named:
+ err.errorf(edge.pos, "%s implicitly parameterized by %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented
+ case *TypeParam:
+ err.errorf(edge.pos, "%s instantiated as %s", obj.Name(), TypeString(edge.typ, qf)) // secondary error, \t indented
+ }
+ }
+ check.report(&err)
+}
+
+// recordCanon records that tpar is the canonical type parameter
+// corresponding to method type parameter mpar.
+func (w *monoGraph) recordCanon(mpar, tpar *TypeParam) {
+ if w.canon == nil {
+ w.canon = make(map[*TypeParam]*TypeParam)
+ }
+ w.canon[mpar] = tpar
+}
+
+// recordInstance records that the given type parameters were
+// instantiated with the corresponding type arguments.
+func (w *monoGraph) recordInstance(pkg *Package, pos syntax.Pos, tparams []*TypeParam, targs []Type, xlist []syntax.Expr) {
+ for i, tpar := range tparams {
+ pos := pos
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ w.assign(pkg, pos, tpar, targs[i])
+ }
+}
+
+// assign records that tpar was instantiated as targ at pos.
+func (w *monoGraph) assign(pkg *Package, pos syntax.Pos, tpar *TypeParam, targ Type) {
+ // Go generics do not have an analog to C++'s template-templates,
+ // where a template parameter can itself be an instantiable
+ // template. So any instantiation cycles must occur within a single
+ // package. Accordingly, we can ignore instantiations of imported
+ // type parameters.
+ //
+ // TODO(mdempsky): Push this check up into recordInstance? All type
+ // parameters in a list will appear in the same package.
+ if tpar.Obj().Pkg() != pkg {
+ return
+ }
+
+ // flow adds an edge from vertex src representing that typ flows to tpar.
+ flow := func(src int, typ Type) {
+ weight := 1
+ if typ == targ {
+ weight = 0
+ }
+
+ w.addEdge(w.typeParamVertex(tpar), src, weight, pos, targ)
+ }
+
+ // Recursively walk the type argument to find any defined types or
+ // type parameters.
+ var do func(typ Type)
+ do = func(typ Type) {
+ switch typ := typ.(type) {
+ default:
+ panic("unexpected type")
+
+ case *TypeParam:
+ assert(typ.Obj().Pkg() == pkg)
+ flow(w.typeParamVertex(typ), typ)
+
+ case *Named:
+ if src := w.localNamedVertex(pkg, typ.Origin()); src >= 0 {
+ flow(src, typ)
+ }
+
+ targs := typ.TypeArgs()
+ for i := 0; i < targs.Len(); i++ {
+ do(targs.At(i))
+ }
+
+ case *Array:
+ do(typ.Elem())
+ case *Basic:
+ // ok
+ case *Chan:
+ do(typ.Elem())
+ case *Map:
+ do(typ.Key())
+ do(typ.Elem())
+ case *Pointer:
+ do(typ.Elem())
+ case *Slice:
+ do(typ.Elem())
+
+ case *Interface:
+ for i := 0; i < typ.NumMethods(); i++ {
+ do(typ.Method(i).Type())
+ }
+ case *Signature:
+ tuple := func(tup *Tuple) {
+ for i := 0; i < tup.Len(); i++ {
+ do(tup.At(i).Type())
+ }
+ }
+ tuple(typ.Params())
+ tuple(typ.Results())
+ case *Struct:
+ for i := 0; i < typ.NumFields(); i++ {
+ do(typ.Field(i).Type())
+ }
+ }
+ }
+ do(targ)
+}
+
+// localNamedVertex returns the index of the vertex representing
+// named, or -1 if named doesn't need representation.
+func (w *monoGraph) localNamedVertex(pkg *Package, named *Named) int {
+ obj := named.Obj()
+ if obj.Pkg() != pkg {
+ return -1 // imported type
+ }
+
+ root := pkg.Scope()
+ if obj.Parent() == root {
+ return -1 // package scope, no ambient type parameters
+ }
+
+ if idx, ok := w.nameIdx[obj]; ok {
+ return idx
+ }
+
+ idx := -1
+
+ // Walk the type definition's scope to find any ambient type
+ // parameters that it's implicitly parameterized by.
+ for scope := obj.Parent(); scope != root; scope = scope.Parent() {
+ for _, elem := range scope.elems {
+ if elem, ok := elem.(*TypeName); ok && !elem.IsAlias() && elem.Pos().Cmp(obj.Pos()) < 0 {
+ if tpar, ok := elem.Type().(*TypeParam); ok {
+ if idx < 0 {
+ idx = len(w.vertices)
+ w.vertices = append(w.vertices, monoVertex{obj: obj})
+ }
+
+ w.addEdge(idx, w.typeParamVertex(tpar), 1, obj.Pos(), tpar)
+ }
+ }
+ }
+ }
+
+ if w.nameIdx == nil {
+ w.nameIdx = make(map[*TypeName]int)
+ }
+ w.nameIdx[obj] = idx
+ return idx
+}
+
+// typeParamVertex returns the index of the vertex representing tpar.
+func (w *monoGraph) typeParamVertex(tpar *TypeParam) int {
+ if x, ok := w.canon[tpar]; ok {
+ tpar = x
+ }
+
+ obj := tpar.Obj()
+
+ if idx, ok := w.nameIdx[obj]; ok {
+ return idx
+ }
+
+ if w.nameIdx == nil {
+ w.nameIdx = make(map[*TypeName]int)
+ }
+
+ idx := len(w.vertices)
+ w.vertices = append(w.vertices, monoVertex{obj: obj})
+ w.nameIdx[obj] = idx
+ return idx
+}
+
+func (w *monoGraph) addEdge(dst, src, weight int, pos syntax.Pos, typ Type) {
+ // TODO(mdempsky): Deduplicate redundant edges?
+ w.edges = append(w.edges, monoEdge{
+ dst: dst,
+ src: src,
+ weight: weight,
+
+ pos: pos,
+ typ: typ,
+ })
+}
diff --git a/src/cmd/compile/internal/types2/mono_test.go b/src/cmd/compile/internal/types2/mono_test.go
new file mode 100644
index 0000000..19d0e95
--- /dev/null
+++ b/src/cmd/compile/internal/types2/mono_test.go
@@ -0,0 +1,89 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "errors"
+ "fmt"
+ "strings"
+ "testing"
+)
+
+func checkMono(t *testing.T, body string) error {
+ src := "package x; import `unsafe`; var _ unsafe.Pointer;\n" + body
+ file, err := syntax.Parse(syntax.NewFileBase("x.go"), strings.NewReader(src), nil, nil, syntax.AllowGenerics)
+ if err != nil {
+ t.Fatal(err)
+ }
+ files := []*syntax.File{file}
+
+ var buf bytes.Buffer
+ conf := types2.Config{
+ Error: func(err error) { fmt.Fprintln(&buf, err) },
+ Importer: defaultImporter(),
+ }
+ conf.Check("x", files, nil)
+ if buf.Len() == 0 {
+ return nil
+ }
+ return errors.New(strings.TrimRight(buf.String(), "\n"))
+}
+
+func TestMonoGood(t *testing.T) {
+ for i, good := range goods {
+ if err := checkMono(t, good); err != nil {
+ t.Errorf("%d: unexpected failure: %v", i, err)
+ }
+ }
+}
+
+func TestMonoBad(t *testing.T) {
+ for i, bad := range bads {
+ if err := checkMono(t, bad); err == nil {
+ t.Errorf("%d: unexpected success", i)
+ } else {
+ t.Log(err)
+ }
+ }
+}
+
+var goods = []string{
+ "func F[T any](x T) { F(x) }",
+ "func F[T, U, V any]() { F[U, V, T](); F[V, T, U]() }",
+ "type Ring[A, B, C any] struct { L *Ring[B, C, A]; R *Ring[C, A, B] }",
+ "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte }",
+ "func F[T any]() { type U[T any] [unsafe.Sizeof(F[*T])]byte; var _ U[int] }",
+ "type U[T any] [unsafe.Sizeof(F[*T])]byte; func F[T any]() { var _ U[U[int]] }",
+ "func F[T any]() { type A = int; F[A]() }",
+}
+
+// TODO(mdempsky): Validate specific error messages and positioning.
+
+var bads = []string{
+ "func F[T any](x T) { F(&x) }",
+ "func F[T any]() { F[*T]() }",
+ "func F[T any]() { F[[]T]() }",
+ "func F[T any]() { F[[1]T]() }",
+ "func F[T any]() { F[chan T]() }",
+ "func F[T any]() { F[map[*T]int]() }",
+ "func F[T any]() { F[map[error]T]() }",
+ "func F[T any]() { F[func(T)]() }",
+ "func F[T any]() { F[func() T]() }",
+ "func F[T any]() { F[struct{ t T }]() }",
+ "func F[T any]() { F[interface{ t() T }]() }",
+ "type U[_ any] int; func F[T any]() { F[U[T]]() }",
+ "func F[T any]() { type U int; F[U]() }",
+ "func F[T any]() { type U int; F[*U]() }",
+ "type U[T any] int; func (U[T]) m() { var _ U[*T] }",
+ "type U[T any] int; func (*U[T]) m() { var _ U[*T] }",
+ "type U[T1 any] [unsafe.Sizeof(F[*T1])]byte; func F[T2 any]() { var _ U[T2] }",
+ "func F[A, B, C, D, E any]() { F[B, C, D, E, *A]() }",
+ "type U[_ any] int; const X = unsafe.Sizeof(func() { type A[T any] U[A[*T]] })",
+ "func F[T any]() { type A = *T; F[A]() }",
+ "type A[T any] struct { _ A[*T] }",
+}
diff --git a/src/cmd/compile/internal/types2/named.go b/src/cmd/compile/internal/types2/named.go
new file mode 100644
index 0000000..8a936eb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/named.go
@@ -0,0 +1,413 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "sync"
+)
+
+// A Named represents a named (defined) type.
+type Named struct {
+ check *Checker
+ obj *TypeName // corresponding declared object for declared types; placeholder for instantiated types
+ orig *Named // original, uninstantiated type
+ fromRHS Type // type (on RHS of declaration) this *Named type is derived from (for cycle reporting)
+ underlying Type // possibly a *Named during setup; never a *Named once set up completely
+ tparams *TypeParamList // type parameters, or nil
+ targs *TypeList // type arguments (after instantiation), or nil
+
+ // methods declared for this type (not the method set of this type).
+ // Signatures are type-checked lazily.
+ // For non-instantiated types, this is a fully populated list of methods. For
+ // instantiated types, this is a 'lazy' list, and methods are instantiated
+ // when they are first accessed.
+ methods *methodList
+
+ // resolver may be provided to lazily resolve type parameters, underlying, and methods.
+ resolver func(*Context, *Named) (tparams *TypeParamList, underlying Type, methods *methodList)
+ once sync.Once // ensures that tparams, underlying, and methods are resolved before accessing
+}
+
+// NewNamed returns a new named type for the given type name, underlying type, and associated methods.
+// If the given type name obj doesn't have a type yet, its type is set to the returned named type.
+// The underlying type must not be a *Named.
+func NewNamed(obj *TypeName, underlying Type, methods []*Func) *Named {
+ if _, ok := underlying.(*Named); ok {
+ panic("underlying type must not be *Named")
+ }
+ return (*Checker)(nil).newNamed(obj, nil, underlying, nil, newMethodList(methods))
+}
+
+func (t *Named) resolve(ctxt *Context) *Named {
+ if t.resolver == nil {
+ return t
+ }
+
+ t.once.Do(func() {
+ // TODO(mdempsky): Since we're passing t to the resolver anyway
+ // (necessary because types2 expects the receiver type for methods
+ // on defined interface types to be the Named rather than the
+ // underlying Interface), maybe it should just handle calling
+ // SetTypeParams, SetUnderlying, and AddMethod instead? Those
+ // methods would need to support reentrant calls though. It would
+ // also make the API more future-proof towards further extensions
+ // (like SetTypeParams).
+ t.tparams, t.underlying, t.methods = t.resolver(ctxt, t)
+ t.fromRHS = t.underlying // for cycle detection
+ })
+ return t
+}
+
+// newNamed is like NewNamed but with a *Checker receiver and additional orig argument.
+func (check *Checker) newNamed(obj *TypeName, orig *Named, underlying Type, tparams *TypeParamList, methods *methodList) *Named {
+ typ := &Named{check: check, obj: obj, orig: orig, fromRHS: underlying, underlying: underlying, tparams: tparams, methods: methods}
+ if typ.orig == nil {
+ typ.orig = typ
+ }
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // Ensure that typ is always expanded and sanity-checked.
+ if check != nil {
+ check.needsCleanup(typ)
+ }
+ return typ
+}
+
+func (t *Named) cleanup() {
+ // Ensure that every defined type created in the course of type-checking
+ // either has a non-*Named underlying type, or is unresolved.
+ //
+ // This guarantees that we don't leak any types whose underlying is *Named,
+ // because any unresolved instances will lazily compute their underlying by
+ // substituting in the underlying of their origin. The origin must have
+ // either been imported or type-checked and expanded here, and in either case
+ // its underlying will be fully expanded.
+ switch t.underlying.(type) {
+ case nil:
+ if t.resolver == nil {
+ panic("nil underlying")
+ }
+ case *Named:
+ t.under() // t.under may add entries to check.cleaners
+ }
+ t.check = nil
+}
+
+// Obj returns the type name for the declaration defining the named type t. For
+// instantiated types, this is the same as the type name of the origin type.
+func (t *Named) Obj() *TypeName { return t.orig.obj } // for non-instances this is the same as t.obj
+
+// Origin returns the generic type from which the named type t is
+// instantiated. If t is not an instantiated type, the result is t.
+func (t *Named) Origin() *Named { return t.orig }
+
+// TODO(gri) Come up with a better representation and API to distinguish
+// between parameterized instantiated and non-instantiated types.
+
+// TypeParams returns the type parameters of the named type t, or nil.
+// The result is non-nil for an (originally) generic type even if it is instantiated.
+func (t *Named) TypeParams() *TypeParamList { return t.resolve(nil).tparams }
+
+// SetTypeParams sets the type parameters of the named type t.
+// t must not have type arguments.
+func (t *Named) SetTypeParams(tparams []*TypeParam) {
+ assert(t.targs.Len() == 0)
+ t.resolve(nil).tparams = bindTParams(tparams)
+}
+
+// TypeArgs returns the type arguments used to instantiate the named type t.
+func (t *Named) TypeArgs() *TypeList { return t.targs }
+
+// NumMethods returns the number of explicit methods defined for t.
+//
+// For an ordinary or instantiated type t, the receiver base type of these
+// methods will be the named type t. For an uninstantiated generic type t, each
+// method receiver will be instantiated with its receiver type parameters.
+func (t *Named) NumMethods() int { return t.resolve(nil).methods.Len() }
+
+// Method returns the i'th method of named type t for 0 <= i < t.NumMethods().
+func (t *Named) Method(i int) *Func {
+ t.resolve(nil)
+ return t.methods.At(i, func() *Func {
+ return t.instantiateMethod(i)
+ })
+}
+
+// instantiateMethod instantiates the i'th method for an instantiated receiver.
+func (t *Named) instantiateMethod(i int) *Func {
+ assert(t.TypeArgs().Len() > 0) // t must be an instance
+
+ // t.orig.methods is not lazy. origm is the method instantiated with its
+ // receiver type parameters (the "origin" method).
+ origm := t.orig.Method(i)
+ assert(origm != nil)
+
+ check := t.check
+ // Ensure that the original method is type-checked.
+ if check != nil {
+ check.objDecl(origm, nil)
+ }
+
+ origSig := origm.typ.(*Signature)
+ rbase, _ := deref(origSig.Recv().Type())
+
+ // If rbase is t, then origm is already the instantiated method we're looking
+ // for. In this case, we return origm to preserve the invariant that
+ // traversing Method->Receiver Type->Method should get back to the same
+ // method.
+ //
+ // This occurs if t is instantiated with the receiver type parameters, as in
+ // the use of m in func (r T[_]) m() { r.m() }.
+ if rbase == t {
+ return origm
+ }
+
+ sig := origSig
+ // We can only substitute if we have a correspondence between type arguments
+ // and type parameters. This check is necessary in the presence of invalid
+ // code.
+ if origSig.RecvTypeParams().Len() == t.targs.Len() {
+ ctxt := check.bestContext(nil)
+ smap := makeSubstMap(origSig.RecvTypeParams().list(), t.targs.list())
+ sig = check.subst(origm.pos, origSig, smap, ctxt).(*Signature)
+ }
+
+ if sig == origSig {
+ // No substitution occurred, but we still need to create a new signature to
+ // hold the instantiated receiver.
+ copy := *origSig
+ sig = &copy
+ }
+
+ var rtyp Type
+ if origm.hasPtrRecv() {
+ rtyp = NewPointer(t)
+ } else {
+ rtyp = t
+ }
+
+ sig.recv = substVar(origSig.recv, rtyp)
+ return NewFunc(origm.pos, origm.pkg, origm.name, sig)
+}
+
+// SetUnderlying sets the underlying type and marks t as complete.
+// t must not have type arguments.
+func (t *Named) SetUnderlying(underlying Type) {
+ assert(t.targs.Len() == 0)
+ if underlying == nil {
+ panic("underlying type must not be nil")
+ }
+ if _, ok := underlying.(*Named); ok {
+ panic("underlying type must not be *Named")
+ }
+ t.resolve(nil).underlying = underlying
+ if t.fromRHS == nil {
+ t.fromRHS = underlying // for cycle detection
+ }
+}
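+
+// An illustrative sketch (hypothetical; assumes nopos, pkg, and the NewStruct
+// constructor from this package): for self-referential types the underlying
+// type is not known when NewNamed is called, so pass nil and complete the
+// type with SetUnderlying:
+//
+//	obj := NewTypeName(nopos, pkg, "Node", nil)
+//	node := NewNamed(obj, nil, nil) // underlying filled in below
+//	next := NewField(nopos, pkg, "next", NewPointer(node), false)
+//	node.SetUnderlying(NewStruct([]*Var{next}, nil))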
+
+// AddMethod adds method m unless it is already in the method list.
+// t must not have type arguments.
+func (t *Named) AddMethod(m *Func) {
+ assert(t.targs.Len() == 0)
+ t.resolve(nil)
+ if t.methods == nil {
+ t.methods = newMethodList(nil)
+ }
+ t.methods.Add(m)
+}
+
+func (t *Named) Underlying() Type { return t.resolve(nil).underlying }
+func (t *Named) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// under returns the expanded underlying type of n0; possibly by following
+// forward chains of named types. If an underlying type is found, resolve
+// the chain by setting the underlying type for each defined type in the
+// chain before returning it. If no underlying type is found or a cycle
+// is detected, the result is Typ[Invalid]. If a cycle is detected and
+// n0.check != nil, the cycle is reported.
+//
+// This is necessary because the underlying type of a named type may itself
+// be a named type that is incomplete:
+//
+// type (
+// A B
+// B *C
+// C A
+// )
+//
+// The type of C is the (named) type of A which is incomplete,
+// and which has as its underlying type the named type B.
+func (n0 *Named) under() Type {
+ u := n0.Underlying()
+
+ // If the underlying type of a defined type is not a defined
+ // (incl. instance) type, then that is the desired underlying
+ // type.
+ var n1 *Named
+ switch u1 := u.(type) {
+ case nil:
+ // After expansion via Underlying(), we should never encounter a nil
+ // underlying.
+ panic("nil underlying")
+ default:
+ // common case
+ return u
+ case *Named:
+ // handled below
+ n1 = u1
+ }
+
+ if n0.check == nil {
+ panic("Named.check == nil but type is incomplete")
+ }
+
+ // Invariant: after this point n0 as well as any named types in its
+ // underlying chain should be set up when this function exits.
+ check := n0.check
+ n := n0
+
+ seen := make(map[*Named]int) // types that need their underlying resolved
+ var path []Object // objects encountered, for cycle reporting
+
+loop:
+ for {
+ seen[n] = len(seen)
+ path = append(path, n.obj)
+ n = n1
+ if i, ok := seen[n]; ok {
+ // cycle
+ check.cycleError(path[i:])
+ u = Typ[Invalid]
+ break
+ }
+ u = n.Underlying()
+ switch u1 := u.(type) {
+ case nil:
+ u = Typ[Invalid]
+ break loop
+ default:
+ break loop
+ case *Named:
+ // Continue collecting *Named types in the chain.
+ n1 = u1
+ }
+ }
+
+ for n := range seen {
+ // We should never have to update the underlying type of an imported type;
+ // those underlying types should have been resolved during the import.
+ // Also, doing so would lead to a race condition (was issue #31749).
+ // Do this check always, not just in debug mode (it's cheap).
+ if n.obj.pkg != check.pkg {
+ panic("imported type with unresolved underlying type")
+ }
+ n.underlying = u
+ }
+
+ return u
+}
+
+func (n *Named) setUnderlying(typ Type) {
+ if n != nil {
+ n.underlying = typ
+ }
+}
+
+func (n *Named) lookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
+ n.resolve(nil)
+ // If n is an instance, we may not have yet instantiated all of its methods.
+ // Look up the method index in orig, and only instantiate method at the
+ // matching index (if any).
+ i, _ := n.orig.methods.Lookup(pkg, name, foldCase)
+ if i < 0 {
+ return -1, nil
+ }
+ // For instances, m.Method(i) will be different from the orig method.
+ return i, n.Method(i)
+}
+
+// bestContext returns the best available context. In order of preference:
+// - the given ctxt, if non-nil
+// - check.ctxt, if check is non-nil
+// - a new Context
+func (check *Checker) bestContext(ctxt *Context) *Context {
+ if ctxt != nil {
+ return ctxt
+ }
+ if check != nil {
+ if check.ctxt == nil {
+ check.ctxt = NewContext()
+ }
+ return check.ctxt
+ }
+ return NewContext()
+}
+
+// expandNamed ensures that the underlying type of n is instantiated.
+// The underlying type will be Typ[Invalid] if there was an error.
+func expandNamed(ctxt *Context, n *Named, instPos syntax.Pos) (tparams *TypeParamList, underlying Type, methods *methodList) {
+ n.orig.resolve(ctxt)
+ assert(n.orig.underlying != nil)
+
+ check := n.check
+
+ if _, unexpanded := n.orig.underlying.(*Named); unexpanded {
+ // We should only get an unexpanded underlying here during type checking
+ // (for example, in recursive type declarations).
+ assert(check != nil)
+ }
+
+ // Mismatching arg and tparam length may be checked elsewhere.
+ if n.orig.tparams.Len() == n.targs.Len() {
+ // We must always have a context, to avoid infinite recursion.
+ ctxt = check.bestContext(ctxt)
+ h := ctxt.instanceHash(n.orig, n.targs.list())
+ // ensure that an instance is recorded for h to avoid infinite recursion.
+ ctxt.update(h, n.orig, n.TypeArgs().list(), n)
+
+ smap := makeSubstMap(n.orig.tparams.list(), n.targs.list())
+ underlying = n.check.subst(instPos, n.orig.underlying, smap, ctxt)
+ // If the underlying of n is an interface, we need to set the receiver of
+ // its methods accurately -- we set the receiver of interface methods on
+ // the RHS of a type declaration to the defined type.
+ if iface, _ := underlying.(*Interface); iface != nil {
+ if methods, copied := replaceRecvType(iface.methods, n.orig, n); copied {
+ // If the underlying doesn't actually use type parameters, it's possible
+ // that it wasn't substituted. In this case we need to create a new
+ // *Interface before modifying receivers.
+ if iface == n.orig.underlying {
+ old := iface
+ iface = check.newInterface()
+ iface.embeddeds = old.embeddeds
+ iface.complete = old.complete
+ iface.implicit = old.implicit // should be false but be conservative
+ underlying = iface
+ }
+ iface.methods = methods
+ }
+ }
+ } else {
+ underlying = Typ[Invalid]
+ }
+
+ return n.orig.tparams, underlying, newLazyMethodList(n.orig.methods.Len())
+}
+
+// safeUnderlying returns the underlying of typ without expanding instances, to
+// avoid infinite recursion.
+//
+// TODO(rfindley): eliminate this function or give it a better name.
+func safeUnderlying(typ Type) Type {
+ if t, _ := typ.(*Named); t != nil {
+ return t.underlying
+ }
+ return typ.Underlying()
+}
diff --git a/src/cmd/compile/internal/types2/object.go b/src/cmd/compile/internal/types2/object.go
new file mode 100644
index 0000000..08d37cb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/object.go
@@ -0,0 +1,597 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "unicode"
+ "unicode/utf8"
+)
+
+// An Object describes a named language entity such as a package,
+// constant, type, variable, function (incl. methods), or label.
+// All objects implement the Object interface.
+//
+type Object interface {
+ Parent() *Scope // scope in which this object is declared; nil for methods and struct fields
+ Pos() syntax.Pos // position of object identifier in declaration
+ Pkg() *Package // package to which this object belongs; nil for labels and objects in the Universe scope
+ Name() string // package local object name
+ Type() Type // object type
+ Exported() bool // reports whether the name starts with a capital letter
+ Id() string // object name if exported, qualified name if not exported (see func Id)
+
+ // String returns a human-readable string of the object.
+ String() string
+
+ // order reflects a package-level object's source order: if object
+ // a is before object b in the source, then a.order() < b.order().
+ // order returns a value > 0 for package-level objects; it returns
+ // 0 for all other objects (including objects in file scopes).
+ order() uint32
+
+ // color returns the object's color.
+ color() color
+
+ // setType sets the type of the object.
+ setType(Type)
+
+ // setOrder sets the order number of the object. It must be > 0.
+ setOrder(uint32)
+
+ // setColor sets the object's color. It must not be white.
+ setColor(color color)
+
+ // setParent sets the parent scope of the object.
+ setParent(*Scope)
+
+ // sameId reports whether obj.Id() and Id(pkg, name) are the same.
+ sameId(pkg *Package, name string) bool
+
+ // scopePos returns the start position of the scope of this Object
+ scopePos() syntax.Pos
+
+ // setScopePos sets the start position of the scope for this Object.
+ setScopePos(pos syntax.Pos)
+}
+
+func isExported(name string) bool {
+ ch, _ := utf8.DecodeRuneInString(name)
+ return unicode.IsUpper(ch)
+}
+
+// Id returns name if it is exported, otherwise it
+// returns the name qualified with the package path.
+func Id(pkg *Package, name string) string {
+ if isExported(name) {
+ return name
+ }
+ // unexported names need the package path for differentiation
+ // (if there's no package, make sure we don't start with '.'
+ // as that may change the order of methods between a setup
+ // inside a package and outside a package - which breaks some
+ // tests)
+ path := "_"
+ // pkg is nil for objects in Universe scope and possibly types
+ // introduced via Eval (see also comment in object.sameId)
+ if pkg != nil && pkg.path != "" {
+ path = pkg.path
+ }
+ return path + "." + name
+}
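+
+// An illustrative sketch (hypothetical package p with path "example.org/p"):
+//
+//	Id(p, "Foo")   // "Foo": exported names are returned as is
+//	Id(p, "foo")   // "example.org/p.foo": unexported names are path-qualified
+//	Id(nil, "foo") // "_.foo": no package, so the "_" placeholder is used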
+
+// An object implements the common parts of an Object.
+type object struct {
+ parent *Scope
+ pos syntax.Pos
+ pkg *Package
+ name string
+ typ Type
+ order_ uint32
+ color_ color
+ scopePos_ syntax.Pos
+}
+
+// color encodes the color of an object (see Checker.objDecl for details).
+type color uint32
+
+// An object may be painted in one of three colors.
+// Color values other than white or black are considered grey.
+const (
+ white color = iota
+ black
+ grey // must be > white and black
+)
+
+func (c color) String() string {
+ switch c {
+ case white:
+ return "white"
+ case black:
+ return "black"
+ default:
+ return "grey"
+ }
+}
+
+// colorFor returns the (initial) color for an object depending on
+// whether its type t is known or not.
+func colorFor(t Type) color {
+ if t != nil {
+ return black
+ }
+ return white
+}
+
+// Parent returns the scope in which the object is declared.
+// The result is nil for methods and struct fields.
+func (obj *object) Parent() *Scope { return obj.parent }
+
+// Pos returns the declaration position of the object's identifier.
+func (obj *object) Pos() syntax.Pos { return obj.pos }
+
+// Pkg returns the package to which the object belongs.
+// The result is nil for labels and objects in the Universe scope.
+func (obj *object) Pkg() *Package { return obj.pkg }
+
+// Name returns the object's (package-local, unqualified) name.
+func (obj *object) Name() string { return obj.name }
+
+// Type returns the object's type.
+func (obj *object) Type() Type { return obj.typ }
+
+// Exported reports whether the object is exported (starts with a capital letter).
+// It doesn't take into account whether the object is in a local (function) scope
+// or not.
+func (obj *object) Exported() bool { return isExported(obj.name) }
+
+// Id is a wrapper for Id(obj.Pkg(), obj.Name()).
+func (obj *object) Id() string { return Id(obj.pkg, obj.name) }
+
+func (obj *object) String() string { panic("abstract") }
+func (obj *object) order() uint32 { return obj.order_ }
+func (obj *object) color() color { return obj.color_ }
+func (obj *object) scopePos() syntax.Pos { return obj.scopePos_ }
+
+func (obj *object) setParent(parent *Scope) { obj.parent = parent }
+func (obj *object) setType(typ Type) { obj.typ = typ }
+func (obj *object) setOrder(order uint32) { assert(order > 0); obj.order_ = order }
+func (obj *object) setColor(color color) { assert(color != white); obj.color_ = color }
+func (obj *object) setScopePos(pos syntax.Pos) { obj.scopePos_ = pos }
+
+func (obj *object) sameId(pkg *Package, name string) bool {
+ // spec:
+ // "Two identifiers are different if they are spelled differently,
+ // or if they appear in different packages and are not exported.
+ // Otherwise, they are the same."
+ if name != obj.name {
+ return false
+ }
+ // obj.Name == name
+ if obj.Exported() {
+ return true
+ }
+ // not exported, so packages must be the same (pkg == nil for
+ // fields in Universe scope; this can only happen for types
+ // introduced via Eval)
+ if pkg == nil || obj.pkg == nil {
+ return pkg == obj.pkg
+ }
+ // pkg != nil && obj.pkg != nil
+ return pkg.path == obj.pkg.path
+}
+
+// less reports whether object a is ordered before object b.
+//
+// Objects are ordered nil before non-nil, exported before
+// non-exported, then by name, and finally (for non-exported
+// functions) by package height and path.
+func (a *object) less(b *object) bool {
+ if a == b {
+ return false
+ }
+
+ // Nil before non-nil.
+ if a == nil {
+ return true
+ }
+ if b == nil {
+ return false
+ }
+
+ // Exported functions before non-exported.
+ ea := isExported(a.name)
+ eb := isExported(b.name)
+ if ea != eb {
+ return ea
+ }
+
+ // Order by name and then (for non-exported names) by package.
+ if a.name != b.name {
+ return a.name < b.name
+ }
+ if !ea {
+ if a.pkg.height != b.pkg.height {
+ return a.pkg.height < b.pkg.height
+ }
+ return a.pkg.path < b.pkg.path
+ }
+
+ return false
+}
+
+// A PkgName represents an imported Go package.
+// PkgNames don't have a type.
+type PkgName struct {
+ object
+ imported *Package
+ used bool // set if the package was used
+}
+
+// NewPkgName returns a new PkgName object representing an imported package.
+// The remaining arguments set the attributes found with all Objects.
+func NewPkgName(pos syntax.Pos, pkg *Package, name string, imported *Package) *PkgName {
+ return &PkgName{object{nil, pos, pkg, name, Typ[Invalid], 0, black, nopos}, imported, false}
+}
+
+// Imported returns the package that was imported.
+// It is distinct from Pkg(), which is the package containing the import statement.
+func (obj *PkgName) Imported() *Package { return obj.imported }
+
+// A Const represents a declared constant.
+type Const struct {
+ object
+ val constant.Value
+}
+
+// NewConst returns a new constant with value val.
+// The remaining arguments set the attributes found with all Objects.
+func NewConst(pos syntax.Pos, pkg *Package, name string, typ Type, val constant.Value) *Const {
+ return &Const{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, val}
+}
+
+// Val returns the constant's value.
+func (obj *Const) Val() constant.Value { return obj.val }
+
+func (*Const) isDependency() {} // a constant may be a dependency of an initialization expression
+
+// A TypeName represents a name for a (defined or alias) type.
+type TypeName struct {
+ object
+}
+
+// NewTypeName returns a new type name denoting the given typ.
+// The remaining arguments set the attributes found with all Objects.
+//
+// The typ argument may be a defined (Named) type or an alias type.
+// It may also be nil, in which case the returned TypeName can be used as
+// an argument for NewNamed, which will set the TypeName's type as a
+// side-effect.
+func NewTypeName(pos syntax.Pos, pkg *Package, name string, typ Type) *TypeName {
+ return &TypeName{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
+}
+
+// NewTypeNameLazy returns a new type name like NewTypeName, but the Named
+// type it denotes is constructed lazily, by calling load when it is needed.
+func NewTypeNameLazy(pos syntax.Pos, pkg *Package, name string, load func(named *Named) (tparams []*TypeParam, underlying Type, methods []*Func)) *TypeName {
+ obj := NewTypeName(pos, pkg, name, nil)
+
+ resolve := func(_ *Context, t *Named) (*TypeParamList, Type, *methodList) {
+ tparams, underlying, methods := load(t)
+
+ switch underlying.(type) {
+ case nil, *Named:
+ panic(fmt.Sprintf("invalid underlying type %T", underlying))
+ }
+
+ return bindTParams(tparams), underlying, newMethodList(methods)
+ }
+
+ NewNamed(obj, nil, nil).resolver = resolve
+ return obj
+}
+
+// IsAlias reports whether obj is an alias name for a type.
+func (obj *TypeName) IsAlias() bool {
+ switch t := obj.typ.(type) {
+ case nil:
+ return false
+ case *Basic:
+ // unsafe.Pointer is not an alias.
+ if obj.pkg == Unsafe {
+ return false
+ }
+ // Any user-defined type name for a basic type is an alias for a
+ // basic type (because basic types are pre-declared in the Universe
+ // scope, outside any package scope), and so is any type name with
+ // a different name than the name of the basic type it refers to.
+ // Additionally, we need to look for "byte" and "rune" because they
+ // are aliases but have the same names (for better error messages).
+ return obj.pkg != nil || t.name != obj.name || t == universeByte || t == universeRune
+ case *Named:
+ return obj != t.obj
+ case *TypeParam:
+ return obj != t.obj
+ default:
+ return true
+ }
+}
+
+// A Variable represents a declared variable (including function parameters and results, and struct fields).
+type Var struct {
+ object
+ embedded bool // if set, the variable is an embedded struct field, and name is the type name
+ isField bool // var is struct field
+ used bool // set if the variable was used
+}
+
+// NewVar returns a new variable.
+// The arguments set the attributes found with all Objects.
+func NewVar(pos syntax.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}}
+}
+
+// NewParam returns a new variable representing a function parameter.
+func NewParam(pos syntax.Pos, pkg *Package, name string, typ Type) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, used: true} // parameters are always 'used'
+}
+
+// NewField returns a new variable representing a struct field.
+// For embedded fields, the name is the unqualified type name
+// under which the field is accessible.
+func NewField(pos syntax.Pos, pkg *Package, name string, typ Type, embedded bool) *Var {
+ return &Var{object: object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, embedded: embedded, isField: true}
+}
+
+// Anonymous reports whether the variable is an embedded field.
+// Same as Embedded; only present for backward-compatibility.
+func (obj *Var) Anonymous() bool { return obj.embedded }
+
+// Embedded reports whether the variable is an embedded field.
+func (obj *Var) Embedded() bool { return obj.embedded }
+
+// IsField reports whether the variable is a struct field.
+func (obj *Var) IsField() bool { return obj.isField }
+
+func (*Var) isDependency() {} // a variable may be a dependency of an initialization expression
+
+// A Func represents a declared function, concrete method, or abstract
+// (interface) method. Its Type() is always a *Signature.
+// An abstract method may belong to many interfaces due to embedding.
+type Func struct {
+ object
+ hasPtrRecv_ bool // only valid for methods that don't have a type yet; use hasPtrRecv() to read
+}
+
+// NewFunc returns a new function with the given signature, representing
+// the function's type.
+func NewFunc(pos syntax.Pos, pkg *Package, name string, sig *Signature) *Func {
+ // don't store a (typed) nil signature
+ var typ Type
+ if sig != nil {
+ typ = sig
+ }
+ return &Func{object{nil, pos, pkg, name, typ, 0, colorFor(typ), nopos}, false}
+}
+
+// FullName returns the package- or receiver-type-qualified name of
+// function or method obj.
+func (obj *Func) FullName() string {
+ var buf bytes.Buffer
+ writeFuncName(&buf, obj, nil)
+ return buf.String()
+}
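+
+// An illustrative sketch (hypothetical package with path "example.org/p";
+// the exact formatting is produced by writeFuncName below):
+//
+//	example.org/p.F      // package-level function F
+//	(example.org/p.T).M  // method M with value receiver T
+//	(*example.org/p.T).M // method M with pointer receiver *T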
+
+// Scope returns the scope of the function's body block.
+// The result is nil for imported or instantiated functions and methods
+// (but there is also no mechanism to get to an instantiated function).
+func (obj *Func) Scope() *Scope { return obj.typ.(*Signature).scope }
+
+// hasPtrRecv reports whether the receiver is of the form *T for the given method obj.
+func (obj *Func) hasPtrRecv() bool {
+ // If a method's receiver type is set, use that as the source of truth for the receiver.
+ // Caution: Checker.funcDecl (decl.go) marks a function by setting its type to an empty
+ // signature. We may reach here before the signature is fully set up: we must explicitly
+ // check if the receiver is set (we cannot just look for non-nil obj.typ).
+ if sig, _ := obj.typ.(*Signature); sig != nil && sig.recv != nil {
+ _, isPtr := deref(sig.recv.typ)
+ return isPtr
+ }
+
+ // If a method's type is not set it may be a method/function that is:
+ // 1) client-supplied (via NewFunc with no signature), or
+ // 2) internally created but not yet type-checked.
+ // For case 1) we can't do anything; the client must know what they are doing.
+ // For case 2) we can use the information gathered by the resolver.
+ return obj.hasPtrRecv_
+}
+
+func (*Func) isDependency() {} // a function may be a dependency of an initialization expression
+
+// A Label represents a declared label.
+// Labels don't have a type.
+type Label struct {
+ object
+ used bool // set if the label was used
+}
+
+// NewLabel returns a new label.
+func NewLabel(pos syntax.Pos, pkg *Package, name string) *Label {
+ return &Label{object{pos: pos, pkg: pkg, name: name, typ: Typ[Invalid], color_: black}, false}
+}
+
+// A Builtin represents a built-in function.
+// Builtins don't have a valid type.
+type Builtin struct {
+ object
+ id builtinId
+}
+
+func newBuiltin(id builtinId) *Builtin {
+ return &Builtin{object{name: predeclaredFuncs[id].name, typ: Typ[Invalid], color_: black}, id}
+}
+
+// Nil represents the predeclared value nil.
+type Nil struct {
+ object
+}
+
+func writeObject(buf *bytes.Buffer, obj Object, qf Qualifier) {
+ var tname *TypeName
+ typ := obj.Type()
+
+ switch obj := obj.(type) {
+ case *PkgName:
+ fmt.Fprintf(buf, "package %s", obj.Name())
+ if path := obj.imported.path; path != "" && path != obj.name {
+ fmt.Fprintf(buf, " (%q)", path)
+ }
+ return
+
+ case *Const:
+ buf.WriteString("const")
+
+ case *TypeName:
+ tname = obj
+ buf.WriteString("type")
+ if isTypeParam(typ) {
+ buf.WriteString(" parameter")
+ }
+
+ case *Var:
+ if obj.isField {
+ buf.WriteString("field")
+ } else {
+ buf.WriteString("var")
+ }
+
+ case *Func:
+ buf.WriteString("func ")
+ writeFuncName(buf, obj, qf)
+ if typ != nil {
+ WriteSignature(buf, typ.(*Signature), qf)
+ }
+ return
+
+ case *Label:
+ buf.WriteString("label")
+ typ = nil
+
+ case *Builtin:
+ buf.WriteString("builtin")
+ typ = nil
+
+ case *Nil:
+ buf.WriteString("nil")
+ return
+
+ default:
+ panic(fmt.Sprintf("writeObject(%T)", obj))
+ }
+
+ buf.WriteByte(' ')
+
+ // For package-level objects, qualify the name.
+ if obj.Pkg() != nil && obj.Pkg().scope.Lookup(obj.Name()) == obj {
+ writePackage(buf, obj.Pkg(), qf)
+ }
+ buf.WriteString(obj.Name())
+
+ if typ == nil {
+ return
+ }
+
+ if tname != nil {
+ switch t := typ.(type) {
+ case *Basic:
+ // Don't print anything more for basic types since there's
+ // no more information.
+ return
+ case *Named:
+ if t.TypeParams().Len() > 0 {
+ newTypeWriter(buf, qf).tParamList(t.TypeParams().list())
+ }
+ }
+ if tname.IsAlias() {
+ buf.WriteString(" =")
+ } else if t, _ := typ.(*TypeParam); t != nil {
+ typ = t.bound
+ } else {
+ // TODO(gri) should this be fromRHS for *Named?
+ typ = under(typ)
+ }
+ }
+
+ // Special handling for any: WriteType formats 'any' as 'any', which would
+ // result in the object string `type any = any` rather than `type any =
+ // interface{}`. To avoid this, swap in a different empty interface.
+ if obj == universeAny {
+ assert(Identical(typ, &emptyInterface))
+ typ = &emptyInterface
+ }
+
+ buf.WriteByte(' ')
+ WriteType(buf, typ, qf)
+}
+
+func writePackage(buf *bytes.Buffer, pkg *Package, qf Qualifier) {
+ if pkg == nil {
+ return
+ }
+ var s string
+ if qf != nil {
+ s = qf(pkg)
+ } else {
+ s = pkg.Path()
+ }
+ if s != "" {
+ buf.WriteString(s)
+ buf.WriteByte('.')
+ }
+}
+
+// ObjectString returns the string form of obj.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func ObjectString(obj Object, qf Qualifier) string {
+ var buf bytes.Buffer
+ writeObject(&buf, obj, qf)
+ return buf.String()
+}
+
+func (obj *PkgName) String() string { return ObjectString(obj, nil) }
+func (obj *Const) String() string { return ObjectString(obj, nil) }
+func (obj *TypeName) String() string { return ObjectString(obj, nil) }
+func (obj *Var) String() string { return ObjectString(obj, nil) }
+func (obj *Func) String() string { return ObjectString(obj, nil) }
+func (obj *Label) String() string { return ObjectString(obj, nil) }
+func (obj *Builtin) String() string { return ObjectString(obj, nil) }
+func (obj *Nil) String() string { return ObjectString(obj, nil) }
+
+func writeFuncName(buf *bytes.Buffer, f *Func, qf Qualifier) {
+ if f.typ != nil {
+ sig := f.typ.(*Signature)
+ if recv := sig.Recv(); recv != nil {
+ buf.WriteByte('(')
+ if _, ok := recv.Type().(*Interface); ok {
+ // gcimporter creates abstract methods of
+ // named interfaces using the interface type
+ // (not the named type) as the receiver.
+ // Don't print it in full.
+ buf.WriteString("interface")
+ } else {
+ WriteType(buf, recv.Type(), qf)
+ }
+ buf.WriteByte(')')
+ buf.WriteByte('.')
+ } else if f.pkg != nil {
+ writePackage(buf, f.pkg, qf)
+ }
+ }
+ buf.WriteString(f.name)
+}
diff --git a/src/cmd/compile/internal/types2/object_test.go b/src/cmd/compile/internal/types2/object_test.go
new file mode 100644
index 0000000..8f0303d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/object_test.go
@@ -0,0 +1,167 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "internal/testenv"
+ "strings"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+func TestIsAlias(t *testing.T) {
+ check := func(obj *TypeName, want bool) {
+ if got := obj.IsAlias(); got != want {
+ t.Errorf("%v: got IsAlias = %v; want %v", obj, got, want)
+ }
+ }
+
+ // predeclared types
+ check(Unsafe.Scope().Lookup("Pointer").(*TypeName), false)
+ for _, name := range Universe.Names() {
+ if obj, _ := Universe.Lookup(name).(*TypeName); obj != nil {
+ check(obj, name == "any" || name == "byte" || name == "rune")
+ }
+ }
+
+ // various other types
+ pkg := NewPackage("p", "p")
+ t1 := NewTypeName(nopos, pkg, "t1", nil)
+ n1 := NewNamed(t1, new(Struct), nil)
+ t5 := NewTypeName(nopos, pkg, "t5", nil)
+ NewTypeParam(t5, nil)
+ for _, test := range []struct {
+ name *TypeName
+ alias bool
+ }{
+ {NewTypeName(nopos, nil, "t0", nil), false}, // no type yet
+ {NewTypeName(nopos, pkg, "t0", nil), false}, // no type yet
+ {t1, false}, // type name refers to named type and vice versa
+ {NewTypeName(nopos, nil, "t2", NewInterfaceType(nil, nil)), true}, // type name refers to unnamed type
+ {NewTypeName(nopos, pkg, "t3", n1), true}, // type name refers to named type with different type name
+ {NewTypeName(nopos, nil, "t4", Typ[Int32]), true}, // type name refers to basic type with different name
+ {NewTypeName(nopos, nil, "int32", Typ[Int32]), false}, // type name refers to basic type with same name
+ {NewTypeName(nopos, pkg, "int32", Typ[Int32]), true}, // type name is declared in user-defined package (outside Universe)
+ {NewTypeName(nopos, nil, "rune", Typ[Rune]), true}, // type name refers to basic type rune which is an alias already
+ {t5, false}, // type name refers to type parameter and vice versa
+ } {
+ check(test.name, test.alias)
+ }
+}
+
+// TestEmbeddedMethod checks that an embedded method is represented by
+// the same Func Object as the original method. See also issue #34421.
+func TestEmbeddedMethod(t *testing.T) {
+ const src = `package p; type I interface { error }`
+
+ // type-check src
+ f, err := parseSrc("", src)
+ if err != nil {
+ t.Fatalf("parse failed: %s", err)
+ }
+ var conf Config
+ pkg, err := conf.Check(f.PkgName.Value, []*syntax.File{f}, nil)
+ if err != nil {
+ t.Fatalf("typecheck failed: %s", err)
+ }
+
+ // get original error.Error method
+ eface := Universe.Lookup("error")
+ orig, _, _ := LookupFieldOrMethod(eface.Type(), false, nil, "Error")
+ if orig == nil {
+ t.Fatalf("original error.Error not found")
+ }
+
+ // get embedded error.Error method
+ iface := pkg.Scope().Lookup("I")
+ embed, _, _ := LookupFieldOrMethod(iface.Type(), false, nil, "Error")
+ if embed == nil {
+ t.Fatalf("embedded error.Error not found")
+ }
+
+ // original and embedded Error object should be identical
+ if orig != embed {
+ t.Fatalf("%s (%p) != %s (%p)", orig, orig, embed, embed)
+ }
+}
+
+var testObjects = []struct {
+ src string
+ obj string
+ want string
+}{
+ {"import \"io\"; var r io.Reader", "r", "var p.r io.Reader"},
+
+ {"const c = 1.2", "c", "const p.c untyped float"},
+ {"const c float64 = 3.14", "c", "const p.c float64"},
+
+ {"type t struct{f int}", "t", "type p.t struct{f int}"},
+ {"type t func(int)", "t", "type p.t func(int)"},
+ {"type t[P any] struct{f P}", "t", "type p.t[P any] struct{f P}"},
+ {"type t[P any] struct{f P}", "t.P", "type parameter P any"},
+ {"type C interface{m()}; type t[P C] struct{}", "t.P", "type parameter P p.C"},
+
+ {"type t = struct{f int}", "t", "type p.t = struct{f int}"},
+ {"type t = func(int)", "t", "type p.t = func(int)"},
+
+ {"var v int", "v", "var p.v int"},
+
+ {"func f(int) string", "f", "func p.f(int) string"},
+ {"func g[P any](x P){}", "g", "func p.g[P any](x P)"},
+ {"func g[P interface{~int}](x P){}", "g.P", "type parameter P interface{~int}"},
+ {"", "any", "type any = interface{}"},
+}
+
+func TestObjectString(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ for _, test := range testObjects {
+ src := "package p; " + test.src
+ pkg, err := makePkg(src)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+
+ names := strings.Split(test.obj, ".")
+ if len(names) != 1 && len(names) != 2 {
+ t.Errorf("%s: invalid object path %s", test.src, test.obj)
+ continue
+ }
+ _, obj := pkg.Scope().LookupParent(names[0], nopos)
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, names[0])
+ continue
+ }
+ if len(names) == 2 {
+ if typ, ok := obj.Type().(interface{ TypeParams() *TypeParamList }); ok {
+ obj = lookupTypeParamObj(typ.TypeParams(), names[1])
+ if obj == nil {
+ t.Errorf("%s: %s not found", test.src, test.obj)
+ continue
+ }
+ } else {
+ t.Errorf("%s: %s has no type parameters", test.src, names[0])
+ continue
+ }
+ }
+
+ if got := obj.String(); got != test.want {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.want)
+ }
+ }
+}
+
+func lookupTypeParamObj(list *TypeParamList, name string) Object {
+ for i := 0; i < list.Len(); i++ {
+ tpar := list.At(i)
+ if tpar.Obj().Name() == name {
+ return tpar.Obj()
+ }
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/objset.go b/src/cmd/compile/internal/types2/objset.go
new file mode 100644
index 0000000..88ff0af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/objset.go
@@ -0,0 +1,31 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements objsets.
+//
+// An objset is similar to a Scope but objset elements
+// are identified by their unique id, instead of their
+// object name.
+
+package types2
+
+// An objset is a set of objects identified by their unique id.
+// The zero value for objset is a ready-to-use empty objset.
+type objset map[string]Object // initialized lazily
+
+// insert attempts to insert an object obj into objset s.
+// If s already contains an alternative object alt with
+// the same name, insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj and returns nil.
+func (s *objset) insert(obj Object) Object {
+ id := obj.Id()
+ if alt := (*s)[id]; alt != nil {
+ return alt
+ }
+ if *s == nil {
+ *s = make(map[string]Object)
+ }
+ (*s)[id] = obj
+ return nil
+}
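+
+// An illustrative sketch (hypothetical; nopos and pkg assumed): insert reports
+// a collision by returning the object already stored under the same id:
+//
+//	var s objset
+//	a := NewVar(nopos, pkg, "x", Typ[Int])
+//	b := NewVar(nopos, pkg, "x", Typ[String]) // same Id as a
+//	_ = s.insert(a) // nil: a was added
+//	_ = s.insert(b) // a: the id is taken, s is left unchanged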
diff --git a/src/cmd/compile/internal/types2/operand.go b/src/cmd/compile/internal/types2/operand.go
new file mode 100644
index 0000000..fce9a11
--- /dev/null
+++ b/src/cmd/compile/internal/types2/operand.go
@@ -0,0 +1,389 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file defines operands and associated operations.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "go/token"
+)
+
+// An operandMode specifies the (addressing) mode of an operand.
+type operandMode byte
+
+const (
+ invalid operandMode = iota // operand is invalid
+ novalue // operand represents no value (result of a function call w/o result)
+ builtin // operand is a built-in function
+ typexpr // operand is a type
+ constant_ // operand is a constant; the operand's typ is a Basic type
+ variable // operand is an addressable variable
+ mapindex // operand is a map index expression (acts like a variable on lhs, commaok on rhs of an assignment)
+ value // operand is a computed value
+ nilvalue // operand is the nil value
+ commaok // like value, but operand may be used in a comma,ok expression
+ commaerr // like commaok, but second value is error, not boolean
+ cgofunc // operand is a cgo function
+)
+
+var operandModeString = [...]string{
+ invalid: "invalid operand",
+ novalue: "no value",
+ builtin: "built-in",
+ typexpr: "type",
+ constant_: "constant",
+ variable: "variable",
+ mapindex: "map index expression",
+ value: "value",
+ nilvalue: "nil",
+ commaok: "comma, ok expression",
+ commaerr: "comma, error expression",
+ cgofunc: "cgo function",
+}
+
+// An operand represents an intermediate value during type checking.
+// Operands have an (addressing) mode, the expression evaluating to
+// the operand, the operand's type, a value for constants, and an id
+// for built-in functions.
+// The zero value of operand is a ready to use invalid operand.
+//
+type operand struct {
+ mode operandMode
+ expr syntax.Expr
+ typ Type
+ val constant.Value
+ id builtinId
+}
+
+// Pos returns the position of the expression corresponding to x.
+// If x is invalid the position is nopos.
+//
+func (x *operand) Pos() syntax.Pos {
+ // x.expr may not be set if x is invalid
+ if x.expr == nil {
+ return nopos
+ }
+ return x.expr.Pos()
+}
+
+// Operand string formats
+// (not all "untyped" cases can appear due to the type system,
+// but they fall out naturally here)
+//
+// mode format
+//
+// invalid <expr> ( <mode> )
+// novalue <expr> ( <mode> )
+// builtin <expr> ( <mode> )
+// typexpr <expr> ( <mode> )
+//
+// constant <expr> (<untyped kind> <mode> )
+// constant <expr> ( <mode> of type <typ>)
+// constant <expr> (<untyped kind> <mode> <val> )
+// constant <expr> ( <mode> <val> of type <typ>)
+//
+// variable <expr> (<untyped kind> <mode> )
+// variable <expr> ( <mode> of type <typ>)
+//
+// mapindex <expr> (<untyped kind> <mode> )
+// mapindex <expr> ( <mode> of type <typ>)
+//
+// value <expr> (<untyped kind> <mode> )
+// value <expr> ( <mode> of type <typ>)
+//
+// nilvalue untyped nil
+// nilvalue nil ( of type <typ>)
+//
+// commaok <expr> (<untyped kind> <mode> )
+// commaok <expr> ( <mode> of type <typ>)
+//
+// commaerr <expr> (<untyped kind> <mode> )
+// commaerr <expr> ( <mode> of type <typ>)
+//
+// cgofunc <expr> (<untyped kind> <mode> )
+// cgofunc <expr> ( <mode> of type <typ>)
+//
+func operandString(x *operand, qf Qualifier) string {
+ // special-case nil
+ if x.mode == nilvalue {
+ switch x.typ {
+ case nil, Typ[Invalid]:
+ return "nil (with invalid type)"
+ case Typ[UntypedNil]:
+ return "nil"
+ default:
+ return fmt.Sprintf("nil (of type %s)", TypeString(x.typ, qf))
+ }
+ }
+
+ var buf bytes.Buffer
+
+ var expr string
+ if x.expr != nil {
+ expr = syntax.String(x.expr)
+ } else {
+ switch x.mode {
+ case builtin:
+ expr = predeclaredFuncs[x.id].name
+ case typexpr:
+ expr = TypeString(x.typ, qf)
+ case constant_:
+ expr = x.val.String()
+ }
+ }
+
+ // <expr> (
+ if expr != "" {
+ buf.WriteString(expr)
+ buf.WriteString(" (")
+ }
+
+ // <untyped kind>
+ hasType := false
+ switch x.mode {
+ case invalid, novalue, builtin, typexpr:
+ // no type
+ default:
+ // should have a type, but be cautious (don't crash during printing)
+ if x.typ != nil {
+ if isUntyped(x.typ) {
+ buf.WriteString(x.typ.(*Basic).name)
+ buf.WriteByte(' ')
+ break
+ }
+ hasType = true
+ }
+ }
+
+ // <mode>
+ buf.WriteString(operandModeString[x.mode])
+
+ // <val>
+ if x.mode == constant_ {
+ if s := x.val.String(); s != expr {
+ buf.WriteByte(' ')
+ buf.WriteString(s)
+ }
+ }
+
+ // <typ>
+ if hasType {
+ if x.typ != Typ[Invalid] {
+ var intro string
+ if isGeneric(x.typ) {
+ intro = " of parameterized type "
+ } else {
+ intro = " of type "
+ }
+ buf.WriteString(intro)
+ WriteType(&buf, x.typ, qf)
+ if tpar, _ := x.typ.(*TypeParam); tpar != nil {
+ buf.WriteString(" constrained by ")
+ WriteType(&buf, tpar.bound, qf) // do not compute interface type sets here
+ }
+ } else {
+ buf.WriteString(" with invalid type")
+ }
+ }
+
+ // )
+ if expr != "" {
+ buf.WriteByte(')')
+ }
+
+ return buf.String()
+}
+
+func (x *operand) String() string {
+ return operandString(x, nil)
+}
+
+// setConst sets x to the untyped constant for literal lit.
+func (x *operand) setConst(k syntax.LitKind, lit string) {
+ var kind BasicKind
+ switch k {
+ case syntax.IntLit:
+ kind = UntypedInt
+ case syntax.FloatLit:
+ kind = UntypedFloat
+ case syntax.ImagLit:
+ kind = UntypedComplex
+ case syntax.RuneLit:
+ kind = UntypedRune
+ case syntax.StringLit:
+ kind = UntypedString
+ default:
+ unreachable()
+ }
+
+ val := constant.MakeFromLiteral(lit, kind2tok[k], 0)
+ if val.Kind() == constant.Unknown {
+ x.mode = invalid
+ x.typ = Typ[Invalid]
+ return
+ }
+ x.mode = constant_
+ x.typ = Typ[kind]
+ x.val = val
+}
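+
+// An illustrative sketch: for a valid integer literal, setConst produces an
+// untyped constant operand:
+//
+//	var x operand
+//	x.setConst(syntax.IntLit, "42")
+//	// x.mode == constant_, x.typ == Typ[UntypedInt], x.val holds 42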
+
+// isNil reports whether x is a typed or the untyped nil value.
+func (x *operand) isNil() bool { return x.mode == nilvalue }
+
+// assignableTo reports whether x is assignable to a variable of type T. If the
+// result is false and a non-nil reason is provided, *reason may be set to a
+// more detailed explanation of the failure. The returned error code
+// is only valid if the (first) result is false. The check parameter may be nil
+// if assignableTo is invoked through an exported API call, i.e., when all
+// methods have been type-checked.
+func (x *operand) assignableTo(check *Checker, T Type, reason *string) (bool, errorCode) {
+ if x.mode == invalid || T == Typ[Invalid] {
+ return true, 0 // avoid spurious errors
+ }
+
+ V := x.typ
+
+ // x's type is identical to T
+ if Identical(V, T) {
+ return true, 0
+ }
+
+ Vu := under(V)
+ Tu := under(T)
+ Vp, _ := V.(*TypeParam)
+ Tp, _ := T.(*TypeParam)
+
+ // x is an untyped value representable by a value of type T.
+ if isUntyped(Vu) {
+ assert(Vp == nil)
+ if Tp != nil {
+ // T is a type parameter: x is assignable to T if it is
+ // representable by each specific type in the type set of T.
+ return Tp.is(func(t *term) bool {
+ if t == nil {
+ return false
+ }
+ // A term may be a tilde term but the underlying
+ // type of an untyped value doesn't change so we
+ // don't need to do anything special.
+ newType, _, _ := check.implicitTypeAndValue(x, t.typ)
+ return newType != nil
+ }), _IncompatibleAssign
+ }
+ newType, _, _ := check.implicitTypeAndValue(x, T)
+ return newType != nil, _IncompatibleAssign
+ }
+ // Vu is typed
+
+ // x's type V and T have identical underlying types
+ // and at least one of V or T is not a named type
+ // and neither V nor T is a type parameter.
+ if Identical(Vu, Tu) && (!hasName(V) || !hasName(T)) && Vp == nil && Tp == nil {
+ return true, 0
+ }
+
+ // T is an interface type and x implements T and T is not a type parameter.
+ // Also handle the case where T is a pointer to an interface.
+ if _, ok := Tu.(*Interface); ok && Tp == nil || isInterfacePtr(Tu) {
+ if err := check.implements(V, T); err != nil {
+ if reason != nil {
+ *reason = err.Error()
+ }
+ return false, _InvalidIfaceAssign
+ }
+ return true, 0
+ }
+
+ // If V is an interface, check if a missing type assertion is the problem.
+ if Vi, _ := Vu.(*Interface); Vi != nil && Vp == nil {
+ if check.implements(T, V) == nil {
+ // T implements V, so give hint about type assertion.
+ if reason != nil {
+ *reason = "need type assertion"
+ }
+ return false, _IncompatibleAssign
+ }
+ }
+
+ // x is a bidirectional channel value, T is a channel
+ // type, x's type V and T have identical element types,
+ // and at least one of V or T is not a named type.
+ if Vc, ok := Vu.(*Chan); ok && Vc.dir == SendRecv {
+ if Tc, ok := Tu.(*Chan); ok && Identical(Vc.elem, Tc.elem) {
+ return !hasName(V) || !hasName(T), _InvalidChanAssign
+ }
+ }
+
+ // optimization: if we don't have type parameters, we're done
+ if Vp == nil && Tp == nil {
+ return false, _IncompatibleAssign
+ }
+
+ errorf := func(format string, args ...interface{}) {
+ if check != nil && reason != nil {
+ msg := check.sprintf(format, args...)
+ if *reason != "" {
+ msg += "\n\t" + *reason
+ }
+ *reason = msg
+ }
+ }
+
+ // x's type V is not a named type and T is a type parameter, and
+ // x is assignable to each specific type in T's type set.
+ if !hasName(V) && Tp != nil {
+ ok := false
+ code := _IncompatibleAssign
+ Tp.is(func(T *term) bool {
+ if T == nil {
+ return false // no specific types
+ }
+ ok, code = x.assignableTo(check, T.typ, reason)
+ if !ok {
+ errorf("cannot assign %s to %s (in %s)", x.typ, T.typ, Tp)
+ return false
+ }
+ return true
+ })
+ return ok, code
+ }
+
+ // x's type V is a type parameter and T is not a named type,
+ // and values x' of each specific type in V's type set are
+ // assignable to T.
+ if Vp != nil && !hasName(T) {
+ x := *x // don't clobber outer x
+ ok := false
+ code := _IncompatibleAssign
+ Vp.is(func(V *term) bool {
+ if V == nil {
+ return false // no specific types
+ }
+ x.typ = V.typ
+ ok, code = x.assignableTo(check, T, reason)
+ if !ok {
+ errorf("cannot assign %s (in %s) to %s", V.typ, Vp, T)
+ return false
+ }
+ return true
+ })
+ return ok, code
+ }
+
+ return false, _IncompatibleAssign
+}
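+
+// An illustrative sketch of a few of these rules, via the exported
+// AssignableTo wrapper (defined elsewhere in this package, not in this file):
+//
+//	AssignableTo(Typ[Int], Typ[Int])                                       // true: identical types
+//	AssignableTo(NewChan(SendRecv, Typ[Int]), NewChan(SendOnly, Typ[Int])) // true: bidirectional to send-only channel
+//	AssignableTo(Typ[Int], Typ[Int64])                                     // false: distinct basic types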
+
+// kind2tok translates syntax.LitKinds into token.Tokens.
+var kind2tok = [...]token.Token{
+ syntax.IntLit: token.INT,
+ syntax.FloatLit: token.FLOAT,
+ syntax.ImagLit: token.IMAG,
+ syntax.RuneLit: token.CHAR,
+ syntax.StringLit: token.STRING,
+}
diff --git a/src/cmd/compile/internal/types2/package.go b/src/cmd/compile/internal/types2/package.go
new file mode 100644
index 0000000..8044e7e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/package.go
@@ -0,0 +1,80 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "fmt"
+)
+
+// A Package describes a Go package.
+type Package struct {
+ path string
+ name string
+ scope *Scope
+ imports []*Package
+ height int
+ complete bool
+ fake bool // scope lookup errors are silently dropped if package is fake (internal use only)
+ cgo bool // uses of this package will be rewritten into uses of declarations from _cgo_gotypes.go
+}
+
+// NewPackage returns a new Package for the given package path and name.
+// The package is not complete and contains no explicit imports.
+func NewPackage(path, name string) *Package {
+ return NewPackageHeight(path, name, 0)
+}
+
+// NewPackageHeight is like NewPackage, but allows specifying the
+// package's height.
+func NewPackageHeight(path, name string, height int) *Package {
+ scope := NewScope(Universe, nopos, nopos, fmt.Sprintf("package %q", path))
+ return &Package{path: path, name: name, scope: scope, height: height}
+}
+
+// Path returns the package path.
+func (pkg *Package) Path() string { return pkg.path }
+
+// Name returns the package name.
+func (pkg *Package) Name() string { return pkg.name }
+
+// Height returns the package height.
+func (pkg *Package) Height() int { return pkg.height }
+
+// SetName sets the package name.
+func (pkg *Package) SetName(name string) { pkg.name = name }
+
+// Scope returns the (complete or incomplete) package scope
+// holding the objects declared at package level (TypeNames,
+// Consts, Vars, and Funcs).
+// For a nil pkg receiver, Scope returns the Universe scope.
+func (pkg *Package) Scope() *Scope {
+ if pkg != nil {
+ return pkg.scope
+ }
+ return Universe
+}
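+
+// An illustrative sketch (hypothetical path and name): a freshly created
+// package has an empty scope below Universe, and a nil *Package falls back
+// to Universe:
+//
+//	pkg := NewPackage("example.org/p", "p")
+//	_ = pkg.Scope().Len() == 0              // no declarations yet
+//	_ = (*Package)(nil).Scope() == Universe // nil receiver returns Universe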
+
+// A package is complete if its scope contains (at least) all
+// exported objects; otherwise it is incomplete.
+func (pkg *Package) Complete() bool { return pkg.complete }
+
+// MarkComplete marks a package as complete.
+func (pkg *Package) MarkComplete() { pkg.complete = true }
+
+// Imports returns the list of packages directly imported by
+// pkg; the list is in source order.
+//
+// If pkg was loaded from export data, Imports includes packages that
+// provide package-level objects referenced by pkg. This may be more or
+// less than the set of packages directly imported by pkg's source code.
+func (pkg *Package) Imports() []*Package { return pkg.imports }
+
+// SetImports sets the list of explicitly imported packages to list.
+// It is the caller's responsibility to make sure list elements are unique.
+func (pkg *Package) SetImports(list []*Package) { pkg.imports = list }
+
+func (pkg *Package) String() string {
+ return fmt.Sprintf("package %s (%q)", pkg.name, pkg.path)
+}
diff --git a/src/cmd/compile/internal/types2/pointer.go b/src/cmd/compile/internal/types2/pointer.go
new file mode 100644
index 0000000..63055fc
--- /dev/null
+++ b/src/cmd/compile/internal/types2/pointer.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Pointer represents a pointer type.
+type Pointer struct {
+ base Type // element type
+}
+
+// NewPointer returns a new pointer type for the given element (base) type.
+func NewPointer(elem Type) *Pointer { return &Pointer{base: elem} }
+
+// Elem returns the element type for the given pointer p.
+func (p *Pointer) Elem() Type { return p.base }
+
+func (p *Pointer) Underlying() Type { return p }
+func (p *Pointer) String() string { return TypeString(p, nil) }
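+
+// An illustrative sketch:
+//
+//	p := NewPointer(Typ[Int])
+//	_ = p.Elem() == Typ[Int] // true
+//	_ = p.String()           // "*int"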
diff --git a/src/cmd/compile/internal/types2/predicates.go b/src/cmd/compile/internal/types2/predicates.go
new file mode 100644
index 0000000..ba25934
--- /dev/null
+++ b/src/cmd/compile/internal/types2/predicates.go
@@ -0,0 +1,467 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements commonly used type predicates.
+
+package types2
+
+// The isX predicates below report whether t is an X.
+// If t is a type parameter the result is false; i.e.,
+// these predicates don't look inside a type parameter.
+
+func isBoolean(t Type) bool { return isBasic(t, IsBoolean) }
+func isInteger(t Type) bool { return isBasic(t, IsInteger) }
+func isUnsigned(t Type) bool { return isBasic(t, IsUnsigned) }
+func isFloat(t Type) bool { return isBasic(t, IsFloat) }
+func isComplex(t Type) bool { return isBasic(t, IsComplex) }
+func isNumeric(t Type) bool { return isBasic(t, IsNumeric) }
+func isString(t Type) bool { return isBasic(t, IsString) }
+func isIntegerOrFloat(t Type) bool { return isBasic(t, IsInteger|IsFloat) }
+func isConstType(t Type) bool { return isBasic(t, IsConstType) }
+
+// isBasic reports whether under(t) is a basic type with the specified info.
+// If t is a type parameter the result is false; i.e.,
+// isBasic does not look inside a type parameter.
+func isBasic(t Type, info BasicInfo) bool {
+ u, _ := under(t).(*Basic)
+ return u != nil && u.info&info != 0
+}
+
+// The allX predicates below report whether t is an X.
+// If t is a type parameter the result is true if isX is true
+// for all specific types of the type parameter's type set.
+// allX is an optimized version of isX(coreType(t)) (which
+// is the same as underIs(t, isX)).
+
+func allBoolean(t Type) bool { return allBasic(t, IsBoolean) }
+func allInteger(t Type) bool { return allBasic(t, IsInteger) }
+func allUnsigned(t Type) bool { return allBasic(t, IsUnsigned) }
+func allNumeric(t Type) bool { return allBasic(t, IsNumeric) }
+func allString(t Type) bool { return allBasic(t, IsString) }
+func allOrdered(t Type) bool { return allBasic(t, IsOrdered) }
+func allNumericOrString(t Type) bool { return allBasic(t, IsNumeric|IsString) }
+
+// allBasic reports whether under(t) is a basic type with the specified info.
+// If t is a type parameter, the result is true if isBasic(t, info) is true
+// for all specific types of the type parameter's type set.
+// allBasic(t, info) is an optimized version of isBasic(coreType(t), info).
+func allBasic(t Type, info BasicInfo) bool {
+ if tpar, _ := t.(*TypeParam); tpar != nil {
+ return tpar.is(func(t *term) bool { return t != nil && isBasic(t.typ, info) })
+ }
+ return isBasic(t, info)
+}
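+
+// An illustrative sketch (hypothetical type parameter P with constraint
+// interface{ ~int | ~int64 }):
+//
+//	isInteger(P)  // false: isX predicates never look inside a type parameter
+//	allInteger(P) // true: every term of P's type set is an integer type
+//	allString(P)  // false: no term of P's type set is a string type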
+
+// hasName reports whether t has a name. This includes
+// predeclared types, defined types, and type parameters.
+// hasName may be called with types that are not fully set up.
+func hasName(t Type) bool {
+ switch t.(type) {
+ case *Basic, *Named, *TypeParam:
+ return true
+ }
+ return false
+}
+
+// isTyped reports whether t is typed; i.e., not an untyped
+// constant or boolean. isTyped may be called with types that
+// are not fully set up.
+func isTyped(t Type) bool {
+ // isTyped is called with types that are not fully
+ // set up. Must not call under()!
+ b, _ := t.(*Basic)
+ return b == nil || b.info&IsUntyped == 0
+}
+
+// isUntyped(t) is the same as !isTyped(t).
+func isUntyped(t Type) bool {
+ return !isTyped(t)
+}
+
+// IsInterface reports whether t is an interface type.
+func IsInterface(t Type) bool {
+ _, ok := under(t).(*Interface)
+ return ok
+}
+
+// isTypeParam reports whether t is a type parameter.
+func isTypeParam(t Type) bool {
+ _, ok := t.(*TypeParam)
+ return ok
+}
+
+// isGeneric reports whether a type is a generic, uninstantiated type
+// (generic signatures are not included).
+// TODO(gri) should we include signatures or assert that they are not present?
+func isGeneric(t Type) bool {
+ // A parameterized type is only generic if it doesn't have an instantiation already.
+ named, _ := t.(*Named)
+ return named != nil && named.obj != nil && named.targs == nil && named.TypeParams() != nil
+}
+
+// Comparable reports whether values of type T are comparable.
+func Comparable(T Type) bool {
+ return comparable(T, true, nil, nil)
+}
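+
+// An illustrative sketch:
+//
+//	Comparable(Typ[Int])                      // true
+//	Comparable(NewSlice(Typ[Int]))            // false: slices are not comparable
+//	Comparable(NewMap(Typ[String], Typ[Int])) // false: maps are not comparable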
+
+// If dynamic is set, non-type parameter interfaces are always comparable.
+// If reportf != nil, it may be used to report why T is not comparable.
+func comparable(T Type, dynamic bool, seen map[Type]bool, reportf func(string, ...interface{})) bool {
+ if seen[T] {
+ return true
+ }
+ if seen == nil {
+ seen = make(map[Type]bool)
+ }
+ seen[T] = true
+
+ switch t := under(T).(type) {
+ case *Basic:
+ // assume invalid types to be comparable
+ // to avoid follow-up errors
+ return t.kind != UntypedNil
+ case *Pointer, *Chan:
+ return true
+ case *Struct:
+ for _, f := range t.fields {
+ if !comparable(f.typ, dynamic, seen, nil) {
+ if reportf != nil {
+ reportf("struct containing %s cannot be compared", f.typ)
+ }
+ return false
+ }
+ }
+ return true
+ case *Array:
+ if !comparable(t.elem, dynamic, seen, nil) {
+ if reportf != nil {
+ reportf("%s cannot be compared", t)
+ }
+ return false
+ }
+ return true
+ case *Interface:
+ return dynamic && !isTypeParam(T) || t.typeSet().IsComparable(seen)
+ }
+ return false
+}
+
+// hasNil reports whether type t includes the nil value.
+func hasNil(t Type) bool {
+ switch u := under(t).(type) {
+ case *Basic:
+ return u.kind == UnsafePointer
+ case *Slice, *Pointer, *Signature, *Map, *Chan:
+ return true
+ case *Interface:
+ return !isTypeParam(t) || u.typeSet().underIs(func(u Type) bool {
+ return u != nil && hasNil(u)
+ })
+ }
+ return false
+}
+
+// An ifacePair is a node in a stack of interface type pairs compared for identity.
+type ifacePair struct {
+ x, y *Interface
+ prev *ifacePair
+}
+
+func (p *ifacePair) identical(q *ifacePair) bool {
+ return p.x == q.x && p.y == q.y || p.x == q.y && p.y == q.x
+}
+
+// For changes to this code, the corresponding changes should be made to unifier.nify.
+func identical(x, y Type, cmpTags bool, p *ifacePair) bool {
+ if x == y {
+ return true
+ }
+
+ switch x := x.(type) {
+ case *Basic:
+ // Basic types are singletons except for the rune and byte
+ // aliases, thus we cannot solely rely on the x == y check
+ // above. See also comment in TypeName.IsAlias.
+ if y, ok := y.(*Basic); ok {
+ return x.kind == y.kind
+ }
+
+ case *Array:
+ // Two array types are identical if they have identical element types
+ // and the same array length.
+ if y, ok := y.(*Array); ok {
+ // If one or both array lengths are unknown (< 0) due to some error,
+ // assume they are the same to avoid spurious follow-on errors.
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && identical(x.elem, y.elem, cmpTags, p)
+ }
+
+ case *Slice:
+ // Two slice types are identical if they have identical element types.
+ if y, ok := y.(*Slice); ok {
+ return identical(x.elem, y.elem, cmpTags, p)
+ }
+
+ case *Struct:
+ // Two struct types are identical if they have the same sequence of fields,
+ // and if corresponding fields have the same names, and identical types,
+ // and identical tags. Two embedded fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ if x.NumFields() == y.NumFields() {
+ for i, f := range x.fields {
+ g := y.fields[i]
+ if f.embedded != g.embedded ||
+ cmpTags && x.Tag(i) != y.Tag(i) ||
+ !f.sameId(g.pkg, g.name) ||
+ !identical(f.typ, g.typ, cmpTags, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types are identical if they have identical base types.
+ if y, ok := y.(*Pointer); ok {
+ return identical(x.base, y.base, cmpTags, p)
+ }
+
+ case *Tuple:
+ // Two tuple types are identical if they have the same number of elements
+ // and corresponding elements have identical types.
+ if y, ok := y.(*Tuple); ok {
+ if x.Len() == y.Len() {
+ if x != nil {
+ for i, v := range x.vars {
+ w := y.vars[i]
+ if !identical(v.typ, w.typ, cmpTags, p) {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ }
+
+ case *Signature:
+ y, _ := y.(*Signature)
+ if y == nil {
+ return false
+ }
+
+ // Two function types are identical if they have the same number of
+ // parameters and result values, corresponding parameter and result types
+ // are identical, and either both functions are variadic or neither is.
+ // Parameter and result names are not required to match, and type
+ // parameters are considered identical modulo renaming.
+
+ if x.TypeParams().Len() != y.TypeParams().Len() {
+ return false
+ }
+
+ // In the case of generic signatures, we will substitute in yparams and
+ // yresults.
+ yparams := y.params
+ yresults := y.results
+
+ if x.TypeParams().Len() > 0 {
+ // We must ignore type parameter names when comparing x and y. The
+ // easiest way to do this is to substitute x's type parameters for y's.
+ xtparams := x.TypeParams().list()
+ ytparams := y.TypeParams().list()
+
+ var targs []Type
+ for i := range xtparams {
+ targs = append(targs, x.TypeParams().At(i))
+ }
+ smap := makeSubstMap(ytparams, targs)
+
+ var check *Checker // ok to call subst on a nil *Checker
+
+ // Constraints must be pair-wise identical, after substitution.
+ for i, xtparam := range xtparams {
+ ybound := check.subst(nopos, ytparams[i].bound, smap, nil)
+ if !identical(xtparam.bound, ybound, cmpTags, p) {
+ return false
+ }
+ }
+
+ yparams = check.subst(nopos, y.params, smap, nil).(*Tuple)
+ yresults = check.subst(nopos, y.results, smap, nil).(*Tuple)
+ }
+
+ return x.variadic == y.variadic &&
+ identical(x.params, yparams, cmpTags, p) &&
+ identical(x.results, yresults, cmpTags, p)
+
+ case *Union:
+ if y, _ := y.(*Union); y != nil {
+ // TODO(rfindley): can this be reached during type checking? If so,
+ // consider passing a type set map.
+ unionSets := make(map[*Union]*_TypeSet)
+ xset := computeUnionTypeSet(nil, unionSets, nopos, x)
+ yset := computeUnionTypeSet(nil, unionSets, nopos, y)
+ return xset.terms.equal(yset.terms)
+ }
+
+ case *Interface:
+ // Two interface types are identical if they describe the same type sets.
+ // With the existing implementation restriction, this simplifies to:
+ //
+ // Two interface types are identical if they have the same set of methods with
+ // the same names and identical function types, and if any type restrictions
+ // are the same. Lower-case method names from different packages are always
+ // different. The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
+ if !xset.terms.equal(yset.terms) {
+ return false
+ }
+ a := xset.methods
+ b := yset.methods
+ if len(a) == len(b) {
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{x, y, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ if debug {
+ assertSortedMethods(a)
+ assertSortedMethods(b)
+ }
+ for i, f := range a {
+ g := b[i]
+ if f.Id() != g.Id() || !identical(f.typ, g.typ, cmpTags, q) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Map:
+ // Two map types are identical if they have identical key and value types.
+ if y, ok := y.(*Map); ok {
+ return identical(x.key, y.key, cmpTags, p) && identical(x.elem, y.elem, cmpTags, p)
+ }
+
+ case *Chan:
+ // Two channel types are identical if they have identical value types
+ // and the same direction.
+ if y, ok := y.(*Chan); ok {
+ return x.dir == y.dir && identical(x.elem, y.elem, cmpTags, p)
+ }
+
+ case *Named:
+ // Two named types are identical if their type names originate
+ // in the same type declaration.
+ if y, ok := y.(*Named); ok {
+ xargs := x.TypeArgs().list()
+ yargs := y.TypeArgs().list()
+
+ if len(xargs) != len(yargs) {
+ return false
+ }
+
+ if len(xargs) > 0 {
+ // Instances are identical if their original type and type arguments
+ // are identical.
+ if !Identical(x.orig, y.orig) {
+ return false
+ }
+ for i, xa := range xargs {
+ if !Identical(xa, yargs[i]) {
+ return false
+ }
+ }
+ return true
+ }
+
+ // TODO(gri) Why is x == y not sufficient? And if it is,
+ // we can just return false here because x == y
+ // is caught in the very beginning of this function.
+ return x.obj == y.obj
+ }
+
+ case *TypeParam:
+ // nothing to do (x and y being equal is caught in the very beginning of this function)
+
+ case nil:
+ // avoid a crash in case of nil type
+
+ default:
+ unreachable()
+ }
+
+ return false
+}
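+
+// As a rough sketch of the rules above: parameter names never matter, struct
+// tags matter only when cmpTags is set, and type parameters are compared
+// modulo renaming. For example, assuming the obvious declarations:
+//
+//	func(x int) string   and   func(y int) string    // identical
+//	struct{ F int "a" }  and   struct{ F int "b" }   // identical unless cmpTags is set
+//	func F[P any](x P)   and   func G[Q any](y Q)    // identical signatures: P and Q differ only by name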
+
+// identicalInstance reports if two type instantiations are identical.
+// Instantiations are identical if their origin and type arguments are
+// identical.
+func identicalInstance(xorig Type, xargs []Type, yorig Type, yargs []Type) bool {
+ if len(xargs) != len(yargs) {
+ return false
+ }
+
+ for i, xa := range xargs {
+ if !Identical(xa, yargs[i]) {
+ return false
+ }
+ }
+
+ return Identical(xorig, yorig)
+}
+
+// Default returns the default "typed" type for an "untyped" type;
+// it returns the incoming type for all other types. The default type
+// for untyped nil is untyped nil.
+func Default(t Type) Type {
+ if t, ok := t.(*Basic); ok {
+ switch t.kind {
+ case UntypedBool:
+ return Typ[Bool]
+ case UntypedInt:
+ return Typ[Int]
+ case UntypedRune:
+ return universeRune // use 'rune' name
+ case UntypedFloat:
+ return Typ[Float64]
+ case UntypedComplex:
+ return Typ[Complex128]
+ case UntypedString:
+ return Typ[String]
+ }
+ }
+ return t
+}
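+
+// A few example results, using the exported Typ table (untyped nil and all
+// already-typed types are returned unchanged):
+//
+//	Default(Typ[UntypedInt])    // Typ[Int]
+//	Default(Typ[UntypedFloat])  // Typ[Float64]
+//	Default(Typ[String])        // Typ[String] (already typed)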
diff --git a/src/cmd/compile/internal/types2/resolver.go b/src/cmd/compile/internal/types2/resolver.go
new file mode 100644
index 0000000..10dd86f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/resolver.go
@@ -0,0 +1,756 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode"
+)
+
+// A declInfo describes a package-level const, type, var, or func declaration.
+type declInfo struct {
+ file *Scope // scope of file containing this declaration
+ lhs []*Var // lhs of n:1 variable declarations, or nil
+ vtyp syntax.Expr // type, or nil (for const and var declarations only)
+ init syntax.Expr // init/orig expression, or nil (for const and var declarations only)
+ inherited bool // if set, the init expression is inherited from a previous constant declaration
+ tdecl *syntax.TypeDecl // type declaration, or nil
+ fdecl *syntax.FuncDecl // func declaration, or nil
+
+ // The deps field tracks initialization expression dependencies.
+ deps map[Object]bool // lazily initialized
+}
+
+// hasInitializer reports whether the declared object has an initialization
+// expression or function body.
+func (d *declInfo) hasInitializer() bool {
+ return d.init != nil || d.fdecl != nil && d.fdecl.Body != nil
+}
+
+// addDep adds obj to the set of objects d's init expression depends on.
+func (d *declInfo) addDep(obj Object) {
+ m := d.deps
+ if m == nil {
+ m = make(map[Object]bool)
+ d.deps = m
+ }
+ m[obj] = true
+}
+
+// arity checks that the lhs and rhs of a const or var decl
+// have a matching number of names and initialization values.
+// If inherited is set, the initialization values are from
+// another (constant) declaration.
+func (check *Checker) arity(pos syntax.Pos, names []*syntax.Name, inits []syntax.Expr, constDecl, inherited bool) {
+ l := len(names)
+ r := len(inits)
+
+ switch {
+ case l < r:
+ n := inits[l]
+ if inherited {
+ check.errorf(pos, "extra init expr at %s", n.Pos())
+ } else {
+ check.errorf(n, "extra init expr %s", n)
+ }
+ case l > r && (constDecl || r != 1): // if r == 1 it may be a multi-valued function and we can't say anything yet
+ n := names[r]
+ check.errorf(n, "missing init expr for %s", n.Value)
+ }
+}
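+
+// For example (sketch, constDecl = true):
+//
+//	const a, b = 1    // "missing init expr for b" (l > r)
+//	const c = 1, 2    // "extra init expr 2"       (l < r)
+//
+// For variables, a single rhs expression may be a multi-valued call, so the
+// l > r case is only reported when r != 1.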
+
+func validatedImportPath(path string) (string, error) {
+ s, err := strconv.Unquote(path)
+ if err != nil {
+ return "", err
+ }
+ if s == "" {
+ return "", fmt.Errorf("empty string")
+ }
+ const illegalChars = `!"#$%&'()*,:;<=>?[\]^{|}` + "`\uFFFD"
+ for _, r := range s {
+ if !unicode.IsGraphic(r) || unicode.IsSpace(r) || strings.ContainsRune(illegalChars, r) {
+ return s, fmt.Errorf("invalid character %#U", r)
+ }
+ }
+ return s, nil
+}
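+
+// For instance (sketch; the argument is the quoted literal as written in source):
+//
+//	validatedImportPath(`"fmt"`)    // "fmt", nil
+//	validatedImportPath(`""`)       // error: empty string
+//	validatedImportPath(`"a b"`)    // error: invalid character U+0020 ' '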
+
+// declarePkgObj declares obj in the package scope, records its ident -> obj mapping,
+// and updates check.objMap. The object must not be a function or method.
+func (check *Checker) declarePkgObj(ident *syntax.Name, obj Object, d *declInfo) {
+ assert(ident.Value == obj.Name())
+
+ // spec: "A package-scope or file-scope identifier with name init
+ // may only be declared to be a function with this (func()) signature."
+ if ident.Value == "init" {
+ check.error(ident, "cannot declare init - must be func")
+ return
+ }
+
+ // spec: "The main package must have package name main and declare
+ // a function main that takes no arguments and returns no value."
+ if ident.Value == "main" && check.pkg.name == "main" {
+ check.error(ident, "cannot declare main - must be func")
+ return
+ }
+
+ check.declare(check.pkg.scope, ident, obj, nopos)
+ check.objMap[obj] = d
+ obj.setOrder(uint32(len(check.objMap)))
+}
+
+// filename returns a filename suitable for debugging output.
+func (check *Checker) filename(fileNo int) string {
+ file := check.files[fileNo]
+ if pos := file.Pos(); pos.IsKnown() {
+ // return check.fset.File(pos).Name()
+ // TODO(gri) do we need the actual file name here?
+ return pos.RelFilename()
+ }
+ return fmt.Sprintf("file[%d]", fileNo)
+}
+
+func (check *Checker) importPackage(pos syntax.Pos, path, dir string) *Package {
+ // If we already have a package for the given (path, dir)
+ // pair, use it instead of doing a full import.
+ // Checker.impMap only caches packages that are marked Complete
+ // or fake (dummy packages for failed imports). Incomplete but
+ // non-fake packages do require an import to complete them.
+ key := importKey{path, dir}
+ imp := check.impMap[key]
+ if imp != nil {
+ return imp
+ }
+
+ // no package yet => import it
+ if path == "C" && (check.conf.FakeImportC || check.conf.go115UsesCgo) {
+ imp = NewPackage("C", "C")
+ imp.fake = true // package scope is not populated
+ imp.cgo = check.conf.go115UsesCgo
+ } else {
+ // ordinary import
+ var err error
+ if importer := check.conf.Importer; importer == nil {
+ err = fmt.Errorf("Config.Importer not installed")
+ } else if importerFrom, ok := importer.(ImporterFrom); ok {
+ imp, err = importerFrom.ImportFrom(path, dir, 0)
+ if imp == nil && err == nil {
+ err = fmt.Errorf("Config.Importer.ImportFrom(%s, %s, 0) returned nil but no error", path, dir)
+ }
+ } else {
+ imp, err = importer.Import(path)
+ if imp == nil && err == nil {
+ err = fmt.Errorf("Config.Importer.Import(%s) returned nil but no error", path)
+ }
+ }
+ // make sure we have a valid package name
+ // (errors here can only happen through manipulation of packages after creation)
+ if err == nil && imp != nil && (imp.name == "_" || imp.name == "") {
+ err = fmt.Errorf("invalid package name: %q", imp.name)
+ imp = nil // create fake package below
+ }
+ if err != nil {
+ check.errorf(pos, "could not import %s (%s)", path, err)
+ if imp == nil {
+ // create a new fake package
+ // come up with a sensible package name (heuristic)
+ name := path
+ if i := len(name); i > 0 && name[i-1] == '/' {
+ name = name[:i-1]
+ }
+ if i := strings.LastIndex(name, "/"); i >= 0 {
+ name = name[i+1:]
+ }
+ imp = NewPackage(path, name)
+ }
+ // continue to use the package as best as we can
+ imp.fake = true // avoid follow-up lookup failures
+ }
+ }
+
+ // package should be complete or marked fake, but be cautious
+ if imp.complete || imp.fake {
+ check.impMap[key] = imp
+ // Once we've formatted an error message once, keep the pkgPathMap
+ // up-to-date on subsequent imports.
+ if check.pkgPathMap != nil {
+ check.markImports(imp)
+ }
+ return imp
+ }
+
+ // something went wrong (importer may have returned incomplete package without error)
+ return nil
+}
+
+// collectObjects collects all file and package objects and inserts them
+// into their respective scopes. It also performs imports and associates
+// methods with receiver base type names.
+func (check *Checker) collectObjects() {
+ pkg := check.pkg
+ pkg.height = 0
+
+ // pkgImports is the set of packages already imported by any package file seen
+ // so far. Used to avoid duplicate entries in pkg.imports. Allocate and populate
+ // it (pkg.imports may not be empty if we are checking test files incrementally).
+ // Note that pkgImports is keyed by package (and thus package path), not by an
+ // importKey value. Two different importKey values may map to the same package
+ // which is why we cannot use the check.impMap here.
+ var pkgImports = make(map[*Package]bool)
+ for _, imp := range pkg.imports {
+ pkgImports[imp] = true
+ }
+
+ type methodInfo struct {
+ obj *Func // method
+ ptr bool // true if pointer receiver
+ recv *syntax.Name // receiver type name
+ }
+ var methods []methodInfo // collected methods with valid receivers and non-blank (non-_) names
+ var fileScopes []*Scope
+ for fileNo, file := range check.files {
+ // The package identifier denotes the current package,
+ // but there is no corresponding package object.
+ check.recordDef(file.PkgName, nil)
+
+ fileScope := NewScope(check.pkg.scope, syntax.StartPos(file), syntax.EndPos(file), check.filename(fileNo))
+ fileScopes = append(fileScopes, fileScope)
+ check.recordScope(file, fileScope)
+
+ // determine file directory, necessary to resolve imports
+ // FileName may be "" (typically for tests) in which case
+ // we get "." as the directory which is what we would want.
+ fileDir := dir(file.PkgName.Pos().RelFilename()) // TODO(gri) should this be filename?
+
+ first := -1 // index of first ConstDecl in the current group, or -1
+ var last *syntax.ConstDecl // last ConstDecl with init expressions, or nil
+ for index, decl := range file.DeclList {
+ if _, ok := decl.(*syntax.ConstDecl); !ok {
+ first = -1 // we're not in a constant declaration
+ }
+
+ switch s := decl.(type) {
+ case *syntax.ImportDecl:
+ // import package
+ if s.Path == nil || s.Path.Bad {
+ continue // error reported during parsing
+ }
+ path, err := validatedImportPath(s.Path.Value)
+ if err != nil {
+ check.errorf(s.Path, "invalid import path (%s)", err)
+ continue
+ }
+
+ imp := check.importPackage(s.Path.Pos(), path, fileDir)
+ if imp == nil {
+ continue
+ }
+
+ if imp == Unsafe {
+ // typecheck ignores imports of package unsafe for
+ // calculating height.
+ // TODO(mdempsky): Revisit this. This seems fine, but I
+ // don't remember explicitly considering this case.
+ } else if h := imp.height + 1; h > pkg.height {
+ pkg.height = h
+ }
+
+ // local name overrides imported package name
+ name := imp.name
+ if s.LocalPkgName != nil {
+ name = s.LocalPkgName.Value
+ if path == "C" {
+ // match cmd/compile (not prescribed by spec)
+ check.error(s.LocalPkgName, `cannot rename import "C"`)
+ continue
+ }
+ }
+
+ if name == "init" {
+ check.error(s, "cannot import package as init - init must be a func")
+ continue
+ }
+
+ // add package to list of explicit imports
+ // (this functionality is provided as a convenience
+ // for clients; it is not needed for type-checking)
+ if !pkgImports[imp] {
+ pkgImports[imp] = true
+ pkg.imports = append(pkg.imports, imp)
+ }
+
+ pkgName := NewPkgName(s.Pos(), pkg, name, imp)
+ if s.LocalPkgName != nil {
+ // in a dot-import, the dot represents the package
+ check.recordDef(s.LocalPkgName, pkgName)
+ } else {
+ check.recordImplicit(s, pkgName)
+ }
+
+ if path == "C" {
+ // match cmd/compile (not prescribed by spec)
+ pkgName.used = true
+ }
+
+ // add import to file scope
+ check.imports = append(check.imports, pkgName)
+ if name == "." {
+ // dot-import
+ if check.dotImportMap == nil {
+ check.dotImportMap = make(map[dotImportKey]*PkgName)
+ }
+ // merge imported scope with file scope
+ for name, obj := range imp.scope.elems {
+ // Note: Avoid eager resolve(name, obj) here, so we only
+ // resolve dot-imported objects as needed.
+
+ // A package scope may contain non-exported objects;
+ // do not import them!
+ if isExported(name) {
+ // declare dot-imported object
+ // (Do not use check.declare because it modifies the object
+ // via Object.setScopePos, which leads to a race condition;
+ // the object may be imported into more than one file scope
+ // concurrently. See issue #32154.)
+ if alt := fileScope.Lookup(name); alt != nil {
+ var err error_
+ err.errorf(s.LocalPkgName, "%s redeclared in this block", alt.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ } else {
+ fileScope.insert(name, obj)
+ check.dotImportMap[dotImportKey{fileScope, name}] = pkgName
+ }
+ }
+ }
+ } else {
+ // declare imported package object in file scope
+ // (no need to provide s.LocalPkgName since we called check.recordDef earlier)
+ check.declare(fileScope, nil, pkgName, nopos)
+ }
+
+ case *syntax.ConstDecl:
+ // iota is the index of the current constDecl within the group
+ if first < 0 || s.Group == nil || file.DeclList[index-1].(*syntax.ConstDecl).Group != s.Group {
+ first = index
+ last = nil
+ }
+ iota := constant.MakeInt64(int64(index - first))
+
+ // determine which initialization expressions to use
+ inherited := true
+ switch {
+ case s.Type != nil || s.Values != nil:
+ last = s
+ inherited = false
+ case last == nil:
+ last = new(syntax.ConstDecl) // make sure last exists
+ inherited = false
+ }
+
+ // declare all constants
+ values := unpackExpr(last.Values)
+ for i, name := range s.NameList {
+ obj := NewConst(name.Pos(), pkg, name.Value, nil, iota)
+
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+
+ d := &declInfo{file: fileScope, vtyp: last.Type, init: init, inherited: inherited}
+ check.declarePkgObj(name, obj, d)
+ }
+
+ // Constants must always have init values.
+ check.arity(s.Pos(), s.NameList, values, true, inherited)
+
+ case *syntax.VarDecl:
+ lhs := make([]*Var, len(s.NameList))
+ // If there's exactly one rhs initializer, use
+ // the same declInfo d1 for all lhs variables
+ // so that each lhs variable depends on the same
+ // rhs initializer (n:1 var declaration).
+ var d1 *declInfo
+ if _, ok := s.Values.(*syntax.ListExpr); !ok {
+ // The lhs elements are only set up after the for loop below,
+ // but that's ok because declarePkgObj only collects the declInfo
+ // for a later phase.
+ d1 = &declInfo{file: fileScope, lhs: lhs, vtyp: s.Type, init: s.Values}
+ }
+
+ // declare all variables
+ values := unpackExpr(s.Values)
+ for i, name := range s.NameList {
+ obj := NewVar(name.Pos(), pkg, name.Value, nil)
+ lhs[i] = obj
+
+ d := d1
+ if d == nil {
+ // individual assignments
+ var init syntax.Expr
+ if i < len(values) {
+ init = values[i]
+ }
+ d = &declInfo{file: fileScope, vtyp: s.Type, init: init}
+ }
+
+ check.declarePkgObj(name, obj, d)
+ }
+
+ // If we have no type, we must have values.
+ if s.Type == nil || values != nil {
+ check.arity(s.Pos(), s.NameList, values, false, false)
+ }
+
+ case *syntax.TypeDecl:
+ if len(s.TParamList) != 0 && !check.allowVersion(pkg, 1, 18) {
+ check.versionErrorf(s.TParamList[0], "go1.18", "type parameter")
+ }
+ obj := NewTypeName(s.Name.Pos(), pkg, s.Name.Value, nil)
+ check.declarePkgObj(s.Name, obj, &declInfo{file: fileScope, tdecl: s})
+
+ case *syntax.FuncDecl:
+ name := s.Name.Value
+ obj := NewFunc(s.Name.Pos(), pkg, name, nil)
+ hasTParamError := false // avoid duplicate type parameter errors
+ if s.Recv == nil {
+ // regular function
+ if name == "init" || name == "main" && pkg.name == "main" {
+ if len(s.TParamList) != 0 {
+ check.softErrorf(s.TParamList[0], "func %s must have no type parameters", name)
+ hasTParamError = true
+ }
+ if t := s.Type; len(t.ParamList) != 0 || len(t.ResultList) != 0 {
+ check.softErrorf(s, "func %s must have no arguments and no return values", name)
+ }
+ }
+ // don't declare init functions in the package scope - they are invisible
+ if name == "init" {
+ obj.parent = pkg.scope
+ check.recordDef(s.Name, obj)
+ // init functions must have a body
+ if s.Body == nil {
+ // TODO(gri) make this error message consistent with the others above
+ check.softErrorf(obj.pos, "missing function body")
+ }
+ } else {
+ check.declare(pkg.scope, s.Name, obj, nopos)
+ }
+ } else {
+ // method
+ // s.Recv != nil
+ ptr, recv, _ := check.unpackRecv(s.Recv.Type, false)
+ // Methods with invalid receiver cannot be associated to a type, and
+ // methods with blank _ names are never found; no need to collect any
+ // of them. They will still be type-checked with all the other functions.
+ if recv != nil && name != "_" {
+ methods = append(methods, methodInfo{obj, ptr, recv})
+ }
+ check.recordDef(s.Name, obj)
+ }
+ if len(s.TParamList) != 0 && !check.allowVersion(pkg, 1, 18) && !hasTParamError {
+ check.versionErrorf(s.TParamList[0], "go1.18", "type parameter")
+ }
+ info := &declInfo{file: fileScope, fdecl: s}
+ // Methods are not package-level objects but we still track them in the
+ // object map so that we can handle them like regular functions (if the
+ // receiver is invalid); also we need their fdecl info when associating
+ // them with their receiver base type, below.
+ check.objMap[obj] = info
+ obj.setOrder(uint32(len(check.objMap)))
+
+ default:
+ check.errorf(s, invalidAST+"unknown syntax.Decl node %T", s)
+ }
+ }
+ }
+
+ // verify that objects in package and file scopes have different names
+ for _, scope := range fileScopes {
+ for name, obj := range scope.elems {
+ if alt := pkg.scope.Lookup(name); alt != nil {
+ obj = resolve(name, obj)
+ var err error_
+ if pkg, ok := obj.(*PkgName); ok {
+ err.errorf(alt, "%s already declared through import of %s", alt.Name(), pkg.Imported())
+ err.recordAltDecl(pkg)
+ } else {
+ err.errorf(alt, "%s already declared through dot-import of %s", alt.Name(), obj.Pkg())
+ // TODO(gri) dot-imported objects don't have a position; recordAltDecl won't print anything
+ err.recordAltDecl(obj)
+ }
+ check.report(&err)
+ }
+ }
+ }
+
+ // Now that we have all package scope objects and all methods,
+ // associate methods with receiver base type name where possible.
+ // Ignore methods that have an invalid receiver. They will be
+ // type-checked later, with regular functions.
+ if methods != nil {
+ check.methods = make(map[*TypeName][]*Func)
+ for i := range methods {
+ m := &methods[i]
+ // Determine the receiver base type and associate m with it.
+ ptr, base := check.resolveBaseTypeName(m.ptr, m.recv)
+ if base != nil {
+ m.obj.hasPtrRecv_ = ptr
+ check.methods[base] = append(check.methods[base], m.obj)
+ }
+ }
+ }
+}
+
+// unpackRecv unpacks a receiver type and returns its components: ptr indicates whether
+// rtyp is a pointer receiver, rname is the receiver type name, and tparams are its
+// type parameters, if any. The type parameters are only unpacked if unpackParams is
+// set. If rname is nil, the receiver is unusable (i.e., the source has a bug which we
+// cannot easily work around).
+func (check *Checker) unpackRecv(rtyp syntax.Expr, unpackParams bool) (ptr bool, rname *syntax.Name, tparams []*syntax.Name) {
+L: // unpack receiver type
+ // This accepts invalid receivers such as ***T and does not
+ // work for other invalid receivers, but we don't care. The
+ // validity of receiver expressions is checked elsewhere.
+ for {
+ switch t := rtyp.(type) {
+ case *syntax.ParenExpr:
+ rtyp = t.X
+ // case *ast.StarExpr:
+ // ptr = true
+ // rtyp = t.X
+ case *syntax.Operation:
+ if t.Op != syntax.Mul || t.Y != nil {
+ break
+ }
+ ptr = true
+ rtyp = t.X
+ default:
+ break L
+ }
+ }
+
+ // unpack type parameters, if any
+ if ptyp, _ := rtyp.(*syntax.IndexExpr); ptyp != nil {
+ rtyp = ptyp.X
+ if unpackParams {
+ for _, arg := range unpackExpr(ptyp.Index) {
+ var par *syntax.Name
+ switch arg := arg.(type) {
+ case *syntax.Name:
+ par = arg
+ case *syntax.BadExpr:
+ // ignore - error already reported by parser
+ case nil:
+ check.error(ptyp, invalidAST+"parameterized receiver contains nil parameters")
+ default:
+ check.errorf(arg, "receiver type parameter %s must be an identifier", arg)
+ }
+ if par == nil {
+ par = syntax.NewName(arg.Pos(), "_")
+ }
+ tparams = append(tparams, par)
+ }
+
+ }
+ }
+
+ // unpack receiver name
+ if name, _ := rtyp.(*syntax.Name); name != nil {
+ rname = name
+ }
+
+ return
+}
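+
+// For example (sketch; unpackParams set):
+//
+//	func (r T) m()          // ptr = false, rname = T
+//	func (r *T) m()         // ptr = true,  rname = T
+//	func (r *List[T]) m()   // ptr = true,  rname = List, tparams = [T]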
+
+// resolveBaseTypeName returns the non-alias base type name for typ, and whether
+// there was a pointer indirection to get to it. The base type name must be declared
+// in package scope, and there can be at most one pointer indirection. If no such type
+// name exists, the returned base is nil.
+func (check *Checker) resolveBaseTypeName(seenPtr bool, typ syntax.Expr) (ptr bool, base *TypeName) {
+ // Algorithm: Starting from a type expression, which may be a name,
+ // we follow that type through alias declarations until we reach a
+ // non-alias type name. If we encounter anything but pointer types or
+ // parentheses we're done. If we encounter more than one pointer type
+ // we're done.
+ ptr = seenPtr
+ var seen map[*TypeName]bool
+ for {
+ typ = unparen(typ)
+
+ // check if we have a pointer type
+ // if pexpr, _ := typ.(*ast.StarExpr); pexpr != nil {
+ if pexpr, _ := typ.(*syntax.Operation); pexpr != nil && pexpr.Op == syntax.Mul && pexpr.Y == nil {
+ // if we've already seen a pointer, we're done
+ if ptr {
+ return false, nil
+ }
+ ptr = true
+ typ = unparen(pexpr.X) // continue with pointer base type
+ }
+
+ // typ must be a name
+ name, _ := typ.(*syntax.Name)
+ if name == nil {
+ return false, nil
+ }
+
+ // name must denote an object found in the current package scope
+ // (note that dot-imported objects are not in the package scope!)
+ obj := check.pkg.scope.Lookup(name.Value)
+ if obj == nil {
+ return false, nil
+ }
+
+ // the object must be a type name...
+ tname, _ := obj.(*TypeName)
+ if tname == nil {
+ return false, nil
+ }
+
+ // ... which we have not seen before
+ if seen[tname] {
+ return false, nil
+ }
+
+ // we're done if tdecl defined tname as a new type
+ // (rather than an alias)
+ tdecl := check.objMap[tname].tdecl // must exist for objects in package scope
+ if !tdecl.Alias {
+ return ptr, tname
+ }
+
+ // otherwise, continue resolving
+ typ = tdecl.Type
+ if seen == nil {
+ seen = make(map[*TypeName]bool)
+ }
+ seen[tname] = true
+ }
+}
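+
+// For example (sketch), given the package-scope declarations
+//
+//	type T struct{}
+//	type A = T
+//
+// the receiver in func (a A) m() resolves to the base type name T with
+// ptr = false, and func (p *A) m() resolves to T with ptr = true. More than
+// one pointer indirection (e.g. type A = *T with receiver *A) or a name not
+// declared in package scope yields base = nil.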
+
+// packageObjects typechecks all package objects, but not function bodies.
+func (check *Checker) packageObjects() {
+ // process package objects in source order for reproducible results
+ objList := make([]Object, len(check.objMap))
+ i := 0
+ for obj := range check.objMap {
+ objList[i] = obj
+ i++
+ }
+ sort.Sort(inSourceOrder(objList))
+
+ // add new methods to already type-checked types (from a prior Checker.Files call)
+ for _, obj := range objList {
+ if obj, _ := obj.(*TypeName); obj != nil && obj.typ != nil {
+ check.collectMethods(obj)
+ }
+ }
+
+ // We process non-alias type declarations first, followed by alias declarations,
+ // and then everything else. This appears to avoid most situations where the type
+ // of an alias is needed before it is available.
+ // There may still be cases where this is not good enough (see also issue #25838).
+ // In those cases Checker.ident will report an error ("invalid use of type alias").
+ var aliasList []*TypeName
+ var othersList []Object // everything that's not a type
+ // phase 1: non-alias type declarations
+ for _, obj := range objList {
+ if tname, _ := obj.(*TypeName); tname != nil {
+ if check.objMap[tname].tdecl.Alias {
+ aliasList = append(aliasList, tname)
+ } else {
+ check.objDecl(obj, nil)
+ }
+ } else {
+ othersList = append(othersList, obj)
+ }
+ }
+ // phase 2: alias type declarations
+ for _, obj := range aliasList {
+ check.objDecl(obj, nil)
+ }
+ // phase 3: all other declarations
+ for _, obj := range othersList {
+ check.objDecl(obj, nil)
+ }
+
+ // At this point we may have a non-empty check.methods map; this means that not all
+ // entries were deleted at the end of typeDecl because the respective receiver base
+ // types were not found. In that case, an error was reported when declaring those
+ // methods. We can now safely discard this map.
+ check.methods = nil
+}
+
+// inSourceOrder implements the sort.Sort interface.
+type inSourceOrder []Object
+
+func (a inSourceOrder) Len() int { return len(a) }
+func (a inSourceOrder) Less(i, j int) bool { return a[i].order() < a[j].order() }
+func (a inSourceOrder) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// unusedImports checks for unused imports.
+func (check *Checker) unusedImports() {
+ // if function bodies are not checked, packages' uses are likely missing - don't check
+ if check.conf.IgnoreFuncBodies {
+ return
+ }
+
+ // spec: "It is illegal (...) to directly import a package without referring to
+ // any of its exported identifiers. To import a package solely for its side-effects
+ // (initialization), use the blank identifier as explicit package name."
+
+ for _, obj := range check.imports {
+ if !obj.used && obj.name != "_" {
+ check.errorUnusedPkg(obj)
+ }
+ }
+}
+
+func (check *Checker) errorUnusedPkg(obj *PkgName) {
+ // If the package was imported with a name other than the final
+ // import path element, show it explicitly in the error message.
+ // Note that this handles both renamed imports and imports of
+ // packages containing unconventional package declarations.
+ // Note that this uses / always, even on Windows, because Go import
+ // paths always use forward slashes.
+ path := obj.imported.path
+ elem := path
+ if i := strings.LastIndex(elem, "/"); i >= 0 {
+ elem = elem[i+1:]
+ }
+ if obj.name == "" || obj.name == "." || obj.name == elem {
+ if check.conf.CompilerErrorMessages {
+ check.softErrorf(obj, "imported and not used: %q", path)
+ } else {
+ check.softErrorf(obj, "%q imported but not used", path)
+ }
+ } else {
+ if check.conf.CompilerErrorMessages {
+ check.softErrorf(obj, "imported and not used: %q as %s", path, obj.name)
+ } else {
+ check.softErrorf(obj, "%q imported but not used as %s", path, obj.name)
+ }
+ }
+}
+
+// dir makes a good-faith attempt to return the directory
+// portion of path. If path is empty, the result is ".".
+// (Per the go/build package dependency tests, we cannot import
+// path/filepath and simply use filepath.Dir.)
+func dir(path string) string {
+ if i := strings.LastIndexAny(path, `/\`); i > 0 {
+ return path[:i]
+ }
+ // i <= 0
+ return "."
+}
diff --git a/src/cmd/compile/internal/types2/resolver_test.go b/src/cmd/compile/internal/types2/resolver_test.go
new file mode 100644
index 0000000..a02abce
--- /dev/null
+++ b/src/cmd/compile/internal/types2/resolver_test.go
@@ -0,0 +1,222 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "internal/testenv"
+ "sort"
+ "testing"
+
+ . "cmd/compile/internal/types2"
+)
+
+type resolveTestImporter struct {
+ importer ImporterFrom
+ imported map[string]bool
+}
+
+func (imp *resolveTestImporter) Import(string) (*Package, error) {
+ panic("should not be called")
+}
+
+func (imp *resolveTestImporter) ImportFrom(path, srcDir string, mode ImportMode) (*Package, error) {
+ if mode != 0 {
+ panic("mode must be 0")
+ }
+ if imp.importer == nil {
+ imp.importer = defaultImporter().(ImporterFrom)
+ imp.imported = make(map[string]bool)
+ }
+ pkg, err := imp.importer.ImportFrom(path, srcDir, mode)
+ if err != nil {
+ return nil, err
+ }
+ imp.imported[path] = true
+ return pkg, nil
+}
+
+func TestResolveIdents(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ sources := []string{
+ `
+ package p
+ import "fmt"
+ import "math"
+ const pi = math.Pi
+ func sin(x float64) float64 {
+ return math.Sin(x)
+ }
+ var Println = fmt.Println
+ `,
+ `
+ package p
+ import "fmt"
+ type errorStringer struct { fmt.Stringer; error }
+ func f() string {
+ _ = "foo"
+ return fmt.Sprintf("%d", g())
+ }
+ func g() (x int) { return }
+ `,
+ `
+ package p
+ import . "go/parser"
+ import "sync"
+ func h() Mode { return ImportsOnly }
+ var _, x int = 1, 2
+ func init() {}
+ type T struct{ *sync.Mutex; a, b, c int}
+ type I interface{ m() }
+ var _ = T{a: 1, b: 2, c: 3}
+ func (_ T) m() {}
+ func (T) _() {}
+ var i I
+ var _ = i.m
+ func _(s []int) { for i, x := range s { _, _ = i, x } }
+ func _(x interface{}) {
+ switch x := x.(type) {
+ case int:
+ _ = x
+ }
+ switch {} // implicit 'true' tag
+ }
+ `,
+ `
+ package p
+ type S struct{}
+ func (T) _() {}
+ func (T) _() {}
+ `,
+ `
+ package p
+ func _() {
+ L0:
+ L1:
+ goto L0
+ for {
+ goto L1
+ }
+ if true {
+ goto L2
+ }
+ L2:
+ }
+ `,
+ }
+
+ pkgnames := []string{
+ "fmt",
+ "math",
+ }
+
+ // parse package files
+ var files []*syntax.File
+ for i, src := range sources {
+ f, err := parseSrc(fmt.Sprintf("sources[%d]", i), src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ files = append(files, f)
+ }
+
+ // resolve and type-check package AST
+ importer := new(resolveTestImporter)
+ conf := Config{Importer: importer}
+ uses := make(map[*syntax.Name]Object)
+ defs := make(map[*syntax.Name]Object)
+ _, err := conf.Check("testResolveIdents", files, &Info{Defs: defs, Uses: uses})
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ // check that all packages were imported
+ for _, name := range pkgnames {
+ if !importer.imported[name] {
+ t.Errorf("package %s not imported", name)
+ }
+ }
+
+ // check that qualified identifiers are resolved
+ for _, f := range files {
+ syntax.Crawl(f, func(n syntax.Node) bool {
+ if s, ok := n.(*syntax.SelectorExpr); ok {
+ if x, ok := s.X.(*syntax.Name); ok {
+ obj := uses[x]
+ if obj == nil {
+ t.Errorf("%s: unresolved qualified identifier %s", x.Pos(), x.Value)
+ return true
+ }
+ if _, ok := obj.(*PkgName); ok && uses[s.Sel] == nil {
+ t.Errorf("%s: unresolved selector %s", s.Sel.Pos(), s.Sel.Value)
+ return true
+ }
+ return true
+ }
+ return true
+ }
+ return false
+ })
+ }
+
+ for id, obj := range uses {
+ if obj == nil {
+ t.Errorf("%s: Uses[%s] == nil", id.Pos(), id.Value)
+ }
+ }
+
+ // Check that each identifier in the source is found in uses or defs or both.
+ // We need the foundUses/Defs maps (rather than just deleting the found objects
+ // from the uses and defs maps) because syntax.Walk traverses shared nodes multiple
+ // times (e.g. types in field lists such as "a, b, c int").
+ foundUses := make(map[*syntax.Name]bool)
+ foundDefs := make(map[*syntax.Name]bool)
+ var both []string
+ for _, f := range files {
+ syntax.Crawl(f, func(n syntax.Node) bool {
+ if x, ok := n.(*syntax.Name); ok {
+ var objects int
+ if _, found := uses[x]; found {
+ objects |= 1
+ foundUses[x] = true
+ }
+ if _, found := defs[x]; found {
+ objects |= 2
+ foundDefs[x] = true
+ }
+ switch objects {
+ case 0:
+ t.Errorf("%s: unresolved identifier %s", x.Pos(), x.Value)
+ case 3:
+ both = append(both, x.Value)
+ }
+ return true
+ }
+ return false
+ })
+ }
+
+ // check the expected set of idents that are simultaneously uses and defs
+ sort.Strings(both)
+ if got, want := fmt.Sprint(both), "[Mutex Stringer error]"; got != want {
+ t.Errorf("simultaneous uses/defs = %s, want %s", got, want)
+ }
+
+ // any left-over identifiers didn't exist in the source
+ for x := range uses {
+ if !foundUses[x] {
+ t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value)
+ }
+ }
+ for x := range defs {
+ if !foundDefs[x] {
+ t.Errorf("%s: identifier %s not present in source", x.Pos(), x.Value)
+ }
+ }
+
+ // TODO(gri) add tests to check ImplicitObj callbacks
+}
diff --git a/src/cmd/compile/internal/types2/return.go b/src/cmd/compile/internal/types2/return.go
new file mode 100644
index 0000000..6c3e184
--- /dev/null
+++ b/src/cmd/compile/internal/types2/return.go
@@ -0,0 +1,184 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements isTerminating.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+)
+
+// isTerminating reports if s is a terminating statement.
+// If s is labeled, label is the label name; otherwise label
+// is "".
+func (check *Checker) isTerminating(s syntax.Stmt, label string) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.SendStmt,
+ *syntax.AssignStmt, *syntax.CallStmt:
+ // no chance
+
+ case *syntax.LabeledStmt:
+ return check.isTerminating(s.Stmt, s.Label.Value)
+
+ case *syntax.ExprStmt:
+ // calling the predeclared (possibly parenthesized) panic() function is terminating
+ if call, ok := unparen(s.X).(*syntax.CallExpr); ok && check.isPanic[call] {
+ return true
+ }
+
+ case *syntax.ReturnStmt:
+ return true
+
+ case *syntax.BranchStmt:
+ if s.Tok == syntax.Goto || s.Tok == syntax.Fallthrough {
+ return true
+ }
+
+ case *syntax.BlockStmt:
+ return check.isTerminatingList(s.List, "")
+
+ case *syntax.IfStmt:
+ if s.Else != nil &&
+ check.isTerminating(s.Then, "") &&
+ check.isTerminating(s.Else, "") {
+ return true
+ }
+
+ case *syntax.SwitchStmt:
+ return check.isTerminatingSwitch(s.Body, label)
+
+ case *syntax.SelectStmt:
+ for _, cc := range s.Body {
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+
+ }
+ return true
+
+ case *syntax.ForStmt:
+ if _, ok := s.Init.(*syntax.RangeClause); ok {
+ // Range clauses guarantee that the loop terminates,
+ // so the loop is not a terminating statement. See issue 49003.
+ break
+ }
+ if s.Cond == nil && !hasBreak(s.Body, label, true) {
+ return true
+ }
+ }
+
+ return false
+}
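+
+// For example (sketch), the following function bodies end in a terminating
+// statement:
+//
+//	func f() int { return 0 }
+//	func g() int { panic("unreachable") }   // call to the predeclared panic
+//	func h() int { for {} }                 // for loop without condition or break
+//
+// whereas a trailing if statement without an else branch, or a for loop with a
+// condition or a range clause, is not terminating.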
+
+func (check *Checker) isTerminatingList(list []syntax.Stmt, label string) bool {
+ // trailing empty statements are permitted - skip them
+ for i := len(list) - 1; i >= 0; i-- {
+ if _, ok := list[i].(*syntax.EmptyStmt); !ok {
+ return check.isTerminating(list[i], label)
+ }
+ }
+ return false // all statements are empty
+}
+
+func (check *Checker) isTerminatingSwitch(body []*syntax.CaseClause, label string) bool {
+ hasDefault := false
+ for _, cc := range body {
+ if cc.Cases == nil {
+ hasDefault = true
+ }
+ if !check.isTerminatingList(cc.Body, "") || hasBreakList(cc.Body, label, true) {
+ return false
+ }
+ }
+ return hasDefault
+}
+
+// TODO(gri) For nested breakable statements, the current implementation of hasBreak
+// will traverse the same subtree repeatedly, once for each label. Replace
+// with a single-pass label/break matching phase.
+
+// hasBreak reports if s is or contains a break statement
+// referring to the label-ed statement or implicit-ly the
+// closest outer breakable statement.
+func hasBreak(s syntax.Stmt, label string, implicit bool) bool {
+ switch s := s.(type) {
+ default:
+ unreachable()
+
+ case *syntax.DeclStmt, *syntax.EmptyStmt, *syntax.ExprStmt,
+ *syntax.SendStmt, *syntax.AssignStmt, *syntax.CallStmt,
+ *syntax.ReturnStmt:
+ // no chance
+
+ case *syntax.LabeledStmt:
+ return hasBreak(s.Stmt, label, implicit)
+
+ case *syntax.BranchStmt:
+ if s.Tok == syntax.Break {
+ if s.Label == nil {
+ return implicit
+ }
+ if s.Label.Value == label {
+ return true
+ }
+ }
+
+ case *syntax.BlockStmt:
+ return hasBreakList(s.List, label, implicit)
+
+ case *syntax.IfStmt:
+ if hasBreak(s.Then, label, implicit) ||
+ s.Else != nil && hasBreak(s.Else, label, implicit) {
+ return true
+ }
+
+ case *syntax.SwitchStmt:
+ if label != "" && hasBreakCaseList(s.Body, label, false) {
+ return true
+ }
+
+ case *syntax.SelectStmt:
+ if label != "" && hasBreakCommList(s.Body, label, false) {
+ return true
+ }
+
+ case *syntax.ForStmt:
+ if label != "" && hasBreak(s.Body, label, false) {
+ return true
+ }
+ }
+
+ return false
+}
+
+func hasBreakList(list []syntax.Stmt, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreak(s, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
+
+func hasBreakCaseList(list []*syntax.CaseClause, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreakList(s.Body, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
+
+func hasBreakCommList(list []*syntax.CommClause, label string, implicit bool) bool {
+ for _, s := range list {
+ if hasBreakList(s.Body, label, implicit) {
+ return true
+ }
+ }
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/scope.go b/src/cmd/compile/internal/types2/scope.go
new file mode 100644
index 0000000..095875d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/scope.go
@@ -0,0 +1,293 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Scopes.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "io"
+ "sort"
+ "strings"
+ "sync"
+)
+
+// A Scope maintains a set of objects and links to its containing
+// (parent) and contained (children) scopes. Objects may be inserted
+// and looked up by name. The zero value for Scope is a ready-to-use
+// empty scope.
+type Scope struct {
+ parent *Scope
+ children []*Scope
+ number int // parent.children[number-1] is this scope; 0 if there is no parent
+ elems map[string]Object // lazily allocated
+ pos, end syntax.Pos // scope extent; may be invalid
+ comment string // for debugging only
+ isFunc bool // set if this is a function scope (internal use only)
+}
+
+// NewScope returns a new, empty scope contained in the given parent
+// scope, if any. The comment is for debugging only.
+func NewScope(parent *Scope, pos, end syntax.Pos, comment string) *Scope {
+ s := &Scope{parent, nil, 0, nil, pos, end, comment, false}
+ // don't add children to Universe scope!
+ if parent != nil && parent != Universe {
+ parent.children = append(parent.children, s)
+ s.number = len(parent.children)
+ }
+ return s
+}
+
+// Parent returns the scope's containing (parent) scope.
+func (s *Scope) Parent() *Scope { return s.parent }
+
+// Len returns the number of scope elements.
+func (s *Scope) Len() int { return len(s.elems) }
+
+// Names returns the scope's element names in sorted order.
+func (s *Scope) Names() []string {
+ names := make([]string, len(s.elems))
+ i := 0
+ for name := range s.elems {
+ names[i] = name
+ i++
+ }
+ sort.Strings(names)
+ return names
+}
+
+// NumChildren returns the number of scopes nested in s.
+func (s *Scope) NumChildren() int { return len(s.children) }
+
+// Child returns the i'th child scope for 0 <= i < NumChildren().
+func (s *Scope) Child(i int) *Scope { return s.children[i] }
+
+// Lookup returns the object in scope s with the given name if such an
+// object exists; otherwise the result is nil.
+func (s *Scope) Lookup(name string) Object {
+ return resolve(name, s.elems[name])
+}
+
+// LookupParent follows the parent chain of scopes starting with s until
+// it finds a scope where Lookup(name) returns a non-nil object, and then
+// returns that scope and object. If a valid position pos is provided,
+// only objects that were declared at or before pos are considered.
+// If no such scope and object exists, the result is (nil, nil).
+//
+// Note that obj.Parent() may be different from the returned scope if the
+// object was inserted into the scope and already had a parent at that
+// time (see Insert). This can only happen for dot-imported objects
+// whose scope is the scope of the package that exported them.
+func (s *Scope) LookupParent(name string, pos syntax.Pos) (*Scope, Object) {
+ for ; s != nil; s = s.parent {
+ if obj := s.Lookup(name); obj != nil && (!pos.IsKnown() || obj.scopePos().Cmp(pos) <= 0) {
+ return s, obj
+ }
+ }
+ return nil, nil
+}
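+
+// A typical lookup walks from an inner scope outward. For example (sketch,
+// assuming inner is a block scope nested inside a function and package scope):
+//
+//	if scope, obj := inner.LookupParent("x", nopos); obj != nil {
+//		// scope is the innermost enclosing scope that declares x
+//		// (with an unknown pos, all declarations are considered)
+//	}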
+
+// Insert attempts to insert an object obj into scope s.
+// If s already contains an alternative object alt with
+// the same name, Insert leaves s unchanged and returns alt.
+// Otherwise it inserts obj, sets the object's parent scope
+// if not already set, and returns nil.
+func (s *Scope) Insert(obj Object) Object {
+ name := obj.Name()
+ if alt := s.Lookup(name); alt != nil {
+ return alt
+ }
+ s.insert(name, obj)
+ if obj.Parent() == nil {
+ obj.setParent(s)
+ }
+ return nil
+}
+
+// InsertLazy is like Insert, but allows deferring construction of the
+// inserted object until it's accessed with Lookup. The Object
+// returned by resolve must have the same name as given to InsertLazy.
+// If s already contains an alternative object with the same name,
+// InsertLazy leaves s unchanged and returns false. Otherwise it
+// records the binding and returns true. The object's parent scope
+// will be set to s after resolve is called.
+func (s *Scope) InsertLazy(name string, resolve func() Object) bool {
+ if s.elems[name] != nil {
+ return false
+ }
+ s.insert(name, &lazyObject{parent: s, resolve: resolve})
+ return true
+}
+
+func (s *Scope) insert(name string, obj Object) {
+ if s.elems == nil {
+ s.elems = make(map[string]Object)
+ }
+ s.elems[name] = obj
+}
+
+// Squash merges s with its parent scope p by adding all
+// objects of s to p, adding all children of s to the
+// children of p, and removing s from p's children.
+// The function err is called for each object obj in s which
+// has an object alt in p. s should be discarded after
+// having been squashed.
+func (s *Scope) Squash(err func(obj, alt Object)) {
+ p := s.parent
+ assert(p != nil)
+ for name, obj := range s.elems {
+ obj = resolve(name, obj)
+ obj.setParent(nil)
+ if alt := p.Insert(obj); alt != nil {
+ err(obj, alt)
+ }
+ }
+
+ j := -1 // index of s in p.children
+ for i, ch := range p.children {
+ if ch == s {
+ j = i
+ break
+ }
+ }
+ assert(j >= 0)
+ k := len(p.children) - 1
+ p.children[j] = p.children[k]
+ p.children = p.children[:k]
+
+ p.children = append(p.children, s.children...)
+
+ s.children = nil
+ s.elems = nil
+}
+
+// Pos and End describe the scope's source code extent [pos, end).
+// The results are guaranteed to be valid only if the type-checked
+// AST has complete position information. The extent is undefined
+// for Universe and package scopes.
+func (s *Scope) Pos() syntax.Pos { return s.pos }
+func (s *Scope) End() syntax.Pos { return s.end }
+
+// Contains reports whether pos is within the scope's extent.
+// The result is guaranteed to be valid only if the type-checked
+// AST has complete position information.
+func (s *Scope) Contains(pos syntax.Pos) bool {
+ return s.pos.Cmp(pos) <= 0 && pos.Cmp(s.end) < 0
+}
+
+// Innermost returns the innermost (child) scope containing
+// pos. If pos is not within any scope, the result is nil.
+// The result is also nil for the Universe scope.
+// The result is guaranteed to be valid only if the type-checked
+// AST has complete position information.
+func (s *Scope) Innermost(pos syntax.Pos) *Scope {
+ // Package scopes do not have extents since they may be
+ // discontiguous, so iterate over the package's files.
+ if s.parent == Universe {
+ for _, s := range s.children {
+ if inner := s.Innermost(pos); inner != nil {
+ return inner
+ }
+ }
+ }
+
+ if s.Contains(pos) {
+ for _, s := range s.children {
+ if s.Contains(pos) {
+ return s.Innermost(pos)
+ }
+ }
+ return s
+ }
+ return nil
+}
+
+// WriteTo writes a string representation of the scope to w,
+// with the scope elements sorted by name.
+// The level of indentation is controlled by n >= 0, with
+// n == 0 for no indentation.
+// If recurse is set, it also writes nested (children) scopes.
+func (s *Scope) WriteTo(w io.Writer, n int, recurse bool) {
+ const ind = ". "
+ indn := strings.Repeat(ind, n)
+
+ fmt.Fprintf(w, "%s%s scope %p {\n", indn, s.comment, s)
+
+ indn1 := indn + ind
+ for _, name := range s.Names() {
+ fmt.Fprintf(w, "%s%s\n", indn1, s.Lookup(name))
+ }
+
+ if recurse {
+ for _, s := range s.children {
+ s.WriteTo(w, n+1, recurse)
+ }
+ }
+
+ fmt.Fprintf(w, "%s}\n", indn)
+}
+
+// String returns a string representation of the scope, for debugging.
+func (s *Scope) String() string {
+ var buf bytes.Buffer
+ s.WriteTo(&buf, 0, false)
+ return buf.String()
+}
+
+// A lazyObject represents an imported Object that has not been fully
+// resolved yet by its importer.
+type lazyObject struct {
+ parent *Scope
+ resolve func() Object
+ obj Object
+ once sync.Once
+}
+
+// resolve returns the Object represented by obj, resolving lazy
+// objects as appropriate.
+func resolve(name string, obj Object) Object {
+ if lazy, ok := obj.(*lazyObject); ok {
+ lazy.once.Do(func() {
+ obj := lazy.resolve()
+
+ if _, ok := obj.(*lazyObject); ok {
+ panic("recursive lazy object")
+ }
+ if obj.Name() != name {
+ panic("lazy object has unexpected name")
+ }
+
+ if obj.Parent() == nil {
+ obj.setParent(lazy.parent)
+ }
+ lazy.obj = obj
+ })
+
+ obj = lazy.obj
+ }
+ return obj
+}
+
+// stub implementations so *lazyObject implements Object and we can
+// store them directly into Scope.elems.
+func (*lazyObject) Parent() *Scope { panic("unreachable") }
+func (*lazyObject) Pos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) Pkg() *Package { panic("unreachable") }
+func (*lazyObject) Name() string { panic("unreachable") }
+func (*lazyObject) Type() Type { panic("unreachable") }
+func (*lazyObject) Exported() bool { panic("unreachable") }
+func (*lazyObject) Id() string { panic("unreachable") }
+func (*lazyObject) String() string { panic("unreachable") }
+func (*lazyObject) order() uint32 { panic("unreachable") }
+func (*lazyObject) color() color { panic("unreachable") }
+func (*lazyObject) setType(Type) { panic("unreachable") }
+func (*lazyObject) setOrder(uint32) { panic("unreachable") }
+func (*lazyObject) setColor(color color) { panic("unreachable") }
+func (*lazyObject) setParent(*Scope) { panic("unreachable") }
+func (*lazyObject) sameId(pkg *Package, name string) bool { panic("unreachable") }
+func (*lazyObject) scopePos() syntax.Pos { panic("unreachable") }
+func (*lazyObject) setScopePos(pos syntax.Pos) { panic("unreachable") }
diff --git a/src/cmd/compile/internal/types2/selection.go b/src/cmd/compile/internal/types2/selection.go
new file mode 100644
index 0000000..8128aee
--- /dev/null
+++ b/src/cmd/compile/internal/types2/selection.go
@@ -0,0 +1,143 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Selections.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+)
+
+// SelectionKind describes the kind of a selector expression x.f
+// (excluding qualified identifiers).
+type SelectionKind int
+
+const (
+ FieldVal SelectionKind = iota // x.f is a struct field selector
+ MethodVal // x.f is a method selector
+ MethodExpr // x.f is a method expression
+)
+
+// A Selection describes a selector expression x.f.
+// For the declarations:
+//
+// type T struct{ x int; E }
+// type E struct{}
+// func (e E) m() {}
+// var p *T
+//
+// the following relations exist:
+//
+// Selector Kind Recv Obj Type Index Indirect
+//
+// p.x FieldVal T x int {0} true
+// p.m MethodVal *T m func() {1, 0} true
+// T.m MethodExpr T m func(T) {1, 0} false
+//
+type Selection struct {
+ kind SelectionKind
+ recv Type // type of x
+ obj Object // object denoted by x.f
+ index []int // path from x to x.f
+ indirect bool // set if there was any pointer indirection on the path
+}
+
+// Kind returns the selection kind.
+func (s *Selection) Kind() SelectionKind { return s.kind }
+
+// Recv returns the type of x in x.f.
+func (s *Selection) Recv() Type { return s.recv }
+
+// Obj returns the object denoted by x.f; a *Var for
+// a field selection, and a *Func in all other cases.
+func (s *Selection) Obj() Object { return s.obj }
+
+// Type returns the type of x.f, which may be different from the type of f.
+// See Selection for more information.
+func (s *Selection) Type() Type {
+ switch s.kind {
+ case MethodVal:
+ // The type of x.f is a method with its receiver type set
+ // to the type of x.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ recv := *sig.recv
+ recv.typ = s.recv
+ sig.recv = &recv
+ return &sig
+
+ case MethodExpr:
+ // The type of x.f is a function (without receiver)
+ // and an additional first argument with the same type as x.
+ // TODO(gri) Similar code is already in call.go - factor!
+ // TODO(gri) Compute this eagerly to avoid allocations.
+ sig := *s.obj.(*Func).typ.(*Signature)
+ arg0 := *sig.recv
+ sig.recv = nil
+ arg0.typ = s.recv
+ var params []*Var
+ if sig.params != nil {
+ params = sig.params.vars
+ }
+ sig.params = NewTuple(append([]*Var{&arg0}, params...)...)
+ return &sig
+ }
+
+ // In all other cases (field selections), the type of x.f is the type of f.
+ return s.obj.Type()
+}
+
+// Index describes the path from x to f in x.f.
+// The last index entry is the field or method index of the type declaring f;
+// either:
+//
+// 1) the list of declared methods of a named type; or
+// 2) the list of methods of an interface type; or
+// 3) the list of fields of a struct type.
+//
+// The earlier index entries are the indices of the embedded fields implicitly
+// traversed to get from (the type of) x to f, starting at embedding depth 0.
+func (s *Selection) Index() []int { return s.index }
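+
+// In the Selection example above, p.m has Index {1, 0}: the embedded field E
+// is field 1 of T, and m is method 0 in E's method list; the last entry always
+// indexes into the type that declares f.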
+
+// Indirect reports whether any pointer indirection was required to get from
+// x to f in x.f.
+func (s *Selection) Indirect() bool { return s.indirect }
+
+func (s *Selection) String() string { return SelectionString(s, nil) }
+
+// SelectionString returns the string form of s.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+//
+// Examples:
+// "field (T) f int"
+// "method (T) f(X) Y"
+// "method expr (T) f(X) Y"
+//
+func SelectionString(s *Selection, qf Qualifier) string {
+ var k string
+ switch s.kind {
+ case FieldVal:
+ k = "field "
+ case MethodVal:
+ k = "method "
+ case MethodExpr:
+ k = "method expr "
+ default:
+ unreachable()
+ }
+ var buf bytes.Buffer
+ buf.WriteString(k)
+ buf.WriteByte('(')
+ WriteType(&buf, s.Recv(), qf)
+ fmt.Fprintf(&buf, ") %s", s.obj.Name())
+ if T := s.Type(); s.kind == FieldVal {
+ buf.WriteByte(' ')
+ WriteType(&buf, T, qf)
+ } else {
+ WriteSignature(&buf, T.(*Signature), qf)
+ }
+ return buf.String()
+}
diff --git a/src/cmd/compile/internal/types2/self_test.go b/src/cmd/compile/internal/types2/self_test.go
new file mode 100644
index 0000000..9a01ccd
--- /dev/null
+++ b/src/cmd/compile/internal/types2/self_test.go
@@ -0,0 +1,113 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "path"
+ "path/filepath"
+ "runtime"
+ "testing"
+ "time"
+
+ . "cmd/compile/internal/types2"
+)
+
+func TestSelf(t *testing.T) {
+ files, err := pkgFiles(".")
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ conf := Config{Importer: defaultImporter()}
+ _, err = conf.Check("cmd/compile/internal/types2", files, nil)
+ if err != nil {
+ t.Fatal(err)
+ }
+}
+
+func BenchmarkCheck(b *testing.B) {
+ for _, p := range []string{
+ filepath.Join("src", "net", "http"),
+ filepath.Join("src", "go", "parser"),
+ filepath.Join("src", "go", "constant"),
+ filepath.Join("src", "runtime"),
+ filepath.Join("src", "go", "internal", "gcimporter"),
+ } {
+ b.Run(path.Base(p), func(b *testing.B) {
+ path := filepath.Join(runtime.GOROOT(), p)
+ for _, ignoreFuncBodies := range []bool{false, true} {
+ name := "funcbodies"
+ if ignoreFuncBodies {
+ name = "nofuncbodies"
+ }
+ b.Run(name, func(b *testing.B) {
+ b.Run("info", func(b *testing.B) {
+ runbench(b, path, ignoreFuncBodies, true)
+ })
+ b.Run("noinfo", func(b *testing.B) {
+ runbench(b, path, ignoreFuncBodies, false)
+ })
+ })
+ }
+ })
+ }
+}
+
+func runbench(b *testing.B, path string, ignoreFuncBodies, writeInfo bool) {
+ files, err := pkgFiles(path)
+ if err != nil {
+ b.Fatal(err)
+ }
+
+ // determine line count
+ var lines uint
+ for _, f := range files {
+ lines += f.EOF.Line()
+ }
+
+ b.ResetTimer()
+ start := time.Now()
+ for i := 0; i < b.N; i++ {
+ conf := Config{
+ IgnoreFuncBodies: ignoreFuncBodies,
+ Importer: defaultImporter(),
+ }
+ var info *Info
+ if writeInfo {
+ info = &Info{
+ Types: make(map[syntax.Expr]TypeAndValue),
+ Defs: make(map[*syntax.Name]Object),
+ Uses: make(map[*syntax.Name]Object),
+ Implicits: make(map[syntax.Node]Object),
+ Selections: make(map[*syntax.SelectorExpr]*Selection),
+ Scopes: make(map[syntax.Node]*Scope),
+ }
+ }
+ if _, err := conf.Check(path, files, info); err != nil {
+ b.Fatal(err)
+ }
+ }
+ b.StopTimer()
+ b.ReportMetric(float64(lines)*float64(b.N)/time.Since(start).Seconds(), "lines/s")
+}
+
+func pkgFiles(path string) ([]*syntax.File, error) {
+ filenames, err := pkgFilenames(path) // from stdlib_test.go
+ if err != nil {
+ return nil, err
+ }
+
+ var files []*syntax.File
+ for _, filename := range filenames {
+ file, err := syntax.ParseFile(filename, nil, nil, 0)
+ if err != nil {
+ return nil, err
+ }
+ files = append(files, file)
+ }
+
+ return files, nil
+}
diff --git a/src/cmd/compile/internal/types2/signature.go b/src/cmd/compile/internal/types2/signature.go
new file mode 100644
index 0000000..f52f3e2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/signature.go
@@ -0,0 +1,336 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Signature represents a (non-builtin) function or method type.
+// The receiver is ignored when comparing signatures for identity.
+type Signature struct {
+ // We need to keep the scope in Signature (rather than passing it around
+ // and storing it in the Func Object) because when type-checking a function
+ // literal we call the general type checker which returns a general Type.
+ // We then unpack the *Signature and use the scope for the literal body.
+ rparams *TypeParamList // receiver type parameters from left to right, or nil
+ tparams *TypeParamList // type parameters from left to right, or nil
+ scope *Scope // function scope for package-local and non-instantiated signatures; nil otherwise
+ recv *Var // nil if not a method
+ params *Tuple // (incoming) parameters from left to right; or nil
+ results *Tuple // (outgoing) results from left to right; or nil
+ variadic bool // true if the last parameter's type is of the form ...T (or string, for append built-in only)
+}
+
+// NewSignatureType creates a new function type for the given receiver,
+// receiver type parameters, type parameters, parameters, and results. If
+// variadic is set, params must hold at least one parameter and the last
+// parameter's core type must be of unnamed slice or bytestring type.
+// If recv is non-nil, typeParams must be empty. If recvTypeParams is
+// non-empty, recv must be non-nil.
+func NewSignatureType(recv *Var, recvTypeParams, typeParams []*TypeParam, params, results *Tuple, variadic bool) *Signature {
+ if variadic {
+ n := params.Len()
+ if n == 0 {
+ panic("variadic function must have at least one parameter")
+ }
+ core := coreString(params.At(n - 1).typ)
+ if _, ok := core.(*Slice); !ok && !isString(core) {
+ panic(fmt.Sprintf("got %s, want variadic parameter with unnamed slice type or string as core type", core.String()))
+ }
+ }
+ sig := &Signature{recv: recv, params: params, results: results, variadic: variadic}
+ if len(recvTypeParams) != 0 {
+ if recv == nil {
+ panic("function with receiver type parameters must have a receiver")
+ }
+ sig.rparams = bindTParams(recvTypeParams)
+ }
+ if len(typeParams) != 0 {
+ if recv != nil {
+ panic("function with type parameters cannot have a receiver")
+ }
+ sig.tparams = bindTParams(typeParams)
+ }
+ return sig
+}
+
+// Recv returns the receiver of signature s (if a method), or nil if a
+// function. It is ignored when comparing signatures for identity.
+//
+// For an abstract method, Recv returns the enclosing interface either
+// as a *Named or an *Interface. Due to embedding, an interface may
+// contain methods whose receiver type is a different interface.
+func (s *Signature) Recv() *Var { return s.recv }
+
+// TypeParams returns the type parameters of signature s, or nil.
+func (s *Signature) TypeParams() *TypeParamList { return s.tparams }
+
+// SetTypeParams sets the type parameters of signature s.
+func (s *Signature) SetTypeParams(tparams []*TypeParam) { s.tparams = bindTParams(tparams) }
+
+// RecvTypeParams returns the receiver type parameters of signature s, or nil.
+func (s *Signature) RecvTypeParams() *TypeParamList { return s.rparams }
+
+// Params returns the parameters of signature s, or nil.
+func (s *Signature) Params() *Tuple { return s.params }
+
+// Results returns the results of signature s, or nil.
+func (s *Signature) Results() *Tuple { return s.results }
+
+// Variadic reports whether the signature s is variadic.
+func (s *Signature) Variadic() bool { return s.variadic }
+
+func (s *Signature) Underlying() Type { return s }
+func (s *Signature) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// funcType type-checks a function or method type.
+func (check *Checker) funcType(sig *Signature, recvPar *syntax.Field, tparams []*syntax.Field, ftyp *syntax.FuncType) {
+ check.openScope(ftyp, "function")
+ check.scope.isFunc = true
+ check.recordScope(ftyp, check.scope)
+ sig.scope = check.scope
+ defer check.closeScope()
+
+ if recvPar != nil {
+ // collect generic receiver type parameters, if any
+ // - a receiver type parameter is like any other type parameter, except that it is declared implicitly
+ // - the receiver specification acts as local declaration for its type parameters, which may be blank
+ _, rname, rparams := check.unpackRecv(recvPar.Type, true)
+ if len(rparams) > 0 {
+ tparams := make([]*TypeParam, len(rparams))
+ for i, rparam := range rparams {
+ tparams[i] = check.declareTypeParam(rparam)
+ }
+ sig.rparams = bindTParams(tparams)
+ // Blank identifiers don't get declared, so naive type-checking of the
+ // receiver type expression would fail in Checker.collectParams below,
+ // when Checker.ident cannot resolve the _ to a type.
+ //
+ // Checker.recvTParamMap maps these blank identifiers to their type parameter
+ // types, so that they may be resolved in Checker.ident when they fail
+ // lookup in the scope.
+ for i, p := range rparams {
+ if p.Value == "_" {
+ if check.recvTParamMap == nil {
+ check.recvTParamMap = make(map[*syntax.Name]*TypeParam)
+ }
+ check.recvTParamMap[p] = tparams[i]
+ }
+ }
+ // determine receiver type to get its type parameters
+ // and the respective type parameter bounds
+ var recvTParams []*TypeParam
+ if rname != nil {
+ // recv should be a Named type (otherwise an error is reported elsewhere)
+ // Also: Don't report an error via genericType since it will be reported
+ // again when we type-check the signature.
+ // TODO(gri) maybe the receiver should be marked as invalid instead?
+ if recv, _ := check.genericType(rname, false).(*Named); recv != nil {
+ recvTParams = recv.TypeParams().list()
+ }
+ }
+ // provide type parameter bounds
+ if len(tparams) == len(recvTParams) {
+ smap := makeRenameMap(recvTParams, tparams)
+ for i, tpar := range tparams {
+ recvTPar := recvTParams[i]
+ check.mono.recordCanon(tpar, recvTPar)
+ // recvTPar.bound is (possibly) parameterized in the context of the
+ // receiver type declaration. Substitute parameters for the current
+ // context.
+ tpar.bound = check.subst(tpar.obj.pos, recvTPar.bound, smap, nil)
+ }
+ } else if len(tparams) < len(recvTParams) {
+ // Reporting an error here is a stop-gap measure to avoid crashes in the
+ // compiler when a type parameter/argument cannot be inferred later. It
+ // may lead to follow-on errors (see issues #51339, #51343).
+ // TODO(gri) find a better solution
+ got := measure(len(tparams), "type parameter")
+ check.errorf(recvPar, "got %s, but receiver base type declares %d", got, len(recvTParams))
+ }
+ }
+ }
+
+ if tparams != nil {
+ // The parser will complain about invalid type parameters for methods.
+ check.collectTypeParams(&sig.tparams, tparams)
+ }
+
+ // Value (non-type) parameters' scope starts in the function body. Use a temporary scope for their
+ // declarations and then squash that scope into the parent scope (and report any redeclarations at
+ // that time).
+ scope := NewScope(check.scope, nopos, nopos, "function body (temp. scope)")
+ var recvList []*Var // TODO(gri) remove the need for making a list here
+ if recvPar != nil {
+ recvList, _ = check.collectParams(scope, []*syntax.Field{recvPar}, false) // use rewritten receiver type, if any
+ }
+ params, variadic := check.collectParams(scope, ftyp.ParamList, true)
+ results, _ := check.collectParams(scope, ftyp.ResultList, false)
+ scope.Squash(func(obj, alt Object) {
+ var err error_
+ err.errorf(obj, "%s redeclared in this block", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ })
+
+ if recvPar != nil {
+ // recv parameter list present (may be empty)
+ // spec: "The receiver is specified via an extra parameter section preceding the
+ // method name. That parameter section must declare a single parameter, the receiver."
+ var recv *Var
+ switch len(recvList) {
+ case 0:
+ // error reported by resolver
+ recv = NewParam(nopos, nil, "", Typ[Invalid]) // ignore recv below
+ default:
+ // more than one receiver
+ check.error(recvList[len(recvList)-1].Pos(), "method must have exactly one receiver")
+ fallthrough // continue with first receiver
+ case 1:
+ recv = recvList[0]
+ }
+ sig.recv = recv
+
+ // Delay validation of receiver type as it may cause premature expansion
+ // of types the receiver type is dependent on (see issues #51232, #51233).
+ check.later(func() {
+ rtyp, _ := deref(recv.typ)
+
+ // spec: "The receiver type must be of the form T or *T where T is a type name."
+ // (ignore invalid types - error was reported before)
+ if rtyp != Typ[Invalid] {
+ var err string
+ switch T := rtyp.(type) {
+ case *Named:
+ T.resolve(check.bestContext(nil))
+ // The receiver type may be an instantiated type referred to
+ // by an alias (which cannot have receiver parameters for now).
+ if T.TypeArgs() != nil && sig.RecvTypeParams() == nil {
+ check.errorf(recv.pos, "cannot define methods on instantiated type %s", recv.typ)
+ break
+ }
+ // spec: "The type denoted by T is called the receiver base type; it must not
+ // be a pointer or interface type and it must be declared in the same package
+ // as the method."
+ if T.obj.pkg != check.pkg {
+ err = "type not defined in this package"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ } else {
+ // The underlying type of a receiver base type can be a type parameter;
+ // e.g. for methods with a generic receiver T[P] with type T[P any] P.
+ // TODO(gri) Such declarations are currently disallowed.
+ // Revisit the need for underIs.
+ underIs(T, func(u Type) bool {
+ switch u := u.(type) {
+ case *Basic:
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ err = "unsafe.Pointer"
+ return false
+ }
+ case *Pointer, *Interface:
+ err = "pointer or interface type"
+ return false
+ }
+ return true
+ })
+ }
+ case *Basic:
+ err = "basic or unnamed type"
+ if check.conf.CompilerErrorMessages {
+ check.errorf(recv.pos, "cannot define new methods on non-local type %s", recv.typ)
+ err = ""
+ }
+ default:
+ check.errorf(recv.pos, "invalid receiver type %s", recv.typ)
+ }
+ if err != "" {
+ check.errorf(recv.pos, "invalid receiver type %s (%s)", recv.typ, err)
+ }
+ }
+ }).describef(recv, "validate receiver %s", recv)
+ }
+
+ sig.params = NewTuple(params...)
+ sig.results = NewTuple(results...)
+ sig.variadic = variadic
+}
+
+// collectParams declares the parameters of list in scope and returns the corresponding
+// variable list.
+func (check *Checker) collectParams(scope *Scope, list []*syntax.Field, variadicOk bool) (params []*Var, variadic bool) {
+ if list == nil {
+ return
+ }
+
+ var named, anonymous bool
+
+ var typ Type
+ var prev syntax.Expr
+ for i, field := range list {
+ ftype := field.Type
+ // type-check type of grouped fields only once
+ if ftype != prev {
+ prev = ftype
+ if t, _ := ftype.(*syntax.DotsType); t != nil {
+ ftype = t.Elem
+ if variadicOk && i == len(list)-1 {
+ variadic = true
+ } else {
+ check.softErrorf(t, "can only use ... with final parameter in list")
+ // ignore ... and continue
+ }
+ }
+ typ = check.varType(ftype)
+ }
+ // The parser ensures that f.Tag is nil and we don't
+ // care if a constructed AST contains a non-nil tag.
+ if field.Name != nil {
+ // named parameter
+ name := field.Name.Value
+ if name == "" {
+ check.error(field.Name, invalidAST+"anonymous parameter")
+ // ok to continue
+ }
+ par := NewParam(field.Name.Pos(), check.pkg, name, typ)
+ check.declare(scope, field.Name, par, scope.pos)
+ params = append(params, par)
+ named = true
+ } else {
+ // anonymous parameter
+ par := NewParam(field.Pos(), check.pkg, "", typ)
+ check.recordImplicit(field, par)
+ params = append(params, par)
+ anonymous = true
+ }
+ }
+
+ if named && anonymous {
+ check.error(list[0], invalidAST+"list contains both named and anonymous parameters")
+ // ok to continue
+ }
+
+ // For a variadic function, change the last parameter's type from T to []T.
+ // Since we type-checked T rather than ...T, we also need to retro-actively
+ // record the type for ...T.
+ if variadic {
+ last := params[len(params)-1]
+ last.typ = &Slice{elem: last.typ}
+ check.recordTypeAndValue(list[len(list)-1].Type, typexpr, last.typ, nil)
+ }
+
+ return
+}
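
The collectParams rewrite of a final ...T parameter to []T, and the NewSignatureType constructor above, have exported counterparts in go/types with the same shape. A small sketch, assuming the go/types mirror of this API:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// Construct the signature func(xs ...int) int. As in collectParams,
	// the variadic parameter is declared with slice type []int.
	xs := types.NewParam(token.NoPos, nil, "xs", types.NewSlice(types.Typ[types.Int]))
	res := types.NewParam(token.NoPos, nil, "", types.Typ[types.Int])
	sig := types.NewSignatureType(nil, nil, nil, types.NewTuple(xs), types.NewTuple(res), true)
	fmt.Println(sig) // func(xs ...int) int
}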
diff --git a/src/cmd/compile/internal/types2/sizeof_test.go b/src/cmd/compile/internal/types2/sizeof_test.go
new file mode 100644
index 0000000..1402005
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizeof_test.go
@@ -0,0 +1,64 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "reflect"
+ "testing"
+)
+
+// Signal size changes of important structures.
+
+func TestSizeof(t *testing.T) {
+ const _64bit = ^uint(0)>>32 != 0
+
+ var tests = []struct {
+ val interface{} // type as a value
+ _32bit uintptr // size on 32bit platforms
+ _64bit uintptr // size on 64bit platforms
+ }{
+ // Types
+ {Basic{}, 16, 32},
+ {Array{}, 16, 24},
+ {Slice{}, 8, 16},
+ {Struct{}, 24, 48},
+ {Pointer{}, 8, 16},
+ {Tuple{}, 12, 24},
+ {Signature{}, 28, 56},
+ {Union{}, 12, 24},
+ {Interface{}, 44, 88},
+ {Map{}, 16, 32},
+ {Chan{}, 12, 24},
+ {Named{}, 56, 104},
+ {TypeParam{}, 28, 48},
+ {term{}, 12, 24},
+
+ // Objects
+ {PkgName{}, 64, 104},
+ {Const{}, 64, 104},
+ {TypeName{}, 56, 88},
+ {Var{}, 60, 96},
+ {Func{}, 60, 96},
+ {Label{}, 60, 96},
+ {Builtin{}, 60, 96},
+ {Nil{}, 56, 88},
+
+ // Misc
+ {Scope{}, 60, 104},
+ {Package{}, 40, 80},
+ {_TypeSet{}, 28, 56},
+ }
+
+ for _, test := range tests {
+ got := reflect.TypeOf(test.val).Size()
+ want := test._32bit
+ if _64bit {
+ want = test._64bit
+ }
+ if got != want {
+ t.Errorf("unsafe.Sizeof(%T) = %d, want %d", test.val, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/sizes.go b/src/cmd/compile/internal/types2/sizes.go
new file mode 100644
index 0000000..6f98196
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizes.go
@@ -0,0 +1,273 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements Sizes.
+
+package types2
+
+// Sizes defines the sizing functions for package unsafe.
+type Sizes interface {
+ // Alignof returns the alignment of a variable of type T.
+ // Alignof must implement the alignment guarantees required by the spec.
+ Alignof(T Type) int64
+
+ // Offsetsof returns the offsets of the given struct fields, in bytes.
+ // Offsetsof must implement the offset guarantees required by the spec.
+ Offsetsof(fields []*Var) []int64
+
+ // Sizeof returns the size of a variable of type T.
+ // Sizeof must implement the size guarantees required by the spec.
+ Sizeof(T Type) int64
+}
+
+// StdSizes is a convenience type for creating commonly used Sizes.
+// It makes the following simplifying assumptions:
+//
+// - The size of explicitly sized basic types (int16, etc.) is the
+// specified size.
+// - The size of strings and interfaces is 2*WordSize.
+// - The size of slices is 3*WordSize.
+// - The size of an array of n elements corresponds to the size of
+// a struct of n consecutive fields of the array's element type.
+// - The size of a struct is the offset of the last field plus that
+// field's size. As with all element types, if the struct is used
+// in an array its size must first be aligned to a multiple of the
+// struct's alignment.
+// - All other types have size WordSize.
+// - Arrays and structs are aligned per spec definition; all other
+// types are naturally aligned with a maximum alignment MaxAlign.
+//
+// *StdSizes implements Sizes.
+//
+type StdSizes struct {
+ WordSize int64 // word size in bytes - must be >= 4 (32bits)
+ MaxAlign int64 // maximum alignment in bytes - must be >= 1
+}
+
+func (s *StdSizes) Alignof(T Type) int64 {
+ // For arrays and structs, alignment is defined in terms
+ // of alignment of the elements and fields, respectively.
+ switch t := under(T).(type) {
+ case *Array:
+ // spec: "For a variable x of array type: unsafe.Alignof(x)
+ // is the same as unsafe.Alignof(x[0]), but at least 1."
+ return s.Alignof(t.elem)
+ case *Struct:
+ // spec: "For a variable x of struct type: unsafe.Alignof(x)
+ // is the largest of the values unsafe.Alignof(x.f) for each
+ // field f of x, but at least 1."
+ max := int64(1)
+ for _, f := range t.fields {
+ if a := s.Alignof(f.typ); a > max {
+ max = a
+ }
+ }
+ return max
+ case *Slice, *Interface:
+ // Multiword data structures are effectively structs
+ // in which each element has size WordSize.
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Alignof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize
+ case *Basic:
+ // Strings are like slices and interfaces.
+ if t.Info()&IsString != 0 {
+ return s.WordSize
+ }
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ a := s.Sizeof(T) // may be 0
+ // spec: "For a variable x of any type: unsafe.Alignof(x) is at least 1."
+ if a < 1 {
+ return 1
+ }
+ // complex{64,128} are aligned like [2]float{32,64}.
+ if isComplex(T) {
+ a /= 2
+ }
+ if a > s.MaxAlign {
+ return s.MaxAlign
+ }
+ return a
+}
+
+func (s *StdSizes) Offsetsof(fields []*Var) []int64 {
+ offsets := make([]int64, len(fields))
+ var o int64
+ for i, f := range fields {
+ a := s.Alignof(f.typ)
+ o = align(o, a)
+ offsets[i] = o
+ o += s.Sizeof(f.typ)
+ }
+ return offsets
+}
+
+var basicSizes = [...]byte{
+ Bool: 1,
+ Int8: 1,
+ Int16: 2,
+ Int32: 4,
+ Int64: 8,
+ Uint8: 1,
+ Uint16: 2,
+ Uint32: 4,
+ Uint64: 8,
+ Float32: 4,
+ Float64: 8,
+ Complex64: 8,
+ Complex128: 16,
+}
+
+func (s *StdSizes) Sizeof(T Type) int64 {
+ switch t := under(T).(type) {
+ case *Basic:
+ assert(isTyped(T))
+ k := t.kind
+ if int(k) < len(basicSizes) {
+ if s := basicSizes[k]; s > 0 {
+ return int64(s)
+ }
+ }
+ if k == String {
+ return s.WordSize * 2
+ }
+ case *Array:
+ n := t.len
+ if n <= 0 {
+ return 0
+ }
+ // n > 0
+ a := s.Alignof(t.elem)
+ z := s.Sizeof(t.elem)
+ return align(z, a)*(n-1) + z
+ case *Slice:
+ return s.WordSize * 3
+ case *Struct:
+ n := t.NumFields()
+ if n == 0 {
+ return 0
+ }
+ offsets := s.Offsetsof(t.fields)
+ return offsets[n-1] + s.Sizeof(t.fields[n-1].typ)
+ case *Interface:
+ // Type parameters lead to variable sizes/alignments;
+ // StdSizes.Sizeof won't be called for them.
+ assert(!isTypeParam(T))
+ return s.WordSize * 2
+ case *TypeParam, *Union:
+ unreachable()
+ }
+ return s.WordSize // catch-all
+}
+
+// common architecture word sizes and alignments
+var gcArchSizes = map[string]*StdSizes{
+ "386": {4, 4},
+ "arm": {4, 4},
+ "arm64": {8, 8},
+ "amd64": {8, 8},
+ "amd64p32": {4, 8},
+ "mips": {4, 4},
+ "mipsle": {4, 4},
+ "mips64": {8, 8},
+ "mips64le": {8, 8},
+ "ppc64": {8, 8},
+ "ppc64le": {8, 8},
+ "riscv64": {8, 8},
+ "s390x": {8, 8},
+ "sparc64": {8, 8},
+ "wasm": {8, 8},
+ // When adding more architectures here,
+ // update the doc string of SizesFor below.
+}
+
+// SizesFor returns the Sizes used by a compiler for an architecture.
+// The result is nil if a compiler/architecture pair is not known.
+//
+// Supported architectures for compiler "gc":
+// "386", "arm", "arm64", "amd64", "amd64p32", "mips", "mipsle",
+// "mips64", "mips64le", "ppc64", "ppc64le", "riscv64", "s390x", "sparc64", "wasm".
+func SizesFor(compiler, arch string) Sizes {
+ var m map[string]*StdSizes
+ switch compiler {
+ case "gc":
+ m = gcArchSizes
+ case "gccgo":
+ m = gccgoArchSizes
+ default:
+ return nil
+ }
+ s, ok := m[arch]
+ if !ok {
+ return nil
+ }
+ return s
+}
+
+// stdSizes is used if Config.Sizes == nil.
+var stdSizes = SizesFor("gc", "amd64")
+
+func (conf *Config) alignof(T Type) int64 {
+ if s := conf.Sizes; s != nil {
+ if a := s.Alignof(T); a >= 1 {
+ return a
+ }
+ panic("Config.Sizes.Alignof returned an alignment < 1")
+ }
+ return stdSizes.Alignof(T)
+}
+
+func (conf *Config) offsetsof(T *Struct) []int64 {
+ var offsets []int64
+ if T.NumFields() > 0 {
+ // compute offsets on demand
+ if s := conf.Sizes; s != nil {
+ offsets = s.Offsetsof(T.fields)
+ // sanity checks
+ if len(offsets) != T.NumFields() {
+ panic("Config.Sizes.Offsetsof returned the wrong number of offsets")
+ }
+ for _, o := range offsets {
+ if o < 0 {
+ panic("Config.Sizes.Offsetsof returned an offset < 0")
+ }
+ }
+ } else {
+ offsets = stdSizes.Offsetsof(T.fields)
+ }
+ }
+ return offsets
+}
+
+// offsetof returns the offset of the field specified via
+// the index sequence relative to typ. All embedded fields
+// must be structs (rather than pointer to structs).
+func (conf *Config) offsetof(typ Type, index []int) int64 {
+ var o int64
+ for _, i := range index {
+ s := under(typ).(*Struct)
+ o += conf.offsetsof(s)[i]
+ typ = s.fields[i].typ
+ }
+ return o
+}
+
+func (conf *Config) sizeof(T Type) int64 {
+ if s := conf.Sizes; s != nil {
+ if z := s.Sizeof(T); z >= 0 {
+ return z
+ }
+ panic("Config.Sizes.Sizeof returned a size < 0")
+ }
+ return stdSizes.Sizeof(T)
+}
+
+// align returns the smallest y >= x such that y % a == 0.
+func align(x, a int64) int64 {
+ y := x + a - 1
+ return y - y%a
+}
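
The rules above (the array formula align(z, a)*(n-1) + z, complex types aligned like [2]floatN) can be exercised through the exported go/types API, whose StdSizes is assumed here to mirror this file:

package main

import (
	"fmt"
	"go/token"
	"go/types"
)

func main() {
	// StdSizes for gc/amd64: WordSize 8, MaxAlign 8.
	sizes := types.SizesFor("gc", "amd64")

	// complex128 is aligned like [2]float64: size 16, alignment 8.
	c128 := types.Typ[types.Complex128]
	fmt.Println(sizes.Sizeof(c128), sizes.Alignof(c128)) // 16 8

	// [3]struct{ b byte; i int64 } follows align(z, a)*(n-1) + z
	// with z = 16 and a = 8, giving 48 bytes.
	elem := types.NewStruct([]*types.Var{
		types.NewField(token.NoPos, nil, "b", types.Typ[types.Byte], false),
		types.NewField(token.NoPos, nil, "i", types.Typ[types.Int64], false),
	}, nil)
	fmt.Println(sizes.Sizeof(types.NewArray(elem, 3))) // 48
}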
diff --git a/src/cmd/compile/internal/types2/sizes_test.go b/src/cmd/compile/internal/types2/sizes_test.go
new file mode 100644
index 0000000..c9a4942
--- /dev/null
+++ b/src/cmd/compile/internal/types2/sizes_test.go
@@ -0,0 +1,107 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains tests for sizes.
+
+package types2_test
+
+import (
+ "cmd/compile/internal/syntax"
+ "cmd/compile/internal/types2"
+ "testing"
+)
+
+// findStructType typechecks src and returns the first struct type encountered.
+func findStructType(t *testing.T, src string) *types2.Struct {
+ f, err := parseSrc("x.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)}
+ var conf types2.Config
+ _, err = conf.Check("x", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tv := range info.Types {
+ if ts, ok := tv.Type.(*types2.Struct); ok {
+ return ts
+ }
+ }
+ t.Fatalf("failed to find a struct type in src:\n%s\n", src)
+ return nil
+}
+
+// Issue 16316
+func TestMultipleSizeUse(t *testing.T) {
+ const src = `
+package main
+
+type S struct {
+ i int
+ b bool
+ s string
+ n int
+}
+`
+ ts := findStructType(t, src)
+ sizes := types2.StdSizes{WordSize: 4, MaxAlign: 4}
+ if got := sizes.Sizeof(ts); got != 20 {
+ t.Errorf("Sizeof(%v) with WordSize 4 = %d want 20", ts, got)
+ }
+ sizes = types2.StdSizes{WordSize: 8, MaxAlign: 8}
+ if got := sizes.Sizeof(ts); got != 40 {
+ t.Errorf("Sizeof(%v) with WordSize 8 = %d want 40", ts, got)
+ }
+}
+
+// Issue 16464
+func TestAlignofNaclSlice(t *testing.T) {
+ const src = `
+package main
+
+var s struct {
+ x *int
+ y []byte
+}
+`
+ ts := findStructType(t, src)
+ sizes := &types2.StdSizes{WordSize: 4, MaxAlign: 8}
+ var fields []*types2.Var
+ // Make a copy manually :(
+ for i := 0; i < ts.NumFields(); i++ {
+ fields = append(fields, ts.Field(i))
+ }
+ offsets := sizes.Offsetsof(fields)
+ if offsets[0] != 0 || offsets[1] != 4 {
+ t.Errorf("OffsetsOf(%v) = %v want %v", ts, offsets, []int{0, 4})
+ }
+}
+
+func TestIssue16902(t *testing.T) {
+ const src = `
+package a
+
+import "unsafe"
+
+const _ = unsafe.Offsetof(struct{ x int64 }{}.x)
+`
+ f, err := parseSrc("x.go", src)
+ if err != nil {
+ t.Fatal(err)
+ }
+ info := types2.Info{Types: make(map[syntax.Expr]types2.TypeAndValue)}
+ conf := types2.Config{
+ Importer: defaultImporter(),
+ Sizes: &types2.StdSizes{WordSize: 8, MaxAlign: 8},
+ }
+ _, err = conf.Check("x", []*syntax.File{f}, &info)
+ if err != nil {
+ t.Fatal(err)
+ }
+ for _, tv := range info.Types {
+ _ = conf.Sizes.Sizeof(tv.Type)
+ _ = conf.Sizes.Alignof(tv.Type)
+ }
+}
diff --git a/src/cmd/compile/internal/types2/slice.go b/src/cmd/compile/internal/types2/slice.go
new file mode 100644
index 0000000..9c22a6f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/slice.go
@@ -0,0 +1,19 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Slice represents a slice type.
+type Slice struct {
+ elem Type
+}
+
+// NewSlice returns a new slice type for the given element type.
+func NewSlice(elem Type) *Slice { return &Slice{elem: elem} }
+
+// Elem returns the element type of slice s.
+func (s *Slice) Elem() Type { return s.elem }
+
+func (s *Slice) Underlying() Type { return s }
+func (s *Slice) String() string { return TypeString(s, nil) }
diff --git a/src/cmd/compile/internal/types2/stdlib_test.go b/src/cmd/compile/internal/types2/stdlib_test.go
new file mode 100644
index 0000000..551611d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/stdlib_test.go
@@ -0,0 +1,332 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests types2.Check by using it to
+// typecheck the standard library and tests.
+
+package types2_test
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/build"
+ "internal/testenv"
+ "os"
+ "path/filepath"
+ "runtime"
+ "strings"
+ "testing"
+ "time"
+
+ . "cmd/compile/internal/types2"
+)
+
+var stdLibImporter = defaultImporter()
+
+func TestStdlib(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ pkgCount := 0
+ duration := walkPkgDirs(filepath.Join(runtime.GOROOT(), "src"), func(dir string, filenames []string) {
+ typecheck(t, dir, filenames)
+ pkgCount++
+ }, t.Error)
+
+ if testing.Verbose() {
+ fmt.Println(pkgCount, "packages typechecked in", duration)
+ }
+}
+
+// firstComment returns the contents of the first non-empty comment in
+// the given file, "skip", or the empty string. Regardless of which comments
+// are present, if any of them contains a build tag, the result is always
+// "skip". Only comments within the first 4K of the file are considered.
+// TODO(gri) should only read until we see "package" token.
+func firstComment(filename string) (first string) {
+ f, err := os.Open(filename)
+ if err != nil {
+ return ""
+ }
+ defer f.Close()
+
+ // read at most 4KB
+ var buf [4 << 10]byte
+ n, _ := f.Read(buf[:])
+ src := bytes.NewBuffer(buf[:n])
+
+ // TODO(gri) we need a better way to terminate CommentsDo
+ defer func() {
+ if p := recover(); p != nil {
+ if s, ok := p.(string); ok {
+ first = s
+ }
+ }
+ }()
+
+ syntax.CommentsDo(src, func(_, _ uint, text string) {
+ if text[0] != '/' {
+ return // not a comment
+ }
+
+ // extract comment text
+ if text[1] == '*' {
+ text = text[:len(text)-2]
+ }
+ text = strings.TrimSpace(text[2:])
+
+ if strings.HasPrefix(text, "+build ") {
+ panic("skip")
+ }
+ if first == "" {
+ first = text // text may be "" but that's ok
+ }
+ // continue as we may still see build tags
+ })
+
+ return
+}
+
+func testTestDir(t *testing.T, path string, ignore ...string) {
+ files, err := os.ReadDir(path)
+ if err != nil {
+ t.Fatal(err)
+ }
+
+ excluded := make(map[string]bool)
+ for _, filename := range ignore {
+ excluded[filename] = true
+ }
+
+ for _, f := range files {
+ // filter directory contents
+ if f.IsDir() || !strings.HasSuffix(f.Name(), ".go") || excluded[f.Name()] {
+ continue
+ }
+
+ // get per-file instructions
+ expectErrors := false
+ filename := filepath.Join(path, f.Name())
+ goVersion := ""
+ if comment := firstComment(filename); comment != "" {
+ fields := strings.Fields(comment)
+ switch fields[0] {
+ case "skip", "compiledir":
+ continue // ignore this file
+ case "errorcheck":
+ expectErrors = true
+ for _, arg := range fields[1:] {
+ if arg == "-0" || arg == "-+" || arg == "-std" {
+ // Marked explicitly as not expecting errors (-0),
+ // or marked as compiling runtime/stdlib, which is only done
+ // to trigger runtime/stdlib-only error output.
+ // In both cases, the code should typecheck.
+ expectErrors = false
+ break
+ }
+ const prefix = "-lang="
+ if strings.HasPrefix(arg, prefix) {
+ goVersion = arg[len(prefix):]
+ }
+ }
+ }
+ }
+
+ // parse and type-check file
+ if testing.Verbose() {
+ fmt.Println("\t", filename)
+ }
+ file, err := syntax.ParseFile(filename, nil, nil, 0)
+ if err == nil {
+ conf := Config{GoVersion: goVersion, Importer: stdLibImporter}
+ _, err = conf.Check(filename, []*syntax.File{file}, nil)
+ }
+
+ if expectErrors {
+ if err == nil {
+ t.Errorf("expected errors but found none in %s", filename)
+ }
+ } else {
+ if err != nil {
+ t.Error(err)
+ }
+ }
+ }
+}
+
+func TestStdTest(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test"),
+ "cmplxdivide.go", // also needs file cmplxdivide1.go - ignore
+ "directive.go", // tests compiler rejection of bad directive placement - ignore
+ "directive2.go", // tests compiler rejection of bad directive placement - ignore
+ "embedfunc.go", // tests //go:embed
+ "embedvers.go", // tests //go:embed
+ "linkname2.go", // types2 doesn't check validity of //go:xxx directives
+ "linkname3.go", // types2 doesn't check validity of //go:xxx directives
+ )
+}
+
+func TestStdFixed(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ if testing.Short() && testenv.Builder() == "" {
+ t.Skip("skipping in short mode")
+ }
+
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "fixedbugs"),
+ "bug248.go", "bug302.go", "bug369.go", // complex test instructions - ignore
+ "issue6889.go", // gc-specific test
+ "issue11362.go", // canonical import path check
+ "issue16369.go", // types2 handles this correctly - not an issue
+ "issue18459.go", // types2 doesn't check validity of //go:xxx directives
+ "issue18882.go", // types2 doesn't check validity of //go:xxx directives
+ "issue20529.go", // types2 does not have constraints on stack size
+ "issue22200.go", // types2 does not have constraints on stack size
+ "issue22200b.go", // types2 does not have constraints on stack size
+ "issue25507.go", // types2 does not have constraints on stack size
+ "issue20780.go", // types2 does not have constraints on stack size
+ "issue42058a.go", // types2 does not have constraints on channel element size
+ "issue42058b.go", // types2 does not have constraints on channel element size
+ "issue48097.go", // go/types doesn't check validity of //go:xxx directives, and non-init bodyless function
+ "issue48230.go", // go/types doesn't check validity of //go:xxx directives
+ "issue49767.go", // go/types does not have constraints on channel element size
+ "issue49814.go", // go/types does not have constraints on array size
+ )
+}
+
+func TestStdKen(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ testTestDir(t, filepath.Join(runtime.GOROOT(), "test", "ken"))
+}
+
+// Package paths of excluded packages.
+var excluded = map[string]bool{
+ "builtin": true,
+
+ // See #46027: some imports are missing for this submodule.
+ "crypto/ed25519/internal/edwards25519/field/_asm": true,
+}
+
+// typecheck typechecks the given package files.
+func typecheck(t *testing.T, path string, filenames []string) {
+ // parse package files
+ var files []*syntax.File
+ for _, filename := range filenames {
+ errh := func(err error) { t.Error(err) }
+ file, err := syntax.ParseFile(filename, errh, nil, syntax.AllowGenerics)
+ if err != nil {
+ return
+ }
+
+ if testing.Verbose() {
+ if len(files) == 0 {
+ fmt.Println("package", file.PkgName.Value)
+ }
+ fmt.Println("\t", filename)
+ }
+
+ files = append(files, file)
+ }
+
+ // typecheck package files
+ conf := Config{
+ Error: func(err error) { t.Error(err) },
+ Importer: stdLibImporter,
+ }
+ info := Info{Uses: make(map[*syntax.Name]Object)}
+ conf.Check(path, files, &info)
+
+ // Perform checks of API invariants.
+
+ // All Objects have a package, except predeclared ones.
+ errorError := Universe.Lookup("error").Type().Underlying().(*Interface).ExplicitMethod(0) // (error).Error
+ for id, obj := range info.Uses {
+ predeclared := obj == Universe.Lookup(obj.Name()) || obj == errorError
+ if predeclared == (obj.Pkg() != nil) {
+ posn := id.Pos()
+ if predeclared {
+ t.Errorf("%s: predeclared object with package: %s", posn, obj)
+ } else {
+ t.Errorf("%s: user-defined object without package: %s", posn, obj)
+ }
+ }
+ }
+}
+
+// pkgFilenames returns the list of package filenames for the given directory.
+func pkgFilenames(dir string) ([]string, error) {
+ ctxt := build.Default
+ ctxt.CgoEnabled = false
+ pkg, err := ctxt.ImportDir(dir, 0)
+ if err != nil {
+ if _, nogo := err.(*build.NoGoError); nogo {
+ return nil, nil // no *.go files, not an error
+ }
+ return nil, err
+ }
+ if excluded[pkg.ImportPath] {
+ return nil, nil
+ }
+ var filenames []string
+ for _, name := range pkg.GoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ for _, name := range pkg.TestGoFiles {
+ filenames = append(filenames, filepath.Join(pkg.Dir, name))
+ }
+ return filenames, nil
+}
+
+func walkPkgDirs(dir string, pkgh func(dir string, filenames []string), errh func(args ...interface{})) time.Duration {
+ w := walker{time.Now(), 10 * time.Millisecond, pkgh, errh}
+ w.walk(dir)
+ return time.Since(w.start)
+}
+
+type walker struct {
+ start time.Time
+ dmax time.Duration
+ pkgh func(dir string, filenames []string)
+ errh func(args ...interface{})
+}
+
+func (w *walker) walk(dir string) {
+ // limit run time for short tests
+ if testing.Short() && time.Since(w.start) >= w.dmax {
+ return
+ }
+
+ files, err := os.ReadDir(dir)
+ if err != nil {
+ w.errh(err)
+ return
+ }
+
+ // apply pkgh to the files in directory dir
+ // but ignore files directly under $GOROOT/src (might be temporary test files).
+ if dir != filepath.Join(runtime.GOROOT(), "src") {
+ files, err := pkgFilenames(dir)
+ if err != nil {
+ w.errh(err)
+ return
+ }
+ if files != nil {
+ w.pkgh(dir, files)
+ }
+ }
+
+ // traverse subdirectories, but don't walk into testdata
+ for _, f := range files {
+ if f.IsDir() && f.Name() != "testdata" {
+ w.walk(filepath.Join(dir, f.Name()))
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/stmt.go b/src/cmd/compile/internal/types2/stmt.go
new file mode 100644
index 0000000..4c8eac7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/stmt.go
@@ -0,0 +1,962 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements typechecking of statements.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "go/constant"
+ "sort"
+)
+
+func (check *Checker) funcBody(decl *declInfo, name string, sig *Signature, body *syntax.BlockStmt, iota constant.Value) {
+ if check.conf.IgnoreFuncBodies {
+ panic("function body not ignored")
+ }
+
+ if check.conf.Trace {
+ check.trace(body.Pos(), "--- %s: %s", name, sig)
+ defer func() {
+ check.trace(syntax.EndPos(body), "--- <end>")
+ }()
+ }
+
+ // set function scope extent
+ sig.scope.pos = body.Pos()
+ sig.scope.end = syntax.EndPos(body)
+
+ // save/restore current environment and set up function environment
+ // (and use 0 indentation at function start)
+ defer func(env environment, indent int) {
+ check.environment = env
+ check.indent = indent
+ }(check.environment, check.indent)
+ check.environment = environment{
+ decl: decl,
+ scope: sig.scope,
+ iota: iota,
+ sig: sig,
+ }
+ check.indent = 0
+
+ check.stmtList(0, body.List)
+
+ if check.hasLabel && !check.conf.IgnoreLabels {
+ check.labels(body)
+ }
+
+ if sig.results.Len() > 0 && !check.isTerminating(body, "") {
+ check.error(body.Rbrace, "missing return")
+ }
+
+ // spec: "Implementation restriction: A compiler may make it illegal to
+ // declare a variable inside a function body if the variable is never used."
+ check.usage(sig.scope)
+}
+
+func (check *Checker) usage(scope *Scope) {
+ var unused []*Var
+ for name, elem := range scope.elems {
+ elem = resolve(name, elem)
+ if v, _ := elem.(*Var); v != nil && !v.used {
+ unused = append(unused, v)
+ }
+ }
+ sort.Slice(unused, func(i, j int) bool {
+ return unused[i].pos.Cmp(unused[j].pos) < 0
+ })
+ for _, v := range unused {
+ check.softErrorf(v.pos, "%s declared but not used", v.name)
+ }
+
+ for _, scope := range scope.children {
+ // Don't go inside function literal scopes a second time;
+ // they are handled explicitly by funcBody.
+ if !scope.isFunc {
+ check.usage(scope)
+ }
+ }
+}
+
+// stmtContext is a bitset describing which
+// control-flow statements are permissible,
+// and provides additional context information
+// for better error messages.
+type stmtContext uint
+
+const (
+ // permissible control-flow statements
+ breakOk stmtContext = 1 << iota
+ continueOk
+ fallthroughOk
+
+ // additional context information
+ finalSwitchCase
+)
+
+func (check *Checker) simpleStmt(s syntax.Stmt) {
+ if s != nil {
+ check.stmt(0, s)
+ }
+}
+
+func trimTrailingEmptyStmts(list []syntax.Stmt) []syntax.Stmt {
+ for i := len(list); i > 0; i-- {
+ if _, ok := list[i-1].(*syntax.EmptyStmt); !ok {
+ return list[:i]
+ }
+ }
+ return nil
+}
+
+func (check *Checker) stmtList(ctxt stmtContext, list []syntax.Stmt) {
+ ok := ctxt&fallthroughOk != 0
+ inner := ctxt &^ fallthroughOk
+ list = trimTrailingEmptyStmts(list) // trailing empty statements are "invisible" to fallthrough analysis
+ for i, s := range list {
+ inner := inner
+ if ok && i+1 == len(list) {
+ inner |= fallthroughOk
+ }
+ check.stmt(inner, s)
+ }
+}
+
+func (check *Checker) multipleSwitchDefaults(list []*syntax.CaseClause) {
+ var first *syntax.CaseClause
+ for _, c := range list {
+ if c.Cases == nil {
+ if first != nil {
+ check.errorf(c, "multiple defaults (first at %s)", first.Pos())
+ // TODO(gri) probably ok to bail out after first error (and simplify this code)
+ } else {
+ first = c
+ }
+ }
+ }
+}
+
+func (check *Checker) multipleSelectDefaults(list []*syntax.CommClause) {
+ var first *syntax.CommClause
+ for _, c := range list {
+ if c.Comm == nil {
+ if first != nil {
+ check.errorf(c, "multiple defaults (first at %s)", first.Pos())
+ // TODO(gri) probably ok to bail out after first error (and simplify this code)
+ } else {
+ first = c
+ }
+ }
+ }
+}
+
+func (check *Checker) openScope(node syntax.Node, comment string) {
+ check.openScopeUntil(node, syntax.EndPos(node), comment)
+}
+
+func (check *Checker) openScopeUntil(node syntax.Node, end syntax.Pos, comment string) {
+ scope := NewScope(check.scope, node.Pos(), end, comment)
+ check.recordScope(node, scope)
+ check.scope = scope
+}
+
+func (check *Checker) closeScope() {
+ check.scope = check.scope.Parent()
+}
+
+func (check *Checker) suspendedCall(keyword string, call *syntax.CallExpr) {
+ var x operand
+ var msg string
+ switch check.rawExpr(&x, call, nil, false) {
+ case conversion:
+ msg = "requires function call, not conversion"
+ case expression:
+ msg = "discards result of"
+ case statement:
+ return
+ default:
+ unreachable()
+ }
+ check.errorf(&x, "%s %s %s", keyword, msg, &x)
+}
+
+// goVal returns the Go value for val, or nil.
+func goVal(val constant.Value) interface{} {
+ // val should exist, but be conservative and check
+ if val == nil {
+ return nil
+ }
+ // Match implementation restriction of other compilers.
+ // gc only checks duplicates for integer, floating-point
+ // and string values, so only create Go values for these
+ // types.
+ switch val.Kind() {
+ case constant.Int:
+ if x, ok := constant.Int64Val(val); ok {
+ return x
+ }
+ if x, ok := constant.Uint64Val(val); ok {
+ return x
+ }
+ case constant.Float:
+ if x, ok := constant.Float64Val(val); ok {
+ return x
+ }
+ case constant.String:
+ return constant.StringVal(val)
+ }
+ return nil
+}
+
+// A valueMap maps a case value (of a basic Go type) to a list of positions
+// where the same case value appeared, together with the corresponding case
+// types.
+// Since two case values may have the same "underlying" value but different
+// types we need to also check the value's types (e.g., byte(1) vs myByte(1))
+// when the switch expression is of interface type.
+type (
+ valueMap map[interface{}][]valueType // underlying Go value -> valueType
+ valueType struct {
+ pos syntax.Pos
+ typ Type
+ }
+)
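+
+// For illustration (a sketch, not checker code): caseValues flags two cases
+// as duplicates only when both their values and their types match, e.g.
+//
+//	type myByte byte
+//	var x interface{}
+//	switch x {
+//	case byte(1), byte(1): // duplicate case byte(1) in expression switch
+//	case myByte(1): // ok: same underlying value, different type
+//	}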
+
+func (check *Checker) caseValues(x *operand, values []syntax.Expr, seen valueMap) {
+L:
+ for _, e := range values {
+ var v operand
+ check.expr(&v, e)
+ if x.mode == invalid || v.mode == invalid {
+ continue L
+ }
+ check.convertUntyped(&v, x.typ)
+ if v.mode == invalid {
+ continue L
+ }
+ // Order matters: By comparing v against x, error positions are at the case values.
+ res := v // keep original v unchanged
+ check.comparison(&res, x, syntax.Eql, true)
+ if res.mode == invalid {
+ continue L
+ }
+ if v.mode != constant_ {
+ continue L // we're done
+ }
+ // look for duplicate values
+ if val := goVal(v.val); val != nil {
+ // look for duplicate types for a given value
+ // (quadratic algorithm, but these lists tend to be very short)
+ for _, vt := range seen[val] {
+ if Identical(v.typ, vt.typ) {
+ var err error_
+ err.errorf(&v, "duplicate case %s in expression switch", &v)
+ err.errorf(vt.pos, "previous case")
+ check.report(&err)
+ continue L
+ }
+ }
+ seen[val] = append(seen[val], valueType{v.Pos(), v.typ})
+ }
+ }
+}
+
+// isNil reports whether the expression e denotes the predeclared value nil.
+func (check *Checker) isNil(e syntax.Expr) bool {
+ // The only way to express the nil value is by literally writing nil (possibly in parentheses).
+ if name, _ := unparen(e).(*syntax.Name); name != nil {
+ _, ok := check.lookup(name.Value).(*Nil)
+ return ok
+ }
+ return false
+}
+
+// If the type switch expression is invalid, x is nil.
+func (check *Checker) caseTypes(x *operand, types []syntax.Expr, seen map[Type]syntax.Expr) (T Type) {
+ var dummy operand
+L:
+ for _, e := range types {
+ // The spec allows the value nil instead of a type.
+ if check.isNil(e) {
+ T = nil
+ check.expr(&dummy, e) // run e through expr so we get the usual Info recordings
+ } else {
+ T = check.varType(e)
+ if T == Typ[Invalid] {
+ continue L
+ }
+ }
+ // look for duplicate types
+ // (quadratic algorithm, but type switches tend to be reasonably small)
+ for t, other := range seen {
+ if T == nil && t == nil || T != nil && t != nil && Identical(T, t) {
+ // talk about "case" rather than "type" because of nil case
+ Ts := "nil"
+ if T != nil {
+ Ts = TypeString(T, check.qualifier)
+ }
+ var err error_
+ err.errorf(e, "duplicate case %s in type switch", Ts)
+ err.errorf(other, "previous case")
+ check.report(&err)
+ continue L
+ }
+ }
+ seen[T] = e
+ if x != nil && T != nil {
+ check.typeAssertion(e, x, T, true)
+ }
+ }
+ return
+}
+
+// TODO(gri) Once we are certain that typeHash is correct in all situations, use this version of caseTypes instead.
+// (Currently it may be possible that different types have identical names and import paths due to ImporterFrom.)
+//
+// func (check *Checker) caseTypes(x *operand, xtyp *Interface, types []syntax.Expr, seen map[string]syntax.Expr) (T Type) {
+// var dummy operand
+// L:
+// for _, e := range types {
+// // The spec allows the value nil instead of a type.
+// var hash string
+// if check.isNil(e) {
+// check.expr(&dummy, e) // run e through expr so we get the usual Info recordings
+// T = nil
+// hash = "<nil>" // avoid collision with a type named nil
+// } else {
+// T = check.varType(e)
+// if T == Typ[Invalid] {
+// continue L
+// }
+// hash = typeHash(T, nil)
+// }
+// // look for duplicate types
+// if other := seen[hash]; other != nil {
+// // talk about "case" rather than "type" because of nil case
+// Ts := "nil"
+// if T != nil {
+// Ts = TypeString(T, check.qualifier)
+// }
+// var err error_
+// err.errorf(e, "duplicate case %s in type switch", Ts)
+// err.errorf(other, "previous case")
+// check.report(&err)
+// continue L
+// }
+// seen[hash] = e
+// if T != nil {
+// check.typeAssertion(e, x, xtyp, T, true)
+// }
+// }
+// return
+// }
+
+// stmt typechecks statement s.
+func (check *Checker) stmt(ctxt stmtContext, s syntax.Stmt) {
+ // statements must end with the same top scope as they started with
+ if debug {
+ defer func(scope *Scope) {
+ // don't check if code is panicking
+ if p := recover(); p != nil {
+ panic(p)
+ }
+ assert(scope == check.scope)
+ }(check.scope)
+ }
+
+ // process collected function literals before scope changes
+ defer check.processDelayed(len(check.delayed))
+
+ inner := ctxt &^ (fallthroughOk | finalSwitchCase)
+ switch s := s.(type) {
+ case *syntax.EmptyStmt:
+ // ignore
+
+ case *syntax.DeclStmt:
+ check.declStmt(s.DeclList)
+
+ case *syntax.LabeledStmt:
+ check.hasLabel = true
+ check.stmt(ctxt, s.Stmt)
+
+ case *syntax.ExprStmt:
+ // spec: "With the exception of specific built-in functions,
+ // function and method calls and receive operations can appear
+ // in statement context. Such statements may be parenthesized."
+ var x operand
+ kind := check.rawExpr(&x, s.X, nil, false)
+ var msg string
+ switch x.mode {
+ default:
+ if kind == statement {
+ return
+ }
+ msg = "is not used"
+ case builtin:
+ msg = "must be called"
+ case typexpr:
+ msg = "is not an expression"
+ }
+ check.errorf(&x, "%s %s", &x, msg)
+
+ case *syntax.SendStmt:
+ var ch, val operand
+ check.expr(&ch, s.Chan)
+ check.expr(&val, s.Value)
+ if ch.mode == invalid || val.mode == invalid {
+ return
+ }
+ u := coreType(ch.typ)
+ if u == nil {
+ check.errorf(s, invalidOp+"cannot send to %s: no core type", &ch)
+ return
+ }
+ uch, _ := u.(*Chan)
+ if uch == nil {
+ check.errorf(s, invalidOp+"cannot send to non-channel %s", &ch)
+ return
+ }
+ if uch.dir == RecvOnly {
+ check.errorf(s, invalidOp+"cannot send to receive-only channel %s", &ch)
+ return
+ }
+ check.assignment(&val, uch.elem, "send")
+
+ case *syntax.AssignStmt:
+ lhs := unpackExpr(s.Lhs)
+ if s.Rhs == nil {
+ // x++ or x--
+ if len(lhs) != 1 {
+ check.errorf(s, invalidAST+"%s%s requires one operand", s.Op, s.Op)
+ return
+ }
+ var x operand
+ check.expr(&x, lhs[0])
+ if x.mode == invalid {
+ return
+ }
+ if !allNumeric(x.typ) {
+ check.errorf(lhs[0], invalidOp+"%s%s%s (non-numeric type %s)", lhs[0], s.Op, s.Op, x.typ)
+ return
+ }
+ check.assignVar(lhs[0], &x)
+ return
+ }
+
+ rhs := unpackExpr(s.Rhs)
+ switch s.Op {
+ case 0:
+ check.assignVars(lhs, rhs)
+ return
+ case syntax.Def:
+ check.shortVarDecl(s.Pos(), lhs, rhs)
+ return
+ }
+
+ // assignment operations
+ if len(lhs) != 1 || len(rhs) != 1 {
+ check.errorf(s, "assignment operation %s requires single-valued expressions", s.Op)
+ return
+ }
+
+ var x operand
+ check.binary(&x, nil, lhs[0], rhs[0], s.Op)
+ check.assignVar(lhs[0], &x)
+
+ case *syntax.CallStmt:
+ kind := "go"
+ if s.Tok == syntax.Defer {
+ kind = "defer"
+ }
+ check.suspendedCall(kind, s.Call)
+
+ case *syntax.ReturnStmt:
+ res := check.sig.results
+ // Return with implicit results allowed for function with named results.
+ // (If one is named, all are named.)
+ results := unpackExpr(s.Results)
+ if len(results) == 0 && res.Len() > 0 && res.vars[0].name != "" {
+ // spec: "Implementation restriction: A compiler may disallow an empty expression
+ // list in a "return" statement if a different entity (constant, type, or variable)
+ // with the same name as a result parameter is in scope at the place of the return."
+ for _, obj := range res.vars {
+ if alt := check.lookup(obj.name); alt != nil && alt != obj {
+ var err error_
+ err.errorf(s, "result parameter %s not in scope at return", obj.name)
+ err.errorf(alt, "inner declaration of %s", obj)
+ check.report(&err)
+ // ok to continue
+ }
+ }
+ } else {
+ var lhs []*Var
+ if res.Len() > 0 {
+ lhs = res.vars
+ }
+ check.initVars(lhs, results, s)
+ }
+
+ case *syntax.BranchStmt:
+ if s.Label != nil {
+ check.hasLabel = true
+ break // checked in 2nd pass (check.labels)
+ }
+ switch s.Tok {
+ case syntax.Break:
+ if ctxt&breakOk == 0 {
+ if check.conf.CompilerErrorMessages {
+ check.error(s, "break is not in a loop, switch, or select statement")
+ } else {
+ check.error(s, "break not in for, switch, or select statement")
+ }
+ }
+ case syntax.Continue:
+ if ctxt&continueOk == 0 {
+ if check.conf.CompilerErrorMessages {
+ check.error(s, "continue is not in a loop")
+ } else {
+ check.error(s, "continue not in for statement")
+ }
+ }
+ case syntax.Fallthrough:
+ if ctxt&fallthroughOk == 0 {
+ msg := "fallthrough statement out of place"
+ if ctxt&finalSwitchCase != 0 {
+ msg = "cannot fallthrough final case in switch"
+ }
+ check.error(s, msg)
+ }
+ case syntax.Goto:
+ // goto's must have labels, should have been caught above
+ fallthrough
+ default:
+ check.errorf(s, invalidAST+"branch statement: %s", s.Tok)
+ }
+
+ case *syntax.BlockStmt:
+ check.openScope(s, "block")
+ defer check.closeScope()
+
+ check.stmtList(inner, s.List)
+
+ case *syntax.IfStmt:
+ check.openScope(s, "if")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ var x operand
+ check.expr(&x, s.Cond)
+ if x.mode != invalid && !allBoolean(x.typ) {
+ check.error(s.Cond, "non-boolean condition in if statement")
+ }
+ check.stmt(inner, s.Then)
+ // The parser produces a correct AST but if it was modified
+ // elsewhere the else branch may be invalid. Check again.
+ switch s.Else.(type) {
+ case nil:
+ // valid or error already reported
+ case *syntax.IfStmt, *syntax.BlockStmt:
+ check.stmt(inner, s.Else)
+ default:
+ check.error(s.Else, "invalid else branch in if statement")
+ }
+
+ case *syntax.SwitchStmt:
+ inner |= breakOk
+ check.openScope(s, "switch")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+
+ if g, _ := s.Tag.(*syntax.TypeSwitchGuard); g != nil {
+ check.typeSwitchStmt(inner, s, g)
+ } else {
+ check.switchStmt(inner, s)
+ }
+
+ case *syntax.SelectStmt:
+ inner |= breakOk
+
+ check.multipleSelectDefaults(s.Body)
+
+ for i, clause := range s.Body {
+ if clause == nil {
+ continue // error reported before
+ }
+
+ // clause.Comm must be a SendStmt, RecvStmt, or default case
+ valid := false
+ var rhs syntax.Expr // rhs of RecvStmt, or nil
+ switch s := clause.Comm.(type) {
+ case nil, *syntax.SendStmt:
+ valid = true
+ case *syntax.AssignStmt:
+ if _, ok := s.Rhs.(*syntax.ListExpr); !ok {
+ rhs = s.Rhs
+ }
+ case *syntax.ExprStmt:
+ rhs = s.X
+ }
+
+ // if present, rhs must be a receive operation
+ if rhs != nil {
+ if x, _ := unparen(rhs).(*syntax.Operation); x != nil && x.Y == nil && x.Op == syntax.Recv {
+ valid = true
+ }
+ }
+
+ if !valid {
+ check.error(clause.Comm, "select case must be send or receive (possibly with assignment)")
+ continue
+ }
+ end := s.Rbrace
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ }
+ check.openScopeUntil(clause, end, "case")
+ if clause.Comm != nil {
+ check.stmt(inner, clause.Comm)
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ case *syntax.ForStmt:
+ inner |= breakOk | continueOk
+
+ if rclause, _ := s.Init.(*syntax.RangeClause); rclause != nil {
+ check.rangeStmt(inner, s, rclause)
+ break
+ }
+
+ check.openScope(s, "for")
+ defer check.closeScope()
+
+ check.simpleStmt(s.Init)
+ if s.Cond != nil {
+ var x operand
+ check.expr(&x, s.Cond)
+ if x.mode != invalid && !allBoolean(x.typ) {
+ check.error(s.Cond, "non-boolean condition in for statement")
+ }
+ }
+ check.simpleStmt(s.Post)
+ // spec: "The init statement may be a short variable
+ // declaration, but the post statement must not."
+ if s, _ := s.Post.(*syntax.AssignStmt); s != nil && s.Op == syntax.Def {
+ // The parser already reported an error.
+ check.use(s.Lhs) // avoid follow-up errors
+ }
+ check.stmt(inner, s.Body)
+
+ default:
+ check.error(s, "invalid statement")
+ }
+}
+
+func (check *Checker) switchStmt(inner stmtContext, s *syntax.SwitchStmt) {
+ // init statement already handled
+
+ var x operand
+ if s.Tag != nil {
+ check.expr(&x, s.Tag)
+ // By checking assignment of x to an invisible temporary
+ // (as a compiler would), we get all the relevant checks.
+ check.assignment(&x, nil, "switch expression")
+ if x.mode != invalid && !Comparable(x.typ) && !hasNil(x.typ) {
+ check.errorf(&x, "cannot switch on %s (%s is not comparable)", &x, x.typ)
+ x.mode = invalid
+ }
+ } else {
+ // spec: "A missing switch expression is
+ // equivalent to the boolean value true."
+ x.mode = constant_
+ x.typ = Typ[Bool]
+ x.val = constant.MakeBool(true)
+ // TODO(gri) should have a better position here
+ pos := s.Rbrace
+ if len(s.Body) > 0 {
+ pos = s.Body[0].Pos()
+ }
+ x.expr = syntax.NewName(pos, "true")
+ }
+
+ check.multipleSwitchDefaults(s.Body)
+
+ seen := make(valueMap) // map of seen case values to positions and types
+ for i, clause := range s.Body {
+ if clause == nil {
+ check.error(clause, invalidAST+"incorrect expression switch case")
+ continue
+ }
+ end := s.Rbrace
+ inner := inner
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ inner |= fallthroughOk
+ } else {
+ inner |= finalSwitchCase
+ }
+ check.caseValues(&x, unpackExpr(clause.Cases), seen)
+ check.openScopeUntil(clause, end, "case")
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+}
+
+func (check *Checker) typeSwitchStmt(inner stmtContext, s *syntax.SwitchStmt, guard *syntax.TypeSwitchGuard) {
+ // init statement already handled
+
+ // A type switch guard must be of the form:
+ //
+ // TypeSwitchGuard = [ identifier ":=" ] PrimaryExpr "." "(" "type" ")" .
+ // \__lhs__/ \___rhs___/
+
+ // check lhs, if any
+ lhs := guard.Lhs
+ if lhs != nil {
+ if lhs.Value == "_" {
+ // _ := x.(type) is an invalid short variable declaration
+ check.softErrorf(lhs, "no new variable on left side of :=")
+ lhs = nil // avoid declared but not used error below
+ } else {
+ check.recordDef(lhs, nil) // lhs variable is implicitly declared in each case clause
+ }
+ }
+
+ // check rhs
+ var x operand
+ check.expr(&x, guard.X)
+ if x.mode == invalid {
+ return
+ }
+
+ // TODO(gri) we may want to permit type switches on type parameter values at some point
+ var sx *operand // switch expression against which cases are compared; nil if invalid
+ if isTypeParam(x.typ) {
+ check.errorf(&x, "cannot use type switch on type parameter value %s", &x)
+ } else {
+ if _, ok := under(x.typ).(*Interface); ok {
+ sx = &x
+ } else {
+ check.errorf(&x, "%s is not an interface", &x)
+ }
+ }
+
+ check.multipleSwitchDefaults(s.Body)
+
+ var lhsVars []*Var // list of implicitly declared lhs variables
+ seen := make(map[Type]syntax.Expr) // map of seen types to positions
+ for i, clause := range s.Body {
+ if clause == nil {
+ check.error(s, invalidAST+"incorrect type switch case")
+ continue
+ }
+ end := s.Rbrace
+ if i+1 < len(s.Body) {
+ end = s.Body[i+1].Pos()
+ }
+ // Check each type in this type switch case.
+ cases := unpackExpr(clause.Cases)
+ T := check.caseTypes(sx, cases, seen)
+ check.openScopeUntil(clause, end, "case")
+ // If lhs exists, declare a corresponding variable in the case-local scope.
+ if lhs != nil {
+ // spec: "The TypeSwitchGuard may include a short variable declaration.
+ // When that form is used, the variable is declared at the beginning of
+ // the implicit block in each clause. In clauses with a case listing
+ // exactly one type, the variable has that type; otherwise, the variable
+ // has the type of the expression in the TypeSwitchGuard."
+ if len(cases) != 1 || T == nil {
+ T = x.typ
+ }
+ obj := NewVar(lhs.Pos(), check.pkg, lhs.Value, T)
+ // TODO(mdempsky): Just use clause.Colon? Why did I even suggest
+ // "at the end of the TypeSwitchCase" in #16794 instead?
+ scopePos := clause.Pos() // for default clause (len(List) == 0)
+ if n := len(cases); n > 0 {
+ scopePos = syntax.EndPos(cases[n-1])
+ }
+ check.declare(check.scope, nil, obj, scopePos)
+ check.recordImplicit(clause, obj)
+ // For the "declared but not used" error, all lhs variables act as
+ // one; i.e., if any one of them is 'used', all of them are 'used'.
+ // Collect them for later analysis.
+ lhsVars = append(lhsVars, obj)
+ }
+ check.stmtList(inner, clause.Body)
+ check.closeScope()
+ }
+
+ // If lhs exists, we must have at least one lhs variable that was used.
+ // (We can't use check.usage because that only looks at one scope; and
+ // we don't want to use the same variable for all scopes and change the
+ // variable type underfoot.)
+ if lhs != nil {
+ var used bool
+ for _, v := range lhsVars {
+ if v.used {
+ used = true
+ }
+ v.used = true // avoid usage error when checking entire function
+ }
+ if !used {
+ check.softErrorf(lhs, "%s declared but not used", lhs.Value)
+ }
+ }
+}
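+
+// Example (for illustration): for a guard "v := x.(type)" with x of type
+// interface{}, the implicitly declared v gets the case's type when the case
+// lists exactly one type, and the type of x otherwise:
+//
+//	switch v := x.(type) {
+//	case int:
+//		_ = v // v has type int
+//	case string, bool:
+//		_ = v // v has type interface{} (the type of x)
+//	default:
+//		_ = v // v has type interface{}
+//	}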
+
+func (check *Checker) rangeStmt(inner stmtContext, s *syntax.ForStmt, rclause *syntax.RangeClause) {
+ // determine lhs, if any
+ sKey := rclause.Lhs // possibly nil
+ var sValue, sExtra syntax.Expr
+ if p, _ := sKey.(*syntax.ListExpr); p != nil {
+ if len(p.ElemList) < 2 {
+ check.error(s, invalidAST+"invalid lhs in range clause")
+ return
+ }
+ // len(p.ElemList) >= 2
+ sKey = p.ElemList[0]
+ sValue = p.ElemList[1]
+ if len(p.ElemList) > 2 {
+ // delay error reporting until we know more
+ sExtra = p.ElemList[2]
+ }
+ }
+
+ // check expression to iterate over
+ var x operand
+ check.expr(&x, rclause.X)
+
+ // determine key/value types
+ var key, val Type
+ if x.mode != invalid {
+ // Ranging over a type parameter is permitted if it has a core type.
+ var cause string
+ u := coreType(x.typ)
+ if t, _ := u.(*Chan); t != nil {
+ if sValue != nil {
+ check.softErrorf(sValue, "range over %s permits only one iteration variable", &x)
+ // ok to continue
+ }
+ if t.dir == SendOnly {
+ cause = "receive from send-only channel"
+ }
+ } else {
+ if sExtra != nil {
+ check.softErrorf(sExtra, "range clause permits at most two iteration variables")
+ // ok to continue
+ }
+ if u == nil {
+ cause = check.sprintf("%s has no core type", x.typ)
+ }
+ }
+ key, val = rangeKeyVal(u)
+ if key == nil || cause != "" {
+ if cause == "" {
+ check.softErrorf(&x, "cannot range over %s", &x)
+ } else {
+ check.softErrorf(&x, "cannot range over %s (%s)", &x, cause)
+ }
+ // ok to continue
+ }
+ }
+
+ // Open the for-statement block scope now, after the range clause.
+ // Iteration variables declared with := need to go in this scope (was issue #51437).
+ check.openScope(s, "range")
+ defer check.closeScope()
+
+ // check assignment to/declaration of iteration variables
+ // (irregular assignment, cannot easily map to existing assignment checks)
+
+ // lhs expressions and initialization value (rhs) types
+ lhs := [2]syntax.Expr{sKey, sValue}
+ rhs := [2]Type{key, val} // key, val may be nil
+
+ if rclause.Def {
+ // short variable declaration
+ var vars []*Var
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+
+ // determine lhs variable
+ var obj *Var
+ if ident, _ := lhs.(*syntax.Name); ident != nil {
+ // declare new variable
+ name := ident.Value
+ obj = NewVar(ident.Pos(), check.pkg, name, nil)
+ check.recordDef(ident, obj)
+ // _ variables don't count as new variables
+ if name != "_" {
+ vars = append(vars, obj)
+ }
+ } else {
+ check.errorf(lhs, "cannot declare %s", lhs)
+ obj = NewVar(lhs.Pos(), check.pkg, "_", nil) // dummy variable
+ }
+
+ // initialize lhs variable
+ if typ := rhs[i]; typ != nil {
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = typ
+ check.initVar(obj, &x, "range clause")
+ } else {
+ obj.typ = Typ[Invalid]
+ obj.used = true // don't complain about unused variable
+ }
+ }
+
+ // declare variables
+ if len(vars) > 0 {
+ scopePos := s.Body.Pos()
+ for _, obj := range vars {
+ check.declare(check.scope, nil /* recordDef already called */, obj, scopePos)
+ }
+ } else {
+ check.error(s, "no new variables on left side of :=")
+ }
+ } else {
+ // ordinary assignment
+ for i, lhs := range lhs {
+ if lhs == nil {
+ continue
+ }
+ if typ := rhs[i]; typ != nil {
+ x.mode = value
+ x.expr = lhs // we don't have a better rhs expression to use here
+ x.typ = typ
+ check.assignVar(lhs, &x)
+ }
+ }
+ }
+
+ check.stmt(inner, s.Body)
+}
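+
+// For illustration, the two paths above correspond to the two range forms:
+//
+//	for i, v := range s { ... } // rclause.Def: i and v are declared in the range scope
+//	for i, v = range s { ... }  // ordinary assignment to pre-declared i and v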
+
+// rangeKeyVal returns the key and value type produced by a range clause
+// over an expression of type typ. If the range clause is not permitted,
+// the results are nil.
+func rangeKeyVal(typ Type) (key, val Type) {
+ switch typ := arrayPtrDeref(typ).(type) {
+ case *Basic:
+ if isString(typ) {
+ return Typ[Int], universeRune // use 'rune' name
+ }
+ case *Array:
+ return Typ[Int], typ.elem
+ case *Slice:
+ return Typ[Int], typ.elem
+ case *Map:
+ return typ.key, typ.elem
+ case *Chan:
+ return typ.elem, Typ[Invalid]
+ }
+ return
+}
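+
+// For illustration, the key/value types rangeKeyVal reports:
+//
+//	range "abc"        // key int (index), value rune
+//	range [3]T{...}    // key int, value T
+//	range []T{...}     // key int, value T
+//	range map[K]V{...} // key K, value V
+//	range ch           // key is the channel's element type; no second value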
diff --git a/src/cmd/compile/internal/types2/struct.go b/src/cmd/compile/internal/types2/struct.go
new file mode 100644
index 0000000..31a3b1a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/struct.go
@@ -0,0 +1,225 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strconv"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Struct represents a struct type.
+type Struct struct {
+ fields []*Var // fields != nil indicates the struct is set up (possibly with len(fields) == 0)
+ tags []string // field tags; nil if there are no tags
+}
+
+// NewStruct returns a new struct with the given fields and corresponding field tags.
+// If a field with index i has a tag, tags[i] must be that tag, but len(tags) may be
+// only as long as required to hold the tag with the largest index i. Consequently,
+// if no field has a tag, tags may be nil.
+func NewStruct(fields []*Var, tags []string) *Struct {
+ var fset objset
+ for _, f := range fields {
+ if f.name != "_" && fset.insert(f) != nil {
+ panic("multiple fields with the same name")
+ }
+ }
+ if len(tags) > len(fields) {
+ panic("more tags than fields")
+ }
+ s := &Struct{fields: fields, tags: tags}
+ s.markComplete()
+ return s
+}
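+
+// Example (for illustration), constructing struct{ Name string "json tag"; Age int },
+// assuming pkg is the current *Package; tags may be shorter than fields:
+//
+//	fields := []*Var{
+//		NewField(syntax.Pos{}, pkg, "Name", Typ[String], false),
+//		NewField(syntax.Pos{}, pkg, "Age", Typ[Int], false),
+//	}
+//	s := NewStruct(fields, []string{`json:"name"`})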
+
+// NumFields returns the number of fields in the struct (including blank and embedded fields).
+func (s *Struct) NumFields() int { return len(s.fields) }
+
+// Field returns the i'th field for 0 <= i < NumFields().
+func (s *Struct) Field(i int) *Var { return s.fields[i] }
+
+// Tag returns the i'th field tag for 0 <= i < NumFields().
+func (s *Struct) Tag(i int) string {
+ if i < len(s.tags) {
+ return s.tags[i]
+ }
+ return ""
+}
+
+func (s *Struct) Underlying() Type { return s }
+func (s *Struct) String() string { return TypeString(s, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (s *Struct) markComplete() {
+ if s.fields == nil {
+ s.fields = make([]*Var, 0)
+ }
+}
+
+func (check *Checker) structType(styp *Struct, e *syntax.StructType) {
+ if e.FieldList == nil {
+ styp.markComplete()
+ return
+ }
+
+ // struct fields and tags
+ var fields []*Var
+ var tags []string
+
+ // for double-declaration checks
+ var fset objset
+
+ // current field typ and tag
+ var typ Type
+ var tag string
+ add := func(ident *syntax.Name, embedded bool, pos syntax.Pos) {
+ if tag != "" && tags == nil {
+ tags = make([]string, len(fields))
+ }
+ if tags != nil {
+ tags = append(tags, tag)
+ }
+
+ name := ident.Value
+ fld := NewField(pos, check.pkg, name, typ, embedded)
+ // spec: "Within a struct, non-blank field names must be unique."
+ if name == "_" || check.declareInSet(&fset, pos, fld) {
+ fields = append(fields, fld)
+ check.recordDef(ident, fld)
+ }
+ }
+
+ // addInvalid adds an embedded field of invalid type to the struct for
+ // fields with errors; this keeps the number of struct fields in sync
+ // with the source as long as the fields are _ or have different names
+ // (issue #25627).
+ addInvalid := func(ident *syntax.Name, pos syntax.Pos) {
+ typ = Typ[Invalid]
+ tag = ""
+ add(ident, true, pos)
+ }
+
+ var prev syntax.Expr
+ for i, f := range e.FieldList {
+ // Fields declared syntactically with the same type (e.g.: a, b, c T)
+ // share the same type expression. Only check type if it's a new type.
+ if i == 0 || f.Type != prev {
+ typ = check.varType(f.Type)
+ prev = f.Type
+ }
+ tag = ""
+ if i < len(e.TagList) {
+ tag = check.tag(e.TagList[i])
+ }
+ if f.Name != nil {
+ // named field
+ add(f.Name, false, f.Name.Pos())
+ } else {
+ // embedded field
+ // spec: "An embedded type must be specified as a type name T or as a
+ // pointer to a non-interface type name *T, and T itself may not be a
+ // pointer type."
+ pos := syntax.StartPos(f.Type)
+ name := embeddedFieldIdent(f.Type)
+ if name == nil {
+ check.errorf(pos, "invalid embedded field type %s", f.Type)
+ name = &syntax.Name{Value: "_"} // TODO(gri) need to set position to pos
+ addInvalid(name, pos)
+ continue
+ }
+ add(name, true, pos)
+
+ // Because we have a name, typ must be of the form T or *T, where T is the name
+ // of a (named or alias) type, and t (= deref(typ)) must be the type of T.
+ // We must delay this check to the end because we don't want to instantiate
+ // (via under(t)) a possibly incomplete type.
+ embeddedTyp := typ // for closure below
+ embeddedPos := pos
+ check.later(func() {
+ t, isPtr := deref(embeddedTyp)
+ switch u := under(t).(type) {
+ case *Basic:
+ if t == Typ[Invalid] {
+ // error was reported before
+ return
+ }
+ // unsafe.Pointer is treated like a regular pointer
+ if u.kind == UnsafePointer {
+ check.error(embeddedPos, "embedded field type cannot be unsafe.Pointer")
+ }
+ case *Pointer:
+ check.error(embeddedPos, "embedded field type cannot be a pointer")
+ case *Interface:
+ if isTypeParam(t) {
+ check.error(embeddedPos, "embedded field type cannot be a (pointer to a) type parameter")
+ break
+ }
+ if isPtr {
+ check.error(embeddedPos, "embedded field type cannot be a pointer to an interface")
+ }
+ }
+ }).describef(embeddedPos, "check embedded type %s", embeddedTyp)
+ }
+ }
+
+ styp.fields = fields
+ styp.tags = tags
+ styp.markComplete()
+}
+
+func embeddedFieldIdent(e syntax.Expr) *syntax.Name {
+ switch e := e.(type) {
+ case *syntax.Name:
+ return e
+ case *syntax.Operation:
+ if base := ptrBase(e); base != nil {
+ // *T is valid, but **T is not
+ if op, _ := base.(*syntax.Operation); op == nil || ptrBase(op) == nil {
+ return embeddedFieldIdent(e.X)
+ }
+ }
+ case *syntax.SelectorExpr:
+ return e.Sel
+ case *syntax.IndexExpr:
+ return embeddedFieldIdent(e.X)
+ }
+ return nil // invalid embedded field
+}
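+
+// For illustration, the embedded field forms recognized above:
+//
+//	struct{ T }      // *syntax.Name
+//	struct{ *T }     // pointer to a type name
+//	struct{ p.T }    // *syntax.SelectorExpr
+//	struct{ T[int] } // *syntax.IndexExpr (instantiated generic type)
+//	struct{ **T }    // rejected: nil result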
+
+func (check *Checker) declareInSet(oset *objset, pos syntax.Pos, obj Object) bool {
+ if alt := oset.insert(obj); alt != nil {
+ var err error_
+ err.errorf(pos, "%s redeclared", obj.Name())
+ err.recordAltDecl(alt)
+ check.report(&err)
+ return false
+ }
+ return true
+}
+
+func (check *Checker) tag(t *syntax.BasicLit) string {
+ // If t.Bad, an error was reported during parsing.
+ if t != nil && !t.Bad {
+ if t.Kind == syntax.StringLit {
+ if val, err := strconv.Unquote(t.Value); err == nil {
+ return val
+ }
+ }
+ check.errorf(t, invalidAST+"incorrect tag syntax: %q", t.Value)
+ }
+ return ""
+}
+
+func ptrBase(x *syntax.Operation) syntax.Expr {
+ if x.Op == syntax.Mul && x.Y == nil {
+ return x.X
+ }
+ return nil
+}
diff --git a/src/cmd/compile/internal/types2/subst.go b/src/cmd/compile/internal/types2/subst.go
new file mode 100644
index 0000000..c9e8f96
--- /dev/null
+++ b/src/cmd/compile/internal/types2/subst.go
@@ -0,0 +1,422 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type parameter substitution.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+type substMap map[*TypeParam]Type
+
+// makeSubstMap creates a new substitution map mapping tpars[i] to targs[i].
+// If targs[i] is nil, tpars[i] is not substituted.
+func makeSubstMap(tpars []*TypeParam, targs []Type) substMap {
+ assert(len(tpars) == len(targs))
+ proj := make(substMap, len(tpars))
+ for i, tpar := range tpars {
+ proj[tpar] = targs[i]
+ }
+ return proj
+}
+
+// makeRenameMap is like makeSubstMap, but creates a map used to rename type
+// parameters in from with the type parameters in to.
+func makeRenameMap(from, to []*TypeParam) substMap {
+ assert(len(from) == len(to))
+ proj := make(substMap, len(from))
+ for i, tpar := range from {
+ proj[tpar] = to[i]
+ }
+ return proj
+}
+
+func (m substMap) empty() bool {
+ return len(m) == 0
+}
+
+func (m substMap) lookup(tpar *TypeParam) Type {
+ if t := m[tpar]; t != nil {
+ return t
+ }
+ return tpar
+}
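+
+// For illustration, given type parameters [P, Q] and type arguments
+// [int, string], makeSubstMap yields {P -> int, Q -> string}, so that
+// lookup(P) == int; lookup on an unmapped parameter (or one mapped to nil)
+// returns the parameter itself.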
+
+// subst returns the type typ with its type parameters tpars replaced by the
+// corresponding type arguments targs, recursively. subst doesn't modify the
+// incoming type. If a substitution took place, the result type is different
+// from the incoming type.
+//
+// If the given context is non-nil, it is used in lieu of check.Config.Context.
+func (check *Checker) subst(pos syntax.Pos, typ Type, smap substMap, ctxt *Context) Type {
+ if smap.empty() {
+ return typ
+ }
+
+ // common cases
+ switch t := typ.(type) {
+ case *Basic:
+ return typ // nothing to do
+ case *TypeParam:
+ return smap.lookup(t)
+ }
+
+ // general case
+ subst := subster{
+ pos: pos,
+ smap: smap,
+ check: check,
+ ctxt: check.bestContext(ctxt),
+ }
+ return subst.typ(typ)
+}
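+
+// For illustration, substituting P -> int in []P yields []int, and in
+// map[P]*P yields map[int]*int; a type that contains no mapped type
+// parameters is returned unchanged.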
+
+type subster struct {
+ pos syntax.Pos
+ smap substMap
+ check *Checker // nil if called via Instantiate
+ ctxt *Context
+}
+
+func (subst *subster) typ(typ Type) Type {
+ switch t := typ.(type) {
+ case nil:
+ // Call typOrNil if it's possible that typ is nil.
+ panic("nil typ")
+
+ case *Basic:
+ // nothing to do
+
+ case *Array:
+ elem := subst.typOrNil(t.elem)
+ if elem != t.elem {
+ return &Array{len: t.len, elem: elem}
+ }
+
+ case *Slice:
+ elem := subst.typOrNil(t.elem)
+ if elem != t.elem {
+ return &Slice{elem: elem}
+ }
+
+ case *Struct:
+ if fields, copied := subst.varList(t.fields); copied {
+ s := &Struct{fields: fields, tags: t.tags}
+ s.markComplete()
+ return s
+ }
+
+ case *Pointer:
+ base := subst.typ(t.base)
+ if base != t.base {
+ return &Pointer{base: base}
+ }
+
+ case *Tuple:
+ return subst.tuple(t)
+
+ case *Signature:
+ // Preserve the receiver: it is handled during *Interface and *Named type
+ // substitution.
+ //
+ // Naively doing the substitution here can lead to an infinite recursion in
+ // the case where the receiver is an interface. For example, consider the
+ // following declaration:
+ //
+ // type T[A any] struct { f interface{ m() } }
+ //
+ // In this case, the type of f is an interface that is itself the receiver
+ // type of all of its methods. Because we have no type name to break
+ // cycles, substituting in the recv results in an infinite loop of
+ // recv->interface->recv->interface->...
+ recv := t.recv
+
+ params := subst.tuple(t.params)
+ results := subst.tuple(t.results)
+ if params != t.params || results != t.results {
+ return &Signature{
+ rparams: t.rparams,
+ // TODO(gri) why can't we nil out tparams here, rather than in instantiate?
+ tparams: t.tparams,
+ // instantiated signatures have a nil scope
+ recv: recv,
+ params: params,
+ results: results,
+ variadic: t.variadic,
+ }
+ }
+
+ case *Union:
+ terms, copied := subst.termlist(t.terms)
+ if copied {
+ // term list substitution may introduce duplicate terms (unlikely but possible).
+ // This is ok; lazy type set computation will determine the actual type set
+ // in normal form.
+ return &Union{terms}
+ }
+
+ case *Interface:
+ methods, mcopied := subst.funcList(t.methods)
+ embeddeds, ecopied := subst.typeList(t.embeddeds)
+ if mcopied || ecopied {
+ iface := subst.check.newInterface()
+ iface.embeddeds = embeddeds
+ iface.implicit = t.implicit
+ iface.complete = t.complete
+ // If we've changed the interface type, we may need to replace its
+ // receiver if the receiver type is the original interface. Receivers of
+ // *Named type are replaced during named type expansion.
+ //
+ // Notably, it's possible to reach here and not create a new *Interface,
+ // even though the receiver type may be parameterized. For example:
+ //
+ // type T[P any] interface{ m() }
+ //
+ // In this case the interface will not be substituted here, because its
+ // method signatures do not depend on the type parameter P, but we still
+ // need to create new interface methods to hold the instantiated
+ // receiver. This is handled by expandNamed.
+ iface.methods, _ = replaceRecvType(methods, t, iface)
+ return iface
+ }
+
+ case *Map:
+ key := subst.typ(t.key)
+ elem := subst.typ(t.elem)
+ if key != t.key || elem != t.elem {
+ return &Map{key: key, elem: elem}
+ }
+
+ case *Chan:
+ elem := subst.typ(t.elem)
+ if elem != t.elem {
+ return &Chan{dir: t.dir, elem: elem}
+ }
+
+ case *Named:
+ // dump is for debugging
+ dump := func(string, ...interface{}) {}
+ if subst.check != nil && subst.check.conf.Trace {
+ subst.check.indent++
+ defer func() {
+ subst.check.indent--
+ }()
+ dump = func(format string, args ...interface{}) {
+ subst.check.trace(subst.pos, format, args...)
+ }
+ }
+
+ // subst is called by expandNamed, so in this function we need to be
+ // careful not to call any methods that would cause t to be expanded: doing
+ // so would result in deadlock.
+ //
+ // So we call t.orig.TypeParams() rather than t.TypeParams() here and
+ // below.
+ if t.orig.TypeParams().Len() == 0 {
+ dump(">>> %s is not parameterized", t)
+ return t // type is not parameterized
+ }
+
+ var newTArgs []Type
+ if t.targs.Len() != t.orig.TypeParams().Len() {
+ return Typ[Invalid] // error reported elsewhere
+ }
+
+ // already instantiated
+ dump(">>> %s already instantiated", t)
+ // For each (existing) type argument targ, determine if it needs
+ // to be substituted; i.e., if it is or contains a type parameter
+ // that has a type argument for it.
+ for i, targ := range t.targs.list() {
+ dump(">>> %d targ = %s", i, targ)
+ new_targ := subst.typ(targ)
+ if new_targ != targ {
+ dump(">>> substituted %d targ %s => %s", i, targ, new_targ)
+ if newTArgs == nil {
+ newTArgs = make([]Type, t.orig.TypeParams().Len())
+ copy(newTArgs, t.targs.list())
+ }
+ newTArgs[i] = new_targ
+ }
+ }
+
+ if newTArgs == nil {
+ dump(">>> nothing to substitute in %s", t)
+ return t // nothing to substitute
+ }
+
+ // before creating a new named type, check if we have this one already
+ h := subst.ctxt.instanceHash(t.orig, newTArgs)
+ dump(">>> new type hash: %s", h)
+ if named := subst.ctxt.lookup(h, t.orig, newTArgs); named != nil {
+ dump(">>> found %s", named)
+ return named
+ }
+
+ // Create a new instance and populate the context to avoid endless
+ // recursion. The position used here is irrelevant because validation only
+ // occurs on t (we don't call validType on named), but we use subst.pos to
+ // help with debugging.
+ t.orig.resolve(subst.ctxt)
+ return subst.check.instance(subst.pos, t.orig, newTArgs, subst.ctxt)
+
+ // Note that if we were to expose substitution more generally (not just in
+ // the context of a declaration), we'd have to substitute in
+ // named.underlying as well.
+ //
+ // But this is unnecessary for now.
+
+ case *TypeParam:
+ return subst.smap.lookup(t)
+
+ default:
+ unimplemented()
+ }
+
+ return typ
+}
+
+// typOrNil is like typ but if the argument is nil it is replaced with Typ[Invalid].
+// A nil type may appear in pathological cases such as type T[P any] []func(_ T([]_))
+// where an array/slice element is accessed before it is set up.
+func (subst *subster) typOrNil(typ Type) Type {
+ if typ == nil {
+ return Typ[Invalid]
+ }
+ return subst.typ(typ)
+}
+
+func (subst *subster) var_(v *Var) *Var {
+ if v != nil {
+ if typ := subst.typ(v.typ); typ != v.typ {
+ return substVar(v, typ)
+ }
+ }
+ return v
+}
+
+func substVar(v *Var, typ Type) *Var {
+ copy := *v
+ copy.typ = typ
+ return &copy
+}
+
+func (subst *subster) tuple(t *Tuple) *Tuple {
+ if t != nil {
+ if vars, copied := subst.varList(t.vars); copied {
+ return &Tuple{vars: vars}
+ }
+ }
+ return t
+}
+
+func (subst *subster) varList(in []*Var) (out []*Var, copied bool) {
+ out = in
+ for i, v := range in {
+ if w := subst.var_(v); w != v {
+ if !copied {
+ // first variable that got substituted => allocate new out slice
+ // and copy all variables
+ new := make([]*Var, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = w
+ }
+ }
+ return
+}
+
+func (subst *subster) func_(f *Func) *Func {
+ if f != nil {
+ if typ := subst.typ(f.typ); typ != f.typ {
+ copy := *f
+ copy.typ = typ
+ return &copy
+ }
+ }
+ return f
+}
+
+func (subst *subster) funcList(in []*Func) (out []*Func, copied bool) {
+ out = in
+ for i, f := range in {
+ if g := subst.func_(f); g != f {
+ if !copied {
+ // first function that got substituted => allocate new out slice
+ // and copy all functions
+ new := make([]*Func, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = g
+ }
+ }
+ return
+}
+
+func (subst *subster) typeList(in []Type) (out []Type, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t); u != t {
+ if !copied {
+ // first type that got substituted => allocate new out slice
+ // and copy all types
+ new := make([]Type, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = u
+ }
+ }
+ return
+}
+
+func (subst *subster) termlist(in []*Term) (out []*Term, copied bool) {
+ out = in
+ for i, t := range in {
+ if u := subst.typ(t.typ); u != t.typ {
+ if !copied {
+ // first term that got substituted => allocate new out slice
+ // and copy all terms
+ new := make([]*Term, len(in))
+ copy(new, out)
+ out = new
+ copied = true
+ }
+ out[i] = NewTerm(t.tilde, u)
+ }
+ }
+ return
+}
+
+// replaceRecvType updates any function receivers that have type old to have
+// type new. It does not modify the input slice; if modifications are required,
+// the input slice and any affected signatures will be copied before mutating.
+//
+// The resulting out slice contains the updated functions, and copied reports
+// if anything was modified.
+func replaceRecvType(in []*Func, old, new Type) (out []*Func, copied bool) {
+ out = in
+ for i, method := range in {
+ sig := method.Type().(*Signature)
+ if sig.recv != nil && sig.recv.Type() == old {
+ if !copied {
+ // Allocate a new methods slice before mutating for the first time.
+ // This is defensive, as we may share methods across instantiations of
+ // a given interface type if they do not get substituted.
+ out = make([]*Func, len(in))
+ copy(out, in)
+ copied = true
+ }
+ newsig := *sig
+ newsig.recv = substVar(sig.recv, new)
+ out[i] = NewFunc(method.pos, method.pkg, method.name, &newsig)
+ }
+ }
+ return
+}
diff --git a/src/cmd/compile/internal/types2/termlist.go b/src/cmd/compile/internal/types2/termlist.go
new file mode 100644
index 0000000..a0108c4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist.go
@@ -0,0 +1,158 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "bytes"
+
+// A termlist represents the type set represented by the union
+// t1 ∪ t2 ∪ ... tn of the type sets of the terms t1 to tn.
+// A termlist is in normal form if all terms are disjoint.
+// termlist operations don't require the operands to be in
+// normal form.
+type termlist []*term
+
+// allTermlist represents the set of all types.
+// It is in normal form.
+var allTermlist = termlist{new(term)}
+
+// String prints the termlist exactly (without normalization).
+func (xl termlist) String() string {
+ if len(xl) == 0 {
+ return "∅"
+ }
+ var buf bytes.Buffer
+ for i, x := range xl {
+ if i > 0 {
+ buf.WriteString(" ∪ ")
+ }
+ buf.WriteString(x.String())
+ }
+ return buf.String()
+}
+
+// isEmpty reports whether the termlist xl represents the empty set of types.
+func (xl termlist) isEmpty() bool {
+ // If there's a non-nil term, the entire list is not empty.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil {
+ return false
+ }
+ }
+ return true
+}
+
+// isAll reports whether the termlist xl represents the set of all types.
+func (xl termlist) isAll() bool {
+ // If there's a 𝓤 term, the entire list is 𝓤.
+ // If the termlist is in normal form, this requires at most
+ // one iteration.
+ for _, x := range xl {
+ if x != nil && x.typ == nil {
+ return true
+ }
+ }
+ return false
+}
+
+// norm returns the normal form of xl.
+func (xl termlist) norm() termlist {
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ used := make([]bool, len(xl))
+ var rl termlist
+ for i, xi := range xl {
+ if xi == nil || used[i] {
+ continue
+ }
+ for j := i + 1; j < len(xl); j++ {
+ xj := xl[j]
+ if xj == nil || used[j] {
+ continue
+ }
+ if u1, u2 := xi.union(xj); u2 == nil {
+ // If we encounter a 𝓤 term, the entire list is 𝓤.
+ // Exit early.
+ // (Note that this is not just an optimization;
+ // if we continue, we may end up with a 𝓤 term
+ // and other terms and the result would not be
+ // in normal form.)
+ if u1.typ == nil {
+ return allTermlist
+ }
+ xi = u1
+ used[j] = true // xj is now unioned into xi - ignore it in future iterations
+ }
+ }
+ rl = append(rl, xi)
+ }
+ return rl
+}
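+
+// For illustration, norm applied to "int ∪ ~int ∪ string" yields
+// "~int ∪ string" (int is absorbed by ~int), and any list containing a
+// 𝓤 term normalizes to allTermlist.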
+
+// union returns the union xl ∪ yl.
+func (xl termlist) union(yl termlist) termlist {
+ return append(xl, yl...).norm()
+}
+
+// intersect returns the intersection xl ∩ yl.
+func (xl termlist) intersect(yl termlist) termlist {
+ if xl.isEmpty() || yl.isEmpty() {
+ return nil
+ }
+
+ // Quadratic algorithm, but good enough for now.
+ // TODO(gri) fix asymptotic performance
+ var rl termlist
+ for _, x := range xl {
+ for _, y := range yl {
+ if r := x.intersect(y); r != nil {
+ rl = append(rl, r)
+ }
+ }
+ }
+ return rl.norm()
+}
+
+// equal reports whether xl and yl represent the same type set.
+func (xl termlist) equal(yl termlist) bool {
+ // TODO(gri) this should be more efficient
+ return xl.subsetOf(yl) && yl.subsetOf(xl)
+}
+
+// includes reports whether t ∈ xl.
+func (xl termlist) includes(t Type) bool {
+ for _, x := range xl {
+ if x.includes(t) {
+ return true
+ }
+ }
+ return false
+}
+
+// supersetOf reports whether y ⊆ xl.
+func (xl termlist) supersetOf(y *term) bool {
+ for _, x := range xl {
+ if y.subsetOf(x) {
+ return true
+ }
+ }
+ return false
+}
+
+// subsetOf reports whether xl ⊆ yl.
+func (xl termlist) subsetOf(yl termlist) bool {
+ if yl.isEmpty() {
+ return xl.isEmpty()
+ }
+
+ // each term x of xl must be a subset of yl
+ for _, x := range xl {
+ if !yl.supersetOf(x) {
+ return false // x is not a subset of yl
+ }
+ }
+ return true
+}
diff --git a/src/cmd/compile/internal/types2/termlist_test.go b/src/cmd/compile/internal/types2/termlist_test.go
new file mode 100644
index 0000000..d1e3bdf
--- /dev/null
+++ b/src/cmd/compile/internal/types2/termlist_test.go
@@ -0,0 +1,284 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+// maketl makes a term list from a string representation of the term list.
+func maketl(s string) termlist {
+ s = strings.ReplaceAll(s, " ", "")
+ names := strings.Split(s, "∪")
+ r := make(termlist, len(names))
+ for i, n := range names {
+ r[i] = testTerm(n)
+ }
+ return r
+}
+
+func TestTermlistAll(t *testing.T) {
+ if !allTermlist.isAll() {
+ t.Errorf("allTermlist is not the set of all types")
+ }
+}
+
+func TestTermlistString(t *testing.T) {
+ for _, want := range []string{
+ "∅",
+ "𝓤",
+ "int",
+ "~int",
+ "myInt",
+ "∅ ∪ ∅",
+ "𝓤 ∪ 𝓤",
+ "∅ ∪ 𝓤 ∪ int",
+ "∅ ∪ 𝓤 ∪ int ∪ myInt",
+ } {
+ if got := maketl(want).String(); got != want {
+ t.Errorf("(%v).String() == %v", want, got)
+ }
+ }
+}
+
+func TestTermlistIsEmpty(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": true,
+ "∅ ∪ ∅": true,
+ "∅ ∪ ∅ ∪ 𝓤": false,
+ "∅ ∪ ∅ ∪ myInt": false,
+ "𝓤": false,
+ "𝓤 ∪ int": false,
+ "𝓤 ∪ myInt ∪ ∅": false,
+ } {
+ xl := maketl(test)
+ got := xl.isEmpty()
+ if got != want {
+ t.Errorf("(%v).isEmpty() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistIsAll(t *testing.T) {
+ for test, want := range map[string]bool{
+ "∅": false,
+ "∅ ∪ ∅": false,
+ "int ∪ ~string": false,
+ "~int ∪ myInt": false,
+ "∅ ∪ ∅ ∪ 𝓤": true,
+ "𝓤": true,
+ "𝓤 ∪ int": true,
+ "myInt ∪ 𝓤": true,
+ } {
+ xl := maketl(test)
+ got := xl.isAll()
+ if got != want {
+ t.Errorf("(%v).isAll() == %v; want %v", test, got, want)
+ }
+ }
+}
+
+func TestTermlistNorm(t *testing.T) {
+ for _, test := range []struct {
+ xl, want string
+ }{
+ {"∅", "∅"},
+ {"∅ ∪ ∅", "∅"},
+ {"∅ ∪ int", "int"},
+ {"∅ ∪ myInt", "myInt"},
+ {"𝓤 ∪ int", "𝓤"},
+ {"𝓤 ∪ myInt", "𝓤"},
+ {"int ∪ myInt", "int ∪ myInt"},
+ {"~int ∪ int", "~int"},
+ {"~int ∪ myInt", "~int"},
+ {"int ∪ ~string ∪ int", "int ∪ ~string"},
+ {"~int ∪ string ∪ 𝓤 ∪ ~string ∪ int", "𝓤"},
+ {"~int ∪ string ∪ myInt ∪ ~string ∪ int", "~int ∪ ~string"},
+ } {
+ xl := maketl(test.xl)
+ got := maketl(test.xl).norm()
+ if got.String() != test.want {
+ t.Errorf("(%v).norm() = %v; want %v", xl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistUnion(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "𝓤"},
+ {"∅", "int", "int"},
+ {"𝓤", "~int", "𝓤"},
+ {"int", "~int", "~int"},
+ {"int", "string", "int ∪ string"},
+ {"int", "myInt", "int ∪ myInt"},
+ {"~int", "myInt", "~int"},
+ {"int ∪ string", "~string", "int ∪ ~string"},
+ {"~int ∪ string", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ string ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ myInt ∪ ∅", "~string ∪ int", "~int ∪ ~string"},
+ {"~int ∪ string ∪ 𝓤", "~string ∪ int", "𝓤"},
+ {"~int ∪ string ∪ myInt", "~string ∪ int", "~int ∪ ~string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.union(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).union(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIntersect(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl, want string
+ }{
+
+ {"∅", "∅", "∅"},
+ {"∅", "𝓤", "∅"},
+ {"∅", "int", "∅"},
+ {"∅", "myInt", "∅"},
+ {"𝓤", "~int", "~int"},
+ {"𝓤", "myInt", "myInt"},
+ {"int", "~int", "int"},
+ {"int", "string", "∅"},
+ {"int", "myInt", "∅"},
+ {"~int", "myInt", "myInt"},
+ {"int ∪ string", "~string", "string"},
+ {"~int ∪ string", "~string ∪ int", "int ∪ string"},
+ {"~int ∪ string ∪ ∅", "~string ∪ int", "int ∪ string"},
+ {"~int ∪ myInt ∪ ∅", "~string ∪ int", "int"},
+ {"~int ∪ string ∪ 𝓤", "~string ∪ int", "int ∪ ~string"},
+ {"~int ∪ string ∪ myInt", "~string ∪ int", "int ∪ string"},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.intersect(yl).String()
+ if got != test.want {
+ t.Errorf("(%v).intersect(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistEqual(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"𝓤", "𝓤", true},
+ {"𝓤 ∪ int", "𝓤", true},
+ {"𝓤 ∪ int", "string ∪ 𝓤", true},
+ {"𝓤 ∪ myInt", "string ∪ 𝓤", true},
+ {"int ∪ ~string", "string ∪ int", false},
+ {"~int ∪ string", "string ∪ myInt", false},
+ {"int ∪ ~string ∪ ∅", "string ∪ int ∪ ~string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.equal(yl)
+ if got != test.want {
+ t.Errorf("(%v).equal(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistIncludes(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "int", false},
+ {"𝓤", "int", true},
+ {"~int", "int", true},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"~int", "myInt", true},
+ {"int ∪ string", "string", true},
+ {"~int ∪ string", "int", true},
+ {"~int ∪ string", "myInt", true},
+ {"~int ∪ myInt ∪ ∅", "myInt", true},
+ {"myInt ∪ ∅ ∪ 𝓤", "int", true},
+ } {
+ xl := maketl(test.xl)
+ yl := testTerm(test.typ).typ
+ got := xl.includes(yl)
+ if got != test.want {
+ t.Errorf("(%v).includes(%v) = %v; want %v", test.xl, yl, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSupersetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, typ string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", false},
+ {"∅", "int", false},
+ {"𝓤", "∅", true},
+ {"𝓤", "𝓤", true},
+ {"𝓤", "int", true},
+ {"𝓤", "~int", true},
+ {"𝓤", "myInt", true},
+ {"~int", "int", true},
+ {"~int", "~int", true},
+ {"~int", "myInt", true},
+ {"int", "~int", false},
+ {"myInt", "~int", false},
+ {"int", "string", false},
+ {"~int", "string", false},
+ {"int ∪ string", "string", true},
+ {"int ∪ string", "~string", false},
+ {"~int ∪ string", "int", true},
+ {"~int ∪ string", "myInt", true},
+ {"~int ∪ string ∪ ∅", "string", true},
+ {"~string ∪ ∅ ∪ 𝓤", "myInt", true},
+ } {
+ xl := maketl(test.xl)
+ y := testTerm(test.typ)
+ got := xl.supersetOf(y)
+ if got != test.want {
+ t.Errorf("(%v).supersetOf(%v) = %v; want %v", test.xl, y, got, test.want)
+ }
+ }
+}
+
+func TestTermlistSubsetOf(t *testing.T) {
+ for _, test := range []struct {
+ xl, yl string
+ want bool
+ }{
+ {"∅", "∅", true},
+ {"∅", "𝓤", true},
+ {"𝓤", "∅", false},
+ {"𝓤", "𝓤", true},
+ {"int", "int ∪ string", true},
+ {"~int", "int ∪ string", false},
+ {"~int", "myInt ∪ string", false},
+ {"myInt", "~int ∪ string", true},
+ {"~int", "string ∪ string ∪ int ∪ ~int", true},
+ {"myInt", "string ∪ string ∪ ~int", true},
+ {"int ∪ string", "string", false},
+ {"int ∪ string", "string ∪ int", true},
+ {"int ∪ ~string", "string ∪ int", false},
+ {"myInt ∪ ~string", "string ∪ int ∪ 𝓤", true},
+ {"int ∪ ~string", "string ∪ int ∪ ∅ ∪ string", false},
+ {"int ∪ myInt", "string ∪ ~int ∪ ∅ ∪ string", true},
+ } {
+ xl := maketl(test.xl)
+ yl := maketl(test.yl)
+ got := xl.subsetOf(yl)
+ if got != test.want {
+ t.Errorf("(%v).subsetOf(%v) = %v; want %v", test.xl, test.yl, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/blank.src b/src/cmd/compile/internal/types2/testdata/check/blank.src
new file mode 100644
index 0000000..6a2507f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/blank.src
@@ -0,0 +1,5 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package _ /* ERROR invalid package name */
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.go2 b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
new file mode 100644
index 0000000..7c3f0c9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.go2
@@ -0,0 +1,274 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests built-in calls on generic types.
+
+package builtins
+
+import "unsafe"
+
+// close
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C0](ch T) {
+ close(ch /* ERROR cannot close non-channel */)
+}
+
+func _[T C1](ch T) {
+ close(ch)
+}
+
+func _[T C2](ch T) {
+ close(ch /* ERROR cannot close receive-only channel */)
+}
+
+func _[T C3](ch T) {
+ close(ch)
+}
+
+func _[T C4](ch T) {
+ close(ch)
+}
+
+func _[T C5[X], X any](ch T) {
+ close(ch)
+}
+
+// copy
+
+func _[T any](x, y T) {
+ copy(x /* ERROR copy expects slice arguments */ , y)
+}
+
+func _[T ~[]byte](x, y T) {
+ copy(x, y)
+ copy(x, "foo")
+ copy("foo" /* ERROR expects slice arguments */ , y)
+
+ var x2 []byte
+ copy(x2, y) // element types are identical
+ copy(y, x2) // element types are identical
+
+ type myByte byte
+ var x3 []myByte
+ copy(x3 /* ERROR different element types */ , y)
+ copy(y, x3 /* ERROR different element types */ )
+}
+
+func _[T ~[]E, E any](x T, y []E) {
+ copy(x, y)
+ copy(x /* ERROR different element types */ , "foo")
+}
+
+func _[T ~string](x []byte, y T) {
+ copy(x, y)
+ copy(y /* ERROR expects slice arguments */ , x)
+}
+
+func _[T ~[]byte|~string](x T, y []byte) {
+ copy(x /* ERROR expects slice arguments */ , y)
+ copy(y, x)
+}
+
+type L0 []int
+type L1 []int
+
+func _[T L0 | L1](x, y T) {
+ copy(x, y)
+}
+
+// delete
+
+type M0 interface{ int }
+type M1 interface{ map[string]int }
+type M2 interface { map[string]int | map[string]float64 }
+type M3 interface{ map[string]int | map[rune]int }
+type M4[K comparable, V any] interface{ map[K]V | map[rune]V }
+
+func _[T any](m T) {
+ delete(m /* ERROR not a map */, "foo")
+}
+
+func _[T M0](m T) {
+ delete(m /* ERROR not a map */, "foo")
+}
+
+func _[T M1](m T) {
+ delete(m, "foo")
+}
+
+func _[T M2](m T) {
+ delete(m, "foo")
+ delete(m, 0 /* ERROR cannot use .* as string */)
+}
+
+func _[T M3](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
+}
+
+func _[T M4[rune, V], V any](m T) {
+ delete(m, 'k')
+}
+
+func _[T M4[K, V], K comparable, V any](m T) {
+ delete(m /* ERROR must have identical key types */, "foo")
+}
+
+// make
+
+type myChan chan int
+
+func _[
+ S1 ~[]int,
+ S2 ~[]int | ~chan int,
+
+ M1 ~map[string]int,
+ M2 ~map[string]int | ~chan int,
+
+ C1 ~chan int,
+ C2 ~chan int | ~chan string,
+ C3 chan int | myChan, // single underlying type
+]() {
+ type S0 []int
+ _ = make([]int, 10)
+ _ = make(S0, 10)
+ _ = make(S1, 10)
+ _ = make /* ERROR not enough arguments */ ()
+ _ = make /* ERROR expects 2 or 3 arguments */ (S1)
+ _ = make(S1, 10, 20)
+ _ = make /* ERROR expects 2 or 3 arguments */ (S1, 10, 20, 30)
+ _ = make(S2 /* ERROR cannot make S2: no core type */ , 10)
+
+ type M0 map[string]int
+ _ = make(map[string]int)
+ _ = make(M0)
+ _ = make(M1)
+ _ = make(M1, 10)
+ _ = make/* ERROR expects 1 or 2 arguments */(M1, 10, 20)
+ _ = make(M2 /* ERROR cannot make M2: no core type */ )
+
+ type C0 chan int
+ _ = make(chan int)
+ _ = make(C0)
+ _ = make(C1)
+ _ = make(C1, 10)
+ _ = make/* ERROR expects 1 or 2 arguments */(C1, 10, 20)
+ _ = make(C2 /* ERROR cannot make C2: no core type */ )
+ _ = make(C3)
+}
+
+// unsafe.Alignof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Alignof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Alignof(p)
+ assert(pp == 8)
+ const ll = unsafe.Alignof(l)
+ assert(ll == 8)
+ const ff = unsafe.Alignof(f)
+ assert(ff == 8)
+ const ii = unsafe.Alignof(i)
+ assert(ii == 8)
+ const cc = unsafe.Alignof(c)
+ assert(cc == 8)
+ const mm = unsafe.Alignof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Offsetof
+
+func _[T comparable]() {
+ var (
+ b struct{ _, f int64 }
+ a struct{ _, f [10]T }
+ s struct{ _, f struct{ f T } }
+ p struct{ _, f *T }
+ l struct{ _, f []T }
+ f struct{ _, f func(T) }
+ i struct{ _, f interface{ m() T } }
+ c struct{ _, f chan T }
+ m struct{ _, f map[T]T }
+ t struct{ _, f T }
+ )
+
+ const bb = unsafe.Offsetof(b.f)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Offsetof(p.f)
+ assert(pp == 8)
+ const ll = unsafe.Offsetof(l.f)
+ assert(ll == 24)
+ const ff = unsafe.Offsetof(f.f)
+ assert(ff == 8)
+ const ii = unsafe.Offsetof(i.f)
+ assert(ii == 16)
+ const cc = unsafe.Offsetof(c.f)
+ assert(cc == 8)
+ const mm = unsafe.Offsetof(m.f)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
+
+// unsafe.Sizeof
+
+func _[T comparable]() {
+ var (
+ b int64
+ a [10]T
+ s struct{ f T }
+ p *T
+ l []T
+ f func(T)
+ i interface{ m() T }
+ c chan T
+ m map[T]T
+ t T
+ )
+
+ const bb = unsafe.Sizeof(b)
+ assert(bb == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(a)
+ const _ = unsafe /* ERROR not constant */ .Alignof(s)
+ const pp = unsafe.Sizeof(p)
+ assert(pp == 8)
+ const ll = unsafe.Sizeof(l)
+ assert(ll == 24)
+ const ff = unsafe.Sizeof(f)
+ assert(ff == 8)
+ const ii = unsafe.Sizeof(i)
+ assert(ii == 16)
+ const cc = unsafe.Sizeof(c)
+ assert(cc == 8)
+ const mm = unsafe.Sizeof(m)
+ assert(mm == 8)
+ const _ = unsafe /* ERROR not constant */ .Alignof(t)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/builtins.src b/src/cmd/compile/internal/types2/testdata/check/builtins.src
new file mode 100644
index 0000000..358e9c5
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/builtins.src
@@ -0,0 +1,902 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// builtin calls
+
+package builtins
+
+import "unsafe"
+
+func f0() {}
+
+func append1() {
+ var b byte
+ var x int
+ var s []byte
+ _ = append() // ERROR not enough arguments
+ _ = append("foo" /* ERROR must be a slice */ )
+ _ = append(nil /* ERROR must be a slice */ , s)
+ _ = append(x /* ERROR must be a slice */ , s)
+ _ = append(s)
+ _ = append(s, nil...)
+ append /* ERROR not used */ (s)
+
+ _ = append(s, b)
+ _ = append(s, x /* ERROR cannot use x */ )
+ _ = append(s, s /* ERROR cannot use s */ )
+ _ = append(s /* ERROR not enough arguments */ ...)
+ _ = append(s, b, s /* ERROR too many arguments */ ... )
+ _ = append(s, 1, 2, 3)
+ _ = append(s, 1, 2, 3, x /* ERROR cannot use x */ , 5, 6, 6)
+ _ = append(s, 1, 2 /* ERROR too many arguments */ , s... )
+ _ = append([]interface{}(nil), 1, 2, "foo", x, 3.1425, false)
+
+ type S []byte
+ type T string
+ var t T
+ _ = append(s, "foo" /* ERROR cannot use .* in argument to append */ )
+ _ = append(s, "foo"...)
+ _ = append(S(s), "foo" /* ERROR cannot use .* in argument to append */ )
+ _ = append(S(s), "foo"...)
+ _ = append(s, t /* ERROR cannot use t */ )
+ _ = append(s, t...)
+ _ = append(s, T("foo")...)
+ _ = append(S(s), t /* ERROR cannot use t */ )
+ _ = append(S(s), t...)
+ _ = append(S(s), T("foo")...)
+ _ = append([]string{}, t /* ERROR cannot use t */ , "foo")
+ _ = append([]T{}, t, "foo")
+}
+
+// from the spec
+func append2() {
+ s0 := []int{0, 0}
+ s1 := append(s0, 2) // append a single element s1 == []int{0, 0, 2}
+ s2 := append(s1, 3, 5, 7) // append multiple elements s2 == []int{0, 0, 2, 3, 5, 7}
+ s3 := append(s2, s0...) // append a slice s3 == []int{0, 0, 2, 3, 5, 7, 0, 0}
+ s4 := append(s3[3:6], s3[2:]...) // append overlapping slice s4 == []int{3, 5, 7, 2, 3, 5, 7, 0, 0}
+
+ var t []interface{}
+ t = append(t, 42, 3.1415, "foo") // t == []interface{}{42, 3.1415, "foo"}
+
+ var b []byte
+ b = append(b, "bar"...) // append string contents b == []byte{'b', 'a', 'r' }
+
+ _ = s4
+}
+
+func append3() {
+ f1 := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ f3 := func() (s []int, x, y int) { return }
+ f5 := func() (s []interface{}, x int, y float32, z string, b bool) { return }
+ ff := func() (int, float32) { return 0, 0 }
+ _ = append(f0 /* ERROR used as value */ ())
+ _ = append(f1())
+ _ = append(f2())
+ _ = append(f3())
+ _ = append(f5())
+ _ = append(ff /* ERROR must be a slice */ ()) // TODO(gri) better error message
+}
+
+func cap1() {
+ var a [10]bool
+ var p *[20]int
+ var c chan string
+ _ = cap() // ERROR not enough arguments
+ _ = cap(1, 2) // ERROR too many arguments
+ _ = cap(42 /* ERROR invalid */)
+ const _3 = cap(a)
+ assert(_3 == 10)
+ const _4 = cap(p)
+ assert(_4 == 20)
+ _ = cap(c)
+ cap /* ERROR not used */ (c)
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = cap(((*T)(nil)).a)
+
+ var s [][]byte
+ _ = cap(s)
+ _ = cap(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func cap2() {
+ f1a := func() (a [10]int) { return }
+ f1s := func() (s []int) { return }
+ f2 := func() (s []int, x int) { return }
+ _ = cap(f0 /* ERROR used as value */ ())
+ _ = cap(f1a())
+ _ = cap(f1s())
+ _ = cap(f2()) // ERROR too many arguments
+}
+
+// test cases for issue 7387
+func cap3() {
+ var f = func() int { return 0 }
+ var x = f()
+ const (
+ _ = cap([4]int{})
+ _ = cap([4]int{x})
+ _ = cap /* ERROR not constant */ ([4]int{f()})
+ _ = cap /* ERROR not constant */ ([4]int{cap([]int{})})
+ _ = cap([4]int{cap([4]int{})})
+ )
+ var y float64
+ var z complex128
+ const (
+ _ = cap([4]float64{})
+ _ = cap([4]float64{y})
+ _ = cap([4]float64{real(2i)})
+ _ = cap /* ERROR not constant */ ([4]float64{real(z)})
+ )
+ var ch chan [10]int
+ const (
+ _ = cap /* ERROR not constant */ (<-ch)
+ _ = cap /* ERROR not constant */ ([4]int{(<-ch)[0]})
+ )
+}
+
+func close1() {
+ var c chan int
+ var r <-chan int
+ close() // ERROR not enough arguments
+ close(1, 2) // ERROR too many arguments
+ close(42 /* ERROR cannot close non-channel */)
+ close(r /* ERROR receive-only channel */)
+ close(c)
+ _ = close /* ERROR used as value */ (c)
+
+ var s []chan int
+ close(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func close2() {
+ f1 := func() (ch chan int) { return }
+ f2 := func() (ch chan int, x int) { return }
+ close(f0 /* ERROR used as value */ ())
+ close(f1())
+ close(f2()) // ERROR too many arguments
+}
+
+func complex1() {
+ var i32 int32
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = complex() // ERROR not enough arguments
+ _ = complex(1) // ERROR not enough arguments
+ _ = complex(true /* ERROR mismatched types */ , 0)
+ _ = complex(i32 /* ERROR expected floating-point */ , 0)
+ _ = complex("foo" /* ERROR mismatched types */ , 0)
+ _ = complex(c64 /* ERROR expected floating-point */ , 0)
+ _ = complex(0 /* ERROR mismatched types */ , true)
+ _ = complex(0 /* ERROR expected floating-point */ , i32)
+ _ = complex(0 /* ERROR mismatched types */ , "foo")
+ _ = complex(0 /* ERROR expected floating-point */ , c64)
+ _ = complex(f32, f32)
+ _ = complex(f32, 1)
+ _ = complex(f32, 1.0)
+ _ = complex(f32, 'a')
+ _ = complex(f64, f64)
+ _ = complex(f64, 1)
+ _ = complex(f64, 1.0)
+ _ = complex(f64, 'a')
+ _ = complex(f32 /* ERROR mismatched types */ , f64)
+ _ = complex(f64 /* ERROR mismatched types */ , f32)
+ _ = complex(1, 1)
+ _ = complex(1, 1.1)
+ _ = complex(1, 'a')
+ complex /* ERROR not used */ (1, 2)
+
+ var _ complex64 = complex(f32, f32)
+ var _ complex64 = complex /* ERROR cannot use .* in variable declaration */ (f64, f64)
+
+ var _ complex128 = complex /* ERROR cannot use .* in variable declaration */ (f32, f32)
+ var _ complex128 = complex(f64, f64)
+
+ // untyped constants
+ const _ int = complex(1, 0)
+ const _ float32 = complex(1, 0)
+ const _ complex64 = complex(1, 0)
+ const _ complex128 = complex(1, 0)
+ const _ = complex(0i, 0i)
+ const _ = complex(0i, 0)
+ const _ int = 1.0 + complex(1, 0i)
+
+ const _ int = complex /* ERROR int */ (1.1, 0)
+ const _ float32 = complex /* ERROR float32 */ (1, 2)
+
+ // untyped values
+ var s uint
+ _ = complex(1 /* ERROR integer */ <<s, 0)
+ const _ = complex /* ERROR not constant */ (1 /* ERROR integer */ <<s, 0)
+ var _ int = complex /* ERROR cannot use .* in variable declaration */ (1 /* ERROR integer */ <<s, 0)
+
+ // floating-point argument types must be identical
+ type F32 float32
+ type F64 float64
+ var x32 F32
+ var x64 F64
+ c64 = complex(x32, x32)
+ _ = complex(x32 /* ERROR mismatched types */ , f32)
+ _ = complex(f32 /* ERROR mismatched types */ , x32)
+ c128 = complex(x64, x64)
+ _ = c128
+ _ = complex(x64 /* ERROR mismatched types */ , f64)
+ _ = complex(f64 /* ERROR mismatched types */ , x64)
+
+ var t []float32
+ _ = complex(t... /* ERROR invalid use of \.\.\. */ )
+}
+
+func complex2() {
+ f1 := func() (x float32) { return }
+ f2 := func() (x, y float32) { return }
+ f3 := func() (x, y, z float32) { return }
+ _ = complex(f0 /* ERROR used as value */ ())
+ _ = complex(f1()) // ERROR not enough arguments
+ _ = complex(f2())
+ _ = complex(f3()) // ERROR too many arguments
+}
+
+func copy1() {
+ copy() // ERROR not enough arguments
+ copy("foo") // ERROR not enough arguments
+ copy([ /* ERROR copy expects slice arguments */ ...]int{}, []int{})
+ copy([ /* ERROR copy expects slice arguments */ ]int{}, [...]int{})
+ copy([ /* ERROR different element types */ ]int8{}, "foo")
+
+ // spec examples
+ var a = [...]int{0, 1, 2, 3, 4, 5, 6, 7}
+ var s = make([]int, 6)
+ var b = make([]byte, 5)
+ n1 := copy(s, a[0:]) // n1 == 6, s == []int{0, 1, 2, 3, 4, 5}
+ n2 := copy(s, s[2:]) // n2 == 4, s == []int{2, 3, 4, 5, 4, 5}
+ n3 := copy(b, "Hello, World!") // n3 == 5, b == []byte("Hello")
+ _, _, _ = n1, n2, n3
+
+ var t [][]int
+ copy(t, t)
+ copy(t /* ERROR copy expects slice arguments */ , nil)
+ copy(nil /* ERROR copy expects slice arguments */ , t)
+ copy(nil /* ERROR copy expects slice arguments */ , nil)
+ copy(t... /* ERROR invalid use of \.\.\. */ )
+}
+
+func copy2() {
+ f1 := func() (a []int) { return }
+ f2 := func() (a, b []int) { return }
+ f3 := func() (a, b, c []int) { return }
+ copy(f0 /* ERROR used as value */ ())
+ copy(f1()) // ERROR not enough arguments
+ copy(f2())
+ copy(f3()) // ERROR too many arguments
+}
+
+func delete1() {
+ var m map[string]int
+ var s string
+ delete() // ERROR not enough arguments
+ delete(1) // ERROR not enough arguments
+ delete(1, 2, 3) // ERROR too many arguments
+ delete(m, 0 /* ERROR cannot use */)
+ delete(m, s)
+ _ = delete /* ERROR used as value */ (m, s)
+
+ var t []map[string]string
+ delete(t... /* ERROR invalid use of \.\.\. */ )
+}
+
+func delete2() {
+ f1 := func() (m map[string]int) { return }
+ f2 := func() (m map[string]int, k string) { return }
+ f3 := func() (m map[string]int, k string, x float32) { return }
+ delete(f0 /* ERROR used as value */ ())
+ delete(f1()) // ERROR not enough arguments
+ delete(f2())
+ delete(f3()) // ERROR too many arguments
+}
+
+func imag1() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = imag() // ERROR not enough arguments
+ _ = imag(1, 2) // ERROR too many arguments
+ _ = imag(10)
+ _ = imag(2.7182818)
+ _ = imag("foo" /* ERROR expected complex */)
+ _ = imag('a')
+ const _5 = imag(1 + 2i)
+ assert(_5 == 2)
+ f32 = _5
+ f64 = _5
+ const _6 = imag(0i)
+ assert(_6 == 0)
+ f32 = imag(c64)
+ f64 = imag(c128)
+ f32 = imag /* ERROR cannot use .* in assignment */ (c128)
+ f64 = imag /* ERROR cannot use .* in assignment */ (c64)
+ imag /* ERROR not used */ (c64)
+ _, _ = f32, f64
+
+ // complex type may not be predeclared
+ type C64 complex64
+ type C128 complex128
+ var x64 C64
+ var x128 C128
+ f32 = imag(x64)
+ f64 = imag(x128)
+
+ var a []complex64
+ _ = imag(a... /* ERROR invalid use of \.\.\. */ )
+
+ // if argument is untyped, result is untyped
+ const _ byte = imag(1.2 + 3i)
+ const _ complex128 = imag(1.2 + 3i)
+
+ // lhs constant shift operands are typed as complex128
+ var s uint
+ _ = imag(1 /* ERROR must be integer */ << s)
+}
+
+func imag2() {
+ f1 := func() (x complex128) { return }
+ f2 := func() (x, y complex128) { return }
+ _ = imag(f0 /* ERROR used as value */ ())
+ _ = imag(f1())
+ _ = imag(f2()) // ERROR too many arguments
+}
+
+func len1() {
+ const c = "foobar"
+ var a [10]bool
+ var p *[20]int
+ var m map[string]complex128
+ _ = len() // ERROR not enough arguments
+ _ = len(1, 2) // ERROR too many arguments
+ _ = len(42 /* ERROR invalid */)
+ const _3 = len(c)
+ assert(_3 == 6)
+ const _4 = len(a)
+ assert(_4 == 10)
+ const _5 = len(p)
+ assert(_5 == 20)
+ _ = len(m)
+ len /* ERROR not used */ (c)
+
+ // esoteric case
+ var t string
+ var hash map[interface{}][]*[10]int
+ const n = len /* ERROR not constant */ (hash[recover()][len(t)])
+ assert(n == 10) // ok because n has unknown value and no error is reported
+ var ch <-chan int
+ const nn = len /* ERROR not constant */ (hash[<-ch][len(t)])
+
+ // issue 4744
+ type T struct{ a [10]int }
+ const _ = len(((*T)(nil)).a)
+
+ var s [][]byte
+ _ = len(s)
+ _ = len(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func len2() {
+ f1 := func() (x []int) { return }
+ f2 := func() (x, y []int) { return }
+ _ = len(f0 /* ERROR used as value */ ())
+ _ = len(f1())
+ _ = len(f2()) // ERROR too many arguments
+}
+
+// test cases for issue 7387
+func len3() {
+ var f = func() int { return 0 }
+ var x = f()
+ const (
+ _ = len([4]int{})
+ _ = len([4]int{x})
+ _ = len /* ERROR not constant */ ([4]int{f()})
+ _ = len /* ERROR not constant */ ([4]int{len([]int{})})
+ _ = len([4]int{len([4]int{})})
+ )
+ var y float64
+ var z complex128
+ const (
+ _ = len([4]float64{})
+ _ = len([4]float64{y})
+ _ = len([4]float64{real(2i)})
+ _ = len /* ERROR not constant */ ([4]float64{real(z)})
+ )
+ var ch chan [10]int
+ const (
+ _ = len /* ERROR not constant */ (<-ch)
+ _ = len /* ERROR not constant */ ([4]int{(<-ch)[0]})
+ )
+}
+
+func make1() {
+ var n int
+ var m float32
+ var s uint
+
+ _ = make() // ERROR not enough arguments
+ _ = make(1 /* ERROR not a type */)
+ _ = make(int /* ERROR cannot make */)
+
+ // slices
+ _ = make/* ERROR arguments */ ([]int)
+ _ = make/* ERROR arguments */ ([]int, 2, 3, 4)
+ _ = make([]int, int /* ERROR not an expression */)
+ _ = make([]int, 10, float32 /* ERROR not an expression */)
+ _ = make([]int, "foo" /* ERROR cannot convert */)
+ _ = make([]int, 10, 2.3 /* ERROR truncated */)
+ _ = make([]int, 5, 10.0)
+ _ = make([]int, 0i)
+ _ = make([]int, 1.0)
+ _ = make([]int, 1.0<<s)
+ _ = make([]int, 1.1 /* ERROR int */ <<s)
+ _ = make([]int, - /* ERROR must not be negative */ 1, 10)
+ _ = make([]int, 0, - /* ERROR must not be negative */ 1)
+ _ = make([]int, - /* ERROR must not be negative */ 1, - /* ERROR must not be negative */ 1)
+ _ = make([]int, 1 /* ERROR overflows */ <<100, 1 /* ERROR overflows */ <<100)
+ _ = make([]int, 10 /* ERROR length and capacity swapped */ , 9)
+ _ = make([]int, 1 /* ERROR overflows */ <<100, 12345)
+ _ = make([]int, m /* ERROR must be integer */ )
+ _ = &make /* ERROR cannot take address */ ([]int, 0)
+
+ // maps
+ _ = make /* ERROR arguments */ (map[int]string, 10, 20)
+ _ = make(map[int]float32, int /* ERROR not an expression */)
+ _ = make(map[int]float32, "foo" /* ERROR cannot convert */)
+ _ = make(map[int]float32, 10)
+ _ = make(map[int]float32, n)
+ _ = make(map[int]float32, int64(n))
+ _ = make(map[string]bool, 10.0)
+ _ = make(map[string]bool, 10.0<<s)
+ _ = &make /* ERROR cannot take address */ (map[string]bool)
+
+ // channels
+ _ = make /* ERROR arguments */ (chan int, 10, 20)
+ _ = make(chan int, int /* ERROR not an expression */)
+ _ = make(chan<- int, "foo" /* ERROR cannot convert */)
+ _ = make(chan int, - /* ERROR must not be negative */ 10)
+ _ = make(<-chan float64, 10)
+ _ = make(chan chan int, n)
+ _ = make(chan string, int64(n))
+ _ = make(chan bool, 10.0)
+ _ = make(chan bool, 10.0<<s)
+ _ = &make /* ERROR cannot take address */ (chan bool)
+
+ make /* ERROR not used */ ([]int, 10)
+
+ var t []int
+ _ = make([]int, t[0], t[1])
+ _ = make([]int, t... /* ERROR invalid use of \.\.\. */ )
+}
+
+func make2() {
+ f1 := func() (x []int) { return }
+ _ = make(f0 /* ERROR not a type */ ())
+ _ = make(f1 /* ERROR not a type */ ())
+}
+
+func new1() {
+ _ = new() // ERROR not enough arguments
+ _ = new(1, 2) // ERROR too many arguments
+ _ = new("foo" /* ERROR not a type */)
+ p := new(float64)
+ _ = new(struct{ x, y int })
+ q := new(*float64)
+ _ = *p == **q
+ new /* ERROR not used */ (int)
+ _ = &new /* ERROR cannot take address */ (int)
+
+ _ = new(int... /* ERROR invalid use of \.\.\. */ )
+}
+
+func new2() {
+ f1 := func() (x []int) { return }
+ _ = new(f0 /* ERROR not a type */ ())
+ _ = new(f1 /* ERROR not a type */ ())
+}
+
+func panic1() {
+ panic() // ERROR not enough arguments
+ panic(1, 2) // ERROR too many arguments
+ panic(0)
+ panic("foo")
+ panic(false)
+ panic(1<<10)
+ panic(1 << /* ERROR constant shift overflow */ 1000)
+ _ = panic /* ERROR used as value */ (0)
+
+ var s []byte
+ panic(s)
+ panic(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func panic2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ panic(f0 /* ERROR used as value */ ())
+ panic(f1())
+ panic(f2()) // ERROR too many arguments
+}
+
+func print1() {
+ print()
+ print(1)
+ print(1, 2)
+ print("foo")
+ print(2.718281828)
+ print(false)
+ print(1<<10)
+ print(1 << /* ERROR constant shift overflow */ 1000)
+ println(nil /* ERROR untyped nil */ )
+
+ var s []int
+ print(s... /* ERROR invalid use of \.\.\. */ )
+ _ = print /* ERROR used as value */ ()
+}
+
+func print2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ f3 := func() (x int, y float32, z string) { return }
+ print(f0 /* ERROR used as value */ ())
+ print(f1())
+ print(f2())
+ print(f3())
+}
+
+func println1() {
+ println()
+ println(1)
+ println(1, 2)
+ println("foo")
+ println(2.718281828)
+ println(false)
+ println(1<<10)
+ println(1 << /* ERROR constant shift overflow */ 1000)
+ println(nil /* ERROR untyped nil */ )
+
+ var s []int
+ println(s... /* ERROR invalid use of \.\.\. */ )
+ _ = println /* ERROR used as value */ ()
+}
+
+func println2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ f3 := func() (x int, y float32, z string) { return }
+ println(f0 /* ERROR used as value */ ())
+ println(f1())
+ println(f2())
+ println(f3())
+}
+
+func real1() {
+ var f32 float32
+ var f64 float64
+ var c64 complex64
+ var c128 complex128
+ _ = real() // ERROR not enough arguments
+ _ = real(1, 2) // ERROR too many arguments
+ _ = real(10)
+ _ = real(2.7182818)
+ _ = real("foo" /* ERROR expected complex */)
+ const _5 = real(1 + 2i)
+ assert(_5 == 1)
+ f32 = _5
+ f64 = _5
+ const _6 = real(0i)
+ assert(_6 == 0)
+ f32 = real(c64)
+ f64 = real(c128)
+ f32 = real /* ERROR cannot use .* in assignment */ (c128)
+ f64 = real /* ERROR cannot use .* in assignment */ (c64)
+ real /* ERROR not used */ (c64)
+
+ // the argument's complex type need not be predeclared
+ type C64 complex64
+ type C128 complex128
+ var x64 C64
+ var x128 C128
+ f32 = imag(x64)
+ f64 = imag(x128)
+ _, _ = f32, f64
+
+ var a []complex64
+ _ = real(a... /* ERROR invalid use of \.\.\. */ )
+
+ // if argument is untyped, result is untyped
+ const _ byte = real(1 + 2.3i)
+ const _ complex128 = real(1 + 2.3i)
+
+ // lhs constant shift operands are typed as complex128
+ var s uint
+ _ = real(1 /* ERROR must be integer */ << s)
+}
+
+func real2() {
+ f1 := func() (x complex128) { return }
+ f2 := func() (x, y complex128) { return }
+ _ = real(f0 /* ERROR used as value */ ())
+ _ = real(f1())
+ _ = real(f2()) // ERROR too many arguments
+}
+
+func recover1() {
+ _ = recover()
+ _ = recover(10) // ERROR too many arguments
+ recover()
+
+ var s []int
+ recover(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func recover2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x, y int) { return }
+ _ = recover(f0 /* ERROR used as value */ ())
+ _ = recover(f1()) // ERROR too many arguments
+ _ = recover(f2()) // ERROR too many arguments
+}
+
+// assuming types.DefaultPtrSize == 8
+type S0 struct{ // offset
+ a bool // 0
+ b rune // 4
+ c *int // 8
+ d bool // 16
+ e complex128 // 24
+} // 40
+
+type S1 struct{ // offset
+ x float32 // 0
+ y string // 8
+ z *S1 // 24
+ S0 // 32
+} // 72
+
+type S2 struct{ // offset
+ *S1 // 0
+} // 8
+
+type S3 struct { // offset
+ a int64 // 0
+ b int32 // 8
+} // 12
+
+type S4 struct { // offset
+ S3 // 0
+ int32 // 12
+} // 16
+
+type S5 struct { // offset
+ a [3]int32 // 0
+ b int32 // 12
+} // 16
+
+func (S2) m() {}
+
+func Alignof1() {
+ var x int
+ _ = unsafe.Alignof() // ERROR not enough arguments
+ _ = unsafe.Alignof(1, 2) // ERROR too many arguments
+ _ = unsafe.Alignof(int /* ERROR not an expression */)
+ _ = unsafe.Alignof(42)
+ _ = unsafe.Alignof(new(struct{}))
+ _ = unsafe.Alignof(1<<10)
+ _ = unsafe.Alignof(1 << /* ERROR constant shift overflow */ 1000)
+ _ = unsafe.Alignof(nil /* ERROR "untyped nil */ )
+ unsafe /* ERROR not used */ .Alignof(x)
+
+ var y S0
+ assert(unsafe.Alignof(y.a) == 1)
+ assert(unsafe.Alignof(y.b) == 4)
+ assert(unsafe.Alignof(y.c) == 8)
+ assert(unsafe.Alignof(y.d) == 1)
+ assert(unsafe.Alignof(y.e) == 8)
+
+ var s []byte
+ _ = unsafe.Alignof(s)
+ _ = unsafe.Alignof(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func Alignof2() {
+ f1 := func() (x int32) { return }
+ f2 := func() (x, y int32) { return }
+ _ = unsafe.Alignof(f0 /* ERROR used as value */ ())
+ assert(unsafe.Alignof(f1()) == 4)
+ _ = unsafe.Alignof(f2()) // ERROR too many arguments
+}
+
+func Offsetof1() {
+ var x struct{ f int }
+ _ = unsafe.Offsetof() // ERROR not enough arguments
+ _ = unsafe.Offsetof(1, 2) // ERROR too many arguments
+ _ = unsafe.Offsetof(int /* ERROR not a selector expression */ )
+ _ = unsafe.Offsetof(x /* ERROR not a selector expression */ )
+ _ = unsafe.Offsetof(nil /* ERROR not a selector expression */ )
+ _ = unsafe.Offsetof(x.f)
+ _ = unsafe.Offsetof((x.f))
+ _ = unsafe.Offsetof((((((((x))).f)))))
+ unsafe /* ERROR not used */ .Offsetof(x.f)
+
+ var y0 S0
+ assert(unsafe.Offsetof(y0.a) == 0)
+ assert(unsafe.Offsetof(y0.b) == 4)
+ assert(unsafe.Offsetof(y0.c) == 8)
+ assert(unsafe.Offsetof(y0.d) == 16)
+ assert(unsafe.Offsetof(y0.e) == 24)
+
+ var y1 S1
+ assert(unsafe.Offsetof(y1.x) == 0)
+ assert(unsafe.Offsetof(y1.y) == 8)
+ assert(unsafe.Offsetof(y1.z) == 24)
+ assert(unsafe.Offsetof(y1.S0) == 32)
+
+ assert(unsafe.Offsetof(y1.S0.a) == 0) // relative to S0
+ assert(unsafe.Offsetof(y1.a) == 32) // relative to S1
+ assert(unsafe.Offsetof(y1.b) == 36) // relative to S1
+ assert(unsafe.Offsetof(y1.c) == 40) // relative to S1
+ assert(unsafe.Offsetof(y1.d) == 48) // relative to S1
+ assert(unsafe.Offsetof(y1.e) == 56) // relative to S1
+
+ var y1p *S1
+ assert(unsafe.Offsetof(y1p.S0) == 32)
+
+ type P *S1
+ var p P = y1p
+ assert(unsafe.Offsetof(p.S0) == 32)
+
+ var y2 S2
+ assert(unsafe.Offsetof(y2.S1) == 0)
+ _ = unsafe.Offsetof(y2 /* ERROR embedded via a pointer */ .x)
+ _ = unsafe.Offsetof(y2 /* ERROR method value */ .m)
+
+ var s []byte
+ _ = unsafe.Offsetof(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func Offsetof2() {
+ f1 := func() (x int32) { return }
+ f2 := func() (x, y int32) { return }
+ _ = unsafe.Offsetof(f0 /* ERROR not a selector expression */ ())
+ _ = unsafe.Offsetof(f1 /* ERROR not a selector expression */ ())
+ _ = unsafe.Offsetof(f2 /* ERROR not a selector expression */ ())
+}
+
+func Sizeof1() {
+ var x int
+ _ = unsafe.Sizeof() // ERROR not enough arguments
+ _ = unsafe.Sizeof(1, 2) // ERROR too many arguments
+ _ = unsafe.Sizeof(int /* ERROR not an expression */)
+ _ = unsafe.Sizeof(42)
+ _ = unsafe.Sizeof(new(complex128))
+ _ = unsafe.Sizeof(1<<10)
+ _ = unsafe.Sizeof(1 << /* ERROR constant shift overflow */ 1000)
+ _ = unsafe.Sizeof(nil /* ERROR untyped nil */ )
+ unsafe /* ERROR not used */ .Sizeof(x)
+
+ // basic types have size guarantees
+ assert(unsafe.Sizeof(byte(0)) == 1)
+ assert(unsafe.Sizeof(uint8(0)) == 1)
+ assert(unsafe.Sizeof(int8(0)) == 1)
+ assert(unsafe.Sizeof(uint16(0)) == 2)
+ assert(unsafe.Sizeof(int16(0)) == 2)
+ assert(unsafe.Sizeof(uint32(0)) == 4)
+ assert(unsafe.Sizeof(int32(0)) == 4)
+ assert(unsafe.Sizeof(float32(0)) == 4)
+ assert(unsafe.Sizeof(uint64(0)) == 8)
+ assert(unsafe.Sizeof(int64(0)) == 8)
+ assert(unsafe.Sizeof(float64(0)) == 8)
+ assert(unsafe.Sizeof(complex64(0)) == 8)
+ assert(unsafe.Sizeof(complex128(0)) == 16)
+
+ var y0 S0
+ assert(unsafe.Sizeof(y0.a) == 1)
+ assert(unsafe.Sizeof(y0.b) == 4)
+ assert(unsafe.Sizeof(y0.c) == 8)
+ assert(unsafe.Sizeof(y0.d) == 1)
+ assert(unsafe.Sizeof(y0.e) == 16)
+ assert(unsafe.Sizeof(y0) == 40)
+
+ var y1 S1
+ assert(unsafe.Sizeof(y1) == 72)
+
+ var y2 S2
+ assert(unsafe.Sizeof(y2) == 8)
+
+ var y3 S3
+ assert(unsafe.Sizeof(y3) == 12)
+
+ var y4 S4
+ assert(unsafe.Sizeof(y4) == 16)
+
+ var y5 S5
+ assert(unsafe.Sizeof(y5) == 16)
+
+ var a3 [10]S3
+ assert(unsafe.Sizeof(a3) == 156)
+
+ // test case for issue 5670
+ type T struct {
+ a int32
+ _ int32
+ c int32
+ }
+ assert(unsafe.Sizeof(T{}) == 12)
+
+ var s []byte
+ _ = unsafe.Sizeof(s)
+ _ = unsafe.Sizeof(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func Sizeof2() {
+ f1 := func() (x int64) { return }
+ f2 := func() (x, y int64) { return }
+ _ = unsafe.Sizeof(f0 /* ERROR used as value */ ())
+ assert(unsafe.Sizeof(f1()) == 8)
+ _ = unsafe.Sizeof(f2()) // ERROR too many arguments
+}
+
+// self-testing only
+func assert1() {
+ var x int
+ assert() /* ERROR not enough arguments */
+ assert(1, 2) /* ERROR too many arguments */
+ assert("foo" /* ERROR boolean constant */ )
+ assert(x /* ERROR boolean constant */)
+ assert(true)
+ assert /* ERROR failed */ (false)
+ _ = assert(true)
+
+ var s []byte
+ assert(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func assert2() {
+ f1 := func() (x bool) { return }
+ f2 := func() (x bool) { return }
+ assert(f0 /* ERROR used as value */ ())
+ assert(f1 /* ERROR boolean constant */ ())
+ assert(f2 /* ERROR boolean constant */ ())
+}
+
+// self-testing only
+func trace1() {
+ // Uncomment the code below to test trace - will produce console output
+ // _ = trace /* ERROR no value */ ()
+ // _ = trace(1)
+ // _ = trace(true, 1.2, '\'', "foo", 42i, "foo" <= "bar")
+
+ var s []byte
+ trace(s... /* ERROR invalid use of \.\.\. */ )
+}
+
+func trace2() {
+ f1 := func() (x int) { return }
+ f2 := func() (x int, y string) { return }
+ f3 := func() (x int, y string, z []int) { return }
+ _ = f1
+ _ = f2
+ _ = f3
+ // Uncomment the code below to test trace - will produce console output
+ // trace(f0())
+ // trace(f1())
+ // trace(f2())
+ // trace(f3())
+ // trace(f0(), 1)
+ // trace(f1(), 1, 2)
+ // trace(f2(), 1, 2, 3)
+ // trace(f3(), 1, 2, 3, 4)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/chans.go2 b/src/cmd/compile/internal/types2/testdata/check/chans.go2
new file mode 100644
index 0000000..fad2bce
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/chans.go2
@@ -0,0 +1,62 @@
+package chans
+
+import "runtime"
+
+// Ranger returns a Sender and a Receiver. The Receiver provides a
+// Next method to retrieve values. The Sender provides a Send method
+// to send values and a Close method to stop sending values. The Next
+// method indicates when the Sender has been closed, and the Send
+// method indicates when the Receiver has been freed.
+//
+// This is a convenient way to exit a goroutine sending values when
+// the receiver stops reading them.
+func Ranger[T any]() (*Sender[T], *Receiver[T]) {
+ c := make(chan T)
+ d := make(chan bool)
+ s := &Sender[T]{values: c, done: d}
+ r := &Receiver[T]{values: c, done: d}
+ runtime.SetFinalizer(r, r.finalize)
+ return s, r
+}
+
+// A Sender is used to send values to a Receiver.
+type Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+// Send sends a value to the receiver. It returns whether any more
+// values may be sent; if it returns false the value was not sent.
+func (s *Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+// Close tells the receiver that no more values will arrive.
+// After Close is called, the Sender may no longer be used.
+func (s *Sender[T]) Close() {
+ close(s.values)
+}
+
+// A Receiver receives values from a Sender.
+type Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+// Next returns the next value from the channel. The bool result
+// indicates whether the value is valid, or whether the Sender has
+// been closed and no more values will be received.
+func (r *Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
+
+// finalize is a finalizer for the receiver.
+func (r *Receiver[T]) finalize() {
+ close(r.done)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/compliterals.go2 b/src/cmd/compile/internal/types2/testdata/check/compliterals.go2
new file mode 100644
index 0000000..60eac97
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/compliterals.go2
@@ -0,0 +1,22 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Composite literals with parameterized types
+
+package comp_literals
+
+type myStruct struct {
+ f int
+}
+
+type slice[E any] []E
+
+func struct_literals[S struct{f int}|myStruct]() {
+ _ = S{}
+ _ = S{0}
+ _ = S{f: 0}
+
+ _ = slice[int]{1, 2, 3}
+ _ = slice[S]{{}, {0}, {f:0}}
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/const0.src b/src/cmd/compile/internal/types2/testdata/check/const0.src
new file mode 100644
index 0000000..229c248
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/const0.src
@@ -0,0 +1,382 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// constant declarations
+
+package const0
+
+import "unsafe"
+
+// constant declarations must be initialized by constants
+var x = 0
+const c0 = x /* ERROR "not constant" */
+
+// typed constants must have constant types
+const _ interface /* ERROR invalid constant type */ {} = 0
+
+func _ () {
+ const _ interface /* ERROR invalid constant type */ {} = 0
+ for i := 0; i < 10; i++ {} // don't crash with non-nil iota here
+}
+
+// untyped constants
+const (
+ // boolean values
+ ub0 = false
+ ub1 = true
+ ub2 = 2 < 1
+ ub3 = ui1 == uf1
+ ub4 = true /* ERROR "mismatched types untyped bool and untyped int" */ == 0
+
+ // integer values
+ ui0 = 0
+ ui1 = 1
+ ui2 = 42
+ ui3 = 3141592653589793238462643383279502884197169399375105820974944592307816406286
+ ui4 = -10
+
+ ui5 = ui0 + ui1
+ ui6 = ui1 - ui1
+ ui7 = ui2 * ui1
+ ui8 = ui3 / ui3
+ ui9 = ui3 % ui3
+
+ ui10 = 1 / 0 /* ERROR "division by zero" */
+ ui11 = ui1 / 0 /* ERROR "division by zero" */
+ ui12 = ui3 / ui0 /* ERROR "division by zero" */
+ ui13 = 1 % 0 /* ERROR "division by zero" */
+ ui14 = ui1 % 0 /* ERROR "division by zero" */
+ ui15 = ui3 % ui0 /* ERROR "division by zero" */
+
+ ui16 = ui2 & ui3
+ ui17 = ui2 | ui3
+ ui18 = ui2 ^ ui3
+ ui19 = 1 /* ERROR "invalid operation" */ % 1.0
+
+ // floating point values
+ uf0 = 0.
+ uf1 = 1.
+ uf2 = 4.2e1
+ uf3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ uf4 = 1e-1
+
+ uf5 = uf0 + uf1
+ uf6 = uf1 - uf1
+ uf7 = uf2 * uf1
+ uf8 = uf3 / uf3
+ uf9 = uf3 /* ERROR "not defined" */ % uf3
+
+ uf10 = 1 / 0 /* ERROR "division by zero" */
+ uf11 = uf1 / 0 /* ERROR "division by zero" */
+ uf12 = uf3 / uf0 /* ERROR "division by zero" */
+
+ uf16 = uf2 /* ERROR "not defined" */ & uf3
+ uf17 = uf2 /* ERROR "not defined" */ | uf3
+ uf18 = uf2 /* ERROR "not defined" */ ^ uf3
+
+ // complex values
+ uc0 = 0.i
+ uc1 = 1.i
+ uc2 = 4.2e1i
+ uc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ uc4 = 1e-1i
+
+ uc5 = uc0 + uc1
+ uc6 = uc1 - uc1
+ uc7 = uc2 * uc1
+ uc8 = uc3 / uc3
+ uc9 = uc3 /* ERROR "not defined" */ % uc3
+
+ uc10 = 1 / 0 /* ERROR "division by zero" */
+ uc11 = uc1 / 0 /* ERROR "division by zero" */
+ uc12 = uc3 / uc0 /* ERROR "division by zero" */
+
+ uc16 = uc2 /* ERROR "not defined" */ & uc3
+ uc17 = uc2 /* ERROR "not defined" */ | uc3
+ uc18 = uc2 /* ERROR "not defined" */ ^ uc3
+)
+
+type (
+ mybool bool
+ myint int
+ myfloat float64
+ mycomplex complex128
+)
+
+// typed constants
+const (
+ // boolean values
+ tb0 bool = false
+ tb1 bool = true
+ tb2 mybool = 2 < 1
+ tb3 mybool = ti1 /* ERROR "mismatched types" */ == tf1
+
+ // integer values
+ ti0 int8 = ui0
+ ti1 int32 = ui1
+ ti2 int64 = ui2
+ ti3 myint = ui3 /* ERROR "overflows" */
+ ti4 myint = ui4
+
+ ti5 = ti0 /* ERROR "mismatched types" */ + ti1
+ ti6 = ti1 - ti1
+ ti7 = ti2 /* ERROR "mismatched types" */ * ti1
+ ti8 = ti3 / ti3
+ ti9 = ti3 % ti3
+
+ ti10 = 1 / 0 /* ERROR "division by zero" */
+ ti11 = ti1 / 0 /* ERROR "division by zero" */
+ ti12 = ti3 /* ERROR "mismatched types" */ / ti0
+ ti13 = 1 % 0 /* ERROR "division by zero" */
+ ti14 = ti1 % 0 /* ERROR "division by zero" */
+ ti15 = ti3 /* ERROR "mismatched types" */ % ti0
+
+ ti16 = ti2 /* ERROR "mismatched types" */ & ti3
+ ti17 = ti2 /* ERROR "mismatched types" */ | ti4
+ ti18 = ti2 ^ ti5 // no mismatched types error because the type of ti5 is unknown
+
+ // floating point values
+ tf0 float32 = 0.
+ tf1 float32 = 1.
+ tf2 float64 = 4.2e1
+ tf3 myfloat = 3.141592653589793238462643383279502884197169399375105820974944592307816406286
+ tf4 myfloat = 1e-1
+
+ tf5 = tf0 + tf1
+ tf6 = tf1 - tf1
+ tf7 = tf2 /* ERROR "mismatched types" */ * tf1
+ tf8 = tf3 / tf3
+ tf9 = tf3 /* ERROR "not defined" */ % tf3
+
+ tf10 = 1 / 0 /* ERROR "division by zero" */
+ tf11 = tf1 / 0 /* ERROR "division by zero" */
+ tf12 = tf3 /* ERROR "mismatched types" */ / tf0
+
+ tf16 = tf2 /* ERROR "mismatched types" */ & tf3
+ tf17 = tf2 /* ERROR "mismatched types" */ | tf3
+ tf18 = tf2 /* ERROR "mismatched types" */ ^ tf3
+
+ // complex values
+ tc0 = 0.i
+ tc1 = 1.i
+ tc2 = 4.2e1i
+ tc3 = 3.141592653589793238462643383279502884197169399375105820974944592307816406286i
+ tc4 = 1e-1i
+
+ tc5 = tc0 + tc1
+ tc6 = tc1 - tc1
+ tc7 = tc2 * tc1
+ tc8 = tc3 / tc3
+ tc9 = tc3 /* ERROR "not defined" */ % tc3
+
+ tc10 = 1 / 0 /* ERROR "division by zero" */
+ tc11 = tc1 / 0 /* ERROR "division by zero" */
+ tc12 = tc3 / tc0 /* ERROR "division by zero" */
+
+ tc16 = tc2 /* ERROR "not defined" */ & tc3
+ tc17 = tc2 /* ERROR "not defined" */ | tc3
+ tc18 = tc2 /* ERROR "not defined" */ ^ tc3
+)
+
+// initialization cycles
+const (
+ a /* ERROR "initialization cycle" */ = a
+ b /* ERROR "initialization cycle" */ , c /* ERROR "initialization cycle" */, d, e = e, d, c, b // TODO(gri) should only have one cycle error
+ f float64 = d
+)
+
+// multiple initialization
+const (
+ a1, a2, a3 = 7, 3.1415926, "foo"
+ b1, b2, b3 = b3, b1, 42
+ c1, c2, c3 /* ERROR "missing init expr for c3" */ = 1, 2
+ d1, d2, d3 = 1, 2, 3, 4 /* ERROR "extra init expr 4" */
+ _p0 = assert(a1 == 7)
+ _p1 = assert(a2 == 3.1415926)
+ _p2 = assert(a3 == "foo")
+ _p3 = assert(b1 == 42)
+ _p4 = assert(b2 == 42)
+ _p5 = assert(b3 == 42)
+)
+
+func _() {
+ const (
+ a1, a2, a3 = 7, 3.1415926, "foo"
+ b1, b2, b3 = b3, b1, 42
+ c1, c2, c3 /* ERROR "missing init expr for c3" */ = 1, 2
+ d1, d2, d3 = 1, 2, 3, 4 /* ERROR "extra init expr 4" */
+ _p0 = assert(a1 == 7)
+ _p1 = assert(a2 == 3.1415926)
+ _p2 = assert(a3 == "foo")
+ _p3 = assert(b1 == 42)
+ _p4 = assert(b2 == 42)
+ _p5 = assert(b3 == 42)
+ )
+}
+
+// iota
+const (
+ iota0 = iota
+ iota1 = iota
+ iota2 = iota*2
+ _a0 = assert(iota0 == 0)
+ _a1 = assert(iota1 == 1)
+ _a2 = assert(iota2 == 4)
+ iota6 = iota*3
+
+ iota7
+ iota8
+ _a3 = assert(iota7 == 21)
+ _a4 = assert(iota8 == 24)
+)
+
+const (
+ _b0 = iota
+ _b1 = assert(iota + iota2 == 5)
+ _b2 = len([iota]int{}) // iota may appear in a type!
+ _b3 = assert(_b2 == 2)
+ _b4 = len(A{})
+)
+
+type A [iota /* ERROR "cannot use iota" */ ]int
+
+// constant expressions with operands across different
+// constant declarations must use the right iota values
+const (
+ _c0 = iota
+ _c1
+ _c2
+ _x = _c2 + _d1 + _e0 // 3
+)
+
+const (
+ _d0 = iota
+ _d1
+)
+
+const (
+ _e0 = iota
+)
+
+var _ = assert(_x == 3)
+
+// special cases
+const (
+ _n0 = nil /* ERROR "not constant" */
+ _n1 = [ /* ERROR "not constant" */ ]int{}
+)
+
+// iotas must not be usable in expressions outside constant declarations
+type _ [iota /* ERROR "iota outside constant decl" */ ]byte
+var _ = iota /* ERROR "iota outside constant decl" */
+func _() {
+ _ = iota /* ERROR "iota outside constant decl" */
+ const _ = iota
+ _ = iota /* ERROR "iota outside constant decl" */
+}
+
+func _() {
+ iota := 123
+ const x = iota /* ERROR "is not constant" */
+ var y = iota
+ _ = y
+}
+
+// iotas are usable inside closures in constant declarations (#22345)
+const (
+ _ = iota
+ _ = len([iota]byte{})
+ _ = unsafe.Sizeof(iota)
+ _ = unsafe.Sizeof(func() { _ = iota })
+ _ = unsafe.Sizeof(func() { var _ = iota })
+ _ = unsafe.Sizeof(func() { const _ = iota })
+ _ = unsafe.Sizeof(func() { type _ [iota]byte })
+ _ = unsafe.Sizeof(func() { func() int { return iota }() })
+)
+
+// verify inner and outer const declarations have distinct iotas
+const (
+ zero = iota
+ one = iota
+ _ = unsafe.Sizeof(func() {
+ var x [iota]int // [2]int
+ const (
+ Zero = iota
+ One
+ Two
+ _ = unsafe.Sizeof([iota-1]int{} == x) // assert types are equal
+ _ = unsafe.Sizeof([Two]int{} == x) // assert types are equal
+ )
+ var z [iota]int // [2]int
+ _ = unsafe.Sizeof([2]int{} == z) // assert types are equal
+ })
+ three = iota // the sequence continues
+)
+var _ [three]int = [3]int{} // assert 'three' has correct value
+
+var (
+ _ = iota /* ERROR "iota outside constant decl" */
+ _ = unsafe.Sizeof(iota /* ERROR "iota outside constant decl" */ )
+ _ = unsafe.Sizeof(func() { _ = iota /* ERROR "iota outside constant decl" */ })
+ _ = unsafe.Sizeof(func() { var _ = iota /* ERROR "iota outside constant decl" */ })
+ _ = unsafe.Sizeof(func() { type _ [iota /* ERROR "iota outside constant decl" */ ]byte })
+ _ = unsafe.Sizeof(func() { func() int { return iota /* ERROR "iota outside constant decl" */ }() })
+)
+
+// constant arithmetic precision and rounding must lead to expected (integer) results
+var _ = []int64{
+ 0.0005 * 1e9,
+ 0.001 * 1e9,
+ 0.005 * 1e9,
+ 0.01 * 1e9,
+ 0.05 * 1e9,
+ 0.1 * 1e9,
+ 0.5 * 1e9,
+ 1 * 1e9,
+ 5 * 1e9,
+}
+
+const _ = unsafe.Sizeof(func() {
+ const _ = 0
+ _ = iota
+
+ const (
+ zero = iota
+ one
+ )
+ assert(one == 1)
+ assert(iota == 0)
+})
+
+// issue #52438
+const i1 = iota
+const i2 = iota
+const i3 = iota
+
+func _() {
+ assert(i1 == 0)
+ assert(i2 == 0)
+ assert(i3 == 0)
+
+ const i4 = iota
+ const i5 = iota
+ const i6 = iota
+
+ assert(i4 == 0)
+ assert(i5 == 0)
+ assert(i6 == 0)
+}
+
+// untyped constants must not get arbitrarily large
+const prec = 512 // internal maximum precision for integers
+const maxInt = (1<<(prec/2) - 1) * (1<<(prec/2) + 1) // == 1<<prec - 1
+
+const _ = maxInt + /* ERROR constant addition overflow */ 1
+const _ = -maxInt - /* ERROR constant subtraction overflow */ 1
+const _ = maxInt ^ /* ERROR constant bitwise XOR overflow */ -1
+const _ = maxInt * /* ERROR constant multiplication overflow */ 2
+const _ = maxInt << /* ERROR constant shift overflow */ 2
+const _ = 1 << /* ERROR constant shift overflow */ prec
+
+const _ = ^ /* ERROR constant bitwise complement overflow */ maxInt
diff --git a/src/cmd/compile/internal/types2/testdata/check/const1.src b/src/cmd/compile/internal/types2/testdata/check/const1.src
new file mode 100644
index 0000000..c912801
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/const1.src
@@ -0,0 +1,334 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// constant conversions
+
+package const1
+
+import "math"
+
+const(
+ mi = ^int(0)
+ mu = ^uint(0)
+ mp = ^uintptr(0)
+
+ logSizeofInt = uint(mi>>8&1 + mi>>16&1 + mi>>32&1)
+ logSizeofUint = uint(mu>>8&1 + mu>>16&1 + mu>>32&1)
+ logSizeofUintptr = uint(mp>>8&1 + mp>>16&1 + mp>>32&1)
+)
+
+const (
+ minInt8 = -1<<(8<<iota - 1)
+ minInt16
+ minInt32
+ minInt64
+ minInt = -1<<(8<<logSizeofInt - 1)
+)
+
+const (
+ maxInt8 = 1<<(8<<iota - 1) - 1
+ maxInt16
+ maxInt32
+ maxInt64
+ maxInt = 1<<(8<<logSizeofInt - 1) - 1
+)
+
+const (
+ maxUint8 = 1<<(8<<iota) - 1
+ maxUint16
+ maxUint32
+ maxUint64
+ maxUint = 1<<(8<<logSizeofUint) - 1
+ maxUintptr = 1<<(8<<logSizeofUintptr) - 1
+)
+
+const (
+ smallestFloat32 = 1.0 / (1<<(127 - 1 + 23))
+ // TODO(gri) The compiler limits integers to 512 bit and thus
+ // we cannot compute the value (1<<(1023 - 1 + 52))
+ // without overflow. For now we match the compiler.
+ // See also issue #44057.
+ // smallestFloat64 = 1.0 / (1<<(1023 - 1 + 52))
+ smallestFloat64 = math.SmallestNonzeroFloat64
+)
+
+const (
+ _ = assert(smallestFloat32 > 0)
+ _ = assert(smallestFloat64 > 0)
+)
+
+const (
+ maxFloat32 = 1<<127 * (1<<24 - 1) / (1.0<<23)
+ // TODO(gri) The compiler limits integers to 512 bit and thus
+ // we cannot compute the value 1<<1023
+ // without overflow. For now we match the compiler.
+ // See also issue #44057.
+ // maxFloat64 = 1<<1023 * (1<<53 - 1) / (1.0<<52)
+ maxFloat64 = math.MaxFloat64
+)
+
+const (
+ _ int8 = minInt8 /* ERROR "overflows" */ - 1
+ _ int8 = minInt8
+ _ int8 = maxInt8
+ _ int8 = maxInt8 /* ERROR "overflows" */ + 1
+ _ int8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int8(minInt8 /* ERROR "cannot convert" */ - 1)
+ _ = int8(minInt8)
+ _ = int8(maxInt8)
+ _ = int8(maxInt8 /* ERROR "cannot convert" */ + 1)
+ _ = int8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int16 = minInt16 /* ERROR "overflows" */ - 1
+ _ int16 = minInt16
+ _ int16 = maxInt16
+ _ int16 = maxInt16 /* ERROR "overflows" */ + 1
+ _ int16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int16(minInt16 /* ERROR "cannot convert" */ - 1)
+ _ = int16(minInt16)
+ _ = int16(maxInt16)
+ _ = int16(maxInt16 /* ERROR "cannot convert" */ + 1)
+ _ = int16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int32 = minInt32 /* ERROR "overflows" */ - 1
+ _ int32 = minInt32
+ _ int32 = maxInt32
+ _ int32 = maxInt32 /* ERROR "overflows" */ + 1
+ _ int32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int32(minInt32 /* ERROR "cannot convert" */ - 1)
+ _ = int32(minInt32)
+ _ = int32(maxInt32)
+ _ = int32(maxInt32 /* ERROR "cannot convert" */ + 1)
+ _ = int32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int64 = minInt64 /* ERROR "overflows" */ - 1
+ _ int64 = minInt64
+ _ int64 = maxInt64
+ _ int64 = maxInt64 /* ERROR "overflows" */ + 1
+ _ int64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int64(minInt64 /* ERROR "cannot convert" */ - 1)
+ _ = int64(minInt64)
+ _ = int64(maxInt64)
+ _ = int64(maxInt64 /* ERROR "cannot convert" */ + 1)
+ _ = int64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ int = minInt /* ERROR "overflows" */ - 1
+ _ int = minInt
+ _ int = maxInt
+ _ int = maxInt /* ERROR "overflows" */ + 1
+ _ int = smallestFloat64 /* ERROR "truncated" */
+
+ _ = int(minInt /* ERROR "cannot convert" */ - 1)
+ _ = int(minInt)
+ _ = int(maxInt)
+ _ = int(maxInt /* ERROR "cannot convert" */ + 1)
+ _ = int(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint8 = 0 /* ERROR "overflows" */ - 1
+ _ uint8 = 0
+ _ uint8 = maxUint8
+ _ uint8 = maxUint8 /* ERROR "overflows" */ + 1
+ _ uint8 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint8(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint8(0)
+ _ = uint8(maxUint8)
+ _ = uint8(maxUint8 /* ERROR "cannot convert" */ + 1)
+ _ = uint8(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint16 = 0 /* ERROR "overflows" */ - 1
+ _ uint16 = 0
+ _ uint16 = maxUint16
+ _ uint16 = maxUint16 /* ERROR "overflows" */ + 1
+ _ uint16 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint16(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint16(0)
+ _ = uint16(maxUint16)
+ _ = uint16(maxUint16 /* ERROR "cannot convert" */ + 1)
+ _ = uint16(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint32 = 0 /* ERROR "overflows" */ - 1
+ _ uint32 = 0
+ _ uint32 = maxUint32
+ _ uint32 = maxUint32 /* ERROR "overflows" */ + 1
+ _ uint32 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint32(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint32(0)
+ _ = uint32(maxUint32)
+ _ = uint32(maxUint32 /* ERROR "cannot convert" */ + 1)
+ _ = uint32(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint64 = 0 /* ERROR "overflows" */ - 1
+ _ uint64 = 0
+ _ uint64 = maxUint64
+ _ uint64 = maxUint64 /* ERROR "overflows" */ + 1
+ _ uint64 = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint64(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint64(0)
+ _ = uint64(maxUint64)
+ _ = uint64(maxUint64 /* ERROR "cannot convert" */ + 1)
+ _ = uint64(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uint = 0 /* ERROR "overflows" */ - 1
+ _ uint = 0
+ _ uint = maxUint
+ _ uint = maxUint /* ERROR "overflows" */ + 1
+ _ uint = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uint(0 /* ERROR "cannot convert" */ - 1)
+ _ = uint(0)
+ _ = uint(maxUint)
+ _ = uint(maxUint /* ERROR "cannot convert" */ + 1)
+ _ = uint(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ uintptr = 0 /* ERROR "overflows" */ - 1
+ _ uintptr = 0
+ _ uintptr = maxUintptr
+ _ uintptr = maxUintptr /* ERROR "overflows" */ + 1
+ _ uintptr = smallestFloat64 /* ERROR "truncated" */
+
+ _ = uintptr(0 /* ERROR "cannot convert" */ - 1)
+ _ = uintptr(0)
+ _ = uintptr(maxUintptr)
+ _ = uintptr(maxUintptr /* ERROR "cannot convert" */ + 1)
+ _ = uintptr(smallestFloat64 /* ERROR "cannot convert" */)
+)
+
+const (
+ _ float32 = minInt64
+ _ float64 = minInt64
+ _ complex64 = minInt64
+ _ complex128 = minInt64
+
+ _ = float32(minInt64)
+ _ = float64(minInt64)
+ _ = complex64(minInt64)
+ _ = complex128(minInt64)
+)
+
+const (
+ _ float32 = maxUint64
+ _ float64 = maxUint64
+ _ complex64 = maxUint64
+ _ complex128 = maxUint64
+
+ _ = float32(maxUint64)
+ _ = float64(maxUint64)
+ _ = complex64(maxUint64)
+ _ = complex128(maxUint64)
+)
+
+// TODO(gri) find smaller deltas below
+
+const delta32 = maxFloat32/(1 << 23)
+
+const (
+ _ float32 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ float32 = -maxFloat32
+ _ float32 = maxFloat32
+ _ float32 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = float32(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = float32(-maxFloat32)
+ _ = float32(maxFloat32)
+ _ = float32(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+
+ _ = assert(float32(smallestFloat32) == smallestFloat32)
+ _ = assert(float32(smallestFloat32/2) == 0)
+ _ = assert(float32(smallestFloat64) == 0)
+ _ = assert(float32(smallestFloat64/2) == 0)
+)
+
+const delta64 = maxFloat64/(1 << 52)
+
+const (
+ _ float64 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ float64 = -maxFloat64
+ _ float64 = maxFloat64
+ _ float64 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = float64(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = float64(-maxFloat64)
+ _ = float64(maxFloat64)
+ _ = float64(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+
+ _ = assert(float64(smallestFloat32) == smallestFloat32)
+ _ = assert(float64(smallestFloat32/2) == smallestFloat32/2)
+ _ = assert(float64(smallestFloat64) == smallestFloat64)
+ _ = assert(float64(smallestFloat64/2) == 0)
+)
+
+const (
+ _ complex64 = - /* ERROR "overflow" */ (maxFloat32 + delta32)
+ _ complex64 = -maxFloat32
+ _ complex64 = maxFloat32
+ _ complex64 = maxFloat32 /* ERROR "overflow" */ + delta32
+
+ _ = complex64(- /* ERROR "cannot convert" */ (maxFloat32 + delta32))
+ _ = complex64(-maxFloat32)
+ _ = complex64(maxFloat32)
+ _ = complex64(maxFloat32 /* ERROR "cannot convert" */ + delta32)
+)
+
+const (
+ _ complex128 = - /* ERROR "overflow" */ (maxFloat64 + delta64)
+ _ complex128 = -maxFloat64
+ _ complex128 = maxFloat64
+ _ complex128 = maxFloat64 /* ERROR "overflow" */ + delta64
+
+ _ = complex128(- /* ERROR "cannot convert" */ (maxFloat64 + delta64))
+ _ = complex128(-maxFloat64)
+ _ = complex128(maxFloat64)
+ _ = complex128(maxFloat64 /* ERROR "cannot convert" */ + delta64)
+)
+
+// Initialization of typed constant and conversion are the same:
+const (
+ f32 = 1 + smallestFloat32
+ x32 float32 = f32
+ y32 = float32(f32)
+ _ = assert(x32 - y32 == 0)
+)
+
+const (
+ f64 = 1 + smallestFloat64
+ x64 float64 = f64
+ y64 = float64(f64)
+ _ = assert(x64 - y64 == 0)
+)
+
+const (
+ _ = int8(-1) << 7
+ _ = int8 /* ERROR "overflows" */ (-1) << 8
+
+ _ = uint32(1) << 31
+ _ = uint32 /* ERROR "overflows" */ (1) << 32
+)
diff --git a/src/cmd/compile/internal/types2/testdata/check/constdecl.src b/src/cmd/compile/internal/types2/testdata/check/constdecl.src
new file mode 100644
index 0000000..cb155ab
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/constdecl.src
@@ -0,0 +1,138 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package constdecl
+
+import "math"
+import "unsafe"
+
+var v int
+
+// Const decls must be initialized by constants.
+const _ = v /* ERROR "not constant" */
+const _ = math /* ERROR "not constant" */ .Sin(0)
+const _ = int /* ERROR "not an expression" */
+
+func _() {
+ const _ = v /* ERROR "not constant" */
+ const _ = math /* ERROR "not constant" */ .Sin(0)
+ const _ = int /* ERROR "not an expression" */
+}
+
+// Identifier and expression arity must match.
+const _ /* ERROR "missing init expr for _" */
+const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+const _ /* ERROR "missing init expr for _" */ int
+const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+)
+
+const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+)
+
+const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+)
+
+func _() {
+ const _ /* ERROR "missing init expr for _" */
+ const _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ const _ /* ERROR "missing init expr for _" */ int
+ const _ int = 1, 2 /* ERROR "extra init expr 2" */
+
+ const (
+ _ /* ERROR "missing init expr for _" */
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+
+ _ /* ERROR "missing init expr for _" */ int
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+ )
+
+ const (
+ _ = 1
+ _
+ _, _ /* ERROR "missing init expr for _" */
+ _
+ )
+
+ const (
+ _, _ = 1, 2
+ _, _
+ _ /* ERROR "extra init expr at" */
+ _, _
+ _, _, _ /* ERROR "missing init expr for _" */
+ _, _
+ )
+}
+
+// Test case for constant with invalid initialization.
+// Caused panic because the constant value was not set up (gri - 7/8/2014).
+func _() {
+ const (
+ x string = missing /* ERROR "undeclared name" */
+ y = x + ""
+ )
+}
+
+// Test case for constants depending on function literals (see also #22992).
+const A /* ERROR initialization cycle */ = unsafe.Sizeof(func() { _ = A })
+
+func _() {
+ // The function literal below must not see a.
+ const a = unsafe.Sizeof(func() { _ = a /* ERROR "undeclared name" */ })
+ const b = unsafe.Sizeof(func() { _ = a })
+
+ // The function literal below must not see x, y, or z.
+ const x, y, z = 0, 1, unsafe.Sizeof(func() { _ = x /* ERROR "undeclared name" */ + y /* ERROR "undeclared name" */ + z /* ERROR "undeclared name" */ })
+}
+
+// Test cases for errors in inherited constant initialization expressions.
+// Errors related to inherited initialization expressions must appear at
+// the constant identifier being declared, not at the original expression
+// (issues #42991, #42992).
+const (
+ _ byte = 255 + iota
+ /* some gap */
+ _ // ERROR overflows
+ /* some gap */
+ /* some gap */ _ /* ERROR overflows */; _ /* ERROR overflows */
+ /* some gap */
+ _ = 255 + iota
+ _ = byte /* ERROR overflows */ (255) + iota
+ _ /* ERROR overflows */
+)
+
+// Test cases from issue.
+const (
+ ok = byte(iota + 253)
+ bad
+ barn
+ bard // ERROR cannot convert
+)
+
+const (
+ c = len([1 - iota]int{})
+ d
+ e // ERROR invalid array length
+ f // ERROR invalid array length
+)
+
+// TODO(gri) move extra tests from testdata/const0.src into here
diff --git a/src/cmd/compile/internal/types2/testdata/check/conversions.src b/src/cmd/compile/internal/types2/testdata/check/conversions.src
new file mode 100644
index 0000000..e1336c0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/conversions.src
@@ -0,0 +1,93 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// conversions
+
+package conversions
+
+import "unsafe"
+
+// argument count
+var (
+ _ = int() /* ERROR "missing argument" */
+ _ = int(1, 2 /* ERROR "too many arguments" */ )
+)
+
+// numeric constant conversions are in const1.src.
+
+func string_conversions() {
+ const A = string(65)
+ assert(A == "A")
+ const E = string(-1)
+ assert(E == "\uFFFD")
+ assert(E == string(1234567890))
+
+ type myint int
+ assert(A == string(myint(65)))
+
+ type mystring string
+ const _ mystring = mystring("foo")
+
+ const _ = string(true /* ERROR "cannot convert" */ )
+ const _ = string(1.2 /* ERROR "cannot convert" */ )
+ const _ = string(nil /* ERROR "cannot convert" */ )
+
+ // issues 11357, 11353: argument must be of integer type
+ _ = string(0.0 /* ERROR "cannot convert" */ )
+ _ = string(0i /* ERROR "cannot convert" */ )
+ _ = string(1 /* ERROR "cannot convert" */ + 2i)
+}
+
+func interface_conversions() {
+ type E interface{}
+
+ type I1 interface{
+ m1()
+ }
+
+ type I2 interface{
+ m1()
+ m2(x int)
+ }
+
+ type I3 interface{
+ m1()
+ m2() int
+ }
+
+ var e E
+ var i1 I1
+ var i2 I2
+ var i3 I3
+
+ _ = E(0)
+ _ = E(nil)
+ _ = E(e)
+ _ = E(i1)
+ _ = E(i2)
+
+ _ = I1(0 /* ERROR "cannot convert" */ )
+ _ = I1(nil)
+ _ = I1(i1)
+ _ = I1(e /* ERROR "cannot convert" */ )
+ _ = I1(i2)
+
+ _ = I2(nil)
+ _ = I2(i1 /* ERROR "cannot convert" */ )
+ _ = I2(i2)
+ _ = I2(i3 /* ERROR "cannot convert" */ )
+
+ _ = I3(nil)
+ _ = I3(i1 /* ERROR "cannot convert" */ )
+ _ = I3(i2 /* ERROR "cannot convert" */ )
+ _ = I3(i3)
+
+ // TODO(gri) add more tests, improve error message
+}
+
+func issue6326() {
+ type T unsafe.Pointer
+ var x T
+ _ = uintptr(x) // see issue 6326
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/conversions2.src b/src/cmd/compile/internal/types2/testdata/check/conversions2.src
new file mode 100644
index 0000000..93a5f18
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/conversions2.src
@@ -0,0 +1,313 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test various valid and invalid struct assignments and conversions.
+// Does not compile.
+
+package conversions2
+
+type I interface {
+ m()
+}
+
+// conversions between structs
+
+func _() {
+ type S struct{}
+ type T struct{}
+ var s S
+ var t T
+ var u struct{}
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u
+ t = T(u)
+}
+
+func _() {
+ type S struct{ x int }
+ type T struct {
+ x int "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x int "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = T(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct{ x E }
+ type T struct {
+ x E "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x E "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = T(u)
+}
+
+func _() {
+ type S struct {
+ x struct {
+ x int "foo"
+ }
+ }
+ type T struct {
+ x struct {
+ x int "bar"
+ } "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x struct {
+ x int "bar"
+ } "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = S(s)
+ s = S(t)
+ s = S(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = T(u)
+}
+
+func _() {
+ type E1 struct {
+ x int "foo"
+ }
+ type E2 struct {
+ x int "bar"
+ }
+ type S struct{ x E1 }
+ type T struct {
+ x E2 "foo"
+ }
+ var s S
+ var t T
+ var u struct {
+ x E2 "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = S(s)
+ s = S(t /* ERROR "cannot convert" */ )
+ s = S(u /* ERROR "cannot convert" */ )
+ t = u // ERROR "cannot use .* in assignment"
+ t = T(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(struct {
+ x int "bar"
+ })
+ }
+ var s S
+ var t T
+ var u struct{ f func(E) }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = S(s)
+ s = S(t)
+ s = S(u /* ERROR "cannot convert" */ )
+ t = u // ERROR "cannot use .* in assignment"
+ t = T(u /* ERROR "cannot convert" */ )
+}
+
+// conversions between pointers to structs
+
+func _() {
+ type S struct{}
+ type T struct{}
+ var s *S
+ var t *T
+ var u *struct{}
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u)
+}
+
+func _() {
+ type S struct{ x int }
+ type T struct {
+ x int "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x int "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct{ x E }
+ type T struct {
+ x E "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x E "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u)
+}
+
+func _() {
+ type S struct {
+ x struct {
+ x int "foo"
+ }
+ }
+ type T struct {
+ x struct {
+ x int "bar"
+ } "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x struct {
+ x int "bar"
+ } "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u)
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u)
+}
+
+func _() {
+ type E1 struct {
+ x int "foo"
+ }
+ type E2 struct {
+ x int "bar"
+ }
+ type S struct{ x E1 }
+ type T struct {
+ x E2 "foo"
+ }
+ var s *S
+ var t *T
+ var u *struct {
+ x E2 "bar"
+ }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t /* ERROR "cannot convert" */ )
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u)
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(struct {
+ x int "bar"
+ })
+ }
+ var s *S
+ var t *T
+ var u *struct{ f func(E) }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u /* ERROR "cannot convert" */ )
+}
+
+func _() {
+ type E struct{ x int }
+ type S struct {
+ f func(*struct {
+ x int "foo"
+ })
+ }
+ type T struct {
+ f func(*struct {
+ x int "bar"
+ })
+ }
+ var s *S
+ var t *T
+ var u *struct{ f func(E) }
+ s = s
+ s = t // ERROR "cannot use .* in assignment"
+ s = u // ERROR "cannot use .* in assignment"
+ s = (*S)(s)
+ s = (*S)(t)
+ s = (*S)(u /* ERROR "cannot convert" */ )
+ t = u // ERROR "cannot use .* in assignment"
+ t = (*T)(u /* ERROR "cannot convert" */ )
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles.src b/src/cmd/compile/internal/types2/testdata/check/cycles.src
new file mode 100644
index 0000000..998f9f7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles.src
@@ -0,0 +1,175 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package cycles
+
+import "unsafe"
+
+type (
+ T0 int
+ T1 /* ERROR cycle */ T1
+ T2 *T2
+
+ T3 /* ERROR cycle */ T4
+ T4 T5
+ T5 T3
+
+ T6 T7
+ T7 *T8
+ T8 T6
+
+ // arrays
+ A0 /* ERROR cycle */ [10]A0
+ A1 [10]*A1
+
+ A2 /* ERROR cycle */ [10]A3
+ A3 [10]A4
+ A4 A2
+
+ A5 [10]A6
+ A6 *A5
+
+ // slices
+ L0 []L0
+
+ // structs
+ S0 /* ERROR cycle */ struct{ _ S0 }
+ S1 /* ERROR cycle */ struct{ S1 }
+ S2 struct{ _ *S2 }
+ S3 struct{ *S3 }
+
+ S4 /* ERROR cycle */ struct{ S5 }
+ S5 struct{ S6 }
+ S6 S4
+
+ // pointers
+ P0 *P0
+ PP *struct{ PP.f /* ERROR no field or method f */ }
+
+ // functions
+ F0 func(F0)
+ F1 func() F1
+ F2 func(F2) F2
+
+ // interfaces
+ I0 /* ERROR cycle */ interface{ I0 }
+
+ I1 /* ERROR cycle */ interface{ I2 }
+ I2 interface{ I3 }
+ I3 interface{ I1 }
+
+ I4 interface{ f(I4) }
+
+ // testcase for issue 5090
+ I5 interface{ f(I6) }
+ I6 interface{ I5 }
+
+ // maps
+ M0 map[M0 /* ERROR invalid map key */ ]M0
+
+ // channels
+ C0 chan C0
+)
+
+// test case for issue #34771
+type (
+ AA /* ERROR cycle */ B
+ B C
+ C [10]D
+ D E
+ E AA
+)
+
+func _() {
+ type (
+ t1 /* ERROR cycle */ t1
+ t2 *t2
+
+ t3 t4 /* ERROR undeclared */
+ t4 t5 /* ERROR undeclared */
+ t5 t3
+
+ // arrays
+ a0 /* ERROR cycle */ [10]a0
+ a1 [10]*a1
+
+ // slices
+ l0 []l0
+
+ // structs
+ s0 /* ERROR cycle */ struct{ _ s0 }
+ s1 /* ERROR cycle */ struct{ s1 }
+ s2 struct{ _ *s2 }
+ s3 struct{ *s3 }
+
+ // pointers
+ p0 *p0
+
+ // functions
+ f0 func(f0)
+ f1 func() f1
+ f2 func(f2) f2
+
+ // interfaces
+ i0 /* ERROR cycle */ interface{ i0 }
+
+ // maps
+ m0 map[m0 /* ERROR invalid map key */ ]m0
+
+ // channels
+ c0 chan c0
+ )
+}
+
+// test cases for issue 6667
+
+type A [10]map[A /* ERROR invalid map key */ ]bool
+
+type S struct {
+ m map[S /* ERROR invalid map key */ ]bool
+}
+
+// test cases for issue 7236
+// (cycle detection must not be dependent on starting point of resolution)
+
+type (
+ P1 *T9
+ T9 /* ERROR cycle */ T9
+
+ T10 /* ERROR cycle */ T10
+ P2 *T10
+)
+
+func (T11) m() {}
+
+type T11 /* ERROR cycle */ struct{ T11 }
+
+type T12 /* ERROR cycle */ struct{ T12 }
+
+func (*T12) m() {}
+
+type (
+ P3 *T13
+ T13 /* ERROR cycle */ T13
+)
+
+// test cases for issue 18643
+// (type cycle detection when non-type expressions are involved)
+type (
+ T14 [len(T14 /* ERROR cycle */ {})]int
+ T15 [][len(T15 /* ERROR cycle */ {})]int
+ T16 map[[len(T16 /* ERROR cycle */ {1:2})]int]int
+ T17 map[int][len(T17 /* ERROR cycle */ {1:2})]int
+)
+
+// Test case for types depending on function literals (see also #22992).
+type T20 chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte
+type T22 = chan [unsafe.Sizeof(func(ch T20){ _ = <-ch })]byte
+
+func _() {
+ type T0 func(T0)
+ type T1 /* ERROR cycle */ = func(T1)
+ type T2 chan [unsafe.Sizeof(func(ch T2){ _ = <-ch })]byte
+ type T3 /* ERROR cycle */ = chan [unsafe.Sizeof(func(ch T3){ _ = <-ch })]byte
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles1.src b/src/cmd/compile/internal/types2/testdata/check/cycles1.src
new file mode 100644
index 0000000..ae2b38e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles1.src
@@ -0,0 +1,77 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ A interface {
+ a() interface {
+ ABC1
+ }
+ }
+ B interface {
+ b() interface {
+ ABC2
+ }
+ }
+ C interface {
+ c() interface {
+ ABC3
+ }
+ }
+
+ AB interface {
+ A
+ B
+ }
+ BC interface {
+ B
+ C
+ }
+
+ ABC1 interface {
+ A
+ B
+ C
+ }
+ ABC2 interface {
+ AB
+ C
+ }
+ ABC3 interface {
+ A
+ BC
+ }
+)
+
+var (
+ x1 ABC1
+ x2 ABC2
+ x3 ABC3
+)
+
+func _() {
+ // all types have the same method set
+ x1 = x2
+ x2 = x1
+
+ x1 = x3
+ x3 = x1
+
+ x2 = x3
+ x3 = x2
+
+ // all methods return the same type again
+ x1 = x1.a()
+ x1 = x1.b()
+ x1 = x1.c()
+
+ x2 = x2.a()
+ x2 = x2.b()
+ x2 = x2.c()
+
+ x3 = x3.a()
+ x3 = x3.b()
+ x3 = x3.c()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles2.src b/src/cmd/compile/internal/types2/testdata/check/cycles2.src
new file mode 100644
index 0000000..1a7f40a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles2.src
@@ -0,0 +1,98 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// Test case for issue 5090
+
+type t interface {
+ f(u)
+}
+
+type u interface {
+ t
+}
+
+func _() {
+ var t t
+ var u u
+
+ t.f(t)
+ t.f(u)
+
+ u.f(t)
+ u.f(u)
+}
+
+
+// Test case for issues #6589, #33656.
+
+type A interface {
+ a() interface {
+ AB
+ }
+}
+
+type B interface {
+ b() interface {
+ AB
+ }
+}
+
+type AB interface {
+ a() interface {
+ A
+ B
+ }
+ b() interface {
+ A
+ B
+ }
+}
+
+var x AB
+var y interface {
+ A
+ B
+}
+
+var _ = x == y
+
+
+// Test case for issue 6638.
+
+type T interface {
+ m() [T(nil).m /* ERROR undefined */ ()[0]]int
+}
+
+// Variations of this test case.
+
+type T1 /* ERROR cycle */ interface {
+ m() [x1.m()[0]]int
+}
+
+var x1 T1
+
+type T2 /* ERROR cycle */ interface {
+ m() [len(x2.m())]int
+}
+
+var x2 T2
+
+type T3 /* ERROR cycle */ interface {
+ m() [unsafe.Sizeof(x3.m)]int
+}
+
+var x3 T3
+
+type T4 /* ERROR cycle */ interface {
+ m() [unsafe.Sizeof(cast4(x4.m))]int // cast is invalid but we have a cycle, so all bets are off
+}
+
+var x4 T4
+var _ = cast4(x4.m)
+
+type cast4 func()
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles3.src b/src/cmd/compile/internal/types2/testdata/check/cycles3.src
new file mode 100644
index 0000000..5e89b62
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles3.src
@@ -0,0 +1,60 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+var (
+ _ A = A(nil).a().b().c().d().e().f()
+ _ A = A(nil).b().c().d().e().f()
+ _ A = A(nil).c().d().e().f()
+ _ A = A(nil).d().e().f()
+ _ A = A(nil).e().f()
+ _ A = A(nil).f()
+ _ A = A(nil)
+)
+
+type (
+ A interface {
+ a() B
+ B
+ }
+
+ B interface {
+ b() C
+ C
+ }
+
+ C interface {
+ c() D
+ D
+ }
+
+ D interface {
+ d() E
+ E
+ }
+
+ E interface {
+ e() F
+ F
+ }
+
+ F interface {
+ f() A
+ }
+)
+
+type (
+ U /* ERROR cycle */ interface {
+ V
+ }
+
+ V interface {
+ v() [unsafe.Sizeof(u)]int
+ }
+)
+
+var u U
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles4.src b/src/cmd/compile/internal/types2/testdata/check/cycles4.src
new file mode 100644
index 0000000..924aabf
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles4.src
@@ -0,0 +1,121 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// Check that all methods of T are collected before
+// determining the result type of m (which embeds
+// all methods of T).
+
+type T interface {
+ m() interface {T}
+ E
+}
+
+var _ int = T.m(nil).m().e()
+
+type E interface {
+ e() int
+}
+
+// Check that unresolved forward chains are followed
+// (see also comment in resolver.go, checker.typeDecl).
+
+var _ int = C.m(nil).m().e()
+
+type A B
+
+type B interface {
+ m() interface{C}
+ E
+}
+
+type C A
+
+// Check that interface type comparison for identity
+// does not recur endlessly.
+
+type T1 interface {
+ m() interface{T1}
+}
+
+type T2 interface {
+ m() interface{T2}
+}
+
+func _(x T1, y T2) {
+ // Checking for assignability of interfaces must check
+ // if all methods of x are present in y, and that they
+ // have identical signatures. The signatures recur via
+ // the result type, which is an interface that embeds
+ // a single method m that refers to the very interface
+ // that contains it. This requires cycle detection in
+ // identity checks for interface types.
+ x = y
+}
+
+type T3 interface {
+ m() interface{T4}
+}
+
+type T4 interface {
+ m() interface{T3}
+}
+
+func _(x T1, y T3) {
+ x = y
+}
+
+// Check that interfaces are type-checked in order of
+// (embedded interface) dependencies (was issue 7158).
+
+var x1 T5 = T7(nil)
+
+type T5 interface {
+ T6
+}
+
+type T6 interface {
+ m() T7
+}
+type T7 interface {
+ T5
+}
+
+// Actual test case from issue 7158.
+
+func wrapNode() Node {
+ return wrapElement()
+}
+
+func wrapElement() Element {
+ return nil
+}
+
+type EventTarget interface {
+ AddEventListener(Event)
+}
+
+type Node interface {
+ EventTarget
+}
+
+type Element interface {
+ Node
+}
+
+type Event interface {
+ Target() Element
+}
+
+// Check that accessing an interface method too early doesn't lead
+// to follow-on errors due to an incorrectly computed type set.
+
+type T8 interface {
+ m() [unsafe.Sizeof(T8.m /* ERROR undefined */ )]int
+}
+
+var _ = T8.m // no error expected here
diff --git a/src/cmd/compile/internal/types2/testdata/check/cycles5.src b/src/cmd/compile/internal/types2/testdata/check/cycles5.src
new file mode 100644
index 0000000..c932ef9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/cycles5.src
@@ -0,0 +1,200 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// test case from issue #18395
+
+type (
+ A interface { B }
+ B interface { C }
+ C interface { D; F() A }
+ D interface { G() B }
+)
+
+var _ = A(nil).G // G must be found
+
+
+// test case from issue #21804
+
+type sourceBridge interface {
+ listVersions() ([]Version, error)
+}
+
+type Constraint interface {
+ copyTo(*ConstraintMsg)
+}
+
+type ConstraintMsg struct{}
+
+func (m *ConstraintMsg) asUnpairedVersion() UnpairedVersion {
+ return nil
+}
+
+type Version interface {
+ Constraint
+}
+
+type UnpairedVersion interface {
+ Version
+}
+
+var _ Constraint = UnpairedVersion(nil)
+
+
+// derived test case from issue #21804
+
+type (
+ _ interface{ m(B1) }
+ A1 interface{ a(D1) }
+ B1 interface{ A1 }
+ C1 interface{ B1 }
+ D1 interface{ C1 }
+)
+
+var _ A1 = C1(nil)
+
+
+// derived test case from issue #22701
+
+func F(x I4) interface{} {
+ return x.Method()
+}
+
+type Unused interface {
+ RefersToI1(a I1)
+}
+
+type I1 interface {
+ I2
+ I3
+}
+
+type I2 interface {
+ RefersToI4() I4
+}
+
+type I3 interface {
+ Method() interface{}
+}
+
+type I4 interface {
+ I1
+}
+
+
+// check embedding of error interface
+
+type Error interface{ error }
+
+var err Error
+var _ = err.Error()
+
+
+// more esoteric cases
+
+type (
+ T1 interface { T2 }
+ T2 /* ERROR cycle */ T2
+)
+
+type (
+ T3 interface { T4 }
+ T4 /* ERROR cycle */ T5
+ T5 = T6
+ T6 = T7
+ T7 = T4
+)
+
+
+// arbitrary code may appear inside an interface
+
+const n = unsafe.Sizeof(func(){})
+
+type I interface {
+ m([unsafe.Sizeof(func() { I.m(nil, [n]byte{}) })]byte)
+}
+
+
+// test cases for various alias cycles
+
+type T10 /* ERROR cycle */ = *T10 // issue #25141
+type T11 /* ERROR cycle */ = interface{ f(T11) } // issue #23139
+
+// issue #18640
+type (
+ aa = bb
+ bb struct {
+ *aa
+ }
+)
+
+type (
+ a struct{ *b }
+ b = c
+ c struct{ *b /* ERROR invalid use of type alias */ }
+)
+
+// issue #24939
+type (
+ _ interface {
+ M(P)
+ }
+
+ M interface {
+ F() P // ERROR invalid use of type alias
+ }
+
+ P = interface {
+ I() M
+ }
+)
+
+// issue #8699
+type T12 /* ERROR cycle */ [len(a12)]int
+var a12 = makeArray()
+func makeArray() (res T12) { return }
+
+// issue #20770
+var r /* ERROR cycle */ = newReader()
+func newReader() r
+
+// variations of the theme of #8699 and #20770
+var arr /* ERROR cycle */ = f()
+func f() [len(arr)]int
+
+// issue #25790
+func ff(ff /* ERROR not a type */ )
+func gg((gg /* ERROR not a type */ ))
+
+type T13 /* ERROR cycle */ [len(b13)]int
+var b13 T13
+
+func g1() [unsafe.Sizeof(g1)]int
+func g2() [unsafe.Sizeof(x2)]int
+var x2 = g2
+
+// verify that we get the correct sizes for the functions above
+// (note: assert is statically evaluated in go/types test mode)
+func init() {
+ assert(unsafe.Sizeof(g1) == 8)
+ assert(unsafe.Sizeof(x2) == 8)
+}
+
+func h() [h /* ERROR no value */ ()[0]]int { panic(0) }
+
+var c14 /* ERROR cycle */ T14
+type T14 [uintptr(unsafe.Sizeof(&c14))]byte
+
+// issue #34333
+type T15 /* ERROR cycle */ struct {
+ f func() T16
+ b T16
+}
+
+type T16 struct {
+ T15
+} \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls0.src b/src/cmd/compile/internal/types2/testdata/check/decls0.src
new file mode 100644
index 0000000..09e5d5c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls0.src
@@ -0,0 +1,206 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type declarations
+
+package go1_17 // don't permit non-interface elements in interfaces
+
+import "unsafe"
+
+const pi = 3.1415
+
+type (
+ N undeclared /* ERROR "undeclared" */
+ B bool
+ I int32
+ A [10]P
+ T struct {
+ x, y P
+ }
+ P *T
+ R (*R)
+ F func(A) I
+ Y interface {
+ f(A) I
+ }
+ S [](((P)))
+ M map[I]F
+ C chan<- I
+
+ // blank types must be typechecked
+ _ pi /* ERROR "not a type" */
+ _ struct{}
+ _ struct{ pi /* ERROR "not a type" */ }
+)
+
+
+// declarations of init
+const _, init /* ERROR "cannot declare init" */ , _ = 0, 1, 2
+type init /* ERROR "cannot declare init" */ struct{}
+var _, init /* ERROR "cannot declare init" */ int
+
+func init() {}
+func init /* ERROR "missing function body" */ ()
+
+func _() { const init = 0 }
+func _() { type init int }
+func _() { var init int; _ = init }
+
+// invalid array types
+type (
+ iA0 [... /* ERROR "invalid use of \[...\] array" */ ]byte
+ // The error message below could be better. At the moment
+ // we believe an integer that is too large is not an integer.
+ // But at least we get an error.
+ iA1 [1 /* ERROR "must be integer" */ <<100]int
+ iA2 [- /* ERROR "invalid array length" */ 1]complex128
+ iA3 ["foo" /* ERROR "must be integer" */ ]string
+ iA4 [float64 /* ERROR "must be integer" */ (0)]int
+)
+
+
+type (
+ p1 pi.foo /* ERROR "no field or method foo" */
+ p2 unsafe.Pointer
+)
+
+
+type (
+ Pi pi /* ERROR "not a type" */
+
+ a /* ERROR "illegal cycle" */ a
+ a /* ERROR "redeclared" */ int
+
+ b /* ERROR "illegal cycle" */ c
+ c d
+ d e
+ e b
+
+ t *t
+
+ U V
+ V *W
+ W U
+
+ P1 *S2
+ P2 P1
+
+ S0 struct {
+ }
+ S1 struct {
+ a, b, c int
+ u, v, a /* ERROR "redeclared" */ float32
+ }
+ S2 struct {
+ S0 // embedded field
+ S0 /* ERROR "redeclared" */ int
+ }
+ S3 struct {
+ x S2
+ }
+ S4/* ERROR "illegal cycle" */ struct {
+ S4
+ }
+ S5 /* ERROR "illegal cycle" */ struct {
+ S6
+ }
+ S6 struct {
+ field S7
+ }
+ S7 struct {
+ S5
+ }
+
+ L1 []L1
+ L2 []int
+
+ A1 [10.0]int
+ A2 /* ERROR "illegal cycle" */ [10]A2
+ A3 /* ERROR "illegal cycle" */ [10]struct {
+ x A4
+ }
+ A4 [10]A3
+
+ F1 func()
+ F2 func(x, y, z float32)
+ F3 func(x, y, x /* ERROR "redeclared" */ float32)
+ F4 func() (x, y, x /* ERROR "redeclared" */ float32)
+ F5 func(x int) (x /* ERROR "redeclared" */ float32)
+ F6 func(x ...int)
+
+ I1 interface{}
+ I2 interface {
+ m1()
+ }
+ I3 interface {
+ m1()
+ m1 /* ERROR "duplicate method" */ ()
+ }
+ I4 interface {
+ m1(x, y, x /* ERROR "redeclared" */ float32)
+ m2() (x, y, x /* ERROR "redeclared" */ float32)
+ m3(x int) (x /* ERROR "redeclared" */ float32)
+ }
+ I5 interface {
+ m1(I5)
+ }
+ I6 interface {
+ S0 /* ERROR "non-interface type S0" */
+ }
+ I7 interface {
+ I1
+ I1
+ }
+ I8 /* ERROR "illegal cycle" */ interface {
+ I8
+ }
+ I9 /* ERROR "illegal cycle" */ interface {
+ I10
+ }
+ I10 interface {
+ I11
+ }
+ I11 interface {
+ I9
+ }
+
+ C1 chan int
+ C2 <-chan int
+ C3 chan<- C3
+ C4 chan C5
+ C5 chan C6
+ C6 chan C4
+
+ M1 map[Last]string
+ M2 map[string]M2
+
+ Last int
+)
+
+// cycles in function/method declarations
+// (test cases for issues #5217, #25790 and variants)
+func f1(x f1 /* ERROR "not a type" */ ) {}
+func f2(x *f2 /* ERROR "not a type" */ ) {}
+func f3() (x f3 /* ERROR "not a type" */ ) { return }
+func f4() (x *f4 /* ERROR "not a type" */ ) { return }
+
+func (S0) m1(x S0 /* ERROR illegal cycle in method declaration */ .m1) {}
+func (S0) m2(x *S0 /* ERROR illegal cycle in method declaration */ .m2) {}
+func (S0) m3() (x S0 /* ERROR illegal cycle in method declaration */ .m3) { return }
+func (S0) m4() (x *S0 /* ERROR illegal cycle in method declaration */ .m4) { return }
+
+// interfaces may not have any blank methods
+type BlankI interface {
+ _ /* ERROR "invalid method name" */ ()
+ _ /* ERROR "invalid method name" */ (float32) int
+ m()
+}
+
+// non-interface types may have multiple blank methods
+type BlankT struct{}
+
+func (BlankT) _() {}
+func (BlankT) _(int) {}
+func (BlankT) _() int { return 0 }
+func (BlankT) _(int) int { return 0}
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls1.src b/src/cmd/compile/internal/types2/testdata/check/decls1.src
new file mode 100644
index 0000000..1167ced
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls1.src
@@ -0,0 +1,144 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// variable declarations
+
+package decls1
+
+import (
+ "math"
+)
+
+// Global variables without initialization
+var (
+ a, b bool
+ c byte
+ d uint8
+ r rune
+ i int
+ j, k, l int
+ x, y float32
+ xx, yy float64
+ u, v complex64
+ uu, vv complex128
+ s, t string
+ array []byte
+ iface interface{}
+
+ blank _ /* ERROR "cannot use _" */
+)
+
+// Global variables with initialization
+var (
+ s1 = i + j
+ s2 = i /* ERROR "mismatched types" */ + x
+ s3 = c + d
+ s4 = s + t
+ s5 = s /* ERROR "invalid operation" */ / t
+ s6 = array[t1]
+ s7 = array[x /* ERROR "integer" */]
+ s8 = &a
+ s10 = &42 /* ERROR "cannot take address" */
+ s11 = &v
+ s12 = -(u + *t11) / *&v
+ s13 = a /* ERROR "shifted operand" */ << d
+ s14 = i << j
+ s18 = math.Pi * 10.0
+ s19 = s1 /* ERROR "cannot call" */ ()
+ s20 = f0 /* ERROR "no value" */ ()
+ s21 = f6(1, s1, i)
+ s22 = f6(1, s1, uu /* ERROR "cannot use .* in argument" */ )
+
+ t1 int = i + j
+ t2 int = i /* ERROR "mismatched types" */ + x
+ t3 int = c /* ERROR "cannot use .* variable declaration" */ + d
+ t4 string = s + t
+ t5 string = s /* ERROR "invalid operation" */ / t
+ t6 byte = array[t1]
+ t7 byte = array[x /* ERROR "must be integer" */]
+ t8 *int = & /* ERROR "cannot use .* variable declaration" */ a
+ t10 *int = &42 /* ERROR "cannot take address" */
+ t11 *complex64 = &v
+ t12 complex64 = -(u + *t11) / *&v
+ t13 int = a /* ERROR "shifted operand" */ << d
+ t14 int = i << j
+ t15 math /* ERROR "not in selector" */
+ t16 math.xxx /* ERROR "not declared" */
+ t17 math /* ERROR "not a type" */ .Pi
+ t18 float64 = math.Pi * 10.0
+ t19 int = t1 /* ERROR "cannot call" */ ()
+ t20 int = f0 /* ERROR "no value" */ ()
+ t21 int = a /* ERROR "cannot use .* variable declaration" */
+)
+
+// Various more complex expressions
+var (
+ u1 = x /* ERROR "not an interface" */ .(int)
+ u2 = iface.([]int)
+ u3 = iface.(a /* ERROR "not a type" */ )
+ u4, ok = iface.(int)
+ u5, ok2, ok3 = iface /* ERROR "cannot initialize" */ .(int)
+)
+
+// Constant expression initializations
+var (
+ v1 = 1 /* ERROR "mismatched types untyped int and untyped string" */ + "foo"
+ v2 = c + 255
+ v3 = c + 256 /* ERROR "overflows" */
+ v4 = r + 2147483647
+ v5 = r + 2147483648 /* ERROR "overflows" */
+ v6 = 42
+ v7 = v6 + 9223372036854775807
+ v8 = v6 + 9223372036854775808 /* ERROR "overflows" */
+ v9 = i + 1 << 10
+ v10 byte = 1024 /* ERROR "overflows" */
+ v11 = xx/yy*yy - xx
+ v12 = true && false
+ v13 = nil /* ERROR "use of untyped nil" */
+)
+
+// Multiple assignment expressions
+var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+)
+
+func _() {
+ var (
+ m1a, m1b = 1, 2
+ m2a, m2b, m2c /* ERROR "missing init expr for m2c" */ = 1, 2
+ m3a, m3b = 1, 2, 3 /* ERROR "extra init expr 3" */
+ )
+
+ _, _ = m1a, m1b
+ _, _, _ = m2a, m2b, m2c
+ _, _ = m3a, m3b
+}
+
+// Declaration of parameters and results
+func f0() {}
+func f1(a /* ERROR "not a type" */) {}
+func f2(a, b, c d /* ERROR "not a type" */) {}
+
+func f3() int { return 0 }
+func f4() a /* ERROR "not a type" */ { return 0 }
+func f5() (a, b, c d /* ERROR "not a type" */) { return }
+
+func f6(a, b, c int) complex128 { return 0 }
+
+// Declaration of receivers
+type T struct{}
+
+func (T) m0() {}
+func (*T) m1() {}
+func (x T) m2() {}
+func (x *T) m3() {}
+
+// Initialization functions
+func init() {}
+func /* ERROR "no arguments and no return values" */ init(int) {}
+func /* ERROR "no arguments and no return values" */ init() int { return 0 }
+func /* ERROR "no arguments and no return values" */ init(int) int { return 0 }
+func (T) init(int) int { return 0 }
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src
new file mode 100644
index 0000000..d077db5
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2a.src
@@ -0,0 +1,111 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "time"
+import "unsafe"
+
+// T1 declared before its methods.
+type T1 struct{
+ f int
+}
+
+func (T1) m() {}
+func (T1) m /* ERROR "already declared" */ () {}
+func (x *T1) f /* ERROR "field and method" */ () {}
+
+// Conflict between embedded field and method name,
+// with the embedded field being a basic type.
+type T1b struct {
+ int
+}
+
+func (T1b) int /* ERROR "field and method" */ () {}
+
+type T1c struct {
+ time.Time
+}
+
+func (T1c) Time /* ERROR "field and method" */ () int { return 0 }
+
+// Disabled for now: LookupFieldOrMethod will find Pointer even though
+// it's double-declared (it would cost extra in the common case to verify
+// this). But the MethodSet computation will not find it due to the name
+// collision caused by the double-declaration, leading to an internal
+// inconsistency while we are verifying one computation against the other.
+// var _ = T1c{}.Pointer
+
+// T2's method declared before the type.
+func (*T2) f /* ERROR "field and method" */ () {}
+
+type T2 struct {
+ f int
+}
+
+// Methods declared without a declared type.
+func (undeclared /* ERROR "undeclared" */) m() {}
+func (x *undeclared /* ERROR "undeclared" */) m() {}
+
+func (pi /* ERROR "not a type" */) m1() {}
+func (x pi /* ERROR "not a type" */) m2() {}
+func (x *pi /* ERROR "not a type" */ ) m3() {}
+
+// Blank types.
+type _ struct { m int }
+type _ struct { m int }
+
+func (_ /* ERROR "cannot use _" */) m() {}
+func m(_ /* ERROR "cannot use _" */) {}
+
+// Methods with receiver base type declared in another file.
+func (T3) m1() {}
+func (*T3) m2() {}
+func (x T3) m3() {}
+func (x *T3) f /* ERROR "field and method" */ () {}
+
+// Methods of non-struct type.
+type T4 func()
+
+func (self T4) m() func() { return self }
+
+// Methods associated with an interface.
+type T5 interface {
+ m() int
+}
+
+func (T5 /* ERROR "invalid receiver" */ ) m1() {}
+func (T5 /* ERROR "invalid receiver" */ ) m2() {}
+
+// Methods associated with a named pointer type.
+type ptr *int
+func (ptr /* ERROR "invalid receiver" */ ) _() {}
+func (* /* ERROR "invalid receiver" */ ptr) _() {}
+
+// Methods with zero or multiple receivers.
+func ( /* ERROR "no receiver" */ ) _() {}
+func (T3, * /* ERROR "multiple receivers" */ T3) _() {}
+func (T3, T3, T3 /* ERROR "multiple receivers" */ ) _() {}
+func (a, b /* ERROR "multiple receivers" */ T3) _() {}
+func (a, b, c /* ERROR "multiple receivers" */ T3) _() {}
+
+// Methods associated with non-local or unnamed types.
+func (int /* ERROR "invalid receiver" */ ) m() {}
+func ([ /* ERROR "invalid receiver" */ ]int) m() {}
+func (time /* ERROR "invalid receiver" */ .Time) m() {}
+func (* /* ERROR "invalid receiver" */ time.Time) m() {}
+func (x /* ERROR "invalid receiver" */ interface{}) m() {}
+
+// Unsafe.Pointer is treated like a pointer when used as receiver type.
+type UP unsafe.Pointer
+func (UP /* ERROR "invalid" */ ) m1() {}
+func (* /* ERROR "invalid" */ UP) m2() {}
+
+// Double declarations across package files
+const c_double = 0
+type t_double int
+var v_double int
+func f_double() {}
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src
new file mode 100644
index 0000000..7b3229c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls2/decls2b.src
@@ -0,0 +1,75 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// method declarations
+
+package decls2
+
+import "io"
+
+const pi = 3.1415
+
+func (T1) m /* ERROR "already declared" */ () {}
+func (T2) m(io.Writer) {}
+
+type T3 struct {
+ f *T3
+}
+
+type T6 struct {
+ x int
+}
+
+func (t *T6) m1() int {
+ return t.x
+}
+
+func f() {
+ var t *T6
+ t.m1()
+}
+
+// Double declarations across package files
+const c_double /* ERROR "redeclared" */ = 0
+type t_double /* ERROR "redeclared" */ int
+var v_double /* ERROR "redeclared" */ int
+func f_double /* ERROR "redeclared" */ () {}
+
+// Blank methods need to be type-checked.
+// Verify by checking that errors are reported.
+func (T /* ERROR "undeclared" */ ) _() {}
+func (T1) _(undeclared /* ERROR "undeclared" */ ) {}
+func (T1) _() int { return "foo" /* ERROR "cannot use .* in return statement" */ }
+
+// Methods with undeclared receiver type can still be checked.
+// Verify by checking that errors are reported.
+func (Foo /* ERROR "undeclared" */ ) m() {}
+func (Foo /* ERROR "undeclared" */ ) m(undeclared /* ERROR "undeclared" */ ) {}
+func (Foo /* ERROR "undeclared" */ ) m() int { return "foo" /* ERROR "cannot use .* in return statement" */ }
+
+func (Foo /* ERROR "undeclared" */ ) _() {}
+func (Foo /* ERROR "undeclared" */ ) _(undeclared /* ERROR "undeclared" */ ) {}
+func (Foo /* ERROR "undeclared" */ ) _() int { return "foo" /* ERROR "cannot use .* in return statement" */ }
+
+// Receiver declarations are regular parameter lists;
+// receiver types may use parentheses, and the list
+// may have a trailing comma.
+type T7 struct {}
+
+func (T7) m1() {}
+func ((T7)) m2() {}
+func ((*T7)) m3() {}
+func (x *(T7),) m4() {}
+func (x (*(T7)),) m5() {}
+func (x ((*((T7)))),) m6() {}
+
+// Check that methods with parenthesized receiver are actually present (issue #23130).
+var (
+ _ = T7.m1
+ _ = T7.m2
+ _ = (*T7).m3
+ _ = (*T7).m4
+ _ = (*T7).m5
+ _ = (*T7).m6
+)
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls3.src b/src/cmd/compile/internal/types2/testdata/check/decls3.src
new file mode 100644
index 0000000..d7a0c44
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls3.src
@@ -0,0 +1,309 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// embedded types
+
+package decls3
+
+import "unsafe"
+import "fmt"
+
+// fields with the same name at the same level cancel each other out
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { X int }
+ T3 struct { T1; T2 } // X is embedded twice at the same level via T1->X, T2->X
+ )
+
+ var t T3
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func _() {
+ type (
+ T1 struct { X int }
+ T2 struct { T1 }
+ T3 struct { T1 }
+ T4 struct { T2; T3 } // X is embedded twice at the same level via T2->T1->X, T3->T1->X
+ )
+
+ var t T4
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func issue4355() {
+ type (
+ T1 struct {X int}
+ T2 struct {T1}
+ T3 struct {T2}
+ T4 struct {T2}
+ T5 struct {T3; T4} // X is embedded twice at the same level via T3->T2->T1->X, T4->T2->T1->X
+ )
+
+ var t T5
+ _ = t.X /* ERROR "ambiguous selector t.X" */
+}
+
+func _() {
+ type State int
+ type A struct{ State }
+ type B struct{ fmt.State }
+ type T struct{ A; B }
+
+ var t T
+ _ = t.State /* ERROR "ambiguous selector t.State" */
+}
+
+// Embedded fields can be predeclared types.
+
+func _() {
+ type T0 struct{
+ int
+ float32
+ f int
+ }
+ var x T0
+ _ = x.int
+ _ = x.float32
+ _ = x.f
+
+ type T1 struct{
+ T0
+ }
+ var y T1
+ _ = y.int
+ _ = y.float32
+ _ = y.f
+}
+
+// Restrictions on embedded field types.
+
+func _() {
+ type I1 interface{}
+ type I2 interface{}
+ type P1 *int
+ type P2 *int
+ type UP unsafe.Pointer
+
+ type T1 struct {
+ I1
+ * /* ERROR "cannot be a pointer to an interface" */ I2
+ * /* ERROR "cannot be a pointer to an interface" */ error
+ P1 /* ERROR "cannot be a pointer" */
+ * /* ERROR "cannot be a pointer" */ P2
+ }
+
+ // unsafe.Pointers are treated like regular pointers when embedded
+ type T2 struct {
+ unsafe /* ERROR "cannot be unsafe.Pointer" */ .Pointer
+ */* ERROR "cannot be unsafe.Pointer" */ /* ERROR "Pointer redeclared" */ unsafe.Pointer
+ UP /* ERROR "cannot be unsafe.Pointer" */
+ * /* ERROR "cannot be unsafe.Pointer" */ /* ERROR "UP redeclared" */ UP
+ }
+}
+
+// Named types that are pointers.
+
+type S struct{ x int }
+func (*S) m() {}
+type P *S
+
+func _() {
+ var s *S
+ _ = s.x
+ _ = s.m
+
+ var p P
+ _ = p.x
+ _ = p.m /* ERROR "no field or method" */
+ _ = P.m /* ERROR "no field or method" */
+}
+
+// Borrowed from the FieldByName test cases in reflect/all_test.go.
+
+type D1 struct {
+ d int
+}
+type D2 struct {
+ d int
+}
+
+type S0 struct {
+ A, B, C int
+ D1
+ D2
+}
+
+type S1 struct {
+ B int
+ S0
+}
+
+type S2 struct {
+ A int
+ *S1
+}
+
+type S1x struct {
+ S1
+}
+
+type S1y struct {
+ S1
+}
+
+type S3 struct {
+ S1x
+ S2
+ D, E int
+ *S1y
+}
+
+type S4 struct {
+ *S4
+ A int
+}
+
+// The X in S6 and S7 annihilate, but they also block the X in S8.S9.
+type S5 struct {
+ S6
+ S7
+ S8
+}
+
+type S6 struct {
+ X int
+}
+
+type S7 S6
+
+type S8 struct {
+ S9
+}
+
+type S9 struct {
+ X int
+ Y int
+}
+
+// The X in S11.S6 and S12.S6 annihilate, but they also block the X in S13.S8.S9.
+type S10 struct {
+ S11
+ S12
+ S13
+}
+
+type S11 struct {
+ S6
+}
+
+type S12 struct {
+ S6
+}
+
+type S13 struct {
+ S8
+}
+
+func _() {
+ _ = struct{}{}.Foo /* ERROR "no field or method" */
+ _ = S0{}.A
+ _ = S0{}.D /* ERROR "no field or method" */
+ _ = S1{}.A
+ _ = S1{}.B
+ _ = S1{}.S0
+ _ = S1{}.C
+ _ = S2{}.A
+ _ = S2{}.S1
+ _ = S2{}.B
+ _ = S2{}.C
+ _ = S2{}.D /* ERROR "no field or method" */
+ _ = S3{}.S1 /* ERROR "ambiguous selector S3\{\}.S1" */
+ _ = S3{}.A
+ _ = S3{}.B /* ERROR "ambiguous selector" S3\{\}.B */
+ _ = S3{}.D
+ _ = S3{}.E
+ _ = S4{}.A
+ _ = S4{}.B /* ERROR "no field or method" */
+ _ = S5{}.X /* ERROR "ambiguous selector S5\{\}.X" */
+ _ = S5{}.Y
+ _ = S10{}.X /* ERROR "ambiguous selector S10\{\}.X" */
+ _ = S10{}.Y
+}
+
+// Borrowed from the FieldByName benchmark in reflect/all_test.go.
+
+type R0 struct {
+ *R1
+ *R2
+ *R3
+ *R4
+}
+
+type R1 struct {
+ *R5
+ *R6
+ *R7
+ *R8
+}
+
+type R2 R1
+type R3 R1
+type R4 R1
+
+type R5 struct {
+ *R9
+ *R10
+ *R11
+ *R12
+}
+
+type R6 R5
+type R7 R5
+type R8 R5
+
+type R9 struct {
+ *R13
+ *R14
+ *R15
+ *R16
+}
+
+type R10 R9
+type R11 R9
+type R12 R9
+
+type R13 struct {
+ *R17
+ *R18
+ *R19
+ *R20
+}
+
+type R14 R13
+type R15 R13
+type R16 R13
+
+type R17 struct {
+ *R21
+ *R22
+ *R23
+ *R24
+}
+
+type R18 R17
+type R19 R17
+type R20 R17
+
+type R21 struct {
+ X int
+}
+
+type R22 R21
+type R23 R21
+type R24 R21
+
+var _ = R0{}.X /* ERROR "ambiguous selector R0\{\}.X" */
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls4.src b/src/cmd/compile/internal/types2/testdata/check/decls4.src
new file mode 100644
index 0000000..eb08421
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls4.src
@@ -0,0 +1,199 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// type aliases
+
+package decls4
+
+type (
+ T0 [10]int
+ T1 []byte
+ T2 struct {
+ x int
+ }
+ T3 interface{
+ m() T2
+ }
+ T4 func(int, T0) chan T2
+)
+
+type (
+ Ai = int
+ A0 = T0
+ A1 = T1
+ A2 = T2
+ A3 = T3
+ A4 = T4
+
+ A10 = [10]int
+ A11 = []byte
+ A12 = struct {
+ x int
+ }
+ A13 = interface{
+ m() A2
+ }
+ A14 = func(int, A0) chan A2
+)
+
+// check assignment compatibility due to equality of types
+var (
+ xi_ int
+ ai Ai = xi_
+
+ x0 T0
+ a0 A0 = x0
+
+ x1 T1
+ a1 A1 = x1
+
+ x2 T2
+ a2 A2 = x2
+
+ x3 T3
+ a3 A3 = x3
+
+ x4 T4
+ a4 A4 = x4
+)
+
+// alias receiver types
+func (Ai /* ERROR "invalid receiver" */) m1() {}
+func (T0) m1() {}
+func (A0) m1 /* ERROR already declared */ () {}
+func (A0) m2 () {}
+func (A3 /* ERROR invalid receiver */ ) m1 () {}
+func (A10 /* ERROR invalid receiver */ ) m1() {}
+
+// x0 has methods m1, m2 declared via receiver type names T0 and A0
+var _ interface{ m1(); m2() } = x0
+
+// alias receiver types (test case for issue #23042)
+type T struct{}
+
+var (
+ _ = T.m
+ _ = T{}.m
+ _ interface{m()} = T{}
+)
+
+var (
+ _ = T.n
+ _ = T{}.n
+ _ interface{m(); n()} = T{}
+)
+
+type U = T
+func (U) m() {}
+
+// alias receiver types (long type declaration chains)
+type (
+ V0 = V1
+ V1 = (V2)
+ V2 = ((V3))
+ V3 = T
+)
+
+func (V0) m /* ERROR already declared */ () {}
+func (V1) n() {}
+
+// alias receiver types (invalid due to cycles)
+type (
+ W0 /* ERROR illegal cycle */ = W1
+ W1 = (W2)
+ W2 = ((W0))
+)
+
+func (W0) m() {} // no error expected (due to above cycle error)
+func (W1) n() {}
+
+// alias receiver types (invalid due to builtin underlying type)
+type (
+ B0 = B1
+ B1 = B2
+ B2 = int
+)
+
+func (B0 /* ERROR invalid receiver */ ) m() {}
+func (B1 /* ERROR invalid receiver */ ) n() {}
+
+// cycles
+type (
+ C2 /* ERROR illegal cycle */ = C2
+ C3 /* ERROR illegal cycle */ = C4
+ C4 = C3
+ C5 struct {
+ f *C6
+ }
+ C6 = C5
+ C7 /* ERROR illegal cycle */ struct {
+ f C8
+ }
+ C8 = C7
+)
+
+// embedded fields
+var (
+ s0 struct { T0 }
+ s1 struct { A0 } = s0 /* ERROR cannot use */ // embedded field names are different
+)
+
+// embedding and lookup of fields and methods
+func _(s struct{A0}) { s.A0 = x0 }
+
+type eX struct{xf int}
+
+func (eX) xm()
+
+type eY = struct{eX} // field/method set of eY includes xf, xm
+
+type eZ = *struct{eX} // field/method set of eZ includes xf, xm
+
+type eA struct {
+ eX // eX contributes xf, xm to eA
+}
+
+type eA2 struct {
+ *eX // *eX contributes xf, xm to eA2
+}
+
+type eB struct {
+ eY // eY contributes xf, xm to eB
+}
+
+type eB2 struct {
+ *eY // *eY contributes xf, xm to eB2
+}
+
+type eC struct {
+ eZ // eZ contributes xf, xm to eC
+}
+
+var (
+ _ = eA{}.xf
+ _ = eA{}.xm
+ _ = eA2{}.xf
+ _ = eA2{}.xm
+ _ = eB{}.xf
+ _ = eB{}.xm
+ _ = eB2{}.xf
+ _ = eB2{}.xm
+ _ = eC{}.xf
+ _ = eC{}.xm
+)
+
+// ambiguous selectors due to embedding via type aliases
+type eD struct {
+ eY
+ eZ
+}
+
+var (
+ _ = eD{}.xf /* ERROR ambiguous selector eD\{\}.xf */
+ _ = eD{}.xm /* ERROR ambiguous selector eD\{\}.xm */
+)
+
+var (
+ _ interface{ xm() } = eD /* ERROR missing method xm */ {}
+)
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/decls5.src b/src/cmd/compile/internal/types2/testdata/check/decls5.src
new file mode 100644
index 0000000..88d3194
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/decls5.src
@@ -0,0 +1,10 @@
+// Copyright 2017 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+// declarations of main
+const _, main /* ERROR "cannot declare main" */ , _ = 0, 1, 2
+type main /* ERROR "cannot declare main" */ struct{}
+var _, main /* ERROR "cannot declare main" */ int
diff --git a/src/cmd/compile/internal/types2/testdata/check/errors.src b/src/cmd/compile/internal/types2/testdata/check/errors.src
new file mode 100644
index 0000000..5f09197
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/errors.src
@@ -0,0 +1,66 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package errors
+
+// Testing precise operand formatting in error messages
+// (matching messages are regular expressions, hence the \'s).
+func f(x int, m map[string]int) {
+ // no values
+ _ = f /* ERROR f\(0, m\) \(no value\) used as value */ (0, m)
+
+ // built-ins
+ _ = println // ERROR println \(built-in\) must be called
+
+ // types
+ _ = complex128 // ERROR complex128 \(type\) is not an expression
+
+ // constants
+ const c1 = 991
+ const c2 float32 = 0.5
+ const c3 = "foo"
+ 0 // ERROR 0 \(untyped int constant\) is not used
+ 0.5 // ERROR 0.5 \(untyped float constant\) is not used
+ "foo" // ERROR "foo" \(untyped string constant\) is not used
+ c1 // ERROR c1 \(untyped int constant 991\) is not used
+ c2 // ERROR c2 \(constant 0.5 of type float32\) is not used
+ c1 /* ERROR c1 \+ c2 \(constant 991.5 of type float32\) is not used */ + c2
+ c3 // ERROR c3 \(untyped string constant "foo"\) is not used
+
+ // variables
+ x // ERROR x \(variable of type int\) is not used
+
+ // values
+ nil // ERROR nil is not used
+ (*int)(nil) // ERROR \(\*int\)\(nil\) \(value of type \*int\) is not used
+ x /* ERROR x != x \(untyped bool value\) is not used */ != x
+ x /* ERROR x \+ x \(value of type int\) is not used */ + x
+
+ // value, ok's
+ const s = "foo"
+ m /* ERROR m\[s\] \(map index expression of type int\) is not used */ [s]
+}
+
+// Valid ERROR comments can have a variety of forms.
+func _() {
+ 0 /* ERROR "0 .* is not used" */
+ 0 /* ERROR 0 .* is not used */
+ 0 // ERROR "0 .* is not used"
+ 0 // ERROR 0 .* is not used
+}
+
+// Don't report spurious errors as a consequence of earlier errors.
+// Add more tests as needed.
+func _() {
+ if err := foo /* ERROR undeclared */ (); err != nil /* no error here */ {}
+}
+
+// Use unqualified names for package-local objects.
+type T struct{}
+var _ int = T /* ERROR value of type T */ {} // use T in error message rather than errors.T
+
+// Don't report errors containing "invalid type" (issue #24182).
+func _(x *missing /* ERROR undeclared name: missing */ ) {
+ x.m() // there shouldn't be an error here referring to *invalid type
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr0.src b/src/cmd/compile/internal/types2/testdata/check/expr0.src
new file mode 100644
index 0000000..1aac726
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/expr0.src
@@ -0,0 +1,180 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// unary expressions
+
+package expr0
+
+type mybool bool
+
+var (
+ // bool
+ b0 = true
+ b1 bool = b0
+ b2 = !true
+ b3 = !b1
+ b4 bool = !true
+ b5 bool = !b4
+ b6 = +b0 /* ERROR "not defined" */
+ b7 = -b0 /* ERROR "not defined" */
+ b8 = ^b0 /* ERROR "not defined" */
+ b9 = *b0 /* ERROR "cannot indirect" */
+ b10 = &true /* ERROR "cannot take address" */
+ b11 = &b0
+ b12 = <-b0 /* ERROR "cannot receive" */
+ b13 = & & /* ERROR "cannot take address" */ b0
+
+ // byte
+ _ = byte(0)
+ _ = byte(- /* ERROR "cannot convert" */ 1)
+ _ = - /* ERROR "-byte\(1\) \(constant -1 of type byte\) overflows byte" */ byte(1) // test for issue 11367
+ _ = byte /* ERROR "overflows byte" */ (0) - byte(1)
+
+ // int
+ i0 = 1
+ i1 int = i0
+ i2 = +1
+ i3 = +i0
+ i4 int = +1
+ i5 int = +i4
+ i6 = -1
+ i7 = -i0
+ i8 int = -1
+ i9 int = -i4
+ i10 = !i0 /* ERROR "not defined" */
+ i11 = ^1
+ i12 = ^i0
+ i13 int = ^1
+ i14 int = ^i4
+ i15 = *i0 /* ERROR "cannot indirect" */
+ i16 = &i0
+ i17 = *i16
+ i18 = <-i16 /* ERROR "cannot receive" */
+
+ // uint
+ u0 = uint(1)
+ u1 uint = u0
+ u2 = +1
+ u3 = +u0
+ u4 uint = +1
+ u5 uint = +u4
+ u6 = -1
+ u7 = -u0
+ u8 uint = - /* ERROR "overflows" */ 1
+ u9 uint = -u4
+ u10 = !u0 /* ERROR "not defined" */
+ u11 = ^1
+ u12 = ^i0
+ u13 uint = ^ /* ERROR "overflows" */ 1
+ u14 uint = ^u4
+ u15 = *u0 /* ERROR "cannot indirect" */
+ u16 = &u0
+ u17 = *u16
+ u18 = <-u16 /* ERROR "cannot receive" */
+ u19 = ^uint(0)
+
+ // float64
+ f0 = float64(1)
+ f1 float64 = f0
+ f2 = +1
+ f3 = +f0
+ f4 float64 = +1
+ f5 float64 = +f4
+ f6 = -1
+ f7 = -f0
+ f8 float64 = -1
+ f9 float64 = -f4
+ f10 = !f0 /* ERROR "not defined" */
+ f11 = ^1
+ f12 = ^i0
+ f13 float64 = ^1
+ f14 float64 = ^f4 /* ERROR "not defined" */
+ f15 = *f0 /* ERROR "cannot indirect" */
+ f16 = &f0
+ f17 = *u16
+ f18 = <-u16 /* ERROR "cannot receive" */
+
+ // complex128
+ c0 = complex128(1)
+ c1 complex128 = c0
+ c2 = +1
+ c3 = +c0
+ c4 complex128 = +1
+ c5 complex128 = +c4
+ c6 = -1
+ c7 = -c0
+ c8 complex128 = -1
+ c9 complex128 = -c4
+ c10 = !c0 /* ERROR "not defined" */
+ c11 = ^1
+ c12 = ^i0
+ c13 complex128 = ^1
+ c14 complex128 = ^c4 /* ERROR "not defined" */
+ c15 = *c0 /* ERROR "cannot indirect" */
+ c16 = &c0
+ c17 = *u16
+ c18 = <-u16 /* ERROR "cannot receive" */
+
+ // string
+ s0 = "foo"
+ s1 = +"foo" /* ERROR "not defined" */
+ s2 = -s0 /* ERROR "not defined" */
+ s3 = !s0 /* ERROR "not defined" */
+ s4 = ^s0 /* ERROR "not defined" */
+ s5 = *s4
+ s6 = &s4
+ s7 = *s6
+ s8 = <-s7
+
+ // channel
+ ch chan int
+ rc <-chan float64
+ sc chan <- string
+ ch0 = +ch /* ERROR "not defined" */
+ ch1 = -ch /* ERROR "not defined" */
+ ch2 = !ch /* ERROR "not defined" */
+ ch3 = ^ch /* ERROR "not defined" */
+ ch4 = *ch /* ERROR "cannot indirect" */
+ ch5 = &ch
+ ch6 = *ch5
+ ch7 = <-ch
+ ch8 = <-rc
+ ch9 = <-sc /* ERROR "cannot receive" */
+ ch10, ok = <-ch
+ // ok is of type bool
+ ch11, myok = <-ch
+ _ mybool = myok /* ERROR "cannot use .* in variable declaration" */
+)
+
+// address of composite literals
+type T struct{x, y int}
+
+func f() T { return T{} }
+
+var (
+ _ = &T{1, 2}
+ _ = &[...]int{}
+ _ = &[]int{}
+ _ = &[]int{}
+ _ = &map[string]T{}
+ _ = &(T{1, 2})
+ _ = &((((T{1, 2}))))
+ _ = &f /* ERROR "cannot take address" */ ()
+)
+
+// recursive pointer types
+type P *P
+
+var (
+ p1 P = new(P)
+ p2 P = *p1
+ p3 P = &p2
+)
+
+func g() (a, b int) { return }
+
+func _() {
+ _ = -g /* ERROR 2-valued g */ ()
+ _ = <-g /* ERROR 2-valued g */ ()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr1.src b/src/cmd/compile/internal/types2/testdata/check/expr1.src
new file mode 100644
index 0000000..85ad234
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/expr1.src
@@ -0,0 +1,127 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// binary expressions
+
+package expr1
+
+type mybool bool
+
+func _(x, y bool, z mybool) {
+ x = x || y
+ x = x || true
+ x = x || false
+ x = x && y
+ x = x && true
+ x = x && false
+
+ z = z /* ERROR mismatched types */ || y
+ z = z || true
+ z = z || false
+ z = z /* ERROR mismatched types */ && y
+ z = z && true
+ z = z && false
+}
+
+type myint int
+
+func _(x, y int, z myint) {
+ x = x + 1
+ x = x + 1.0
+ x = x + 1.1 // ERROR truncated to int
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x % y
+ x = x << y
+ x = x >> y
+
+ z = z + 1
+ z = z + 1.0
+ z = z + 1.1 // ERROR truncated to int
+ z = z /* ERROR mismatched types */ + y
+ z = z /* ERROR mismatched types */ - y
+ z = z /* ERROR mismatched types */ * y
+ z = z /* ERROR mismatched types */ / y
+ z = z /* ERROR mismatched types */ % y
+ z = z << y
+ z = z >> y
+}
+
+type myuint uint
+
+func _(x, y uint, z myuint) {
+ x = x + 1
+ x = x + - /* ERROR overflows uint */ 1
+ x = x + 1.0
+ x = x + 1.1 // ERROR truncated to uint
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x % y
+ x = x << y
+ x = x >> y
+
+ z = z + 1
+ z = x + - /* ERROR overflows uint */ 1
+ z = z + 1.0
+ z = z + 1.1 // ERROR truncated to uint
+ z = z /* ERROR mismatched types */ + y
+ z = z /* ERROR mismatched types */ - y
+ z = z /* ERROR mismatched types */ * y
+ z = z /* ERROR mismatched types */ / y
+ z = z /* ERROR mismatched types */ % y
+ z = z << y
+ z = z >> y
+}
+
+type myfloat64 float64
+
+func _(x, y float64, z myfloat64) {
+ x = x + 1
+ x = x + -1
+ x = x + 1.0
+ x = x + 1.1
+ x = x + y
+ x = x - y
+ x = x * y
+ x = x / y
+ x = x /* ERROR not defined */ % y
+ x = x /* ERROR operand x .* must be integer */ << y
+ x = x /* ERROR operand x .* must be integer */ >> y
+
+ z = z + 1
+ z = z + -1
+ z = z + 1.0
+ z = z + 1.1
+ z = z /* ERROR mismatched types */ + y
+ z = z /* ERROR mismatched types */ - y
+ z = z /* ERROR mismatched types */ * y
+ z = z /* ERROR mismatched types */ / y
+ z = z /* ERROR mismatched types */ % y
+ z = z /* ERROR operand z .* must be integer */ << y
+ z = z /* ERROR operand z .* must be integer */ >> y
+}
+
+type mystring string
+
+func _(x, y string, z mystring) {
+ x = x + "foo"
+ x = x /* ERROR not defined */ - "foo"
+ x = x + 1 // ERROR mismatched types string and untyped int
+ x = x + y
+ x = x /* ERROR not defined */ - y
+ x = x * 10 // ERROR mismatched types string and untyped int
+}
+
+func f() (a, b int) { return }
+
+func _(x int) {
+ _ = f /* ERROR 2-valued f */ () + 1
+ _ = x + f /* ERROR 2-valued f */ ()
+ _ = f /* ERROR 2-valued f */ () + f
+ _ = f /* ERROR 2-valued f */ () + f /* ERROR 2-valued f */ ()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr2.src b/src/cmd/compile/internal/types2/testdata/check/expr2.src
new file mode 100644
index 0000000..88781f1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/expr2.src
@@ -0,0 +1,260 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// comparisons
+
+package expr2
+
+func _bool() {
+ const t = true == true
+ const f = true == false
+ _ = t /* ERROR cannot compare */ < f
+ _ = 0 /* ERROR mismatched types untyped int and untyped bool */ == t
+ var b bool
+ var x, y float32
+ b = x < y
+ _ = b
+ _ = struct{b bool}{x < y}
+}
+
+// corner cases
+var (
+ v0 = nil == nil // ERROR operator == not defined on untyped nil
+)
+
+func arrays() {
+ // basics
+ var a, b [10]int
+ _ = a == b
+ _ = a != b
+ _ = a /* ERROR < not defined */ < b
+ _ = a == nil /* ERROR invalid operation.*mismatched types */
+
+ type C [10]int
+ var c C
+ _ = a == c
+
+ type D [10]int
+ var d D
+ _ = c /* ERROR mismatched types */ == d
+
+ var e [10]func() int
+ _ = e /* ERROR \[10\]func\(\) int cannot be compared */ == e
+}
+
+func structs() {
+ // basics
+ var s, t struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ _ = s == t
+ _ = s != t
+ _ = s /* ERROR < not defined */ < t
+ _ = s == nil /* ERROR invalid operation.*mismatched types */
+
+ type S struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ type T struct {
+ x int
+ a [10]float32
+ _ bool
+ }
+ var ss S
+ var tt T
+ _ = s == ss
+ _ = ss /* ERROR mismatched types */ == tt
+
+ var u struct {
+ x int
+ a [10]map[string]int
+ }
+ _ = u /* ERROR cannot compare */ == u
+}
+
+func pointers() {
+ // nil
+ _ = nil == nil // ERROR operator == not defined on untyped nil
+ _ = nil != nil // ERROR operator != not defined on untyped nil
+ _ = nil /* ERROR < not defined */ < nil
+ _ = nil /* ERROR <= not defined */ <= nil
+ _ = nil /* ERROR > not defined */ > nil
+ _ = nil /* ERROR >= not defined */ >= nil
+
+ // basics
+ var p, q *int
+ _ = p == q
+ _ = p != q
+
+ _ = p == nil
+ _ = p != nil
+ _ = nil == q
+ _ = nil != q
+
+ _ = p /* ERROR < not defined */ < q
+ _ = p /* ERROR <= not defined */ <= q
+ _ = p /* ERROR > not defined */ > q
+ _ = p /* ERROR >= not defined */ >= q
+
+ // various element types
+ type (
+ S1 struct{}
+ S2 struct{}
+ P1 *S1
+ P2 *S2
+ )
+ var (
+ ps1 *S1
+ ps2 *S2
+ p1 P1
+ p2 P2
+ )
+ _ = ps1 == ps1
+ _ = ps1 /* ERROR mismatched types */ == ps2
+ _ = ps2 /* ERROR mismatched types */ == ps1
+
+ _ = p1 == p1
+ _ = p1 /* ERROR mismatched types */ == p2
+
+ _ = p1 == ps1
+}
+
+func channels() {
+ // basics
+ var c, d chan int
+ _ = c == d
+ _ = c != d
+ _ = c == nil
+ _ = c /* ERROR < not defined */ < d
+
+ // various element types (named types)
+ type (
+ C1 chan int
+ C1r <-chan int
+ C1s chan<- int
+ C2 chan float32
+ )
+ var (
+ c1 C1
+ c1r C1r
+ c1s C1s
+ c1a chan int
+ c2 C2
+ )
+ _ = c1 == c1
+ _ = c1 /* ERROR mismatched types */ == c1r
+ _ = c1 /* ERROR mismatched types */ == c1s
+ _ = c1r /* ERROR mismatched types */ == c1s
+ _ = c1 == c1a
+ _ = c1a == c1
+ _ = c1 /* ERROR mismatched types */ == c2
+ _ = c1a /* ERROR mismatched types */ == c2
+
+ // various element types (unnamed types)
+ var (
+ d1 chan int
+ d1r <-chan int
+ d1s chan<- int
+ d1a chan<- int
+ d2 chan float32
+ )
+ _ = d1 == d1
+ _ = d1 == d1r
+ _ = d1 == d1s
+ _ = d1r /* ERROR mismatched types */ == d1s
+ _ = d1 == d1a
+ _ = d1a == d1
+ _ = d1 /* ERROR mismatched types */ == d2
+ _ = d1a /* ERROR mismatched types */ == d2
+}
+
+// for interfaces test
+type S1 struct{}
+type S11 struct{}
+type S2 struct{}
+func (*S1) m() int
+func (*S11) m() int
+func (*S11) n()
+func (*S2) m() float32
+
+func interfaces() {
+ // basics
+ var i, j interface{ m() int }
+ _ = i == j
+ _ = i != j
+ _ = i == nil
+ _ = i /* ERROR < not defined */ < j
+
+ // various interfaces
+ var ii interface { m() int; n() }
+ var k interface { m() float32 }
+ _ = i == ii
+ _ = i /* ERROR mismatched types */ == k
+
+ // interfaces vs values
+ var s1 S1
+ var s11 S11
+ var s2 S2
+
+ _ = i == 0 /* ERROR cannot convert */
+ _ = i /* ERROR mismatched types */ == s1
+ _ = i == &s1
+ _ = i == &s11
+
+ _ = i /* ERROR mismatched types */ == s2
+ _ = i /* ERROR mismatched types */ == &s2
+
+ // issue #28164
+ // testcase from issue
+ _ = interface{}(nil) == [ /* ERROR slice can only be compared to nil */ ]int(nil)
+
+ // related cases
+ var e interface{}
+ var s []int
+ var x int
+ _ = e == s // ERROR slice can only be compared to nil
+ _ = s /* ERROR slice can only be compared to nil */ == e
+ _ = e /* ERROR operator < not defined on interface */ < x
+ _ = x < e // ERROR operator < not defined on interface
+}
+
+func slices() {
+ // basics
+ var s []int
+ _ = s == nil
+ _ = s != nil
+ _ = s /* ERROR < not defined */ < nil
+
+ // slices are not otherwise comparable
+ _ = s /* ERROR slice can only be compared to nil */ == s
+ _ = s /* ERROR < not defined */ < s
+}
+
+func maps() {
+ // basics
+ var m map[string]int
+ _ = m == nil
+ _ = m != nil
+ _ = m /* ERROR < not defined */ < nil
+
+ // maps are not otherwise comparable
+ _ = m /* ERROR map can only be compared to nil */ == m
+ _ = m /* ERROR < not defined */ < m
+}
+
+func funcs() {
+ // basics
+ var f func(int) float32
+ _ = f == nil
+ _ = f != nil
+ _ = f /* ERROR < not defined */ < nil
+
+ // funcs are not otherwise comparable
+ _ = f /* ERROR func can only be compared to nil */ == f
+ _ = f /* ERROR < not defined */ < f
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/expr3.src b/src/cmd/compile/internal/types2/testdata/check/expr3.src
new file mode 100644
index 0000000..646319e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/expr3.src
@@ -0,0 +1,565 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package expr3
+
+import "time"
+
+func indexes() {
+ var x int
+ _ = 1 /* ERROR "cannot index" */ [0]
+ _ = x /* ERROR "cannot index" */ [0]
+ _ = ( /* ERROR "cannot slice" */ 12 + 3)[1:2]
+
+ var a [10]int
+ _ = a[true /* ERROR "cannot convert" */ ]
+ _ = a["foo" /* ERROR "cannot convert" */ ]
+ _ = a[1.1 /* ERROR "truncated" */ ]
+ _ = a[1.0]
+ _ = a[- /* ERROR "negative" */ 1]
+ _ = a[- /* ERROR "negative" */ 1 :]
+ _ = a[: - /* ERROR "negative" */ 1]
+ _ = a[: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = a[0: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = a[0: /* ERROR "middle index required" */ :10]
+ _ = a[:10:10]
+
+ var a0 int
+ a0 = a[0]
+ _ = a0
+ var a1 int32
+ a1 = a /* ERROR "cannot use .* in assignment" */ [1]
+ _ = a1
+
+ _ = a[9]
+ _ = a[10 /* ERROR "index .* out of bounds" */ ]
+ _ = a[1 /* ERROR "overflows" */ <<100]
+ _ = a[1<< /* ERROR "constant shift overflow" */ 1000] // no out-of-bounds follow-on error
+ _ = a[10:]
+ _ = a[:10]
+ _ = a[10:10]
+ _ = a[11 /* ERROR "index .* out of bounds" */ :]
+ _ = a[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = a[: 1 /* ERROR "overflows" */ <<100]
+ _ = a[:10:10]
+ _ = a[:11 /* ERROR "index .* out of bounds" */ :10]
+ _ = a[:10:11 /* ERROR "index .* out of bounds" */ ]
+ _ = a[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = a[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = a[10:0 /* ERROR "invalid slice indices" */:0]
+ _ = &a /* ERROR "cannot take address" */ [:10]
+
+ pa := &a
+ _ = pa[9]
+ _ = pa[10 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[1 /* ERROR "overflows" */ <<100]
+ _ = pa[10:]
+ _ = pa[:10]
+ _ = pa[10:10]
+ _ = pa[11 /* ERROR "index .* out of bounds" */ :]
+ _ = pa[: 11 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[: 1 /* ERROR "overflows" */ <<100]
+ _ = pa[:10:10]
+ _ = pa[:11 /* ERROR "index .* out of bounds" */ :10]
+ _ = pa[:10:11 /* ERROR "index .* out of bounds" */ ]
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = pa[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = pa[10:0 /* ERROR "invalid slice indices" */ :0]
+ _ = &pa /* ERROR "cannot take address" */ [:10]
+
+ var b [0]int
+ _ = b[0 /* ERROR "index .* out of bounds" */ ]
+ _ = b[:]
+ _ = b[0:]
+ _ = b[:0]
+ _ = b[0:0]
+ _ = b[0:0:0]
+ _ = b[1 /* ERROR "index .* out of bounds" */ :0:0]
+
+ var s []int
+ _ = s[- /* ERROR "negative" */ 1]
+ _ = s[- /* ERROR "negative" */ 1 :]
+ _ = s[: - /* ERROR "negative" */ 1]
+ _ = s[0]
+ _ = s[1:2]
+ _ = s[2:1] /* ERROR "invalid slice indices" */
+ _ = s[2:]
+ _ = s[: 1 /* ERROR "overflows" */ <<100]
+ _ = s[1 /* ERROR "overflows" */ <<100 :]
+ _ = s[1 /* ERROR "overflows" */ <<100 : 1 /* ERROR "overflows" */ <<100]
+ _ = s[: /* ERROR "middle index required" */ : /* ERROR "final index required" */ ]
+ _ = s[:10:10]
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :10]
+ _ = s[0:10:0 /* ERROR "invalid slice indices" */ ]
+ _ = s[10:0 /* ERROR "invalid slice indices" */ :0]
+ _ = &s /* ERROR "cannot take address" */ [:10]
+
+ var m map[string]int
+ _ = m[0 /* ERROR "cannot use .* in map index" */ ]
+ _ = m /* ERROR "cannot slice" */ ["foo" : "bar"]
+ _ = m["foo"]
+ // ok is of type bool
+ type mybool bool
+ var ok mybool
+ _, ok = m["bar"]
+ _ = ok
+ _ = m[0 /* ERROR "cannot use 0" */ ] + "foo" // ERROR "mismatched types int and untyped string"
+
+ var t string
+ _ = t[- /* ERROR "negative" */ 1]
+ _ = t[- /* ERROR "negative" */ 1 :]
+ _ = t[: - /* ERROR "negative" */ 1]
+ _ = t[1:2:3 /* ERROR "3-index slice of string" */ ]
+ _ = "foo"[1:2:3 /* ERROR "3-index slice of string" */ ]
+ var t0 byte
+ t0 = t[0]
+ _ = t0
+ var t1 rune
+ t1 = t /* ERROR "cannot use .* in assignment" */ [2]
+ _ = t1
+ _ = ("foo" + "bar")[5]
+ _ = ("foo" + "bar")[6 /* ERROR "index .* out of bounds" */ ]
+
+ const c = "foo"
+ _ = c[- /* ERROR "negative" */ 1]
+ _ = c[- /* ERROR "negative" */ 1 :]
+ _ = c[: - /* ERROR "negative" */ 1]
+ var c0 byte
+ c0 = c[0]
+ _ = c0
+ var c2 float32
+ c2 = c /* ERROR "cannot use .* in assignment" */ [2]
+ _ = c[3 /* ERROR "index .* out of bounds" */ ]
+ _ = ""[0 /* ERROR "index .* out of bounds" */ ]
+ _ = c2
+
+ _ = s[1<<30] // no compile-time error here
+
+ // issue 4913
+ type mystring string
+ var ss string
+ var ms mystring
+ var i, j int
+ ss = "foo"[1:2]
+ ss = "foo"[i:j]
+ ms = "foo" /* ERROR "cannot use .* in assignment" */ [1:2]
+ ms = "foo" /* ERROR "cannot use .* in assignment" */ [i:j]
+ _, _ = ss, ms
+}
+
+type T struct {
+ x int
+ y func()
+}
+
+func (*T) m() {}
+
+func method_expressions() {
+ _ = T.a /* ERROR "no field or method" */
+ _ = T.x /* ERROR "has no method" */
+ _ = T.m /* ERROR "cannot call pointer method m on T" */
+ _ = (*T).m
+
+ var f func(*T) = T.m /* ERROR "cannot call pointer method m on T" */
+ var g func(*T) = (*T).m
+ _, _ = f, g
+
+ _ = T.y /* ERROR "has no method" */
+ _ = (*T).y /* ERROR "has no method" */
+}
+
+func struct_literals() {
+ type T0 struct {
+ a, b, c int
+ }
+
+ type T1 struct {
+ T0
+ a, b int
+ u float64
+ s string
+ }
+
+ // keyed elements
+ _ = T1{}
+ _ = T1{a: 0, 1 /* ERROR "mixture of .* elements" */ }
+ _ = T1{aa /* ERROR "unknown field" */ : 0}
+ _ = T1{1 /* ERROR "invalid field name" */ : 0}
+ _ = T1{a: 0, s: "foo", u: 0, a /* ERROR "duplicate field" */: 10}
+ _ = T1{a: "foo" /* ERROR "cannot use .* in struct literal" */ }
+ _ = T1{c /* ERROR "unknown field" */ : 0}
+ _ = T1{T0: { /* ERROR "missing type" */ }} // struct literal element type may not be elided
+ _ = T1{T0: T0{}}
+ _ = T1{T0 /* ERROR "invalid field name" */ .a: 0}
+
+ // unkeyed elements
+ _ = T0{1, 2, 3}
+ _ = T0{1, b /* ERROR "mixture" */ : 2, 3}
+ _ = T0{1, 2} /* ERROR "too few values" */
+ _ = T0{1, 2, 3, 4 /* ERROR "too many values" */ }
+ _ = T0{1, "foo" /* ERROR "cannot use .* in struct literal" */, 3.4 /* ERROR "cannot use .*\(truncated\)" */}
+
+ // invalid type
+ type P *struct{
+ x int
+ }
+ _ = P /* ERROR "invalid composite literal type" */ {}
+
+ // unexported fields
+ _ = time.Time{}
+ _ = time.Time{sec /* ERROR "unknown field" */ : 0}
+ _ = time.Time{
+ 0 /* ERROR implicit assignment to unexported field wall in time.Time literal */,
+ 0 /* ERROR implicit assignment */ ,
+ nil /* ERROR implicit assignment */ ,
+ }
+}
+
+func array_literals() {
+ type A0 [0]int
+ _ = A0{}
+ _ = A0{0 /* ERROR "index .* out of bounds" */}
+ _ = A0{0 /* ERROR "index .* out of bounds" */ : 0}
+
+ type A1 [10]int
+ _ = A1{}
+ _ = A1{0, 1, 2}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = A1{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{- /* ERROR "negative" */ 1: 0}
+ _ = A1{8: 8, 9}
+ _ = A1{8: 8, 9, 10 /* ERROR "index .* out of bounds" */ }
+ _ = A1{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4}
+ _ = A1{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{10 /* ERROR "index .* out of bounds" */ : 10, 10 /* ERROR "index .* out of bounds" */ : 10}
+ _ = A1{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = A1{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = A1{2.0}
+ _ = A1{2.1 /* ERROR "truncated" */ }
+ _ = A1{"foo" /* ERROR "cannot use .* in array or slice literal" */ }
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = A1{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = A1{f /* ERROR "truncated" */ : 0}
+ _ = A1{s /* ERROR "cannot convert" */ : 0}
+
+ a0 := [...]int{}
+ assert(len(a0) == 0)
+
+ a1 := [...]int{0, 1, 2}
+ assert(len(a1) == 3)
+ var a13 [3]int
+ var a14 [4]int
+ a13 = a1
+ a14 = a1 /* ERROR "cannot use .* in assignment" */
+ _, _ = a13, a14
+
+ a2 := [...]int{- /* ERROR "negative" */ 1: 0}
+ _ = a2
+
+ a3 := [...]int{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ assert(len(a3) == 5) // somewhat arbitrary
+
+ a4 := [...]complex128{0, 1, 2, 1<<10-2: -1i, 1i, 400: 10, 12, 14}
+ assert(len(a4) == 1024)
+
+ // composite literal element types may be elided
+ type T []int
+ _ = [10]T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ a6 := [...]T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ assert(len(a6) == 8)
+
+ // recursively so
+ _ = [10][10]T{{}, [10]T{{}}, {{1, 2, 3}}}
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = [...]Point{Point{1.5, -3.5}, Point{0, 0}}
+ _ = [...]Point{{1.5, -3.5}, {0, 0}}
+ _ = [][]int{[]int{1, 2, 3}, []int{4, 5}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+ _ = [...]*Point{&Point{1.5, -3.5}, &Point{0, 0}}
+ _ = [...]*Point{{1.5, -3.5}, {0, 0}}
+}
+
+func slice_literals() {
+ type S0 []int
+ _ = S0{}
+ _ = S0{0, 1, 2}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ _ = S0{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ _ = S0{- /* ERROR "negative" */ 1: 0}
+ _ = S0{8: 8, 9}
+ _ = S0{8: 8, 9, 10}
+ _ = S0{0, 1, 2, 0 /* ERROR "duplicate index" */ : 0, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4}
+ _ = S0{5: 5, 6, 7, 3: 3, 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{10: 10, 10 /* ERROR "duplicate index" */ : 10}
+ _ = S0{5: 5, 6, 7, 3: 3, 1 /* ERROR "overflows" */ <<100: 4, 5 /* ERROR "duplicate index" */ }
+ _ = S0{5: 5, 6, 7, 4: 4, 1 /* ERROR "overflows" */ <<100: 4}
+ _ = S0{2.0}
+ _ = S0{2.1 /* ERROR "truncated" */ }
+ _ = S0{"foo" /* ERROR "cannot use .* in array or slice literal" */ }
+
+ // indices must be resolved correctly
+ const index1 = 1
+ _ = S0{index1: 1}
+ _ = S0{index2: 2}
+ _ = S0{index3 /* ERROR "undeclared name" */ : 3}
+
+ // indices must be integer constants
+ i := 1
+ const f = 2.1
+ const s = "foo"
+ _ = S0{i /* ERROR "index i must be integer constant" */ : 0}
+ _ = S0{f /* ERROR "truncated" */ : 0}
+ _ = S0{s /* ERROR "cannot convert" */ : 0}
+
+ // composite literal element types may be elided
+ type T []int
+ _ = []T{T{}, {}, 5: T{1, 2, 3}, 7: {1, 2, 3}}
+ _ = [][]int{{1, 2, 3}, {4, 5}}
+
+ // recursively so
+ _ = [][]T{{}, []T{{}}, {{1, 2, 3}}}
+
+ // issue 17954
+ type T0 *struct { s string }
+ _ = []T0{{}}
+ _ = []T0{{"foo"}}
+
+ type T1 *struct{ int }
+ _ = []T1{}
+ _ = []T1{{0}, {1}, {2}}
+
+ type T2 T1
+ _ = []T2{}
+ _ = []T2{{0}, {1}, {2}}
+
+ _ = map[T0]T2{}
+ _ = map[T0]T2{{}: {}}
+}
+
+const index2 int = 2
+
+type N int
+func (N) f() {}
+
+func map_literals() {
+ type M0 map[string]int
+ type M1 map[bool]int
+ type M2 map[*int]int
+
+ _ = M0{}
+ _ = M0{1 /* ERROR "missing key" */ }
+ _ = M0{1 /* ERROR "cannot use .* in map literal" */ : 2}
+ _ = M0{"foo": "bar" /* ERROR "cannot use .* in map literal" */ }
+ _ = M0{"foo": 1, "bar": 2, "foo" /* ERROR "duplicate key" */ : 3 }
+
+ _ = map[interface{}]int{2: 1, 2 /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{int(2): 1, int16(2): 1}
+ _ = map[interface{}]int{int16(2): 1, int16 /* ERROR "duplicate key" */ (2): 1}
+
+ type S string
+
+ _ = map[interface{}]int{"a": 1, "a" /* ERROR "duplicate key" */ : 1}
+ _ = map[interface{}]int{"a": 1, S("a"): 1}
+ _ = map[interface{}]int{S("a"): 1, S /* ERROR "duplicate key" */ ("a"): 1}
+ _ = map[interface{}]int{1.0: 1, 1.0 /* ERROR "duplicate key" */: 1}
+ _ = map[interface{}]int{int64(-1): 1, int64 /* ERROR "duplicate key" */ (-1) : 1}
+ _ = map[interface{}]int{^uint64(0): 1, ^ /* ERROR "duplicate key" */ uint64(0): 1}
+ _ = map[interface{}]int{complex(1,2): 1, complex /* ERROR "duplicate key" */ (1,2) : 1}
+
+ type I interface {
+ f()
+ }
+
+ _ = map[I]int{N(0): 1, N(2): 1}
+ _ = map[I]int{N(2): 1, N /* ERROR "duplicate key" */ (2): 1}
+
+ // map keys must be resolved correctly
+ key1 := "foo"
+ _ = M0{key1: 1}
+ _ = M0{key2: 2}
+ _ = M0{key3 /* ERROR "undeclared name" */ : 2}
+
+ var value int
+ _ = M1{true: 1, false: 0}
+ _ = M2{nil: 0, &value: 1}
+
+ // composite literal element types may be elided
+ type T [2]int
+ _ = map[int]T{0: T{3, 4}, 1: {5, 6}}
+
+ // recursively so
+ _ = map[int][]T{0: {}, 1: {{}, T{1, 2}}}
+
+ // composite literal key types may be elided
+ _ = map[T]int{T{3, 4}: 0, {5, 6}: 1}
+
+ // recursively so
+ _ = map[[2]T]int{{}: 0, {{}}: 1, [2]T{{}}: 2, {T{1, 2}}: 3}
+
+ // composite literal element and key types may be elided
+ _ = map[T]T{{}: {}, {1, 2}: T{3, 4}, T{4, 5}: {}}
+ _ = map[T]M0{{} : {}, T{1, 2}: M0{"foo": 0}, {1, 3}: {"foo": 1}}
+
+ // recursively so
+ _ = map[[2]T][]T{{}: {}, {{}}: {{}, T{1, 2}}, [2]T{{}}: nil, {T{1, 2}}: {{}, {}}}
+
+ // from the spec
+ type Point struct { x, y float32 }
+ _ = map[string]Point{"orig": {0, 0}}
+ _ = map[*Point]string{{0, 0}: "orig"}
+
+ // issue 17954
+ type T0 *struct{ s string }
+ type T1 *struct{ int }
+ type T2 T1
+
+ _ = map[T0]T2{}
+ _ = map[T0]T2{{}: {}}
+}
+
+var key2 string = "bar"
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T1 struct{}
+type T2 struct{}
+
+func (T2) m(int) {}
+
+type mybool bool
+
+func type_asserts() {
+ var x int
+ _ = x /* ERROR "not an interface" */ .(int)
+
+ var e interface{}
+ var ok bool
+ x, ok = e.(int)
+ _ = ok
+
+ // ok value is of type bool
+ var myok mybool
+ _, myok = e.(int)
+ _ = myok
+
+ var t I
+ _ = t /* ERROR "use of .* outside type switch" */ .(type)
+ _ = t /* ERROR "method m has pointer receiver" */ .(T)
+ _ = t.(*T)
+ _ = t /* ERROR "missing method m" */ .(T1)
+ _ = t /* ERROR "wrong type for method m" */ .(T2)
+ _ = t /* STRICT "wrong type for method m" */ .(I2) // only an error in strict mode (issue 8561)
+
+ // e doesn't statically have an m, but may have one dynamically.
+ _ = e.(I2)
+}
+
+func f0() {}
+func f1(x int) {}
+func f2(u float32, s string) {}
+func fs(s []byte) {}
+func fv(x ...int) {}
+func fi(x ... interface{}) {}
+func (T) fm(x ...int)
+
+func g0() {}
+func g1() int { return 0}
+func g2() (u float32, s string) { return }
+func gs() []byte { return nil }
+
+func _calls() {
+ var x int
+ var y float32
+ var s []int
+
+ f0()
+ _ = f0 /* ERROR "used as value" */ ()
+ f0(g0 /* ERROR "too many arguments" */ )
+
+ f1(0)
+ f1(x)
+ f1(10.0)
+ f1 /* ERROR "not enough arguments in call to f1\n\thave \(\)\n\twant \(int\)" */ ()
+ f1(x, y /* ERROR "too many arguments in call to f1\n\thave \(int, float32\)\n\twant \(int\)" */ )
+ f1(s /* ERROR "cannot use .* in argument" */ )
+ f1(x ... /* ERROR "cannot use ..." */ )
+ f1(g0 /* ERROR "used as value" */ ())
+ f1(g1())
+ f1(g2 /* ERROR "too many arguments in call to f1\n\thave \(float32, string\)\n\twant \(int\)" */ ())
+
+ f2 /* ERROR "not enough arguments in call to f2\n\thave \(\)\n\twant \(float32, string\)" */ ()
+ f2(3.14 /* ERROR "not enough arguments in call to f2\n\thave \(number\)\n\twant \(float32, string\)" */ )
+ f2(3.14, "foo")
+ f2(x /* ERROR "cannot use .* in argument" */ , "foo")
+ f2(g0 /* ERROR "used as value" */ ())
+ f2(g1 /* ERROR "not enough arguments in call to f2\n\thave \(int\)\n\twant \(float32, string\)" */ ())
+ f2(g2())
+
+ fs /* ERROR "not enough arguments" */ ()
+ fs(g0 /* ERROR "used as value" */ ())
+ fs(g1 /* ERROR "cannot use .* in argument" */ ())
+ fs(g2 /* ERROR "too many arguments" */ ())
+ fs(gs())
+
+ fv()
+ fv(1, 2.0, x)
+ fv(s /* ERROR "cannot use .* in argument" */ )
+ fv(s...)
+ fv(x /* ERROR "cannot use" */ ...)
+ fv(1, s /* ERROR "too many arguments" */ ... )
+ fv(gs /* ERROR "cannot use .* in argument" */ ())
+ fv(gs /* ERROR "cannot use .* in argument" */ ()...)
+
+ var t T
+ t.fm()
+ t.fm(1, 2.0, x)
+ t.fm(s /* ERROR "cannot use .* in argument" */ )
+ t.fm(g1())
+ t.fm(1, s /* ERROR "too many arguments" */ ... )
+ t.fm(gs /* ERROR "cannot use .* in argument" */ ())
+ t.fm(gs /* ERROR "cannot use .* in argument" */ ()...)
+
+ T.fm(t, )
+ T.fm(t, 1, 2.0, x)
+ T.fm(t, s /* ERROR "cannot use .* in argument" */ )
+ T.fm(t, g1())
+ T.fm(t, 1, s /* ERROR "too many arguments" */ ... )
+ T.fm(t, gs /* ERROR "cannot use .* in argument" */ ())
+ T.fm(t, gs /* ERROR "cannot use .* in argument" */ ()...)
+
+ var i interface{ fm(x ...int) } = t
+ i.fm()
+ i.fm(1, 2.0, x)
+ i.fm(s /* ERROR "cannot use .* in argument" */ )
+ i.fm(g1())
+ i.fm(1, s /* ERROR "too many arguments" */ ... )
+ i.fm(gs /* ERROR "cannot use .* in argument" */ ())
+ i.fm(gs /* ERROR "cannot use .* in argument" */ ()...)
+
+ fi()
+ fi(1, 2.0, x, 3.14, "foo")
+ fi(g2())
+ fi(0, g2)
+ fi(0, g2 /* ERROR "2-valued g2" */ ())
+}
+
+func issue6344() {
+ type T []interface{}
+ var x T
+ fi(x...) // ... applies also to named slices
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/funcinference.go2 b/src/cmd/compile/internal/types2/testdata/check/funcinference.go2
new file mode 100644
index 0000000..45d0781
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/funcinference.go2
@@ -0,0 +1,104 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package funcInference
+
+import "strconv"
+
+type any interface{}
+
+func f0[A any, B interface{*C}, C interface{*D}, D interface{*A}](A, B, C, D) {}
+func _() {
+ f := f0[string]
+ f("a", nil, nil, nil)
+ f0("a", nil, nil, nil)
+}
+
+func f1[A any, B interface{*A}](A, B) {}
+func _() {
+ f := f1[int]
+ f(int(0), new(int))
+ f1(int(0), new(int))
+}
+
+func f2[A any, B interface{[]A}](A, B) {}
+func _() {
+ f := f2[byte]
+ f(byte(0), []byte{})
+ f2(byte(0), []byte{})
+}
+
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// func f3[A any, B interface{~C}, C interface{~*A}](A, B, C)
+// func _() {
+// f := f3[int]
+// var x int
+// f(x, &x, &x)
+// f3(x, &x, &x)
+// }
+
+func f4[A any, B interface{[]C}, C interface{*A}](A, B, C) {}
+func _() {
+ f := f4[int]
+ var x int
+ f(x, []*int{}, &x)
+ f4(x, []*int{}, &x)
+}
+
+func f5[A interface{struct{b B; c C}}, B any, C interface{*B}](x B) A { panic(0) }
+func _() {
+ x := f5(1.2)
+ var _ float64 = x.b
+ var _ float64 = *x.c
+}
+
+func f6[A any, B interface{~struct{f []A}}](B) A { panic(0) }
+func _() {
+ x := f6(struct{f []string}{})
+ var _ string = x
+}
+
+func f7[A interface{*B}, B interface{~*A}]() {}
+
+// More realistic examples
+
+func Double[S interface{ ~[]E }, E interface{ ~int | ~int8 | ~int16 | ~int32 | ~int64 }](s S) S {
+ r := make(S, len(s))
+ for i, v := range s {
+ r[i] = v + v
+ }
+ return r
+}
+
+type MySlice []int
+
+var _ = Double(MySlice{1})
+
+// From the draft design.
+
+type Setter[B any] interface {
+ Set(string)
+ *B
+}
+
+func FromStrings[T interface{}, PT Setter[T]](s []string) []T {
+ result := make([]T, len(s))
+ for i, v := range s {
+ // The type of &result[i] is *T which is in the type list
+ // of Setter, so we can convert it to PT.
+ p := PT(&result[i])
+ // PT has a Set method.
+ p.Set(v)
+ }
+ return result
+}
+
+type Settable int
+
+func (p *Settable) Set(s string) {
+ i, _ := strconv.Atoi(s) // real code should not ignore the error
+ *p = Settable(i)
+}
+
+var _ = FromStrings[Settable]([]string{"1", "2"})
diff --git a/src/cmd/compile/internal/types2/testdata/check/go1_12.src b/src/cmd/compile/internal/types2/testdata/check/go1_12.src
new file mode 100644
index 0000000..75a602b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/go1_12.src
@@ -0,0 +1,34 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_12 // go1.12
+
+// numeric literals
+const (
+ _ = 1_000 // ERROR "underscores in numeric literals requires go1.13 or later"
+ _ = 0b111 // ERROR "binary literals requires go1.13 or later"
+ _ = 0o567 // ERROR "0o/0O-style octal literals requires go1.13 or later"
+ _ = 0xabc // ok
+ _ = 0x0p1 // ERROR "hexadecimal floating-point literals requires go1.13 or later"
+
+ _ = 0B111 // ERROR "binary"
+ _ = 0O567 // ERROR "octal"
+ _ = 0Xabc // ok
+ _ = 0X0P1 // ERROR "hexadecimal floating-point"
+
+ _ = 1_000i // ERROR "underscores"
+ _ = 0b111i // ERROR "binary"
+ _ = 0o567i // ERROR "octal"
+ _ = 0xabci // ERROR "hexadecimal floating-point"
+ _ = 0x0p1i // ERROR "hexadecimal floating-point"
+)
+
+// signed shift counts
+var (
+ s int
+ _ = 1 << s // ERROR "invalid operation: signed shift count s \(variable of type int\) requires go1.13 or later"
+ _ = 1 >> s // ERROR "signed shift count"
+)
diff --git a/src/cmd/compile/internal/types2/testdata/check/go1_13.src b/src/cmd/compile/internal/types2/testdata/check/go1_13.src
new file mode 100644
index 0000000..93cb4c7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/go1_13.src
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_13 // go1.13
+
+// interface embedding
+
+type I interface { m() }
+
+type _ interface {
+ m()
+ I // ERROR "duplicate method m"
+}
+
+type _ interface {
+ I
+ I // ERROR "duplicate method m"
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/go1_16.src b/src/cmd/compile/internal/types2/testdata/check/go1_16.src
new file mode 100644
index 0000000..fdf5c99
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/go1_16.src
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_16 // go1.16
+
+type Slice []byte
+type Array [8]byte
+
+var s Slice
+var p = (*Array)(s /* ERROR requires go1.17 or later */ )
diff --git a/src/cmd/compile/internal/types2/testdata/check/go1_8.src b/src/cmd/compile/internal/types2/testdata/check/go1_8.src
new file mode 100644
index 0000000..0f3ba94
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/go1_8.src
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check Go language version-specific errors.
+
+package go1_8 // go1.8
+
+// type alias declarations
+type any /* ERROR type aliases requires go1.9 or later */ = interface{}
diff --git a/src/cmd/compile/internal/types2/testdata/check/gotos.src b/src/cmd/compile/internal/types2/testdata/check/gotos.src
new file mode 100644
index 0000000..069a94b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/gotos.src
@@ -0,0 +1,560 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified copy of $GOROOT/test/goto.go.
+
+package gotos
+
+var (
+ i, n int
+ x []int
+ c chan int
+ m map[int]int
+ s string
+)
+
+// goto after declaration okay
+func _() {
+ x := 1
+ goto L
+L:
+ _ = x
+}
+
+// goto before declaration okay
+func _() {
+ goto L
+L:
+ x := 1
+ _ = x
+}
+
+// goto across declaration not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 36" */
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in inner scope okay
+func _() {
+ goto L
+ {
+ x := 1
+ _ = x
+ }
+L:
+}
+
+// goto across declaration after inner scope not okay
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 58" */
+ {
+ x := 1
+ _ = x
+ }
+ x := 1
+ _ = x
+L:
+}
+
+// goto across declaration in reverse okay
+func _() {
+L:
+ x := 1
+ _ = x
+ goto L
+}
+
+func _() {
+L: L1:
+ x := 1
+ _ = x
+ goto L
+ goto L1
+}
+
+// error shows first offending variable
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration at line 84" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+L:
+}
+
+// goto not okay even if code path is dead
+func _() {
+ goto L /* ERROR "goto L jumps over variable declaration" */
+ x := 1
+ _ = x
+ y := 1
+ _ = y
+ return
+L:
+}
+
+// goto into outer block okay
+func _() {
+ {
+ goto L
+ }
+L:
+}
+
+func _() {
+ {
+ goto L
+ goto L1
+ }
+L: L1:
+}
+
+// goto backward into outer block okay
+func _() {
+L:
+ {
+ goto L
+ }
+}
+
+func _() {
+L: L1:
+ {
+ goto L
+ goto L1
+ }
+}
+
+// goto into inner block not okay
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+ {
+ L: L1:
+ }
+}
+
+// goto backward into inner block still not okay
+func _() {
+ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ {
+ L: L1:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+// error shows first (outermost) offending block
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ {
+ {
+ {
+ L:
+ }
+ }
+ }
+}
+
+// error prefers block diagnostic over declaration diagnostic
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ x := 1
+ _ = x
+ {
+ L:
+ }
+}
+
+// many kinds of blocks, all invalid to jump into or among,
+// but valid to jump out of
+
+// if
+
+func _() {
+L:
+ if true {
+ goto L
+ }
+}
+
+func _() {
+L:
+ if true {
+ goto L
+ } else {
+ }
+}
+
+func _() {
+L:
+ if false {
+ } else {
+ goto L
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ if true {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if false {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ L:
+ } else {
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else if false {
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ goto L /* ERROR "goto L jumps into block" */
+ } else {
+ L:
+ }
+}
+
+func _() {
+ if true {
+ L:
+ } else {
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// for
+
+func _() {
+ for {
+ goto L
+ }
+L:
+}
+
+func _() {
+ for {
+ goto L
+ L:
+ }
+}
+
+func _() {
+ for {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for {
+ goto L
+ L1:
+ }
+L:
+ goto L1 /* ERROR "goto L1 jumps into block" */
+}
+
+func _() {
+ for i < n {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = 0; i < n; i++ {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range x {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range c {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range m {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+func _() {
+ for i = range s {
+ L:
+ }
+ goto L /* ERROR "goto L jumps into block" */
+}
+
+// switch
+
+func _() {
+L:
+ switch i {
+ case 0:
+ goto L
+ }
+}
+
+func _() {
+L:
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ switch i {
+ case 0:
+ default:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case 0:
+ L:
+ }
+}
+
+func _() {
+ switch i {
+ case 0:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
+
+// select
+// different from switch. the statement has no implicit block around it.
+
+func _() {
+L:
+ select {
+ case <-c:
+ goto L
+ }
+}
+
+func _() {
+L:
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+
+ default:
+ L:
+ goto L
+ }
+}
+
+func _() {
+ select {
+ case c <- 1:
+
+ default:
+ goto L
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ goto L
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case c <- 1:
+ L:
+ ;
+ default:
+ }
+}
+
+func _() {
+ goto L /* ERROR "goto L jumps into block" */
+ select {
+ case <-c:
+ default:
+ L:
+ }
+}
+
+func _() {
+ select {
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ case <-c:
+ L:
+ }
+}
+
+func _() {
+ select {
+ case <-c:
+ L:
+ ;
+ default:
+ goto L /* ERROR "goto L jumps into block" */
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/importC.src b/src/cmd/compile/internal/types2/testdata/check/importC.src
new file mode 100644
index 0000000..f55be2d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/importC.src
@@ -0,0 +1,54 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importC
+
+import "C"
+import _ /* ERROR cannot rename import "C" */ "C"
+import foo /* ERROR cannot rename import "C" */ "C"
+import . /* ERROR cannot rename import "C" */ "C"
+
+// Test cases extracted from issue #22090.
+
+import "unsafe"
+
+const _ C.int = 0xff // no error due to invalid constant type
+
+type T struct {
+ Name string
+ Ordinal int
+}
+
+func _(args []T) {
+ var s string
+ for i, v := range args {
+ cname := C.CString(v.Name)
+ args[i].Ordinal = int(C.sqlite3_bind_parameter_index(s, cname)) // no error due to i not being "used"
+ C.free(unsafe.Pointer(cname))
+ }
+}
+
+type CType C.Type
+
+const _ CType = C.X // no error due to invalid constant type
+const _ = C.X
+
+// Test cases extracted from issue #23712.
+
+func _() {
+ var a [C.ArrayLength]byte
+ _ = a[0] // no index out of bounds error here
+}
+
+// Additional tests to verify fix for #23712.
+
+func _() {
+ var a [C.ArrayLength1]byte
+ _ = 1 / len(a) // no division by zero error here and below
+ _ = 1 / cap(a)
+ _ = uint(unsafe.Sizeof(a)) // must not be negative
+
+ var b [C.ArrayLength2]byte
+ a = b // should be valid
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src
new file mode 100644
index 0000000..5ceb96e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0a.src
@@ -0,0 +1,53 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import ()
+
+import (
+ // we can have multiple blank imports (was bug)
+ _ "math"
+ _ "net/rpc"
+ init /* ERROR "cannot import package as init" */ "fmt"
+ // reflect defines a type "flag" which shows up in the gc export data
+ "reflect"
+ . /* ERROR "imported but not used" */ "reflect"
+)
+
+import "math" /* ERROR "imported but not used" */
+import m /* ERROR "imported but not used as m" */ "math"
+import _ "math"
+
+import (
+ "math/big" /* ERROR "imported but not used" */
+ b /* ERROR "imported but not used" */ "math/big"
+ _ "math/big"
+)
+
+import "fmt"
+import f1 "fmt"
+import f2 "fmt"
+
+// reflect.flag must not be visible in this package
+type flag int
+type _ reflect.flag /* ERROR "not exported" */
+
+// imported package name may conflict with local objects
+type reflect /* ERROR "reflect already declared" */ int
+
+// dot-imported exported objects may conflict with local objects
+type Value /* ERROR "Value already declared through dot-import of package reflect" */ struct{}
+
+var _ = fmt.Println // use "fmt"
+
+func _() {
+ f1.Println() // use "fmt"
+}
+
+func _() {
+ _ = func() {
+ f2.Println() // use "fmt"
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src
new file mode 100644
index 0000000..19b55af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/importdecl0/importdecl0b.src
@@ -0,0 +1,30 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl0
+
+import "math"
+import m "math"
+
+import . "testing" // declares T in file scope
+import . /* ERROR .unsafe. imported but not used */ "unsafe"
+import . "fmt" // declares Println in file scope
+
+import (
+ "" /* ERROR invalid import path */
+ "a!b" /* ERROR invalid import path */
+ "abc\xffdef" /* ERROR invalid import path */
+)
+
+// using "math" in this file doesn't affect its use in other files
+const Pi0 = math.Pi
+const Pi1 = m.Pi
+
+type _ T // use "testing"
+
+func _() func() interface{} {
+ return func() interface{} {
+ return Println // use "fmt"
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src
new file mode 100644
index 0000000..d377c01
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1a.src
@@ -0,0 +1,22 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Test case for issue 8969.
+
+package importdecl1
+
+import "go/ast"
+import . "unsafe"
+
+var _ Pointer // use dot-imported package unsafe
+
+// Test cases for issue 23914.
+
+type A interface {
+ // Methods m1, m2 must be type-checked in this file scope
+ // even when embedded in an interface in a different
+ // file of the same package.
+ m1() ast.Node
+ m2() Pointer
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src
new file mode 100644
index 0000000..43a7bcd
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/importdecl1/importdecl1b.src
@@ -0,0 +1,11 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package importdecl1
+
+import . /* ERROR .unsafe. imported but not used */ "unsafe"
+
+type B interface {
+ A
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/init0.src b/src/cmd/compile/internal/types2/testdata/check/init0.src
new file mode 100644
index 0000000..6e8746a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/init0.src
@@ -0,0 +1,106 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init0
+
+// initialization cycles (we don't know the types)
+const (
+ s0 /* ERROR initialization cycle */ = s0
+
+ x0 /* ERROR initialization cycle */ = y0
+ y0 = x0
+
+ a0 = b0
+ b0 /* ERROR initialization cycle */ = c0
+ c0 = d0
+ d0 = b0
+)
+
+var (
+ s1 /* ERROR initialization cycle */ = s1
+
+ x1 /* ERROR initialization cycle */ = y1
+ y1 = x1
+
+ a1 = b1
+ b1 /* ERROR initialization cycle */ = c1
+ c1 = d1
+ d1 = b1
+)
+
+// initialization cycles (we know the types)
+const (
+ s2 /* ERROR initialization cycle */ int = s2
+
+ x2 /* ERROR initialization cycle */ int = y2
+ y2 = x2
+
+ a2 = b2
+ b2 /* ERROR initialization cycle */ int = c2
+ c2 = d2
+ d2 = b2
+)
+
+var (
+ s3 /* ERROR initialization cycle */ int = s3
+
+ x3 /* ERROR initialization cycle */ int = y3
+ y3 = x3
+
+ a3 = b3
+ b3 /* ERROR initialization cycle */ int = c3
+ c3 = d3
+ d3 = b3
+)
+
+// cycles via struct fields
+
+type S1 struct {
+ f int
+}
+const cx3 S1 /* ERROR invalid constant type */ = S1{cx3.f}
+var vx3 /* ERROR initialization cycle */ S1 = S1{vx3.f}
+
+// cycles via functions
+
+var x4 = x5
+var x5 /* ERROR initialization cycle */ = f1()
+func f1() int { return x5*10 }
+
+var x6, x7 /* ERROR initialization cycle */ = f2()
+var x8 = x7
+func f2() (int, int) { return f3() + f3(), 0 }
+func f3() int { return x8 }
+
+// cycles via function literals
+
+var x9 /* ERROR initialization cycle */ = func() int { return x9 }()
+
+var x10 /* ERROR initialization cycle */ = f4()
+
+func f4() int {
+ _ = func() {
+ _ = x10
+ }
+ return 0
+}
+
+// cycles via method expressions
+
+type T1 struct{}
+
+func (T1) m() bool { _ = x11; return false }
+
+var x11 /* ERROR initialization cycle */ = T1.m(T1{})
+
+// cycles via method values
+
+type T2 struct{}
+
+func (T2) m() bool { _ = x12; return false }
+
+var t1 T2
+var x12 /* ERROR initialization cycle */ = t1.m
diff --git a/src/cmd/compile/internal/types2/testdata/check/init1.src b/src/cmd/compile/internal/types2/testdata/check/init1.src
new file mode 100644
index 0000000..39ca314
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/init1.src
@@ -0,0 +1,97 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init1
+
+// issue 6683 (marked as WorkingAsIntended)
+
+type T0 struct{}
+
+func (T0) m() int { return y0 }
+
+var x0 = T0{}
+
+var y0 /* ERROR initialization cycle */ = x0.m()
+
+type T1 struct{}
+
+func (T1) m() int { return y1 }
+
+var x1 interface {
+ m() int
+} = T1{}
+
+var y1 = x1.m() // no cycle reported, x1 is of interface type
+
+// issue 6703 (modified)
+
+var x2 /* ERROR initialization cycle */ = T2.m
+
+var y2 = x2
+
+type T2 struct{}
+
+func (T2) m() int {
+ _ = y2
+ return 0
+}
+
+var x3 /* ERROR initialization cycle */ = T3.m(T3{}) // <<<< added (T3{})
+
+var y3 = x3
+
+type T3 struct{}
+
+func (T3) m() int {
+ _ = y3
+ return 0
+}
+
+var x4 /* ERROR initialization cycle */ = T4{}.m // <<<< added {}
+
+var y4 = x4
+
+type T4 struct{}
+
+func (T4) m() int {
+ _ = y4
+ return 0
+}
+
+var x5 /* ERROR initialization cycle */ = T5{}.m() // <<<< added ()
+
+var y5 = x5
+
+type T5 struct{}
+
+func (T5) m() int {
+ _ = y5
+ return 0
+}
+
+// issue 4847
+// simplified test case
+
+var x6 = f6
+var y6 /* ERROR initialization cycle */ = f6
+func f6() { _ = y6 }
+
+// full test case
+
+type (
+ E int
+ S int
+)
+
+type matcher func(s *S) E
+
+func matchList(s *S) E { return matcher(matchAnyFn)(s) }
+
+var foo = matcher(matchList)
+
+var matchAny /* ERROR initialization cycle */ = matcher(matchList)
+
+func matchAnyFn(s *S) (err E) { return matchAny(s) } \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/init2.src b/src/cmd/compile/internal/types2/testdata/check/init2.src
new file mode 100644
index 0000000..614db6c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/init2.src
@@ -0,0 +1,139 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// initialization cycles
+
+package init2
+
+// cycles through functions
+
+func f1() int { _ = x1; return 0 }
+var x1 /* ERROR initialization cycle */ = f1
+
+func f2() int { _ = x2; return 0 }
+var x2 /* ERROR initialization cycle */ = f2()
+
+// cycles through method expressions
+
+type T3 int
+func (T3) m() int { _ = x3; return 0 }
+var x3 /* ERROR initialization cycle */ = T3.m
+
+type T4 int
+func (T4) m() int { _ = x4; return 0 }
+var x4 /* ERROR initialization cycle */ = T4.m(0)
+
+type T3p int
+func (*T3p) m() int { _ = x3p; return 0 }
+var x3p /* ERROR initialization cycle */ = (*T3p).m
+
+type T4p int
+func (*T4p) m() int { _ = x4p; return 0 }
+var x4p /* ERROR initialization cycle */ = (*T4p).m(nil)
+
+// cycles through method expressions of embedded methods
+
+type T5 struct { E5 }
+type E5 int
+func (E5) m() int { _ = x5; return 0 }
+var x5 /* ERROR initialization cycle */ = T5.m
+
+type T6 struct { E6 }
+type E6 int
+func (E6) m() int { _ = x6; return 0 }
+var x6 /* ERROR initialization cycle */ = T6.m(T6{0})
+
+type T5p struct { E5p }
+type E5p int
+func (*E5p) m() int { _ = x5p; return 0 }
+var x5p /* ERROR initialization cycle */ = (*T5p).m
+
+type T6p struct { E6p }
+type E6p int
+func (*E6p) m() int { _ = x6p; return 0 }
+var x6p /* ERROR initialization cycle */ = (*T6p).m(nil)
+
+// cycles through method values
+
+type T7 int
+func (T7) m() int { _ = x7; return 0 }
+var x7 /* ERROR initialization cycle */ = T7(0).m
+
+type T8 int
+func (T8) m() int { _ = x8; return 0 }
+var x8 /* ERROR initialization cycle */ = T8(0).m()
+
+type T7p int
+func (*T7p) m() int { _ = x7p; return 0 }
+var x7p /* ERROR initialization cycle */ = new(T7p).m
+
+type T8p int
+func (*T8p) m() int { _ = x8p; return 0 }
+var x8p /* ERROR initialization cycle */ = new(T8p).m()
+
+type T7v int
+func (T7v) m() int { _ = x7v; return 0 }
+var x7var T7v
+var x7v /* ERROR initialization cycle */ = x7var.m
+
+type T8v int
+func (T8v) m() int { _ = x8v; return 0 }
+var x8var T8v
+var x8v /* ERROR initialization cycle */ = x8var.m()
+
+type T7pv int
+func (*T7pv) m() int { _ = x7pv; return 0 }
+var x7pvar *T7pv
+var x7pv /* ERROR initialization cycle */ = x7pvar.m
+
+type T8pv int
+func (*T8pv) m() int { _ = x8pv; return 0 }
+var x8pvar *T8pv
+var x8pv /* ERROR initialization cycle */ = x8pvar.m()
+
+// cycles through method values of embedded methods
+
+type T9 struct { E9 }
+type E9 int
+func (E9) m() int { _ = x9; return 0 }
+var x9 /* ERROR initialization cycle */ = T9{0}.m
+
+type T10 struct { E10 }
+type E10 int
+func (E10) m() int { _ = x10; return 0 }
+var x10 /* ERROR initialization cycle */ = T10{0}.m()
+
+type T9p struct { E9p }
+type E9p int
+func (*E9p) m() int { _ = x9p; return 0 }
+var x9p /* ERROR initialization cycle */ = new(T9p).m
+
+type T10p struct { E10p }
+type E10p int
+func (*E10p) m() int { _ = x10p; return 0 }
+var x10p /* ERROR initialization cycle */ = new(T10p).m()
+
+type T9v struct { E9v }
+type E9v int
+func (E9v) m() int { _ = x9v; return 0 }
+var x9var T9v
+var x9v /* ERROR initialization cycle */ = x9var.m
+
+type T10v struct { E10v }
+type E10v int
+func (E10v) m() int { _ = x10v; return 0 }
+var x10var T10v
+var x10v /* ERROR initialization cycle */ = x10var.m()
+
+type T9pv struct { E9pv }
+type E9pv int
+func (*E9pv) m() int { _ = x9pv; return 0 }
+var x9pvar *T9pv
+var x9pv /* ERROR initialization cycle */ = x9pvar.m
+
+type T10pv struct { E10pv }
+type E10pv int
+func (*E10pv) m() int { _ = x10pv; return 0 }
+var x10pvar *T10pv
+var x10pv /* ERROR initialization cycle */ = x10pvar.m()
diff --git a/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src
new file mode 100644
index 0000000..cf71ca1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008a.src
@@ -0,0 +1,15 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "io"
+
+type A interface {
+ io.Reader
+}
+
+func f(a A) {
+ a.Read(nil)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src
new file mode 100644
index 0000000..f132b7f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/issue25008/issue25008b.src
@@ -0,0 +1,9 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type B interface {
+ A
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.go2 b/src/cmd/compile/internal/types2/testdata/check/issues.go2
new file mode 100644
index 0000000..1763550
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.go2
@@ -0,0 +1,253 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains regression tests for bugs found.
+
+package p
+
+import "io"
+import "context"
+
+func eql[T comparable](x, y T) bool {
+ return x == y
+}
+
+func _[X comparable, Y interface{comparable; m()}]() {
+ var x X
+ var y Y
+ eql(x, y /* ERROR does not match */ ) // interfaces of different types
+ eql(x, x)
+ eql(y, y)
+ eql(y, nil /* ERROR cannot use nil as Y value in argument to eql */ )
+ eql[io /* ERROR does not implement comparable */ .Reader](nil, nil)
+}
+
+// If we have a receiver of pointer to type parameter type (below: *T)
+// we don't have any methods, like for interfaces.
+type C[T any] interface {
+ m()
+}
+
+// using type bound C
+func _[T C[T]](x *T) {
+ x.m /* ERROR x\.m undefined */ ()
+}
+
+// using an interface literal as bound
+func _[T interface{ m() }](x *T) {
+ x.m /* ERROR x\.m undefined */ ()
+}
+
+func f2[_ interface{ m1(); m2() }]() {}
+
+type T struct{}
+func (T) m1()
+func (*T) m2()
+
+func _() {
+ f2[T /* ERROR m2 has pointer receiver */ ]()
+ f2[*T]()
+}
+
+// When a type parameter is used as an argument to instantiate a parameterized
+// type with a type list constraint, all of the type argument's types in its
+// bound, but at least one (!), must be in the type list of the bound of the
+// corresponding parameterized type's type parameter.
+type T1[P interface{~uint}] struct{}
+
+func _[P any]() {
+ _ = T1[P /* ERROR P does not implement interface{~uint} */ ]{}
+}
+
+// This is the original (simplified) program causing the same issue.
+type Unsigned interface {
+ ~uint
+}
+
+type T2[U Unsigned] struct {
+ s U
+}
+
+func (u T2[U]) Add1() U {
+ return u.s + 1
+}
+
+func NewT2[U any]() T2[U /* ERROR U does not implement Unsigned */ ] {
+ return T2[U /* ERROR U does not implement Unsigned */ ]{}
+}
+
+func _() {
+ u := NewT2[string]()
+ _ = u.Add1()
+}
+
+// When we encounter an instantiated type such as Elem[T] we must
+// not "expand" the instantiation when the type to be instantiated
+// (Elem in this case) is not yet fully set up.
+type Elem[T any] struct {
+ next *Elem[T]
+ list *List[T]
+}
+
+type List[T any] struct {
+ root Elem[T]
+}
+
+func (l *List[T]) Init() {
+ l.root.next = &l.root
+}
+
+// This is the original program causing the same issue.
+type Element2[TElem any] struct {
+ next, prev *Element2[TElem]
+ list *List2[TElem]
+ Value TElem
+}
+
+type List2[TElem any] struct {
+ root Element2[TElem]
+ len int
+}
+
+func (l *List2[TElem]) Init() *List2[TElem] {
+ l.root.next = &l.root
+ l.root.prev = &l.root
+ l.len = 0
+ return l
+}
+
+// Self-recursive instantiations must work correctly.
+type A[P any] struct { _ *A[P] }
+
+type AB[P any] struct { _ *BA[P] }
+type BA[P any] struct { _ *AB[P] }
+
+// And a variation that also caused a problem with an
+// unresolved underlying type.
+type Element3[TElem any] struct {
+ next, prev *Element3[TElem]
+ list *List3[TElem]
+ Value TElem
+}
+
+func (e *Element3[TElem]) Next() *Element3[TElem] {
+ if p := e.next; e.list != nil && p != &e.list.root {
+ return p
+ }
+ return nil
+}
+
+type List3[TElem any] struct {
+ root Element3[TElem]
+ len int
+}
+
+// Infinite generic type declarations must lead to an error.
+type inf1[T any] struct{ _ inf1 /* ERROR illegal cycle */ [T] }
+type inf2[T any] struct{ inf2 /* ERROR illegal cycle */ [T] }
+
+// The implementation of conversions T(x) between integers and floating-point
+// numbers checks that both T and x have either integer or floating-point
+// type. When the type of T or x is a type parameter, the respective simple
+// predicate disjunction in the implementation was wrong because if a type list
+// contains both an integer and a floating-point type, the type parameter is
+// neither an integer nor a floating-point number.
+func convert[T1, T2 interface{~int | ~uint | ~float32}](v T1) T2 {
+ return T2(v)
+}
+
+func _() {
+ convert[int, uint](5)
+}
+
+// When testing binary operators, for +, the operand types must either be
+// both numeric, or both strings. The implementation had the same problem
+// with this check as the conversion issue above (issue #39623).
+
+func issue39623[T interface{~int | ~string}](x, y T) T {
+ return x + y
+}
+
+// Simplified, from https://go2goplay.golang.org/p/efS6x6s-9NI:
+func Sum[T interface{~int | ~string}](s []T) (sum T) {
+ for _, v := range s {
+ sum += v
+ }
+ return
+}
+
+// Assignability of an unnamed pointer type to a type parameter that
+// has a matching underlying type.
+func _[T interface{}, PT interface{~*T}] (x T) PT {
+ return &x
+}
+
+// Indexing of generic types containing type parameters in their type list:
+func at[T interface{ ~[]E }, E interface{}](x T, i int) E {
+ return x[i]
+}
+
+// A generic type inside a function acts like a named type. Its underlying
+// type is itself, its "operational type" is defined by the type list in
+// the type bound, if any.
+func _[T interface{~int}](x T) {
+ type myint int
+ var _ int = int(x)
+ var _ T = 42
+ var _ T = T(myint(42))
+}
+
+// Indexing a generic type with an array type bound checks length.
+// (Example by mdempsky@.)
+func _[T interface { ~[10]int }](x T) {
+ _ = x[9] // ok
+ _ = x[20 /* ERROR out of bounds */ ]
+}
+
+// Pointer indirection of a generic type.
+func _[T interface{ ~*int }](p T) int {
+ return *p
+}
+
+// Channel sends and receives on generic types.
+func _[T interface{ ~chan int }](ch T) int {
+ ch <- 0
+ return <- ch
+}
+
+// Calling of a generic variable.
+func _[T interface{ ~func() }](f T) {
+ f()
+ go f()
+}
+
+type F1 func()
+type F2 func()
+func _[T interface{ func()|F1|F2 }](f T) {
+ f()
+ go f()
+}
+
+// We must compare against the underlying type of type list entries
+// when checking if a constraint is satisfied by a type. The under-
+// lying type of each type list entry must be computed after the
+// interface has been instantiated as its typelist may contain a
+// type parameter that was substituted with a defined type.
+// Test case from an (originally) failing example.
+
+type sliceOf[E any] interface{ ~[]E }
+
+func append[T interface{}, S sliceOf[T], T2 interface{}](s S, t ...T2) S { panic(0) }
+
+var f func()
+var cancelSlice []context.CancelFunc
+var _ = append[context.CancelFunc, []context.CancelFunc, context.CancelFunc](cancelSlice, f)
+
+// A generic function must be instantiated with a type, not a value.
+
+func g[T any](T) T { panic(0) }
+
+var _ = g[int]
+var _ = g[nil /* ERROR is not a type */ ]
+var _ = g(0)
diff --git a/src/cmd/compile/internal/types2/testdata/check/issues.src b/src/cmd/compile/internal/types2/testdata/check/issues.src
new file mode 100644
index 0000000..42c5bc8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/issues.src
@@ -0,0 +1,371 @@
+// Copyright 2014 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package go1_17 // don't permit non-interface elements in interfaces
+
+import (
+ "fmt"
+ syn "regexp/syntax"
+ t1 "text/template"
+ t2 "html/template"
+)
+
+func issue7035() {
+ type T struct{ X int }
+ _ = func() {
+ fmt.Println() // must refer to imported fmt rather than the fmt below
+ }
+ fmt := new(T)
+ _ = fmt.X
+}
+
+func issue8066() {
+ const (
+ _ = float32(340282356779733661637539395458142568447)
+ _ = float32(340282356779733661637539395458142568448 /* ERROR cannot convert */ )
+ )
+}
+
+// Check that a missing identifier doesn't lead to a spurious error cascade.
+func issue8799a() {
+ x, ok := missing /* ERROR undeclared */ ()
+ _ = !ok
+ _ = x
+}
+
+func issue8799b(x int, ok bool) {
+ x, ok = missing /* ERROR undeclared */ ()
+ _ = !ok
+ _ = x
+}
+
+func issue9182() {
+ type Point C /* ERROR undeclared */ .Point
+ // no error for composite literal based on unknown type
+ _ = Point{x: 1, y: 2}
+}
+
+func f0() (a []int) { return }
+func f1() (a []int, b int) { return }
+func f2() (a, b []int) { return }
+
+func append_([]int, ...int) {}
+
+func issue9473(a []int, b ...int) {
+ // variadic builtin function
+ _ = append(f0())
+ _ = append(f0(), f0()...)
+ _ = append(f1())
+ _ = append(f2 /* ERROR cannot use .* in argument */ ())
+ _ = append(f2()... /* ERROR cannot use ... */ )
+ _ = append(f0(), f1 /* ERROR 2-valued f1 */ ())
+ _ = append(f0(), f2 /* ERROR 2-valued f2 */ ())
+ _ = append(f0(), f1 /* ERROR 2-valued f1 */ ()...)
+ _ = append(f0(), f2 /* ERROR 2-valued f2 */ ()...)
+
+ // variadic user-defined function
+ append_(f0())
+ append_(f0(), f0()...)
+ append_(f1())
+ append_(f2 /* ERROR cannot use .* in argument */ ())
+ append_(f2()... /* ERROR cannot use ... */ )
+ append_(f0(), f1 /* ERROR 2-valued f1 */ ())
+ append_(f0(), f2 /* ERROR 2-valued f2 */ ())
+ append_(f0(), f1 /* ERROR 2-valued f1 */ ()...)
+ append_(f0(), f2 /* ERROR 2-valued f2 */ ()...)
+}
+
+// Check that embedding a non-interface type in an interface results in a good error message.
+func issue10979() {
+ type _ interface {
+ int /* ERROR non-interface type int */
+ }
+ type T struct{}
+ type _ interface {
+ T /* ERROR non-interface type T */
+ }
+ type _ interface {
+ nosuchtype /* ERROR undeclared name: nosuchtype */
+ }
+ type _ interface {
+ fmt.Nosuchtype /* ERROR Nosuchtype not declared by package fmt */
+ }
+ type _ interface {
+ nosuchpkg /* ERROR undeclared name: nosuchpkg */ .Nosuchtype
+ }
+ type I interface {
+ I.m /* ERROR no field or method m */
+ m()
+ }
+}
+
+// issue11347
+// These should not crash.
+var a1, b1 /* ERROR cycle */ , c1 /* ERROR cycle */ b1 = 0 > 0<<""[""[c1]]>c1
+var a2, b2 /* ERROR cycle */ = 0 /* ERROR cannot initialize */ /* ERROR cannot initialize */ > 0<<""[b2]
+var a3, b3 /* ERROR cycle */ = int /* ERROR cannot initialize */ /* ERROR cannot initialize */ (1<<""[b3])
+
+// issue10260
+// Check that error messages explain reason for interface assignment failures.
+type (
+ I0 interface{}
+ I1 interface{ foo() }
+ I2 interface{ foo(x int) }
+ T0 struct{}
+ T1 struct{}
+ T2 struct{}
+)
+
+func (*T1) foo() {}
+func (*T2) foo(x int) {}
+
+func issue10260() {
+ var (
+ i0 I0
+ i1 I1
+ i2 I2
+ t0 *T0
+ t1 *T1
+ t2 *T2
+ )
+
+ var x I1
+ x = T1 /* ERROR cannot use T1{} .* as I1 value in assignment: T1 does not implement I1 \(method foo has pointer receiver\) */ {}
+ _ = x /* ERROR impossible type assertion: x\.\(T1\)\n\tT1 does not implement I1 \(method foo has pointer receiver\) */ .(T1)
+
+ T1{}.foo /* ERROR cannot call pointer method foo on T1 */ ()
+ x.Foo /* ERROR "x.Foo undefined \(type I1 has no field or method Foo, but does have foo\)" */ ()
+
+ _ = i2 /* ERROR impossible type assertion: i2\.\(\*T1\)\n\t\*T1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(x int\) */ .(*T1)
+
+ i1 = i0 /* ERROR cannot use i0 .* as I1 value in assignment: I0 does not implement I1 \(missing method foo\) */
+ i1 = t0 /* ERROR .* t0 .* as I1 .*: \*T0 does not implement I1 \(missing method foo\) */
+ i1 = i2 /* ERROR .* i2 .* as I1 .*: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(x int\)\n\t\twant foo\(\) */
+ i1 = t2 /* ERROR .* t2 .* as I1 .*: \*T2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(x int\)\n\t\twant foo\(\) */
+ i2 = i1 /* ERROR .* i1 .* as I2 .*: I1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(x int\) */
+ i2 = t1 /* ERROR .* t1 .* as I2 .*: \*T1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(x int\) */
+
+ _ = func() I1 { return i0 /* ERROR cannot use i0 .* as I1 value in return statement: I0 does not implement I1 \(missing method foo\) */ }
+ _ = func() I1 { return t0 /* ERROR .* t0 .* as I1 .*: \*T0 does not implement I1 \(missing method foo\) */ }
+ _ = func() I1 { return i2 /* ERROR .* i2 .* as I1 .*: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(x int\)\n\t\twant foo\(\) */ }
+ _ = func() I1 { return t2 /* ERROR .* t2 .* as I1 .*: \*T2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(x int\)\n\t\twant foo\(\) */ }
+ _ = func() I2 { return i1 /* ERROR .* i1 .* as I2 .*: I1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(x int\) */ }
+ _ = func() I2 { return t1 /* ERROR .* t1 .* as I2 .*: \*T1 does not implement I2 \(wrong type for method foo\)\n\t\thave foo\(\)\n\t\twant foo\(x int\) */ }
+
+ // a few more - less exhaustive now
+
+ f := func(I1, I2){}
+ f(i0 /* ERROR missing method foo */ , i1 /* ERROR wrong type for method foo */ )
+
+ _ = [...]I1{i0 /* ERROR cannot use i0 .* as I1 value in array or slice literal: I0 does not implement I1 \(missing method foo\) */ }
+ _ = [...]I1{i2 /* ERROR cannot use i2 .* as I1 value in array or slice literal: I2 does not implement I1 \(wrong type for method foo\)\n\t\thave foo\(x int\)\n\t\twant foo\(\) */ }
+ _ = []I1{i0 /* ERROR missing method foo */ }
+ _ = []I1{i2 /* ERROR wrong type for method foo */ }
+ _ = map[int]I1{0: i0 /* ERROR missing method foo */ }
+ _ = map[int]I1{0: i2 /* ERROR wrong type for method foo */ }
+
+ make(chan I1) <- i0 /* ERROR missing method foo */
+ make(chan I1) <- i2 /* ERROR wrong type for method foo */
+}
+
+// Check that constants representable as integers are in integer form
+// before being used in operations that are only defined on integers.
+func issue14229() {
+ // from the issue
+ const _ = int64(-1<<63) % 1e6
+
+ // related
+ const (
+ a int = 3
+ b = 4.0
+ _ = a / b
+ _ = a % b
+ _ = b / a
+ _ = b % a
+ )
+}
+
+// Check that in an n:1 variable declaration with type and initialization
+// expression the type is distributed to all variables of the lhs before
+// the initialization expression assignment is checked.
+func issue15755() {
+ // from issue
+ var i interface{}
+ type b bool
+ var x, y b = i.(b)
+ _ = x == y
+
+ // related: we should see an error since the result of f1 is ([]int, int)
+ var u, v []int = f1 /* ERROR cannot use f1 */ ()
+ _ = u
+ _ = v
+}
+
+// Test that we don't get "declared but not used"
+// errors in the context of invalid/C objects.
+func issue20358() {
+ var F C /* ERROR "undeclared" */ .F
+ var A C /* ERROR "undeclared" */ .A
+ var S C /* ERROR "undeclared" */ .S
+ type T C /* ERROR "undeclared" */ .T
+ type P C /* ERROR "undeclared" */ .P
+
+ // these variables must be "used" even though
+ // the LHS expressions/types below in which
+ // context they are used are unknown/invalid
+ var f, a, s1, s2, s3, t, p int
+
+ _ = F(f)
+ _ = A[a]
+ _ = S[s1:s2:s3]
+ _ = T{t}
+ _ = P{f: p}
+}
+
+// Test that we don't declare lhs variables in short variable
+// declarations before we type-check function literals on the
+// rhs.
+func issue24026() {
+ f := func() int { f(0) /* must refer to outer f */; return 0 }
+ _ = f
+
+ _ = func() {
+ f := func() { _ = f() /* must refer to outer f */ }
+ _ = f
+ }
+
+ // b and c must not be visible inside function literal
+ a := 0
+ a, b, c := func() (int, int, int) {
+ return a, b /* ERROR undeclared */ , c /* ERROR undeclared */
+ }()
+ _, _ = b, c
+}
+
+func f(int) {} // for issue24026
+
+// Test that we don't report a "missing return statement" error
+// (due to incorrect context when type-checking interfaces).
+func issue24140(x interface{}) int {
+ switch x.(type) {
+ case interface{}:
+ return 0
+ default:
+ panic(0)
+ }
+}
+
+// Test that we don't crash when the 'if' condition is missing.
+func issue25438() {
+ if { /* ERROR missing condition */ }
+ if x := 0; /* ERROR missing condition */ { _ = x }
+ if
+ { /* ERROR missing condition */ }
+}
+
+// Test that we can embed alias type names in interfaces.
+type issue25301 interface {
+ E
+}
+
+type E = interface {
+ m()
+}
+
+// Test case from issue.
+// cmd/compile reports a cycle as well.
+type issue25301b /* ERROR cycle */ = interface {
+ m() interface{ issue25301b }
+}
+
+type issue25301c interface {
+ notE // ERROR non-interface type struct\{\}
+}
+
+type notE = struct{}
+
+// Test that method declarations don't introduce artificial cycles
+// (issue #26124).
+const CC TT = 1
+type TT int
+func (TT) MM() [CC]TT
+
+// Reduced test case from issue #26124.
+const preloadLimit LNumber = 128
+type LNumber float64
+func (LNumber) assertFunction() *LFunction
+type LFunction struct {
+ GFunction LGFunction
+}
+type LGFunction func(*LState)
+type LState struct {
+ reg *registry
+}
+type registry struct {
+ alloc *allocator
+}
+type allocator struct {
+ _ [int(preloadLimit)]int
+}
+
+// Test that we don't crash when type-checking composite literals
+// containing errors in the type.
+var issue27346 = [][n /* ERROR undeclared */ ]int{
+ 0: {},
+}
+
+var issue22467 = map[int][... /* ERROR invalid use of ... */ ]int{0: {}}
+
+// Test that invalid use of ... in parameter lists is recognized
+// (issue #28281).
+func issue28281a(int, int, ...int)
+func issue28281b(a, b int, c ...int)
+func issue28281c(a, b, c ... /* ERROR can only use ... with final parameter */ int)
+func issue28281d(... /* ERROR can only use ... with final parameter */ int, int)
+func issue28281e(a, b, c ... /* ERROR can only use ... with final parameter */ int, d int)
+func issue28281f(... /* ERROR can only use ... with final parameter */ int, ... /* ERROR can only use ... with final parameter */ int, int)
+func (... /* ERROR can only use ... with final parameter in list */ TT) f()
+func issue28281g() (... /* ERROR can only use ... with final parameter in list */ TT)
+
+// Issue #26234: Make various field/method lookup errors easier to read by matching cmd/compile's output
+func issue26234a(f *syn.Prog) {
+ // The error message below should refer to the actual package name (syntax)
+ // not the local package name (syn).
+ f.foo /* ERROR f\.foo undefined \(type \*syntax\.Prog has no field or method foo\) */
+}
+
+type T struct {
+ x int
+ E1
+ E2
+}
+
+type E1 struct{ f int }
+type E2 struct{ f int }
+
+func issue26234b(x T) {
+ _ = x.f /* ERROR ambiguous selector x.f */
+}
+
+func issue26234c() {
+ T.x /* ERROR T.x undefined \(type T has no method x\) */ ()
+}
+
+func issue35895() {
+ // T is defined in this package, don't qualify its name with the package name.
+ var _ T = 0 // ERROR cannot use 0 \(untyped int constant\) as T
+
+ // There is only one package with name syntax imported, only use the (global) package name in error messages.
+ var _ *syn.Prog = 0 // ERROR cannot use 0 \(untyped int constant\) as \*syntax.Prog
+
+ // Because both t1 and t2 have the same global package name (template),
+ // qualify packages with full path name in this case.
+ var _ t1.Template = t2 /* ERROR cannot use .* \(value of type .html/template.\.Template\) as .text/template.\.Template */ .Template{}
+}
+
+func issue42989(s uint) {
+ var m map[int]string
+ delete(m, 1<<s)
+ delete(m, 1.<<s)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/labels.src b/src/cmd/compile/internal/types2/testdata/check/labels.src
new file mode 100644
index 0000000..9f42406
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/labels.src
@@ -0,0 +1,207 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is a modified concatenation of the files
+// $GOROOT/test/label.go and $GOROOT/test/label1.go.
+
+package labels
+
+var x int
+
+func f0() {
+L1 /* ERROR "label L1 declared but not used" */ :
+ for {
+ }
+L2 /* ERROR "label L2 declared but not used" */ :
+ select {
+ }
+L3 /* ERROR "label L3 declared but not used" */ :
+ switch {
+ }
+L4 /* ERROR "label L4 declared but not used" */ :
+ if true {
+ }
+L5 /* ERROR "label L5 declared but not used" */ :
+ f0()
+L6:
+ f0()
+L6 /* ERROR "label L6 already declared" */ :
+ f0()
+ if x == 20 {
+ goto L6
+ }
+
+L7:
+ for {
+ break L7
+ break L8 /* ERROR "invalid break label L8" */
+ }
+
+// A label must be directly associated with a switch, select, or
+// for statement; it cannot be the label of a labeled statement.
+
+L7a /* ERROR "declared but not used" */ : L7b:
+ for {
+ break L7a /* ERROR "invalid break label L7a" */
+ continue L7a /* ERROR "invalid continue label L7a" */
+ continue L7b
+ }
+
+L8:
+ for {
+ if x == 21 {
+ continue L8
+ continue L7 /* ERROR "invalid continue label L7" */
+ }
+ }
+
+L9:
+ switch {
+ case true:
+ break L9
+ defalt /* ERROR "label defalt declared but not used" */ :
+ }
+
+L10:
+ select {
+ default:
+ break L10
+ break L9 /* ERROR "invalid break label L9" */
+ }
+
+ goto L10a
+L10a: L10b:
+ select {
+ default:
+ break L10a /* ERROR "invalid break label L10a" */
+ break L10b
+ continue L10b /* ERROR "invalid continue label L10b" */
+ }
+}
+
+func f1() {
+L1:
+ for {
+ if x == 0 {
+ break L1
+ }
+ if x == 1 {
+ continue L1
+ }
+ goto L1
+ }
+
+L2:
+ select {
+ default:
+ if x == 0 {
+ break L2
+ }
+ if x == 1 {
+ continue L2 /* ERROR "invalid continue label L2" */
+ }
+ goto L2
+ }
+
+L3:
+ switch {
+ case x > 10:
+ if x == 11 {
+ break L3
+ }
+ if x == 12 {
+ continue L3 /* ERROR "invalid continue label L3" */
+ }
+ goto L3
+ }
+
+L4:
+ if true {
+ if x == 13 {
+ break L4 /* ERROR "invalid break label L4" */
+ }
+ if x == 14 {
+ continue L4 /* ERROR "invalid continue label L4" */
+ }
+ if x == 15 {
+ goto L4
+ }
+ }
+
+L5:
+ f1()
+ if x == 16 {
+ break L5 /* ERROR "invalid break label L5" */
+ }
+ if x == 17 {
+ continue L5 /* ERROR "invalid continue label L5" */
+ }
+ if x == 18 {
+ goto L5
+ }
+
+ for {
+ if x == 19 {
+ break L1 /* ERROR "invalid break label L1" */
+ }
+ if x == 20 {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ if x == 21 {
+ goto L1
+ }
+ }
+}
+
+// Additional tests not in the original files.
+
+func f2() {
+L1 /* ERROR "label L1 declared but not used" */ :
+ if x == 0 {
+ for {
+ continue L1 /* ERROR "invalid continue label L1" */
+ }
+ }
+}
+
+func f3() {
+L1:
+L2:
+L3:
+ for {
+ break L1 /* ERROR "invalid break label L1" */
+ break L2 /* ERROR "invalid break label L2" */
+ break L3
+ continue L1 /* ERROR "invalid continue label L1" */
+ continue L2 /* ERROR "invalid continue label L2" */
+ continue L3
+ goto L1
+ goto L2
+ goto L3
+ }
+}
+
+// Blank labels are never declared.
+
+func f4() {
+_:
+_: // multiple blank labels are ok
+ goto _ /* ERROR "label _ not declared" */
+}
+
+func f5() {
+_:
+ for {
+ break _ /* ERROR "invalid break label _" */
+ continue _ /* ERROR "invalid continue label _" */
+ }
+}
+
+func f6() {
+_:
+ switch {
+ default:
+ break _ /* ERROR "invalid break label _" */
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/linalg.go2 b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
new file mode 100644
index 0000000..f02e773
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/linalg.go2
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package linalg
+
+// Numeric is a type bound that matches any numeric type.
+// It would likely be in a constraints package in the standard library.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64 |
+ ~complex64 | ~complex128
+}
+
+func DotProduct[T Numeric](s1, s2 []T) T {
+ if len(s1) != len(s2) {
+ panic("DotProduct: slices of unequal length")
+ }
+ var r T
+ for i := range s1 {
+ r += s1[i] * s2[i]
+ }
+ return r
+}
+
+// NumericAbs matches numeric types with an Abs method.
+type NumericAbs[T any] interface {
+ Numeric
+
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func AbsDifference[T NumericAbs[T]](a, b T) T {
+ d := a - b
+ return d.Abs()
+}
+
+// OrderedNumeric is a type bound that matches numeric types that support the < operator.
+type OrderedNumeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr |
+ ~float32 | ~float64
+}
+
+// Complex is a type bound that matches the two complex types, which do not have a < operator.
+type Complex interface {
+ ~complex64 | ~complex128
+}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // OrderedAbs is a helper type that defines an Abs method for
+// // ordered numeric types.
+// type OrderedAbs[T OrderedNumeric] T
+//
+// func (a OrderedAbs[T]) Abs() OrderedAbs[T] {
+// if a < 0 {
+// return -a
+// }
+// return a
+// }
+//
+// // ComplexAbs is a helper type that defines an Abs method for
+// // complex types.
+// type ComplexAbs[T Complex] T
+//
+// func (a ComplexAbs[T]) Abs() ComplexAbs[T] {
+// r := float64(real(a))
+// i := float64(imag(a))
+// d := math.Sqrt(r * r + i * i)
+// return ComplexAbs[T](complex(d, 0))
+// }
+//
+// func OrderedAbsDifference[T OrderedNumeric](a, b T) T {
+// return T(AbsDifference(OrderedAbs[T](a), OrderedAbs[T](b)))
+// }
+//
+// func ComplexAbsDifference[T Complex](a, b T) T {
+// return T(AbsDifference(ComplexAbs[T](a), ComplexAbs[T](b)))
+// }
diff --git a/src/cmd/compile/internal/types2/testdata/check/literals.src b/src/cmd/compile/internal/types2/testdata/check/literals.src
new file mode 100644
index 0000000..494a465
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/literals.src
@@ -0,0 +1,111 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file tests various representations of literals
+// and compares them with literals or constant expressions
+// of equal values.
+
+package literals
+
+func _() {
+ // 0-octals
+ assert(0_123 == 0123)
+ assert(0123_456 == 0123456)
+
+ // decimals
+ assert(1_234 == 1234)
+ assert(1_234_567 == 1234567)
+
+ // hexadecimals
+ assert(0X_0 == 0)
+ assert(0X_1234 == 0x1234)
+ assert(0X_CAFE_f00d == 0xcafef00d)
+
+ // octals
+ assert(0o0 == 0)
+ assert(0o1234 == 01234)
+ assert(0o01234567 == 01234567)
+
+ assert(0O0 == 0)
+ assert(0O1234 == 01234)
+ assert(0O01234567 == 01234567)
+
+ assert(0o_0 == 0)
+ assert(0o_1234 == 01234)
+ assert(0o0123_4567 == 01234567)
+
+ assert(0O_0 == 0)
+ assert(0O_1234 == 01234)
+ assert(0O0123_4567 == 01234567)
+
+ // binaries
+ assert(0b0 == 0)
+ assert(0b1011 == 0xb)
+ assert(0b00101101 == 0x2d)
+
+ assert(0B0 == 0)
+ assert(0B1011 == 0xb)
+ assert(0B00101101 == 0x2d)
+
+ assert(0b_0 == 0)
+ assert(0b10_11 == 0xb)
+ assert(0b_0010_1101 == 0x2d)
+
+ // decimal floats
+ assert(1_2_3. == 123.)
+ assert(0_123. == 123.)
+
+ assert(0_0e0 == 0.)
+ assert(1_2_3e0 == 123.)
+ assert(0_123e0 == 123.)
+
+ assert(0e-0_0 == 0.)
+ assert(1_2_3E+0 == 123.)
+ assert(0123E1_2_3 == 123e123)
+
+ assert(0.e+1 == 0.)
+ assert(123.E-1_0 == 123e-10)
+ assert(01_23.e123 == 123e123)
+
+ assert(.0e-1 == .0)
+ assert(.123E+10 == .123e10)
+ assert(.0123E123 == .0123e123)
+
+ assert(1_2_3.123 == 123.123)
+ assert(0123.01_23 == 123.0123)
+
+ // hexadecimal floats
+ assert(0x0.p+0 == 0.)
+ assert(0Xdeadcafe.p-10 == 0xdeadcafe/1024.0)
+ assert(0x1234.P84 == 0x1234000000000000000000000)
+
+ assert(0x.1p-0 == 1./16)
+ assert(0X.deadcafep4 == 1.0*0xdeadcafe/0x10000000)
+ assert(0x.1234P+12 == 1.0*0x1234/0x10)
+
+ assert(0x0p0 == 0.)
+ assert(0Xdeadcafep+1 == 0x1bd5b95fc)
+ assert(0x1234P-10 == 0x1234/1024.0)
+
+ assert(0x0.0p0 == 0.)
+ assert(0Xdead.cafep+1 == 1.0*0x1bd5b95fc/0x10000)
+ assert(0x12.34P-10 == 1.0*0x1234/0x40000)
+
+ assert(0Xdead_cafep+1 == 0xdeadcafep+1)
+ assert(0x_1234P-10 == 0x1234p-10)
+
+ assert(0X_dead_cafe.p-10 == 0xdeadcafe.p-10)
+ assert(0x12_34.P1_2_3 == 0x1234.p123)
+
+ assert(1_234i == 1234i)
+ assert(1_234_567i == 1234567i)
+
+ assert(0.i == 0i)
+ assert(123.i == 123i)
+ assert(0123.i == 123i)
+
+ assert(0.e+1i == 0i)
+ assert(123.E-1_0i == 123e-10i)
+ assert(01_23.e123i == 123e123i)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/main.go2 b/src/cmd/compile/internal/types2/testdata/check/main.go2
new file mode 100644
index 0000000..395e3bf
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/main.go2
@@ -0,0 +1,7 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main [T /* ERROR "func main must have no type parameters" */ any]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/check/main.src b/src/cmd/compile/internal/types2/testdata/check/main.src
new file mode 100644
index 0000000..f892938
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/main.src
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main()
+func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ (int)
+func /* ERROR "no arguments and no return values" */ main /* ERROR redeclared */ () int
diff --git a/src/cmd/compile/internal/types2/testdata/check/map.go2 b/src/cmd/compile/internal/types2/testdata/check/map.go2
new file mode 100644
index 0000000..814d953
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/map.go2
@@ -0,0 +1,113 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// TODO(gri) fix imports for tests
+import "chans" // ERROR could not import
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans.Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans.Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/map2.go2 b/src/cmd/compile/internal/types2/testdata/check/map2.go2
new file mode 100644
index 0000000..be2c49f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/map2.go2
@@ -0,0 +1,146 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is like map.go2, but instead of importing chans, it contains
+// the necessary functionality at the end of the file.
+
+// Package orderedmap provides an ordered map, implemented as a binary tree.
+package orderedmap
+
+// Map is an ordered map.
+type Map[K, V any] struct {
+ root *node[K, V]
+ compare func(K, K) int
+}
+
+// node is the type of a node in the binary tree.
+type node[K, V any] struct {
+ key K
+ val V
+ left, right *node[K, V]
+}
+
+// New returns a new map.
+func New[K, V any](compare func(K, K) int) *Map[K, V] {
+ return &Map[K, V]{compare: compare}
+}
+
+// find looks up key in the map, and returns either a pointer
+// to the node holding key, or a pointer to the location where
+// such a node would go.
+func (m *Map[K, V]) find(key K) **node[K, V] {
+ pn := &m.root
+ for *pn != nil {
+ switch cmp := m.compare(key, (*pn).key); {
+ case cmp < 0:
+ pn = &(*pn).left
+ case cmp > 0:
+ pn = &(*pn).right
+ default:
+ return pn
+ }
+ }
+ return pn
+}
+
+// Insert inserts a new key/value into the map.
+// If the key is already present, the value is replaced.
+// Returns true if this is a new key, false if already present.
+func (m *Map[K, V]) Insert(key K, val V) bool {
+ pn := m.find(key)
+ if *pn != nil {
+ (*pn).val = val
+ return false
+ }
+ *pn = &node[K, V]{key: key, val: val}
+ return true
+}
+
+// Find returns the value associated with a key, or zero if not present.
+// The found result reports whether the key was found.
+func (m *Map[K, V]) Find(key K) (V, bool) {
+ pn := m.find(key)
+ if *pn == nil {
+ var zero V // see the discussion of zero values, above
+ return zero, false
+ }
+ return (*pn).val, true
+}
+
+// keyValue is a pair of key and value used when iterating.
+type keyValue[K, V any] struct {
+ key K
+ val V
+}
+
+// InOrder returns an iterator that does an in-order traversal of the map.
+func (m *Map[K, V]) InOrder() *Iterator[K, V] {
+ sender, receiver := chans_Ranger[keyValue[K, V]]()
+ var f func(*node[K, V]) bool
+ f = func(n *node[K, V]) bool {
+ if n == nil {
+ return true
+ }
+ // Stop sending values if sender.Send returns false,
+ // meaning that nothing is listening at the receiver end.
+ return f(n.left) &&
+ sender.Send(keyValue[K, V]{n.key, n.val}) &&
+ f(n.right)
+ }
+ go func() {
+ f(m.root)
+ sender.Close()
+ }()
+ return &Iterator[K, V]{receiver}
+}
+
+// Iterator is used to iterate over the map.
+type Iterator[K, V any] struct {
+ r *chans_Receiver[keyValue[K, V]]
+}
+
+// Next returns the next key and value pair, and a boolean indicating
+// whether they are valid or whether we have reached the end.
+func (it *Iterator[K, V]) Next() (K, V, bool) {
+ keyval, ok := it.r.Next()
+ if !ok {
+ var zerok K
+ var zerov V
+ return zerok, zerov, false
+ }
+ return keyval.key, keyval.val, true
+}
+
+// chans
+
+func chans_Ranger[T any]() (*chans_Sender[T], *chans_Receiver[T]) { panic(0) }
+
+// A sender is used to send values to a Receiver.
+type chans_Sender[T any] struct {
+ values chan<- T
+ done <-chan bool
+}
+
+func (s *chans_Sender[T]) Send(v T) bool {
+ select {
+ case s.values <- v:
+ return true
+ case <-s.done:
+ return false
+ }
+}
+
+func (s *chans_Sender[T]) Close() {
+ close(s.values)
+}
+
+type chans_Receiver[T any] struct {
+ values <-chan T
+ done chan<- bool
+}
+
+func (r *chans_Receiver[T]) Next() (T, bool) {
+ v, ok := <-r.values
+ return v, ok
+}
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/methodsets.src b/src/cmd/compile/internal/types2/testdata/check/methodsets.src
new file mode 100644
index 0000000..b0eb14c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/methodsets.src
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package methodsets
+
+type T0 struct {}
+
+func (T0) v0() {}
+func (*T0) p0() {}
+
+type T1 struct {} // like T0 with different method names
+
+func (T1) v1() {}
+func (*T1) p1() {}
+
+type T2 interface {
+ v2()
+ p2()
+}
+
+type T3 struct {
+ T0
+ *T1
+ T2
+}
+
+// Method expressions
+func _() {
+ var (
+ _ func(T0) = T0.v0
+ _ = T0.p0 /* ERROR "cannot call pointer method p0 on T0" */
+
+ _ func (*T0) = (*T0).v0
+ _ func (*T0) = (*T0).p0
+
+ // T1 is like T0
+
+ _ func(T2) = T2.v2
+ _ func(T2) = T2.p2
+
+ _ func(T3) = T3.v0
+ _ func(T3) = T3.p0 /* ERROR "cannot call pointer method p0 on T3" */
+ _ func(T3) = T3.v1
+ _ func(T3) = T3.p1
+ _ func(T3) = T3.v2
+ _ func(T3) = T3.p2
+
+ _ func(*T3) = (*T3).v0
+ _ func(*T3) = (*T3).p0
+ _ func(*T3) = (*T3).v1
+ _ func(*T3) = (*T3).p1
+ _ func(*T3) = (*T3).v2
+ _ func(*T3) = (*T3).p2
+ )
+}
+
+// Method values with addressable receivers
+func _() {
+ var (
+ v0 T0
+ _ func() = v0.v0
+ _ func() = v0.p0
+ )
+
+ var (
+ p0 *T0
+ _ func() = p0.v0
+ _ func() = p0.p0
+ )
+
+ // T1 is like T0
+
+ var (
+ v2 T2
+ _ func() = v2.v2
+ _ func() = v2.p2
+ )
+
+ var (
+ v4 T3
+ _ func() = v4.v0
+ _ func() = v4.p0
+ _ func() = v4.v1
+ _ func() = v4.p1
+ _ func() = v4.v2
+ _ func() = v4.p2
+ )
+
+ var (
+ p4 *T3
+ _ func() = p4.v0
+ _ func() = p4.p0
+ _ func() = p4.v1
+ _ func() = p4.p1
+ _ func() = p4.v2
+ _ func() = p4.p2
+ )
+}
+
+// Method calls with addressable receivers
+func _() {
+ var v0 T0
+ v0.v0()
+ v0.p0()
+
+ var p0 *T0
+ p0.v0()
+ p0.p0()
+
+ // T1 is like T0
+
+ var v2 T2
+ v2.v2()
+ v2.p2()
+
+ var v4 T3
+ v4.v0()
+ v4.p0()
+ v4.v1()
+ v4.p1()
+ v4.v2()
+ v4.p2()
+
+ var p4 *T3
+ p4.v0()
+ p4.p0()
+ p4.v1()
+ p4.p1()
+ p4.v2()
+ p4.p2()
+}
+
+// Method values with value receivers
+func _() {
+ var (
+ _ func() = T0{}.v0
+ _ func() = T0{}.p0 /* ERROR "cannot call pointer method p0 on T0" */
+
+ _ func() = (&T0{}).v0
+ _ func() = (&T0{}).p0
+
+ // T1 is like T0
+
+ // no values for T2
+
+ _ func() = T3{}.v0
+ _ func() = T3{}.p0 /* ERROR "cannot call pointer method p0 on T3" */
+ _ func() = T3{}.v1
+ _ func() = T3{}.p1
+ _ func() = T3{}.v2
+ _ func() = T3{}.p2
+
+ _ func() = (&T3{}).v0
+ _ func() = (&T3{}).p0
+ _ func() = (&T3{}).v1
+ _ func() = (&T3{}).p1
+ _ func() = (&T3{}).v2
+ _ func() = (&T3{}).p2
+ )
+}
+
+// Method calls with value receivers
+func _() {
+ T0{}.v0()
+ T0{}.p0 /* ERROR "cannot call pointer method p0 on T0" */ ()
+
+ (&T0{}).v0()
+ (&T0{}).p0()
+
+ // T1 is like T0
+
+ // no values for T2
+
+ T3{}.v0()
+ T3{}.p0 /* ERROR "cannot call pointer method p0 on T3" */ ()
+ T3{}.v1()
+ T3{}.p1()
+ T3{}.v2()
+ T3{}.p2()
+
+ (&T3{}).v0()
+ (&T3{}).p0()
+ (&T3{}).v1()
+ (&T3{}).p1()
+ (&T3{}).v2()
+ (&T3{}).p2()
+}
+
+// *T has no methods if T is an interface type
+func issue5918() {
+ var (
+ err error
+ _ = err.Error()
+ _ func() string = err.Error
+ _ func(error) string = error.Error
+
+ perr = &err
+ _ = perr.Error /* ERROR "type \*error is pointer to interface, not interface" */ ()
+ _ func() string = perr.Error /* ERROR "type \*error is pointer to interface, not interface" */
+ _ func(*error) string = (*error).Error /* ERROR "type \*error is pointer to interface, not interface" */
+ )
+
+ type T *interface{ m() int }
+ var (
+ x T
+ _ = (*x).m()
+ _ = (*x).m
+
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */ ()
+ _ = x.m /* ERROR "type T is pointer to interface, not interface" */
+ _ = T.m /* ERROR "type T is pointer to interface, not interface" */
+ )
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2
new file mode 100644
index 0000000..1b40659
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/mtypeparams.go2
@@ -0,0 +1,52 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// If types2.Config.AcceptMethodTypeParams is set,
+// the type checker accepts methods that have their
+// own type parameter list.
+
+package p
+
+type S struct{}
+
+func (S) m[T any](v T) {}
+
+// TODO(gri) Once we collect interface method type parameters
+// in the parser, we can enable these tests again.
+/*
+type I interface {
+ m[T any](v T)
+}
+
+type J interface {
+ m[T any](v T)
+}
+
+var _ I = S{}
+var _ I = J(nil)
+
+type C interface{ n() }
+
+type Sc struct{}
+
+func (Sc) m[T C](v T)
+
+type Ic interface {
+ m[T C](v T)
+}
+
+type Jc interface {
+ m[T C](v T)
+}
+
+var _ Ic = Sc{}
+var _ Ic = Jc(nil)
+
+// TODO(gri) These should fail because the constraints don't match.
+var _ I = Sc{}
+var _ I = Jc(nil)
+
+var _ Ic = S{}
+var _ Ic = J(nil)
+*/
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/check/shifts.src b/src/cmd/compile/internal/types2/testdata/check/shifts.src
new file mode 100644
index 0000000..37bc84c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/shifts.src
@@ -0,0 +1,398 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package shifts
+
+func shifts0() {
+ // basic constant shifts
+ const (
+ s = 10
+ _ = 0<<0
+ _ = 1<<s
+ _ = 1<<- /* ERROR "negative shift count" */ 1
+ // For the test below we may decide to convert to int
+ // rather than uint and then report a negative shift
+ // count instead, which might be a better error. The
+ // (minor) difference is that this would restrict the
+ // shift count range by half (from all uint values to
+ // the positive int values).
+ // This depends on the exact spec wording which is not
+ // done yet.
+ // TODO(gri) revisit and adjust when spec change is done
+ _ = 1<<- /* ERROR "negative shift count" */ 1.0
+ _ = 1<<1075 /* ERROR "invalid shift" */
+ _ = 2.0<<1
+ _ = 1<<1.0
+ _ = 1<<(1+0i)
+
+ _ int = 2<<s
+ _ float32 = 2<<s
+ _ complex64 = 2<<s
+
+ _ int = 2.0<<s
+ _ float32 = 2.0<<s
+ _ complex64 = 2.0<<s
+
+ _ int = 'a'<<s
+ _ float32 = 'a'<<s
+ _ complex64 = 'a'<<s
+ )
+}
+
+func shifts1() {
+ // basic non-constant shifts
+ var (
+ i int
+ u uint
+
+ _ = 1<<0
+ _ = 1<<i
+ _ = 1<<u
+ _ = 1<<"foo" /* ERROR "cannot convert" */
+ _ = i<<0
+ _ = i<<- /* ERROR "negative shift count" */ 1
+ _ = i<<1.0
+ _ = 1<<(1+0i)
+ _ = 1 /* ERROR "overflows" */ <<100
+
+ _ uint = 1 << 0
+ _ uint = 1 << u
+ _ float32 = 1 /* ERROR "must be integer" */ << u
+
+ // issue #14822
+ _ = 1<<( /* ERROR "overflows uint" */ 1<<64)
+ _ = 1<<( /* ERROR "invalid shift count" */ 1<<64-1)
+
+ // issue #43697
+ _ = u<<( /* ERROR "overflows uint" */ 1<<64)
+ _ = u<<(1<<64-1)
+ )
+}
+
+func shifts2() {
+ // from the spec
+ var (
+ s uint = 33
+ i = 1<<s // 1 has type int
+ j int32 = 1<<s // 1 has type int32; j == 0
+ k = uint64(1<<s) // 1 has type uint64; k == 1<<33
+ m int = 1.0<<s // 1.0 has type int
+ n = 1.0<<s != i // 1.0 has type int; n == false if ints are 32bits in size
+ o = 1<<s == 2<<s // 1 and 2 have type int; o == true if ints are 32bits in size
+ p = 1<<s == 1<<33 // illegal if ints are 32bits in size: 1 has type int, but 1<<33 overflows int
+ u = 1.0 /* ERROR "must be integer" */ <<s // illegal: 1.0 has type float64, cannot shift
+ u1 = 1.0 /* ERROR "must be integer" */ <<s != 0 // illegal: 1.0 has type float64, cannot shift
+ u2 = 1 /* ERROR "must be integer" */ <<s != 1.0 // illegal: 1 has type float64, cannot shift
+ v float32 = 1 /* ERROR "must be integer" */ <<s // illegal: 1 has type float32, cannot shift
+ w int64 = 1.0<<33 // 1.0<<33 is a constant shift expression
+ )
+ _, _, _, _, _, _, _, _, _, _, _, _ = i, j, k, m, n, o, p, u, u1, u2, v, w
+}
+
+func shifts3(a int16, b float32) {
+ // random tests
+ var (
+ s uint = 11
+ u = 1 /* ERROR "must be integer" */ <<s + 1.0
+ v complex128 = 1 /* ERROR "must be integer" */ << s + 1.0 /* ERROR "must be integer" */ << s + 1
+ )
+ x := 1.0 /* ERROR "must be integer" */ <<s + 1
+ shifts3(1.0 << s, 1 /* ERROR "must be integer" */ >> s)
+ _, _, _ = u, v, x
+}
+
+func shifts4() {
+ // shifts in comparisons w/ untyped operands
+ var s uint
+
+ _ = 1<<s == 1
+ _ = 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s == 1.
+
+ _ = 1<<s + 1 == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1 == 1.
+ _ = 1 /* ERROR "integer" */ <<s + 1. == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1. == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1 == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1 == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1. == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1. == 1.
+
+ _ = 1<<s == 1<<s
+ _ = 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s
+
+ _ = 1<<s + 1<<s == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1.
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1.
+
+ _ = 1<<s + 1<<s == 1<<s + 1<<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1 /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1 /* ERROR "integer" */ <<s
+ _ = 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s == 1. /* ERROR "integer" */ <<s + 1. /* ERROR "integer" */ <<s
+}
+
+func shifts5() {
+ // shifts in comparisons w/ typed operands
+ var s uint
+ var x int
+
+ _ = 1<<s == x
+ _ = 1.<<s == x
+ _ = 1.1 /* ERROR "int" */ <<s == x
+
+ _ = 1<<s + x == 1
+ _ = 1<<s + x == 1.
+ _ = 1<<s + x == 1.1 /* ERROR "int" */
+ _ = 1.<<s + x == 1
+ _ = 1.<<s + x == 1.
+ _ = 1.<<s + x == 1.1 /* ERROR "int" */
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1.
+ _ = 1.1 /* ERROR "int" */ <<s + x == 1.1
+
+ _ = 1<<s == x<<s
+ _ = 1.<<s == x<<s
+ _ = 1.1 /* ERROR "int" */ <<s == x<<s
+}
+
+func shifts6() {
+ // shifts as operands in non-arithmetic operations and as arguments
+ var a [10]int
+ var s uint
+
+ _ = a[1<<s]
+ _ = a[1.0]
+ _ = a[1.0<<s]
+
+ _ = make([]int, 1.0)
+ _ = make([]int, 1.0<<s)
+ _ = make([]int, 1.1 /* ERROR "must be integer" */ <<s)
+
+ _ = float32(1)
+ _ = float32(1 /* ERROR "must be integer" */ <<s)
+ _ = float32(1.0)
+ _ = float32(1.0 /* ERROR "must be integer" */ <<s)
+ _ = float32(1.1 /* ERROR "must be integer" */ <<s)
+
+ // TODO(gri) port fixes from go/types
+ // _ = int32(0x80000000 /* ERROR "overflows int32" */ << s)
+ // TODO(rfindley) Eliminate the redundant error here.
+ // _ = int32(( /* ERROR "truncated to int32" */ 0x80000000 /* ERROR "truncated to int32" */ + 0i) << s)
+
+ _ = int(1+0i<<0)
+ // _ = int((1+0i)<<s)
+ // _ = int(1.0<<s)
+ // _ = int(complex(1, 0)<<s)
+ _ = int(float32/* ERROR "must be integer" */(1.0) <<s)
+ _ = int(1.1 /* ERROR must be integer */ <<s)
+ _ = int(( /* ERROR "must be integer" */ 1+1i) <<s)
+
+ _ = complex(1 /* ERROR "must be integer" */ <<s, 0)
+
+ var b []int
+ _ = append(b, 1<<s)
+ _ = append(b, 1.0<<s)
+ _ = append(b, (1+0i)<<s)
+ _ = append(b, 1.1 /* ERROR "must be integer" */ <<s)
+ _ = append(b, (1 + 0i) <<s)
+ _ = append(b, ( /* ERROR "must be integer" */ 1 + 1i) <<s)
+
+ _ = complex(1.0 /* ERROR "must be integer" */ <<s, 0)
+ _ = complex(1.1 /* ERROR "must be integer" */ <<s, 0)
+ _ = complex(0, 1.0 /* ERROR "must be integer" */ <<s)
+ _ = complex(0, 1.1 /* ERROR "must be integer" */ <<s)
+
+ // TODO(gri) The delete below is not type-checked correctly yet.
+ // var m1 map[int]string
+ // delete(m1, 1<<s)
+}
+
+func shifts7() {
+ // shifts of shifts
+ var s uint
+ var x int
+ _ = x
+
+ _ = 1<<(1<<s)
+ _ = 1<<(1.<<s)
+ _ = 1. /* ERROR "integer" */ <<(1<<s)
+ _ = 1. /* ERROR "integer" */ <<(1.<<s)
+
+ x = 1<<(1<<s)
+ x = 1<<(1.<<s)
+ x = 1.<<(1<<s)
+ x = 1.<<(1.<<s)
+
+ _ = (1<<s)<<(1<<s)
+ _ = (1<<s)<<(1.<<s)
+ _ = ( /* ERROR "integer" */ 1.<<s)<<(1<<s)
+ _ = ( /* ERROR "integer" */ 1.<<s)<<(1.<<s)
+
+ x = (1<<s)<<(1<<s)
+ x = (1<<s)<<(1.<<s)
+ x = ( /* ERROR "integer" */ 1.<<s)<<(1<<s)
+ x = ( /* ERROR "integer" */ 1.<<s)<<(1.<<s)
+}
+
+func shifts8() {
+ // shift examples from shift discussion: better error messages
+ var s uint
+ _ = 1.0 /* ERROR "shifted operand 1.0 \(type float64\) must be integer" */ <<s == 1
+ _ = 1.0 /* ERROR "shifted operand 1.0 \(type float64\) must be integer" */ <<s == 1.0
+ _ = 1 /* ERROR "shifted operand 1 \(type float64\) must be integer" */ <<s == 1.0
+ _ = 1 /* ERROR "shifted operand 1 \(type float64\) must be integer" */ <<s + 1.0 == 1
+ _ = 1 /* ERROR "shifted operand 1 \(type float64\) must be integer" */ <<s + 1.1 == 1
+ _ = 1 /* ERROR "shifted operand 1 \(type float64\) must be integer" */ <<s + 1 == 1.0
+
+ // additional cases
+ _ = complex(1.0 /* ERROR "shifted operand 1.0 \(type float64\) must be integer" */ <<s, 1)
+ _ = complex(1.0, 1 /* ERROR "shifted operand 1 \(type float64\) must be integer" */ <<s)
+
+ _ = int(1.<<s)
+ _ = int(1.1 /* ERROR "shifted operand .* must be integer" */ <<s)
+ _ = float32(1 /* ERROR "shifted operand .* must be integer" */ <<s)
+ _ = float32(1. /* ERROR "shifted operand .* must be integer" */ <<s)
+ _ = float32(1.1 /* ERROR "shifted operand .* must be integer" */ <<s)
+ // TODO(gri) the error messages for these two are incorrect - disabled for now
+ // _ = complex64(1<<s)
+ // _ = complex64(1.<<s)
+ _ = complex64(1.1 /* ERROR "shifted operand .* must be integer" */ <<s)
+}
+
+func shifts9() {
+ // various originally failing snippets of code from the std library
+ // from src/compress/lzw/reader.go:90
+ {
+ var d struct {
+ bits uint32
+ width uint
+ }
+ _ = uint16(d.bits & (1<<d.width - 1))
+ }
+
+ // from src/debug/dwarf/buf.go:116
+ {
+ var ux uint64
+ var bits uint
+ x := int64(ux)
+ if x&(1<<(bits-1)) != 0 {}
+ }
+
+ // from src/encoding/asn1/asn1.go:160
+ {
+ var bytes []byte
+ if bytes[len(bytes)-1]&((1<<bytes[0])-1) != 0 {}
+ }
+
+ // from src/math/big/rat.go:140
+ {
+ var exp int
+ var mantissa uint64
+ shift := uint64(-1022 - (exp - 1)) // [1..53)
+ _ = mantissa & (1<<shift - 1)
+ }
+
+ // from src/net/interface.go:51
+ {
+ type Flags uint
+ var f Flags
+ var i int
+ if f&(1<<uint(i)) != 0 {}
+ }
+
+ // from src/runtime/softfloat64.go:234
+ {
+ var gm uint64
+ var shift uint
+ _ = gm & (1<<shift - 1)
+ }
+
+ // from src/strconv/atof.go:326
+ {
+ var mant uint64
+ var mantbits uint
+ if mant == 2<<mantbits {}
+ }
+
+ // from src/route_bsd.go:82
+ {
+ var Addrs int32
+ const rtaRtMask = 1
+ var i uint
+ if Addrs&rtaRtMask&(1<<i) == 0 {}
+ }
+
+ // from src/text/scanner/scanner.go:540
+ {
+ var s struct { Whitespace uint64 }
+ var ch rune
+ for s.Whitespace&(1<<uint(ch)) != 0 {}
+ }
+}
+
+func issue5895() {
+ var x = 'a' << 1 // type of x must be rune
+ var _ rune = x
+}
+
+func issue11325() {
+ var _ = 0 >> 1.1 /* ERROR "truncated to uint" */ // example from issue 11325
+ _ = 0 >> 1.1 /* ERROR "truncated to uint" */
+ _ = 0 << 1.1 /* ERROR "truncated to uint" */
+ _ = 0 >> 1.
+ _ = 1 >> 1.1 /* ERROR "truncated to uint" */
+ _ = 1 >> 1.
+ _ = 1. >> 1
+ _ = 1. >> 1.
+ _ = 1.1 /* ERROR "must be integer" */ >> 1
+}
+
+func issue11594() {
+ var _ = complex64 /* ERROR "must be integer" */ (1) << 2 // example from issue 11594
+ _ = float32 /* ERROR "must be integer" */ (0) << 1
+ _ = float64 /* ERROR "must be integer" */ (0) >> 2
+ _ = complex64 /* ERROR "must be integer" */ (0) << 3
+ _ = complex64 /* ERROR "must be integer" */ (0) >> 4
+}
+
+func issue21727() {
+ var s uint
+ var a = make([]int, 1<<s + 1.2 /* ERROR "truncated to int" */ )
+ var _ = a[1<<s - 2.3 /* ERROR "truncated to int" */ ]
+ var _ int = 1<<s + 3.4 /* ERROR "truncated to int" */
+ var _ = string(1 /* ERROR shifted operand 1 .* must be integer */ << s)
+ var _ = string(1.0 /* ERROR "cannot convert" */ << s)
+}
+
+func issue22969() {
+ var s uint
+ var a []byte
+ _ = a[0xffffffffffffffff /* ERROR "overflows int" */ <<s] // example from issue 22969
+ _ = make([]int, 0xffffffffffffffff /* ERROR "overflows int" */ << s)
+ _ = make([]int, 0, 0xffffffffffffffff /* ERROR "overflows int" */ << s)
+ var _ byte = 0x100 /* ERROR "overflows byte" */ << s
+ var _ int8 = 0xff /* ERROR "overflows int8" */ << s
+ var _ int16 = 0xffff /* ERROR "overflows int16" */ << s
+ var _ int32 = 0x80000000 /* ERROR "overflows int32" */ << s
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/slices.go2 b/src/cmd/compile/internal/types2/testdata/check/slices.go2
new file mode 100644
index 0000000..2bacd1c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/slices.go2
@@ -0,0 +1,68 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package slices implements various slice algorithms.
+package slices
+
+// Map turns a []T1 to a []T2 using a mapping function.
+func Map[T1, T2 any](s []T1, f func(T1) T2) []T2 {
+ r := make([]T2, len(s))
+ for i, v := range s {
+ r[i] = f(v)
+ }
+ return r
+}
+
+// Reduce reduces a []T1 to a single value using a reduction function.
+func Reduce[T1, T2 any](s []T1, initializer T2, f func(T2, T1) T2) T2 {
+ r := initializer
+ for _, v := range s {
+ r = f(r, v)
+ }
+ return r
+}
+
+// Filter filters values from a slice using a filter function.
+func Filter[T any](s []T, f func(T) bool) []T {
+ var r []T
+ for _, v := range s {
+ if f(v) {
+ r = append(r, v)
+ }
+ }
+ return r
+}
+
+// Example uses
+
+func limiter(x int) byte {
+ switch {
+ case x < 0:
+ return 0
+ default:
+ return byte(x)
+ case x > 255:
+ return 255
+ }
+}
+
+var input = []int{-4, 68954, 7, 44, 0, -555, 6945}
+var limited1 = Map[int, byte](input, limiter)
+var limited2 = Map(input, limiter) // using type inference
+
+func reducer(x float64, y int) float64 {
+ return x + float64(y)
+}
+
+var reduced1 = Reduce[int, float64](input, 0, reducer)
+var reduced2 = Reduce(input, 1i /* ERROR overflows */, reducer) // using type inference
+var reduced3 = Reduce(input, 1, reducer) // using type inference
+
+func filter(x int) bool {
+ return x&1 != 0
+}
+
+var filtered1 = Filter[int](input, filter)
+var filtered2 = Filter(input, filter) // using type inference
+
diff --git a/src/cmd/compile/internal/types2/testdata/check/stmt0.src b/src/cmd/compile/internal/types2/testdata/check/stmt0.src
new file mode 100644
index 0000000..90ef095
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/stmt0.src
@@ -0,0 +1,992 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// statements
+
+package stmt0
+
+func assignments0() (int, int) {
+ var a, b, c int
+ var ch chan int
+ f0 := func() {}
+ f1 := func() int { return 1 }
+ f2 := func() (int, int) { return 1, 2 }
+ f3 := func() (int, int, int) { return 1, 2, 3 }
+
+ a, b, c = 1, 2, 3
+ a, b, c = 1 /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ , 2
+ a, b, c = 1 /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ , 2, 3, 4
+ _, _, _ = a, b, c
+
+ a = f0 /* ERROR "used as value" */ ()
+ a = f1()
+ a = f2 /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ ()
+ a, b = f2()
+ a, b, c = f2 /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ ()
+ a, b, c = f3()
+ a, b = f3 /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ ()
+
+ a, b, c = <- /* ERROR "cannot assign [1-9]+ values to [1-9]+ variables" */ ch
+
+ return /* ERROR "not enough return values\n\thave \(\)\n\twant \(int, int\)" */
+ return 1 /* ERROR "not enough return values\n\thave \(number\)\n\twant \(int, int\)" */
+ return 1, 2
+ return 1, 2, 3 /* ERROR "too many return values\n\thave \(number, number, number\)\n\twant \(int, int\)" */
+}
+
+func assignments1() {
+ b, i, f, c, s := false, 1, 1.0, 1i, "foo"
+ b = i /* ERROR "cannot use .* in assignment" */
+ i = f /* ERROR "cannot use .* in assignment" */
+ f = c /* ERROR "cannot use .* in assignment" */
+ c = s /* ERROR "cannot use .* in assignment" */
+ s = b /* ERROR "cannot use .* in assignment" */
+
+ v0, v1, v2 := 1 /* ERROR "cannot initialize" */ , 2, 3, 4
+ _, _, _ = v0, v1, v2
+
+ b = true
+
+ i += 1
+ i += "foo" /* ERROR "mismatched types int and untyped string" */
+
+ f -= 1
+ f /= 0
+ f = float32(0)/0 /* ERROR "division by zero" */
+ f -= "foo" /* ERROR "mismatched types float64 and untyped string" */
+
+ c *= 1
+ c /= 0
+
+ s += "bar"
+ s += 1 /* ERROR "mismatched types string and untyped int" */
+
+ var u64 uint64
+ u64 += 1<<u64
+
+ undeclared /* ERROR "undeclared" */ = 991
+
+ // test cases for issue 5800
+ var (
+ _ int = nil /* ERROR "cannot use nil as int value in variable declaration" */
+ _ [10]int = nil /* ERROR "cannot use nil as \[10\]int value in variable declaration" */
+ _ []byte = nil
+ _ struct{} = nil /* ERROR "cannot use nil as struct{} value in variable declaration" */
+ _ func() = nil
+ _ map[int]string = nil
+ _ chan int = nil
+ )
+
+ // test cases for issue 5500
+ _ = func() (int, bool) {
+ var m map[int]int
+ return m /* ERROR "not enough return values" */ [0]
+ }
+
+ g := func(int, bool){}
+ var m map[int]int
+ g(m /* ERROR "not enough arguments" */ [0])
+
+ // assignments to _
+ _ = nil /* ERROR "use of untyped nil" */
+ _ = 1 /* ERROR overflow */ <<1000
+ (_) = 0
+}
+
+func assignments2() {
+ type mybool bool
+ var m map[string][]bool
+ var s []bool
+ var b bool
+ var d mybool
+ _ = s
+ _ = b
+ _ = d
+
+ // assignments to map index expressions are ok
+ s, b = m["foo"]
+ _, d = m["bar"]
+ m["foo"] = nil
+ m["foo"] = nil /* ERROR cannot assign [1-9]+ values to [1-9]+ variables */ , false
+ _ = append(m["foo"])
+ _ = append(m["foo"], true)
+
+ var c chan int
+ _, b = <-c
+ _, d = <-c
+ <- /* ERROR cannot assign */ c = 0
+ <-c = 0 /* ERROR cannot assign [1-9]+ values to [1-9]+ variables */ , false
+
+ var x interface{}
+ _, b = x.(int)
+ x /* ERROR cannot assign */ .(int) = 0
+ x.(int) = 0 /* ERROR cannot assign [1-9]+ values to [1-9]+ variables */ , false
+
+ assignments2 /* ERROR used as value */ () = nil
+ int /* ERROR not an expression */ = 0
+}
+
+func issue6487() {
+ type S struct{x int}
+ _ = &S /* ERROR "cannot take address" */ {}.x
+ _ = &( /* ERROR "cannot take address" */ S{}.x)
+ _ = (&S{}).x
+ S /* ERROR "cannot assign" */ {}.x = 0
+ (&S{}).x = 0
+
+ type M map[string]S
+ var m M
+ m /* ERROR "cannot assign to struct field" */ ["foo"].x = 0
+ _ = &( /* ERROR "cannot take address" */ m["foo"].x)
+ _ = &m /* ERROR "cannot take address" */ ["foo"].x
+}
+
+func issue6766a() {
+ a, a /* ERROR a repeated on left side of := */ := 1, 2
+ _ = a
+ a, b, b /* ERROR b repeated on left side of := */ := 1, 2, 3
+ _ = b
+ c, c /* ERROR c repeated on left side of := */, b := 1, 2, 3
+ _ = c
+ a, b := /* ERROR no new variables */ 1, 2
+}
+
+func shortVarDecls1() {
+ const c = 0
+ type d int
+ a, b, c /* ERROR "cannot assign" */ , d /* ERROR "cannot assign" */ := 1, "zwei", 3.0, 4
+ var _ int = a // a is of type int
+ var _ string = b // b is of type string
+}
+
+func incdecs() {
+ const c = 3.14
+ c /* ERROR "cannot assign" */ ++
+ s := "foo"
+ s /* ERROR "invalid operation" */ --
+ 3.14 /* ERROR "cannot assign" */ ++
+ var (
+ x int
+ y float32
+ z complex128
+ )
+ x++
+ y--
+ z++
+}
+
+func sends() {
+ var ch chan int
+ var rch <-chan int
+ var x int
+ x <- /* ERROR "cannot send" */ x
+ rch <- /* ERROR "cannot send" */ x
+ ch <- "foo" /* ERROR "cannot use .* in send" */
+ ch <- x
+}
+
+func selects() {
+ select {}
+ var (
+ ch chan int
+ sc chan <- bool
+ )
+ select {
+ case <-ch:
+ case (<-ch):
+ case t := <-ch:
+ _ = t
+ case t := (<-ch):
+ _ = t
+ case t, ok := <-ch:
+ _, _ = t, ok
+ case t, ok := (<-ch):
+ _, _ = t, ok
+ case <-sc /* ERROR "cannot receive from send-only channel" */ :
+ }
+ select {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+ select {
+ case a, b := <-ch:
+ _, b = a, b
+ case x /* ERROR send or receive */ :
+ case a /* ERROR send or receive */ := ch:
+ }
+
+ // test for issue 9570: ch2 in second case falsely resolved to
+ // ch2 declared in body of first case
+ ch1 := make(chan int)
+ ch2 := make(chan int)
+ select {
+ case <-ch1:
+ var ch2 /* ERROR ch2 declared but not used */ chan bool
+ case i := <-ch2:
+ print(i + 1)
+ }
+}
+
+func gos() {
+ go 1 /* ERROR must be function call */ /* ERROR cannot call non-function */
+ go int /* ERROR "go requires function call, not conversion" */ (0)
+ go gos()
+ var c chan int
+ go close(c)
+ go len /* ERROR "go discards result" */ (c)
+}
+
+func defers() {
+ defer 1 /* ERROR must be function call */ /* ERROR cannot call non-function */
+ defer int /* ERROR "defer requires function call, not conversion" */ (0)
+ defer defers()
+ var c chan int
+ defer close(c)
+ defer len /* ERROR "defer discards result" */ (c)
+}
+
+func breaks() {
+ var x, y int
+
+ break /* ERROR "break" */
+ {
+ break /* ERROR "break" */
+ }
+ if x < y {
+ break /* ERROR "break" */
+ }
+
+ switch x {
+ case 0:
+ break
+ case 1:
+ if x == y {
+ break
+ }
+ default:
+ break
+ break
+ }
+
+ var z interface{}
+ switch z.(type) {
+ case int:
+ break
+ }
+
+ for {
+ break
+ }
+
+ var a []int
+ for _ = range a {
+ break
+ }
+
+ for {
+ if x == y {
+ break
+ }
+ }
+
+ var ch chan int
+ select {
+ case <-ch:
+ break
+ }
+
+ select {
+ case <-ch:
+ if x == y {
+ break
+ }
+ default:
+ break
+ }
+}
+
+func continues() {
+ var x, y int
+
+ continue /* ERROR "continue" */
+ {
+ continue /* ERROR "continue" */
+ }
+
+ if x < y {
+ continue /* ERROR "continue" */
+ }
+
+ switch x {
+ case 0:
+ continue /* ERROR "continue" */
+ }
+
+ var z interface{}
+ switch z.(type) {
+ case int:
+ continue /* ERROR "continue" */
+ }
+
+ var ch chan int
+ select {
+ case <-ch:
+ continue /* ERROR "continue" */
+ }
+
+ for i := 0; i < 10; i++ {
+ continue
+ if x < y {
+ continue
+ break
+ }
+ switch x {
+ case y:
+ continue
+ default:
+ break
+ }
+ select {
+ case <-ch:
+ continue
+ }
+ }
+
+ var a []int
+ for _ = range a {
+ continue
+ if x < y {
+ continue
+ break
+ }
+ switch x {
+ case y:
+ continue
+ default:
+ break
+ }
+ select {
+ case <-ch:
+ continue
+ }
+ }
+}
+
+func returns0() {
+ return
+ return 0 /* ERROR too many return values */
+}
+
+func returns1(x float64) (int, *float64) {
+ return 0, &x
+ return /* ERROR not enough return values */
+ return "foo" /* ERROR "cannot .* in return statement" */, x /* ERROR "cannot use .* in return statement" */
+ return 0, &x, 1 /* ERROR too many return values */
+}
+
+func returns2() (a, b int) {
+ return
+ return 1, "foo" /* ERROR cannot use .* in return statement */
+ return 1, 2, 3 /* ERROR too many return values */
+ {
+ type a int
+ return 1, 2
+ return /* ERROR a not in scope at return */
+ }
+}
+
+func returns3() (_ int) {
+ return
+ {
+ var _ int // blank (_) identifiers never shadow since they are in no scope
+ return
+ }
+}
+
+func switches0() {
+ var x int
+
+ switch x {
+ }
+
+ switch x {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch {
+ case 1 /* ERROR "cannot convert" */ :
+ }
+
+ true := "false"
+ _ = true
+ // A tagless switch is equivalent to the bool
+ // constant true, not the identifier 'true'.
+ switch {
+ case "false" /* ERROR "cannot convert" */:
+ }
+
+ switch int32(x) {
+ case 1, 2:
+ case x /* ERROR "invalid case x in switch on int32\(x\) \(mismatched types int and int32\)" */ :
+ }
+
+ switch x {
+ case 1 /* ERROR "overflows" */ << 100:
+ }
+
+ switch x {
+ case 1:
+ case 1 /* ERROR "duplicate case" */ :
+ case ( /* ERROR "duplicate case" */ 1):
+ case 2, 3, 4:
+ case 5, 1 /* ERROR "duplicate case" */ :
+ }
+
+ switch uint64(x) {
+ case 1<<64 - 1:
+ case 1 /* ERROR duplicate case */ <<64 - 1:
+ case 2, 3, 4:
+ case 5, 1 /* ERROR duplicate case */ <<64 - 1:
+ }
+
+ var y32 float32
+ switch y32 {
+ case 1.1:
+ case 11/10: // integer division!
+ case 11. /* ERROR duplicate case */ /10:
+ case 2, 3.0, 4.1:
+ case 5.2, 1.10 /* ERROR duplicate case */ :
+ }
+
+ var y64 float64
+ switch y64 {
+ case 1.1:
+ case 11/10: // integer division!
+ case 11. /* ERROR duplicate case */ /10:
+ case 2, 3.0, 4.1:
+ case 5.2, 1.10 /* ERROR duplicate case */ :
+ }
+
+ var s string
+ switch s {
+ case "foo":
+ case "foo" /* ERROR duplicate case */ :
+ case "f" /* ERROR duplicate case */ + "oo":
+ case "abc", "def", "ghi":
+ case "jkl", "foo" /* ERROR duplicate case */ :
+ }
+
+ type T int
+ type F float64
+ type S string
+ type B bool
+ var i interface{}
+ switch i {
+ case nil:
+ case nil: // no duplicate detection
+ case (*int)(nil):
+ case (*int)(nil): // do duplicate detection
+ case 1:
+ case byte(1):
+ case int /* ERROR duplicate case */ (1):
+ case T(1):
+ case 1.0:
+ case F(1.0):
+ case F /* ERROR duplicate case */ (1.0):
+ case "hello":
+ case S("hello"):
+ case S /* ERROR duplicate case */ ("hello"):
+ case 1==1, B(false):
+ case false, B(2==2):
+ }
+
+ // switch on array
+ var a [3]int
+ switch a {
+ case [3]int{1, 2, 3}:
+ case [3]int{1, 2, 3}: // no duplicate detection
+ case [ /* ERROR "mismatched types */ 4]int{4, 5, 6}:
+ }
+
+ // switch on channel
+ var c1, c2 chan int
+ switch c1 {
+ case nil:
+ case c1:
+ case c2:
+ case c1, c2: // no duplicate detection
+ }
+}
+
+func switches1() {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+
+ var x int
+ switch x {
+ case 0:
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ break
+ case 1:
+ fallthrough
+ case 2:
+ fallthrough; ; ; // trailing empty statements are ok
+ case 3:
+ default:
+ fallthrough; ;
+ case 4:
+ fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ var y interface{}
+ switch y.(type) {
+ case int:
+ fallthrough /* ERROR "fallthrough statement out of place" */ ; ; ;
+ default:
+ }
+
+ switch x {
+ case 0:
+ if x == 0 {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ }
+
+ switch x {
+ case 0:
+ goto L1
+ L1: fallthrough; ;
+ case 1:
+ goto L2
+ goto L3
+ goto L4
+ L2: L3: L4: fallthrough
+ default:
+ }
+
+ switch x {
+ case 0:
+ goto L5
+ L5: fallthrough
+ default:
+ goto L6
+ goto L7
+ goto L8
+ L6: L7: L8: fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ switch x {
+ case 0:
+ fallthrough; ;
+ case 1:
+ {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ case 2:
+ fallthrough
+ case 3:
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ { /* empty block is not an empty statement */ }; ;
+ default:
+ fallthrough /* ERROR "cannot fallthrough final case in switch" */
+ }
+
+ switch x {
+ case 0:
+ {
+ fallthrough /* ERROR "fallthrough statement out of place" */
+ }
+ }
+}
+
+func switches2() {
+ // untyped nil is not permitted as switch expression
+ switch nil /* ERROR "use of untyped nil" */ {
+ case 1, 2, "foo": // don't report additional errors here
+ }
+
+ // untyped constants are converted to default types
+ switch 1<<63-1 {
+ }
+ switch 1 /* ERROR "cannot use .* as int value.*\(overflows\)" */ << 63 {
+ }
+ var x int
+ switch 1.0 {
+ case 1.0, 2.0, x /* ERROR "mismatched types int and float64" */ :
+ }
+ switch x {
+ case 1.0:
+ }
+
+ // untyped bools become of type bool
+ type B bool
+ var b B = true
+ switch x == x {
+ case b /* ERROR "mismatched types B and bool" */ :
+ }
+ switch {
+ case b /* ERROR "mismatched types B and bool" */ :
+ }
+}
+
+func issue11667() {
+ switch 9223372036854775808 /* ERROR "cannot use .* as int value.*\(overflows\)" */ {
+ }
+ switch 9223372036854775808 /* ERROR "cannot use .* as int value.*\(overflows\)" */ {
+ case 9223372036854775808:
+ }
+ var x int
+ switch x {
+ case 9223372036854775808 /* ERROR "overflows int" */ :
+ }
+ var y float64
+ switch y {
+ case 9223372036854775808:
+ }
+}
+
+func issue11687() {
+ f := func() (_, _ int) { return }
+ switch f /* ERROR "2-valued f" */ () {
+ }
+ var x int
+ switch f /* ERROR "2-valued f" */ () {
+ case x:
+ }
+ switch x {
+ case f /* ERROR "2-valued f" */ ():
+ }
+}
+
+type I interface {
+ m()
+}
+
+type I2 interface {
+ m(int)
+}
+
+type T struct{}
+type T1 struct{}
+type T2 struct{}
+
+func (T) m() {}
+func (T2) m(int) {}
+
+func typeswitches() {
+ var i int
+ var x interface{}
+
+ switch x.(type) {}
+ switch (x /* ERROR "outside type switch" */ .(type)) {}
+
+ switch x.(type) {
+ default:
+ default /* ERROR "multiple defaults" */ :
+ }
+
+ switch x /* ERROR "declared but not used" */ := x.(type) {}
+ switch _ /* ERROR "no new variable on left side of :=" */ := x.(type) {}
+
+ switch x := x.(type) {
+ case int:
+ var y int = x
+ _ = y
+ }
+
+ switch /* ERROR "x declared but not used" */ x := i /* ERROR "not an interface" */ .(type) {}
+
+ switch t := x.(type) {
+ case nil:
+ var v bool = t /* ERROR "cannot use .* in variable declaration" */
+ _ = v
+ case int:
+ var v int = t
+ _ = v
+ case float32, complex64:
+ var v float32 = t /* ERROR "cannot use .* in variable declaration" */
+ _ = v
+ default:
+ var v float32 = t /* ERROR "cannot use .* in variable declaration" */
+ _ = v
+ }
+
+ var t I
+ switch t.(type) {
+ case T:
+ case T1 /* ERROR "missing method m" */ :
+ case T2 /* ERROR "wrong type for method m" */ :
+ case I2 /* STRICT "wrong type for method m" */ : // only an error in strict mode (issue 8561)
+ }
+
+
+ {
+ x := 1
+ v := 2
+ switch v /* ERROR "v [(]variable of type int[)] is not an interface" */ .(type) {
+ case int:
+ println(x)
+ println(x / /* ERROR "invalid operation: division by zero" */ 0)
+ case /* ERROR "1 is not a type" */ 1:
+ }
+ }
+}
+
+// Test that each case clause uses the correct type of the variable
+// declared by the type switch (issue 5504).
+func typeswitch0() {
+ switch y := interface{}(nil).(type) {
+ case int:
+ func() int { return y + 0 }()
+ case float32:
+ func() float32 { return y }()
+ }
+}
+
+// Test correct scope setup.
+// (no redeclaration errors expected in the type switch)
+func typeswitch1() {
+ var t I
+ switch t := t; t := t.(type) {
+ case nil:
+ var _ I = t
+ case T:
+ var _ T = t
+ default:
+ var _ I = t
+ }
+}
+
+// Test correct typeswitch against interface types.
+type A interface { a() }
+type B interface { b() }
+type C interface { a(int) }
+
+func typeswitch2() {
+ switch A(nil).(type) {
+ case A:
+ case B:
+ case C /* STRICT "cannot have dynamic type" */: // only an error in strict mode (issue 8561)
+ }
+}
+
+func typeswitch3(x interface{}) {
+ switch x.(type) {
+ case int:
+ case float64:
+ case int /* ERROR duplicate case */ :
+ }
+
+ switch x.(type) {
+ case nil:
+ case int:
+ case nil /* ERROR duplicate case */ , nil /* ERROR duplicate case */ :
+ }
+
+ type F func(int)
+ switch x.(type) {
+ case nil:
+ case int, func(int):
+ case float32, func /* ERROR duplicate case */ (x int):
+ case F:
+ }
+}
+
+func fors1() {
+ for {}
+ var i string
+ _ = i
+ for i := 0; i < 10; i++ {}
+ for i := 0; i < 10; j /* ERROR cannot declare */ := 0 {}
+}
+
+func rangeloops1() {
+ var (
+ x int
+ a [10]float32
+ b []string
+ p *[10]complex128
+ pp **[10]complex128
+ s string
+ m map[int]bool
+ c chan int
+ sc chan<- int
+ rc <-chan int
+ )
+
+ for range x /* ERROR "cannot range over" */ {}
+ for _ = range x /* ERROR "cannot range over" */ {}
+ for i := range x /* ERROR "cannot range over" */ {}
+
+ for range a {}
+ for i := range a {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range a {
+ var ii int
+ ii = i
+ _ = ii
+ var xx float64
+ xx = x /* ERROR "cannot use .* in assignment" */
+ _ = xx
+ }
+ var ii int
+ var xx float32
+ for ii, xx = range a {}
+ _, _ = ii, xx
+
+ for range b {}
+ for i := range b {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range b {
+ var ii int
+ ii = i
+ _ = ii
+ var xx string
+ xx = x
+ _ = xx
+ }
+
+ for range s {}
+ for i := range s {
+ var ii int
+ ii = i
+ _ = ii
+ }
+ for i, x := range s {
+ var ii int
+ ii = i
+ _ = ii
+ var xx rune
+ xx = x
+ _ = xx
+ }
+
+ for range p {}
+ for _, x := range p {
+ var xx complex128
+ xx = x
+ _ = xx
+ }
+
+ for range pp /* ERROR "cannot range over" */ {}
+ for _, x := range pp /* ERROR "cannot range over" */ {}
+
+ for range m {}
+ for k := range m {
+ var kk int32
+ kk = k /* ERROR "cannot use .* in assignment" */
+ _ = kk
+ }
+ for k, v := range m {
+ var kk int
+ kk = k
+ _ = kk
+ if v {}
+ }
+
+ for range c {}
+ for _, _ /* ERROR "only one iteration variable" */ = range c {}
+ for e := range c {
+ var ee int
+ ee = e
+ _ = ee
+ }
+ for _ = range sc /* ERROR "send-only channel" */ {}
+ for _ = range rc {}
+
+ // constant strings
+ const cs = "foo"
+ for range cs {}
+ for range "" {}
+ for i, x := range cs { _, _ = i, x }
+ for i, x := range "" {
+ var ii int
+ ii = i
+ _ = ii
+ var xx rune
+ xx = x
+ _ = xx
+ }
+}
+
+func rangeloops2() {
+ type I int
+ type R rune
+
+ var a [10]int
+ var i I
+ _ = i
+ for i /* ERROR cannot use .* in assignment */ = range a {}
+ for i /* ERROR cannot use .* in assignment */ = range &a {}
+ for i /* ERROR cannot use .* in assignment */ = range a[:] {}
+
+ var s string
+ var r R
+ _ = r
+ for i /* ERROR cannot use .* in assignment */ = range s {}
+ for i /* ERROR cannot use .* in assignment */ = range "foo" {}
+ for _, r /* ERROR cannot use .* in assignment */ = range s {}
+ for _, r /* ERROR cannot use .* in assignment */ = range "foo" {}
+}
+
+func issue6766b() {
+ for _ := /* ERROR no new variables */ range "" {}
+ for a, a /* ERROR redeclared */ := range "" { _ = a }
+ var a int
+ _ = a
+ for a, a /* ERROR redeclared */ := range []int{1, 2, 3} { _ = a }
+}
+
+// Test that despite errors in the range clause,
+// the loop body is still type-checked (and thus
+// errors reported).
+func issue10148() {
+ for y /* ERROR declared but not used */ := range "" {
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
+ }
+ for range 1 /* ERROR cannot range over 1 */ {
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
+ }
+ for y := range 1 /* ERROR cannot range over 1 */ {
+ _ = "" /* ERROR mismatched types untyped string and untyped int*/ + 1
+ }
+}
+
+func labels0() {
+ goto L0
+ goto L1
+ L0:
+ L1:
+ L1 /* ERROR "already declared" */ :
+ if true {
+ goto L2
+ L2:
+ L0 /* ERROR "already declared" */ :
+ }
+ _ = func() {
+ goto L0
+ goto L1
+ goto L2
+ L0:
+ L1:
+ L2:
+ }
+}
+
+func expression_statements(ch chan int) {
+ expression_statements(ch)
+ <-ch
+ println()
+
+ 0 /* ERROR "not used" */
+ 1 /* ERROR "not used" */ +2
+ cap /* ERROR "not used" */ (ch)
+ println /* ERROR "must be called" */
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/stmt1.src b/src/cmd/compile/internal/types2/testdata/check/stmt1.src
new file mode 100644
index 0000000..f79f920
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/stmt1.src
@@ -0,0 +1,259 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// terminating statements
+
+package stmt1
+
+func _() {}
+
+func _() int {} /* ERROR "missing return" */
+
+func _() int { panic(0) }
+func _() int { (panic(0)) }
+
+// block statements
+func _(x, y int) (z int) {
+ {
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ {
+ return; ; ; // trailing empty statements are ok
+ }
+ ; ; ;
+}
+
+func _(x, y int) (z int) {
+ {
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ {
+ ; ; ;
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+// if statements
+func _(x, y int) (z int) {
+ if x < y { return }
+ return 1
+}
+
+func _(x, y int) (z int) {
+ if x < y { return; ; ; ; }
+ return 1
+}
+
+func _(x, y int) (z int) {
+ if x < y { return }
+ return 1; ;
+}
+
+func _(x, y int) (z int) {
+ if x < y { return }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ if x < y {
+ } else { return 1
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ if x < y { return
+ } else { return
+ }
+}
+
+// for statements
+func _(x, y int) (z int) {
+ for x < y {
+ return
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ for {
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ return; ; ; ;
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ return
+ break
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ for {
+ for { break }
+ return
+ }
+}
+
+func _(x, y int) (z int) {
+ for {
+ for { break }
+ return ; ;
+ }
+ ;
+}
+
+func _(x, y int) (z int) {
+L: for {
+ for { break L }
+ return
+ }
+} /* ERROR "missing return" */
+
+// switch statements
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default: return
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return;
+ default: return; ; ;
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ case 1: break
+ }
+} /* ERROR "missing return" */
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break
+ }
+ panic(0)
+ }
+}
+
+func _(x, y int) (z int) {
+ switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break
+ }
+ panic(0); ; ;
+ }
+ ;
+}
+
+func _(x, y int) (z int) {
+L: switch x {
+ case 0: return
+ default:
+ switch y {
+ case 0: break L
+ }
+ panic(0)
+ }
+} /* ERROR "missing return" */
+
+// select statements
+func _(ch chan int) (z int) {
+ select {}
+} // nice!
+
+func _(ch chan int) (z int) {
+ select {}
+ ; ;
+}
+
+func _(ch chan int) (z int) {
+ select {
+ default: break
+ }
+} /* ERROR "missing return" */
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return
+ default: break
+ }
+} /* ERROR "missing return" */
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return
+ default:
+ for i := 0; i < 10; i++ {
+ break
+ }
+ return
+ }
+}
+
+func _(ch chan int) (z int) {
+ select {
+ case <-ch: return; ; ;
+ default:
+ for i := 0; i < 10; i++ {
+ break
+ }
+ return; ; ;
+ }
+ ; ; ;
+}
+
+func _(ch chan int) (z int) {
+L: select {
+ case <-ch: return
+ default:
+ for i := 0; i < 10; i++ {
+ break L
+ }
+ return
+ }
+ ; ; ;
+} /* ERROR "missing return" */
+
+func parenPanic() int {
+ ((((((panic)))(0))))
+}
+
+func issue23218a() int {
+ {
+ panic := func(interface{}){}
+ panic(0)
+ }
+} /* ERROR "missing return" */
+
+func issue23218b() int {
+ {
+ panic := func(interface{}){}
+ ((((panic))))(0)
+ }
+} /* ERROR "missing return" */
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinference.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinference.go2
new file mode 100644
index 0000000..28f3e28
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinference.go2
@@ -0,0 +1,49 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package typeInference
+
+// As of issue #51527, type-type inference has been disabled.
+
+// basic inference
+type Tb[P ~*Q, Q any] int
+func _() {
+ var x Tb /* ERROR got 1 arguments */ [*int]
+ var y Tb[*int, int]
+ x = y /* ERROR cannot use y .* in assignment */
+ _ = x
+}
+
+// recursive inference
+type Tr[A any, B *C, C *D, D *A] int
+func _() {
+ var x Tr /* ERROR got 1 arguments */ [string]
+ var y Tr[string, ***string, **string, *string]
+ var z Tr[int, ***int, **int, *int]
+ x = y /* ERROR cannot use y .* in assignment */
+ x = z // ERROR cannot use z .* as Tr
+ _ = x
+}
+
+// other patterns of inference
+type To0[A any, B []A] int
+type To1[A any, B struct{a A}] int
+type To2[A any, B [][]A] int
+type To3[A any, B [3]*A] int
+type To4[A any, B any, C struct{a A; b B}] int
+func _() {
+ var _ To0 /* ERROR got 1 arguments */ [int]
+ var _ To1 /* ERROR got 1 arguments */ [int]
+ var _ To2 /* ERROR got 1 arguments */ [int]
+ var _ To3 /* ERROR got 1 arguments */ [int]
+ var _ To4 /* ERROR got 2 arguments */ [int, string]
+}
+
+// failed inference
+type Tf0[A, B any] int
+type Tf1[A any, B ~struct{a A; c C}, C any] int
+func _() {
+ var _ Tf0 /* ERROR got 1 arguments but 2 type parameters */ [int]
+ var _ Tf1 /* ERROR got 1 arguments but 3 type parameters */ [int]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinst.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinst.go2
new file mode 100644
index 0000000..0e6dc0a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinst.go2
@@ -0,0 +1,62 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myInt int
+
+// Parameterized type declarations
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+type T1[P any] P // ERROR cannot use a type parameter as RHS in type declaration
+
+type T2[P any] struct {
+ f P
+ g int // int should still be in scope chain
+}
+
+type List[P any] []P
+
+// Alias type declarations cannot have type parameters.
+// Issue #46477 proposes to change that.
+type A1[P any] = /* ERROR cannot be alias */ struct{}
+
+// Pending clarification of #46477 we disallow aliases
+// of generic types.
+type A2 = List // ERROR cannot use generic type
+var _ A2[int]
+var _ A2
+
+type A3 = List[int]
+var _ A3
+
+// Parameterized type instantiations
+
+var x int
+type _ x /* ERROR not a type */ [int]
+
+type _ int /* ERROR not a generic type */ [] // ERROR expecting type
+type _ myInt /* ERROR not a generic type */ [] // ERROR expecting type
+
+// TODO(gri) better error messages
+type _ T1[] // ERROR expecting type
+type _ T1[x /* ERROR not a type */ ]
+type _ T1 /* ERROR got 2 arguments but 1 type parameters */ [int, float32]
+
+var _ T2[int] = T2[int]{}
+
+var _ List[int] = []int{1, 2, 3}
+var _ List[[]int] = [][]int{{1, 2, 3}}
+var _ List[List[List[int]]]
+
+// Parameterized types containing parameterized types
+
+type T3[P any] List[P]
+
+var _ T3[int] = T3[int](List[int]{1, 2, 3})
+
+// Self-recursive generic types are not permitted
+
+type self1[P any] self1 /* ERROR illegal cycle */ [P]
+type self2[P any] *self2[P] // this is ok
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
new file mode 100644
index 0000000..4aaefb3
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinst2.go2
@@ -0,0 +1,280 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type List[E any] []E
+var _ List[List[List[int]]]
+var _ List[List[List[int]]] = []List[List[int]]{}
+
+type (
+ T1[P1 any] struct {
+ f1 T2[P1, float32]
+ }
+
+ T2[P2, P3 any] struct {
+ f2 P2
+ f3 P3
+ }
+)
+
+func _() {
+ var x1 T1[int]
+ var x2 T2[int, float32]
+
+ x1.f1.f2 = 0
+ x1.f1 = x2
+}
+
+type T3[P any] T1[T2[P, P]]
+
+func _() {
+ var x1 T3[int]
+ var x2 T2[int, int]
+ x1.f1.f2 = x2
+}
+
+func f[P any] (x P) List[P] {
+ return List[P]{x}
+}
+
+var (
+ _ []int = f(0)
+ _ []float32 = f[float32](10)
+ _ List[complex128] = f(1i)
+ _ []List[int] = f(List[int]{})
+ _ List[List[int]] = []List[int]{}
+ _ = []List[int]{}
+)
+
+// Parameterized types with methods
+
+func (l List[E]) Head() (_ E, _ bool) {
+ if len(l) > 0 {
+ return l[0], true
+ }
+ return
+}
+
+// A test case for instantiating types with other types (extracted from map.go2)
+
+type Pair[K any] struct {
+ key K
+}
+
+type Receiver[T any] struct {
+ values T
+}
+
+type Iterator[K any] struct {
+ r Receiver[Pair[K]]
+}
+
+func Values [T any] (r Receiver[T]) T {
+ return r.values
+}
+
+func (it Iterator[K]) Next() K {
+ return Values[Pair[K]](it.r).key
+}
+
+// A more complex test case testing type bounds (extracted from linalg.go2 and reduced to essence)
+
+type NumericAbs[T any] interface {
+ Abs() T
+}
+
+func AbsDifference[T NumericAbs[T]](x T) { panic(0) }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type OrderedAbs[T any] T
+//
+// func (a OrderedAbs[T]) Abs() OrderedAbs[T]
+//
+// func OrderedAbsDifference[T any](x T) {
+// AbsDifference(OrderedAbs[T](x))
+// }
+
+// same code, reduced to essence
+
+func g[P interface{ m() P }](x P) { panic(0) }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T4[P any] P
+//
+// func (_ T4[P]) m() T4[P]
+//
+// func _[Q any](x Q) {
+// g(T4[Q](x))
+// }
+
+// Another test case that caused problems in the past
+
+type T5[_ interface { a() }, _ interface{}] struct{}
+
+type A[P any] struct{ x P }
+
+func (_ A[P]) a() {}
+
+var _ T5[A[int], int]
+
+// Invoking methods with parameterized receiver types uses
+// type inference to determine the actual type arguments matching
+// the receiver type parameters from the actual receiver argument.
+// Go does implicit address-taking and dereferencing depending
+// on the actual receiver and the method's receiver type. To make
+// type inference work, the type-checker matches "pointer-ness"
+// of the actual receiver and the method's receiver type.
+// The following code tests this mechanism.
+
+type R1[A any] struct{}
+func (_ R1[A]) vm()
+func (_ *R1[A]) pm()
+
+func _[T any](r R1[T], p *R1[T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+type R2[A, B any] struct{}
+func (_ R2[A, B]) vm()
+func (_ *R2[A, B]) pm()
+
+func _[T any](r R2[T, int], p *R2[string, T]) {
+ r.vm()
+ r.pm()
+ p.vm()
+ p.pm()
+}
+
+// It is ok to have multiple embedded unions.
+type _ interface {
+ m0()
+ ~int | ~string | ~bool
+ ~float32 | ~float64
+ m1()
+ m2()
+ ~complex64 | ~complex128
+ ~rune
+}
+
+// Type sets may contain each type at most once.
+type _ interface {
+ ~int|~int /* ERROR overlapping terms ~int */
+ ~int|int /* ERROR overlapping terms int */
+ int|int /* ERROR overlapping terms int */
+}
+
+type _ interface {
+ ~struct{f int} | ~struct{g int} | ~struct /* ERROR overlapping terms */ {f int}
+}
+
+// Interface term lists can contain any type, incl. *Named types.
+// Verify that we use the underlying type to compute the operational type.
+type MyInt int
+func add1[T interface{MyInt}](x T) T {
+ return x + 1
+}
+
+type MyString string
+func double[T interface{MyInt|MyString}](x T) T {
+ return x + x
+}
+
+// Embedding of interfaces with term lists leads to interfaces
+// with term lists that are the intersection of the embedded
+// term lists.
+
+type E0 interface {
+ ~int | ~bool | ~string
+}
+
+type E1 interface {
+ ~int | ~float64 | ~string
+}
+
+type E2 interface {
+ ~float64
+}
+
+type I0 interface {
+ E0
+}
+
+func f0[T I0]() {}
+var _ = f0[int]
+var _ = f0[bool]
+var _ = f0[string]
+var _ = f0[float64 /* ERROR does not implement I0 */ ]
+
+type I01 interface {
+ E0
+ E1
+}
+
+func f01[T I01]() {}
+var _ = f01[int]
+var _ = f01[bool /* ERROR does not implement I0 */ ]
+var _ = f01[string]
+var _ = f01[float64 /* ERROR does not implement I0 */ ]
+
+type I012 interface {
+ E0
+ E1
+ E2
+}
+
+func f012[T I012]() {}
+var _ = f012[int /* ERROR cannot implement I012.*empty type set */ ]
+var _ = f012[bool /* ERROR cannot implement I012.*empty type set */ ]
+var _ = f012[string /* ERROR cannot implement I012.*empty type set */ ]
+var _ = f012[float64 /* ERROR cannot implement I012.*empty type set */ ]
+
+type I12 interface {
+ E1
+ E2
+}
+
+func f12[T I12]() {}
+var _ = f12[int /* ERROR does not implement I12 */ ]
+var _ = f12[bool /* ERROR does not implement I12 */ ]
+var _ = f12[string /* ERROR does not implement I12 */ ]
+var _ = f12[float64]
+
+type I0_ interface {
+ E0
+ ~int
+}
+
+func f0_[T I0_]() {}
+var _ = f0_[int]
+var _ = f0_[bool /* ERROR does not implement I0_ */ ]
+var _ = f0_[string /* ERROR does not implement I0_ */ ]
+var _ = f0_[float64 /* ERROR does not implement I0_ */ ]
+
+// Using a function instance as a type is an error.
+var _ f0 // ERROR not a type
+var _ f0 /* ERROR not a type */ [int]
+
+// Empty type sets can only be satisfied by empty type sets.
+type none interface {
+ // force an empty type set
+ int
+ string
+}
+
+func ff[T none]() {}
+func gg[T any]() {}
+func hh[T ~int]() {}
+
+func _[T none]() {
+ _ = ff[int /* ERROR cannot implement none \(empty type set\) */ ]
+ _ = ff[T] // pathological but ok because T's type set is empty, too
+ _ = gg[int]
+ _ = gg[T]
+ _ = hh[int]
+ _ = hh[T]
+}
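
For reference, a standalone sketch of the type-set intersection behavior exercised above (the names Small, Textual, Both, and describe are hypothetical, not part of the patched test files): embedding two union interfaces constrains a type parameter to the types common to both unions.

package main

import "fmt"

type Small interface{ ~int | ~string }
type Textual interface{ ~string | ~[]byte }

// Both embeds two unions; its type set is their intersection,
// so only types with underlying type string satisfy it.
type Both interface {
	Small
	Textual
}

func describe[T Both](v T) string { return fmt.Sprintf("%v", v) }

func main() {
	fmt.Println(describe("hello")) // string is in both type sets
	// describe(42) would not compile: int appears only in Small.
}
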
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeinstcycles.go2 b/src/cmd/compile/internal/types2/testdata/check/typeinstcycles.go2
new file mode 100644
index 0000000..74fe191
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/typeinstcycles.go2
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+func F1[T any](_ [unsafe.Sizeof(F1[int])]T) (res T) { return }
+func F2[T any](_ T) (res [unsafe.Sizeof(F2[string])]int) { return }
+func F3[T any](_ [unsafe.Sizeof(F1[string])]int) {}
diff --git a/src/cmd/compile/internal/types2/testdata/check/typeparams.go2 b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
new file mode 100644
index 0000000..68b1f0f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/typeparams.go2
@@ -0,0 +1,531 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// import "io" // for type assertion tests
+
+var _ any // ok to use any anywhere
+func _[_ any, _ interface{any}](any) {
+ var _ any
+}
+
+func identity[T any](x T) T { return x }
+
+func _[_ any](x int) int { panic(0) }
+func _[T any](T /* ERROR redeclared */ T)() {}
+func _[T, T /* ERROR redeclared */ any]() {}
+
+// Constraints (incl. any) may be parenthesized.
+func _[_ (any)]() {}
+func _[_ (interface{})]() {}
+
+func reverse[T any](list []T) []T {
+ rlist := make([]T, len(list))
+ i := len(list)
+ for _, x := range list {
+ i--
+ rlist[i] = x
+ }
+ return rlist
+}
+
+var _ = reverse /* ERROR cannot use generic function reverse */
+var _ = reverse[int, float32 /* ERROR got 2 type arguments */ ] ([]int{1, 2, 3})
+var _ = reverse[int]([ /* ERROR cannot use */ ]float32{1, 2, 3})
+var f = reverse[chan int]
+var _ = f(0 /* ERROR cannot use 0 .* as \[\]chan int */ )
+
+func swap[A, B any](a A, b B) (B, A) { return b, a }
+
+var _ = swap /* ERROR single value is expected */ [int, float32](1, 2)
+var f32, i = swap[int, float32](swap[float32, int](1, 2))
+var _ float32 = f32
+var _ int = i
+
+func swapswap[A, B any](a A, b B) (A, B) {
+ return swap[B, A](b, a)
+}
+
+type F[A, B any] func(A, B) (B, A)
+
+func min[T interface{ ~int }](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func _[T interface{~int | ~float32}](x, y T) bool { return x < y }
+func _[T any](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T interface{~int | ~float32 | ~bool}](x, y T) bool { return x /* ERROR cannot compare */ < y }
+
+func _[T C1[T]](x, y T) bool { return x /* ERROR cannot compare */ < y }
+func _[T C2[T]](x, y T) bool { return x < y }
+
+type C1[T any] interface{}
+type C2[T any] interface{ ~int | ~float32 }
+
+func new[T any]() *T {
+ var x T
+ return &x
+}
+
+var _ = new /* ERROR cannot use generic function new */
+var _ *int = new[int]()
+
+func _[T any](map[T /* ERROR invalid map key type T \(missing comparable constraint\) */]int) {} // w/o constraint we don't know if T is comparable
+
+func f1[T1 any](struct{T1 /* ERROR cannot be a .* type parameter */ }) int { panic(0) }
+var _ = f1[int](struct{T1}{})
+type T1 = int
+
+func f2[t1 any](struct{t1 /* ERROR cannot be a .* type parameter */ ; x float32}) int { panic(0) }
+var _ = f2[t1](struct{t1; x float32}{})
+type t1 = int
+
+
+func f3[A, B, C any](A, struct{x B}, func(A, struct{x B}, *C)) int { panic(0) }
+
+var _ = f3[int, rune, bool](1, struct{x rune}{}, nil)
+
+// indexing
+
+func _[T any] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~int }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[10]int | ~*[20]int | ~map[int]int }] (x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~string | ~[]byte }] (x T, i int) { _ = x[i] }
+func _[T interface{ ~[]int | ~[1]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+func _[T interface{ ~string | ~[]rune }] (x T, i int) { _ = x /* ERROR "cannot index" */ [i] }
+
+// indexing with various combinations of map types in type sets (see issue #42616)
+func _[T interface{ ~[]E | ~map[int]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+func _[T interface{ ~[]E }, E any](x T, i int) { _ = &x[i] }
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _, _ = x[i] } // comma-ok permitted
+func _[T interface{ ~map[int]E }, E any](x T, i int) { _ = &x /* ERROR cannot take address */ [i] }
+func _[T interface{ ~map[int]E | ~map[uint]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // different map element types
+func _[T interface{ ~[]E | ~map[string]E }, E any](x T, i int) { _ = x /* ERROR cannot index */ [i] } // map and non-map types
+
+// indexing with various combinations of array and other types in type sets
+func _[T interface{ [10]int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]byte | string }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+func _[T interface{ [10]int | *[20]int | []int }](x T, i int) { _ = x[i]; _ = x[9]; _ = x[10 /* ERROR out of bounds */ ] }
+
+// indexing with strings and non-variable arrays (assignment not permitted)
+func _[T string](x T) { _ = x[0]; x /* ERROR cannot assign */ [0] = 0 }
+func _[T []byte | string](x T) { x /* ERROR cannot assign */ [0] = 0 }
+func _[T [10]byte]() { f := func() (x T) { return }; f /* ERROR cannot assign */ ()[0] = 0 }
+func _[T [10]byte]() { f := func() (x *T) { return }; f /* ERROR cannot index */ ()[0] = 0 }
+func _[T [10]byte]() { f := func() (x *T) { return }; (*f())[0] = 0 }
+func _[T *[10]byte]() { f := func() (x T) { return }; f()[0] = 0 }
+
+// slicing
+
+func _[T interface{ ~[10]E }, E any] (x T, i, j, k int) { var _ []E = x[i:j] }
+func _[T interface{ ~[10]E }, E any] (x T, i, j, k int) { var _ []E = x[i:j:k] }
+func _[T interface{ ~[]byte }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ ~[]byte }] (x T, i, j, k int) { var _ T = x[i:j:k] }
+func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ ~string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3-index slice of string */ ] }
+
+type myByte1 []byte
+type myByte2 []byte
+func _[T interface{ []byte | myByte1 | myByte2 }] (x T, i, j, k int) { var _ T = x[i:j:k] }
+func _[T interface{ []byte | myByte1 | []int }] (x T, i, j, k int) { var _ T = x[ /* ERROR no core type */ i:j:k] }
+
+func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j] }
+func _[T interface{ []byte | myByte1 | myByte2 | string }] (x T, i, j, k int) { var _ T = x[i:j:k /* ERROR 3-index slice of string */ ] }
+func _[T interface{ []byte | myByte1 | []int | string }] (x T, i, j, k int) { var _ T = x[ /* ERROR no core type */ i:j] }
+
+// len/cap built-ins
+
+func _[T any](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = len(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = len(x) }
+func _[T interface{ ~[10]int }](x T) { _ = len(x) }
+func _[T interface{ ~[]byte }](x T) { _ = len(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = len(x) }
+func _[T interface{ ~chan int }](x T) { _ = len(x) }
+func _[T interface{ ~string | ~[]byte | ~chan int }](x T) { _ = len(x) }
+
+func _[T any](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string | ~[]byte | ~int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~string }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~[10]int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte }](x T) { _ = cap(x) }
+func _[T interface{ ~map[int]int }](x T) { _ = cap(x /* ERROR invalid argument */ ) }
+func _[T interface{ ~chan int }](x T) { _ = cap(x) }
+func _[T interface{ ~[]byte | ~chan int }](x T) { _ = cap(x) }
+
+// range iteration
+
+func _[T interface{}](x T) {
+ for range x /* ERROR cannot range */ {}
+}
+
+type myString string
+
+func _[
+ B1 interface{ string },
+ B2 interface{ string | myString },
+
+ C1 interface{ chan int },
+ C2 interface{ chan int | <-chan int },
+ C3 interface{ chan<- int },
+
+ S1 interface{ []int },
+ S2 interface{ []int | [10]int },
+
+ A1 interface{ [10]int },
+ A2 interface{ [10]int | []int },
+
+ P1 interface{ *[10]int },
+ P2 interface{ *[10]int | *[]int },
+
+ M1 interface{ map[string]int },
+ M2 interface{ map[string]int | map[string]string },
+]() {
+ var b0 string
+ for range b0 {}
+ for _ = range b0 {}
+ for _, _ = range b0 {}
+
+ var b1 B1
+ for range b1 {}
+ for _ = range b1 {}
+ for _, _ = range b1 {}
+
+ var b2 B2
+ for range b2 {}
+
+ var c0 chan int
+ for range c0 {}
+ for _ = range c0 {}
+ for _, _ /* ERROR permits only one iteration variable */ = range c0 {}
+
+ var c1 C1
+ for range c1 {}
+ for _ = range c1 {}
+ for _, _ /* ERROR permits only one iteration variable */ = range c1 {}
+
+ var c2 C2
+ for range c2 {}
+
+ var c3 C3
+ for range c3 /* ERROR receive from send-only channel */ {}
+
+ var s0 []int
+ for range s0 {}
+ for _ = range s0 {}
+ for _, _ = range s0 {}
+
+ var s1 S1
+ for range s1 {}
+ for _ = range s1 {}
+ for _, _ = range s1 {}
+
+ var s2 S2
+ for range s2 /* ERROR cannot range over s2.*no core type */ {}
+
+ var a0 []int
+ for range a0 {}
+ for _ = range a0 {}
+ for _, _ = range a0 {}
+
+ var a1 A1
+ for range a1 {}
+ for _ = range a1 {}
+ for _, _ = range a1 {}
+
+ var a2 A2
+ for range a2 /* ERROR cannot range over a2.*no core type */ {}
+
+ var p0 *[10]int
+ for range p0 {}
+ for _ = range p0 {}
+ for _, _ = range p0 {}
+
+ var p1 P1
+ for range p1 {}
+ for _ = range p1 {}
+ for _, _ = range p1 {}
+
+ var p2 P2
+ for range p2 /* ERROR cannot range over p2.*no core type */ {}
+
+ var m0 map[string]int
+ for range m0 {}
+ for _ = range m0 {}
+ for _, _ = range m0 {}
+
+ var m1 M1
+ for range m1 {}
+ for _ = range m1 {}
+ for _, _ = range m1 {}
+
+ var m2 M2
+ for range m2 /* ERROR cannot range over m2.*no core type */ {}
+}
+
+// type inference checks
+
+var _ = new() /* ERROR cannot infer T */
+
+func f4[A, B, C any](A, B) C { panic(0) }
+
+var _ = f4(1, 2) /* ERROR cannot infer C */
+var _ = f4[int, float32, complex128](1, 2)
+
+func f5[A, B, C any](A, []*B, struct{f []C}) int { panic(0) }
+
+var _ = f5[int, float32, complex128](0, nil, struct{f []complex128}{})
+var _ = f5(0, nil, struct{f []complex128}{}) // ERROR cannot infer
+var _ = f5(0, []*float32{new[float32]()}, struct{f []complex128}{})
+
+func f6[A any](A, []A) int { panic(0) }
+
+var _ = f6(0, nil)
+
+func f6nil[A any](A) int { panic(0) }
+
+var _ = f6nil(nil) // ERROR cannot infer
+
+// type inference with variadic functions
+
+func f7[T any](...T) T { panic(0) }
+
+var _ int = f7() /* ERROR cannot infer T */
+var _ int = f7(1)
+var _ int = f7(1, 2)
+var _ int = f7([]int{}...)
+var _ int = f7 /* ERROR cannot use */ ([]float64{}...)
+var _ float64 = f7([]float64{}...)
+var _ = f7[float64](1, 2.3)
+var _ = f7(float64(1), 2.3)
+var _ = f7(1, 2.3 /* ERROR does not match */ )
+var _ = f7(1.2, 3 /* ERROR does not match */ )
+
+func f8[A, B any](A, B, ...B) int { panic(0) }
+
+var _ = f8(1 /* ERROR not enough arguments */ )
+var _ = f8(1, 2.3)
+var _ = f8(1, 2.3, 3.4, 4.5)
+var _ = f8(1, 2.3, 3.4, 4 /* ERROR does not match */ )
+var _ = f8[int, float64](1, 2.3, 3.4, 4)
+
+var _ = f8[int, float64](0, 0, nil...) // test case for #18268
+
+// init functions cannot have type parameters
+
+func init() {}
+func init[_ /* ERROR func init must have no type parameters */ any]() {}
+func init[P /* ERROR func init must have no type parameters */ any]() {}
+
+type T struct {}
+
+func (T) m1() {}
+// The type checker accepts method type parameters if configured accordingly.
+func (T) m2[_ any]() {}
+func (T) m3[P any]() {}
+
+// type inference across parameterized types
+
+type S1[P any] struct { f P }
+
+func f9[P any](x S1[P]) {}
+
+func _() {
+ f9[int](S1[int]{42})
+ f9(S1[int]{42})
+}
+
+type S2[A, B, C any] struct{}
+
+func f10[X, Y, Z any](a S2[X, int, Z], b S2[X, Y, bool]) {}
+
+func _[P any]() {
+ f10[int, float32, string](S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[int, int, string]{}, S2[int, float32, bool]{})
+ f10(S2[P, int, P]{}, S2[P, float32, bool]{})
+}
+
+// corner case for type inference
+// (was a bug: after instantiating f11, the type-checker didn't mark f11 as non-generic)
+
+func f11[T any]() {}
+
+func _() {
+ f11[int]()
+}
+
+// the previous example was extracted from
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// func f12[T interface{m() T}]() {}
+//
+// type A[T any] T
+//
+// func (a A[T]) m() A[T]
+//
+// func _[T any]() {
+// f12[A[T]]()
+// }
+
+// method expressions
+
+func (_ S1[P]) m()
+
+func _() {
+ m := S1[int].m
+ m(struct { f int }{42})
+}
+
+func _[T any] (x T) {
+ m := S1[T].m
+ m(S1[T]{x})
+}
+
+// type parameters in methods (generalization)
+
+type R0 struct{}
+
+func (R0) _[T any](x T) {}
+func (R0 /* ERROR invalid receiver */ ) _[R0 any]() {} // scope of type parameters starts at "func"
+
+type R1[A, B any] struct{}
+
+func (_ R1[A, B]) m0(A, B)
+func (_ R1[A, B]) m1[T any](A, B, T) T { panic(0) }
+func (_ R1 /* ERROR not a generic type */ [R1, _]) _()
+func (_ R1[A, B]) _[A /* ERROR redeclared */ any](B) {}
+
+func _() {
+ var r R1[int, string]
+ r.m1[rune](42, "foo", 'a')
+ r.m1[rune](42, "foo", 1.2 /* ERROR cannot use .* as rune .* \(truncated\) */)
+ r.m1(42, "foo", 1.2) // using type inference
+ var _ float64 = r.m1(42, "foo", 1.2)
+}
+
+type I1[A any] interface {
+ m1(A)
+}
+
+var _ I1[int] = r1[int]{}
+
+type r1[T any] struct{}
+
+func (_ r1[T]) m1(T)
+
+type I2[A, B any] interface {
+ m1(A)
+ m2(A) B
+}
+
+var _ I2[int, float32] = R2[int, float32]{}
+
+type R2[P, Q any] struct{}
+
+func (_ R2[X, Y]) m1(X)
+func (_ R2[X, Y]) m2(X) Y
+
+// type assertions and type switches over generic types
+// NOTE: These are currently disabled because it's unclear what the correct
+// approach is, and one can always work around by assigning the variable to
+// an interface first.
+
+// // ReadByte1 corresponds to the ReadByte example in the draft design.
+// func ReadByte1[T io.Reader](r T) (byte, error) {
+// if br, ok := r.(io.ByteReader); ok {
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // ReadBytes2 is like ReadByte1 but uses a type switch instead.
+// func ReadByte2[T io.Reader](r T) (byte, error) {
+// switch br := r.(type) {
+// case io.ByteReader:
+// return br.ReadByte()
+// }
+// var b [1]byte
+// _, err := r.Read(b[:])
+// return b[0], err
+// }
+//
+// // type assertions and type switches over generic types are strict
+// type I3 interface {
+// m(int)
+// }
+//
+// type I4 interface {
+// m() int // different signature from I3.m
+// }
+//
+// func _[T I3](x I3, p T) {
+// // type assertions and type switches over interfaces are not strict
+// _ = x.(I4)
+// switch x.(type) {
+// case I4:
+// }
+//
+// // type assertions and type switches over generic types are strict
+// _ = p /* ERROR cannot have dynamic type I4 */.(I4)
+// switch p.(type) {
+// case I4 /* ERROR cannot have dynamic type I4 */ :
+// }
+// }
+
+// type assertions and type switches over generic types lead to errors for now
+
+func _[T any](x T) {
+ _ = x /* ERROR cannot use type assertion */ .(int)
+ switch x /* ERROR cannot use type switch */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+func _[T interface{~int}](x T) {
+ _ = x /* ERROR cannot use type assertion */ .(int)
+ switch x /* ERROR cannot use type switch */ .(type) {
+ }
+
+ // work-around
+ var t interface{} = x
+ _ = t.(int)
+ switch t.(type) {
+ }
+}
+
+// error messages related to type bounds mention those bounds
+type C[P any] interface{}
+
+func _[P C[P]] (x P) {
+ x.m /* ERROR x.m undefined */ ()
+}
+
+type I interface {}
+
+func _[P I] (x P) {
+ x.m /* ERROR type P has no field or method m */ ()
+}
+
+func _[P interface{}] (x P) {
+ x.m /* ERROR type P has no field or method m */ ()
+}
+
+func _[P any] (x P) {
+ x.m /* ERROR type P has no field or method m */ ()
+}
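
For reference, a standalone sketch of the operand rules tested above for indexing, len, and range on type parameters (IntSlice, Sum, and Scores are hypothetical names): the operations are permitted because the constraint has the core type []int.

package main

import "fmt"

type IntSlice interface{ ~[]int }

// Sum may index and range over s because every type in IntSlice's
// type set has the underlying (core) type []int.
func Sum[S IntSlice](s S) int {
	total := 0
	for i := range s {
		total += s[i]
	}
	return total
}

type Scores []int

func main() {
	fmt.Println(Sum([]int{1, 2, 3})) // 6
	fmt.Println(Sum(Scores{4, 5}))   // 9, via the ~[]int term
}
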
diff --git a/src/cmd/compile/internal/types2/testdata/check/unions.go2 b/src/cmd/compile/internal/types2/testdata/check/unions.go2
new file mode 100644
index 0000000..bcd7de6
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/unions.go2
@@ -0,0 +1,66 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check that overlong unions don't bog down type checking.
+// Disallow them for now.
+
+package p
+
+type t int
+
+type (
+ t00 t; t01 t; t02 t; t03 t; t04 t; t05 t; t06 t; t07 t; t08 t; t09 t
+ t10 t; t11 t; t12 t; t13 t; t14 t; t15 t; t16 t; t17 t; t18 t; t19 t
+ t20 t; t21 t; t22 t; t23 t; t24 t; t25 t; t26 t; t27 t; t28 t; t29 t
+ t30 t; t31 t; t32 t; t33 t; t34 t; t35 t; t36 t; t37 t; t38 t; t39 t
+ t40 t; t41 t; t42 t; t43 t; t44 t; t45 t; t46 t; t47 t; t48 t; t49 t
+ t50 t; t51 t; t52 t; t53 t; t54 t; t55 t; t56 t; t57 t; t58 t; t59 t
+ t60 t; t61 t; t62 t; t63 t; t64 t; t65 t; t66 t; t67 t; t68 t; t69 t
+ t70 t; t71 t; t72 t; t73 t; t74 t; t75 t; t76 t; t77 t; t78 t; t79 t
+ t80 t; t81 t; t82 t; t83 t; t84 t; t85 t; t86 t; t87 t; t88 t; t89 t
+ t90 t; t91 t; t92 t; t93 t; t94 t; t95 t; t96 t; t97 t; t98 t; t99 t
+)
+
+type u99 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98
+}
+
+type u100a interface {
+ u99|float32
+}
+
+type u100b interface {
+ u99|float64
+}
+
+type u101 interface {
+ t00|t01|t02|t03|t04|t05|t06|t07|t08|t09|
+ t10|t11|t12|t13|t14|t15|t16|t17|t18|t19|
+ t20|t21|t22|t23|t24|t25|t26|t27|t28|t29|
+ t30|t31|t32|t33|t34|t35|t36|t37|t38|t39|
+ t40|t41|t42|t43|t44|t45|t46|t47|t48|t49|
+ t50|t51|t52|t53|t54|t55|t56|t57|t58|t59|
+ t60|t61|t62|t63|t64|t65|t66|t67|t68|t69|
+ t70|t71|t72|t73|t74|t75|t76|t77|t78|t79|
+ t80|t81|t82|t83|t84|t85|t86|t87|t88|t89|
+ t90|t91|t92|t93|t94|t95|t96|t97|t98|t99|
+ int // ERROR cannot handle more than 100 union terms
+}
+
+type u102 interface {
+ int /* ERROR cannot handle more than 100 union terms */ |string|u100a
+}
+
+type u200 interface {
+ u100a /* ERROR cannot handle more than 100 union terms */ |u100b
+}
diff --git a/src/cmd/compile/internal/types2/testdata/check/vardecl.src b/src/cmd/compile/internal/types2/testdata/check/vardecl.src
new file mode 100644
index 0000000..c3fe61c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/check/vardecl.src
@@ -0,0 +1,214 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package vardecl
+
+// Prerequisites.
+import "math"
+func f() {}
+func g() (x, y int) { return }
+var m map[string]int
+
+// Var decls must have a type or an initializer.
+var _ int
+var _, _ int
+
+var _ /* ERROR "expecting type" */
+var _, _ /* ERROR "expecting type" */
+var _, _, _ /* ERROR "expecting type" */
+
+// The initializer must be an expression.
+var _ = int /* ERROR "not an expression" */
+var _ = f /* ERROR "used as value" */ ()
+
+// Identifier and expression arity must match.
+var _, _ = 1, 2
+var _ = 1, 2 /* ERROR "extra init expr 2" */
+var _, _ = 1 /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */
+var _, _, _ /* ERROR "missing init expr for _" */ = 1, 2
+
+var _ = g /* ERROR "2-valued g" */ ()
+var _, _ = g()
+var _, _, _ = g /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */ ()
+
+var _ = m["foo"]
+var _, _ = m["foo"]
+var _, _, _ = m /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */ ["foo"]
+
+var _, _ int = 1, 2
+var _ int = 1, 2 /* ERROR "extra init expr 2" */
+var _, _ int = 1 /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */
+var _, _, _ /* ERROR "missing init expr for _" */ int = 1, 2
+
+var (
+ _, _ = 1, 2
+ _ = 1, 2 /* ERROR "extra init expr 2" */
+ _, _ = 1 /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */
+ _, _, _ /* ERROR "missing init expr for _" */ = 1, 2
+
+ _ = g /* ERROR "2-valued g" */ ()
+ _, _ = g()
+ _, _, _ = g /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */ ()
+
+ _ = m["foo"]
+ _, _ = m["foo"]
+ _, _, _ = m /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */ ["foo"]
+
+ _, _ int = 1, 2
+ _ int = 1, 2 /* ERROR "extra init expr 2" */
+ _, _ int = 1 /* ERROR "cannot initialize [0-9]+ variables with [0-9]+ values" */
+ _, _, _ /* ERROR "missing init expr for _" */ int = 1, 2
+)
+
+// Variables declared in function bodies must be 'used'.
+type T struct{}
+func (r T) _(a, b, c int) (u, v, w int) {
+ var x1 /* ERROR "declared but not used" */ int
+ var x2 /* ERROR "declared but not used" */ int
+ x1 = 1
+ (x2) = 2
+
+ y1 /* ERROR "declared but not used" */ := 1
+ y2 /* ERROR "declared but not used" */ := 2
+ y1 = 1
+ (y1) = 2
+
+ {
+ var x1 /* ERROR "declared but not used" */ int
+ var x2 /* ERROR "declared but not used" */ int
+ x1 = 1
+ (x2) = 2
+
+ y1 /* ERROR "declared but not used" */ := 1
+ y2 /* ERROR "declared but not used" */ := 2
+ y1 = 1
+ (y1) = 2
+ }
+
+ if x /* ERROR "declared but not used" */ := 0; a < b {}
+
+ switch x /* ERROR "declared but not used" */, y := 0, 1; a {
+ case 0:
+ _ = y
+ case 1:
+ x /* ERROR "declared but not used" */ := 0
+ }
+
+ var t interface{}
+ switch t /* ERROR "declared but not used" */ := t.(type) {}
+
+ switch t /* ERROR "declared but not used" */ := t.(type) {
+ case int:
+ }
+
+ switch t /* ERROR "declared but not used" */ := t.(type) {
+ case int:
+ case float32, complex64:
+ t = nil
+ }
+
+ switch t := t.(type) {
+ case int:
+ case float32, complex64:
+ _ = t
+ }
+
+ switch t := t.(type) {
+ case int:
+ case float32:
+ case string:
+ _ = func() string {
+ return t
+ }
+ }
+
+ switch t := t; t /* ERROR "declared but not used" */ := t.(type) {}
+
+ var z1 /* ERROR "declared but not used" */ int
+ var z2 int
+ _ = func(a, b, c int) (u, v, w int) {
+ z1 = a
+ (z1) = b
+ a = z2
+ return
+ }
+
+ var s []int
+ var i /* ERROR "declared but not used" */ , j int
+ for i, j = range s {
+ _ = j
+ }
+
+ for i, j /* ERROR "declared but not used" */ := range s {
+ _ = func() int {
+ return i
+ }
+ }
+ return
+}
+
+// Unused variables in function literals must lead to only one error (issue #22524).
+func _() {
+ _ = func() {
+ var x /* ERROR declared but not used */ int
+ }
+}
+
+// Invalid variable declarations must not lead to "declared but not used" errors.
+func _() {
+ var a x // ERROR undeclared name: x
+ var b = x // ERROR undeclared name: x
+ var c int = x // ERROR undeclared name: x
+ var d, e, f x /* ERROR x */ /* ERROR x */ /* ERROR x */
+ var g, h, i = x, x, x /* ERROR x */ /* ERROR x */ /* ERROR x */
+ var j, k, l float32 = x, x, x /* ERROR x */ /* ERROR x */ /* ERROR x */
+ // but no "declared but not used" errors
+}
+
+// Invalid (unused) expressions must not lead to spurious "declared but not used" errors.
+func _() {
+ var a, b, c int
+ var x, y int
+ x, y = a /* ERROR cannot assign [0-9]+ values to [0-9]+ variables */ , b, c
+ _ = x
+ _ = y
+}
+
+func _() {
+ var x int
+ return x /* ERROR too many return values */
+ return math /* ERROR too many return values */ .Sin(0)
+}
+
+func _() int {
+ var x, y int
+ return x, y /* ERROR too many return values */
+}
+
+// Short variable declarations must declare at least one new non-blank variable.
+func _() {
+ _ := /* ERROR no new variables */ 0
+ _, a := 0, 1
+ _, a := /* ERROR no new variables */ 0, 1
+ _, a, b := 0, 1, 2
+ _, _, _ := /* ERROR no new variables */ 0, 1, 2
+
+ _ = a
+ _ = b
+}
+
+// Test case for variables depending on function literals (see also #22992).
+var A /* ERROR initialization cycle */ = func() int { return A }()
+
+func _() {
+ // The function literal below must not see a.
+ var a = func() int { return a /* ERROR "undeclared name" */ }()
+ var _ = func() int { return a }()
+
+ // The function literal below must not see x, y, or z.
+ var x, y, z = 0, 1, func() int { return x /* ERROR "undeclared name" */ + y /* ERROR "undeclared name" */ + z /* ERROR "undeclared name" */ }()
+ _, _, _ = x, y, z
+}
+
+// TODO(gri) consolidate other var decl checks in this file \ No newline at end of file
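
For reference, a standalone sketch of the declaration rules checked above, in valid form (swap is a hypothetical helper): arities must match, and values that are deliberately unused go to the blank identifier.

package main

import "fmt"

func swap(a, b int) (int, int) { return b, a }

func main() {
	var a, b = 1, 2    // two variables, two initializers
	x, y := swap(a, b) // two variables, one 2-valued call
	_ = x              // blank identifier: intentionally unused value
	fmt.Println(y)
}
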
diff --git a/src/cmd/compile/internal/types2/testdata/examples/constraints.go2 b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
new file mode 100644
index 0000000..0d3e282
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/constraints.go2
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic constraint interfaces.
+
+package p
+
+type MyInt int
+
+type (
+ // Arbitrary types may be embedded like interfaces.
+ _ interface{int}
+ _ interface{~int}
+
+ // Types may be combined into a union.
+ union interface{int|~string}
+
+ // Union terms must describe disjoint (non-overlapping) type sets.
+ _ interface{int|int /* ERROR overlapping terms int */ }
+ _ interface{int|~ /* ERROR overlapping terms ~int */ int }
+ _ interface{~int|~ /* ERROR overlapping terms ~int */ int }
+ _ interface{~int|MyInt /* ERROR overlapping terms p.MyInt and ~int */ }
+ _ interface{int|any}
+ _ interface{int|~string|union}
+ _ interface{int|~string|interface{int}}
+ _ interface{union|int} // interfaces (here: union) are ignored when checking for overlap
+ _ interface{union|union} // ditto
+
+ // For now we do not permit interfaces with methods in unions.
+ _ interface{~ /* ERROR invalid use of ~ */ any}
+ _ interface{int|interface /* ERROR cannot use .* in union */ { m() }}
+)
+
+type (
+ // Tilde is not permitted on defined types or interfaces.
+ foo int
+ bar any
+ _ interface{foo}
+ _ interface{~ /* ERROR invalid use of ~ */ foo }
+ _ interface{~ /* ERROR invalid use of ~ */ bar }
+)
+
+// Stand-alone type parameters are not permitted as elements or terms in unions.
+type (
+ _[T interface{ *T } ] struct{} // ok
+ _[T interface{ int | *T } ] struct{} // ok
+ _[T interface{ T /* ERROR cannot embed a type parameter */ } ] struct{}
+ _[T interface{ ~T /* ERROR cannot embed a type parameter */ } ] struct{}
+ _[T interface{ int|T /* ERROR cannot embed a type parameter */ }] struct{}
+)
+
+// Multiple embedded union elements are intersected. The order in which they
+// appear in the interface doesn't matter since intersection is a symmetric
+// operation.
+
+type myInt1 int
+type myInt2 int
+
+func _[T interface{ myInt1|myInt2; ~int }]() T { return T(0) }
+func _[T interface{ ~int; myInt1|myInt2 }]() T { return T(0) }
+
+// Here the intersections are empty - there's no type that's in the type set of T.
+func _[T interface{ myInt1|myInt2; int }]() T { return T(0 /* ERROR cannot convert */ ) }
+func _[T interface{ int; myInt1|myInt2 }]() T { return T(0 /* ERROR cannot convert */ ) }
+
+// Union elements may be interfaces as long as they don't define
+// any methods or embed comparable.
+
+type (
+ Integer interface{ ~int|~int8|~int16|~int32|~int64 }
+ Unsigned interface{ ~uint|~uint8|~uint16|~uint32|~uint64 }
+ Floats interface{ ~float32|~float64 }
+ Complex interface{ ~complex64|~complex128 }
+ Number interface{ Integer|Unsigned|Floats|Complex }
+ Ordered interface{ Integer|Unsigned|Floats|~string }
+
+ _ interface{ Number | error /* ERROR cannot use error in union */ }
+ _ interface{ Ordered | comparable /* ERROR cannot use comparable in union */ }
+)
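
For reference, a standalone sketch of the union-constraint style shown above (Ordered, Max, and Celsius are hypothetical names): a union of approximation terms serves as an ordered constraint, and defined types are admitted through the ~ terms.

package main

import "fmt"

type Ordered interface {
	~int | ~int64 | ~float64 | ~string
}

// Max works for any type whose underlying type appears in Ordered.
func Max[T Ordered](x, y T) T {
	if x > y {
		return x
	}
	return y
}

type Celsius float64

func main() {
	fmt.Println(Max(3, 7))                     // int
	fmt.Println(Max("bar", "foo"))             // string
	fmt.Println(Max(Celsius(20), Celsius(25))) // defined type via ~float64
}
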
diff --git a/src/cmd/compile/internal/types2/testdata/examples/functions.go2 b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
new file mode 100644
index 0000000..ef8953c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/functions.go2
@@ -0,0 +1,219 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of type-parameterized functions.
+
+package p
+
+// Reverse is a generic function that takes a []T argument and
+// reverses that slice in place.
+func Reverse[T any](list []T) {
+ i := 0
+ j := len(list)-1
+ for i < j {
+ list[i], list[j] = list[j], list[i]
+ i++
+ j--
+ }
+}
+
+func _() {
+ // Reverse can be called with an explicit type argument.
+ Reverse[int](nil)
+ Reverse[string]([]string{"foo", "bar"})
+ Reverse[struct{x, y int}]([]struct{x, y int}{{1, 2}, {2, 3}, {3, 4}})
+
+ // Since the type parameter is used for an incoming argument,
+ // it can be inferred from the provided argument's type.
+ Reverse([]string{"foo", "bar"})
+ Reverse([]struct{x, y int}{{1, 2}, {2, 3}, {3, 4}})
+
+ // But the incoming argument must have a type, even if it's a
+ // default type. An untyped nil won't work.
+ // Reverse(nil) // this won't type-check
+
+ // A typed nil will work, though.
+ Reverse([]int(nil))
+}
+
+// Certain functions, such as the built-in `new` could be written using
+// type parameters.
+func new[T any]() *T {
+ var x T
+ return &x
+}
+
+// When calling our own `new`, we need to pass the type parameter
+// explicitly since there is no (value) argument from which the
+// result type could be inferred. We don't try to infer the
+// result type from the assignment to keep things simple and
+// easy to understand.
+var _ = new[int]()
+var _ *float64 = new[float64]() // the result type is indeed *float64
+
+// A function may have multiple type parameters, of course.
+func foo[A, B, C any](a A, b []B, c *C) B {
+ // do something here
+ return b[0]
+}
+
+// As before, we can pass type parameters explicitly.
+var s = foo[int, string, float64](1, []string{"first"}, new[float64]())
+
+// Or we can use type inference.
+var _ float64 = foo(42, []float64{1.0}, &s)
+
+// Type inference works in a straight-forward manner even
+// for variadic functions.
+func variadic[A, B any](A, B, ...B) int { panic(0) }
+
+// var _ = variadic(1) // ERROR not enough arguments
+var _ = variadic(1, 2.3)
+var _ = variadic(1, 2.3, 3.4, 4.5)
+var _ = variadic[int, float64](1, 2.3, 3.4, 4)
+
+// Type inference also works in recursive function calls where
+// the inferred type is the type parameter of the caller.
+func f1[T any](x T) {
+ f1(x)
+}
+
+func f2a[T any](x, y T) {
+ f2a(x, y)
+}
+
+func f2b[T any](x, y T) {
+ f2b(y, x)
+}
+
+func g2a[P, Q any](x P, y Q) {
+ g2a(x, y)
+}
+
+func g2b[P, Q any](x P, y Q) {
+ g2b(y, x)
+}
+
+// Here's an example of a recursive function call with variadic
+// arguments and type inference inferring the type parameter of
+// the caller (i.e., itself).
+func max[T interface{ ~int }](x ...T) T {
+ var x0 T
+ if len(x) > 0 {
+ x0 = x[0]
+ }
+ if len(x) > 1 {
+ x1 := max(x[1:]...)
+ if x1 > x0 {
+ return x1
+ }
+ }
+ return x0
+}
+
+// When inferring channel types, the channel direction is ignored
+// for the purpose of type inference. Once the type has been
+// inferred, the usual parameter passing rules are applied.
+// Thus even if a type can be inferred successfully, the function
+// call may not be valid.
+
+func fboth[T any](chan T) {}
+func frecv[T any](<-chan T) {}
+func fsend[T any](chan<- T) {}
+
+func _() {
+ var both chan int
+ var recv <-chan int
+ var send chan<-int
+
+ fboth(both)
+ fboth(recv /* ERROR cannot use */ )
+ fboth(send /* ERROR cannot use */ )
+
+ frecv(both)
+ frecv(recv)
+ frecv(send /* ERROR cannot use */ )
+
+ fsend(both)
+ fsend(recv /* ERROR cannot use */)
+ fsend(send)
+}
+
+func ffboth[T any](func(chan T)) {}
+func ffrecv[T any](func(<-chan T)) {}
+func ffsend[T any](func(chan<- T)) {}
+
+func _() {
+ var both func(chan int)
+ var recv func(<-chan int)
+ var send func(chan<- int)
+
+ ffboth(both)
+ ffboth(recv /* ERROR cannot use */ )
+ ffboth(send /* ERROR cannot use */ )
+
+ ffrecv(both /* ERROR cannot use */ )
+ ffrecv(recv)
+ ffrecv(send /* ERROR cannot use */ )
+
+ ffsend(both /* ERROR cannot use */ )
+ ffsend(recv /* ERROR cannot use */ )
+ ffsend(send)
+}
+
+// When inferring elements of unnamed composite parameter types,
+// if the arguments are defined types, use their underlying types.
+// Even though the matching types are not exactly structurally the
+// same (one is a type literal, the other a named type), because
+// assignment is permitted, parameter passing is permitted as well,
+// so type inference should be able to handle these cases well.
+
+func g1[T any]([]T) {}
+func g2[T any]([]T, T) {}
+func g3[T any](*T, ...T) {}
+
+func _() {
+ type intSlize []int
+ g1([]int{})
+ g1(intSlize{})
+ g2(nil, 0)
+
+ type myString string
+ var s1 string
+ g3(nil, "1", myString("2"), "3")
+ g3(&s1, "1", myString /* ERROR does not match */ ("2"), "3")
+ _ = s1
+
+ type myStruct struct{x int}
+ var s2 myStruct
+ g3(nil, struct{x int}{}, myStruct{})
+ g3(&s2, struct{x int}{}, myStruct{})
+ g3(nil, myStruct{}, struct{x int}{})
+ g3(&s2, myStruct{}, struct{x int}{})
+}
+
+// Here's a realistic example.
+
+func append[T any](s []T, t ...T) []T { panic(0) }
+
+func _() {
+ var f func()
+ type Funcs []func()
+ var funcs Funcs
+ _ = append(funcs, f)
+}
+
+// Generic type declarations cannot have empty type parameter lists
+// (that would indicate a slice type). Thus, generic functions cannot
+// have empty type parameter lists, either. This is a syntax error.
+
+func h[] /* ERROR empty type parameter list */ () {}
+
+func _() {
+ h[] /* ERROR operand */ ()
+}
+
+// Parameterized functions must have a function body.
+
+func _ /* ERROR missing function body */ [P any]()
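
For reference, a standalone sketch of argument-based type inference for generic functions in the spirit of the examples above (Map is a hypothetical helper, not part of the patched sources).

package main

import (
	"fmt"
	"strconv"
)

// Map applies f to every element; A and B are normally inferred
// from the arguments rather than written explicitly.
func Map[A, B any](in []A, f func(A) B) []B {
	out := make([]B, len(in))
	for i, v := range in {
		out[i] = f(v)
	}
	return out
}

func main() {
	// A=int and B=string are inferred from the arguments.
	fmt.Println(Map([]int{1, 2, 3}, strconv.Itoa))

	// Both type arguments may also be given explicitly.
	fmt.Println(Map[int, int]([]int{1, 2, 3}, func(x int) int { return x * x }))
}
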
diff --git a/src/cmd/compile/internal/types2/testdata/examples/inference.go2 b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
new file mode 100644
index 0000000..e3d6bfb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/inference.go2
@@ -0,0 +1,116 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of type inference.
+
+package p
+
+type Ordered interface {
+ ~int|~float64|~string
+}
+
+func min[T Ordered](x, y T) T { panic(0) }
+
+func _() {
+ // min can be called with explicit instantiation.
+ _ = min[int](1, 2)
+
+ // Alternatively, the type argument can be inferred from
+ // one of the arguments. Untyped arguments will be considered
+ // last.
+ var x int
+ _ = min(x, x)
+ _ = min(x, 1)
+ _ = min(x, 1.0)
+ _ = min(1, 2)
+ _ = min(1, 2.3 /* ERROR default type float64 .* does not match */ )
+
+ var y float64
+ _ = min(1, y)
+ _ = min(1.2, y)
+ _ = min(1.2, 3.4)
+ _ = min(1.2, 3 /* ERROR default type int .* does not match */ )
+
+ var s string
+ _ = min(s, "foo")
+ _ = min("foo", "bar")
+}
+
+func mixed[T1, T2, T3 any](T1, T2, T3) {}
+
+func _() {
+ // mixed can be called with explicit instantiation.
+ mixed[int, string, bool](0, "", false)
+
+ // Alternatively, partial type arguments may be provided
+// (from left to right), and the remaining ones may be inferred.
+ mixed[int, string](0, "", false)
+ mixed[int](0, "", false)
+ mixed(0, "", false)
+
+ // Provided type arguments always take precedence over
+ // inferred types.
+ mixed[int, string](1.1 /* ERROR cannot use 1.1 */ , "", false)
+}
+
+func related1[Slice interface{~[]Elem}, Elem any](s Slice, e Elem) {}
+
+func _() {
+ // related1 can be called with explicit instantiation.
+ var si []int
+ related1[[]int, int](si, 0)
+
+ // Alternatively, the 2nd type argument can be inferred
+ // from the first one through constraint type inference.
+ var ss []string
+ _ = related1[[]string]
+ related1[[]string](ss, "foo")
+
+ // A type argument inferred from another explicitly provided
+ // type argument overrides whatever value argument type is given.
+ related1[[]string](ss, 0 /* ERROR cannot use 0 */ )
+
+ // A type argument may be inferred from a value argument
+ // and then help infer another type argument via constraint
+ // type inference.
+ related1(si, 0)
+ related1(si, "foo" /* ERROR cannot use "foo" */ )
+}
+
+func related2[Elem any, Slice interface{[]Elem}](e Elem, s Slice) {}
+
+func _() {
+ // related2 can be called with explicit instantiation.
+ var si []int
+ related2[int, []int](0, si)
+
+ // Alternatively, the 2nd type argument can be inferred
+ // from the first one through constraint type inference.
+ var ss []string
+ _ = related2[string]
+ related2[string]("foo", ss)
+
+ // A type argument may be inferred from a value argument
+ // and then help infer another type argument via constraint
+ // type inference. Untyped arguments are always considered
+ // last.
+ related2(1.2, []float64{})
+ related2(1.0, []int{})
+ related2( /* ERROR does not implement */ float64(1.0), []int{}) // TODO(gri) fix error position
+}
+
+type List[P any] []P
+
+func related3[Elem any, Slice []Elem | List[Elem]]() Slice { return nil }
+
+func _() {
+ // related3 can be instantiated explicitly
+ related3[int, []int]()
+ related3[byte, List[byte]]()
+
+ // The 2nd type argument cannot be inferred from the first
+ // one because there's two possible choices: []Elem and
+ // List[Elem].
+ related3[int]( /* ERROR cannot infer Slice */ )
+}
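
For reference, a standalone sketch of constraint type inference as exercised by related1 and related2 above (Last and Words are hypothetical names): once the slice type argument is known, the element type follows from the ~[]E constraint.

package main

import "fmt"

// Last returns the final element; E is inferred from S's constraint.
// It panics on an empty slice, which is fine for this sketch.
func Last[S interface{ ~[]E }, E any](s S) E {
	return s[len(s)-1]
}

type Words []string

func main() {
	fmt.Println(Last(Words{"alpha", "beta"})) // S=Words, E=string inferred
	fmt.Println(Last([]int{1, 2, 3}))         // S=[]int, E=int inferred
}
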
diff --git a/src/cmd/compile/internal/types2/testdata/examples/methods.go2 b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
new file mode 100644
index 0000000..a46f789
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/methods.go2
@@ -0,0 +1,112 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of methods on type-parameterized types.
+
+package p
+
+// Parameterized types may have methods.
+type T1[A any] struct{ a A }
+
+// When declaring a method for a parameterized type, the "instantiated"
+// receiver type acts as an implicit declaration of the type parameters
+// for the receiver type. In the example below, method m1 on type T1 has
+// the receiver type T1[A] which declares the type parameter A for use
+// with this method. That is, within the method m1, A stands for the
+// actual type argument provided to an instantiated T1.
+func (t T1[A]) m1() A { return t.a }
+
+// For instance, if T1 is instantiated with the type int, the type
+// parameter A in m1 assumes that type (int) as well and we can write
+// code like this:
+var x T1[int]
+var _ int = x.m1()
+
+// Because the type parameter provided to a parameterized receiver type
+// is declared through that receiver declaration, it must be an identifier.
+// It cannot possibly be some other type because the receiver type is not
+// instantiated with concrete types; it stands for the parameterized
+// receiver type.
+func (t T1[[ /* ERROR must be an identifier */ ]int]) m2() {}
+
+// Note that using what looks like a predeclared identifier, say int,
+// as type parameter in this situation is deceptive and considered bad
+// style. In m3 below, int is the name of the local receiver type parameter
+// and it shadows the predeclared identifier int which then cannot be used
+// anymore as expected.
+// This is no different from locally re-declaring a predeclared identifier
+// and usually should be avoided. There are some notable exceptions; e.g.,
+// sometimes it makes sense to use the identifier "copy" which happens to
+// also be the name of a predeclared built-in function.
+func (t T1[int]) m3() { var _ int = 42 /* ERROR cannot use 42 .* as int */ }
+
+// The names of the type parameters used in a parameterized receiver
+// type don't have to match the type parameter names in the declaration
+// of the type used for the receiver. In our example, even though T1 is
+// declared with type parameter named A, methods using that receiver type
+// are free to use their own name for that type parameter. That is, the
+// name of type parameters is always local to the declaration where they
+// are introduced. In our example we can write a method m2 and use the
+// name X instead of A for the type parameter w/o any difference.
+func (t T1[X]) m4() X { return t.a }
+
+// If the receiver type is parameterized, type parameters must always be
+// provided: this simply follows from the general rule that a parameterized
+// type must be instantiated before it can be used. A method receiver
+// declaration using a parameterized receiver type is no exception. It is
+// simply that such receiver type expressions perform two tasks simultaneously:
+// they declare the (local) type parameters and then use them to instantiate
+// the receiver type. Forgetting to provide a type parameter leads to an error.
+func (t T1 /* ERROR generic type .* without instantiation */ ) m5() {}
+
+// However, sometimes we don't need the type parameter, and thus it is
+// inconvenient to have to choose a name. Since the receiver type expression
+// serves as a declaration for its type parameters, we are free to choose the
+// blank identifier:
+func (t T1[_]) m6() {}
+
+// Naturally, these rules apply to any number of type parameters on the receiver
+// type. Here are some more complex examples.
+type T2[A, B, C any] struct {
+ a A
+ b B
+ c C
+}
+
+// Naming of the type parameters is local and has no semantic impact:
+func (t T2[A, B, C]) m1() (A, B, C) { return t.a, t.b, t.c }
+func (t T2[C, B, A]) m2() (C, B, A) { return t.a, t.b, t.c }
+func (t T2[X, Y, Z]) m3() (X, Y, Z) { return t.a, t.b, t.c }
+
+// Type parameters may be left blank if they are not needed:
+func (t T2[A, _, C]) m4() (A, C) { return t.a, t.c }
+func (t T2[_, _, X]) m5() X { return t.c }
+func (t T2[_, _, _]) m6() {}
+
+// As usual, blank names may be used for any object which we don't care about
+// using later. For instance, we may write an unnamed method with a receiver
+// that cannot be accessed:
+func (_ T2[_, _, _]) _() int { return 42 }
+
+// Because a receiver parameter list is simply a parameter list, we can
+// leave out the receiver argument for receiver types.
+type T0 struct{}
+func (T0) _() {}
+func (T1[A]) _() {}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // A generic receiver type may constrain its type parameter such
+// // that it must be a pointer type. Such receiver types are not
+// // permitted.
+// type T3a[P interface{ ~int | ~string | ~float64 }] P
+//
+// func (T3a[_]) m() {} // this is ok
+//
+// type T3b[P interface{ ~unsafe.Pointer }] P
+//
+// func (T3b /* ERROR invalid receiver */ [_]) m() {}
+//
+// type T3c[P interface{ *int | *string }] P
+//
+// func (T3c /* ERROR invalid receiver */ [_]) m() {}
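
For reference, a standalone sketch of methods on a parameterized type following the rules described above (Stack is a hypothetical type): each method's receiver re-declares the type parameter, and the blank identifier may be used when it is not needed.

package main

import "fmt"

type Stack[E any] struct {
	items []E
}

// Push and Pop declare the type parameter E through their receivers.
func (s *Stack[E]) Push(v E) { s.items = append(s.items, v) }

func (s *Stack[E]) Pop() (E, bool) {
	var zero E
	if len(s.items) == 0 {
		return zero, false
	}
	v := s.items[len(s.items)-1]
	s.items = s.items[:len(s.items)-1]
	return v, true
}

// Kind does not need the type parameter, so it uses the blank identifier,
// and the receiver argument itself is left out.
func (*Stack[_]) Kind() string { return "stack" }

func main() {
	var s Stack[int]
	s.Push(1)
	s.Push(2)
	v, ok := s.Pop()
	fmt.Println(v, ok, s.Kind()) // 2 true stack
}
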
diff --git a/src/cmd/compile/internal/types2/testdata/examples/operations.go2 b/src/cmd/compile/internal/types2/testdata/examples/operations.go2
new file mode 100644
index 0000000..18e4d60
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/operations.go2
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// indirection
+
+func _[P any](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ int }](p P) {
+ _ = *p // ERROR cannot indirect p
+}
+
+func _[P interface{ *int }](p P) {
+ _ = *p
+}
+
+func _[P interface{ *int | *string }](p P) {
+ _ = *p // ERROR must have identical base types
+}
+
+type intPtr *int
+
+func _[P interface{ *int | intPtr } ](p P) {
+ var _ int = *p
+}
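
For reference, a standalone sketch of the indirection rule tested above (IntPtr and Load are hypothetical names): *p is permitted because every type in the constraint's type set is a pointer type with the identical base type int.

package main

import "fmt"

type IntPtr *int

// Load dereferences p; both *int and IntPtr share the base type int.
func Load[P interface{ *int | IntPtr }](p P) int {
	return *p
}

func main() {
	n := 42
	fmt.Println(Load(&n))         // P inferred as *int
	fmt.Println(Load(IntPtr(&n))) // defined pointer type, same base type
}
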
diff --git a/src/cmd/compile/internal/types2/testdata/examples/types.go2 b/src/cmd/compile/internal/types2/testdata/examples/types.go2
new file mode 100644
index 0000000..ae9c015
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/types.go2
@@ -0,0 +1,315 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of generic types.
+
+package p
+
+// List is just what it says - a slice of E elements.
+type List[E any] []E
+
+// A generic (parameterized) type must always be instantiated
+// before it can be used to designate the type of a variable
+// (including a struct field, or function parameter); though
+// for the latter cases, the provided type may be another type
+// parameter. So:
+var _ List[byte] = []byte{}
+
+// A generic binary tree might be declared as follows.
+type Tree[E any] struct {
+ left, right *Tree[E]
+ payload E
+}
+
+// A simple instantiation of Tree:
+var root1 Tree[int]
+
+// The actual type parameter provided may be a generic type itself:
+var root2 Tree[List[int]]
+
+// A couple of more complex examples.
+// We don't need extra parentheses around the element type of the slices on
+// the right (unlike when we use ()'s rather than []'s for type parameters).
+var _ List[List[int]] = []List[int]{}
+var _ List[List[List[Tree[int]]]] = []List[List[Tree[int]]]{}
+
+// Type parameters act like type aliases when used in generic types
+// in the sense that we can "emulate" a specific type instantiation
+// with type aliases.
+type T1[P any] struct {
+ f P
+}
+
+type T2[P any] struct {
+ f struct {
+ g P
+ }
+}
+
+var x1 T1[struct{ g int }]
+var x2 T2[int]
+
+func _() {
+ // This assignment is invalid because the types of x1, x2 are T1(...)
+ // and T2(...) respectively, which are two different defined types.
+ x1 = x2 // ERROR assignment
+
+ // This assignment is valid because the types of x1.f and x2.f are
+ // both struct { g int }; the type parameters act like type aliases
+ // and their actual names don't come into play here.
+ x1.f = x2.f
+}
+
+// We can verify this behavior using type aliases instead:
+type T1a struct {
+ f A1
+}
+type A1 = struct { g int }
+
+type T2a struct {
+ f struct {
+ g A2
+ }
+}
+type A2 = int
+
+var x1a T1a
+var x2a T2a
+
+func _() {
+ x1a = x2a // ERROR assignment
+ x1a.f = x2a.f
+}
+
+// Another interesting corner case are generic types that don't use
+// their type arguments. For instance:
+type T[P any] struct{}
+
+var xint T[int]
+var xbool T[bool]
+
+// Are these two variables of the same type? After all, their underlying
+// types are identical. We consider them to be different because each type
+// instantiation creates a new named type, in this case T<int> and T<bool>
+// even if their underlying types are identical. This is sensible because
+// we might still have methods that have different signatures or behave
+// differently depending on the type arguments, and thus we can't possibly
+// consider such types identical. Consequently:
+func _() {
+ xint = xbool // ERROR assignment
+}
+
+// Generic types cannot be used without instantiation.
+var _ T // ERROR cannot use generic type T
+var _ = T /* ERROR cannot use generic type T */ (0)
+
+// In type context, generic (parameterized) types cannot be parenthesized before
+// being instantiated. See also NOTES entry from 12/4/2019.
+var _ (T /* ERROR cannot use generic type T */ )[ /* ERROR unexpected \[ */ int]
+
+// All types may be parameterized, including interfaces.
+type I1[T any] interface{
+ m1(T)
+}
+
+// There is no such thing as a variadic generic type.
+type _[T ... /* ERROR invalid use of ... */ any] struct{}
+
+// Generic interfaces may be embedded as one would expect.
+type I2 interface {
+ I1(int) // method!
+ I1[string] // embedded I1
+}
+
+func _() {
+ var x I2
+ x.I1(0)
+ x.m1("foo")
+}
+
+type I0 interface {
+ m0()
+}
+
+type I3 interface {
+ I0
+ I1[bool]
+ m(string)
+}
+
+func _() {
+ var x I3
+ x.m0()
+ x.m1(true)
+ x.m("foo")
+}
+
+type _ struct {
+ ( /* ERROR cannot parenthesize */ int8)
+ ( /* ERROR cannot parenthesize */ *int16)
+ *( /* ERROR cannot parenthesize */ int32)
+ List[int]
+
+ int8 /* ERROR int8 redeclared */
+ * /* ERROR int16 redeclared */ int16
+ List /* ERROR List redeclared */ [int]
+}
+
+// Issue #45639: We don't allow this anymore. Keep this code
+// in case we decide to revisit this decision.
+//
+// It's possible to declare local types whose underlying types
+// are type parameters. As with ordinary type definitions, the
+// types' underlying properties are "inherited" but the methods
+// are not.
+// func _[T interface{ m(); ~int }]() {
+// type L T
+// var x L
+//
+// // m is not defined on L (it is not "inherited" from
+// // its underlying type).
+// x.m /* ERROR x.m undefined */ ()
+//
+// // But the properties of T, such as that it supports
+// // the operations of the types given by its type bound,
+// // are also the properties of L.
+// x++
+// _ = x - x
+//
+// // On the other hand, if we define a local alias for T,
+// // that alias stands for T as expected.
+// type A = T
+// var y A
+// y.m()
+// _ = y < 0
+// }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // It is not permitted to declare a local type whose underlying
+// // type is a type parameter not declared by that type declaration.
+// func _[T any]() {
+// type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+// type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
+// }
+
+// As a special case, an explicit type argument may be omitted
+// from a type parameter bound if the type bound expects exactly
+// one type argument. In that case, the type argument is the
+// respective type parameter to which the type bound applies.
+// Note: We may not permit this syntactic sugar at first.
+// Note: This is now disabled. All examples below are adjusted.
+type Adder[T any] interface {
+ Add(T) T
+}
+
+// We don't need to explicitly instantiate the Adder bound
+// if we have exactly one type parameter.
+func Sum[T Adder[T]](list []T) T {
+ var sum T
+ for _, x := range list {
+ sum = sum.Add(x)
+ }
+ return sum
+}
+
+// Valid and invalid variations.
+type B0 any
+type B1[_ any] any
+type B2[_, _ any] any
+
+func _[T1 B0]() {}
+func _[T1 B1[T1]]() {}
+func _[T1 B2 /* ERROR cannot use generic type .* without instantiation */ ]() {}
+
+func _[T1, T2 B0]() {}
+func _[T1 B1[T1], T2 B1[T2]]() {}
+func _[T1, T2 B2 /* ERROR cannot use generic type .* without instantiation */ ]() {}
+
+func _[T1 B0, T2 B1[T2]]() {} // here B1 applies to T2
+
+// When the type argument is left away, the type bound is
+// instantiated for each type parameter with that type
+// parameter.
+// Note: We may not permit this syntactic sugar at first.
+func _[A Adder[A], B Adder[B], C Adder[A]]() {
+ var a A // A's type bound is Adder[A]
+ a = a.Add(a)
+ var b B // B's type bound is Adder[B]
+ b = b.Add(b)
+ var c C // C's type bound is Adder[A]
+ a = c.Add(a)
+}
+
+// The type of variables (incl. parameters and return values) cannot
+// be an interface with type constraints or be/embed comparable.
+type I interface {
+ ~int
+}
+
+var (
+ _ interface /* ERROR contains type constraints */ {~int}
+ _ I /* ERROR contains type constraints */
+)
+
+func _(I /* ERROR contains type constraints */ )
+func _(x, y, z I /* ERROR contains type constraints */ )
+func _() I /* ERROR contains type constraints */
+
+func _() {
+ var _ I /* ERROR contains type constraints */
+}
+
+type C interface {
+ comparable
+}
+
+var _ comparable /* ERROR comparable */
+var _ C /* ERROR comparable */
+
+func _(_ comparable /* ERROR comparable */ , _ C /* ERROR comparable */ )
+
+func _() {
+ var _ comparable /* ERROR comparable */
+ var _ C /* ERROR comparable */
+}
+
+// Type parameters are never const types, i.e., it's
+// not possible to declare a constant of type parameter type.
+// (If a type set contains just a single const type, we could
+// allow it, but such type sets don't make much sense in the
+// first place.)
+func _[T interface{~int|~float64}]() {
+ // not valid
+ const _ = T /* ERROR not constant */ (0)
+ const _ T /* ERROR invalid constant type T */ = 1
+
+ // valid
+ var _ = T(0)
+ var _ T = 1
+ _ = T(0)
+}
+
+// It is possible to create composite literals of type parameter
+// type as long as it's possible to create a composite literal
+// of the core type of the type parameter's constraint.
+func _[P interface{ ~[]int }]() P {
+ return P{}
+ return P{1, 2, 3}
+}
+
+func _[P interface{ ~[]E }, E interface{ map[string]P } ]() P {
+ x := P{}
+ return P{{}}
+ return P{E{}}
+ return P{E{"foo": x}}
+ return P{{"foo": x}, {}}
+}
+
+// This is a degenerate case with a singleton type set, but we can create
+// composite literals even if the core type is a defined type.
+type MyInts []int
+
+func _[P MyInts]() P {
+ return P{}
+}
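
For reference, a standalone sketch of instantiating a generic type as described above (Pair and Swap are hypothetical names): every use supplies type arguments, and distinct instantiations such as Pair[int, string] and Pair[string, int] are distinct defined types.

package main

import "fmt"

type Pair[A, B any] struct {
	First  A
	Second B
}

// Swap produces a value of the differently instantiated type Pair[B, A].
func Swap[A, B any](p Pair[A, B]) Pair[B, A] {
	return Pair[B, A]{p.Second, p.First}
}

func main() {
	p := Pair[int, string]{1, "one"}
	q := Swap(p) // q has type Pair[string, int]
	fmt.Println(p, q)
}
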
diff --git a/src/cmd/compile/internal/types2/testdata/examples/typesets.go2 b/src/cmd/compile/internal/types2/testdata/examples/typesets.go2
new file mode 100644
index 0000000..55ef022
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/examples/typesets.go2
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file shows some examples of constraint literals with elided interfaces.
+// These examples are permitted if proposal issue #48424 is accepted.
+
+package p
+
+// Constraint type sets of the form T, ~T, or A|B may omit the interface.
+type (
+ _[T int] struct{}
+ _[T ~int] struct{}
+ _[T int|string] struct{}
+ _[T ~int|~string] struct{}
+)
+
+func min[T int|string](x, y T) T {
+ if x < y {
+ return x
+ }
+ return y
+}
+
+func lookup[M ~map[K]V, K comparable, V any](m M, k K) V {
+ return m[k]
+}
+
+func deref[P ~*E, E any](p P) E {
+ return *p
+}
+
+func _() int {
+ p := new(int)
+ return deref(p)
+}
+
+func addrOfCopy[V any, P *V](v V) P {
+ return &v
+}
+
+func _() *int {
+ return addrOfCopy(0)
+}
+
+// A type parameter may not be embedded in an interface,
+// so it cannot be used as a constraint either.
+func _[A any, B A /* ERROR cannot use a type parameter as constraint */ ]() {}
+func _[A any, B, C A /* ERROR cannot use a type parameter as constraint */ ]() {}
+
+
+// Error messages refer to the type constraint as it appears in the source.
+// (No implicit interface should be exposed.)
+func _[T string](x T) T {
+ return x /* ERROR constrained by string */ * x
+}
+
+func _[T int|string](x T) T {
+ return x /* ERROR constrained by int|string */ * x
+}
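As a compilable counterpart to the elided constraint literals tested above, a minimal sketch assuming the Go 1.18 shorthand (sum1 and sum2 are illustrative names): both functions declare the same constraint, the second without the enclosing interface.

package sketch

func sum1[T interface{ ~int | ~float64 }](xs []T) (s T) {
	for _, x := range xs {
		s += x
	}
	return s
}

// sum2 elides the interface; ~int | ~float64 is shorthand for the bound above.
func sum2[T ~int | ~float64](xs []T) (s T) {
	for _, x := range xs {
		s += x
	}
	return s
}

var _ = sum2([]float64{1.5, 2.5})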
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue20583.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue20583.src
new file mode 100644
index 0000000..85f11ec
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue20583.src
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue20583
+const (
+ _ = 6e886451608 /* ERROR malformed constant */ /2
+ _ = 6e886451608i /* ERROR malformed constant */ /2
+ _ = 0 * 1e+1000000000 // ERROR malformed constant
+ x = 1e100000000
+ _ = x*x*x*x*x*x* /* ERROR not representable */ x
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203a.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203a.src
new file mode 100644
index 0000000..48cb588
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203a.src
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+type T struct{}
+
+func (T) m1() {}
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+
+func main() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203b.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203b.src
new file mode 100644
index 0000000..638ec6c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue23203b.src
@@ -0,0 +1,14 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "unsafe"
+
+type T struct{}
+
+func (T) m2([unsafe.Sizeof(T.m1)]int) {}
+func (T) m1() {}
+
+func main() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue25838.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue25838.go
new file mode 100644
index 0000000..adbd138
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue25838.go
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// examples from the issue
+
+type (
+ e = f
+ f = g
+ g = []h
+ h i
+ i = j
+ j = e
+)
+
+type (
+ e1 = []h1
+ h1 e1
+)
+
+type (
+ P = *T
+ T P
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue26390.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue26390.src
new file mode 100644
index 0000000..b8e67e9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue26390.src
@@ -0,0 +1,11 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue26390
+
+type A = T
+
+func (t *T) m() *A { return t }
+
+type T struct{}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue28251.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue28251.src
new file mode 100644
index 0000000..ef5e61d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue28251.src
@@ -0,0 +1,65 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file contains test cases for various forms of
+// method receiver declarations, per the spec clarification
+// https://golang.org/cl/142757.
+
+package issue28251
+
+// test case from issue28251
+type T struct{}
+
+type T0 = *T
+
+func (T0) m() {}
+
+func _() { (&T{}).m() }
+
+// various alternative forms
+type (
+ T1 = (((T)))
+)
+
+func ((*(T1))) m1() {}
+func _() { (T{}).m2() }
+func _() { (&T{}).m2() }
+
+type (
+ T2 = (((T3)))
+ T3 = T
+)
+
+func (T2) m2() {}
+func _() { (T{}).m2() }
+func _() { (&T{}).m2() }
+
+type (
+ T4 = ((*(T5)))
+ T5 = T
+)
+
+func (T4) m4() {}
+func _() { (T{}).m4 /* ERROR "cannot call pointer method m4 on T" */ () }
+func _() { (&T{}).m4() }
+
+type (
+ T6 = (((T7)))
+ T7 = (*(T8))
+ T8 = T
+)
+
+func (T6) m6() {}
+func _() { (T{}).m6 /* ERROR "cannot call pointer method m6 on T" */ () }
+func _() { (&T{}).m6() }
+
+type (
+ T9 = *T10
+ T10 = *T11
+ T11 = T
+)
+
+func (T9 /* ERROR invalid receiver type \*\*T */ ) m9() {}
+func _() { (T{}).m9 /* ERROR has no field or method m9 */ () }
+func _() { (&T{}).m9 /* ERROR has no field or method m9 */ () }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
new file mode 100644
index 0000000..b408dd7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39634.go2
@@ -0,0 +1,93 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Examples adjusted to match new [T any] syntax for type parameters.
+// Also, previously permitted empty type parameter lists and instantiations
+// are now syntax errors.
+
+package p
+
+// crash 1
+type nt1[_ any]interface{g /* ERROR undeclared name */ }
+type ph1[e nt1[e],g(d /* ERROR undeclared name */ )]s /* ERROR undeclared name */
+func(*ph1[e,e /* ERROR redeclared */ ])h(d /* ERROR undeclared name */ )
+
+// crash 2
+// Disabled: empty []'s are now syntax errors. This example leads to too many follow-on errors.
+// type Numeric2 interface{t2 /* ERROR not a type */ }
+// func t2[T Numeric2](s[]T){0 /* ERROR not a type */ []{s /* ERROR cannot index */ [0][0]}}
+
+// crash 3
+type t3 *interface{ t3.p /* ERROR no field or method p */ }
+
+// crash 4
+type Numeric4 interface{t4 /* ERROR not a type */ }
+func t4[T Numeric4](s[]T){if( /* ERROR non-boolean */ 0){*s /* ERROR cannot indirect */ [0]}}
+
+// crash 7
+type foo7 interface { bar() }
+type x7[A any] struct{ foo7 }
+func main7() { var _ foo7 = x7[int]{} }
+
+// crash 8
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+// type foo8[A any] interface { ~A }
+// func bar8[A foo8[A]](a A) {}
+// func main8() {}
+
+// crash 9
+type foo9[A any] interface { foo9 /* ERROR illegal cycle */ [A] }
+func _() { var _ = new(foo9[int]) }
+
+// crash 12
+var u /* ERROR cycle */ , i [func /* ERROR used as value */ /* ERROR used as value */ (u, c /* ERROR undeclared */ /* ERROR undeclared */ ) {}(0, len /* ERROR must be called */ /* ERROR must be called */ )]c /* ERROR undeclared */ /* ERROR undeclared */
+
+// crash 15
+func y15() { var a /* ERROR declared but not used */ interface{ p() } = G15[string]{} }
+type G15[X any] s /* ERROR undeclared name */
+func (G15 /* ERROR generic type .* without instantiation */ ) p()
+
+// crash 16
+type Foo16[T any] r16 /* ERROR not a type */
+func r16[T any]() Foo16[Foo16[T]] { panic(0) }
+
+// crash 17
+type Y17 interface{ c() }
+type Z17 interface {
+ c() Y17
+ Y17 /* ERROR duplicate method */
+}
+func F17[T Z17](T) {}
+
+// crash 18
+type o18[T any] []func(_ o18[[]_ /* ERROR cannot use _ */ ])
+
+// crash 19
+type Z19 [][[]Z19{}[0][0]]c19 /* ERROR undeclared */
+
+// crash 20
+type Z20 /* ERROR illegal cycle */ interface{ Z20 }
+func F20[t Z20]() { F20(t /* ERROR invalid composite literal type */ {}) }
+
+// crash 21
+type Z21 /* ERROR illegal cycle */ interface{ Z21 }
+func F21[T Z21]() { ( /* ERROR not used */ F21[Z21]) }
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // crash 24
+// type T24[P any] P
+// func (r T24[P]) m() { T24 /* ERROR without instantiation */ .m() }
+
+// crash 25
+type T25[A any] int
+func (t T25[A]) m1() {}
+var x T25 /* ERROR without instantiation */ .m1
+
+// crash 26
+type T26 = interface{ F26[ /* ERROR interface method must have no type parameters */ Z any]() }
+func F26[Z any]() T26 { return F26 /* ERROR without instantiation */ [] /* ERROR operand */ }
+
+// crash 27
+func e27[T any]() interface{ x27 /* ERROR not a type */ } { panic(0) }
+func x27() { e27( /* ERROR cannot infer T */ ) } \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39664.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39664.go2
new file mode 100644
index 0000000..3b3ec56
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39664.go2
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[_ any] struct {}
+
+func (T /* ERROR instantiation */ ) m()
+
+func _() {
+ var x interface { m() }
+ x = T[int]{}
+ _ = x
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
new file mode 100644
index 0000000..e56bc35
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39680.go2
@@ -0,0 +1,31 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Embedding stand-alone type parameters is not permitted for now. Disabled.
+
+/*
+import "fmt"
+
+// Minimal test case.
+func _[T interface{~T}](x T) T{
+ return x
+}
+
+// Test case from issue.
+type constr[T any] interface {
+ ~T
+}
+
+func Print[T constr[T]](s []T) {
+ for _, v := range s {
+ fmt.Print(v)
+ }
+}
+
+func f() {
+ Print([]string{"Hello, ", "playground\n"})
+}
+*/
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
new file mode 100644
index 0000000..301c13b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39693.go2
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Number1 interface {
+ // embedding non-interface types is permitted
+ int
+ float64
+}
+
+func Add1[T Number1](a, b T) T {
+ return a /* ERROR not defined */ + b
+}
+
+type Number2 interface {
+ int|float64
+}
+
+func Add2[T Number2](a, b T) T {
+ return a + b
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
new file mode 100644
index 0000000..72f8399
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39699.go2
@@ -0,0 +1,29 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T0 interface{
+}
+
+type T1 interface{
+ ~int
+}
+
+type T2 interface{
+ comparable
+}
+
+type T3 interface {
+ T0
+ T1
+ T2
+}
+
+func _() {
+ _ = T0(0)
+ _ = T1 /* ERROR cannot use interface T1 in conversion */ (1)
+ _ = T2 /* ERROR cannot use interface T2 in conversion */ (2)
+ _ = T3 /* ERROR cannot use interface T3 in conversion */ (3)
+}
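The rejected conversions above hinge on the distinction between ordinary and constraint interfaces. A minimal sketch of the permitted side, assuming Go 1.18 semantics (Intish and toAny are illustrative names):

package sketch

type Intish interface{ ~int } // constraint interface: usable only as a bound

// toAny is fine: Intish appears as a type parameter bound, not as a type.
func toAny[T Intish](x T) interface{} {
	return x
}

var _ = toAny(42)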
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
new file mode 100644
index 0000000..8f31012
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39711.go2
@@ -0,0 +1,13 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Do not report a duplicate type error for this term list.
+// (Check types after interfaces have been completed.)
+type _ interface {
+ // TODO(gri) Once we have full type sets we can enable this again.
+ // For now we don't permit interfaces in term lists.
+ // type interface{ Error() string }, interface{ String() string }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
new file mode 100644
index 0000000..0088523
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39723.go2
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// A constraint must be an interface; it cannot
+// be a type parameter, for instance.
+func _[A interface{ ~int }, B A /* ERROR cannot use a type parameter as constraint */ ]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2
new file mode 100644
index 0000000..62dc45a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39725.go2
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[T1, T2 any](T1, T2, struct{a T1; b T2}) {}
+func _() {
+ f1(42, string("foo"), struct /* ERROR does not match inferred type struct\{a int; b string\} */ {a, b int}{})
+}
+
+// simplified test case from issue
+func f2[T any](_ []T, _ func(T)) {}
+func _() {
+ f2([]string{}, func /* ERROR does not match inferred type func\(string\) */ (f []byte) {})
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39754.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39754.go2
new file mode 100644
index 0000000..9edd239
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39754.go2
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Optional[T any] struct {}
+
+func (_ Optional[T]) Val() (T, bool)
+
+type Box[T any] interface {
+ Val() (T, bool)
+}
+
+func f[V interface{}, A, B Box[V]]() {}
+
+func _() {
+ f[int, Optional[int], Optional[int]]()
+ _ = f[int, Optional[int], Optional /* ERROR does not implement Box */ [string]]
+ _ = f[int, Optional[int], Optional /* ERROR Optional.* does not implement Box.* */ [string]]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
new file mode 100644
index 0000000..257b73a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39755.go2
@@ -0,0 +1,23 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T interface{~map[string]int}](x T) {
+ _ = x == nil
+}
+
+// simplified test case from issue
+
+type PathParamsConstraint interface {
+ ~map[string]string | ~[]struct{key, value string}
+}
+
+type PathParams[T PathParamsConstraint] struct {
+ t T
+}
+
+func (pp *PathParams[T]) IsNil() bool {
+ return pp.t == nil // this must succeed
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39768.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39768.go2
new file mode 100644
index 0000000..696d9d9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39768.go2
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T[P any] P
+// type A = T // ERROR cannot use generic type
+// var x A[int]
+// var _ A
+//
+// type B = T[int]
+// var y B = x
+// var _ B /* ERROR not a generic type */ [int]
+
+// test case from issue
+
+type Vector[T any] []T
+type VectorAlias = Vector // ERROR cannot use generic type
+var v Vector[int]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2
new file mode 100644
index 0000000..6bc9284
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39938.go2
@@ -0,0 +1,54 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// All but E2 and E5 provide an "indirection" and break infinite expansion of a type.
+type E0[P any] []P
+type E1[P any] *P
+type E2[P any] struct{ _ P }
+type E3[P any] struct{ _ *P }
+type E5[P any] struct{ _ [10]P }
+
+type T0 struct {
+ _ E0[T0]
+}
+
+type T0_ struct {
+ E0[T0_]
+}
+
+type T1 struct {
+ _ E1[T1]
+}
+
+type T2 /* ERROR illegal cycle */ struct {
+ _ E2[T2]
+}
+
+type T3 struct {
+ _ E3[T3]
+}
+
+type T4 /* ERROR illegal cycle */ [10]E5[T4]
+
+type T5 struct {
+ _ E0[E2[T5]]
+}
+
+type T6 struct {
+ _ E0[E2[E0[E1[E2[[10]T6]]]]]
+}
+
+type T7 struct {
+ _ E0[[10]E2[E0[E2[E2[T7]]]]]
+}
+
+type T8 struct {
+ _ E0[[]E2[E0[E2[E2[T8]]]]]
+}
+
+type T9 /* ERROR illegal cycle */ [10]E2[E5[E2[T9]]]
+
+type T10 [10]E2[E5[E2[func(T10)]]]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
new file mode 100644
index 0000000..e38e572
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39948.go2
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] interface{
+ P // ERROR cannot embed a type parameter
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2
new file mode 100644
index 0000000..d703da9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39976.go2
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type policy[K, V any] interface{}
+type LRU[K, V any] struct{}
+
+func NewCache[K, V any](p policy[K, V]) {}
+
+func _() {
+ var lru LRU[int, string]
+ NewCache[int, string](&lru)
+ NewCache(& /* ERROR does not match policy\[K, V\] \(cannot infer K and V\) */ lru)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39982.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39982.go2
new file mode 100644
index 0000000..9810b63
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue39982.go2
@@ -0,0 +1,36 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ T[_ any] struct{}
+ S[_ any] struct {
+ data T[*T[int]]
+ }
+)
+
+func _() {
+ _ = S[int]{
+ data: T[*T[int]]{},
+ }
+}
+
+// full test case from issue
+
+type (
+ Element[TElem any] struct{}
+
+ entry[K comparable] struct{}
+
+ Cache[K comparable] struct {
+ data map[K]*Element[*entry[K]]
+ }
+)
+
+func _() {
+ _ = Cache[int]{
+ data: make(map[int](*Element[*entry[int]])),
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2
new file mode 100644
index 0000000..0981a33
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40038.go2
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A[T any] int
+
+func (A[T]) m(A[T])
+
+func f[P interface{m(P)}]() {}
+
+func _() {
+ _ = f[A[int]]
+} \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2
new file mode 100644
index 0000000..a3f3eec
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40056.go2
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ NewS( /* ERROR cannot infer T */ ) .M()
+}
+
+type S struct {}
+
+func NewS[T any]() *S { panic(0) }
+
+func (_ *S /* ERROR S is not a generic type */ [T]) M()
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40057.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40057.go2
new file mode 100644
index 0000000..fdc8fb1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40057.go2
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x interface{}
+ switch t := x.(type) {
+ case S /* ERROR cannot use generic type */ :
+ t.m()
+ }
+}
+
+type S[T any] struct {}
+
+func (_ S[T]) m()
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2
new file mode 100644
index 0000000..c78f9a1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40301.go2
@@ -0,0 +1,12 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+func _[T any](x T) {
+ _ = unsafe.Alignof(x)
+ _ = unsafe.Sizeof(x)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2
new file mode 100644
index 0000000..58d0f69
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40684.go2
@@ -0,0 +1,15 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[_ any] int
+
+func f[_ any]() {}
+func g[_, _ any]() {}
+
+func _() {
+ _ = f[T /* ERROR without instantiation */ ]
+ _ = g[T /* ERROR without instantiation */ , T /* ERROR without instantiation */ ]
+} \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2
new file mode 100644
index 0000000..9eea4ad
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue40789.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import "fmt"
+
+func main() {
+ m := map[string]int{
+ "a": 6,
+ "b": 7,
+ }
+ fmt.Println(copyMap[map[string]int, string, int](m))
+}
+
+type Map[K comparable, V any] interface {
+ map[K] V
+}
+
+func copyMap[M Map[K, V], K comparable, V any](m M) M {
+ m1 := make(M)
+ for k, v := range m {
+ m1[k] = v
+ }
+ return m1
+}
+
+// simpler test case from the same issue
+
+type A[X comparable] interface {
+ []X
+}
+
+func f[B A[X], X comparable]() B {
+ return nil
+}
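The copyMap test above relies on make working for a type parameter whose constraint has a map core type. A minimal sketch of that pattern, assuming Go 1.18 semantics (cloneMap and Counts are illustrative names); the fully explicit instantiation mirrors the call in the test:

package sketch

func cloneMap[M ~map[K]V, K comparable, V any](m M) M {
	out := make(M, len(m)) // make is allowed: the core type of M is a map
	for k, v := range m {
		out[k] = v
	}
	return out
}

type Counts map[string]int

var _ Counts = cloneMap[Counts, string, int](Counts{"a": 1, "b": 2})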
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
new file mode 100644
index 0000000..4550dd7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue41124.go2
@@ -0,0 +1,91 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Test case from issue.
+
+type Nat /* ERROR cycle */ interface {
+ Zero|Succ
+}
+
+type Zero struct{}
+type Succ struct{
+ Nat // Nat contains type constraints but is invalid, so no error
+}
+
+// Struct tests.
+
+type I1 interface {
+ comparable
+}
+
+type I2 interface {
+ ~int
+}
+
+type I3 interface {
+ I1
+ I2
+}
+
+type _ struct {
+ f I1 // ERROR interface is .* comparable
+}
+
+type _ struct {
+ comparable // ERROR interface is .* comparable
+}
+
+type _ struct{
+ I1 // ERROR interface is .* comparable
+}
+
+type _ struct{
+ I2 // ERROR interface contains type constraints
+}
+
+type _ struct{
+ I3 // ERROR interface contains type constraints
+}
+
+// General composite types.
+
+type (
+ _ [10]I1 // ERROR interface is .* comparable
+ _ [10]I2 // ERROR interface contains type constraints
+
+ _ []I1 // ERROR interface is .* comparable
+ _ []I2 // ERROR interface contains type constraints
+
+ _ *I3 // ERROR interface contains type constraints
+ _ map[I1 /* ERROR interface is .* comparable */ ]I2 // ERROR interface contains type constraints
+ _ chan I3 // ERROR interface contains type constraints
+ _ func(I1 /* ERROR interface is .* comparable */ )
+ _ func() I2 // ERROR interface contains type constraints
+)
+
+// Other cases.
+
+var _ = [...]I3 /* ERROR interface contains type constraints */ {}
+
+func _(x interface{}) {
+ _ = x.(I3 /* ERROR interface contains type constraints */ )
+}
+
+type T1[_ any] struct{}
+type T3[_, _, _ any] struct{}
+var _ T1[I2 /* ERROR interface contains type constraints */ ]
+var _ T3[int, I2 /* ERROR interface contains type constraints */ , float32]
+
+func f1[_ any]() int { panic(0) }
+var _ = f1[I2 /* ERROR interface contains type constraints */ ]()
+func f3[_, _, _ any]() int { panic(0) }
+var _ = f3[int, I2 /* ERROR interface contains type constraints */ , float32]()
+
+func _(x interface{}) {
+ switch x.(type) {
+ case I2 /* ERROR interface contains type constraints */ :
+ }
+}
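The errors above all stem from one rule: an interface that embeds type constraints (or comparable) may only be used as a type parameter bound. A minimal sketch of the allowed positions, assuming Go 1.18 semantics (Number, double, and box are illustrative names):

package sketch

type Number interface{ ~int | ~float64 } // constraint interface

func double[T Number](x T) T { return x + x } // ok: Number used as a bound

type box struct {
	s interface{ String() string } // ok: a method-only interface may be a field type
}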
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42695.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42695.src
new file mode 100644
index 0000000..d0d6200
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42695.src
@@ -0,0 +1,17 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue42695
+
+const _ = 6e5518446744 // ERROR malformed constant
+const _ uint8 = 6e5518446744 // ERROR malformed constant
+
+var _ = 6e5518446744 // ERROR malformed constant
+var _ uint8 = 6e5518446744 // ERROR malformed constant
+
+func f(x int) int {
+ return x + 6e5518446744 // ERROR malformed constant
+}
+
+var _ = f(6e5518446744 /* ERROR malformed constant */ )
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
new file mode 100644
index 0000000..dd66e96
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42758.go2
@@ -0,0 +1,33 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T any](x interface{}){
+ switch x.(type) {
+ case T: // ok to use a type parameter
+ case int:
+ }
+
+ switch x.(type) {
+ case T:
+ case T /* ERROR duplicate case */ :
+ }
+}
+
+type constraint interface {
+ ~int
+}
+
+func _[T constraint](x interface{}){
+ switch x.(type) {
+ case T: // ok to use a type parameter even if type list contains int
+ case int:
+ }
+}
+
+func _(x constraint /* ERROR contains type constraints */ ) {
+ switch x.(type) { // no need to report another error
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42987.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42987.src
new file mode 100644
index 0000000..8aa3544
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue42987.src
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Check that there is only one error (no follow-on errors).
+
+package p
+var _ = [ /* ERROR invalid use of .* array */ ...]byte("foo")
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43056.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43056.go2
new file mode 100644
index 0000000..35c7ef5
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43056.go2
@@ -0,0 +1,31 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// simplified example
+func f[T ~func(T)](a, b T) {}
+
+type F func(F)
+
+func _() {
+ var i F
+ var j func(F)
+
+ f(i, j)
+ // f(j, i) // disabled for now
+}
+
+// example from issue
+func g[T interface{ Equal(T) bool }](a, b T) {}
+
+type I interface{ Equal(I) bool }
+
+func _() {
+ var i I
+ var j interface{ Equal(I) bool }
+
+ g(i, j)
+ // g(j, i) // disabled for now
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43087.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43087.src
new file mode 100644
index 0000000..85d4450
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43087.src
@@ -0,0 +1,43 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ a, b, b /* ERROR b repeated on left side of := */ := 1, 2, 3
+ _ = a
+ _ = b
+}
+
+func _() {
+ a, _, _ := 1, 2, 3 // multiple _'s ok
+ _ = a
+}
+
+func _() {
+ var b int
+ a, b, b /* ERROR b repeated on left side of := */ := 1, 2, 3
+ _ = a
+ _ = b
+}
+
+func _() {
+ var a []int
+ a /* ERROR non-name .* on left side of := */ [0], b := 1, 2
+ _ = a
+ _ = b
+}
+
+func _() {
+ var a int
+ a, a /* ERROR a repeated on left side of := */ := 1, 2
+ _ = a
+}
+
+func _() {
+ var a, b int
+ a, b := /* ERROR no new variables on left side of := */ 1, 2
+ _ = a
+ _ = b
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43110.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43110.src
new file mode 100644
index 0000000..8d5c983
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43110.src
@@ -0,0 +1,43 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type P *struct{}
+
+func _() {
+ // want an error even if the switch is empty
+ var a struct{ _ func() }
+ switch a /* ERROR cannot switch on a */ {
+ }
+
+ switch a /* ERROR cannot switch on a */ {
+ case a: // no follow-on error here
+ }
+
+ // this is ok because f can be compared to nil
+ var f func()
+ switch f {
+ }
+
+ switch f {
+ case nil:
+ }
+
+ switch (func())(nil) {
+ case nil:
+ }
+
+ switch (func())(nil) {
+ case f /* ERROR invalid case f in switch on .* \(func can only be compared to nil\) */ :
+ }
+
+ switch nil /* ERROR use of untyped nil in switch expression */ {
+ }
+
+ // this is ok
+ switch P(nil) {
+ case P(nil):
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43124.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43124.src
new file mode 100644
index 0000000..7e48c22
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43124.src
@@ -0,0 +1,16 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = int(0 /* ERROR invalid use of \.\.\. in type conversion */ ...)
+
+// test case from issue
+
+type M []string
+
+var (
+ x = []string{"a", "b"}
+ _ = M(x /* ERROR invalid use of \.\.\. in type conversion */ ...)
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43125.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43125.src
new file mode 100644
index 0000000..c2bd970
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43125.src
@@ -0,0 +1,8 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = new(- /* ERROR not a type */ 1)
+var _ = new(1 /* ERROR not a type */ + 1)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43190.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43190.src
new file mode 100644
index 0000000..ae42719
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43190.src
@@ -0,0 +1,19 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import ; // ERROR missing import path
+import
+var /* ERROR missing import path */ _ int
+import .; // ERROR missing import path
+
+import ()
+import (.) // ERROR missing import path
+import (
+ "fmt"
+ .
+) // ERROR missing import path
+
+var _ = fmt.Println // avoid imported but not used error
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
new file mode 100644
index 0000000..2955c26
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43527.go2
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+const L = 10
+
+type (
+ _ [L]struct{}
+ _ [A /* ERROR undeclared name A for array length */ ]struct{}
+ _ [B /* ERROR invalid array length B */ ]struct{}
+ _[A any] struct{}
+
+ B int
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
new file mode 100644
index 0000000..3c78f85
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue43671.go2
@@ -0,0 +1,58 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | <-chan T }
+
+func _[T any](ch T) {
+ <-ch // ERROR cannot receive from ch .* no core type
+}
+
+func _[T C0](ch T) {
+ <-ch // ERROR cannot receive from non-channel ch
+}
+
+func _[T C1](ch T) {
+ <-ch
+}
+
+func _[T C2](ch T) {
+ <-ch
+}
+
+func _[T C3](ch T) {
+ <-ch // ERROR cannot receive from ch .* no core type
+}
+
+func _[T C4](ch T) {
+ <-ch // ERROR cannot receive from send-only channel
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ x = <-ch
+}
+
+// test case from issue, slightly modified
+type RecvChan[T any] interface {
+ ~chan T | ~<-chan T
+}
+
+func _[T any, C RecvChan[T]](ch C) T {
+ return <-ch
+}
+
+func f[T any, C interface{ chan T }](ch C) T {
+ return <-ch
+}
+
+func _(ch chan int) {
+ var x int = f(ch) // test constraint type inference for this case
+ _ = x
+}
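The receive cases above type-check only when the constraint has a core channel type that permits receiving. A minimal sketch of the accepted shape, assuming Go 1.18 semantics (recvOne is an illustrative name):

package sketch

// The core type of ~chan int | ~<-chan int is <-chan int, so receiving is allowed.
func recvOne[C ~chan int | ~<-chan int](ch C) int {
	return <-ch
}

func _() {
	ch := make(chan int, 1)
	ch <- 42
	_ = recvOne(ch)
}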
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go2
new file mode 100644
index 0000000..512bfcc
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44688.go2
@@ -0,0 +1,83 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+type A1[T any] struct{}
+
+func (*A1[T]) m1(T) {}
+
+type A2[T any] interface {
+ m2(T)
+}
+
+type B1[T any] struct {
+ filler int
+ *A1[T]
+ A2[T]
+}
+
+type B2[T any] interface {
+ A2[T]
+}
+
+type C[T any] struct {
+ filler1 int
+ filler2 int
+ B1[T]
+}
+
+type D[T any] struct {
+ filler1 int
+ filler2 int
+ filler3 int
+ C[T]
+}
+
+func _() {
+ // calling embedded methods
+ var b1 B1[string]
+
+ b1.A1.m1("")
+ b1.m1("")
+
+ b1.A2.m2("")
+ b1.m2("")
+
+ var b2 B2[string]
+ b2.m2("")
+
+ // a deeper nesting
+ var d D[string]
+ d.m1("")
+ d.m2("")
+
+ // calling method expressions
+ m1x := B1[string].m1
+ m1x(b1, "")
+ m2x := B2[string].m2
+ m2x(b2, "")
+
+ // calling method values
+ m1v := b1.m1
+ m1v("")
+ m2v := b1.m2
+ m2v("")
+ b2v := b2.m2
+ b2v("")
+}
+
+// actual test case from issue
+
+type A[T any] struct{}
+
+func (*A[T]) f(T) {}
+
+type B[T any] struct{ A[T] }
+
+func _() {
+ var b B[string]
+ b.A.f("")
+ b.f("")
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44799.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44799.go2
new file mode 100644
index 0000000..9e528a7
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue44799.go2
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func Map[F, T any](s []F, f func(F) T) []T { return nil }
+
+func Reduce[Elem1, Elem2 any](s []Elem1, initializer Elem2, f func(Elem2, Elem1) Elem2) Elem2 { var x Elem2; return x }
+
+func main() {
+ var s []int
+ var f1 func(int) float64
+ var f2 func(float64, int) float64
+ _ = Map[int](s, f1)
+ _ = Map(s, f1)
+ _ = Reduce[int](s, 0, f2)
+ _ = Reduce(s, 0, f2)
+}
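The Map/Reduce calls above mix full inference with partially explicit type arguments. A minimal sketch of the same pattern, assuming Go 1.18 inference rules (mapSlice is an illustrative name):

package sketch

func mapSlice[F, T any](s []F, f func(F) T) []T {
	out := make([]T, 0, len(s))
	for _, x := range s {
		out = append(out, f(x))
	}
	return out
}

func _() {
	_ = mapSlice([]string{"a", "bc"}, func(s string) int { return len(s) })    // F and T inferred
	_ = mapSlice[string]([]string{"a"}, func(s string) float64 { return 1.5 }) // F explicit, T inferred
}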
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45114.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45114.go
new file mode 100644
index 0000000..0093660
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45114.go
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var s uint
+var _ = string(1 /* ERROR shifted operand 1 .* must be integer */ << s)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
new file mode 100644
index 0000000..01c9672
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45548.go2
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[F interface{*Q}, G interface{*R}, Q, R any](q Q, r R) {}
+
+func _() {
+ f[*float64, *int](1, 2)
+ f[*float64](1, 2)
+ f(1, 2)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45550.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45550.go2
new file mode 100644
index 0000000..3eeaca0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45550.go2
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Builder /* ERROR illegal cycle */ [T interface{ struct{ Builder[T] } }] struct{}
+type myBuilder struct {
+ Builder[myBuilder]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
new file mode 100644
index 0000000..2937959
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45635.go2
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+func main() {
+ some /* ERROR "undeclared name" */ [int, int]()
+}
+
+type N[T any] struct{}
+
+var _ N[] /* ERROR expecting type */
+
+type I interface {
+ ~[]int
+}
+
+func _[T I](i, j int) {
+ var m map[int]int
+ _ = m[i, j /* ERROR more than one index */ ]
+
+ var a [3]int
+ _ = a[i, j /* ERROR more than one index */ ]
+
+ var s []int
+ _ = s[i, j /* ERROR more than one index */ ]
+
+ var t T
+ _ = t[i, j /* ERROR more than one index */ ]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2
new file mode 100644
index 0000000..80148fe
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45639.go2
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package P
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// // It is not permitted to declare a local type whose underlying
+// // type is a type parameter not declared by that type declaration.
+// func _[T any]() {
+// type _ T // ERROR cannot use function type parameter T as RHS in type declaration
+// type _ [_ any] T // ERROR cannot use function type parameter T as RHS in type declaration
+// }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45920.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45920.go2
new file mode 100644
index 0000000..b113e10
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45920.go2
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[T any, C chan T | <-chan T](ch C) {}
+
+func _(ch chan int) { f1(ch) }
+func _(ch <-chan int) { f1(ch) }
+func _(ch chan<- int) { f1( /* ERROR chan<- int does not implement chan int\|<-chan int */ ch) }
+
+func f2[T any, C chan T | chan<- T](ch C) {}
+
+func _(ch chan int) { f2(ch) }
+func _(ch <-chan int) { f2( /* ERROR <-chan int does not implement chan int\|chan<- int */ ch) }
+func _(ch chan<- int) { f2(ch) }
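The f1/f2 errors above come from channel direction in constraint satisfaction. A minimal sketch of the accepted direction, assuming Go 1.18 semantics (drain is an illustrative name):

package sketch

// The core type of chan T | <-chan T permits receiving, so ranging over ch is allowed.
func drain[T any, C chan T | <-chan T](ch C) {
	for range ch {
	}
}

func _(events <-chan string) {
	drain(events) // C inferred as <-chan string, T as string
}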
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
new file mode 100644
index 0000000..cea8c14
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue45985.go2
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue45985
+
+func app[S interface{ ~[]T }, T any](s S, e T) S {
+ return append(s, e)
+}
+
+func _() {
+ _ = app[/* ERROR "S does not match" */int]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2
new file mode 100644
index 0000000..81b3197
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46090.go2
@@ -0,0 +1,9 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// The predeclared type comparable is not visible before Go 1.18.
+
+package go1_17
+
+type _ comparable // ERROR undeclared
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
new file mode 100644
index 0000000..f41ae26
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46275.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue46275
+
+type N[T any] struct {
+ *N[T]
+ t T
+}
+
+func (n *N[T]) Elem() T {
+ return n.t
+}
+
+type I interface {
+ Elem() string
+}
+
+func _() {
+ var n1 *N[string]
+ var _ I = n1
+ type NS N[string]
+ var n2 *NS
+ var _ I = n2
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46461.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46461.go2
new file mode 100644
index 0000000..4432402
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46461.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// test case 1
+type T /* ERROR illegal cycle */ [U interface{ M() T[U] }] int
+
+type X int
+
+func (X) M() T[X] { return 0 }
+
+// test case 2
+type A /* ERROR illegal cycle */ [T interface{ A[T] }] interface{}
+
+// test case 3
+type A2 /* ERROR illegal cycle */ [U interface{ A2[U] }] interface{ M() A2[U] }
+
+type I interface{ A2[I]; M() A2[I] }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
new file mode 100644
index 0000000..da1f1ff
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue46583.src
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 struct{}
+func (t T1) m(int) {}
+var f1 func(T1)
+
+type T2 struct{}
+func (t T2) m(x int) {}
+var f2 func(T2)
+
+type T3 struct{}
+func (T3) m(int) {}
+var f3 func(T3)
+
+type T4 struct{}
+func (T4) m(x int) {}
+var f4 func(T4)
+
+func _() {
+ f1 = T1 /* ERROR func\(T1, int\) */ .m
+ f2 = T2 /* ERROR func\(t T2, x int\) */ .m
+ f3 = T3 /* ERROR func\(T3, int\) */ .m
+ f4 = T4 /* ERROR func\(_ T4, x int\) */ .m
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2
new file mode 100644
index 0000000..b184f9b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47031.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Mer interface { M() }
+
+func F[T Mer](p *T) {
+ p.M /* ERROR p\.M undefined */ ()
+}
+
+type MyMer int
+
+func (MyMer) M() {}
+
+func _() {
+ F(new(MyMer))
+ F[Mer](nil)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
new file mode 100644
index 0000000..5c1fa80
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47115.go2
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type C0 interface{ int }
+type C1 interface{ chan int }
+type C2 interface{ chan int | <-chan int }
+type C3 interface{ chan int | chan float32 }
+type C4 interface{ chan int | chan<- int }
+type C5[T any] interface{ ~chan T | chan<- T }
+
+func _[T any](ch T) {
+ ch /* ERROR cannot send to ch .* no core type */ <- 0
+}
+
+func _[T C0](ch T) {
+ ch /* ERROR cannot send to non-channel */ <- 0
+}
+
+func _[T C1](ch T) {
+ ch <- 0
+}
+
+func _[T C2](ch T) {
+ ch /* ERROR cannot send to receive-only channel */ <- 0
+}
+
+func _[T C3](ch T) {
+ ch /* ERROR cannot send to ch .* no core type */ <- 0
+}
+
+func _[T C4](ch T) {
+ ch <- 0
+}
+
+func _[T C5[X], X any](ch T, x X) {
+ ch <- x
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2
new file mode 100644
index 0000000..108d600
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47127.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Embedding of stand-alone type parameters is not permitted.
+
+package p
+
+type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+)
+
+func _[P any]() {
+ type (
+ _[P any] interface{ *P | []P | chan P | map[string]P }
+ _[P any] interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _[P any] interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+
+ _ interface{ *P | []P | chan P | map[string]P }
+ _ interface{ P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ ~P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | P /* ERROR "cannot embed a type parameter" */ }
+ _ interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }
+ )
+}
+
+func _[P any, Q interface{ *P | []P | chan P | map[string]P }]() {}
+func _[P any, Q interface{ P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ ~P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ int | P /* ERROR "cannot embed a type parameter" */ }]() {}
+func _[P any, Q interface{ int | ~P /* ERROR "cannot embed a type parameter" */ }]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2
new file mode 100644
index 0000000..3f405ba
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47411.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+func g[_ interface{interface{comparable; ~int|~string}}]() {}
+
+func _[P comparable,
+ Q interface{ comparable; ~int|~string },
+ R any, // not comparable
+ S interface{ comparable; ~func() }, // not comparable
+]() {
+ _ = f[int]
+ _ = f[P]
+ _ = f[Q]
+ _ = f[func( /* ERROR does not implement comparable */ )]
+ _ = f[R /* ERROR R does not implement comparable */ ]
+
+ _ = g[int]
+ _ = g[P /* ERROR P does not implement interface{interface{comparable; ~int\|~string} */ ]
+ _ = g[Q]
+ _ = g[func( /* ERROR func\(\) does not implement interface{interface{comparable; ~int\|~string}} */ )]
+ _ = g[R /* ERROR R does not implement interface{interface{comparable; ~int\|~string} */ ]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47747.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47747.go2
new file mode 100644
index 0000000..6f09fc2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47747.go2
@@ -0,0 +1,71 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type T1[P any] P
+//
+// func (T1[_]) m() {}
+//
+// func _[P any](x *T1[P]) {
+// // x.m exists because x is of type *T1 where T1 is a defined type
+// // (even though under(T1) is a type parameter)
+// x.m()
+// }
+
+
+func _[P interface{ m() }](x P) {
+ x.m()
+ // (&x).m doesn't exist because &x is of type *P
+ // and pointers to type parameters don't have methods
+ (&x).m /* ERROR type \*P is pointer to type parameter, not type parameter */ ()
+}
+
+
+type T2 interface{ m() }
+
+func _(x *T2) {
+ // x.m doesn't exist because x is of type *T2
+ // and pointers to interfaces don't have methods
+ x.m /* ERROR type \*T2 is pointer to interface, not interface */()
+}
+
+// Test case 1 from issue
+
+type Fooer1[t any] interface {
+ Foo(Barer[t])
+}
+type Barer[t any] interface {
+ Bar(t)
+}
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type Foo1[t any] t
+// type Bar[t any] t
+//
+// func (l Foo1[t]) Foo(v Barer[t]) { v.Bar(t(l)) }
+// func (b *Bar[t]) Bar(l t) { *b = Bar[t](l) }
+//
+// func _[t any](f Fooer1[t]) t {
+// var b Bar[t]
+// f.Foo(&b)
+// return t(b)
+// }
+
+// Test case 2 from issue
+
+// For now, a lone type parameter is not permitted as RHS in a type declaration (issue #45639).
+// type Fooer2[t any] interface {
+// Foo()
+// }
+//
+// type Foo2[t any] t
+//
+// func (f *Foo2[t]) Foo() {}
+//
+// func _[t any](v t) {
+// var f = Foo2[t](v)
+// _ = Fooer2[t](&f)
+// }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47796.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47796.go2
new file mode 100644
index 0000000..6667ba4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47796.go2
@@ -0,0 +1,33 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// parameterized types with self-recursive constraints
+type (
+ T1 /* ERROR illegal cycle */ [P T1[P]] interface{}
+ T2 /* ERROR illegal cycle */ [P, Q T2[P, Q]] interface{}
+ T3[P T2[P, Q], Q interface{ ~string }] interface{}
+
+ T4a /* ERROR illegal cycle */ [P T4a[P]] interface{ ~int }
+ T4b /* ERROR illegal cycle */ [P T4b[int]] interface{ ~int }
+ T4c /* ERROR illegal cycle */ [P T4c[string]] interface{ ~int }
+
+ // mutually recursive constraints
+ T5 /* ERROR illegal cycle */ [P T6[P]] interface{ int }
+ T6[P T5[P]] interface{ int }
+)
+
+// verify that constraints are checked as expected
+var (
+ _ T1[int]
+ _ T2[int, string]
+ _ T3[int, string]
+)
+
+// test case from issue
+
+type Eq /* ERROR illegal cycle */ [a Eq[a]] interface {
+ Equal(that a) bool
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47818.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47818.go2
new file mode 100644
index 0000000..6069f1f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47818.go2
@@ -0,0 +1,57 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Parser accepts type parameters but the type checker
+// needs to report any operations that are not permitted
+// before Go 1.18.
+
+package go1_17
+
+type T[P /* ERROR type parameter requires go1\.18 or later */ any /* ERROR undeclared name: any \(requires version go1\.18 or later\) */ ] struct{}
+
+// for init (and main, but we're not in package main) we should only get one error
+func init[P /* ERROR func init must have no type parameters */ any /* ERROR undeclared name: any \(requires version go1\.18 or later\) */ ]() {}
+func main[P /* ERROR type parameter requires go1\.18 or later */ any /* ERROR undeclared name: any \(requires version go1\.18 or later\) */ ]() {}
+
+func f[P /* ERROR type parameter requires go1\.18 or later */ any /* ERROR undeclared name: any \(requires version go1\.18 or later\) */ ](x P) {
+ var _ T[ /* ERROR type instantiation requires go1\.18 or later */ int]
+ var _ (T[ /* ERROR type instantiation requires go1\.18 or later */ int])
+ _ = T[ /* ERROR type instantiation requires go1\.18 or later */ int]{}
+ _ = T[ /* ERROR type instantiation requires go1\.18 or later */ int](struct{}{})
+}
+
+func (T[ /* ERROR type instantiation requires go1\.18 or later */ P]) g(x int) {
+ f[ /* ERROR function instantiation requires go1\.18 or later */ int](0) // explicit instantiation
+ (f[ /* ERROR function instantiation requires go1\.18 or later */ int])(0) // parentheses (different code path)
+ f( /* ERROR implicit function instantiation requires go1\.18 or later */ x) // implicit instantiation
+}
+
+type C1 interface {
+ comparable // ERROR undeclared name: comparable \(requires version go1\.18 or later\)
+}
+
+type C2 interface {
+ comparable // ERROR undeclared name: comparable \(requires version go1\.18 or later\)
+ int // ERROR embedding non-interface type int requires go1\.18 or later
+ ~ /* ERROR embedding interface element ~int requires go1\.18 or later */ int
+ int /* ERROR embedding interface element int\|~string requires go1\.18 or later */ | ~string
+}
+
+type _ interface {
+ // errors for these were reported with their declaration
+ C1
+ C2
+}
+
+type (
+ _ comparable // ERROR undeclared name: comparable \(requires version go1\.18 or later\)
+ // errors for these were reported with their declaration
+ _ C1
+ _ C2
+
+ _ = comparable // ERROR undeclared name: comparable \(requires version go1\.18 or later\)
+ // errors for these were reported with their declaration
+ _ = C1
+ _ = C2
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2
new file mode 100644
index 0000000..4c4fc2f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47887.go2
@@ -0,0 +1,28 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Fooer[t any] interface {
+ foo(Barer[t])
+}
+type Barer[t any] interface {
+ bar(Bazer[t])
+}
+type Bazer[t any] interface {
+ Fooer[t]
+ baz(t)
+}
+
+type Int int
+
+func (n Int) baz(int) {}
+func (n Int) foo(b Barer[int]) { b.bar(n) }
+
+type F[t any] interface { f(G[t]) }
+type G[t any] interface { g(H[t]) }
+type H[t any] interface { F[t] }
+
+type T struct{}
+func (n T) f(b G[T]) { b.g(n) }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47968.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47968.go2
new file mode 100644
index 0000000..711e50a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47968.go2
@@ -0,0 +1,21 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func (T[P]) m1()
+
+type A1 = T // ERROR cannot use generic type
+
+func (A1[P]) m2() {}
+
+type A2 = T[int]
+
+func (A2 /* ERROR cannot define methods on instantiated type T\[int\] */) m3() {}
+func (_ /* ERROR cannot define methods on instantiated type T\[int\] */ A2) m4() {}
+
+func (T[int]) m5() {} // int is the type parameter name, not an instantiation
+func (T[* /* ERROR must be an identifier */ int]) m6() {} // syntax error
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2
new file mode 100644
index 0000000..2c4b661
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue47996.go2
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// don't crash
+func T /* ERROR missing */ [P] /* ERROR missing */ m /* ERROR unexpected */ () /* ERROR \) */ { /* ERROR { */ } /* ERROR } */
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48008.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48008.go2
new file mode 100644
index 0000000..6c14c78
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48008.go2
@@ -0,0 +1,60 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func _(x interface{}) {
+ switch x.(type) {
+ case nil:
+ case int:
+
+ case T[int]:
+ case []T[int]:
+ case [10]T[int]:
+ case struct{T[int]}:
+ case *T[int]:
+ case func(T[int]):
+ case interface{m(T[int])}:
+ case map[T[int]] string:
+ case chan T[int]:
+
+ case T /* ERROR cannot use generic type T\[P any\] without instantiation */ :
+ case []T /* ERROR cannot use generic type */ :
+ case [10]T /* ERROR cannot use generic type */ :
+ case struct{T /* ERROR cannot use generic type */ }:
+ case *T /* ERROR cannot use generic type */ :
+ case func(T /* ERROR cannot use generic type */ ):
+ case interface{m(T /* ERROR cannot use generic type */ )}:
+ case map[T /* ERROR cannot use generic type */ ] string:
+ case chan T /* ERROR cannot use generic type */ :
+
+ case T /* ERROR cannot use generic type */ , *T /* ERROR cannot use generic type */ :
+ }
+}
+
+// Make sure a parenthesized nil is ok.
+
+func _(x interface{}) {
+ switch x.(type) {
+ case ((nil)), int:
+ }
+}
+
+// Make sure we look for the predeclared nil.
+
+func _(x interface{}) {
+ type nil int
+ switch x.(type) {
+ case nil: // ok - this is the type nil
+ }
+}
+
+func _(x interface{}) {
+ var nil int
+ switch x.(type) {
+ case nil /* ERROR not a type */ : // not ok - this is the variable nil
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48018.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48018.go2
new file mode 100644
index 0000000..e6ccc6b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48018.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+type Box[A any] struct {
+ value A
+}
+
+func Nest[A /* ERROR instantiation cycle */ any](b Box[A], n int) interface{} {
+ if n == 0 {
+ return b
+ }
+ return Nest(Box[Box[A]]{b}, n-1)
+}
+
+func main() {
+ Nest(Box[int]{0}, 10)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48048.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48048.go2
new file mode 100644
index 0000000..f401330
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48048.go2
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+func (T[_]) A() {}
+
+var _ = (T[int]).A
+var _ = (*T[int]).A
+
+var _ = (T /* ERROR cannot use generic type */).A
+var _ = (*T /* ERROR cannot use generic type */).A
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48082.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48082.src
new file mode 100644
index 0000000..5395154
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48082.src
@@ -0,0 +1,7 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package issue48082
+
+import "init" /* ERROR init must be a func */ /* ERROR could not import init */
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48083.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48083.go2
new file mode 100644
index 0000000..3dae514
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48083.go2
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T[P any] struct{}
+
+type _ interface{ int | T /* ERROR cannot use generic type */ }
\ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48136.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48136.go2
new file mode 100644
index 0000000..0ab92df
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48136.go2
@@ -0,0 +1,36 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[P interface{ *P }]() {}
+func f2[P interface{ func(P) }]() {}
+func f3[P, Q interface{ func(Q) P }]() {}
+func f4[P interface{ *Q }, Q interface{ func(P) }]() {}
+func f5[P interface{ func(P) }]() {}
+func f6[P interface { *Tree[P] }, Q any ]() {}
+
+func _() {
+ f1( /* ERROR cannot infer P */ )
+ f2( /* ERROR cannot infer P */ )
+ f3( /* ERROR cannot infer P */ )
+ f4( /* ERROR cannot infer P */ )
+ f5( /* ERROR cannot infer P */ )
+ f6( /* ERROR cannot infer P */ )
+}
+
+type Tree[P any] struct {
+ left, right *Tree[P]
+ data P
+}
+
+// test case from issue
+
+func foo[Src interface { func() Src }]() Src {
+ return foo[Src]
+}
+
+func _() {
+ foo( /* ERROR cannot infer Src */ )
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2
new file mode 100644
index 0000000..e069930
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48234.go2
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = interface{
+ m()
+ m /* ERROR "duplicate method" */ ()
+}(nil)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48312.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48312.go2
new file mode 100644
index 0000000..2fdb7ca
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48312.go2
@@ -0,0 +1,20 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T interface{ m() }
+type P *T
+
+func _(p *T) {
+ p.m /* ERROR type \*T is pointer to interface, not interface */ ()
+}
+
+func _(p P) {
+ p.m /* ERROR type P is pointer to interface, not interface */ ()
+}
+
+func _[P T](p *P) {
+ p.m /* ERROR type \*P is pointer to type parameter, not type parameter */ ()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48472.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48472.go2
new file mode 100644
index 0000000..2d908f4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48472.go2
@@ -0,0 +1,16 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g() {
+ var s string
+ var i int
+ _ = s /* ERROR invalid operation: s \+ i \(mismatched types string and int\) */ + i
+}
+
+func f(i int) int {
+ i /* ERROR invalid operation: i \+= "1" \(mismatched types int and untyped string\) */ += "1"
+ return i
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48529.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48529.go2
new file mode 100644
index 0000000..a3653fa
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48529.go2
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T /* ERROR illegal cycle */ [U interface{ M() T[U, int] }] int
+
+type X int
+
+func (X) M() T[X] { return 0 }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48582.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48582.go2
new file mode 100644
index 0000000..c12091b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48582.go2
@@ -0,0 +1,29 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type N /* ERROR cycle */ interface {
+ int | N
+}
+
+type A /* ERROR cycle */ interface {
+ int | B
+}
+
+type B interface {
+ int | A
+}
+
+type S /* ERROR cycle */ struct {
+ I // ERROR interface contains type constraints
+}
+
+type I interface {
+ int | S
+}
+
+type P interface {
+ *P // ERROR interface contains type constraints
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2
new file mode 100644
index 0000000..72eea1e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48619.go2
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P any](a, _ P) {
+ var x int
+ // TODO(gri) these error messages, while correct, could be better
+ f(a, x /* ERROR type int of x does not match inferred type P for P */)
+ f(x, a /* ERROR type P of a does not match inferred type int for P */)
+}
+
+func g[P any](a, b P) {
+ g(a, b)
+ g(&a, &b)
+ g([]P{}, []P{})
+
+ // work-around: provide type argument explicitly
+ g[*P](&a, &b)
+ g[[]P]([]P{}, []P{})
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2
new file mode 100644
index 0000000..0f60f47
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48656.go2
@@ -0,0 +1,13 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P *Q, Q any](P, Q) {
+ _ = f[P]
+}
+
+func f2[P /* ERROR instantiation cycle */ *Q, Q any](P, Q) {
+ _ = f2[*P]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48695.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48695.go2
new file mode 100644
index 0000000..9f4a768
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48695.go2
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func g[P ~func(T) P, T any](P) {}
+
+func _() {
+ type F func(int) F
+ var f F
+ g(f)
+ _ = g[F]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48703.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48703.go2
new file mode 100644
index 0000000..8a32c1e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48703.go2
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+// The actual example from the issue.
+type List[P any] struct{}
+
+func (_ List[P]) m() (_ List[List[P]]) { return }
+
+// Other types of recursion through methods.
+type R[P any] int
+
+func (*R[R /* ERROR must be an identifier */ [int]]) m0() {}
+func (R[P]) m1(R[R[P]]) {}
+func (R[P]) m2(R[*P]) {}
+func (R[P]) m3([unsafe.Sizeof(new(R[P]))]int) {}
+func (R[P]) m4([unsafe.Sizeof(new(R[R[P]]))]int) {}
+
+// Mutual recursion
+type M[P any] int
+
+func (R[P]) m5(M[M[P]]) {}
+func (M[P]) m(R[R[P]]) {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48712.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48712.go2
new file mode 100644
index 0000000..ab39756
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48712.go2
@@ -0,0 +1,41 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P comparable](x, y P) {
+ _ = x == x
+ _ = x == y
+ _ = y == x
+ _ = y == y
+
+ _ = x /* ERROR type parameter P is not comparable with < */ < y
+}
+
+func _[P comparable](x P, y any) {
+ _ = x == x
+ _ = x == y
+ _ = y == x
+ _ = y == y
+
+ _ = x /* ERROR type parameter P is not comparable with < */ < y
+}
+
+func _[P any](x, y P) {
+ _ = x /* ERROR type parameter P is not comparable with == */ == x
+ _ = x /* ERROR type parameter P is not comparable with == */ == y
+ _ = y /* ERROR type parameter P is not comparable with == */ == x
+ _ = y /* ERROR type parameter P is not comparable with == */ == y
+
+ _ = x /* ERROR type parameter P is not comparable with < */ < y
+}
+
+func _[P any](x P, y any) {
+ _ = x /* ERROR type parameter P is not comparable with == */ == x
+ _ = x /* ERROR type parameter P is not comparable with == */ == y
+ _ = y == x // ERROR type parameter P is not comparable with ==
+ _ = y == y
+
+ _ = x /* ERROR type parameter P is not comparable with < */ < y
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48819.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48819.src
new file mode 100644
index 0000000..9262110
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48819.src
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type T /* ERROR illegal cycle in declaration of T */ struct {
+ T
+}
+
+func _(t T) {
+ _ = unsafe.Sizeof(t) // should not go into infinite recursion here
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48951.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48951.go2
new file mode 100644
index 0000000..a936528
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48951.go2
@@ -0,0 +1,21 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ A1[P any] [10]A1 /* ERROR illegal cycle */ [P]
+ A2[P any] [10]A2 /* ERROR illegal cycle */ [*P]
+ A3[P any] [10]*A3[P]
+
+ L1[P any] []L1[P]
+
+ S1[P any] struct{ f S1 /* ERROR illegal cycle */ [P] }
+ S2[P any] struct{ f S2 /* ERROR illegal cycle */ [*P] } // like example in issue
+ S3[P any] struct{ f *S3[P] }
+
+ I1[P any] interface{ I1 /* ERROR illegal cycle */ [P] }
+ I2[P any] interface{ I2 /* ERROR illegal cycle */ [*P] }
+ I3[P any] interface{ *I3 /* ERROR interface contains type constraints */ [P] }
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48962.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48962.go2
new file mode 100644
index 0000000..4270da1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48962.go2
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T0[P any] struct {
+ f P
+}
+
+type T1 /* ERROR illegal cycle */ struct {
+ _ T0[T1]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48974.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48974.go2
new file mode 100644
index 0000000..d8ff7c8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue48974.go2
@@ -0,0 +1,22 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Fooer interface {
+ Foo()
+}
+
+type Fooable[F /* ERROR instantiation cycle */ Fooer] struct {
+ ptr F
+}
+
+func (f *Fooable[F]) Adapter() *Fooable[*FooerImpl[F]] {
+ return &Fooable[*FooerImpl[F]]{&FooerImpl[F]{}}
+}
+
+type FooerImpl[F Fooer] struct {
+}
+
+func (fi *FooerImpl[F]) Foo() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49003.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49003.go
new file mode 100644
index 0000000..ece1a27
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49003.go
@@ -0,0 +1,10 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f(s string) int {
+ for range s {
+ }
+} // ERROR missing return
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49005.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49005.go
new file mode 100644
index 0000000..7083dc9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49005.go
@@ -0,0 +1,31 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T1 interface{ M() }
+
+func F1() T1
+
+var _ = F1().(*X1 /* ERROR undeclared name: X1 */)
+
+func _() {
+ switch F1().(type) {
+ case *X1 /* ERROR undeclared name: X1 */ :
+ }
+}
+
+type T2 interface{ M() }
+
+func F2() T2
+
+var _ = F2 /* ERROR impossible type assertion: F2\(\)\.\(\*X2\)\n\t\*X2 does not implement T2 \(missing method M\) */ ().(*X2)
+
+type X2 struct{}
+
+func _() {
+ switch F2().(type) {
+ case * /* ERROR impossible type switch case: \*X2\n\tF2\(\) \(value of type T2\) cannot have dynamic type \*X2 \(missing method M\) */ X2:
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49043.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49043.go2
new file mode 100644
index 0000000..a360457
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49043.go2
@@ -0,0 +1,24 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The example from the issue.
+type (
+ N[P any] M /* ERROR illegal cycle */ [P]
+ M[P any] N /* ERROR illegal cycle */ [P]
+)
+
+// A slightly more complicated case.
+type (
+ A[P any] B /* ERROR illegal cycle */ [P]
+ B[P any] C[P]
+ C[P any] A[P]
+)
+
+// Confusing but valid (note that `type T *T` is valid).
+type (
+ N1[P any] *M1[P]
+ M1[P any] *N1[P]
+)
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49112.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49112.go2
new file mode 100644
index 0000000..0efc906
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49112.go2
@@ -0,0 +1,15 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P int](P) {}
+
+func _() {
+ _ = f[int]
+ _ = f[[ /* ERROR \[\]int does not implement int */ ]int]
+
+ f(0)
+ f( /* ERROR \[\]int does not implement int */ []int{})
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49179.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49179.go2
new file mode 100644
index 0000000..75bea18
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49179.go2
@@ -0,0 +1,37 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[P int | string]() {}
+func f2[P ~int | string | float64]() {}
+func f3[P int](x P) {}
+
+type myInt int
+type myFloat float64
+
+func _() {
+ _ = f1[int]
+ _ = f1[myInt /* ERROR possibly missing ~ for int in constraint int\|string */]
+ _ = f2[myInt]
+ _ = f2[myFloat /* ERROR possibly missing ~ for float64 in constraint int\|string|float64 */]
+ var x myInt
+ f3( /* ERROR myInt does not implement int \(possibly missing ~ for int in constraint int\) */ x)
+}
+
+// test case from the issue
+
+type SliceConstraint[T any] interface {
+ []T
+}
+
+func Map[S SliceConstraint[E], E any](s S, f func(E) E) S {
+ return s
+}
+
+type MySlice []int
+
+func f(s MySlice) {
+ Map[MySlice /* ERROR MySlice does not implement SliceConstraint\[int\] \(possibly missing ~ for \[\]int in constraint SliceConstraint\[int\]\) */, int](s, nil)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49242.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49242.go2
new file mode 100644
index 0000000..524a0cb
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49242.go2
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P int](x P) int {
+ return x // ERROR cannot use x .* as int value in return statement
+}
+
+func _[P int]() int {
+ return P /* ERROR cannot use P\(1\) .* as int value in return statement */ (1)
+}
+
+func _[P int](x int) P {
+ return x // ERROR cannot use x .* as P value in return statement
+}
+
+func _[P, Q any](x P) Q {
+ return x // ERROR cannot use x .* as Q value in return statement
+}
+
+// test case from issue
+func F[G interface{ uint }]() int {
+ f := func(uint) int { return 0 }
+ return f(G /* ERROR cannot use G\(1\) .* as uint value in argument to f */ (1))
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49247.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49247.go2
new file mode 100644
index 0000000..3f25e0e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49247.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func Add1024[T integer](s []T) {
+ for i, v := range s {
+ s[i] = v + 1024 // ERROR cannot convert 1024 \(untyped int constant\) to T
+ }
+}
+
+func f[T interface{ int8 }]() {
+ println(T(1024 /* ERROR cannot convert 1024 \(untyped int value\) to T */))
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49276.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49276.go
new file mode 100644
index 0000000..8839087
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49276.go
@@ -0,0 +1,46 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type S /* ERROR illegal cycle in declaration of S */ struct {
+ _ [unsafe.Sizeof(s)]byte
+}
+
+var s S
+
+// Since f is a pointer, this case could be valid.
+// But it's pathological and not worth the expense.
+type T struct {
+ f *[unsafe.Sizeof(T /* ERROR illegal cycle in type declaration */ {})]int
+}
+
+// a mutually recursive case using unsafe.Sizeof
+type (
+ A1 struct {
+ _ [unsafe.Sizeof(B1{})]int
+ }
+
+ B1 struct {
+ _ [unsafe.Sizeof(A1 /* ERROR illegal cycle in type declaration */ {})]int
+ }
+)
+
+// a mutually recursive case using len
+type (
+ A2 struct {
+ f [len(B2{}.f)]int
+ }
+
+ B2 struct {
+ f [len(A2 /* ERROR illegal cycle in type declaration */ {}.f)]int
+ }
+)
+
+// test case from issue
+type a struct {
+ _ [42 - unsafe.Sizeof(a /* ERROR illegal cycle in type declaration */ {})]byte
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49296.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49296.go2
new file mode 100644
index 0000000..eaa8e4d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49296.go2
@@ -0,0 +1,20 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[
+ T0 any,
+ T1 []int,
+ T2 ~float64 | ~complex128 | chan int,
+]() {
+ _ = T0(nil /* ERROR cannot convert nil to T0 */ )
+ _ = T1(1 /* ERROR cannot convert 1 .* to T1 */ )
+ _ = T2(2 /* ERROR cannot convert 2 .* to T2 */ )
+}
+
+// test case from issue
+func f[T interface{[]int}]() {
+ _ = T(1 /* ERROR cannot convert */ )
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49439.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49439.go2
new file mode 100644
index 0000000..6cc838b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49439.go2
@@ -0,0 +1,26 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "unsafe"
+
+type T0 /* ERROR illegal cycle */ [P T0[P]] struct{}
+
+type T1 /* ERROR illegal cycle */ [P T2[P]] struct{}
+type T2[P T1[P]] struct{}
+
+type T3 /* ERROR illegal cycle */ [P interface{ ~struct{ f T3[int] } }] struct{}
+
+// valid cycle in M
+type N[P M[P]] struct{}
+type M[Q any] struct { F *M[Q] }
+
+// "crazy" case
+type TC[P [unsafe.Sizeof(func() {
+ type T [P [unsafe.Sizeof(func(){})]byte] struct{}
+})]byte] struct{}
+
+// test case from issue
+type X /* ERROR illegal cycle */ [T any, PT X[T]] interface{}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2
new file mode 100644
index 0000000..f289d2e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49482.go2
@@ -0,0 +1,25 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+// The following is OK, per the special handling for type literals discussed in issue #49482.
+type _[P *struct{}] struct{}
+type _[P *int,] int
+type _[P (*int),] int
+
+const P = 2 // declare P to avoid noisy 'undeclared name' errors below.
+
+// The following parse as invalid array types.
+type _[P *int /* ERROR "int \(type\) is not an expression" */ ] int
+type _[P /* ERROR non-function P */ (*int)] int
+
+// The following should be parsed as a generic type, but is instead parsed as an array type.
+type _[P *struct /* ERROR "not an expression" */ {}| int /* ERROR "not an expression" */ ] struct{}
+
+// The following fails to parse, due to the '~'
+type _[P *struct /* ERROR "not an expression" */ {}|~ /* ERROR "unexpected ~" */ int] struct{}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49541.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49541.go2
new file mode 100644
index 0000000..c8499c1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49541.go2
@@ -0,0 +1,45 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S[A, B any] struct {
+ f int
+}
+
+func (S[A, B]) m() {}
+
+// TODO(gri): with type-type inference enabled we should only report one error
+// below. See issue #50588.
+
+func _[A any](s S /* ERROR got 1 arguments but 2 type parameters */ [A]) {
+ // we should see no follow-on errors below
+ s.f = 1
+ s.m()
+}
+
+// another test case from the issue
+
+func _() {
+ X(Interface[*F /* ERROR got 1 arguments but 2 type parameters */ [string]](Impl{}))
+}
+
+func X[Q Qer](fs Interface[Q]) {
+}
+
+type Impl struct{}
+
+func (Impl) M() {}
+
+type Interface[Q Qer] interface {
+ M()
+}
+
+type Qer interface {
+ Q()
+}
+
+type F[A, B any] struct{}
+
+func (f *F[A, B]) Q() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49579.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49579.go2
new file mode 100644
index 0000000..ee2d94a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49579.go2
@@ -0,0 +1,17 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type I[F any] interface {
+ Q(*F)
+}
+
+func G[F any]() I[any] {
+ return g /* ERROR cannot use g\[F\]{} .* as I\[any\] value in return statement: g\[F\] does not implement I\[any\] \(method Q has pointer receiver\) */ [F]{}
+}
+
+type g[F any] struct{}
+
+func (*g[F]) Q(*any) {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49592.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49592.go2
new file mode 100644
index 0000000..846deaa
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49592.go2
@@ -0,0 +1,11 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ var x *interface{}
+ var y interface{}
+ _ = x == y
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49602.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49602.go2
new file mode 100644
index 0000000..9edbf14
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49602.go2
@@ -0,0 +1,19 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type M interface {
+ m()
+}
+
+type C interface {
+ comparable
+}
+
+type _ interface{
+ int | M // ERROR cannot use p\.M in union \(p\.M contains methods\)
+ int | comparable // ERROR cannot use comparable in union
+ int | C // ERROR cannot use p\.C in union \(p\.C embeds comparable\)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49705.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49705.go2
new file mode 100644
index 0000000..5b5fba2
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49705.go2
@@ -0,0 +1,14 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Integer interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64 |
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+func shl[I Integer](n int) I {
+ return 1 << n
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2
new file mode 100644
index 0000000..5087022
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49735.go2
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P1 any, P2 ~byte](s1 P1, s2 P2) {
+ _ = append(nil /* ERROR first argument to append must be a slice; have untyped nil */ , 0)
+ _ = append(s1 /* ERROR s1 .* has no core type */ , 0)
+ _ = append(s2 /* ERROR s2 .* has core type byte */ , 0)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49739.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49739.go2
new file mode 100644
index 0000000..46b1e71
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49739.go2
@@ -0,0 +1,23 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Verify that we get an empty type set (not just an error)
+// when using an invalid ~A.
+
+package p
+
+type A int
+type C interface {
+ ~ /* ERROR invalid use of ~ */ A
+}
+
+func f[_ C]() {}
+func g[_ interface{ C }]() {}
+func h[_ C | int]() {}
+
+func _() {
+ _ = f[int /* ERROR cannot implement C \(empty type set\) */]
+ _ = g[int /* ERROR cannot implement interface{C} \(empty type set\) */]
+ _ = h[int]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49864.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49864.go2
new file mode 100644
index 0000000..0437e74
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue49864.go2
@@ -0,0 +1,9 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P ~int, Q any](p P) {
+ _ = Q(p /* ERROR cannot convert */ )
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50259.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50259.go2
new file mode 100644
index 0000000..6df8c64
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50259.go2
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var x T[B]
+
+type T[_ any] struct{}
+type A T[B]
+type B = T[A]
+
+// test case from issue
+
+var v Box[Step]
+type Box[T any] struct{}
+type Step = Box[StepBox]
+type StepBox Box[Step]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50276.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50276.go2
new file mode 100644
index 0000000..97e477e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50276.go2
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// simplified test case
+
+type transform[T any] struct{}
+type pair[S any] struct {}
+
+var _ transform[step]
+
+type box transform[step]
+type step = pair[box]
+
+// test case from issue
+
+type Transform[T any] struct{ hold T }
+type Pair[S, T any] struct {
+ First S
+ Second T
+}
+
+var first Transform[Step]
+
+// This line doesn't use the Step alias, and it compiles fine if you uncomment it.
+var second Transform[Pair[Box, interface{}]]
+
+type Box *Transform[Step]
+
+// This line is the same as the `first` line, but it comes after the Box declaration and
+// does not break the compile.
+var third Transform[Step]
+
+type Step = Pair[Box, interface{}]
+
+// This line also does not break the compile
+var fourth Transform[Step]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50281.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50281.go2
new file mode 100644
index 0000000..f333e81
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50281.go2
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[S string | []byte](s S) {
+ var buf []byte
+ _ = append(buf, s...)
+}
+
+func _[S ~string | ~[]byte](s S) {
+ var buf []byte
+ _ = append(buf, s...)
+}
+
+// test case from issue
+
+type byteseq interface {
+ string | []byte
+}
+
+// This should allow to eliminate the two functions above.
+func AppendByteString[source byteseq](buf []byte, s source) []byte {
+ return append(buf, s[1:6]...)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50321.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50321.go2
new file mode 100644
index 0000000..199e66e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50321.go2
@@ -0,0 +1,8 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func Ln[A A /* ERROR cannot use a type parameter as constraint */ ](p A) {
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50372.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50372.go
new file mode 100644
index 0000000..0f15dc0
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50372.go
@@ -0,0 +1,27 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(s []int) {
+ var i, j, k, l int
+ _, _, _, _ = i, j, k, l
+
+ for range s {}
+ for i = range s {}
+ for i, j = range s {}
+ for i, j, k /* ERROR range clause permits at most two iteration variables */ = range s {}
+ for i, j, k /* ERROR range clause permits at most two iteration variables */, l = range s {}
+}
+
+func _(s chan int) {
+ var i, j, k, l int
+ _, _, _, _ = i, j, k, l
+
+ for range s {}
+ for i = range s {}
+ for i, j /* ERROR range over .* permits only one iteration variable */ = range s {}
+ for i, j /* ERROR range over .* permits only one iteration variable */, k = range s {}
+ for i, j /* ERROR range over .* permits only one iteration variable */, k, l = range s {}
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2
new file mode 100644
index 0000000..2caef1b
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50417.go2
@@ -0,0 +1,68 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Field accesses through type parameters are disabled
+// until we have a more thorough understanding of the
+// implications on the spec. See issue #51576.
+
+package p
+
+type Sf struct {
+ f int
+}
+
+func f0[P Sf](p P) {
+ _ = p.f // ERROR p\.f undefined
+ p.f /* ERROR p\.f undefined */ = 0
+}
+
+func f0t[P ~struct{f int}](p P) {
+ _ = p.f // ERROR p\.f undefined
+ p.f /* ERROR p\.f undefined */ = 0
+}
+
+var _ = f0[Sf]
+var _ = f0t[Sf]
+
+var _ = f0[Sm /* ERROR does not implement */ ]
+var _ = f0t[Sm /* ERROR does not implement */ ]
+
+func f1[P interface{ Sf; m() }](p P) {
+ _ = p.f // ERROR p\.f undefined
+ p.f /* ERROR p\.f undefined */ = 0
+ p.m()
+}
+
+var _ = f1[Sf /* ERROR missing method m */ ]
+var _ = f1[Sm /* ERROR does not implement */ ]
+
+type Sm struct {}
+
+func (Sm) m() {}
+
+type Sfm struct {
+ f int
+}
+
+func (Sfm) m() {}
+
+func f2[P interface{ Sfm; m() }](p P) {
+ _ = p.f // ERROR p\.f undefined
+ p.f /* ERROR p\.f undefined */ = 0
+ p.m()
+}
+
+var _ = f2[Sfm]
+
+// special case: core type is a named pointer type
+
+type PSfm *Sfm
+
+func f3[P interface{ PSfm }](p P) {
+ _ = p.f // ERROR p\.f undefined
+ p.f /* ERROR p\.f undefined */ = 0
+ p.m /* ERROR type P has no field or method m */ ()
+}
+
+var _ = f3[PSfm]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50426.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50426.go2
new file mode 100644
index 0000000..17ec0ce
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50426.go2
@@ -0,0 +1,44 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type A1 [2]uint64
+type A2 [2]uint64
+
+func (a A1) m() A1 { return a }
+func (a A2) m() A2 { return a }
+
+func f[B any, T interface {
+ A1 | A2
+ m() T
+}](v T) {
+}
+
+func _() {
+ var v A2
+ // Use function type inference to infer type A2 for T.
+ // Don't use constraint type inference before function
+ // type inference for typed arguments, otherwise it would
+ // infer type [2]uint64 for T which doesn't have method m
+ // (was the bug).
+ f[int](v)
+}
+
+// Keep using constraint type inference before function type
+// inference for untyped arguments so we infer type float64
+// for E below, and not int (which would not work).
+func g[S ~[]E, E any](S, E) {}
+
+func _() {
+ var s []float64
+ g[[]float64](s, 0)
+}
+
+// Keep using constraint type inference after function
+// type inference for untyped arguments so we infer
+// missing type arguments for which we only have the
+// untyped arguments as starting point.
+func h[E any, R []E](v E) R { return R{v} }
+func _() []int { return h(0) }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50450.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50450.go2
new file mode 100644
index 0000000..bae3111
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50450.go2
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S struct{}
+
+func f[P S]() {}
+
+var _ = f[S]
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50516.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50516.go2
new file mode 100644
index 0000000..f73015e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50516.go2
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P struct{ f int }](x P) {
+ _ = x.g // ERROR type P has no field or method g
+}
+
+func _[P struct{ f int } | struct{ g int }](x P) {
+ _ = x.g // ERROR type P has no field or method g
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50646.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50646.go2
new file mode 100644
index 0000000..3bdba11
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50646.go2
@@ -0,0 +1,26 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f1[_ comparable]() {}
+func f2[_ interface{ comparable }]() {}
+
+type T interface{ m() }
+
+func _[P comparable, Q ~int, R any]() {
+ _ = f1[int]
+ _ = f1[T /* ERROR T does not implement comparable */ ]
+ _ = f1[any /* ERROR any does not implement comparable */ ]
+ _ = f1[P]
+ _ = f1[Q]
+ _ = f1[R /* ERROR R does not implement comparable */]
+
+ _ = f2[int]
+ _ = f2[T /* ERROR T does not implement comparable */ ]
+ _ = f2[any /* ERROR any does not implement comparable */ ]
+ _ = f2[P]
+ _ = f2[Q]
+ _ = f2[R /* ERROR R does not implement comparable */]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2
new file mode 100644
index 0000000..afc7b24
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50755.go2
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// The core type of M2 unifies with the type of m1
+// during function argument type inference.
+// M2's constraint is unnamed.
+func f1[K1 comparable, E1 any](m1 map[K1]E1) {}
+
+func f2[M2 map[string]int](m2 M2) {
+ f1(m2)
+}
+
+// The core type of M3 unifies with the type of m1
+// during function argument type inference.
+// M3's constraint is named.
+type Map3 map[string]int
+
+func f3[M3 Map3](m3 M3) {
+ f1(m3)
+}
+
+// The core type of M5 unifies with the core type of M4
+// during constraint type inference.
+func f4[M4 map[K4]int, K4 comparable](m4 M4) {}
+
+func f5[M5 map[K5]int, K5 comparable](m5 M5) {
+ f4(m5)
+}
+
+// test case from issue
+
+func Copy[MC ~map[KC]VC, KC comparable, VC any](dst, src MC) {
+ for k, v := range src {
+ dst[k] = v
+ }
+}
+
+func Merge[MM ~map[KM]VM, KM comparable, VM any](ms ...MM) MM {
+ result := MM{}
+ for _, m := range ms {
+ Copy(result, m)
+ }
+ return result
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50779.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50779.go2
new file mode 100644
index 0000000..fe68c28
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50779.go2
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type AC interface {
+ C
+}
+
+type ST []int
+
+type R[S any, P any] struct{}
+
+type SR = R[SS, ST]
+
+type SS interface {
+ NSR(any) *SR // ERROR invalid use of type alias SR in recursive type
+}
+
+type C interface {
+ NSR(any) *SR
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50782.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50782.go2
new file mode 100644
index 0000000..fd1ab11
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50782.go2
@@ -0,0 +1,47 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Field accesses through type parameters are disabled
+// until we have a more thorough understanding of the
+// implications on the spec. See issue #51576.
+
+package p
+
+// The first example from the issue.
+type Numeric interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+// numericAbs matches numeric types with an Abs method.
+type numericAbs[T Numeric] interface {
+ ~struct{ Value T }
+ Abs() T
+}
+
+// AbsDifference computes the absolute value of the difference of
+// a and b, where the absolute value is determined by the Abs method.
+func absDifference[T numericAbs[T /* ERROR T does not implement Numeric */]](a, b T) T {
+ // Field accesses are not permitted for now. Keep an error so
+ // we can find and fix this code once the situation changes.
+ return a.Value // ERROR a\.Value undefined
+ // TODO: The error below should probably be positioned on the '-'.
+ // d := a /* ERROR "invalid operation: operator - not defined" */ .Value - b.Value
+ // return d.Abs()
+}
+
+// The second example from the issue.
+type T[P int] struct{ f P }
+
+func _[P T[P /* ERROR "P does not implement int" */ ]]() {}
+
+// Additional tests
+func _[P T[T /* ERROR "T\[P\] does not implement int" */ [P /* ERROR "P does not implement int" */ ]]]() {}
+func _[P T[Q /* ERROR "Q does not implement int" */ ], Q T[P /* ERROR "P does not implement int" */ ]]() {}
+func _[P T[Q], Q int]() {}
+
+type C[P comparable] struct{ f P }
+func _[P C[C[P]]]() {}
+func _[P C[C /* ERROR "C\[Q\] does not implement comparable" */ [Q /* ERROR "Q does not implement comparable" */]], Q func()]() {}
+func _[P [10]C[P]]() {}
+func _[P struct{ f C[C[P]]}]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50816.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50816.go2
new file mode 100644
index 0000000..e7e31d9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50816.go2
@@ -0,0 +1,23 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package pkg
+
+type I interface {
+ Foo()
+}
+
+type T1 struct{}
+
+func (T1) foo() {}
+
+type T2 struct{}
+
+func (T2) foo() string { return "" }
+
+func _() {
+ var i I
+ _ = i /* ERROR impossible type assertion: i\.\(T1\)\n\tT1 does not implement I \(missing method Foo\)\n\t\thave foo\(\)\n\t\twant Foo\(\) */ .(T1)
+ _ = i /* ERROR impossible type assertion: i\.\(T2\)\n\tT2 does not implement I \(missing method Foo\)\n\t\thave foo\(\) string\n\t\twant Foo\(\) */ .(T2)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50833.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50833.go2
new file mode 100644
index 0000000..e912e4d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50833.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type (
+ S struct{ f int }
+ PS *S
+)
+
+func a() []*S { return []*S{{f: 1}} }
+func b() []PS { return []PS{{f: 1}} }
+
+func c[P *S]() []P { return []P{{f: 1}} }
+func d[P PS]() []P { return []P{{f: 1}} }
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50912.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50912.go2
new file mode 100644
index 0000000..f161925
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50912.go2
@@ -0,0 +1,19 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func Real[P ~complex128](x P) {
+ _ = real(x /* ERROR not supported */ )
+}
+
+func Imag[P ~complex128](x P) {
+ _ = imag(x /* ERROR not supported */ )
+}
+
+func Complex[P ~float64](x P) {
+ _ = complex(x /* ERROR not supported */ , 0)
+ _ = complex(0 /* ERROR not supported */ , x)
+ _ = complex(x /* ERROR not supported */ , x)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50918.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50918.go
new file mode 100644
index 0000000..41604b8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50918.go
@@ -0,0 +1,21 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type thing1 struct {
+ things []string
+}
+
+type thing2 struct {
+ things []thing1
+}
+
+func _() {
+ var a1, b1 thing1
+ _ = a1 /* ERROR struct containing \[\]string cannot be compared */ == b1
+
+ var a2, b2 thing2
+ _ = a2 /* ERROR struct containing \[\]thing1 cannot be compared */ == b2
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50929.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50929.go2
new file mode 100644
index 0000000..3629ecf
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50929.go2
@@ -0,0 +1,68 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+import "fmt"
+
+type F[A, B any] int
+
+func G[A, B any](F[A, B]) {
+}
+
+func _() {
+ // TODO(gri) only report one error below (issue #50932)
+ var x F /* ERROR got 1 arguments but 2 type parameters */ [int]
+ G(x /* ERROR does not match */)
+}
+
+// test case from issue
+// (lots of errors but doesn't crash anymore)
+
+type RC[G any, RG any] interface {
+ ~[]RG
+}
+
+type RG[G any] struct{}
+
+type RSC[G any] []*RG[G]
+
+type M[Rc RC[G, RG], G any, RG any] struct {
+ Fn func(Rc)
+}
+
+type NFn[Rc RC[G, RG], G any, RG any] func(Rc)
+
+func NC[Rc RC[G, RG], G any, RG any](nFn NFn[Rc, G, RG]) {
+ var empty Rc
+ nFn(empty)
+}
+
+func NSG[G any](c RSC[G]) {
+ fmt.Println(c)
+}
+
+func MMD[Rc RC /* ERROR got 1 arguments */ [RG], RG any, G any]() M /* ERROR got 2 arguments */ [Rc, RG] {
+
+ var nFn NFn /* ERROR got 2 arguments */ [Rc, RG]
+
+ var empty Rc
+ switch any(empty).(type) {
+ case BC /* ERROR undeclared name: BC */ :
+
+ case RSC[G]:
+ nFn = NSG /* ERROR cannot use NSG\[G\] */ [G]
+ }
+
+ return M /* ERROR got 2 arguments */ [Rc, RG]{
+ Fn: func(rc Rc) {
+ NC(nFn /* ERROR does not match */ )
+ },
+ }
+
+ return M /* ERROR got 2 arguments */ [Rc, RG]{}
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50965.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50965.go
new file mode 100644
index 0000000..bf2dcc9
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue50965.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _(x int, c string) {
+ switch x {
+ case c /* ERROR invalid case c in switch on x \(mismatched types string and int\) */ :
+ }
+}
+
+func _(x, c []int) {
+ switch x {
+ case c /* ERROR invalid case c in switch on x \(slice can only be compared to nil\) */ :
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51048.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51048.go2
new file mode 100644
index 0000000..5830837
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51048.go2
@@ -0,0 +1,11 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[P int]() {
+ _ = f[P]
+}
+
+func f[T int]() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go
new file mode 100644
index 0000000..b84391d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51145.go
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "fmt"
+
+type (
+ _ [fmt /* ERROR invalid array length fmt */ ]int
+ _ [float64 /* ERROR invalid array length float64 */ ]int
+ _ [f /* ERROR invalid array length f */ ]int
+ _ [nil /* ERROR invalid array length nil */ ]int
+)
+
+func f()
+
+var _ fmt.Stringer // use fmt
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2
new file mode 100644
index 0000000..3edc505
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51158.go2
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Type checking the following code should not cause an infinite recursion.
+func f[M map[K]int, K comparable](m M) {
+ f(m)
+}
+
+// Equivalent code using mutual recursion.
+func f1[M map[K]int, K comparable](m M) {
+ f2(m)
+}
+func f2[M map[K]int, K comparable](m M) {
+ f1(m)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2
new file mode 100644
index 0000000..ef873e6
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51229.go2
@@ -0,0 +1,164 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Constraint type inference should be independent of the
+// ordering of the type parameter declarations. Try all
+// permutations in the test case below.
+// Permutations produced by https://go.dev/play/p/PHcZNGJTEBZ.
+
+func f00[S1 ~[]E1, S2 ~[]E2, E1 ~byte, E2 ~byte](S1, S2) {}
+func f01[S2 ~[]E2, S1 ~[]E1, E1 ~byte, E2 ~byte](S1, S2) {}
+func f02[E1 ~byte, S1 ~[]E1, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f03[S1 ~[]E1, E1 ~byte, S2 ~[]E2, E2 ~byte](S1, S2) {}
+func f04[S2 ~[]E2, E1 ~byte, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f05[E1 ~byte, S2 ~[]E2, S1 ~[]E1, E2 ~byte](S1, S2) {}
+func f06[E2 ~byte, S2 ~[]E2, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f07[S2 ~[]E2, E2 ~byte, S1 ~[]E1, E1 ~byte](S1, S2) {}
+func f08[S1 ~[]E1, E2 ~byte, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f09[E2 ~byte, S1 ~[]E1, S2 ~[]E2, E1 ~byte](S1, S2) {}
+func f10[S2 ~[]E2, S1 ~[]E1, E2 ~byte, E1 ~byte](S1, S2) {}
+func f11[S1 ~[]E1, S2 ~[]E2, E2 ~byte, E1 ~byte](S1, S2) {}
+func f12[S1 ~[]E1, E1 ~byte, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f13[E1 ~byte, S1 ~[]E1, E2 ~byte, S2 ~[]E2](S1, S2) {}
+func f14[E2 ~byte, S1 ~[]E1, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f15[S1 ~[]E1, E2 ~byte, E1 ~byte, S2 ~[]E2](S1, S2) {}
+func f16[E1 ~byte, E2 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f17[E2 ~byte, E1 ~byte, S1 ~[]E1, S2 ~[]E2](S1, S2) {}
+func f18[E2 ~byte, E1 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f19[E1 ~byte, E2 ~byte, S2 ~[]E2, S1 ~[]E1](S1, S2) {}
+func f20[S2 ~[]E2, E2 ~byte, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f21[E2 ~byte, S2 ~[]E2, E1 ~byte, S1 ~[]E1](S1, S2) {}
+func f22[E1 ~byte, S2 ~[]E2, E2 ~byte, S1 ~[]E1](S1, S2) {}
+func f23[S2 ~[]E2, E1 ~byte, E2 ~byte, S1 ~[]E1](S1, S2) {}
+
+type myByte byte
+
+func _(a []byte, b []myByte) {
+ f00(a, b)
+ f01(a, b)
+ f02(a, b)
+ f03(a, b)
+ f04(a, b)
+ f05(a, b)
+ f06(a, b)
+ f07(a, b)
+ f08(a, b)
+ f09(a, b)
+ f10(a, b)
+ f11(a, b)
+ f12(a, b)
+ f13(a, b)
+ f14(a, b)
+ f15(a, b)
+ f16(a, b)
+ f17(a, b)
+ f18(a, b)
+ f19(a, b)
+ f20(a, b)
+ f21(a, b)
+ f22(a, b)
+ f23(a, b)
+}
+
+// Constraint type inference may have to iterate.
+// Again, the order of the type parameters shouldn't matter.
+
+func g0[S ~[]E, M ~map[string]S, E any](m M) {}
+func g1[M ~map[string]S, S ~[]E, E any](m M) {}
+func g2[E any, S ~[]E, M ~map[string]S](m M) {}
+func g3[S ~[]E, E any, M ~map[string]S](m M) {}
+func g4[M ~map[string]S, E any, S ~[]E](m M) {}
+func g5[E any, M ~map[string]S, S ~[]E](m M) {}
+
+func _(m map[string][]byte) {
+ g0(m)
+ g1(m)
+ g2(m)
+ g3(m)
+ g4(m)
+ g5(m)
+}
+
+// Worst-case scenario.
+// There are 10 unknown type parameters. In each iteration of
+// constraint type inference we infer one more, from right to left.
+// Each iteration looks repeatedly at all 11 type parameters,
+// requiring a total of 10*11 = 110 iterations with the current
+// implementation. Pathological case.
+
+func h[K any, J ~*K, I ~*J, H ~*I, G ~*H, F ~*G, E ~*F, D ~*E, C ~*D, B ~*C, A ~*B](x A) {}
+
+func _(x **********int) {
+ h(x)
+}
+
+// Examples with channel constraints and tilde.
+
+func ch1[P chan<- int]() (_ P) { return } // core(P) == chan<- int (single type, no tilde)
+func ch2[P ~chan int]() { return } // core(P) == ~chan<- int (tilde)
+func ch3[P chan E, E any](E) { return } // core(P) == chan<- E (single type, no tilde)
+func ch4[P chan E | ~chan<- E, E any](E) { return } // core(P) == ~chan<- E (tilde)
+func ch5[P chan int | chan<- int]() { return } // core(P) == chan<- int (not a single type)
+
+func _() {
+ // P can be inferred as there's a single specific type and no tilde.
+ var _ chan int = ch1 /* ERROR cannot use ch1.*value of type chan<- int */ ()
+ var _ chan<- int = ch1()
+
+ // P cannot be inferred as there's a tilde.
+ ch2( /* ERROR cannot infer P */ )
+ type myChan chan int
+ ch2[myChan]()
+
+ // P can be inferred as there's a single specific type and no tilde.
+ var e int
+ ch3(e)
+
+ // P cannot be inferred as there's more than one specific type and a tilde.
+ ch4( /* ERROR cannot infer P */ e)
+ _ = ch4[chan int]
+
+ // P cannot be inferred as there's more than one specific type.
+ ch5( /* ERROR cannot infer P */ )
+ ch5[chan<- int]()
+}
+
+// test case from issue
+
+func equal[M1 ~map[K1]V1, M2 ~map[K2]V2, K1, K2 ~uint32, V1, V2 ~string](m1 M1, m2 M2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || V2(v1) != v2 {
+ return false
+ }
+ }
+ return true
+}
+
+func equalFixed[K1, K2 ~uint32, V1, V2 ~string](m1 map[K1]V1, m2 map[K2]V2) bool {
+ if len(m1) != len(m2) {
+ return false
+ }
+ for k, v1 := range m1 {
+ if v2, ok := m2[K2(k)]; !ok || v1 != V1(v2) {
+ return false
+ }
+ }
+ return true
+}
+
+type (
+ someNumericID uint32
+ someStringID string
+)
+
+func _() {
+ foo := map[uint32]string{10: "bar"}
+ bar := map[someNumericID]someStringID{10: "bar"}
+ equal(foo, bar)
+}
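A small self-contained sketch of the constraint type inference exercised by the permutation tests above (plain Go 1.18; sum and myInts are illustrative names, not part of the patch): once S is known from the argument, the ~[]E constraint lets the checker infer E even though E never appears in the call.

	package p

	// sum is a made-up example; E is inferred from S via the ~[]E constraint.
	func sum[S ~[]E, E ~int](s S) int {
		total := 0
		for _, v := range s {
			total += int(v)
		}
		return total
	}

	type myInts []int

	func _() {
		_ = sum(myInts{1, 2, 3}) // S = myInts, E = int (inferred)
	}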
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2
new file mode 100644
index 0000000..3fa6a05
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51232.go2
@@ -0,0 +1,30 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn /* ERROR got 1 arguments */ [RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn func() Fn /* ERROR got 1 arguments */ [RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR got 1 arguments */ [RCT] {
+ return c.makeFn()
+}
+
+func NewConcrete[RCT RC[RG], RG any](Rc RCT) F /* ERROR got 1 arguments */ [RCT] {
+ // TODO(rfindley): eliminate the duplicate error below.
+ return & /* ERROR cannot use .* as F\[RCT\] */ concreteF /* ERROR got 1 arguments */ [RCT]{
+ makeFn: nil,
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2
new file mode 100644
index 0000000..9c15028
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51233.go2
@@ -0,0 +1,27 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// As of issue #51527, type-type inference has been disabled.
+
+type RC[RG any] interface {
+ ~[]RG
+}
+
+type Fn[RCT RC[RG], RG any] func(RCT)
+
+type FFn[RCT RC[RG], RG any] func() Fn /* ERROR got 1 arguments */ [RCT]
+
+type F[RCT RC[RG], RG any] interface {
+ Fn() Fn /* ERROR got 1 arguments */ [RCT]
+}
+
+type concreteF[RCT RC[RG], RG any] struct {
+ makeFn FFn /* ERROR got 1 arguments */ [RCT]
+}
+
+func (c *concreteF[RCT, RG]) Fn() Fn /* ERROR got 1 arguments */ [RCT] {
+ return c.makeFn()
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2
new file mode 100644
index 0000000..bc4208e
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51257.go2
@@ -0,0 +1,46 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[_ comparable]() {}
+
+type S1 struct{ x int }
+type S2 struct{ x any }
+type S3 struct{ x [10]interface{ m() } }
+
+func _[P1 comparable, P2 S2]() {
+ _ = f[S1]
+ _ = f[S2 /* ERROR S2 does not implement comparable */ ]
+ _ = f[S3 /* ERROR S3 does not implement comparable */ ]
+
+ type L1 struct { x P1 }
+ type L2 struct { x P2 }
+ _ = f[L1]
+ _ = f[L2 /* ERROR L2 does not implement comparable */ ]
+}
+
+
+// example from issue
+
+type Set[T comparable] map[T]struct{}
+
+func NewSetFromSlice[T comparable](items []T) *Set[T] {
+ s := Set[T]{}
+
+ for _, item := range items {
+ s[item] = struct{}{}
+ }
+
+ return &s
+}
+
+type T struct{ x any }
+
+func main() {
+ NewSetFromSlice( /* ERROR T does not implement comparable */ []T{
+ {"foo"},
+ {5},
+ })
+}
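For orientation, an illustrative sketch of the Go 1.18 rule behind the errors above (made-up names, not part of the patch): a struct satisfies the comparable constraint only if all of its fields do, and a field of interface type is comparable only at run time, so it does not satisfy the constraint.

	package p

	func key[K comparable](K) {}

	type idOK struct{ n int }  // all fields strictly comparable
	type idBad struct{ v any } // interface field: comparable only at run time

	func _() {
		key(idOK{1})
		// key(idBad{}) // rejected in Go 1.18: idBad does not implement comparable
	}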
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2
new file mode 100644
index 0000000..0b5a1af
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51335.go2
@@ -0,0 +1,16 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type S1 struct{}
+type S2 struct{}
+
+func _[P *S1|*S2]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
+
+func _[P *S1|S1]() {
+ _= []P{{ /* ERROR invalid composite literal element type P: no core type */ }}
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2
new file mode 100644
index 0000000..84e551d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51339.go2
@@ -0,0 +1,18 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
+
+type T[P any, B *P] struct{}
+
+func (T /* ERROR cannot use generic type */ ) m0() {}
+
+// TODO(rfindley): eliminate the duplicate errors here.
+func (T /* ERROR got 1 type parameter, but receiver base type declares 2 */ /* ERROR got 1 arguments but 2 type parameters */ [_]) m1() {}
+func (T[_, _]) m2() {}
+// TODO(gri) this error is unfortunate (issue #51343)
+func (T /* ERROR got 3 arguments but 2 type parameters */ [_, _, _]) m3() {}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go
new file mode 100644
index 0000000..447ce03
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51360.go
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _() {
+ len. /* ERROR cannot select on len */ Println
+ len. /* ERROR cannot select on len */ Println()
+ _ = len. /* ERROR cannot select on len */ Println
+ _ = len[ /* ERROR cannot index len */ 0]
+ _ = *len /* ERROR cannot indirect len */
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2
new file mode 100644
index 0000000..4eba071
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51376.go2
@@ -0,0 +1,24 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Map map[string]int
+
+func f[M ~map[K]V, K comparable, V any](M) {}
+func g[M map[K]V, K comparable, V any](M) {}
+
+func _[M1 ~map[K]V, M2 map[K]V, K comparable, V any]() {
+ var m1 M1
+ f(m1)
+ g( /* ERROR M1 does not implement map\[K\]V */ m1) // M1 has tilde
+
+ var m2 M2
+ f(m2)
+	g(m2) // M2 does not have tilde
+
+ var m3 Map
+ f(m3)
+ g( /* ERROR Map does not implement map\[string\]int */ m3) // M in g does not have tilde
+}
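A short sketch of the tilde distinction this test checks (assumed Go 1.18 semantics; Dict, anyMap, and exactMap are illustrative names): ~map[K]V admits any type whose underlying type is such a map, while map[K]V admits only the unnamed map type itself, so a defined map type needs a conversion.

	package p

	type Dict map[string]int

	func anyMap[M ~map[K]V, K comparable, V any](M)  {}
	func exactMap[M map[K]V, K comparable, V any](M) {}

	func _(d Dict) {
		anyMap(d)                   // ok: the underlying type of Dict is map[string]int
		// exactMap(d)              // rejected: Dict is a defined type, the constraint has no tilde
		exactMap(map[string]int(d)) // converting to the unnamed map type is accepted
	}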
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2
new file mode 100644
index 0000000..ef62239
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51386.go2
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type myString string
+
+func _[P ~string | ~[]byte | ~[]rune]() {
+ _ = P("")
+ const s myString = ""
+ _ = P(s)
+}
+
+func _[P myString]() {
+ _ = P("")
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go
new file mode 100644
index 0000000..3762615
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51437.go
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T struct{}
+
+func (T) m() []int { return nil }
+
+func f(x T) {
+ for _, x := range func() []int {
+ return x.m() // x declared in parameter list of f
+ }() {
+ _ = x // x declared by range clause
+ }
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51472.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51472.go2
new file mode 100644
index 0000000..f19d906
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51472.go2
@@ -0,0 +1,54 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func _[T comparable](x T) {
+ _ = x == x
+}
+
+func _[T interface{interface{comparable}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; interface{comparable}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; ~int}](x T) {
+ _ = x == x
+}
+
+func _[T interface{comparable; ~[]byte}](x T) {
+ _ = x /* ERROR cannot compare */ == x
+}
+
+// TODO(gri) The error message here should be better. See issue #51525.
+func _[T interface{comparable; ~int; ~string}](x T) {
+ _ = x /* ERROR cannot compare */ == x
+}
+
+// TODO(gri) The error message here should be better. See issue #51525.
+func _[T interface{~int; ~string}](x T) {
+ _ = x /* ERROR cannot compare */ == x
+}
+
+func _[T interface{comparable; interface{~int}; interface{int|float64}}](x T) {
+ _ = x == x
+}
+
+func _[T interface{interface{comparable; ~int}; interface{~float64; comparable; m()}}](x T) {
+ _ = x /* ERROR cannot compare */ == x
+}
+
+// test case from issue
+
+func f[T interface{comparable; []byte|string}](x T) {
+ _ = x == x
+}
+
+func _(s []byte) {
+ f( /* ERROR \[\]byte does not implement interface{comparable; \[\]byte\|string} */ s)
+ _ = f[[ /* ERROR does not implement */ ]byte]
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51509.go b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51509.go
new file mode 100644
index 0000000..5ae4717
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51509.go
@@ -0,0 +1,7 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type T /* ERROR illegal cycle */ T.x
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51578.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51578.go2
new file mode 100644
index 0000000..5c204ba
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51578.go2
@@ -0,0 +1,17 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+var _ = (*interface /* ERROR interface contains type constraints */ {int})(nil)
+
+// abbreviated test case from issue
+
+type TypeSet interface{ int | string }
+
+func _() {
+ f((*TypeSet /* ERROR interface contains type constraints */)(nil))
+}
+
+func f(any) {} \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51593.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51593.go2
new file mode 100644
index 0000000..d323618
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51593.go2
@@ -0,0 +1,13 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+func f[P interface{ m(R) }, R any]() {}
+
+type T = interface { m(int) }
+
+func _() {
+ _ = f[ /* ERROR cannot infer R */ T] // don't crash in type inference
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51607.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51607.go2
new file mode 100644
index 0000000..d8df143
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51607.go2
@@ -0,0 +1,65 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+// Interface types must be ignored during overlap test.
+
+type (
+ T1 interface{int}
+ T2 interface{~int}
+ T3 interface{T1 | bool | string}
+ T4 interface{T2 | ~bool | ~string}
+)
+
+type (
+ // overlap errors for non-interface terms
+ // (like the interface terms, but explicitly inlined)
+ _ interface{int | int /* ERROR overlapping terms int and int */ }
+ _ interface{int | ~ /* ERROR overlapping terms ~int and int */ int}
+ _ interface{~int | int /* ERROR overlapping terms int and ~int */ }
+ _ interface{~int | ~ /* ERROR overlapping terms ~int and ~int */ int}
+
+ _ interface{T1 | bool | string | T1 | bool /* ERROR overlapping terms bool and bool */ | string /* ERROR overlapping terms string and string */ }
+ _ interface{T1 | bool | string | T2 | ~ /* ERROR overlapping terms ~bool and bool */ bool | ~ /* ERROR overlapping terms ~string and string */ string}
+
+ // no errors for interface terms
+ _ interface{T1 | T1}
+ _ interface{T1 | T2}
+ _ interface{T2 | T1}
+ _ interface{T2 | T2}
+
+ _ interface{T3 | T3 | int}
+ _ interface{T3 | T4 | bool }
+ _ interface{T4 | T3 | string }
+ _ interface{T4 | T4 | float64 }
+)
+
+func _[_ T1 | bool | string | T1 | bool /* ERROR overlapping terms */ ]() {}
+func _[_ T1 | bool | string | T2 | ~ /* ERROR overlapping terms */ bool ]() {}
+func _[_ T2 | ~bool | ~string | T1 | bool /* ERROR overlapping terms */ ]() {}
+func _[_ T2 | ~bool | ~string | T2 | ~ /* ERROR overlapping terms */ bool ]() {}
+
+func _[_ T3 | T3 | int]() {}
+func _[_ T3 | T4 | bool]() {}
+func _[_ T4 | T3 | string]() {}
+func _[_ T4 | T4 | float64]() {}
+
+// test cases from issue
+
+type _ interface {
+ interface {bool | int} | interface {bool | string}
+}
+
+type _ interface {
+ interface {bool | int} ; interface {bool | string}
+}
+
+type _ interface {
+ interface {bool; int} ; interface {bool; string}
+}
+
+type _ interface {
+ interface {bool; int} | interface {bool; string}
+} \ No newline at end of file
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51658.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51658.go2
new file mode 100644
index 0000000..c437c92
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue51658.go2
@@ -0,0 +1,39 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type F { // ERROR syntax error
+ float64
+} // ERROR syntax error
+
+func _[T F | int](x T) {
+ _ = x == 0 // don't crash when recording type of 0
+}
+
+// test case from issue
+
+type FloatType { // ERROR syntax error
+ float32 | float64
+} // ERROR syntax error
+
+type IntegerType interface {
+ int8 | int16 | int32 | int64 | int |
+ uint8 | uint16 | uint32 | uint64 | uint
+}
+
+type ComplexType interface {
+ complex64 | complex128
+}
+
+type Number interface {
+ FloatType | IntegerType | ComplexType
+}
+
+func GetDefaultNumber[T Number](value, defaultValue T) T {
+ if value == 0 {
+ return defaultValue
+ }
+ return value
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52529.go2 b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52529.go2
new file mode 100644
index 0000000..de7b296
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue52529.go2
@@ -0,0 +1,15 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+type Foo[P any] struct {
+ _ *Bar[P]
+}
+
+type Bar[Q any] Foo[Q]
+
+func (v *Bar[R]) M() {
+ _ = (*Foo[R])(v)
+}
diff --git a/src/cmd/compile/internal/types2/testdata/fixedbugs/issue6977.src b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue6977.src
new file mode 100644
index 0000000..8f4e9ba
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/fixedbugs/issue6977.src
@@ -0,0 +1,82 @@
+// Copyright 2019 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package p
+
+import "io"
+
+// Alan's initial report.
+
+type I interface { f(); String() string }
+type J interface { g(); String() string }
+
+type IJ1 = interface { I; J }
+type IJ2 = interface { f(); g(); String() string }
+
+var _ = (*IJ1)(nil) == (*IJ2)(nil) // static assert that IJ1 and IJ2 are identical types
+
+// The canonical example.
+
+type ReadWriteCloser interface { io.ReadCloser; io.WriteCloser }
+
+// Some more cases.
+
+type M interface { m() }
+type M32 interface { m() int32 }
+type M64 interface { m() int64 }
+
+type U1 interface { m() }
+type U2 interface { m(); M }
+type U3 interface { M; m() }
+type U4 interface { M; M; M }
+type U5 interface { U1; U2; U3; U4 }
+
+type U6 interface { m(); m /* ERROR duplicate method */ () }
+type U7 interface { M32 /* ERROR duplicate method */ ; m() }
+type U8 interface { m(); M32 /* ERROR duplicate method */ }
+type U9 interface { M32; M64 /* ERROR duplicate method */ }
+
+// Verify that repeated embedding of the same interface(s)
+// eliminates duplicate methods early (rather than at the
+// end) to prevent exponential memory and time use.
+// Without early elimination, computing T29 may take dozens
+// of minutes.
+type (
+ T0 interface { m() }
+ T1 interface { T0; T0 }
+ T2 interface { T1; T1 }
+ T3 interface { T2; T2 }
+ T4 interface { T3; T3 }
+ T5 interface { T4; T4 }
+ T6 interface { T5; T5 }
+ T7 interface { T6; T6 }
+ T8 interface { T7; T7 }
+ T9 interface { T8; T8 }
+
+ T10 interface { T9; T9 }
+ T11 interface { T10; T10 }
+ T12 interface { T11; T11 }
+ T13 interface { T12; T12 }
+ T14 interface { T13; T13 }
+ T15 interface { T14; T14 }
+ T16 interface { T15; T15 }
+ T17 interface { T16; T16 }
+ T18 interface { T17; T17 }
+ T19 interface { T18; T18 }
+
+ T20 interface { T19; T19 }
+ T21 interface { T20; T20 }
+ T22 interface { T21; T21 }
+ T23 interface { T22; T22 }
+ T24 interface { T23; T23 }
+ T25 interface { T24; T24 }
+ T26 interface { T25; T25 }
+ T27 interface { T26; T26 }
+ T28 interface { T27; T27 }
+ T29 interface { T28; T28 }
+)
+
+// Verify that m is present.
+var x T29
+var _ = x.m
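For context, a user-level sketch of the duplicate-method rule above (illustrative names, not part of the patch): repeating an identical method through embedding is fine, while re-declaring a method with a different signature is rejected.

	package p

	type Closer interface{ Close() error }

	type ReadCloser interface {
		Closer
		Closer // repeated embedding of the same interface is allowed
		Read(p []byte) (int, error)
	}

	// type Bad interface { Closer; Close() } // rejected: duplicate method Close with a different signature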
diff --git a/src/cmd/compile/internal/types2/testdata/manual.go2 b/src/cmd/compile/internal/types2/testdata/manual.go2
new file mode 100644
index 0000000..96d4ba6
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/manual.go2
@@ -0,0 +1,8 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file is tested when running "go test -run Manual"
+// without source arguments. Use for one-off debugging.
+
+package p
diff --git a/src/cmd/compile/internal/types2/testdata/spec/assignability.go2 b/src/cmd/compile/internal/types2/testdata/spec/assignability.go2
new file mode 100644
index 0000000..507fe6d
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/spec/assignability.go2
@@ -0,0 +1,264 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package assignability
+
+// See the end of this package for the declarations
+// of the types and variables used in these tests.
+
+// "x's type is identical to T"
+func _[TP any](X TP) {
+ b = b
+ a = a
+ l = l
+ s = s
+ p = p
+ f = f
+ i = i
+ m = m
+ c = c
+ d = d
+
+ B = B
+ A = A
+ L = L
+ S = S
+ P = P
+ F = F
+ I = I
+ M = M
+ C = C
+ D = D
+ X = X
+}
+
+// "x's type V and T have identical underlying types
+// and at least one of V or T is not a named type."
+// (here a named type is a type with a name)
+func _[TP1, TP2 Interface](X1 TP1, X2 TP2) {
+ b = B // ERROR cannot use B .* as int value
+ a = A
+ l = L
+ s = S
+ p = P
+ f = F
+ i = I
+ m = M
+ c = C
+ d = D
+
+ B = b // ERROR cannot use b .* as Basic value
+ A = a
+ L = l
+ S = s
+ P = p
+ F = f
+ I = i
+ M = m
+ C = c
+ D = d
+ X1 = i // ERROR cannot use i .* as TP1 value
+ X1 = X2 // ERROR cannot use X2 .* as TP1 value
+}
+
+// "T is an interface type and x implements T and T is not a type parameter"
+func _[TP Interface](X TP) {
+ i = d // ERROR missing method m
+ i = D
+ i = X
+ X = i // ERROR cannot use i .* as TP value
+}
+
+// "x is a bidirectional channel value, T is a channel type, x's type V and T have identical element types, and at least one of V or T is not a named type"
+// (here a named type is a type with a name)
+type (
+ _SendChan = chan<- int
+ _RecvChan = <-chan int
+
+ SendChan _SendChan
+ RecvChan _RecvChan
+)
+
+func _[
+ _CC ~_Chan,
+ _SC ~_SendChan,
+ _RC ~_RecvChan,
+
+ CC Chan,
+ SC SendChan,
+ RC RecvChan,
+]() {
+ var (
+ _ _SendChan = c
+ _ _RecvChan = c
+ _ _Chan = c
+
+ _ _SendChan = C
+ _ _RecvChan = C
+ _ _Chan = C
+
+ _ SendChan = c
+ _ RecvChan = c
+ _ Chan = c
+
+ _ SendChan = C // ERROR cannot use C .* as SendChan value
+ _ RecvChan = C // ERROR cannot use C .* as RecvChan value
+ _ Chan = C
+ _ Chan = make /* ERROR cannot use make\(chan Basic\) .* as Chan value */ (chan Basic)
+ )
+
+ var (
+ _ _CC = C // ERROR cannot use C .* as _CC value
+ _ _SC = C // ERROR cannot use C .* as _SC value
+ _ _RC = C // ERROR cannot use C .* as _RC value
+
+ _ CC = _CC /* ERROR cannot use _CC\(nil\) .* as CC value */ (nil)
+ _ SC = _CC /* ERROR cannot use _CC\(nil\) .* as SC value */ (nil)
+ _ RC = _CC /* ERROR cannot use _CC\(nil\) .* as RC value */ (nil)
+
+ _ CC = C // ERROR cannot use C .* as CC value
+ _ SC = C // ERROR cannot use C .* as SC value
+ _ RC = C // ERROR cannot use C .* as RC value
+ )
+}
+
+// "x's type V is not a named type and T is a type parameter, and x is assignable to each specific type in T's type set."
+func _[
+ TP0 any,
+ TP1 ~_Chan,
+ TP2 ~chan int | ~chan byte,
+]() {
+ var (
+ _ TP0 = c // ERROR cannot use c .* as TP0 value
+ _ TP0 = C // ERROR cannot use C .* as TP0 value
+ _ TP1 = c
+ _ TP1 = C // ERROR cannot use C .* as TP1 value
+ _ TP2 = c // ERROR .* cannot assign chan int to chan byte
+ )
+}
+
+// "x's type V is a type parameter and T is not a named type, and values x' of each specific type in V's type set are assignable to T."
+func _[
+ TP0 Interface,
+ TP1 ~_Chan,
+ TP2 ~chan int | ~chan byte,
+](X0 TP0, X1 TP1, X2 TP2) {
+ i = X0
+ I = X0
+ c = X1
+ C = X1 // ERROR cannot use X1 .* as Chan value
+ c = X2 // ERROR .* cannot assign chan byte \(in TP2\) to chan int
+}
+
+// "x is the predeclared identifier nil and T is a pointer, function, slice, map, channel, or interface type"
+func _[TP Interface](X TP) {
+ b = nil // ERROR cannot use nil
+ a = nil // ERROR cannot use nil
+ l = nil
+ s = nil // ERROR cannot use nil
+ p = nil
+ f = nil
+ i = nil
+ m = nil
+ c = nil
+ d = nil // ERROR cannot use nil
+
+ B = nil // ERROR cannot use nil
+ A = nil // ERROR cannot use nil
+ L = nil
+ S = nil // ERROR cannot use nil
+ P = nil
+ F = nil
+ I = nil
+ M = nil
+ C = nil
+ D = nil // ERROR cannot use nil
+ X = nil // ERROR cannot use nil
+}
+
+// "x is an untyped constant representable by a value of type T"
+func _[
+ Int8 ~int8,
+ Int16 ~int16,
+ Int32 ~int32,
+ Int64 ~int64,
+ Int8_16 ~int8 | ~int16,
+](
+ i8 Int8,
+ i16 Int16,
+ i32 Int32,
+ i64 Int64,
+ i8_16 Int8_16,
+) {
+ b = 42
+ b = 42.0
+ // etc.
+
+ i8 = -1 << 7
+ i8 = 1<<7 - 1
+ i16 = -1 << 15
+ i16 = 1<<15 - 1
+ i32 = -1 << 31
+ i32 = 1<<31 - 1
+ i64 = -1 << 63
+ i64 = 1<<63 - 1
+
+ i8_16 = -1 << 7
+ i8_16 = 1<<7 - 1
+ i8_16 = - /* ERROR cannot use .* as Int8_16 */ 1 << 15
+ i8_16 = 1 /* ERROR cannot use .* as Int8_16 */ <<15 - 1
+}
+
+// proto-types for tests
+
+type (
+ _Basic = int
+ _Array = [10]int
+ _Slice = []int
+ _Struct = struct{ f int }
+ _Pointer = *int
+ _Func = func(x int) string
+ _Interface = interface{ m() int }
+ _Map = map[string]int
+ _Chan = chan int
+
+ Basic _Basic
+ Array _Array
+ Slice _Slice
+ Struct _Struct
+ Pointer _Pointer
+ Func _Func
+ Interface _Interface
+ Map _Map
+ Chan _Chan
+ Defined _Struct
+)
+
+func (Defined) m() int
+
+// proto-variables for tests
+
+var (
+ b _Basic
+ a _Array
+ l _Slice
+ s _Struct
+ p _Pointer
+ f _Func
+ i _Interface
+ m _Map
+ c _Chan
+ d _Struct
+
+ B Basic
+ A Array
+ L Slice
+ S Struct
+ P Pointer
+ F Func
+ I Interface
+ M Map
+ C Chan
+ D Defined
+)
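A brief sketch of the untyped-constant rule exercised near the end of the file above (plain Go 1.18, illustrative only): an untyped constant may be assigned to a type-parameter variable only if it is representable by every type in the constraint's type set.

	package p

	func _[T ~int8 | ~int16]() {
		var _ T = 127   // representable by both int8 and int16
		// var _ T = 128 // rejected: 128 is not representable by int8
	}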
diff --git a/src/cmd/compile/internal/types2/testdata/spec/comparisons.go2 b/src/cmd/compile/internal/types2/testdata/spec/comparisons.go2
new file mode 100644
index 0000000..62c95d4
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/spec/comparisons.go2
@@ -0,0 +1,120 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package comparisons
+
+type (
+ B int // basic type representative
+ A [10]func()
+ L []byte
+ S struct{ f []byte }
+ P *S
+ F func()
+ I interface{}
+ M map[string]int
+ C chan int
+)
+
+var (
+ b B
+ a A
+ l L
+ s S
+ p P
+ f F
+ i I
+ m M
+ c C
+)
+
+func _() {
+ _ = nil == nil // ERROR operator == not defined on untyped nil
+ _ = b == b
+ _ = a /* ERROR \[10\]func\(\) cannot be compared */ == a
+ _ = l /* ERROR slice can only be compared to nil */ == l
+ _ = s /* ERROR struct containing \[\]byte cannot be compared */ == s
+ _ = p == p
+ _ = f /* ERROR func can only be compared to nil */ == f
+ _ = i == i
+ _ = m /* ERROR map can only be compared to nil */ == m
+ _ = c == c
+
+ _ = b /* ERROR mismatched types */ == nil
+ _ = a /* ERROR mismatched types */ == nil
+ _ = l == nil
+ _ = s /* ERROR mismatched types */ == nil
+ _ = p == nil
+ _ = f == nil
+ _ = i == nil
+ _ = m == nil
+ _ = c == nil
+
+ _ = nil /* ERROR operator < not defined on untyped nil */ < nil
+ _ = b < b
+ _ = a /* ERROR operator < not defined on array */ < a
+ _ = l /* ERROR operator < not defined on slice */ < l
+ _ = s /* ERROR operator < not defined on struct */ < s
+ _ = p /* ERROR operator < not defined on pointer */ < p
+ _ = f /* ERROR operator < not defined on func */ < f
+ _ = i /* ERROR operator < not defined on interface */ < i
+ _ = m /* ERROR operator < not defined on map */ < m
+ _ = c /* ERROR operator < not defined on chan */ < c
+}
+
+func _[
+ B int,
+ A [10]func(),
+ L []byte,
+ S struct{ f []byte },
+ P *S,
+ F func(),
+ I interface{},
+ J comparable,
+ M map[string]int,
+ C chan int,
+] (
+ b B,
+ a A,
+ l L,
+ s S,
+ p P,
+ f F,
+ i I,
+ j J,
+ m M,
+ c C,
+) {
+ _ = b == b
+ _ = a /* ERROR type parameter A is not comparable with == */ == a
+ _ = l /* ERROR type parameter L is not comparable with == */ == l
+ _ = s /* ERROR type parameter S is not comparable with == */ == s
+ _ = p == p
+ _ = f /* ERROR type parameter F is not comparable with == */ == f
+ _ = i /* ERROR type parameter I is not comparable with == */ == i
+ _ = j == j
+ _ = m /* ERROR type parameter M is not comparable with == */ == m
+ _ = c == c
+
+ _ = b /* ERROR mismatched types */ == nil
+ _ = a /* ERROR mismatched types */ == nil
+ _ = l == nil
+ _ = s /* ERROR mismatched types */ == nil
+ _ = p == nil
+ _ = f == nil
+ _ = i /* ERROR mismatched types */ == nil
+ _ = j /* ERROR mismatched types */ == nil
+ _ = m == nil
+ _ = c == nil
+
+ _ = b < b
+ _ = a /* ERROR type parameter A is not comparable with < */ < a
+ _ = l /* ERROR type parameter L is not comparable with < */ < l
+ _ = s /* ERROR type parameter S is not comparable with < */ < s
+ _ = p /* ERROR type parameter P is not comparable with < */ < p
+ _ = f /* ERROR type parameter F is not comparable with < */ < f
+ _ = i /* ERROR type parameter I is not comparable with < */ < i
+ _ = j /* ERROR type parameter J is not comparable with < */ < j
+ _ = m /* ERROR type parameter M is not comparable with < */ < m
+ _ = c /* ERROR type parameter C is not comparable with < */ < c
+}
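One user-level sketch of the split encoded above (illustrative names, not part of the patch): == on type-parameter values needs a constraint that guarantees comparability, such as comparable, while ordering operators need every type in the type set to be ordered.

	package p

	func eq[T comparable](a, b T) bool       { return a == b } // ok
	func less[T ~int | ~string](a, b T) bool { return a < b }  // ok: every term is ordered
	// func eqAny[T any](a, b T) bool { return a == b }        // rejected: any does not imply comparable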
diff --git a/src/cmd/compile/internal/types2/testdata/spec/conversions.go2 b/src/cmd/compile/internal/types2/testdata/spec/conversions.go2
new file mode 100644
index 0000000..fde332f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/testdata/spec/conversions.go2
@@ -0,0 +1,178 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package conversions
+
+import "unsafe"
+
+// constant conversions
+
+func _[T ~byte]() T { return 255 }
+func _[T ~byte]() T { return 256 /* ERROR cannot use 256 .* as T value */ }
+
+func _[T ~byte]() {
+ const _ = T /* ERROR T\(0\) .* is not constant */ (0)
+ var _ T = 255
+ var _ T = 256 // ERROR cannot use 256 .* as T value
+}
+
+func _[T ~string]() T { return T('a') }
+func _[T ~int | ~string]() T { return T('a') }
+func _[T ~byte | ~int | ~string]() T { return T(256 /* ERROR cannot convert 256 .* to T */ ) }
+
+// implicit conversions never convert to string
+func _[T ~string]() {
+ var _ string = 0 // ERROR cannot use .* as string value
+ var _ T = 0 // ERROR cannot use .* as T value
+}
+
+// failing const conversions of constants to type parameters report a cause
+func _[
+ T1 any,
+ T2 interface{ m() },
+ T3 ~int | ~float64 | ~bool,
+ T4 ~int | ~string,
+]() {
+ _ = T1(0 /* ERROR cannot convert 0 .* to T1\n\tT1 does not contain specific types */ )
+ _ = T2(1 /* ERROR cannot convert 1 .* to T2\n\tT2 does not contain specific types */ )
+ _ = T3(2 /* ERROR cannot convert 2 .* to T3\n\tcannot convert 2 .* to bool \(in T3\) */ )
+ _ = T4(3.14 /* ERROR cannot convert 3.14 .* to T4\n\tcannot convert 3.14 .* to int \(in T4\) */ )
+}
+
+// "x is assignable to T"
+// - tested via assignability tests
+
+// "x's type and T have identical underlying types if tags are ignored"
+
+func _[X ~int, T ~int](x X) T { return T(x) }
+func _[X struct{f int "foo"}, T struct{f int "bar"}](x X) T { return T(x) }
+
+type Foo struct{f int "foo"}
+type Bar struct{f int "bar"}
+type Far struct{f float64 }
+
+func _[X Foo, T Bar](x X) T { return T(x) }
+func _[X Foo|Bar, T Bar](x X) T { return T(x) }
+func _[X Foo, T Foo|Bar](x X) T { return T(x) }
+func _[X Foo, T Far](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by Foo\) to T\n\tcannot convert Foo \(in X\) to Far \(in T\) */ ) }
+
+// "x's type and T are unnamed pointer types and their pointer base types
+// have identical underlying types if tags are ignored"
+
+func _[X ~*Foo, T ~*Bar](x X) T { return T(x) }
+func _[X ~*Foo|~*Bar, T ~*Bar](x X) T { return T(x) }
+func _[X ~*Foo, T ~*Foo|~*Bar](x X) T { return T(x) }
+func _[X ~*Foo, T ~*Far](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by ~\*Foo\) to T\n\tcannot convert \*Foo \(in X\) to \*Far \(in T\) */ ) }
+
+// Verify that the defined types in constraints are considered for the rule above.
+
+type (
+ B int
+ C int
+ X0 *B
+ T0 *C
+)
+
+func _(x X0) T0 { return T0(x /* ERROR cannot convert */ ) } // non-generic reference
+func _[X X0, T T0](x X) T { return T(x /* ERROR cannot convert */ ) }
+func _[T T0](x X0) T { return T(x /* ERROR cannot convert */ ) }
+func _[X X0](x X) T0 { return T0(x /* ERROR cannot convert */ ) }
+
+// "x's type and T are both integer or floating point types"
+
+func _[X Integer, T Integer](x X) T { return T(x) }
+func _[X Unsigned, T Integer](x X) T { return T(x) }
+func _[X Float, T Integer](x X) T { return T(x) }
+
+func _[X Integer, T Unsigned](x X) T { return T(x) }
+func _[X Unsigned, T Unsigned](x X) T { return T(x) }
+func _[X Float, T Unsigned](x X) T { return T(x) }
+
+func _[X Integer, T Float](x X) T { return T(x) }
+func _[X Unsigned, T Float](x X) T { return T(x) }
+func _[X Float, T Float](x X) T { return T(x) }
+
+func _[X, T Integer|Unsigned|Float](x X) T { return T(x) }
+func _[X, T Integer|~string](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by Integer\|~string\) to T\n\tcannot convert string \(in X\) to int \(in T\) */ ) }
+
+// "x's type and T are both complex types"
+
+func _[X, T Complex](x X) T { return T(x) }
+func _[X, T Float|Complex](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by Float\|Complex\) to T\n\tcannot convert float32 \(in X\) to complex64 \(in T\) */ ) }
+
+// "x is an integer or a slice of bytes or runes and T is a string type"
+
+type myInt int
+type myString string
+
+func _[T ~string](x int) T { return T(x) }
+func _[T ~string](x myInt) T { return T(x) }
+func _[X Integer](x X) string { return string(x) }
+func _[X Integer](x X) myString { return myString(x) }
+func _[X Integer](x X) *string { return (*string)(x /* ERROR cannot convert x \(variable of type X constrained by Integer\) to \*string\n\tcannot convert int \(in X\) to \*string */ ) }
+
+func _[T ~string](x []byte) T { return T(x) }
+func _[T ~string](x []rune) T { return T(x) }
+func _[X ~[]byte, T ~string](x X) T { return T(x) }
+func _[X ~[]rune, T ~string](x X) T { return T(x) }
+func _[X Integer|~[]byte|~[]rune, T ~string](x X) T { return T(x) }
+func _[X Integer|~[]byte|~[]rune, T ~*string](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by Integer\|~\[\]byte\|~\[\]rune\) to T\n\tcannot convert int \(in X\) to \*string \(in T\) */ ) }
+
+// "x is a string and T is a slice of bytes or runes"
+
+func _[T ~[]byte](x string) T { return T(x) }
+func _[T ~[]rune](x string) T { return T(x) }
+func _[T ~[]rune](x *string) T { return T(x /* ERROR cannot convert x \(variable of type \*string\) to T\n\tcannot convert \*string to \[\]rune \(in T\) */ ) }
+
+func _[X ~string, T ~[]byte](x X) T { return T(x) }
+func _[X ~string, T ~[]rune](x X) T { return T(x) }
+func _[X ~string, T ~[]byte|~[]rune](x X) T { return T(x) }
+func _[X ~*string, T ~[]byte|~[]rune](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by ~\*string\) to T\n\tcannot convert \*string \(in X\) to \[\]byte \(in T\) */ ) }
+
+// package unsafe:
+// "any pointer or value of underlying type uintptr can be converted into a unsafe.Pointer"
+
+type myUintptr uintptr
+
+func _[X ~uintptr](x X) unsafe.Pointer { return unsafe.Pointer(x) }
+func _[T unsafe.Pointer](x myUintptr) T { return T(x) }
+func _[T unsafe.Pointer](x int64) T { return T(x /* ERROR cannot convert x \(variable of type int64\) to T\n\tcannot convert int64 to unsafe\.Pointer \(in T\) */ ) }
+
+// "and vice versa"
+
+func _[T ~uintptr](x unsafe.Pointer) T { return T(x) }
+func _[X unsafe.Pointer](x X) uintptr { return uintptr(x) }
+func _[X unsafe.Pointer](x X) myUintptr { return myUintptr(x) }
+func _[X unsafe.Pointer](x X) int64 { return int64(x /* ERROR cannot convert x \(variable of type X constrained by unsafe\.Pointer\) to int64\n\tcannot convert unsafe\.Pointer \(in X\) to int64 */ ) }
+
+// "x is a slice, T is a pointer-to-array type,
+// and the slice and array types have identical element types."
+
+func _[X ~[]E, T ~*[10]E, E any](x X) T { return T(x) }
+func _[X ~[]E, T ~[10]E, E any](x X) T { return T(x /* ERROR cannot convert x \(variable of type X constrained by ~\[\]E\) to T\n\tcannot convert \[\]E \(in X\) to \[10\]E \(in T\) */ ) }
+
+// ----------------------------------------------------------------------------
+// The following declarations can be replaced by the exported types of the
+// constraints package once all builders support importing interfaces with
+// type constraints.
+
+type Signed interface {
+ ~int | ~int8 | ~int16 | ~int32 | ~int64
+}
+
+type Unsigned interface {
+ ~uint | ~uint8 | ~uint16 | ~uint32 | ~uint64 | ~uintptr
+}
+
+type Integer interface {
+ Signed | Unsigned
+}
+
+type Float interface {
+ ~float32 | ~float64
+}
+
+type Complex interface {
+ ~complex64 | ~complex128
+}
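A concrete sketch of the string-conversion rule tested above (plain Go 1.18; toString and myBytes are illustrative names): a conversion on a type-parameter operand is valid only if it would be valid for every type in the operand's type set.

	package p

	type myBytes []byte

	func toString[X ~[]byte | ~[]rune](x X) string {
		return string(x) // valid for every type in X's type set
	}

	func _() {
		_ = toString(myBytes("hi"))
		_ = toString([]rune{'h', 'i'})
	}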
diff --git a/src/cmd/compile/internal/types2/tuple.go b/src/cmd/compile/internal/types2/tuple.go
new file mode 100644
index 0000000..1356aae
--- /dev/null
+++ b/src/cmd/compile/internal/types2/tuple.go
@@ -0,0 +1,34 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Tuple represents an ordered list of variables; a nil *Tuple is a valid (empty) tuple.
+// Tuples are used as components of signatures and to represent the type of multiple
+// assignments; they are not first class types of Go.
+type Tuple struct {
+ vars []*Var
+}
+
+// NewTuple returns a new tuple for the given variables.
+func NewTuple(x ...*Var) *Tuple {
+ if len(x) > 0 {
+ return &Tuple{vars: x}
+ }
+ return nil
+}
+
+// Len returns the number of variables of tuple t.
+func (t *Tuple) Len() int {
+ if t != nil {
+ return len(t.vars)
+ }
+ return 0
+}
+
+// At returns the i'th variable of tuple t.
+func (t *Tuple) At(i int) *Var { return t.vars[i] }
+
+func (t *Tuple) Underlying() Type { return t }
+func (t *Tuple) String() string { return TypeString(t, nil) }
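Because types2 is an internal package, a hedged usage sketch via go/types (which exposes the same Tuple API) may help illustrate the nil-is-empty convention documented above.

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		x := types.NewVar(token.NoPos, nil, "x", types.Typ[types.Int])
		y := types.NewVar(token.NoPos, nil, "y", types.Typ[types.String])
		t := types.NewTuple(x, y)
		fmt.Println(t.Len(), t.At(1).Name()) // 2 y

		var empty *types.Tuple   // a nil *Tuple is a valid empty tuple
		fmt.Println(empty.Len()) // 0
	}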
diff --git a/src/cmd/compile/internal/types2/type.go b/src/cmd/compile/internal/types2/type.go
new file mode 100644
index 0000000..0fe39db
--- /dev/null
+++ b/src/cmd/compile/internal/types2/type.go
@@ -0,0 +1,124 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A Type represents a type of Go.
+// All types implement the Type interface.
+type Type interface {
+ // Underlying returns the underlying type of a type.
+ Underlying() Type
+
+ // String returns a string representation of a type.
+ String() string
+}
+
+// under returns the true expanded underlying type.
+// If it doesn't exist, the result is Typ[Invalid].
+// under must only be called when a type is known
+// to be fully set up.
+func under(t Type) Type {
+ if t, _ := t.(*Named); t != nil {
+ return t.under()
+ }
+ return t.Underlying()
+}
+
+// If t is not a type parameter, coreType returns the underlying type.
+// If t is a type parameter, coreType returns the single underlying
+// type of all types in its type set if it exists, or nil otherwise. If the
+// type set contains only unrestricted and restricted channel types (with
+// identical element types), the single underlying type is the restricted
+// channel type if the restrictions are always the same, or nil otherwise.
+func coreType(t Type) Type {
+ tpar, _ := t.(*TypeParam)
+ if tpar == nil {
+ return under(t)
+ }
+
+ var su Type
+ if tpar.underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return false
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ return true
+ }) {
+ return su
+ }
+ return nil
+}
+
+// coreString is like coreType but also considers []byte
+// and strings as identical. In this case, if successful and we saw
+// a string, the result is of type (possibly untyped) string.
+func coreString(t Type) Type {
+ tpar, _ := t.(*TypeParam)
+ if tpar == nil {
+ return under(t) // string or untyped string
+ }
+
+ var su Type
+ hasString := false
+ if tpar.underIs(func(u Type) bool {
+ if u == nil {
+ return false
+ }
+ if isString(u) {
+ u = NewSlice(universeByte)
+ hasString = true
+ }
+ if su != nil {
+ u = match(su, u)
+ if u == nil {
+ return false
+ }
+ }
+ // su == nil || match(su, u) != nil
+ su = u
+ return true
+ }) {
+ if hasString {
+ return Typ[String]
+ }
+ return su
+ }
+ return nil
+}
+
+// If x and y are identical, match returns x.
+// If x and y are identical channels but for their direction
+// and one of them is unrestricted, match returns the channel
+// with the restricted direction.
+// In all other cases, match returns nil.
+func match(x, y Type) Type {
+ // Common case: we don't have channels.
+ if Identical(x, y) {
+ return x
+ }
+
+ // We may have channels that differ in direction only.
+ if x, _ := x.(*Chan); x != nil {
+ if y, _ := y.(*Chan); y != nil && Identical(x.elem, y.elem) {
+ // We have channels that differ in direction only.
+ // If there's an unrestricted channel, select the restricted one.
+ switch {
+ case x.dir == SendRecv:
+ return y
+ case y.dir == SendRecv:
+ return x
+ }
+ }
+ }
+
+ // types are different
+ return nil
+}
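The channel-direction handling in match/coreType above is what allows the following user-level sketch to type-check (ordinary Go 1.18, illustrative only): the core type of a union of a bidirectional and a send-only channel is the send-only channel, so sends are permitted but receives are not.

	package p

	func send[C chan int | chan<- int](c C, v int) {
		c <- v // ok: the core type of C is chan<- int
		// _ = <-c // rejected: the core type does not permit receives
	}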
diff --git a/src/cmd/compile/internal/types2/typelists.go b/src/cmd/compile/internal/types2/typelists.go
new file mode 100644
index 0000000..a2aba4a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typelists.go
@@ -0,0 +1,69 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// TypeParamList holds a list of type parameters.
+type TypeParamList struct{ tparams []*TypeParam }
+
+// Len returns the number of type parameters in the list.
+// It is safe to call on a nil receiver.
+func (l *TypeParamList) Len() int { return len(l.list()) }
+
+// At returns the i'th type parameter in the list.
+func (l *TypeParamList) At(i int) *TypeParam { return l.tparams[i] }
+
+// list is for internal use where we expect a []*TypeParam.
+// TODO(rfindley): list should probably be eliminated: we can pass around a
+// TypeParamList instead.
+func (l *TypeParamList) list() []*TypeParam {
+ if l == nil {
+ return nil
+ }
+ return l.tparams
+}
+
+// TypeList holds a list of types.
+type TypeList struct{ types []Type }
+
+// newTypeList returns a new TypeList with the types in list.
+func newTypeList(list []Type) *TypeList {
+ if len(list) == 0 {
+ return nil
+ }
+ return &TypeList{list}
+}
+
+// Len returns the number of types in the list.
+// It is safe to call on a nil receiver.
+func (l *TypeList) Len() int { return len(l.list()) }
+
+// At returns the i'th type in the list.
+func (l *TypeList) At(i int) Type { return l.types[i] }
+
+// list is for internal use where we expect a []Type.
+// TODO(rfindley): list should probably be eliminated: we can pass around a
+// TypeList instead.
+func (l *TypeList) list() []Type {
+ if l == nil {
+ return nil
+ }
+ return l.types
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func bindTParams(list []*TypeParam) *TypeParamList {
+ if len(list) == 0 {
+ return nil
+ }
+ for i, typ := range list {
+ if typ.index >= 0 {
+ panic("type parameter bound more than once")
+ }
+ typ.index = i
+ }
+ return &TypeParamList{tparams: list}
+}
diff --git a/src/cmd/compile/internal/types2/typeparam.go b/src/cmd/compile/internal/types2/typeparam.go
new file mode 100644
index 0000000..2e9a2ad
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeparam.go
@@ -0,0 +1,156 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "sync/atomic"
+
+// Note: This is a uint32 rather than a uint64 because the
+// respective 64 bit atomic instructions are not available
+// on all platforms.
+var lastID uint32
+
+// nextID returns a value increasing monotonically by 1 with
+// each call, starting with 1. It may be called concurrently.
+func nextID() uint64 { return uint64(atomic.AddUint32(&lastID, 1)) }
+
+// A TypeParam represents a type parameter type.
+type TypeParam struct {
+ check *Checker // for lazy type bound completion
+ id uint64 // unique id, for debugging only
+ obj *TypeName // corresponding type name
+ index int // type parameter index in source order, starting at 0
+ bound Type // any type, but underlying is eventually *Interface for correct programs (see TypeParam.iface)
+}
+
+// Obj returns the type name for the type parameter t.
+func (t *TypeParam) Obj() *TypeName { return t.obj }
+
+// NewTypeParam returns a new TypeParam. Type parameters may be set on a Named
+// or Signature type by calling SetTypeParams. Setting a type parameter on more
+// than one type will result in a panic.
+//
+// The constraint argument can be nil, and set later via SetConstraint. If the
+// constraint is non-nil, it must be fully defined.
+func NewTypeParam(obj *TypeName, constraint Type) *TypeParam {
+ return (*Checker)(nil).newTypeParam(obj, constraint)
+}
+
+// check may be nil
+func (check *Checker) newTypeParam(obj *TypeName, constraint Type) *TypeParam {
+ // Always increment lastID, even if it is not used.
+ id := nextID()
+ if check != nil {
+ check.nextID++
+ id = check.nextID
+ }
+ typ := &TypeParam{check: check, id: id, obj: obj, index: -1, bound: constraint}
+ if obj.typ == nil {
+ obj.typ = typ
+ }
+ // iface may mutate typ.bound, so we must ensure that iface() is called
+ // at least once before the resulting TypeParam escapes.
+ if check != nil {
+ check.needsCleanup(typ)
+ } else if constraint != nil {
+ typ.iface()
+ }
+ return typ
+}
+
+// Index returns the index of the type param within its param list, or -1 if
+// the type parameter has not yet been bound to a type.
+func (t *TypeParam) Index() int {
+ return t.index
+}
+
+// Constraint returns the type constraint specified for t.
+func (t *TypeParam) Constraint() Type {
+ return t.bound
+}
+
+// SetConstraint sets the type constraint for t.
+//
+// It must be called by users of NewTypeParam after the bound's underlying is
+// fully defined, and before using the type parameter in any way other than to
+// form other types. Once SetConstraint returns, the receiver t is safe for
+// concurrent use.
+func (t *TypeParam) SetConstraint(bound Type) {
+ if bound == nil {
+ panic("nil constraint")
+ }
+ t.bound = bound
+ // iface may mutate t.bound (if bound is not an interface), so ensure that
+ // this is done before returning.
+ t.iface()
+}
+
+func (t *TypeParam) Underlying() Type {
+ return t.iface()
+}
+
+func (t *TypeParam) String() string { return TypeString(t, nil) }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+func (t *TypeParam) cleanup() {
+ t.iface()
+ t.check = nil
+}
+
+// iface returns the constraint interface of t.
+func (t *TypeParam) iface() *Interface {
+ bound := t.bound
+
+ // determine constraint interface
+ var ityp *Interface
+ switch u := under(bound).(type) {
+ case *Basic:
+ if u == Typ[Invalid] {
+ // error is reported elsewhere
+ return &emptyInterface
+ }
+ case *Interface:
+ if isTypeParam(bound) {
+ // error is reported in Checker.collectTypeParams
+ return &emptyInterface
+ }
+ ityp = u
+ }
+
+ // If we don't have an interface, wrap constraint into an implicit interface.
+ if ityp == nil {
+ ityp = NewInterfaceType(nil, []Type{bound})
+ ityp.implicit = true
+ t.bound = ityp // update t.bound for next time (optimization)
+ }
+
+ // compute type set if necessary
+ if ityp.tset == nil {
+ // pos is used for tracing output; start with the type parameter position.
+ pos := t.obj.pos
+ // use the (original or possibly instantiated) type bound position if we have one
+ if n, _ := bound.(*Named); n != nil {
+ pos = n.obj.pos
+ }
+ computeInterfaceTypeSet(t.check, pos, ityp)
+ }
+
+ return ityp
+}
+
+// is calls f with the specific type terms of t's constraint and reports whether
+// all calls to f returned true. If there are no specific terms, is
+// returns the result of f(nil).
+func (t *TypeParam) is(f func(*term) bool) bool {
+ return t.iface().typeSet().is(f)
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of t's constraint and reports whether all calls to f returned true.
+// If there are no specific terms, underIs returns the result of f(nil).
+func (t *TypeParam) underIs(f func(Type) bool) bool {
+ return t.iface().typeSet().underIs(f)
+}
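A hedged construction sketch using go/types, which mirrors the types2 API above and is importable outside the compiler: create the type parameter with a nil constraint, then call SetConstraint exactly once after the constraint is fully defined.

	package main

	import (
		"fmt"
		"go/token"
		"go/types"
	)

	func main() {
		tn := types.NewTypeName(token.NoPos, nil, "T", nil)
		tp := types.NewTypeParam(tn, nil)                     // the constraint may be nil here
		tp.SetConstraint(types.Universe.Lookup("any").Type()) // set once, before further use
		fmt.Println(tp.Obj().Name(), tp.Constraint())         // prints the name and its constraint
	}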
diff --git a/src/cmd/compile/internal/types2/typeset.go b/src/cmd/compile/internal/types2/typeset.go
new file mode 100644
index 0000000..646b436
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset.go
@@ -0,0 +1,433 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "bytes"
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "sort"
+)
+
+// ----------------------------------------------------------------------------
+// API
+
+// A _TypeSet represents the type set of an interface.
+// Because of existing language restrictions, methods can be "factored out"
+// from the terms. The actual type set is the intersection of the type set
+// implied by the methods and the type set described by the terms and the
+// comparable bit. To test whether a type is included in a type set
+// ("implements" relation), the type must implement all methods _and_ be
+// an element of the type set described by the terms and the comparable bit.
+// If the term list describes the set of all types and comparable is true,
+// only comparable types are meant; in all other cases comparable is false.
+type _TypeSet struct {
+ methods []*Func // all methods of the interface; sorted by unique ID
+ terms termlist // type terms of the type set
+ comparable bool // invariant: !comparable || terms.isAll()
+}
+
+// IsEmpty reports whether type set s is the empty set.
+func (s *_TypeSet) IsEmpty() bool { return s.terms.isEmpty() }
+
+// IsAll reports whether type set s is the set of all types (corresponding to the empty interface).
+func (s *_TypeSet) IsAll() bool { return s.IsMethodSet() && len(s.methods) == 0 }
+
+// IsMethodSet reports whether the interface t is fully described by its method set.
+func (s *_TypeSet) IsMethodSet() bool { return !s.comparable && s.terms.isAll() }
+
+// IsComparable reports whether each type in the set is comparable.
+func (s *_TypeSet) IsComparable(seen map[Type]bool) bool {
+ if s.terms.isAll() {
+ return s.comparable
+ }
+ return s.is(func(t *term) bool {
+ return t != nil && comparable(t.typ, false, seen, nil)
+ })
+}
+
+// NumMethods returns the number of methods available.
+func (s *_TypeSet) NumMethods() int { return len(s.methods) }
+
+// Method returns the i'th method of type set s for 0 <= i < s.NumMethods().
+// The methods are ordered by their unique ID.
+func (s *_TypeSet) Method(i int) *Func { return s.methods[i] }
+
+// LookupMethod returns the index of, and the method with, a matching package and name, or (-1, nil).
+func (s *_TypeSet) LookupMethod(pkg *Package, name string, foldCase bool) (int, *Func) {
+ return lookupMethod(s.methods, pkg, name, foldCase)
+}
+
+func (s *_TypeSet) String() string {
+ switch {
+ case s.IsEmpty():
+ return "∅"
+ case s.IsAll():
+ return "𝓤"
+ }
+
+ hasMethods := len(s.methods) > 0
+ hasTerms := s.hasTerms()
+
+ var buf bytes.Buffer
+ buf.WriteByte('{')
+ if s.comparable {
+ buf.WriteString("comparable")
+ if hasMethods || hasTerms {
+ buf.WriteString("; ")
+ }
+ }
+ for i, m := range s.methods {
+ if i > 0 {
+ buf.WriteString("; ")
+ }
+ buf.WriteString(m.String())
+ }
+ if hasMethods && hasTerms {
+ buf.WriteString("; ")
+ }
+ if hasTerms {
+ buf.WriteString(s.terms.String())
+ }
+ buf.WriteString("}")
+ return buf.String()
+}
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// hasTerms reports whether the type set has specific type terms.
+func (s *_TypeSet) hasTerms() bool { return !s.terms.isEmpty() && !s.terms.isAll() }
+
+// subsetOf reports whether s1 ⊆ s2.
+func (s1 *_TypeSet) subsetOf(s2 *_TypeSet) bool { return s1.terms.subsetOf(s2.terms) }
+
+// TODO(gri) TypeSet.is and TypeSet.underIs should probably also go into termlist.go
+
+// is calls f with the specific type terms of s and reports whether
+// all calls to f returned true. If there are no specific terms, is
+// returns the result of f(nil).
+func (s *_TypeSet) is(f func(*term) bool) bool {
+ if !s.hasTerms() {
+ return f(nil)
+ }
+ for _, t := range s.terms {
+ assert(t.typ != nil)
+ if !f(t) {
+ return false
+ }
+ }
+ return true
+}
+
+// underIs calls f with the underlying types of the specific type terms
+// of s and reports whether all calls to f returned true. If there are
+// no specific terms, underIs returns the result of f(nil).
+func (s *_TypeSet) underIs(f func(Type) bool) bool {
+ if !s.hasTerms() {
+ return f(nil)
+ }
+ for _, t := range s.terms {
+ assert(t.typ != nil)
+ // x == under(x) for ~x terms
+ u := t.typ
+ if !t.tilde {
+ u = under(u)
+ }
+ if debug {
+ assert(Identical(u, under(u)))
+ }
+ if !f(u) {
+ return false
+ }
+ }
+ return true
+}
+
+// topTypeSet may be used as type set for the empty interface.
+var topTypeSet = _TypeSet{terms: allTermlist}
+
+// computeInterfaceTypeSet may be called with check == nil.
+func computeInterfaceTypeSet(check *Checker, pos syntax.Pos, ityp *Interface) *_TypeSet {
+ if ityp.tset != nil {
+ return ityp.tset
+ }
+
+ // If the interface is not fully set up yet, the type set will
+ // not be complete, which may lead to errors when using the
+ // type set (e.g. missing method). Don't compute a partial type
+ // set (and don't store it!), so that we still compute the full
+ // type set eventually. Instead, return the top type set and
+ // let any follow-on errors play out.
+ if !ityp.complete {
+ return &topTypeSet
+ }
+
+ if check != nil && check.conf.Trace {
+ // Types don't generally have position information.
+ // If we don't have a valid pos provided, try to use
+ // one close enough.
+ if !pos.IsKnown() && len(ityp.methods) > 0 {
+ pos = ityp.methods[0].pos
+ }
+
+ check.trace(pos, "type set for %s", ityp)
+ check.indent++
+ defer func() {
+ check.indent--
+ check.trace(pos, "=> %s ", ityp.typeSet())
+ }()
+ }
+
+ // An infinitely expanding interface (due to a cycle) is detected
+ // elsewhere (Checker.validType), so here we simply assume we only
+ // have valid interfaces. Mark the interface as complete to avoid
+ // infinite recursion if the validType check occurs later for some
+ // reason.
+ ityp.tset = &_TypeSet{terms: allTermlist} // TODO(gri) is this sufficient?
+
+ var unionSets map[*Union]*_TypeSet
+ if check != nil {
+ if check.unionTypeSets == nil {
+ check.unionTypeSets = make(map[*Union]*_TypeSet)
+ }
+ unionSets = check.unionTypeSets
+ } else {
+ unionSets = make(map[*Union]*_TypeSet)
+ }
+
+ // Methods of embedded interfaces are collected unchanged; i.e., the identity
+ // of a method I.m's Func Object of an interface I is the same as that of
+ // the method m in an interface that embeds interface I. On the other hand,
+ // if a method is embedded via multiple overlapping embedded interfaces, we
+ // don't provide a guarantee which "original m" got chosen for the embedding
+ // interface. See also issue #34421.
+ //
+ // If we don't care to provide this identity guarantee anymore, instead of
+ // reusing the original method in embeddings, we can clone the method's Func
+ // Object and give it the position of a corresponding embedded interface. Then
+ // we can get rid of the mpos map below and simply use the cloned method's
+ // position.
+
+ var todo []*Func
+ var seen objset
+ var allMethods []*Func
+ mpos := make(map[*Func]syntax.Pos) // method specification or method embedding position, for good error messages
+ addMethod := func(pos syntax.Pos, m *Func, explicit bool) {
+ switch other := seen.insert(m); {
+ case other == nil:
+ allMethods = append(allMethods, m)
+ mpos[m] = pos
+ case explicit:
+ if check == nil {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ // check != nil
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ default:
+ // We have a duplicate method name in an embedded (not explicitly declared) method.
+ // Check method signatures after all types are computed (issue #33656).
+ // If we're pre-go1.14 (overlapping embeddings are not permitted), report that
+ // error here as well (even though we could do it eagerly) because it's the same
+ // error message.
+ if check == nil {
+ // check method signatures after all locally embedded interfaces are computed
+ todo = append(todo, m, other.(*Func))
+ break
+ }
+ // check != nil
+ check.later(func() {
+ if !check.allowVersion(m.pkg, 1, 14) || !Identical(m.typ, other.Type()) {
+ var err error_
+ err.errorf(pos, "duplicate method %s", m.name)
+ err.errorf(mpos[other.(*Func)], "other declaration of %s", m.name)
+ check.report(&err)
+ }
+ })
+ }
+ }
+
+ for _, m := range ityp.methods {
+ addMethod(m.pos, m, true)
+ }
+
+ // collect embedded elements
+ allTerms := allTermlist
+ allComparable := false
+ for i, typ := range ityp.embeddeds {
+ // The embedding position is nil for imported interfaces
+ // and also for interface copies after substitution (but
+ // in that case we don't need to report errors again).
+ var pos syntax.Pos // embedding position
+ if ityp.embedPos != nil {
+ pos = (*ityp.embedPos)[i]
+ }
+ var comparable bool
+ var terms termlist
+ switch u := under(typ).(type) {
+ case *Interface:
+ // For now we don't permit type parameters as constraints.
+ assert(!isTypeParam(typ))
+ tset := computeInterfaceTypeSet(check, pos, u)
+ // If typ is local, an error was already reported where typ is specified/defined.
+ if check != nil && check.isImportedConstraint(typ) && !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(pos, "go1.18", "embedding constraint interface %s", typ)
+ continue
+ }
+ comparable = tset.comparable
+ for _, m := range tset.methods {
+ addMethod(pos, m, false) // use embedding position pos rather than m.pos
+ }
+ terms = tset.terms
+ case *Union:
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(pos, "go1.18", "embedding interface element %s", u)
+ continue
+ }
+ tset := computeUnionTypeSet(check, unionSets, pos, u)
+ if tset == &invalidTypeSet {
+ continue // ignore invalid unions
+ }
+ assert(!tset.comparable)
+ assert(len(tset.methods) == 0)
+ terms = tset.terms
+ default:
+ if u == Typ[Invalid] {
+ continue
+ }
+ if check != nil && !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(pos, "go1.18", "embedding non-interface type %s", typ)
+ continue
+ }
+ terms = termlist{{false, typ}}
+ }
+
+ // The type set of an interface is the intersection of the type sets of all its elements.
+ // Due to language restrictions, only embedded interfaces can add methods, they are handled
+ // separately. Here we only need to intersect the term lists and comparable bits.
+ allTerms, allComparable = intersectTermLists(allTerms, allComparable, terms, comparable)
+ }
+ ityp.embedPos = nil // not needed anymore (errors have been reported)
+
+ // process todo's (this only happens if check == nil)
+ for i := 0; i < len(todo); i += 2 {
+ m := todo[i]
+ other := todo[i+1]
+ if !Identical(m.typ, other.typ) {
+ panic(fmt.Sprintf("%s: duplicate method %s", m.pos, m.name))
+ }
+ }
+
+ ityp.tset.comparable = allComparable
+ if len(allMethods) != 0 {
+ sortMethods(allMethods)
+ ityp.tset.methods = allMethods
+ }
+ ityp.tset.terms = allTerms
+
+ return ityp.tset
+}
+
+// TODO(gri) The intersectTermLists function belongs to the termlist implementation.
+// The comparable type set may also be best represented as a term (using
+// a special type).
+
+// intersectTermLists computes the intersection of two term lists and respective comparable bits.
+// xcomp, ycomp are valid only if xterms.isAll() and yterms.isAll() respectively.
+func intersectTermLists(xterms termlist, xcomp bool, yterms termlist, ycomp bool) (termlist, bool) {
+ terms := xterms.intersect(yterms)
+ // If one of xterms or yterms is marked as comparable,
+ // the result must only include comparable types.
+ comp := xcomp || ycomp
+ if comp && !terms.isAll() {
+ // only keep comparable terms
+ i := 0
+ for _, t := range terms {
+ assert(t.typ != nil)
+ if Comparable(t.typ) {
+ terms[i] = t
+ i++
+ }
+ }
+ terms = terms[:i]
+ if !terms.isAll() {
+ comp = false
+ }
+ }
+ assert(!comp || terms.isAll()) // comparable invariant
+ return terms, comp
+}
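+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// how intersectTermLists combines the embedded elements of
+// interface{ int|string; comparable }, matching the behavior exercised by
+// TestTypeSetString. The function name below is hypothetical.
+func intersectExample() (termlist, bool) {
+	x := termlist{{false, Typ[Int]}, {false, Typ[String]}} // int | string
+	y := allTermlist                                       // comparable contributes the full term list
+	// The intersection is {int, string}; both terms are comparable, but the
+	// result is not the set of all types, so the comparable bit is dropped.
+	return intersectTermLists(x, false, y, true)
+}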
+
+func sortMethods(list []*Func) {
+ sort.Sort(byUniqueMethodName(list))
+}
+
+func assertSortedMethods(list []*Func) {
+ if !debug {
+ panic("assertSortedMethods called outside debug mode")
+ }
+ if !sort.IsSorted(byUniqueMethodName(list)) {
+ panic("methods not sorted")
+ }
+}
+
+// byUniqueMethodName method lists can be sorted by their unique method names.
+type byUniqueMethodName []*Func
+
+func (a byUniqueMethodName) Len() int { return len(a) }
+func (a byUniqueMethodName) Less(i, j int) bool { return a[i].less(&a[j].object) }
+func (a byUniqueMethodName) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
+
+// invalidTypeSet is a singleton type set to signal an invalid type set
+// due to an error. It's also a valid empty type set, so consumers of
+// type sets may choose to ignore it.
+var invalidTypeSet _TypeSet
+
+// computeUnionTypeSet may be called with check == nil.
+// The result is &invalidTypeSet if the union overflows.
+func computeUnionTypeSet(check *Checker, unionSets map[*Union]*_TypeSet, pos syntax.Pos, utyp *Union) *_TypeSet {
+ if tset, _ := unionSets[utyp]; tset != nil {
+ return tset
+ }
+
+ // avoid infinite recursion (see also computeInterfaceTypeSet)
+ unionSets[utyp] = new(_TypeSet)
+
+ var allTerms termlist
+ for _, t := range utyp.terms {
+ var terms termlist
+ u := under(t.typ)
+ if ui, _ := u.(*Interface); ui != nil {
+ // For now we don't permit type parameters as constraints.
+ assert(!isTypeParam(t.typ))
+ terms = computeInterfaceTypeSet(check, pos, ui).terms
+ } else if u == Typ[Invalid] {
+ continue
+ } else {
+ if t.tilde && !Identical(t.typ, u) {
+ // There is no underlying type which is t.typ.
+ // The corresponding type set is empty.
+ t = nil // ∅ term
+ }
+ terms = termlist{(*term)(t)}
+ }
+ // The type set of a union expression is the union
+ // of the type sets of each term.
+ allTerms = allTerms.union(terms)
+ if len(allTerms) > maxTermCount {
+ if check != nil {
+ check.errorf(pos, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ }
+ unionSets[utyp] = &invalidTypeSet
+ return unionSets[utyp]
+ }
+ }
+ unionSets[utyp].terms = allTerms
+
+ return unionSets[utyp]
+}
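+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// the union above relies on termlist.union, which normalizes overlapping
+// terms; e.g. a union element int | ~int collapses to the single term ~int.
+// The function name below is hypothetical.
+func unionExample() termlist {
+	a := termlist{{false, Typ[Int]}} // int
+	b := termlist{{true, Typ[Int]}}  // ~int
+	return a.union(b)                // {~int}
+}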
diff --git a/src/cmd/compile/internal/types2/typeset_test.go b/src/cmd/compile/internal/types2/typeset_test.go
new file mode 100644
index 0000000..68e5d8a
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeset_test.go
@@ -0,0 +1,80 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "strings"
+ "testing"
+)
+
+func TestInvalidTypeSet(t *testing.T) {
+ if !invalidTypeSet.IsEmpty() {
+ t.Error("invalidTypeSet is not empty")
+ }
+}
+
+func TestTypeSetString(t *testing.T) {
+ for body, want := range map[string]string{
+ "{}": "𝓤",
+ "{int}": "{int}",
+ "{~int}": "{~int}",
+ "{int|string}": "{int ∪ string}",
+ "{int; string}": "∅",
+
+ "{comparable}": "{comparable}",
+ "{comparable; int}": "{int}",
+ "{~int; comparable}": "{~int}",
+ "{int|string; comparable}": "{int ∪ string}",
+ "{comparable; int; string}": "∅",
+
+ "{m()}": "{func (p.T).m()}",
+ "{m1(); m2() int }": "{func (p.T).m1(); func (p.T).m2() int}",
+ "{error}": "{func (error).Error() string}",
+ "{m(); comparable}": "{comparable; func (p.T).m()}",
+ "{m1(); comparable; m2() int }": "{comparable; func (p.T).m1(); func (p.T).m2() int}",
+ "{comparable; error}": "{comparable; func (error).Error() string}",
+
+ "{m(); comparable; int|float32|string}": "{func (p.T).m(); int ∪ float32 ∪ string}",
+ "{m1(); int; m2(); comparable }": "{func (p.T).m1(); func (p.T).m2(); int}",
+
+ "{E}; type E interface{}": "𝓤",
+ "{E}; type E interface{int;string}": "∅",
+ "{E}; type E interface{comparable}": "{comparable}",
+ } {
+ // parse
+ errh := func(error) {} // dummy error handler so that parsing continues in presence of errors
+ src := "package p; type T interface" + body
+ file, err := syntax.Parse(nil, strings.NewReader(src), errh, nil, syntax.AllowGenerics)
+ if err != nil {
+ t.Fatalf("%s: %v (invalid test case)", body, err)
+ }
+
+ // type check
+ var conf Config
+ pkg, err := conf.Check(file.PkgName.Value, []*syntax.File{file}, nil)
+ if err != nil {
+ t.Fatalf("%s: %v (invalid test case)", body, err)
+ }
+
+ // lookup T
+ obj := pkg.scope.Lookup("T")
+ if obj == nil {
+ t.Fatalf("%s: T not found (invalid test case)", body)
+ }
+ T, ok := under(obj.Type()).(*Interface)
+ if !ok {
+ t.Fatalf("%s: %v is not an interface (invalid test case)", body, obj)
+ }
+
+ // verify test case
+ got := T.typeSet().String()
+ if got != want {
+ t.Errorf("%s: got %s; want %s", body, got, want)
+ }
+ }
+}
+
+// TODO(gri) add more tests
diff --git a/src/cmd/compile/internal/types2/typestring.go b/src/cmd/compile/internal/types2/typestring.go
new file mode 100644
index 0000000..ada0529
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typestring.go
@@ -0,0 +1,483 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements printing of types.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "sort"
+ "strconv"
+ "strings"
+ "unicode/utf8"
+)
+
+// A Qualifier controls how named package-level objects are printed in
+// calls to TypeString, ObjectString, and SelectionString.
+//
+// These three formatting routines call the Qualifier for each
+// package-level object O, and if the Qualifier returns a non-empty
+// string p, the object is printed in the form p.O.
+// If it returns an empty string, only the object name O is printed.
+//
+// Using a nil Qualifier is equivalent to using (*Package).Path: the
+// object is qualified by the import path, e.g., "encoding/json.Marshal".
+//
+type Qualifier func(*Package) string
+
+// RelativeTo returns a Qualifier that fully qualifies members of
+// all packages other than pkg.
+func RelativeTo(pkg *Package) Qualifier {
+ if pkg == nil {
+ return nil
+ }
+ return func(other *Package) string {
+ if pkg == other {
+ return "" // same package; unqualified
+ }
+ return other.Path()
+ }
+}
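+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// a hypothetical client-side use of RelativeTo. pkg is assumed to be a
+// *Package returned by Config.Check; named types declared in pkg print
+// unqualified, while types from other packages keep their import-path
+// qualifier (see also TestQualifiedTypeString in typestring_test.go).
+func relativeToExample(pkg *Package, t Type) string {
+	return TypeString(t, RelativeTo(pkg))
+}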
+
+// TypeString returns the string representation of typ.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func TypeString(typ Type, qf Qualifier) string {
+ return typeString(typ, qf, false)
+}
+
+func typeString(typ Type, qf Qualifier, debug bool) string {
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, qf)
+ w.debug = debug
+ w.typ(typ)
+ return buf.String()
+}
+
+// WriteType writes the string representation of typ to buf.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func WriteType(buf *bytes.Buffer, typ Type, qf Qualifier) {
+ newTypeWriter(buf, qf).typ(typ)
+}
+
+// WriteSignature writes the representation of the signature sig to buf,
+// without a leading "func" keyword.
+// The Qualifier controls the printing of
+// package-level objects, and may be nil.
+func WriteSignature(buf *bytes.Buffer, sig *Signature, qf Qualifier) {
+ newTypeWriter(buf, qf).signature(sig)
+}
+
+type typeWriter struct {
+ buf *bytes.Buffer
+ seen map[Type]bool
+ qf Qualifier
+ ctxt *Context // if non-nil, we are type hashing
+ tparams *TypeParamList // local type parameters
+ debug bool // if true, write debug annotations
+}
+
+func newTypeWriter(buf *bytes.Buffer, qf Qualifier) *typeWriter {
+ return &typeWriter{buf, make(map[Type]bool), qf, nil, nil, false}
+}
+
+func newTypeHasher(buf *bytes.Buffer, ctxt *Context) *typeWriter {
+ assert(ctxt != nil)
+ return &typeWriter{buf, make(map[Type]bool), nil, ctxt, nil, false}
+}
+
+func (w *typeWriter) byte(b byte) {
+ if w.ctxt != nil {
+ if b == ' ' {
+ b = '#'
+ }
+ w.buf.WriteByte(b)
+ return
+ }
+ w.buf.WriteByte(b)
+ if b == ',' || b == ';' {
+ w.buf.WriteByte(' ')
+ }
+}
+
+func (w *typeWriter) string(s string) {
+ w.buf.WriteString(s)
+}
+
+func (w *typeWriter) error(msg string) {
+ if w.ctxt != nil {
+ panic(msg)
+ }
+ w.buf.WriteString("<" + msg + ">")
+}
+
+func (w *typeWriter) typ(typ Type) {
+ if w.seen[typ] {
+ w.error("cycle to " + goTypeName(typ))
+ return
+ }
+ w.seen[typ] = true
+ defer delete(w.seen, typ)
+
+ switch t := typ.(type) {
+ case nil:
+ w.error("nil")
+
+ case *Basic:
+ // exported basic types go into package unsafe
+ // (currently this is just unsafe.Pointer)
+ if isExported(t.name) {
+ if obj, _ := Unsafe.scope.Lookup(t.name).(*TypeName); obj != nil {
+ w.typeName(obj)
+ break
+ }
+ }
+ w.string(t.name)
+
+ case *Array:
+ w.byte('[')
+ w.string(strconv.FormatInt(t.len, 10))
+ w.byte(']')
+ w.typ(t.elem)
+
+ case *Slice:
+ w.string("[]")
+ w.typ(t.elem)
+
+ case *Struct:
+ w.string("struct{")
+ for i, f := range t.fields {
+ if i > 0 {
+ w.byte(';')
+ }
+ // This doesn't do the right thing for embedded type
+ // aliases where we should print the alias name, not
+ // the aliased type (see issue #44410).
+ if !f.embedded {
+ w.string(f.name)
+ w.byte(' ')
+ }
+ w.typ(f.typ)
+ if tag := t.Tag(i); tag != "" {
+ w.byte(' ')
+ // TODO(gri) If tag contains blanks, replacing them with '#'
+ // in Context.TypeHash may produce another tag
+ // accidentally.
+ w.string(strconv.Quote(tag))
+ }
+ }
+ w.byte('}')
+
+ case *Pointer:
+ w.byte('*')
+ w.typ(t.base)
+
+ case *Tuple:
+ w.tuple(t, false)
+
+ case *Signature:
+ w.string("func")
+ w.signature(t)
+
+ case *Union:
+ // Unions only appear as (syntactic) embedded elements
+ // in interfaces and syntactically cannot be empty.
+ if t.Len() == 0 {
+ w.error("empty union")
+ break
+ }
+ for i, t := range t.terms {
+ if i > 0 {
+ w.byte('|')
+ }
+ if t.tilde {
+ w.byte('~')
+ }
+ w.typ(t.typ)
+ }
+
+ case *Interface:
+ if w.ctxt == nil {
+ if t == universeAny.Type() {
+ // When not hashing, we can try to improve type strings by writing "any"
+ // for a type that is pointer-identical to universeAny. This logic should
+ // be deprecated by more robust handling for aliases.
+ w.string("any")
+ break
+ }
+ if t == universeComparable.Type().(*Named).underlying {
+ w.string("interface{comparable}")
+ break
+ }
+ }
+ if t.implicit {
+ if len(t.methods) == 0 && len(t.embeddeds) == 1 {
+ w.typ(t.embeddeds[0])
+ break
+ }
+ // Something's wrong with the implicit interface.
+ // Print it as such and continue.
+ w.string("/* implicit */ ")
+ }
+ w.string("interface{")
+ first := true
+ if w.ctxt != nil {
+ w.typeSet(t.typeSet())
+ } else {
+ for _, m := range t.methods {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.string(m.name)
+ w.signature(m.typ.(*Signature))
+ }
+ for _, typ := range t.embeddeds {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.typ(typ)
+ }
+ }
+ w.byte('}')
+
+ case *Map:
+ w.string("map[")
+ w.typ(t.key)
+ w.byte(']')
+ w.typ(t.elem)
+
+ case *Chan:
+ var s string
+ var parens bool
+ switch t.dir {
+ case SendRecv:
+ s = "chan "
+ // chan (<-chan T) requires parentheses
+ if c, _ := t.elem.(*Chan); c != nil && c.dir == RecvOnly {
+ parens = true
+ }
+ case SendOnly:
+ s = "chan<- "
+ case RecvOnly:
+ s = "<-chan "
+ default:
+ w.error("unknown channel direction")
+ }
+ w.string(s)
+ if parens {
+ w.byte('(')
+ }
+ w.typ(t.elem)
+ if parens {
+ w.byte(')')
+ }
+
+ case *Named:
+ // If hashing, write a unique prefix for t to represent its identity, since
+ // named type identity is pointer identity.
+ if w.ctxt != nil {
+ w.string(strconv.Itoa(w.ctxt.getID(t)))
+ }
+		w.typeName(t.obj) // when hashing, written for readability of the hash only
+ if t.targs != nil {
+ // instantiated type
+ w.typeList(t.targs.list())
+ } else if w.ctxt == nil && t.TypeParams().Len() != 0 { // For type hashing, don't need to format the TypeParams
+ // parameterized type
+ w.tParamList(t.TypeParams().list())
+ }
+
+ case *TypeParam:
+ if t.obj == nil {
+ w.error("unnamed type parameter")
+ break
+ }
+ if i := tparamIndex(w.tparams.list(), t); i >= 0 {
+ // The names of type parameters that are declared by the type being
+ // hashed are not part of the type identity. Replace them with a
+ // placeholder indicating their index.
+ w.string(fmt.Sprintf("$%d", i))
+ } else {
+ w.string(t.obj.name)
+ if w.debug || w.ctxt != nil {
+ w.string(subscript(t.id))
+ }
+ }
+
+ default:
+ // For externally defined implementations of Type.
+ // Note: In this case cycles won't be caught.
+ w.string(t.String())
+ }
+}
+
+// typeSet writes a canonical hash for an interface type set.
+func (w *typeWriter) typeSet(s *_TypeSet) {
+ assert(w.ctxt != nil)
+ first := true
+ for _, m := range s.methods {
+ if !first {
+ w.byte(';')
+ }
+ first = false
+ w.string(m.name)
+ w.signature(m.typ.(*Signature))
+ }
+ switch {
+ case s.terms.isAll():
+ // nothing to do
+ case s.terms.isEmpty():
+ w.string(s.terms.String())
+ default:
+ var termHashes []string
+ for _, term := range s.terms {
+ // terms are not canonically sorted, so we sort their hashes instead.
+ var buf bytes.Buffer
+ if term.tilde {
+ buf.WriteByte('~')
+ }
+ newTypeHasher(&buf, w.ctxt).typ(term.typ)
+ termHashes = append(termHashes, buf.String())
+ }
+ sort.Strings(termHashes)
+ if !first {
+ w.byte(';')
+ }
+ w.string(strings.Join(termHashes, "|"))
+ }
+}
+
+func (w *typeWriter) typeList(list []Type) {
+ w.byte('[')
+ for i, typ := range list {
+ if i > 0 {
+ w.byte(',')
+ }
+ w.typ(typ)
+ }
+ w.byte(']')
+}
+
+func (w *typeWriter) tParamList(list []*TypeParam) {
+ w.byte('[')
+ var prev Type
+ for i, tpar := range list {
+ // Determine the type parameter and its constraint.
+ // list is expected to hold type parameter names,
+ // but don't crash if that's not the case.
+ if tpar == nil {
+ w.error("nil type parameter")
+ continue
+ }
+ if i > 0 {
+ if tpar.bound != prev {
+ // bound changed - write previous one before advancing
+ w.byte(' ')
+ w.typ(prev)
+ }
+ w.byte(',')
+ }
+ prev = tpar.bound
+ w.typ(tpar)
+ }
+ if prev != nil {
+ w.byte(' ')
+ w.typ(prev)
+ }
+ w.byte(']')
+}
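+
+// Illustrative note (editor's addition): for a list such as
+// [P, Q any, R comparable], where P and Q share the same bound, the loop
+// above writes the bound once per group, yielding "[P, Q any, R comparable]"
+// rather than repeating the bound after every parameter name.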
+
+func (w *typeWriter) typeName(obj *TypeName) {
+ if obj.pkg != nil {
+ writePackage(w.buf, obj.pkg, w.qf)
+ }
+ w.string(obj.name)
+}
+
+func (w *typeWriter) tuple(tup *Tuple, variadic bool) {
+ w.byte('(')
+ if tup != nil {
+ for i, v := range tup.vars {
+ if i > 0 {
+ w.byte(',')
+ }
+ // parameter names are ignored for type identity and thus type hashes
+ if w.ctxt == nil && v.name != "" {
+ w.string(v.name)
+ w.byte(' ')
+ }
+ typ := v.typ
+ if variadic && i == len(tup.vars)-1 {
+ if s, ok := typ.(*Slice); ok {
+ w.string("...")
+ typ = s.elem
+ } else {
+ // special case:
+ // append(s, "foo"...) leads to signature func([]byte, string...)
+ if t, _ := under(typ).(*Basic); t == nil || t.kind != String {
+ w.error("expected string type")
+ continue
+ }
+ w.typ(typ)
+ w.string("...")
+ continue
+ }
+ }
+ w.typ(typ)
+ }
+ }
+ w.byte(')')
+}
+
+func (w *typeWriter) signature(sig *Signature) {
+ if sig.TypeParams().Len() != 0 {
+ if w.ctxt != nil {
+ assert(w.tparams == nil)
+ w.tparams = sig.TypeParams()
+ defer func() {
+ w.tparams = nil
+ }()
+ }
+ w.tParamList(sig.TypeParams().list())
+ }
+
+ w.tuple(sig.params, sig.variadic)
+
+ n := sig.results.Len()
+ if n == 0 {
+ // no result
+ return
+ }
+
+ w.byte(' ')
+ if n == 1 && (w.ctxt != nil || sig.results.vars[0].name == "") {
+ // single unnamed result (if type hashing, name must be ignored)
+ w.typ(sig.results.vars[0].typ)
+ return
+ }
+
+ // multiple or named result(s)
+ w.tuple(sig.results, false)
+}
+
+// subscript returns the decimal (utf8) representation of x using subscript digits.
+func subscript(x uint64) string {
+ const w = len("₀") // all digits 0...9 have the same utf8 width
+ var buf [32 * w]byte
+ i := len(buf)
+ for {
+ i -= w
+ utf8.EncodeRune(buf[i:], '₀'+rune(x%10)) // '₀' == U+2080
+ x /= 10
+ if x == 0 {
+ break
+ }
+ }
+ return string(buf[i:])
+}
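+
+// Illustrative sketch (editor's addition, not part of the upstream file):
+// subscript renders decimal digits with U+2080..U+2089, so a type parameter
+// with id 42 prints as "P₄₂" in debug or hashing mode. The helper below is
+// hypothetical and only documents the expected output.
+func subscriptExample() bool {
+	return subscript(42) == "₄₂" // true
+}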
diff --git a/src/cmd/compile/internal/types2/typestring_test.go b/src/cmd/compile/internal/types2/typestring_test.go
new file mode 100644
index 0000000..c0689e8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typestring_test.go
@@ -0,0 +1,176 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2_test
+
+import (
+ "internal/testenv"
+ "testing"
+
+ "cmd/compile/internal/syntax"
+ . "cmd/compile/internal/types2"
+)
+
+const filename = "<src>"
+
+func makePkg(src string) (*Package, error) {
+ file, err := parseSrc(filename, src)
+ if err != nil {
+ return nil, err
+ }
+ // use the package name as package path
+ conf := Config{Importer: defaultImporter()}
+ return conf.Check(file.PkgName.Value, []*syntax.File{file}, nil)
+}
+
+type testEntry struct {
+ src, str string
+}
+
+// dup returns a testEntry where both src and str are the same.
+func dup(s string) testEntry {
+ return testEntry{s, s}
+}
+
+// types that don't depend on any other type declarations
+var independentTestTypes = []testEntry{
+ // basic types
+ dup("int"),
+ dup("float32"),
+ dup("string"),
+
+ // arrays
+ dup("[10]int"),
+
+ // slices
+ dup("[]int"),
+ dup("[][]int"),
+
+ // structs
+ dup("struct{}"),
+ dup("struct{x int}"),
+ {`struct {
+ x, y int
+ z float32 "foo"
+ }`, `struct{x int; y int; z float32 "foo"}`},
+ {`struct {
+ string
+ elems []complex128
+ }`, `struct{string; elems []complex128}`},
+
+ // pointers
+ dup("*int"),
+ dup("***struct{}"),
+ dup("*struct{a int; b float32}"),
+
+ // functions
+ dup("func()"),
+ dup("func(x int)"),
+ {"func(x, y int)", "func(x int, y int)"},
+ {"func(x, y int, z string)", "func(x int, y int, z string)"},
+ dup("func(int)"),
+ {"func(int, string, byte)", "func(int, string, byte)"},
+
+ dup("func() int"),
+ {"func() (string)", "func() string"},
+ dup("func() (u int)"),
+ {"func() (u, v int, w string)", "func() (u int, v int, w string)"},
+
+ dup("func(int) string"),
+ dup("func(x int) string"),
+ dup("func(x int) (u string)"),
+ {"func(x, y int) (u string)", "func(x int, y int) (u string)"},
+
+ dup("func(...int) string"),
+ dup("func(x ...int) string"),
+ dup("func(x ...int) (u string)"),
+ {"func(x int, y ...int) (u string)", "func(x int, y ...int) (u string)"},
+
+ // interfaces
+ dup("interface{}"),
+ dup("interface{m()}"),
+ dup(`interface{String() string; m(int) float32}`),
+ dup("interface{int|float32|complex128}"),
+ dup("interface{int|~float32|~complex128}"),
+ dup("any"),
+ dup("interface{comparable}"),
+ {"comparable", "interface{comparable}"},
+ {"error", "interface{Error() string}"},
+
+ // maps
+ dup("map[string]int"),
+ {"map[struct{x, y int}][]byte", "map[struct{x int; y int}][]byte"},
+
+ // channels
+ dup("chan<- chan int"),
+ dup("chan<- <-chan int"),
+ dup("<-chan <-chan int"),
+ dup("chan (<-chan int)"),
+ dup("chan<- func()"),
+ dup("<-chan []func() int"),
+}
+
+// types that depend on other type declarations (src in TestTypes)
+var dependentTestTypes = []testEntry{
+ // interfaces
+ dup(`interface{io.Reader; io.Writer}`),
+ dup(`interface{m() int; io.Writer}`),
+ {`interface{m() interface{T}}`, `interface{m() interface{generic_p.T}}`},
+}
+
+func TestTypeString(t *testing.T) {
+ testenv.MustHaveGoBuild(t)
+
+ var tests []testEntry
+ tests = append(tests, independentTestTypes...)
+ tests = append(tests, dependentTestTypes...)
+
+ for _, test := range tests {
+ src := `package generic_p; import "io"; type _ io.Writer; type T ` + test.src
+ pkg, err := makePkg(src)
+ if err != nil {
+ t.Errorf("%s: %s", src, err)
+ continue
+ }
+ obj := pkg.Scope().Lookup("T")
+ if obj == nil {
+ t.Errorf("%s: T not found", test.src)
+ continue
+ }
+ typ := obj.Type().Underlying()
+ if got := typ.String(); got != test.str {
+ t.Errorf("%s: got %s, want %s", test.src, got, test.str)
+ }
+ }
+}
+
+func TestQualifiedTypeString(t *testing.T) {
+ p, _ := pkgFor("p.go", "package p; type T int", nil)
+ q, _ := pkgFor("q.go", "package q", nil)
+
+ pT := p.Scope().Lookup("T").Type()
+ for _, test := range []struct {
+ typ Type
+ this *Package
+ want string
+ }{
+ {nil, nil, "<nil>"},
+ {pT, nil, "p.T"},
+ {pT, p, "T"},
+ {pT, q, "p.T"},
+ {NewPointer(pT), p, "*T"},
+ {NewPointer(pT), q, "*p.T"},
+ } {
+ qualifier := func(pkg *Package) string {
+ if pkg != test.this {
+ return pkg.Name()
+ }
+ return ""
+ }
+ if got := TypeString(test.typ, qualifier); got != test.want {
+ t.Errorf("TypeString(%s, %s) = %s, want %s",
+ test.this, test.typ, got, test.want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/typeterm.go b/src/cmd/compile/internal/types2/typeterm.go
new file mode 100644
index 0000000..1d7223f
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm.go
@@ -0,0 +1,166 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// A term describes elementary type sets:
+//
+// ∅: (*term)(nil) == ∅ // set of no types (empty set)
+// 𝓤: &term{} == 𝓤 // set of all types (𝓤niverse)
+// T: &term{false, T} == {T} // set of type T
+// ~t: &term{true, t} == {t' | under(t') == t} // set of types with underlying type t
+//
+type term struct {
+ tilde bool // valid if typ != nil
+ typ Type
+}
+
+func (x *term) String() string {
+ switch {
+ case x == nil:
+ return "∅"
+ case x.typ == nil:
+ return "𝓤"
+ case x.tilde:
+ return "~" + x.typ.String()
+ default:
+ return x.typ.String()
+ }
+}
+
+// equal reports whether x and y represent the same type set.
+func (x *term) equal(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+ return x == y
+ case x.typ == nil || y.typ == nil:
+ return x.typ == y.typ
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ return x.tilde == y.tilde && Identical(x.typ, y.typ)
+}
+
+// union returns the union x ∪ y: zero, one, or two non-nil terms.
+func (x *term) union(y *term) (_, _ *term) {
+ // easy cases
+ switch {
+ case x == nil && y == nil:
+ return nil, nil // ∅ ∪ ∅ == ∅
+ case x == nil:
+ return y, nil // ∅ ∪ y == y
+ case y == nil:
+ return x, nil // x ∪ ∅ == x
+ case x.typ == nil:
+ return x, nil // 𝓤 ∪ y == 𝓤
+ case y.typ == nil:
+ return y, nil // x ∪ 𝓤 == 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return x, y // x ∪ y == (x, y) if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∪ ~t == ~t
+ // ~t ∪ T == ~t
+ // T ∪ ~t == ~t
+ // T ∪ T == T
+ if x.tilde || !y.tilde {
+ return x, nil
+ }
+ return y, nil
+}
+
+// intersect returns the intersection x ∩ y.
+func (x *term) intersect(y *term) *term {
+ // easy cases
+ switch {
+ case x == nil || y == nil:
+		return nil // ∅ ∩ y == ∅ and x ∩ ∅ == ∅
+ case x.typ == nil:
+ return y // 𝓤 ∩ y == y
+ case y.typ == nil:
+ return x // x ∩ 𝓤 == x
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return nil // x ∩ y == ∅ if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ∩ ~t == ~t
+ // ~t ∩ T == T
+ // T ∩ ~t == T
+ // T ∩ T == T
+ if !x.tilde || y.tilde {
+ return x
+ }
+ return y
+}
+
+// includes reports whether t ∈ x.
+func (x *term) includes(t Type) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return false // t ∈ ∅ == false
+ case x.typ == nil:
+ return true // t ∈ 𝓤 == true
+ }
+ // ∅ ⊂ x ⊂ 𝓤
+
+ u := t
+ if x.tilde {
+ u = under(u)
+ }
+ return Identical(x.typ, u)
+}
+
+// subsetOf reports whether x ⊆ y.
+func (x *term) subsetOf(y *term) bool {
+ // easy cases
+ switch {
+ case x == nil:
+ return true // ∅ ⊆ y == true
+ case y == nil:
+ return false // x ⊆ ∅ == false since x != ∅
+ case y.typ == nil:
+ return true // x ⊆ 𝓤 == true
+ case x.typ == nil:
+ return false // 𝓤 ⊆ y == false since y != 𝓤
+ }
+ // ∅ ⊂ x, y ⊂ 𝓤
+
+ if x.disjoint(y) {
+ return false // x ⊆ y == false if x ∩ y == ∅
+ }
+ // x.typ == y.typ
+
+ // ~t ⊆ ~t == true
+ // ~t ⊆ T == false
+ // T ⊆ ~t == true
+ // T ⊆ T == true
+ return !x.tilde || y.tilde
+}
+
+// disjoint reports whether x ∩ y == ∅.
+// x.typ and y.typ must not be nil.
+func (x *term) disjoint(y *term) bool {
+ if debug && (x.typ == nil || y.typ == nil) {
+ panic("invalid argument(s)")
+ }
+ ux := x.typ
+ if y.tilde {
+ ux = under(ux)
+ }
+ uy := y.typ
+ if x.tilde {
+ uy = under(uy)
+ }
+ return !Identical(ux, uy)
+}
diff --git a/src/cmd/compile/internal/types2/typeterm_test.go b/src/cmd/compile/internal/types2/typeterm_test.go
new file mode 100644
index 0000000..6d9c8db
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typeterm_test.go
@@ -0,0 +1,239 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "strings"
+ "testing"
+)
+
+var myInt = func() Type {
+ tname := NewTypeName(nopos, nil, "myInt", nil)
+ return NewNamed(tname, Typ[Int], nil)
+}()
+
+var testTerms = map[string]*term{
+ "∅": nil,
+ "𝓤": {},
+ "int": {false, Typ[Int]},
+ "~int": {true, Typ[Int]},
+ "string": {false, Typ[String]},
+ "~string": {true, Typ[String]},
+ "myInt": {false, myInt},
+}
+
+func TestTermString(t *testing.T) {
+ for want, x := range testTerms {
+ if got := x.String(); got != want {
+ t.Errorf("%v.String() == %v; want %v", x, got, want)
+ }
+ }
+}
+
+func split(s string, n int) []string {
+ r := strings.Split(s, " ")
+ if len(r) != n {
+ panic("invalid test case: " + s)
+ }
+ return r
+}
+
+func testTerm(name string) *term {
+ r, ok := testTerms[name]
+ if !ok {
+ panic("invalid test argument: " + name)
+ }
+ return r
+}
+
+func TestTermEqual(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 F",
+ "∅ int F",
+ "∅ ~int F",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int F",
+ "int myInt F",
+ "~int myInt F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ // equal is symmetric
+ x, y = y, x
+ if got := x.equal(y); got != want {
+ t.Errorf("%v.equal(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermUnion(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅ ∅",
+ "∅ 𝓤 𝓤 ∅",
+ "∅ int int ∅",
+ "∅ ~int ~int ∅",
+ "∅ myInt myInt ∅",
+ "𝓤 𝓤 𝓤 ∅",
+ "𝓤 int 𝓤 ∅",
+ "𝓤 ~int 𝓤 ∅",
+ "𝓤 myInt 𝓤 ∅",
+ "int int int ∅",
+ "int ~int ~int ∅",
+ "int string int string",
+ "int ~string int ~string",
+ "int myInt int myInt",
+ "~int ~string ~int ~string",
+ "~int myInt ~int ∅",
+
+ // union is symmetric, but the result order isn't - repeat symmetric cases explicitly
+ "𝓤 ∅ 𝓤 ∅",
+ "int ∅ int ∅",
+ "~int ∅ ~int ∅",
+ "myInt ∅ myInt ∅",
+ "int 𝓤 𝓤 ∅",
+ "~int 𝓤 𝓤 ∅",
+ "myInt 𝓤 𝓤 ∅",
+ "~int int ~int ∅",
+ "string int string int",
+ "~string int ~string int",
+ "myInt int myInt int",
+ "~string ~int ~string ~int",
+ "myInt ~int ~int ∅",
+ } {
+ args := split(test, 4)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want1 := testTerm(args[2])
+ want2 := testTerm(args[3])
+ if got1, got2 := x.union(y); !got1.equal(want1) || !got2.equal(want2) {
+ t.Errorf("%v.union(%v) = %v, %v; want %v, %v", x, y, got1, got2, want1, want2)
+ }
+ }
+}
+
+func TestTermIntersection(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ ∅",
+ "∅ 𝓤 ∅",
+ "∅ int ∅",
+ "∅ ~int ∅",
+ "∅ myInt ∅",
+ "𝓤 𝓤 𝓤",
+ "𝓤 int int",
+ "𝓤 ~int ~int",
+ "𝓤 myInt myInt",
+ "int int int",
+ "int ~int int",
+ "int string ∅",
+ "int ~string ∅",
+ "int string ∅",
+ "~int ~string ∅",
+ "~int myInt myInt",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := testTerm(args[2])
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ // intersect is symmetric
+ x, y = y, x
+ if got := x.intersect(y); !got.equal(want) {
+ t.Errorf("%v.intersect(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermIncludes(t *testing.T) {
+ for _, test := range []string{
+ "∅ int F",
+ "𝓤 int T",
+ "int int T",
+ "~int int T",
+ "~int myInt T",
+ "string int F",
+ "~string int F",
+ "myInt int F",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1]).typ
+ want := args[2] == "T"
+ if got := x.includes(y); got != want {
+ t.Errorf("%v.includes(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermSubsetOf(t *testing.T) {
+ for _, test := range []string{
+ "∅ ∅ T",
+ "𝓤 𝓤 T",
+ "int int T",
+ "~int ~int T",
+ "myInt myInt T",
+ "∅ 𝓤 T",
+ "∅ int T",
+ "∅ ~int T",
+ "∅ myInt T",
+ "𝓤 int F",
+ "𝓤 ~int F",
+ "𝓤 myInt F",
+ "int ~int T",
+ "int myInt F",
+ "~int myInt F",
+ "myInt int F",
+ "myInt ~int T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.subsetOf(y); got != want {
+ t.Errorf("%v.subsetOf(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
+
+func TestTermDisjoint(t *testing.T) {
+ for _, test := range []string{
+ "int int F",
+ "~int ~int F",
+ "int ~int F",
+ "int string T",
+ "int ~string T",
+ "int myInt T",
+ "~int ~string T",
+ "~int myInt F",
+ "string myInt T",
+ "~string myInt T",
+ } {
+ args := split(test, 3)
+ x := testTerm(args[0])
+ y := testTerm(args[1])
+ want := args[2] == "T"
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ // disjoint is symmetric
+ x, y = y, x
+ if got := x.disjoint(y); got != want {
+ t.Errorf("%v.disjoint(%v) = %v; want %v", x, y, got, want)
+ }
+ }
+}
diff --git a/src/cmd/compile/internal/types2/typexpr.go b/src/cmd/compile/internal/types2/typexpr.go
new file mode 100644
index 0000000..7e30562
--- /dev/null
+++ b/src/cmd/compile/internal/types2/typexpr.go
@@ -0,0 +1,568 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type-checking of identifiers and type expressions.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "go/constant"
+ "strings"
+)
+
+// ident type-checks identifier e and initializes x with the value or type of e.
+// If an error occurred, x.mode is set to invalid.
+// For the meaning of def, see Checker.definedType, below.
+// If wantType is set, the identifier e is expected to denote a type.
+//
+func (check *Checker) ident(x *operand, e *syntax.Name, def *Named, wantType bool) {
+ x.mode = invalid
+ x.expr = e
+
+ // Note that we cannot use check.lookup here because the returned scope
+ // may be different from obj.Parent(). See also Scope.LookupParent doc.
+ scope, obj := check.scope.LookupParent(e.Value, check.pos)
+ switch obj {
+ case nil:
+ if e.Value == "_" {
+ // Blank identifiers are never declared, but the current identifier may
+ // be a placeholder for a receiver type parameter. In this case we can
+ // resolve its type and object from Checker.recvTParamMap.
+ if tpar := check.recvTParamMap[e]; tpar != nil {
+ x.mode = typexpr
+ x.typ = tpar
+ } else {
+ check.error(e, "cannot use _ as value or type")
+ }
+ } else {
+ if check.conf.CompilerErrorMessages {
+ check.errorf(e, "undefined: %s", e.Value)
+ } else {
+ check.errorf(e, "undeclared name: %s", e.Value)
+ }
+ }
+ return
+ case universeAny, universeComparable:
+ if !check.allowVersion(check.pkg, 1, 18) {
+ check.errorf(e, "undeclared name: %s (requires version go1.18 or later)", e.Value)
+ return // avoid follow-on errors
+ }
+ }
+ check.recordUse(e, obj)
+
+ // Type-check the object.
+ // Only call Checker.objDecl if the object doesn't have a type yet
+ // (in which case we must actually determine it) or the object is a
+ // TypeName and we also want a type (in which case we might detect
+ // a cycle which needs to be reported). Otherwise we can skip the
+ // call and avoid a possible cycle error in favor of the more
+ // informative "not a type/value" error that this function's caller
+ // will issue (see issue #25790).
+ typ := obj.Type()
+ if _, gotType := obj.(*TypeName); typ == nil || gotType && wantType {
+ check.objDecl(obj, def)
+ typ = obj.Type() // type must have been assigned by Checker.objDecl
+ }
+ assert(typ != nil)
+
+ // The object may have been dot-imported.
+ // If so, mark the respective package as used.
+ // (This code is only needed for dot-imports. Without them,
+ // we only have to mark variables, see *Var case below).
+ if pkgName := check.dotImportMap[dotImportKey{scope, obj.Name()}]; pkgName != nil {
+ pkgName.used = true
+ }
+
+ switch obj := obj.(type) {
+ case *PkgName:
+ check.errorf(e, "use of package %s not in selector", obj.name)
+ return
+
+ case *Const:
+ check.addDeclDep(obj)
+ if typ == Typ[Invalid] {
+ return
+ }
+ if obj == universeIota {
+ if check.iota == nil {
+ check.error(e, "cannot use iota outside constant declaration")
+ return
+ }
+ x.val = check.iota
+ } else {
+ x.val = obj.val
+ }
+ assert(x.val != nil)
+ x.mode = constant_
+
+ case *TypeName:
+ if check.isBrokenAlias(obj) {
+ check.errorf(e, "invalid use of type alias %s in recursive type (see issue #50729)", obj.name)
+ return
+ }
+ x.mode = typexpr
+
+ case *Var:
+ // It's ok to mark non-local variables, but ignore variables
+ // from other packages to avoid potential race conditions with
+ // dot-imported variables.
+ if obj.pkg == check.pkg {
+ obj.used = true
+ }
+ check.addDeclDep(obj)
+ if typ == Typ[Invalid] {
+ return
+ }
+ x.mode = variable
+
+ case *Func:
+ check.addDeclDep(obj)
+ x.mode = value
+
+ case *Builtin:
+ x.id = obj.id
+ x.mode = builtin
+
+ case *Nil:
+ x.mode = nilvalue
+
+ default:
+ unreachable()
+ }
+
+ x.typ = typ
+}
+
+// typ type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must not be an (uninstantiated) generic type.
+func (check *Checker) typ(e syntax.Expr) Type {
+ return check.definedType(e, nil)
+}
+
+// varType type-checks the type expression e and returns its type, or Typ[Invalid].
+// The type must not be an (uninstantiated) generic type and it must not be a
+// constraint interface.
+func (check *Checker) varType(e syntax.Expr) Type {
+ typ := check.definedType(e, nil)
+ check.validVarType(e, typ)
+ return typ
+}
+
+// validVarType reports an error if typ is a constraint interface.
+// The expression e is used for error reporting, if any.
+func (check *Checker) validVarType(e syntax.Expr, typ Type) {
+ // If we have a type parameter there's nothing to do.
+ if isTypeParam(typ) {
+ return
+ }
+
+ // We don't want to call under() or complete interfaces while we are in
+ // the middle of type-checking parameter declarations that might belong
+ // to interface methods. Delay this check to the end of type-checking.
+ check.later(func() {
+ if t, _ := under(typ).(*Interface); t != nil {
+ pos := syntax.StartPos(e)
+ tset := computeInterfaceTypeSet(check, pos, t) // TODO(gri) is this the correct position?
+ if !tset.IsMethodSet() {
+ if tset.comparable {
+ check.softErrorf(pos, "interface is (or embeds) comparable")
+ } else {
+ check.softErrorf(pos, "interface contains type constraints")
+ }
+ }
+ }
+ })
+}
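+
+// Illustrative note (editor's addition): for example, the delayed check
+// above rejects
+//	var x interface{ ~int }
+// with "interface contains type constraints", and
+//	var y interface{ comparable }
+// with "interface is (or embeds) comparable".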
+
+// definedType is like typ but also accepts a type name def.
+// If def != nil, e is the type specification for the defined type def, declared
+// in a type declaration, and def.underlying will be set to the type of e before
+// any components of e are type-checked.
+//
+func (check *Checker) definedType(e syntax.Expr, def *Named) Type {
+ typ := check.typInternal(e, def)
+ assert(isTyped(typ))
+ if isGeneric(typ) {
+ check.errorf(e, "cannot use generic type %s without instantiation", typ)
+ typ = Typ[Invalid]
+ }
+ check.recordTypeAndValue(e, typexpr, typ, nil)
+ return typ
+}
+
+// genericType is like typ but the type must be an (uninstantiated) generic type.
+func (check *Checker) genericType(e syntax.Expr, reportErr bool) Type {
+ typ := check.typInternal(e, nil)
+ assert(isTyped(typ))
+ if typ != Typ[Invalid] && !isGeneric(typ) {
+ if reportErr {
+ check.errorf(e, "%s is not a generic type", typ)
+ }
+ typ = Typ[Invalid]
+ }
+ // TODO(gri) what is the correct call below?
+ check.recordTypeAndValue(e, typexpr, typ, nil)
+ return typ
+}
+
+// goTypeName returns the Go type name for typ and
+// removes any occurrences of "types2." from that name.
+func goTypeName(typ Type) string {
+ return strings.Replace(fmt.Sprintf("%T", typ), "types2.", "", -1) // strings.ReplaceAll is not available in Go 1.4
+}
+
+// typInternal drives type checking of types.
+// Must only be called by definedType or genericType.
+//
+func (check *Checker) typInternal(e0 syntax.Expr, def *Named) (T Type) {
+ if check.conf.Trace {
+ check.trace(e0.Pos(), "-- type %s", e0)
+ check.indent++
+ defer func() {
+ check.indent--
+ var under Type
+ if T != nil {
+ // Calling under() here may lead to endless instantiations.
+ // Test case: type T[P any] *T[P]
+ under = safeUnderlying(T)
+ }
+ if T == under {
+ check.trace(e0.Pos(), "=> %s // %s", T, goTypeName(T))
+ } else {
+ check.trace(e0.Pos(), "=> %s (under = %s) // %s", T, under, goTypeName(T))
+ }
+ }()
+ }
+
+ switch e := e0.(type) {
+ case *syntax.BadExpr:
+ // ignore - error reported before
+
+ case *syntax.Name:
+ var x operand
+ check.ident(&x, e, def, true)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ def.setUnderlying(typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(&x, "%s used as type", &x)
+ default:
+ check.errorf(&x, "%s is not a type", &x)
+ }
+
+ case *syntax.SelectorExpr:
+ var x operand
+ check.selector(&x, e, def)
+
+ switch x.mode {
+ case typexpr:
+ typ := x.typ
+ def.setUnderlying(typ)
+ return typ
+ case invalid:
+ // ignore - error reported before
+ case novalue:
+ check.errorf(&x, "%s used as type", &x)
+ default:
+ check.errorf(&x, "%s is not a type", &x)
+ }
+
+ case *syntax.IndexExpr:
+ if !check.allowVersion(check.pkg, 1, 18) {
+ check.versionErrorf(e.Pos(), "go1.18", "type instantiation")
+ }
+ return check.instantiatedType(e.X, unpackExpr(e.Index), def)
+
+ case *syntax.ParenExpr:
+ // Generic types must be instantiated before they can be used in any form.
+ // Consequently, generic types cannot be parenthesized.
+ return check.definedType(e.X, def)
+
+ case *syntax.ArrayType:
+ typ := new(Array)
+ def.setUnderlying(typ)
+ if e.Len != nil {
+ typ.len = check.arrayLength(e.Len)
+ } else {
+ // [...]array
+ check.error(e, "invalid use of [...] array (outside a composite literal)")
+ typ.len = -1
+ }
+ typ.elem = check.varType(e.Elem)
+ if typ.len >= 0 {
+ return typ
+ }
+ // report error if we encountered [...]
+
+ case *syntax.SliceType:
+ typ := new(Slice)
+ def.setUnderlying(typ)
+ typ.elem = check.varType(e.Elem)
+ return typ
+
+ case *syntax.DotsType:
+ // dots are handled explicitly where they are legal
+ // (array composite literals and parameter lists)
+ check.error(e, "invalid use of '...'")
+ check.use(e.Elem)
+
+ case *syntax.StructType:
+ typ := new(Struct)
+ def.setUnderlying(typ)
+ check.structType(typ, e)
+ return typ
+
+ case *syntax.Operation:
+ if e.Op == syntax.Mul && e.Y == nil {
+ typ := new(Pointer)
+ typ.base = Typ[Invalid] // avoid nil base in invalid recursive type declaration
+ def.setUnderlying(typ)
+ typ.base = check.varType(e.X)
+ // If typ.base is invalid, it's unlikely that *base is particularly
+			// useful - even a valid dereference will lead to an invalid
+ // type again, and in some cases we get unexpected follow-on errors
+ // (e.g., see #49005). Return an invalid type instead.
+ if typ.base == Typ[Invalid] {
+ return Typ[Invalid]
+ }
+ return typ
+ }
+
+ check.errorf(e0, "%s is not a type", e0)
+ check.use(e0)
+
+ case *syntax.FuncType:
+ typ := new(Signature)
+ def.setUnderlying(typ)
+ check.funcType(typ, nil, nil, e)
+ return typ
+
+ case *syntax.InterfaceType:
+ typ := check.newInterface()
+ def.setUnderlying(typ)
+ if def != nil {
+ typ.obj = def.obj
+ }
+ check.interfaceType(typ, e, def)
+ return typ
+
+ case *syntax.MapType:
+ typ := new(Map)
+ def.setUnderlying(typ)
+
+ typ.key = check.varType(e.Key)
+ typ.elem = check.varType(e.Value)
+
+ // spec: "The comparison operators == and != must be fully defined
+ // for operands of the key type; thus the key type must not be a
+ // function, map, or slice."
+ //
+ // Delay this check because it requires fully setup types;
+ // it is safe to continue in any case (was issue 6667).
+ check.later(func() {
+ if !Comparable(typ.key) {
+ var why string
+ if isTypeParam(typ.key) {
+ why = " (missing comparable constraint)"
+ }
+ check.errorf(e.Key, "invalid map key type %s%s", typ.key, why)
+ }
+ })
+
+ return typ
+
+ case *syntax.ChanType:
+ typ := new(Chan)
+ def.setUnderlying(typ)
+
+ dir := SendRecv
+ switch e.Dir {
+ case 0:
+ // nothing to do
+ case syntax.SendOnly:
+ dir = SendOnly
+ case syntax.RecvOnly:
+ dir = RecvOnly
+ default:
+ check.errorf(e, invalidAST+"unknown channel direction %d", e.Dir)
+ // ok to continue
+ }
+
+ typ.dir = dir
+ typ.elem = check.varType(e.Elem)
+ return typ
+
+ default:
+ check.errorf(e0, "%s is not a type", e0)
+ check.use(e0)
+ }
+
+ typ := Typ[Invalid]
+ def.setUnderlying(typ)
+ return typ
+}
+
+func (check *Checker) instantiatedType(x syntax.Expr, xlist []syntax.Expr, def *Named) (res Type) {
+ if check.conf.Trace {
+ check.trace(x.Pos(), "-- instantiating %s with %s", x, xlist)
+ check.indent++
+ defer func() {
+ check.indent--
+ // Don't format the underlying here. It will always be nil.
+ check.trace(x.Pos(), "=> %s", res)
+ }()
+ }
+
+ gtyp := check.genericType(x, true)
+ if gtyp == Typ[Invalid] {
+ return gtyp // error already reported
+ }
+
+ orig, _ := gtyp.(*Named)
+ if orig == nil {
+ panic(fmt.Sprintf("%v: cannot instantiate %v", x.Pos(), gtyp))
+ }
+
+ // evaluate arguments
+ targs := check.typeList(xlist)
+ if targs == nil {
+ def.setUnderlying(Typ[Invalid]) // avoid errors later due to lazy instantiation
+ return Typ[Invalid]
+ }
+
+ // enableTypeTypeInference controls whether to infer missing type arguments
+ // using constraint type inference. See issue #51527.
+ const enableTypeTypeInference = false
+
+ // create the instance
+ ctxt := check.bestContext(nil)
+ h := ctxt.instanceHash(orig, targs)
+ // targs may be incomplete, and require inference. In any case we should de-duplicate.
+ inst, _ := ctxt.lookup(h, orig, targs).(*Named)
+ // If inst is non-nil, we can't just return here. Inst may have been
+ // constructed via recursive substitution, in which case we wouldn't do the
+ // validation below. Ensure that the validation (and resulting errors) runs
+ // for each instantiated type in the source.
+ if inst == nil {
+ // x may be a selector for an imported type; use its start pos rather than x.Pos().
+ tname := NewTypeName(syntax.StartPos(x), orig.obj.pkg, orig.obj.name, nil)
+ inst = check.newNamed(tname, orig, nil, nil, nil) // underlying, methods and tparams are set when named is resolved
+ inst.targs = newTypeList(targs)
+ inst = ctxt.update(h, orig, targs, inst).(*Named)
+ }
+ def.setUnderlying(inst)
+
+ inst.resolver = func(ctxt *Context, n *Named) (*TypeParamList, Type, *methodList) {
+ tparams := n.orig.TypeParams().list()
+
+ targs := n.targs.list()
+ if enableTypeTypeInference && len(targs) < len(tparams) {
+ // If inference fails, len(inferred) will be 0, and inst.underlying will
+ // be set to Typ[Invalid] in expandNamed.
+ inferred := check.infer(x.Pos(), tparams, targs, nil, nil)
+ if len(inferred) > len(targs) {
+ n.targs = newTypeList(inferred)
+ }
+ }
+
+ return expandNamed(ctxt, n, x.Pos())
+ }
+
+ // orig.tparams may not be set up, so we need to do expansion later.
+ check.later(func() {
+ // This is an instance from the source, not from recursive substitution,
+ // and so it must be resolved during type-checking so that we can report
+ // errors.
+ inst.resolve(ctxt)
+ // Since check is non-nil, we can still mutate inst. Unpinning the resolver
+ // frees some memory.
+ inst.resolver = nil
+ check.recordInstance(x, inst.TypeArgs().list(), inst)
+
+ if check.validateTArgLen(x.Pos(), inst.tparams.Len(), inst.targs.Len()) {
+ if i, err := check.verify(x.Pos(), inst.tparams.list(), inst.targs.list()); err != nil {
+ // best position for error reporting
+ pos := x.Pos()
+ if i < len(xlist) {
+ pos = syntax.StartPos(xlist[i])
+ }
+ check.softErrorf(pos, "%s", err)
+ } else {
+ check.mono.recordInstance(check.pkg, x.Pos(), inst.tparams.list(), inst.targs.list(), xlist)
+ }
+ }
+
+ check.validType(inst)
+ })
+
+ return inst
+}
+
+// arrayLength type-checks the array length expression e
+// and returns the constant length >= 0, or a value < 0
+// to indicate an error (and thus an unknown length).
+func (check *Checker) arrayLength(e syntax.Expr) int64 {
+ // If e is an identifier, the array declaration might be an
+ // attempt at a parameterized type declaration with missing
+ // constraint. Provide an error message that mentions array
+ // length.
+ if name, _ := e.(*syntax.Name); name != nil {
+ obj := check.lookup(name.Value)
+ if obj == nil {
+ check.errorf(name, "undeclared name %s for array length", name.Value)
+ return -1
+ }
+ if _, ok := obj.(*Const); !ok {
+ check.errorf(name, "invalid array length %s", name.Value)
+ return -1
+ }
+ }
+
+ var x operand
+ check.expr(&x, e)
+ if x.mode != constant_ {
+ if x.mode != invalid {
+ check.errorf(&x, "array length %s must be constant", &x)
+ }
+ return -1
+ }
+
+ if isUntyped(x.typ) || isInteger(x.typ) {
+ if val := constant.ToInt(x.val); val.Kind() == constant.Int {
+ if representableConst(val, check, Typ[Int], nil) {
+ if n, ok := constant.Int64Val(val); ok && n >= 0 {
+ return n
+ }
+ check.errorf(&x, "invalid array length %s", &x)
+ return -1
+ }
+ }
+ }
+
+ check.errorf(&x, "array length %s must be integer", &x)
+ return -1
+}
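+
+// Illustrative note (editor's addition): for example, with an undeclared n,
+//	type A [n]int
+// reports "undeclared name n for array length", while
+//	const size = 4
+//	type B [size]int
+// takes the constant path above and yields length 4.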
+
+// typeList provides the list of types corresponding to the incoming expression list.
+// If an error occurred, the result is nil, but all list elements were type-checked.
+func (check *Checker) typeList(list []syntax.Expr) []Type {
+ res := make([]Type, len(list)) // res != nil even if len(list) == 0
+ for i, x := range list {
+ t := check.varType(x)
+ if t == Typ[Invalid] {
+ res = nil
+ }
+ if res != nil {
+ res[i] = t
+ }
+ }
+ return res
+}
diff --git a/src/cmd/compile/internal/types2/unify.go b/src/cmd/compile/internal/types2/unify.go
new file mode 100644
index 0000000..97d327c
--- /dev/null
+++ b/src/cmd/compile/internal/types2/unify.go
@@ -0,0 +1,582 @@
+// Copyright 2020 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file implements type unification.
+
+package types2
+
+import (
+ "bytes"
+ "fmt"
+ "strings"
+)
+
+// The unifier maintains two separate sets of type parameters x and y
+// which are used to resolve type parameters in the x and y arguments
+// provided to the unify call. For unidirectional unification, only
+// one of these sets (say x) is provided, and then type parameters are
+// only resolved for the x argument passed to unify, not the y argument
+// (even if that possibly also contains the same type parameters). This
+// is crucial to infer the type parameters of self-recursive calls:
+//
+// func f[P any](a P) { f(a) }
+//
+// For the call f(a) we want to infer that the type argument for P is P.
+// During unification, the parameter type P must be resolved to the type
+// parameter P ("x" side), but the argument type P must be left alone so
+// that unification resolves the type parameter P to P.
+//
+// For bidirectional unification, both sets are provided. This enables
+// unification to go from argument to parameter type and vice versa.
+// For constraint type inference, we use bidirectional unification
+// where both the x and y type parameters are identical. This is done
+// by setting up one of them (using init) and then assigning its value
+// to the other.
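+//
+// As a minimal illustration, unifying the parameter type []P ("x" side)
+// against the argument type []int proceeds element-wise and infers P ➞ int:
+//
+//	func g[P any](s []P) {}
+//	var a []int
+//	g(a) // P is inferred to be int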
+
+const (
+ // Upper limit for recursion depth. Used to catch infinite recursions
+ // due to implementation issues (e.g., see issues #48619, #48656).
+ unificationDepthLimit = 50
+
+ // Whether to panic when unificationDepthLimit is reached. Turn on when
+ // investigating infinite recursion.
+ panicAtUnificationDepthLimit = false
+
+ // If enableCoreTypeUnification is set, unification will consider
+ // the core types, if any, of non-local (unbound) type parameters.
+ enableCoreTypeUnification = true
+
+ // If traceInference is set, unification will print a trace of its operation.
+ // Interpretation of trace:
+ // x ≡ y attempt to unify types x and y
+ // p ➞ y type parameter p is set to type y (p is inferred to be y)
+ // p ⇄ q type parameters p and q match (p is inferred to be q and vice versa)
+ // x ≢ y types x and y cannot be unified
+ // [p, q, ...] ➞ [x, y, ...] mapping from type parameters to types
+ traceInference = false
+)
+
+// A unifier maintains the current type parameters for x and y
+// and the respective types inferred for each type parameter.
+// A unifier is created by calling newUnifier.
+type unifier struct {
+ exact bool
+	x, y  tparamsList // x and y must be initialized via tparamsList.init
+ types []Type // inferred types, shared by x and y
+ depth int // recursion depth during unification
+}
+
+// newUnifier returns a new unifier.
+// If exact is set, unification requires unified types to match
+// exactly. If exact is not set, a named type's underlying type
+// is considered if unification would fail otherwise, and the
+// direction of channels is ignored.
+// TODO(gri) exact is not set anymore by a caller. Consider removing it.
+func newUnifier(exact bool) *unifier {
+ u := &unifier{exact: exact}
+ u.x.unifier = u
+ u.y.unifier = u
+ return u
+}
+
+// unify attempts to unify x and y and reports whether it succeeded.
+func (u *unifier) unify(x, y Type) bool {
+ return u.nify(x, y, nil)
+}
+
+func (u *unifier) tracef(format string, args ...interface{}) {
+ fmt.Println(strings.Repeat(". ", u.depth) + sprintf(nil, true, format, args...))
+}
+
+// A tparamsList describes a list of type parameters and the types inferred for them.
+type tparamsList struct {
+ unifier *unifier
+ tparams []*TypeParam
+ // For each tparams element, there is a corresponding type slot index in indices.
+ // index < 0: unifier.types[-index-1] == nil
+ // index == 0: no type slot allocated yet
+ // index > 0: unifier.types[index-1] == typ
+ // Joined tparams elements share the same type slot and thus have the same index.
+ // By using a negative index for nil types we don't need to check unifier.types
+ // to see if we have a type or not.
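+	// For example, after set(0, Typ[Int]) we have indices[0] == 1 and
+	// unifier.types[0] == Typ[Int]; after a join of two parameters with no
+	// inferred types yet, the x- and y-side entries share the same negative
+	// index and the common slot in unifier.types is still nil.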
+ indices []int // len(d.indices) == len(d.tparams)
+}
+
+// String returns a string representation for a tparamsList. For debugging.
+func (d *tparamsList) String() string {
+ var buf bytes.Buffer
+ w := newTypeWriter(&buf, nil)
+ w.byte('[')
+ for i, tpar := range d.tparams {
+ if i > 0 {
+ w.string(", ")
+ }
+ w.typ(tpar)
+ w.string(": ")
+ w.typ(d.at(i))
+ }
+ w.byte(']')
+ return buf.String()
+}
+
+// init initializes d with the given type parameters.
+// The type parameters must be in the order in which they appear in their declaration
+// (this ensures that the tparams indices match the respective type parameter index).
+func (d *tparamsList) init(tparams []*TypeParam) {
+ if len(tparams) == 0 {
+ return
+ }
+ if debug {
+ for i, tpar := range tparams {
+ assert(i == tpar.index)
+ }
+ }
+ d.tparams = tparams
+ d.indices = make([]int, len(tparams))
+}
+
+// join unifies the i'th type parameter of x with the j'th type parameter of y.
+// If both type parameters already have a type associated with them and they are
+// not joined, join fails and returns false.
+func (u *unifier) join(i, j int) bool {
+ if traceInference {
+ u.tracef("%s ⇄ %s", u.x.tparams[i], u.y.tparams[j])
+ }
+ ti := u.x.indices[i]
+ tj := u.y.indices[j]
+ switch {
+ case ti == 0 && tj == 0:
+		// Neither type parameter has a type slot associated with it.
+ // Allocate a new joined nil type slot (negative index).
+ u.types = append(u.types, nil)
+ u.x.indices[i] = -len(u.types)
+ u.y.indices[j] = -len(u.types)
+ case ti == 0:
+ // The type parameter for x has no type slot yet. Use slot of y.
+ u.x.indices[i] = tj
+ case tj == 0:
+ // The type parameter for y has no type slot yet. Use slot of x.
+ u.y.indices[j] = ti
+
+ // Both type parameters have a slot: ti != 0 && tj != 0.
+ case ti == tj:
+ // Both type parameters already share the same slot. Nothing to do.
+ break
+ case ti > 0 && tj > 0:
+ // Both type parameters have (possibly different) inferred types. Cannot join.
+ // TODO(gri) Should we check if types are identical? Investigate.
+ return false
+ case ti > 0:
+ // Only the type parameter for x has an inferred type. Use x slot for y.
+ u.y.setIndex(j, ti)
+ // This case is handled like the default case.
+ // case tj > 0:
+ // // Only the type parameter for y has an inferred type. Use y slot for x.
+ // u.x.setIndex(i, tj)
+ default:
+ // Neither type parameter has an inferred type. Use y slot for x
+ // (or x slot for y, it doesn't matter).
+ u.x.setIndex(i, tj)
+ }
+ return true
+}
+
+// If typ is a type parameter of d, index returns the type parameter index.
+// Otherwise, the result is < 0.
+func (d *tparamsList) index(typ Type) int {
+ if tpar, ok := typ.(*TypeParam); ok {
+ return tparamIndex(d.tparams, tpar)
+ }
+ return -1
+}
+
+// If tpar is a type parameter in list, tparamIndex returns the type parameter index.
+// Otherwise, the result is < 0. tpar must not be nil.
+func tparamIndex(list []*TypeParam, tpar *TypeParam) int {
+ // Once a type parameter is bound its index is >= 0. However, there are some
+ // code paths (namely tracing and type hashing) by which it is possible to
+ // arrive here with a type parameter that has not been bound, hence the check
+ // for 0 <= i below.
+ // TODO(rfindley): investigate a better approach for guarding against using
+ // unbound type parameters.
+ if i := tpar.index; 0 <= i && i < len(list) && list[i] == tpar {
+ return i
+ }
+ return -1
+}
+
+// setIndex sets the type slot index for the i'th type parameter
+// (and all its joined parameters) to tj. The type parameter
+// must have a (possibly nil) type slot associated with it.
+func (d *tparamsList) setIndex(i, tj int) {
+ ti := d.indices[i]
+ assert(ti != 0 && tj != 0)
+ for k, tk := range d.indices {
+ if tk == ti {
+ d.indices[k] = tj
+ }
+ }
+}
+
+// at returns the type inferred for the i'th type parameter; or nil.
+func (d *tparamsList) at(i int) Type {
+ if ti := d.indices[i]; ti > 0 {
+ return d.unifier.types[ti-1]
+ }
+ return nil
+}
+
+// set sets the type typ for the i'th type parameter;
+// typ must not be nil and it must not have been set before.
+func (d *tparamsList) set(i int, typ Type) {
+ assert(typ != nil)
+ u := d.unifier
+ if traceInference {
+ u.tracef("%s ➞ %s", d.tparams[i], typ)
+ }
+ switch ti := d.indices[i]; {
+ case ti < 0:
+ u.types[-ti-1] = typ
+ d.setIndex(i, -ti)
+ case ti == 0:
+ u.types = append(u.types, typ)
+ d.indices[i] = len(u.types)
+ default:
+ panic("type already set")
+ }
+}
+
+// unknowns returns the number of type parameters for which no type has been set yet.
+func (d *tparamsList) unknowns() int {
+ n := 0
+ for _, ti := range d.indices {
+ if ti <= 0 {
+ n++
+ }
+ }
+ return n
+}
+
+// types returns the list of inferred types (via unification) for the type parameters
+// described by d, and an index. If all types were inferred, the returned index is < 0.
+// Otherwise, it is the index of the first type parameter which couldn't be inferred;
+// i.e., for which list[index] is nil.
+func (d *tparamsList) types() (list []Type, index int) {
+ list = make([]Type, len(d.tparams))
+ index = -1
+ for i := range d.tparams {
+ t := d.at(i)
+ list[i] = t
+ if index < 0 && t == nil {
+ index = i
+ }
+ }
+ return
+}
+
+func (u *unifier) nifyEq(x, y Type, p *ifacePair) bool {
+ return x == y || u.nify(x, y, p)
+}
+
+// nify implements the core unification algorithm which is an
+// adapted version of Checker.identical. For changes to that
+// code the corresponding changes should be made here.
+// Must not be called directly from outside the unifier.
+func (u *unifier) nify(x, y Type, p *ifacePair) (result bool) {
+ if traceInference {
+ u.tracef("%s ≡ %s", x, y)
+ }
+
+ // Stop gap for cases where unification fails.
+ if u.depth >= unificationDepthLimit {
+ if traceInference {
+ u.tracef("depth %d >= %d", u.depth, unificationDepthLimit)
+ }
+ if panicAtUnificationDepthLimit {
+ panic("unification reached recursion depth limit")
+ }
+ return false
+ }
+ u.depth++
+ defer func() {
+ u.depth--
+ if traceInference && !result {
+ u.tracef("%s ≢ %s", x, y)
+ }
+ }()
+
+ if !u.exact {
+ // If exact unification is known to fail because we attempt to
+ // match a type name against an unnamed type literal, consider
+ // the underlying type of the named type.
+ // (We use !hasName to exclude any type with a name, including
+		// basic types and type parameters; the rest are unnamed types.)
+ if nx, _ := x.(*Named); nx != nil && !hasName(y) {
+ if traceInference {
+ u.tracef("under %s ≡ %s", nx, y)
+ }
+ return u.nify(nx.under(), y, p)
+ } else if ny, _ := y.(*Named); ny != nil && !hasName(x) {
+ if traceInference {
+ u.tracef("%s ≡ under %s", x, ny)
+ }
+ return u.nify(x, ny.under(), p)
+ }
+ }
+
+ // Cases where at least one of x or y is a type parameter.
+ switch i, j := u.x.index(x), u.y.index(y); {
+ case i >= 0 && j >= 0:
+ // both x and y are type parameters
+ if u.join(i, j) {
+ return true
+ }
+ // both x and y have an inferred type - they must match
+ return u.nifyEq(u.x.at(i), u.y.at(j), p)
+
+ case i >= 0:
+ // x is a type parameter, y is not
+ if tx := u.x.at(i); tx != nil {
+ return u.nifyEq(tx, y, p)
+ }
+ // otherwise, infer type from y
+ u.x.set(i, y)
+ return true
+
+ case j >= 0:
+ // y is a type parameter, x is not
+ if ty := u.y.at(j); ty != nil {
+ return u.nifyEq(x, ty, p)
+ }
+ // otherwise, infer type from x
+ u.y.set(j, x)
+ return true
+ }
+
+ // If we get here and x or y is a type parameter, they are type parameters
+ // from outside our declaration list. Try to unify their core types, if any
+ // (see issue #50755 for a test case).
+ if enableCoreTypeUnification && !u.exact {
+ if isTypeParam(x) && !hasName(y) {
+ // When considering the type parameter for unification
+ // we look at the adjusted core term (adjusted core type
+ // with tilde information).
+			// If the adjusted core type is a named type N, the
+ // corresponding core type is under(N). Since !u.exact
+ // and y doesn't have a name, unification will end up
+ // comparing under(N) to y, so we can just use the core
+ // type instead. And we can ignore the tilde because we
+ // already look at the underlying types on both sides
+ // and we have known types on both sides.
+ // Optimization.
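+			// For example, if x is a type parameter constrained by
+			// interface{ ~[]byte } (core type []byte) and y is the
+			// unnamed type []byte, unification simply compares
+			// []byte against y.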
+ if cx := coreType(x); cx != nil {
+ if traceInference {
+ u.tracef("core %s ≡ %s", x, y)
+ }
+ return u.nify(cx, y, p)
+ }
+ } else if isTypeParam(y) && !hasName(x) {
+ // see comment above
+ if cy := coreType(y); cy != nil {
+ if traceInference {
+ u.tracef("%s ≡ core %s", x, y)
+ }
+ return u.nify(x, cy, p)
+ }
+ }
+ }
+
+ // For type unification, do not shortcut (x == y) for identical
+ // types. Instead keep comparing them element-wise to unify the
+	// matching (and equal) type parameter types. A simple test case
+ // where this matters is: func f[P any](a P) { f(a) } .
+
+ switch x := x.(type) {
+ case *Basic:
+ // Basic types are singletons except for the rune and byte
+ // aliases, thus we cannot solely rely on the x == y check
+ // above. See also comment in TypeName.IsAlias.
+ if y, ok := y.(*Basic); ok {
+ return x.kind == y.kind
+ }
+
+ case *Array:
+ // Two array types are identical if they have identical element types
+ // and the same array length.
+ if y, ok := y.(*Array); ok {
+ // If one or both array lengths are unknown (< 0) due to some error,
+ // assume they are the same to avoid spurious follow-on errors.
+ return (x.len < 0 || y.len < 0 || x.len == y.len) && u.nify(x.elem, y.elem, p)
+ }
+
+ case *Slice:
+ // Two slice types are identical if they have identical element types.
+ if y, ok := y.(*Slice); ok {
+ return u.nify(x.elem, y.elem, p)
+ }
+
+ case *Struct:
+ // Two struct types are identical if they have the same sequence of fields,
+ // and if corresponding fields have the same names, and identical types,
+ // and identical tags. Two embedded fields are considered to have the same
+ // name. Lower-case field names from different packages are always different.
+ if y, ok := y.(*Struct); ok {
+ if x.NumFields() == y.NumFields() {
+ for i, f := range x.fields {
+ g := y.fields[i]
+ if f.embedded != g.embedded ||
+ x.Tag(i) != y.Tag(i) ||
+ !f.sameId(g.pkg, g.name) ||
+ !u.nify(f.typ, g.typ, p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Pointer:
+ // Two pointer types are identical if they have identical base types.
+ if y, ok := y.(*Pointer); ok {
+ return u.nify(x.base, y.base, p)
+ }
+
+ case *Tuple:
+		// Two tuple types are identical if they have the same number of elements
+ // and corresponding elements have identical types.
+ if y, ok := y.(*Tuple); ok {
+ if x.Len() == y.Len() {
+ if x != nil {
+ for i, v := range x.vars {
+ w := y.vars[i]
+ if !u.nify(v.typ, w.typ, p) {
+ return false
+ }
+ }
+ }
+ return true
+ }
+ }
+
+ case *Signature:
+ // Two function types are identical if they have the same number of parameters
+ // and result values, corresponding parameter and result types are identical,
+ // and either both functions are variadic or neither is. Parameter and result
+ // names are not required to match.
+ // TODO(gri) handle type parameters or document why we can ignore them.
+ if y, ok := y.(*Signature); ok {
+ return x.variadic == y.variadic &&
+ u.nify(x.params, y.params, p) &&
+ u.nify(x.results, y.results, p)
+ }
+
+ case *Interface:
+ // Two interface types are identical if they have the same set of methods with
+ // the same names and identical function types. Lower-case method names from
+ // different packages are always different. The order of the methods is irrelevant.
+ if y, ok := y.(*Interface); ok {
+ xset := x.typeSet()
+ yset := y.typeSet()
+ if xset.comparable != yset.comparable {
+ return false
+ }
+ if !xset.terms.equal(yset.terms) {
+ return false
+ }
+ a := xset.methods
+ b := yset.methods
+ if len(a) == len(b) {
+ // Interface types are the only types where cycles can occur
+ // that are not "terminated" via named types; and such cycles
+ // can only be created via method parameter types that are
+ // anonymous interfaces (directly or indirectly) embedding
+ // the current interface. Example:
+ //
+ // type T interface {
+ // m() interface{T}
+ // }
+ //
+ // If two such (differently named) interfaces are compared,
+ // endless recursion occurs if the cycle is not detected.
+ //
+ // If x and y were compared before, they must be equal
+ // (if they were not, the recursion would have stopped);
+ // search the ifacePair stack for the same pair.
+ //
+ // This is a quadratic algorithm, but in practice these stacks
+ // are extremely short (bounded by the nesting depth of interface
+ // type declarations that recur via parameter types, an extremely
+ // rare occurrence). An alternative implementation might use a
+ // "visited" map, but that is probably less efficient overall.
+ q := &ifacePair{x, y, p}
+ for p != nil {
+ if p.identical(q) {
+ return true // same pair was compared before
+ }
+ p = p.prev
+ }
+ if debug {
+ assertSortedMethods(a)
+ assertSortedMethods(b)
+ }
+ for i, f := range a {
+ g := b[i]
+ if f.Id() != g.Id() || !u.nify(f.typ, g.typ, q) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *Map:
+ // Two map types are identical if they have identical key and value types.
+ if y, ok := y.(*Map); ok {
+ return u.nify(x.key, y.key, p) && u.nify(x.elem, y.elem, p)
+ }
+
+ case *Chan:
+ // Two channel types are identical if they have identical value types.
+ if y, ok := y.(*Chan); ok {
+ return (!u.exact || x.dir == y.dir) && u.nify(x.elem, y.elem, p)
+ }
+
+ case *Named:
+ // TODO(gri) This code differs now from the parallel code in Checker.identical. Investigate.
+ if y, ok := y.(*Named); ok {
+ xargs := x.targs.list()
+ yargs := y.targs.list()
+
+ if len(xargs) != len(yargs) {
+ return false
+ }
+
+ // TODO(gri) This is not always correct: two types may have the same names
+ // in the same package if one of them is nested in a function.
+ // Extremely unlikely but we need an always correct solution.
+ if x.obj.pkg == y.obj.pkg && x.obj.name == y.obj.name {
+ for i, x := range xargs {
+ if !u.nify(x, yargs[i], p) {
+ return false
+ }
+ }
+ return true
+ }
+ }
+
+ case *TypeParam:
+ // Two type parameters (which are not part of the type parameters of the
+ // enclosing type as those are handled in the beginning of this function)
+ // are identical if they originate in the same declaration.
+ return x == y
+
+ case nil:
+ // avoid a crash in case of nil type
+
+ default:
+ panic(sprintf(nil, true, "u.nify(%s, %s), u.x.tparams = %s", x, y, u.x.tparams))
+ }
+
+ return false
+}
diff --git a/src/cmd/compile/internal/types2/union.go b/src/cmd/compile/internal/types2/union.go
new file mode 100644
index 0000000..4a146f8
--- /dev/null
+++ b/src/cmd/compile/internal/types2/union.go
@@ -0,0 +1,192 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import "cmd/compile/internal/syntax"
+
+// ----------------------------------------------------------------------------
+// API
+
+// A Union represents a union of terms embedded in an interface.
+type Union struct {
+ terms []*Term // list of syntactical terms (not a canonicalized termlist)
+}
+
+// NewUnion returns a new Union type with the given terms.
+// It is an error to create an empty union; an empty union is syntactically impossible.
+func NewUnion(terms []*Term) *Union {
+ if len(terms) == 0 {
+ panic("empty union")
+ }
+ return &Union{terms}
+}
+
+func (u *Union) Len() int { return len(u.terms) }
+func (u *Union) Term(i int) *Term { return u.terms[i] }
+
+func (u *Union) Underlying() Type { return u }
+func (u *Union) String() string { return TypeString(u, nil) }
+
+// A Term represents a term in a Union.
+type Term term
+
+// NewTerm returns a new union term.
+func NewTerm(tilde bool, typ Type) *Term { return &Term{tilde, typ} }
+
+func (t *Term) Tilde() bool { return t.tilde }
+func (t *Term) Type() Type { return t.typ }
+func (t *Term) String() string { return (*term)(t).String() }
+
+// ----------------------------------------------------------------------------
+// Implementation
+
+// Avoid excessive type-checking times due to quadratic termlist operations.
+const maxTermCount = 100
+
+// parseUnion parses uexpr as a union of expressions.
+// The result is a Union type, or Typ[Invalid] for some errors.
+func parseUnion(check *Checker, uexpr syntax.Expr) Type {
+ blist, tlist := flattenUnion(nil, uexpr)
+ assert(len(blist) == len(tlist)-1)
+
+ var terms []*Term
+
+ var u Type
+ for i, x := range tlist {
+ term := parseTilde(check, x)
+ if len(tlist) == 1 && !term.tilde {
+ // Single type. Ok to return early because all relevant
+ // checks have been performed in parseTilde (no need to
+			// run through the term validity check below).
+ return term.typ // typ already recorded through check.typ in parseTilde
+ }
+ if len(terms) >= maxTermCount {
+ if u != Typ[Invalid] {
+ check.errorf(x, "cannot handle more than %d union terms (implementation limitation)", maxTermCount)
+ u = Typ[Invalid]
+ }
+ } else {
+ terms = append(terms, term)
+ u = &Union{terms}
+ }
+
+ if i > 0 {
+ check.recordTypeAndValue(blist[i-1], typexpr, u, nil)
+ }
+ }
+
+ if u == Typ[Invalid] {
+ return u
+ }
+
+ // Check validity of terms.
+ // Do this check later because it requires types to be set up.
+ // Note: This is a quadratic algorithm, but unions tend to be short.
+ check.later(func() {
+ for i, t := range terms {
+ if t.typ == Typ[Invalid] {
+ continue
+ }
+
+ u := under(t.typ)
+ f, _ := u.(*Interface)
+ if t.tilde {
+ if f != nil {
+ check.errorf(tlist[i], "invalid use of ~ (%s is an interface)", t.typ)
+ continue // don't report another error for t
+ }
+
+ if !Identical(u, t.typ) {
+ check.errorf(tlist[i], "invalid use of ~ (underlying type of %s is %s)", t.typ, u)
+ continue
+ }
+ }
+
+ // Stand-alone embedded interfaces are ok and are handled by the single-type case
+ // in the beginning. Embedded interfaces with tilde are excluded above. If we reach
+ // here, we must have at least two terms in the syntactic term list (but not necessarily
+ // in the term list of the union's type set).
+ if f != nil {
+ tset := f.typeSet()
+ switch {
+ case tset.NumMethods() != 0:
+ check.errorf(tlist[i], "cannot use %s in union (%s contains methods)", t, t)
+ case t.typ == universeComparable.Type():
+ check.error(tlist[i], "cannot use comparable in union")
+ case tset.comparable:
+ check.errorf(tlist[i], "cannot use %s in union (%s embeds comparable)", t, t)
+ }
+ continue // terms with interface types are not subject to the no-overlap rule
+ }
+
+ // Report overlapping (non-disjoint) terms such as
+ // a|a, a|~a, ~a|~a, and ~a|A (where under(A) == a).
+ if j := overlappingTerm(terms[:i], t); j >= 0 {
+ check.softErrorf(tlist[i], "overlapping terms %s and %s", t, terms[j])
+ }
+ }
+ })
+
+ return u
+}
+
+func parseTilde(check *Checker, tx syntax.Expr) *Term {
+ x := tx
+ var tilde bool
+ if op, _ := x.(*syntax.Operation); op != nil && op.Op == syntax.Tilde {
+ x = op.X
+ tilde = true
+ }
+ typ := check.typ(x)
+ // Embedding stand-alone type parameters is not permitted (issue #47127).
+ // We don't need this restriction anymore if we make the underlying type of a type
+ // parameter its constraint interface: if we embed a lone type parameter, we will
+ // simply use its underlying type (like we do for other named, embedded interfaces),
+ // and since the underlying type is an interface the embedding is well defined.
+ if isTypeParam(typ) {
+ check.error(x, "cannot embed a type parameter")
+ typ = Typ[Invalid]
+ }
+ term := NewTerm(tilde, typ)
+ if tilde {
+ check.recordTypeAndValue(tx, typexpr, &Union{[]*Term{term}}, nil)
+ }
+ return term
+}
+
+// overlappingTerm reports the index of the term x in terms that
+// overlaps (is not disjoint with) y. The result is < 0 if there is no
+// such term. The type of term y must not be an interface, and terms
+// with an interface type are ignored in the terms list.
+func overlappingTerm(terms []*Term, y *Term) int {
+ assert(!IsInterface(y.typ))
+ for i, x := range terms {
+ if IsInterface(x.typ) {
+ continue
+ }
+ // disjoint requires non-nil, non-top arguments,
+ // and non-interface types as term types.
+ if debug {
+ if x == nil || x.typ == nil || y == nil || y.typ == nil {
+ panic("empty or top union term")
+ }
+ }
+ if !(*term)(x).disjoint((*term)(y)) {
+ return i
+ }
+ }
+ return -1
+}
+
+// flattenUnion walks a union type expression of the form A | B | C | ...,
+// extracting both the binary exprs (blist) and leaf types (tlist).
+func flattenUnion(list []syntax.Expr, x syntax.Expr) (blist, tlist []syntax.Expr) {
+ if o, _ := x.(*syntax.Operation); o != nil && o.Op == syntax.Or {
+ blist, tlist = flattenUnion(list, o.X)
+ blist = append(blist, o)
+ x = o.Y
+ }
+ return blist, append(tlist, x)
+}
diff --git a/src/cmd/compile/internal/types2/universe.go b/src/cmd/compile/internal/types2/universe.go
new file mode 100644
index 0000000..11c8186
--- /dev/null
+++ b/src/cmd/compile/internal/types2/universe.go
@@ -0,0 +1,276 @@
+// Copyright 2011 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// This file sets up the universe scope and the unsafe package.
+
+package types2
+
+import (
+ "go/constant"
+ "strings"
+)
+
+// The Universe scope contains all predeclared objects of Go.
+// It is the outermost scope of any chain of nested scopes.
+var Universe *Scope
+
+// The Unsafe package is the package returned by an importer
+// for the import path "unsafe".
+var Unsafe *Package
+
+var (
+ universeIota Object
+ universeByte Type // uint8 alias, but has name "byte"
+ universeRune Type // int32 alias, but has name "rune"
+ universeAny Object
+ universeError Type
+ universeComparable Object
+)
+
+// Typ contains the predeclared *Basic types indexed by their
+// corresponding BasicKind.
+//
+// The *Basic type for Typ[Byte] will have the name "uint8".
+// Use Universe.Lookup("byte").Type() to obtain the specific
+// alias basic type named "byte" (and analogously for "rune").
+var Typ = [...]*Basic{
+ Invalid: {Invalid, 0, "invalid type"},
+
+ Bool: {Bool, IsBoolean, "bool"},
+ Int: {Int, IsInteger, "int"},
+ Int8: {Int8, IsInteger, "int8"},
+ Int16: {Int16, IsInteger, "int16"},
+ Int32: {Int32, IsInteger, "int32"},
+ Int64: {Int64, IsInteger, "int64"},
+ Uint: {Uint, IsInteger | IsUnsigned, "uint"},
+ Uint8: {Uint8, IsInteger | IsUnsigned, "uint8"},
+ Uint16: {Uint16, IsInteger | IsUnsigned, "uint16"},
+ Uint32: {Uint32, IsInteger | IsUnsigned, "uint32"},
+ Uint64: {Uint64, IsInteger | IsUnsigned, "uint64"},
+ Uintptr: {Uintptr, IsInteger | IsUnsigned, "uintptr"},
+ Float32: {Float32, IsFloat, "float32"},
+ Float64: {Float64, IsFloat, "float64"},
+ Complex64: {Complex64, IsComplex, "complex64"},
+ Complex128: {Complex128, IsComplex, "complex128"},
+ String: {String, IsString, "string"},
+ UnsafePointer: {UnsafePointer, 0, "Pointer"},
+
+ UntypedBool: {UntypedBool, IsBoolean | IsUntyped, "untyped bool"},
+ UntypedInt: {UntypedInt, IsInteger | IsUntyped, "untyped int"},
+ UntypedRune: {UntypedRune, IsInteger | IsUntyped, "untyped rune"},
+ UntypedFloat: {UntypedFloat, IsFloat | IsUntyped, "untyped float"},
+ UntypedComplex: {UntypedComplex, IsComplex | IsUntyped, "untyped complex"},
+ UntypedString: {UntypedString, IsString | IsUntyped, "untyped string"},
+ UntypedNil: {UntypedNil, IsUntyped, "untyped nil"},
+}
+
+var aliases = [...]*Basic{
+ {Byte, IsInteger | IsUnsigned, "byte"},
+ {Rune, IsInteger, "rune"},
+}
+
+func defPredeclaredTypes() {
+ for _, t := range Typ {
+ def(NewTypeName(nopos, nil, t.name, t))
+ }
+ for _, t := range aliases {
+ def(NewTypeName(nopos, nil, t.name, t))
+ }
+
+ // type any = interface{}
+ // Note: don't use &emptyInterface for the type of any. Using a unique
+ // pointer allows us to detect any and format it as "any" rather than
+ // interface{}, which clarifies user-facing error messages significantly.
+ def(NewTypeName(nopos, nil, "any", &Interface{complete: true, tset: &topTypeSet}))
+
+ // type error interface{ Error() string }
+ {
+ obj := NewTypeName(nopos, nil, "error", nil)
+ obj.setColor(black)
+ typ := NewNamed(obj, nil, nil)
+
+ // error.Error() string
+ recv := NewVar(nopos, nil, "", typ)
+ res := NewVar(nopos, nil, "", Typ[String])
+ sig := NewSignatureType(recv, nil, nil, nil, NewTuple(res), false)
+ err := NewFunc(nopos, nil, "Error", sig)
+
+ // interface{ Error() string }
+ ityp := &Interface{obj: obj, methods: []*Func{err}, complete: true}
+ computeInterfaceTypeSet(nil, nopos, ityp) // prevent races due to lazy computation of tset
+
+ typ.SetUnderlying(ityp)
+ def(obj)
+ }
+
+ // type comparable interface{} // marked as comparable
+ {
+ obj := NewTypeName(nopos, nil, "comparable", nil)
+ obj.setColor(black)
+ typ := NewNamed(obj, nil, nil)
+
+ // interface{} // marked as comparable
+ ityp := &Interface{obj: obj, complete: true, tset: &_TypeSet{nil, allTermlist, true}}
+
+ typ.SetUnderlying(ityp)
+ def(obj)
+ }
+}
+
+var predeclaredConsts = [...]struct {
+ name string
+ kind BasicKind
+ val constant.Value
+}{
+ {"true", UntypedBool, constant.MakeBool(true)},
+ {"false", UntypedBool, constant.MakeBool(false)},
+ {"iota", UntypedInt, constant.MakeInt64(0)},
+}
+
+func defPredeclaredConsts() {
+ for _, c := range predeclaredConsts {
+ def(NewConst(nopos, nil, c.name, Typ[c.kind], c.val))
+ }
+}
+
+func defPredeclaredNil() {
+ def(&Nil{object{name: "nil", typ: Typ[UntypedNil], color_: black}})
+}
+
+// A builtinId is the id of a builtin function.
+type builtinId int
+
+const (
+ // universe scope
+ _Append builtinId = iota
+ _Cap
+ _Close
+ _Complex
+ _Copy
+ _Delete
+ _Imag
+ _Len
+ _Make
+ _New
+ _Panic
+ _Print
+ _Println
+ _Real
+ _Recover
+
+ // package unsafe
+ _Add
+ _Alignof
+ _Offsetof
+ _Sizeof
+ _Slice
+
+ // testing support
+ _Assert
+ _Trace
+)
+
+var predeclaredFuncs = [...]struct {
+ name string
+ nargs int
+ variadic bool
+ kind exprKind
+}{
+ _Append: {"append", 1, true, expression},
+ _Cap: {"cap", 1, false, expression},
+ _Close: {"close", 1, false, statement},
+ _Complex: {"complex", 2, false, expression},
+ _Copy: {"copy", 2, false, statement},
+ _Delete: {"delete", 2, false, statement},
+ _Imag: {"imag", 1, false, expression},
+ _Len: {"len", 1, false, expression},
+ _Make: {"make", 1, true, expression},
+ _New: {"new", 1, false, expression},
+ _Panic: {"panic", 1, false, statement},
+ _Print: {"print", 0, true, statement},
+ _Println: {"println", 0, true, statement},
+ _Real: {"real", 1, false, expression},
+ _Recover: {"recover", 0, false, statement},
+
+ _Add: {"Add", 2, false, expression},
+ _Alignof: {"Alignof", 1, false, expression},
+ _Offsetof: {"Offsetof", 1, false, expression},
+ _Sizeof: {"Sizeof", 1, false, expression},
+ _Slice: {"Slice", 2, false, expression},
+
+ _Assert: {"assert", 1, false, statement},
+ _Trace: {"trace", 0, true, statement},
+}
+
+func defPredeclaredFuncs() {
+ for i := range predeclaredFuncs {
+ id := builtinId(i)
+ if id == _Assert || id == _Trace {
+ continue // only define these in testing environment
+ }
+ def(newBuiltin(id))
+ }
+}
+
+// DefPredeclaredTestFuncs defines the assert and trace built-ins.
+// These built-ins are intended for debugging and testing of this
+// package only.
+func DefPredeclaredTestFuncs() {
+ if Universe.Lookup("assert") != nil {
+ return // already defined
+ }
+ def(newBuiltin(_Assert))
+ def(newBuiltin(_Trace))
+}
+
+func init() {
+ Universe = NewScope(nil, nopos, nopos, "universe")
+ Unsafe = NewPackage("unsafe", "unsafe")
+ Unsafe.complete = true
+
+ defPredeclaredTypes()
+ defPredeclaredConsts()
+ defPredeclaredNil()
+ defPredeclaredFuncs()
+
+ universeIota = Universe.Lookup("iota")
+ universeByte = Universe.Lookup("byte").Type()
+ universeRune = Universe.Lookup("rune").Type()
+ universeAny = Universe.Lookup("any")
+ universeError = Universe.Lookup("error").Type()
+ universeComparable = Universe.Lookup("comparable")
+}
+
+// Objects with names containing blanks are internal and not entered into
+// a scope. Objects with exported names are inserted in the unsafe package
+// scope; other objects are inserted in the universe scope.
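+//
+// For example, the built-in Sizeof has an exported name and is entered into
+// the unsafe package scope, while int and the byte alias are entered into
+// the universe scope.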
+//
+func def(obj Object) {
+ assert(obj.color() == black)
+ name := obj.Name()
+ if strings.Contains(name, " ") {
+ return // nothing to do
+ }
+ // fix Obj link for named types
+ if typ, _ := obj.Type().(*Named); typ != nil {
+ typ.obj = obj.(*TypeName)
+ }
+ // exported identifiers go into package unsafe
+ scope := Universe
+ if obj.Exported() {
+ scope = Unsafe.scope
+ // set Pkg field
+ switch obj := obj.(type) {
+ case *TypeName:
+ obj.pkg = Unsafe
+ case *Builtin:
+ obj.pkg = Unsafe
+ default:
+ unreachable()
+ }
+ }
+ if scope.Insert(obj) != nil {
+ panic("double declaration of predeclared identifier")
+ }
+}
diff --git a/src/cmd/compile/internal/types2/validtype.go b/src/cmd/compile/internal/types2/validtype.go
new file mode 100644
index 0000000..f365ad1
--- /dev/null
+++ b/src/cmd/compile/internal/types2/validtype.go
@@ -0,0 +1,147 @@
+// Copyright 2022 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+// validType verifies that the given type does not "expand" indefinitely
+// producing a cycle in the type graph. Cycles are detected by marking
+// defined types.
+// (Cycles involving alias types, as in "type A = [10]A" are detected
+// earlier, via the objDecl cycle detection mechanism.)
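+//
+// For example, type S struct{ s S } expands indefinitely and is reported
+// as an invalid recursive type, while type P struct{ p *P } is valid
+// because the pointer indirection terminates the expansion.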
+func (check *Checker) validType(typ *Named) {
+ check.validType0(typ, nil, nil)
+}
+
+type typeInfo uint
+
+// validType0 checks if the given type is valid. If typ is a type parameter,
+// its value is looked up in the provided environment. The environment is
+// nil if typ is not part of (the RHS of) an instantiated type; in that case
+// any type parameter encountered must be from an enclosing function and can
+// be ignored. The path is the list of type names that lead to the current typ.
+func (check *Checker) validType0(typ Type, env *tparamEnv, path []Object) typeInfo {
+ const (
+ unknown typeInfo = iota
+ marked
+ valid
+ invalid
+ )
+
+ switch t := typ.(type) {
+ case nil:
+ // We should never see a nil type but be conservative and panic
+ // only in debug mode.
+ if debug {
+ panic("validType0(nil)")
+ }
+
+ case *Array:
+ return check.validType0(t.elem, env, path)
+
+ case *Struct:
+ for _, f := range t.fields {
+ if check.validType0(f.typ, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Union:
+ for _, t := range t.terms {
+ if check.validType0(t.typ, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Interface:
+ for _, etyp := range t.embeddeds {
+ if check.validType0(etyp, env, path) == invalid {
+ return invalid
+ }
+ }
+
+ case *Named:
+ // Don't report a 2nd error if we already know the type is invalid
+ // (e.g., if a cycle was detected earlier, via under).
+ if t.underlying == Typ[Invalid] {
+ check.infoMap[t] = invalid
+ return invalid
+ }
+
+ switch check.infoMap[t] {
+ case unknown:
+ check.infoMap[t] = marked
+ check.infoMap[t] = check.validType0(t.orig.fromRHS, env.push(t), append(path, t.obj))
+ case marked:
+ // We have seen type t before and thus must have a cycle.
+ check.infoMap[t] = invalid
+ // t cannot be in an imported package otherwise that package
+ // would have reported a type cycle and couldn't have been
+ // imported in the first place.
+ assert(t.obj.pkg == check.pkg)
+ t.underlying = Typ[Invalid] // t is in the current package (no race possibility)
+ // Find the starting point of the cycle and report it.
+ for i, tn := range path {
+ if tn == t.obj {
+ check.cycleError(path[i:])
+ return invalid
+ }
+ }
+ panic("cycle start not found")
+ }
+ return check.infoMap[t]
+
+ case *TypeParam:
+ // A type parameter stands for the type (argument) it was instantiated with.
+ // Check the corresponding type argument for validity if we have one.
+ if env != nil {
+ if targ := env.tmap[t]; targ != nil {
+ // Type arguments found in targ must be looked
+ // up in the enclosing environment env.link.
+ return check.validType0(targ, env.link, path)
+ }
+ }
+ }
+
+ return valid
+}
+
+// A tparamEnv provides the environment for looking up the type arguments
+// with which type parameters for a given instance were instantiated.
+// If we don't have an instance, the corresponding tparamEnv is nil.
+type tparamEnv struct {
+ tmap substMap
+ link *tparamEnv
+}
+
+func (env *tparamEnv) push(typ *Named) *tparamEnv {
+ // If typ is not an instantiated type there are no typ-specific
+ // type parameters to look up and we don't need an environment.
+ targs := typ.TypeArgs()
+ if targs == nil {
+ return nil // no instance => nil environment
+ }
+
+ // Populate tmap: remember the type argument for each type parameter.
+ // We cannot use makeSubstMap because the number of type parameters
+ // and arguments may not match due to errors in the source (too many
+ // or too few type arguments). Populate tmap "manually".
+ tparams := typ.TypeParams()
+ n, m := targs.Len(), tparams.Len()
+ if n > m {
+ n = m // too many targs
+ }
+ tmap := make(substMap, n)
+ for i := 0; i < n; i++ {
+ tmap[tparams.At(i)] = targs.At(i)
+ }
+
+ return &tparamEnv{tmap: tmap, link: env}
+}
+
+// TODO(gri) Alternative implementation:
+// We may not need to build a stack of environments to
+// look up the type arguments for type parameters. The
+// same information should be available via the path:
+// We should be able to just walk the path backwards
+// and find the type arguments in the instance objects.
diff --git a/src/cmd/compile/internal/types2/version.go b/src/cmd/compile/internal/types2/version.go
new file mode 100644
index 0000000..b649f09
--- /dev/null
+++ b/src/cmd/compile/internal/types2/version.go
@@ -0,0 +1,81 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package types2
+
+import (
+ "cmd/compile/internal/syntax"
+ "fmt"
+ "regexp"
+ "strconv"
+ "strings"
+)
+
+// langCompat reports an error if the representation of a numeric
+// literal is not compatible with the current language version.
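+//
+// For example, the literals 0b1010, 0o660, 1_000_000, and the hexadecimal
+// float 0x1p-2 are reported as errors when the effective language version
+// is below go1.13.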
+func (check *Checker) langCompat(lit *syntax.BasicLit) {
+ s := lit.Value
+ if len(s) <= 2 || check.allowVersion(check.pkg, 1, 13) {
+ return
+ }
+ // len(s) > 2
+ if strings.Contains(s, "_") {
+ check.versionErrorf(lit, "go1.13", "underscores in numeric literals")
+ return
+ }
+ if s[0] != '0' {
+ return
+ }
+ radix := s[1]
+ if radix == 'b' || radix == 'B' {
+ check.versionErrorf(lit, "go1.13", "binary literals")
+ return
+ }
+ if radix == 'o' || radix == 'O' {
+ check.versionErrorf(lit, "go1.13", "0o/0O-style octal literals")
+ return
+ }
+ if lit.Kind != syntax.IntLit && (radix == 'x' || radix == 'X') {
+ check.versionErrorf(lit, "go1.13", "hexadecimal floating-point literals")
+ }
+}
+
+// allowVersion reports whether the given package
+// is allowed to use version major.minor.
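+//
+// For example, when the package being checked is restricted to go1.17,
+// allowVersion(check.pkg, 1, 18) reports false; imported packages are
+// assumed to have been checked already and always report true.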
+func (check *Checker) allowVersion(pkg *Package, major, minor int) bool {
+ // We assume that imported packages have all been checked,
+ // so we only have to check for the local package.
+ if pkg != check.pkg {
+ return true
+ }
+ ma, mi := check.version.major, check.version.minor
+ return ma == 0 && mi == 0 || ma > major || ma == major && mi >= minor
+}
+
+type version struct {
+ major, minor int
+}
+
+// parseGoVersion parses a Go version string (such as "go1.12")
+// and returns the version, or an error. If s is the empty
+// string, the version is 0.0.
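+//
+// For example, parseGoVersion("go1.12") returns version{1, 12} with a nil
+// error, while parseGoVersion("1.12") results in an error.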
+func parseGoVersion(s string) (v version, err error) {
+ if s == "" {
+ return
+ }
+ matches := goVersionRx.FindStringSubmatch(s)
+ if matches == nil {
+ err = fmt.Errorf(`should be something like "go1.12"`)
+ return
+ }
+ v.major, err = strconv.Atoi(matches[1])
+ if err != nil {
+ return
+ }
+ v.minor, err = strconv.Atoi(matches[2])
+ return
+}
+
+// goVersionRx matches a Go version string, e.g. "go1.12".
+var goVersionRx = regexp.MustCompile(`^go([1-9][0-9]*)\.(0|[1-9][0-9]*)$`)
diff --git a/src/cmd/compile/internal/walk/assign.go b/src/cmd/compile/internal/walk/assign.go
new file mode 100644
index 0000000..9350c38
--- /dev/null
+++ b/src/cmd/compile/internal/walk/assign.go
@@ -0,0 +1,719 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// walkAssign walks an OAS (AssignExpr) or OASOP (AssignOpExpr) node.
+func walkAssign(init *ir.Nodes, n ir.Node) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ var left, right ir.Node
+ switch n.Op() {
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ left, right = n.X, n.Y
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ left, right = n.X, n.Y
+ }
+
+ // Recognize m[k] = append(m[k], ...) so we can reuse
+ // the mapassign call.
+ var mapAppend *ir.CallExpr
+ if left.Op() == ir.OINDEXMAP && right.Op() == ir.OAPPEND {
+ left := left.(*ir.IndexExpr)
+ mapAppend = right.(*ir.CallExpr)
+ if !ir.SameSafeExpr(left, mapAppend.Args[0]) {
+ base.Fatalf("not same expressions: %v != %v", left, mapAppend.Args[0])
+ }
+ }
+
+ left = walkExpr(left, init)
+ left = safeExpr(left, init)
+ if mapAppend != nil {
+ mapAppend.Args[0] = left
+ }
+
+ if n.Op() == ir.OASOP {
+ // Rewrite x op= y into x = x op y.
+ n = ir.NewAssignStmt(base.Pos, left, typecheck.Expr(ir.NewBinaryExpr(base.Pos, n.(*ir.AssignOpStmt).AsOp, left, right)))
+ } else {
+ n.(*ir.AssignStmt).X = left
+ }
+ as := n.(*ir.AssignStmt)
+
+ if oaslit(as, init) {
+ return ir.NewBlockStmt(as.Pos(), nil)
+ }
+
+ if as.Y == nil {
+ // TODO(austin): Check all "implicit zeroing"
+ return as
+ }
+
+ if !base.Flag.Cfg.Instrumenting && ir.IsZero(as.Y) {
+ return as
+ }
+
+ switch as.Y.Op() {
+ default:
+ as.Y = walkExpr(as.Y, init)
+
+ case ir.ORECV:
+		// x = <-c; as.X is x, recv.X (below) is c.
+ // order.stmt made sure x is addressable.
+ recv := as.Y.(*ir.UnaryExpr)
+ recv.X = walkExpr(recv.X, init)
+
+ n1 := typecheck.NodAddr(as.X)
+ r := recv.X // the channel
+ return mkcall1(chanfn("chanrecv1", 2, r.Type()), nil, init, r, n1)
+
+ case ir.OAPPEND:
+ // x = append(...)
+ call := as.Y.(*ir.CallExpr)
+ if call.Type().Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", call.Type().Elem())
+ }
+ var r ir.Node
+ switch {
+ case isAppendOfMake(call):
+ // x = append(y, make([]T, y)...)
+ r = extendSlice(call, init)
+ case call.IsDDD:
+ r = appendSlice(call, init) // also works for append(slice, string).
+ default:
+ r = walkAppend(call, init, as)
+ }
+ as.Y = r
+ if r.Op() == ir.OAPPEND {
+ // Left in place for back end.
+ // Do not add a new write barrier.
+ // Set up address of type for back end.
+ r.(*ir.CallExpr).X = reflectdata.TypePtr(r.Type().Elem())
+ return as
+ }
+ // Otherwise, lowered for race detector.
+ // Treat as ordinary assignment.
+ }
+
+ if as.X != nil && as.Y != nil {
+ return convas(as, init)
+ }
+ return as
+}
+
+// walkAssignDotType walks an OAS2DOTTYPE node.
+func walkAssignDotType(n *ir.AssignListStmt, init *ir.Nodes) ir.Node {
+ walkExprListSafe(n.Lhs, init)
+ n.Rhs[0] = walkExpr(n.Rhs[0], init)
+ return n
+}
+
+// walkAssignFunc walks an OAS2FUNC node.
+func walkAssignFunc(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0]
+ walkExprListSafe(n.Lhs, init)
+ r = walkExpr(r, init)
+
+ if ir.IsIntrinsicCall(r.(*ir.CallExpr)) {
+ n.Rhs = []ir.Node{r}
+ return n
+ }
+ init.Append(r)
+
+ ll := ascompatet(n.Lhs, r.Type())
+ return ir.NewBlockStmt(src.NoXPos, ll)
+}
+
+// walkAssignList walks an OAS2 node.
+func walkAssignList(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+ return ir.NewBlockStmt(src.NoXPos, ascompatee(ir.OAS, n.Lhs, n.Rhs))
+}
+
+// walkAssignMapRead walks an OAS2MAPR node.
+func walkAssignMapRead(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0].(*ir.IndexExpr)
+ walkExprListSafe(n.Lhs, init)
+ r.X = walkExpr(r.X, init)
+ r.Index = walkExpr(r.Index, init)
+ t := r.X.Type()
+
+ fast := mapfast(t)
+ key := mapKeyArg(fast, r, r.Index)
+
+ // from:
+ // a,b = m[i]
+ // to:
+ // var,b = mapaccess2*(t, m, i)
+ // a = *var
+ a := n.Lhs[0]
+
+ var call *ir.CallExpr
+ if w := t.Elem().Size(); w <= zeroValSize {
+ fn := mapfn(mapaccess2[fast], t, false)
+ call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key)
+ } else {
+ fn := mapfn("mapaccess2_fat", t, true)
+ z := reflectdata.ZeroAddr(w)
+ call = mkcall1(fn, fn.Type().Results(), init, reflectdata.TypePtr(t), r.X, key, z)
+ }
+
+ // mapaccess2* returns a typed bool, but due to spec changes,
+	// the boolean result of i.(T) is now untyped, so we make it the
+ // same type as the variable on the lhs.
+ if ok := n.Lhs[1]; !ir.IsBlank(ok) && ok.Type().IsBoolean() {
+ call.Type().Field(1).Type = ok.Type()
+ }
+ n.Rhs = []ir.Node{call}
+ n.SetOp(ir.OAS2FUNC)
+
+ // don't generate a = *var if a is _
+ if ir.IsBlank(a) {
+ return walkExpr(typecheck.Stmt(n), init)
+ }
+
+ var_ := typecheck.Temp(types.NewPtr(t.Elem()))
+ var_.SetTypecheck(1)
+ var_.MarkNonNil() // mapaccess always returns a non-nil pointer
+
+ n.Lhs[0] = var_
+ init.Append(walkExpr(n, init))
+
+ as := ir.NewAssignStmt(base.Pos, a, ir.NewStarExpr(base.Pos, var_))
+ return walkExpr(typecheck.Stmt(as), init)
+}
+
+// walkAssignRecv walks an OAS2RECV node.
+func walkAssignRecv(init *ir.Nodes, n *ir.AssignListStmt) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+
+ r := n.Rhs[0].(*ir.UnaryExpr) // recv
+ walkExprListSafe(n.Lhs, init)
+ r.X = walkExpr(r.X, init)
+ var n1 ir.Node
+ if ir.IsBlank(n.Lhs[0]) {
+ n1 = typecheck.NodNil()
+ } else {
+ n1 = typecheck.NodAddr(n.Lhs[0])
+ }
+ fn := chanfn("chanrecv2", 2, r.X.Type())
+ ok := n.Lhs[1]
+ call := mkcall1(fn, types.Types[types.TBOOL], init, r.X, n1)
+ return typecheck.Stmt(ir.NewAssignStmt(base.Pos, ok, call))
+}
+
+// walkReturn walks an ORETURN node.
+func walkReturn(n *ir.ReturnStmt) ir.Node {
+ fn := ir.CurFunc
+
+ fn.NumReturns++
+ if len(n.Results) == 0 {
+ return n
+ }
+
+ results := fn.Type().Results().FieldSlice()
+ dsts := make([]ir.Node, len(results))
+ for i, v := range results {
+ // TODO(mdempsky): typecheck should have already checked the result variables.
+ dsts[i] = typecheck.AssignExpr(v.Nname.(*ir.Name))
+ }
+
+ n.Results = ascompatee(n.Op(), dsts, n.Results)
+ return n
+}
+
+// ascompatet checks the assignment of a function's result types (nr)
+// to an expression list (nl). It is called for
+//	expr-list = func()
+func ascompatet(nl ir.Nodes, nr *types.Type) []ir.Node {
+ if len(nl) != nr.NumFields() {
+ base.Fatalf("ascompatet: assignment count mismatch: %d = %d", len(nl), nr.NumFields())
+ }
+
+ var nn ir.Nodes
+ for i, l := range nl {
+ if ir.IsBlank(l) {
+ continue
+ }
+ r := nr.Field(i)
+
+ // Order should have created autotemps of the appropriate type for
+ // us to store results into.
+ if tmp, ok := l.(*ir.Name); !ok || !tmp.AutoTemp() || !types.Identical(tmp.Type(), r.Type) {
+ base.FatalfAt(l.Pos(), "assigning %v to %+v", r.Type, l)
+ }
+
+ res := ir.NewResultExpr(base.Pos, nil, types.BADWIDTH)
+ res.Index = int64(i)
+ res.SetType(r.Type)
+ res.SetTypecheck(1)
+
+ nn.Append(ir.NewAssignStmt(base.Pos, l, res))
+ }
+ return nn
+}
+
+// ascompatee checks the assignment of an expression list (nr) to
+// an expression list (nl). It is called for
+//	expr-list = expr-list
+func ascompatee(op ir.Op, nl, nr []ir.Node) []ir.Node {
+ // cannot happen: should have been rejected during type checking
+ if len(nl) != len(nr) {
+ base.Fatalf("assignment operands mismatch: %+v / %+v", ir.Nodes(nl), ir.Nodes(nr))
+ }
+
+ var assigned ir.NameSet
+ var memWrite, deferResultWrite bool
+
+ // affected reports whether expression n could be affected by
+ // the assignments applied so far.
+ affected := func(n ir.Node) bool {
+ if deferResultWrite {
+ return true
+ }
+ return ir.Any(n, func(n ir.Node) bool {
+ if n.Op() == ir.ONAME && assigned.Has(n.(*ir.Name)) {
+ return true
+ }
+ if memWrite && readsMemory(n) {
+ return true
+ }
+ return false
+ })
+ }
+
+ // If a needed expression may be affected by an
+ // earlier assignment, make an early copy of that
+ // expression and use the copy instead.
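+	//
+	// For example, in x, y = y, x the second assignment must read the
+	// original value of x, so x is copied to a temporary (appended to
+	// early) before the assignment x = y is emitted.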
+ var early ir.Nodes
+ save := func(np *ir.Node) {
+ if n := *np; affected(n) {
+ *np = copyExpr(n, n.Type(), &early)
+ }
+ }
+
+ var late ir.Nodes
+ for i, lorig := range nl {
+ l, r := lorig, nr[i]
+
+ // Do not generate 'x = x' during return. See issue 4014.
+ if op == ir.ORETURN && ir.SameSafeExpr(l, r) {
+ continue
+ }
+
+ // Save subexpressions needed on left side.
+ // Drill through non-dereferences.
+ for {
+ // If an expression has init statements, they must be evaluated
+ // before any of its saved sub-operands (#45706).
+ // TODO(mdempsky): Disallow init statements on lvalues.
+ init := ir.TakeInit(l)
+ walkStmtList(init)
+ early.Append(init...)
+
+ switch ll := l.(type) {
+ case *ir.IndexExpr:
+ if ll.X.Type().IsArray() {
+ save(&ll.Index)
+ l = ll.X
+ continue
+ }
+ case *ir.ParenExpr:
+ l = ll.X
+ continue
+ case *ir.SelectorExpr:
+ if ll.Op() == ir.ODOT {
+ l = ll.X
+ continue
+ }
+ }
+ break
+ }
+
+ var name *ir.Name
+ switch l.Op() {
+ default:
+ base.Fatalf("unexpected lvalue %v", l.Op())
+ case ir.ONAME:
+ name = l.(*ir.Name)
+ case ir.OINDEX, ir.OINDEXMAP:
+ l := l.(*ir.IndexExpr)
+ save(&l.X)
+ save(&l.Index)
+ case ir.ODEREF:
+ l := l.(*ir.StarExpr)
+ save(&l.X)
+ case ir.ODOTPTR:
+ l := l.(*ir.SelectorExpr)
+ save(&l.X)
+ }
+
+ // Save expression on right side.
+ save(&r)
+
+ appendWalkStmt(&late, convas(ir.NewAssignStmt(base.Pos, lorig, r), &late))
+
+ // Check for reasons why we may need to compute later expressions
+ // before this assignment happens.
+
+ if name == nil {
+ // Not a direct assignment to a declared variable.
+ // Conservatively assume any memory access might alias.
+ memWrite = true
+ continue
+ }
+
+ if name.Class == ir.PPARAMOUT && ir.CurFunc.HasDefer() {
+			// Assignments to a result parameter in a function with defers
+			// become visible early if evaluation of any later expression
+ // panics (#43835).
+ deferResultWrite = true
+ continue
+ }
+
+ if sym := types.OrigSym(name.Sym()); sym == nil || sym.IsBlank() {
+ // We can ignore assignments to blank or anonymous result parameters.
+ // These can't appear in expressions anyway.
+ continue
+ }
+
+ if name.Addrtaken() || !name.OnStack() {
+ // Global variable, heap escaped, or just addrtaken.
+ // Conservatively assume any memory access might alias.
+ memWrite = true
+ continue
+ }
+
+ // Local, non-addrtaken variable.
+ // Assignments can only alias with direct uses of this variable.
+ assigned.Add(name)
+ }
+
+ early.Append(late.Take()...)
+ return early
+}
+
+// readsMemory reports whether evaluating n directly reads from
+// memory that might be written to indirectly.
+func readsMemory(n ir.Node) bool {
+ switch n.Op() {
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ if n.Class == ir.PFUNC {
+ return false
+ }
+ return n.Addrtaken() || !n.OnStack()
+
+ case ir.OADD,
+ ir.OAND,
+ ir.OANDAND,
+ ir.OANDNOT,
+ ir.OBITNOT,
+ ir.OCONV,
+ ir.OCONVIFACE,
+ ir.OCONVIDATA,
+ ir.OCONVNOP,
+ ir.ODIV,
+ ir.ODOT,
+ ir.ODOTTYPE,
+ ir.OLITERAL,
+ ir.OLSH,
+ ir.OMOD,
+ ir.OMUL,
+ ir.ONEG,
+ ir.ONIL,
+ ir.OOR,
+ ir.OOROR,
+ ir.OPAREN,
+ ir.OPLUS,
+ ir.ORSH,
+ ir.OSUB,
+ ir.OXOR:
+ return false
+ }
+
+ // Be conservative.
+ return true
+}
+
+// expand append(l1, l2...) to
+// init {
+// s := l1
+// n := len(s) + len(l2)
+// // Compare as uint so growslice can panic on overflow.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(s, n)
+// }
+// s = s[:n]
+// memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+// }
+// s
+//
+// l2 is allowed to be a string.
+func appendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ walkAppendArgs(n, init)
+
+ l1 := n.Args[0]
+ l2 := n.Args[1]
+ l2 = cheapExpr(l2, init)
+ n.Args[1] = l2
+
+ var nodes ir.Nodes
+
+ // var s []T
+ s := typecheck.Temp(l1.Type())
+ nodes.Append(ir.NewAssignStmt(base.Pos, s, l1)) // s = l1
+
+ elemtype := s.Type().Elem()
+
+ // n := len(s) + len(l2)
+ nn := typecheck.Temp(types.Types[types.TINT])
+ nodes.Append(ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), ir.NewUnaryExpr(base.Pos, ir.OLEN, l2))))
+
+ // if uint(n) > uint(cap(s))
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+ scapuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, scapuint)
+
+ // instantiate growslice(typ *type, []any, int) []any
+ fn := typecheck.LookupRuntime("growslice")
+ fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+ // s = growslice(T, s, n)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+ nodes.Append(nif)
+
+ // s = s[:n]
+ nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil)
+ nt.SetBounded(true)
+ nodes.Append(ir.NewAssignStmt(base.Pos, s, nt))
+
+ var ncopy ir.Node
+ if elemtype.HasPointers() {
+ // copy(s[len(l1):], l2)
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+ slice.SetType(s.Type())
+
+ ir.CurFunc.SetWBPos(n.Pos())
+
+ // instantiate typedslicecopy(typ *type, dstPtr *any, dstLen int, srcPtr *any, srcLen int) int
+ fn := typecheck.LookupRuntime("typedslicecopy")
+ fn = typecheck.SubstArgTypes(fn, l1.Type().Elem(), l2.Type().Elem())
+ ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, reflectdata.TypePtr(elemtype), ptr1, len1, ptr2, len2)
+ } else if base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime {
+ // rely on runtime to instrument:
+ // copy(s[len(l1):], l2)
+ // l2 can be a slice or string.
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1), nil, nil)
+ slice.SetType(s.Type())
+
+ ptr1, len1 := backingArrayPtrLen(cheapExpr(slice, &nodes))
+ ptr2, len2 := backingArrayPtrLen(l2)
+
+ fn := typecheck.LookupRuntime("slicecopy")
+ fn = typecheck.SubstArgTypes(fn, ptr1.Type().Elem(), ptr2.Type().Elem())
+ ncopy = mkcall1(fn, types.Types[types.TINT], &nodes, ptr1, len1, ptr2, len2, ir.NewInt(elemtype.Size()))
+ } else {
+ // memmove(&s[len(l1)], &l2[0], len(l2)*sizeof(T))
+ ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+ ix.SetBounded(true)
+ addr := typecheck.NodAddr(ix)
+
+ sptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l2)
+
+ nwid := cheapExpr(typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OLEN, l2), types.Types[types.TUINTPTR]), &nodes)
+ nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(elemtype.Size()))
+
+ // instantiate func memmove(to *any, frm *any, length uintptr)
+ fn := typecheck.LookupRuntime("memmove")
+ fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+ ncopy = mkcall1(fn, nil, &nodes, addr, sptr, nwid)
+ }
+ ln := append(nodes, ncopy)
+
+ typecheck.Stmts(ln)
+ walkStmtList(ln)
+ init.Append(ln...)
+ return s
+}
+
+// isAppendOfMake reports whether n is of the form append(x, make([]T, y)...).
+// isAppendOfMake assumes n has already been typechecked.
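+//
+// For example, append(x, make([]T, n)...) has this form, while
+// append(x, make([]T, n, c)...) does not because of the explicit
+// capacity argument.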
+func isAppendOfMake(n ir.Node) bool {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return false
+ }
+
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+
+ if n.Op() != ir.OAPPEND {
+ return false
+ }
+ call := n.(*ir.CallExpr)
+ if !call.IsDDD || len(call.Args) != 2 || call.Args[1].Op() != ir.OMAKESLICE {
+ return false
+ }
+
+ mk := call.Args[1].(*ir.MakeExpr)
+ if mk.Cap != nil {
+ return false
+ }
+
+	// y must be either an integer constant, or the largest possible positive
+	// value of variable y must fit into a uint.
+
+	// typecheck made sure that constant arguments to make are not negative and fit into an int.
+
+	// Overflow of the len argument to make is handled by an explicit
+	// check of int(len) < 0 at runtime.
+ y := mk.Len
+ if !ir.IsConst(y, constant.Int) && y.Type().Size() > types.Types[types.TUINT].Size() {
+ return false
+ }
+
+ return true
+}
+
+// extendSlice rewrites append(l1, make([]T, l2)...) to
+// init {
+// if l2 >= 0 { // Empty if block here for more meaningful node.SetLikely(true)
+// } else {
+// panicmakeslicelen()
+// }
+// s := l1
+// n := len(s) + l2
+// // Compare n and s as uint so growslice can panic on overflow of len(s) + l2.
+// // cap is a positive int and n can become negative when len(s) + l2
+// // overflows int. Interpreting n when negative as uint makes it larger
+// // than cap(s). growslice will check the int n arg and panic if n is
+// // negative. This prevents the overflow from being undetected.
+// if uint(n) > uint(cap(s)) {
+// s = growslice(T, s, n)
+// }
+// s = s[:n]
+// lptr := &l1[0]
+// sptr := &s[0]
+// if lptr == sptr || !T.HasPointers() {
+// // growslice did not clear the whole underlying array (or did not get called)
+// hp := &s[len(l1)]
+// hn := l2 * sizeof(T)
+// memclr(hp, hn)
+// }
+// }
+// s
+func extendSlice(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ // isAppendOfMake made sure all possible positive values of l2 fit into an uint.
+ // The case of l2 overflow when converting from e.g. uint to int is handled by an explicit
+ // check of l2 < 0 at runtime which is generated below.
+ l2 := typecheck.Conv(n.Args[1].(*ir.MakeExpr).Len, types.Types[types.TINT])
+ l2 = typecheck.Expr(l2)
+ n.Args[1] = l2 // walkAppendArgs expects l2 in n.List.Second().
+
+ walkAppendArgs(n, init)
+
+ l1 := n.Args[0]
+ l2 = n.Args[1] // re-read l2, as it may have been updated by walkAppendArgs
+
+ var nodes []ir.Node
+
+ // if l2 >= 0 (likely happens), do nothing
+ nifneg := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGE, l2, ir.NewInt(0)), nil, nil)
+ nifneg.Likely = true
+
+ // else panicmakeslicelen()
+ nifneg.Else = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+ nodes = append(nodes, nifneg)
+
+ // s := l1
+ s := typecheck.Temp(l1.Type())
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, l1))
+
+ elemtype := s.Type().Elem()
+
+ // n := len(s) + l2
+ nn := typecheck.Temp(types.Types[types.TINT])
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, s), l2)))
+
+ // if uint(n) > uint(cap(s))
+ nuint := typecheck.Conv(nn, types.Types[types.TUINT])
+ capuint := typecheck.Conv(ir.NewUnaryExpr(base.Pos, ir.OCAP, s), types.Types[types.TUINT])
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, nuint, capuint), nil, nil)
+
+ // instantiate growslice(typ *type, old []any, newcap int) []any
+ fn := typecheck.LookupRuntime("growslice")
+ fn = typecheck.SubstArgTypes(fn, elemtype, elemtype)
+
+ // s = growslice(T, s, n)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, s, mkcall1(fn, s.Type(), nif.PtrInit(), reflectdata.TypePtr(elemtype), s, nn))}
+ nodes = append(nodes, nif)
+
+ // s = s[:n]
+ nt := ir.NewSliceExpr(base.Pos, ir.OSLICE, s, nil, nn, nil)
+ nt.SetBounded(true)
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, s, nt))
+
+ // lptr := &l1[0]
+ l1ptr := typecheck.Temp(l1.Type().Elem().PtrTo())
+ tmp := ir.NewUnaryExpr(base.Pos, ir.OSPTR, l1)
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, l1ptr, tmp))
+
+ // sptr := &s[0]
+ sptr := typecheck.Temp(elemtype.PtrTo())
+ tmp = ir.NewUnaryExpr(base.Pos, ir.OSPTR, s)
+ nodes = append(nodes, ir.NewAssignStmt(base.Pos, sptr, tmp))
+
+ // hp := &s[len(l1)]
+ ix := ir.NewIndexExpr(base.Pos, s, ir.NewUnaryExpr(base.Pos, ir.OLEN, l1))
+ ix.SetBounded(true)
+ hp := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+
+ // hn := l2 * sizeof(elem(s))
+ hn := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, l2, ir.NewInt(elemtype.Size())), types.Types[types.TUINTPTR])
+
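+	// Pick the clearing routine: element types containing pointers must use
+	// memclrHasPointers, which does the write-barrier bookkeeping the GC needs,
+	// while pointer-free elements can use the cheaper memclrNoHeapPointers.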
+ clrname := "memclrNoHeapPointers"
+ hasPointers := elemtype.HasPointers()
+ if hasPointers {
+ clrname = "memclrHasPointers"
+ ir.CurFunc.SetWBPos(n.Pos())
+ }
+
+ var clr ir.Nodes
+ clrfn := mkcall(clrname, nil, &clr, hp, hn)
+ clr.Append(clrfn)
+
+ if hasPointers {
+ // if l1ptr == sptr
+ nifclr := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OEQ, l1ptr, sptr), nil, nil)
+ nifclr.Body = clr
+ nodes = append(nodes, nifclr)
+ } else {
+ nodes = append(nodes, clr...)
+ }
+
+ typecheck.Stmts(nodes)
+ walkStmtList(nodes)
+ init.Append(nodes...)
+ return s
+}
diff --git a/src/cmd/compile/internal/walk/builtin.go b/src/cmd/compile/internal/walk/builtin.go
new file mode 100644
index 0000000..d0aaee0
--- /dev/null
+++ b/src/cmd/compile/internal/walk/builtin.go
@@ -0,0 +1,708 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "go/token"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/escape"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// Rewrite append(src, x, y, z) so that any side effects in
+// x, y, z (including runtime panics) are evaluated in
+// initialization statements before the append.
+// For normal code generation, stop there and leave the
+// rest to cgen_append.
+//
+// For race detector, expand append(src, a [, b]* ) to
+//
+// init {
+// s := src
+// const argc = len(args) - 1
+// if cap(s) - len(s) < argc {
+// s = growslice(s, len(s)+argc)
+// }
+// n := len(s)
+// s = s[:n+argc]
+// s[n] = a
+// s[n+1] = b
+// ...
+// }
+// s
+func walkAppend(n *ir.CallExpr, init *ir.Nodes, dst ir.Node) ir.Node {
+ if !ir.SameSafeExpr(dst, n.Args[0]) {
+ n.Args[0] = safeExpr(n.Args[0], init)
+ n.Args[0] = walkExpr(n.Args[0], init)
+ }
+ walkExprListSafe(n.Args[1:], init)
+
+ nsrc := n.Args[0]
+
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
+	// and n are names or literals, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ // Using cheapExpr also makes sure that the evaluation
+ // of all arguments (and especially any panics) happen
+ // before we begin to modify the slice in a visible way.
+ ls := n.Args[1:]
+ for i, n := range ls {
+ n = cheapExpr(n, init)
+ if !types.Identical(n.Type(), nsrc.Type().Elem()) {
+ n = typecheck.AssignConv(n, nsrc.Type().Elem(), "append")
+ n = walkExpr(n, init)
+ }
+ ls[i] = n
+ }
+
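+	// append(s) with no values to add simply evaluates to the source slice.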
+ argc := len(n.Args) - 1
+ if argc < 1 {
+ return nsrc
+ }
+
+ // General case, with no function calls left as arguments.
+ // Leave for gen, except that instrumentation requires old form.
+ if !base.Flag.Cfg.Instrumenting || base.Flag.CompilingRuntime {
+ return n
+ }
+
+ var l []ir.Node
+
+ ns := typecheck.Temp(nsrc.Type())
+ l = append(l, ir.NewAssignStmt(base.Pos, ns, nsrc)) // s = src
+
+ na := ir.NewInt(int64(argc)) // const argc
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil) // if cap(s) - len(s) < argc
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OCAP, ns), ir.NewUnaryExpr(base.Pos, ir.OLEN, ns)), na)
+
+ fn := typecheck.LookupRuntime("growslice") // growslice(<type>, old []T, mincap int) (ret []T)
+ fn = typecheck.SubstArgTypes(fn, ns.Type().Elem(), ns.Type().Elem())
+
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, ns, mkcall1(fn, ns.Type(), nif.PtrInit(), reflectdata.TypePtr(ns.Type().Elem()), ns,
+ ir.NewBinaryExpr(base.Pos, ir.OADD, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns), na)))}
+
+ l = append(l, nif)
+
+ nn := typecheck.Temp(types.Types[types.TINT])
+ l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ns))) // n = len(s)
+
+ slice := ir.NewSliceExpr(base.Pos, ir.OSLICE, ns, nil, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, na), nil) // ...s[:n+argc]
+ slice.SetBounded(true)
+ l = append(l, ir.NewAssignStmt(base.Pos, ns, slice)) // s = s[:n+argc]
+
+ ls = n.Args[1:]
+ for i, n := range ls {
+ ix := ir.NewIndexExpr(base.Pos, ns, nn) // s[n] ...
+ ix.SetBounded(true)
+ l = append(l, ir.NewAssignStmt(base.Pos, ix, n)) // s[n] = arg
+ if i+1 < len(ls) {
+ l = append(l, ir.NewAssignStmt(base.Pos, nn, ir.NewBinaryExpr(base.Pos, ir.OADD, nn, ir.NewInt(1)))) // n = n + 1
+ }
+ }
+
+ typecheck.Stmts(l)
+ walkStmtList(l)
+ init.Append(l...)
+ return ns
+}
+
+// walkClose walks an OCLOSE node.
+func walkClose(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ // cannot use chanfn - closechan takes any, not chan any
+ fn := typecheck.LookupRuntime("closechan")
+ fn = typecheck.SubstArgTypes(fn, n.X.Type())
+ return mkcall1(fn, nil, init, n.X)
+}
+
+// Lower copy(a, b) to a memmove call or a runtime call.
+//
+// init {
+// n := len(a)
+// if n > len(b) { n = len(b) }
+// if a.ptr != b.ptr { memmove(a.ptr, b.ptr, n*sizeof(elem(a))) }
+// }
+// n;
+//
+// Also works if b is a string.
+//
+func walkCopy(n *ir.BinaryExpr, init *ir.Nodes, runtimecall bool) ir.Node {
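+	// Elements containing pointers must be copied with typedslicecopy, which
+	// performs the bulk write barrier required by the garbage collector.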
+ if n.X.Type().Elem().HasPointers() {
+ ir.CurFunc.SetWBPos(n.Pos())
+ fn := writebarrierfn("typedslicecopy", n.X.Type().Elem(), n.Y.Type().Elem())
+ n.X = cheapExpr(n.X, init)
+ ptrL, lenL := backingArrayPtrLen(n.X)
+ n.Y = cheapExpr(n.Y, init)
+ ptrR, lenR := backingArrayPtrLen(n.Y)
+ return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.X.Type().Elem()), ptrL, lenL, ptrR, lenR)
+ }
+
+ if runtimecall {
+ // rely on runtime to instrument:
+ // copy(n.Left, n.Right)
+ // n.Right can be a slice or string.
+
+ n.X = cheapExpr(n.X, init)
+ ptrL, lenL := backingArrayPtrLen(n.X)
+ n.Y = cheapExpr(n.Y, init)
+ ptrR, lenR := backingArrayPtrLen(n.Y)
+
+ fn := typecheck.LookupRuntime("slicecopy")
+ fn = typecheck.SubstArgTypes(fn, ptrL.Type().Elem(), ptrR.Type().Elem())
+
+ return mkcall1(fn, n.Type(), init, ptrL, lenL, ptrR, lenR, ir.NewInt(n.X.Type().Elem().Size()))
+ }
+
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ nl := typecheck.Temp(n.X.Type())
+ nr := typecheck.Temp(n.Y.Type())
+ var l []ir.Node
+ l = append(l, ir.NewAssignStmt(base.Pos, nl, n.X))
+ l = append(l, ir.NewAssignStmt(base.Pos, nr, n.Y))
+
+ nfrm := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nr)
+ nto := ir.NewUnaryExpr(base.Pos, ir.OSPTR, nl)
+
+ nlen := typecheck.Temp(types.Types[types.TINT])
+
+ // n = len(to)
+ l = append(l, ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nl)))
+
+ // if n > len(frm) { n = len(frm) }
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OGT, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr))
+ nif.Body.Append(ir.NewAssignStmt(base.Pos, nlen, ir.NewUnaryExpr(base.Pos, ir.OLEN, nr)))
+ l = append(l, nif)
+
+ // if to.ptr != frm.ptr { memmove( ... ) }
+ ne := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.ONE, nto, nfrm), nil, nil)
+ ne.Likely = true
+ l = append(l, ne)
+
+ fn := typecheck.LookupRuntime("memmove")
+ fn = typecheck.SubstArgTypes(fn, nl.Type().Elem(), nl.Type().Elem())
+ nwid := ir.Node(typecheck.Temp(types.Types[types.TUINTPTR]))
+ setwid := ir.NewAssignStmt(base.Pos, nwid, typecheck.Conv(nlen, types.Types[types.TUINTPTR]))
+ ne.Body.Append(setwid)
+ nwid = ir.NewBinaryExpr(base.Pos, ir.OMUL, nwid, ir.NewInt(nl.Type().Elem().Size()))
+ call := mkcall1(fn, nil, init, nto, nfrm, nwid)
+ ne.Body.Append(call)
+
+ typecheck.Stmts(l)
+ walkStmtList(l)
+ init.Append(l...)
+ return nlen
+}
+
+// walkDelete walks an ODELETE node.
+func walkDelete(init *ir.Nodes, n *ir.CallExpr) ir.Node {
+ init.Append(ir.TakeInit(n)...)
+ map_ := n.Args[0]
+ key := n.Args[1]
+ map_ = walkExpr(map_, init)
+ key = walkExpr(key, init)
+
+ t := map_.Type()
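+	// mapfast selects the runtime mapdelete variant (generic, fast32, fast64,
+	// or faststr) for this key type; mapKeyArg shapes the key argument to match.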
+ fast := mapfast(t)
+ key = mapKeyArg(fast, n, key)
+ return mkcall1(mapfndel(mapdelete[fast], t), nil, init, reflectdata.TypePtr(t), map_, key)
+}
+
+// walkLenCap walks an OLEN or OCAP node.
+func walkLenCap(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
+ if isRuneCount(n) {
+ // Replace len([]rune(string)) with runtime.countrunes(string).
+ return mkcall("countrunes", n.Type(), init, typecheck.Conv(n.X.(*ir.ConvExpr).X, types.Types[types.TSTRING]))
+ }
+
+ n.X = walkExpr(n.X, init)
+
+ // replace len(*[10]int) with 10.
+ // delayed until now to preserve side effects.
+ t := n.X.Type()
+
+ if t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ safeExpr(n.X, init)
+ con := typecheck.OrigInt(n, t.NumElem())
+ con.SetTypecheck(1)
+ return con
+ }
+ return n
+}
+
+// walkMakeChan walks an OMAKECHAN node.
+func walkMakeChan(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ // When size fits into int, use makechan instead of
+ // makechan64, which is faster and shorter on 32 bit platforms.
+ size := n.Len
+ fnname := "makechan64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL size is positive and fits in an int.
+ // The case of size overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makechan during runtime.
+ if size.Type().IsKind(types.TIDEAL) || size.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "makechan"
+ argtype = types.Types[types.TINT]
+ }
+
+ return mkcall1(chanfn(fnname, 1, n.Type()), n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(size, argtype))
+}
+
+// walkMakeMap walks an OMAKEMAP node.
+func walkMakeMap(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ t := n.Type()
+ hmapType := reflectdata.MapType(t)
+ hint := n.Len
+
+ // var h *hmap
+ var h ir.Node
+ if n.Esc() == ir.EscNone {
+ // Allocate hmap on stack.
+
+ // var hv hmap
+ // h = &hv
+ h = stackTempAddr(init, hmapType)
+
+ // Allocate one bucket pointed to by hmap.buckets on stack if hint
+ // is not larger than BUCKETSIZE. In case hint is larger than
+ // BUCKETSIZE runtime.makemap will allocate the buckets on the heap.
+		// Maximum key and elem size is 128 bytes; larger objects
+		// are stored with an indirection. So max bucket size is 2048+eps.
+ if !ir.IsConst(hint, constant.Int) ||
+ constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+
+ // In case hint is larger than BUCKETSIZE runtime.makemap
+ // will allocate the buckets on the heap, see #20184
+ //
+ // if hint <= BUCKETSIZE {
+ // var bv bmap
+ // b = &bv
+ // h.buckets = b
+ // }
+
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLE, hint, ir.NewInt(reflectdata.BUCKETSIZE)), nil, nil)
+ nif.Likely = true
+
+ // var bv bmap
+ // b = &bv
+ b := stackTempAddr(&nif.Body, reflectdata.MapBucketType(t))
+
+ // h.buckets = b
+ bsym := hmapType.Field(5).Sym // hmap.buckets see reflect.go:hmap
+ na := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, bsym), b)
+ nif.Body.Append(na)
+ appendWalkStmt(init, nif)
+ }
+ }
+
+ if ir.IsConst(hint, constant.Int) && constant.Compare(hint.Val(), token.LEQ, constant.MakeInt64(reflectdata.BUCKETSIZE)) {
+ // Handling make(map[any]any) and
+ // make(map[any]any, hint) where hint <= BUCKETSIZE
+		// specially allows for faster map initialization and
+ // improves binary size by using calls with fewer arguments.
+ // For hint <= BUCKETSIZE overLoadFactor(hint, 0) is false
+ // and no buckets will be allocated by makemap. Therefore,
+ // no buckets need to be allocated in this code path.
+ if n.Esc() == ir.EscNone {
+ // Only need to initialize h.hash0 since
+ // hmap h has been allocated on the stack already.
+ // h.hash0 = fastrand()
+ rand := mkcall("fastrand", types.Types[types.TUINT32], init)
+ hashsym := hmapType.Field(4).Sym // hmap.hash0 see reflect.go:hmap
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, h, hashsym), rand))
+ return typecheck.ConvNop(h, t)
+ }
+		// Call runtime.makemap_small to allocate an
+		// hmap on the heap and initialize hmap's hash0 field.
+ fn := typecheck.LookupRuntime("makemap_small")
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init)
+ }
+
+ if n.Esc() != ir.EscNone {
+ h = typecheck.NodNil()
+ }
+ // Map initialization with a variable or large hint is
+ // more complicated. We therefore generate a call to
+ // runtime.makemap to initialize hmap and allocate the
+ // map buckets.
+
+ // When hint fits into int, use makemap instead of
+ // makemap64, which is faster and shorter on 32 bit platforms.
+ fnname := "makemap64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL hint is positive and fits in an int.
+ // See checkmake call in TMAP case of OMAKE case in OpSwitch in typecheck1 function.
+ // The case of hint overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makemap during runtime.
+ if hint.Type().IsKind(types.TIDEAL) || hint.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "makemap"
+ argtype = types.Types[types.TINT]
+ }
+
+ fn := typecheck.LookupRuntime(fnname)
+ fn = typecheck.SubstArgTypes(fn, hmapType, t.Key(), t.Elem())
+ return mkcall1(fn, n.Type(), init, reflectdata.TypePtr(n.Type()), typecheck.Conv(hint, argtype), h)
+}
+
+// walkMakeSlice walks an OMAKESLICE node.
+func walkMakeSlice(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ l := n.Len
+ r := n.Cap
+ if r == nil {
+ r = safeExpr(l, init)
+ l = r
+ }
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ if why := escape.HeapAllocReason(n); why != "" {
+ base.Fatalf("%v has EscNone, but %v", n, why)
+ }
+ // var arr [r]T
+ // n = arr[:l]
+ i := typecheck.IndexConst(r)
+ if i < 0 {
+ base.Fatalf("walkExpr: invalid index %v", r)
+ }
+
+ // cap is constrained to [0,2^31) or [0,2^63) depending on whether
+		// we're on a 32-bit or 64-bit system. So it's safe to do:
+ //
+ // if uint64(len) > cap {
+ // if len < 0 { panicmakeslicelen() }
+ // panicmakeslicecap()
+ // }
+ nif := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OGT, typecheck.Conv(l, types.Types[types.TUINT64]), ir.NewInt(i)), nil, nil)
+ niflen := ir.NewIfStmt(base.Pos, ir.NewBinaryExpr(base.Pos, ir.OLT, l, ir.NewInt(0)), nil, nil)
+ niflen.Body = []ir.Node{mkcall("panicmakeslicelen", nil, init)}
+ nif.Body.Append(niflen, mkcall("panicmakeslicecap", nil, init))
+ init.Append(typecheck.Stmt(nif))
+
+ t = types.NewArray(t.Elem(), i) // [r]T
+ var_ := typecheck.Temp(t)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil)) // zero temp
+ r := ir.NewSliceExpr(base.Pos, ir.OSLICE, var_, nil, l, nil) // arr[:l]
+ // The conv is necessary in case n.Type is named.
+ return walkExpr(typecheck.Expr(typecheck.Conv(r, n.Type())), init)
+ }
+
+ // n escapes; set up a call to makeslice.
+ // When len and cap can fit into int, use makeslice instead of
+ // makeslice64, which is faster and shorter on 32 bit platforms.
+
+ len, cap := l, r
+
+ fnname := "makeslice64"
+ argtype := types.Types[types.TINT64]
+
+ // Type checking guarantees that TIDEAL len/cap are positive and fit in an int.
+ // The case of len or cap overflow when converting TUINT or TUINTPTR to TINT
+ // will be handled by the negative range checks in makeslice during runtime.
+ if (len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size()) &&
+ (cap.Type().IsKind(types.TIDEAL) || cap.Type().Size() <= types.Types[types.TUINT].Size()) {
+ fnname = "makeslice"
+ argtype = types.Types[types.TINT]
+ }
+ fn := typecheck.LookupRuntime(fnname)
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(len, argtype), typecheck.Conv(cap, argtype))
+ ptr.MarkNonNil()
+ len = typecheck.Conv(len, types.Types[types.TINT])
+ cap = typecheck.Conv(cap, types.Types[types.TINT])
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, len, cap)
+ return walkExpr(typecheck.Expr(sh), init)
+}
+
+// walkMakeSliceCopy walks an OMAKESLICECOPY node.
+func walkMakeSliceCopy(n *ir.MakeExpr, init *ir.Nodes) ir.Node {
+ if n.Esc() == ir.EscNone {
+ base.Fatalf("OMAKESLICECOPY with EscNone: %v", n)
+ }
+
+ t := n.Type()
+ if t.Elem().NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", t.Elem())
+ }
+
+ length := typecheck.Conv(n.Len, types.Types[types.TINT])
+ copylen := ir.NewUnaryExpr(base.Pos, ir.OLEN, n.Cap)
+ copyptr := ir.NewUnaryExpr(base.Pos, ir.OSPTR, n.Cap)
+
+ if !t.Elem().HasPointers() && n.Bounded() {
+ // When len(to)==len(from) and elements have no pointers:
+ // replace make+copy with runtime.mallocgc+runtime.memmove.
+
+		// We do not check for overflow of len(to)*elem.Width here,
+		// since len(from) is an already-checked slice capacity
+		// with the same elem.Width for the from slice.
+ size := ir.NewBinaryExpr(base.Pos, ir.OMUL, typecheck.Conv(length, types.Types[types.TUINTPTR]), typecheck.Conv(ir.NewInt(t.Elem().Size()), types.Types[types.TUINTPTR]))
+
+ // instantiate mallocgc(size uintptr, typ *byte, needszero bool) unsafe.Pointer
+ fn := typecheck.LookupRuntime("mallocgc")
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, size, typecheck.NodNil(), ir.NewBool(false))
+ ptr.MarkNonNil()
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
+
+ s := typecheck.Temp(t)
+ r := typecheck.Stmt(ir.NewAssignStmt(base.Pos, s, sh))
+ r = walkExpr(r, init)
+ init.Append(r)
+
+ // instantiate memmove(to *any, frm *any, size uintptr)
+ fn = typecheck.LookupRuntime("memmove")
+ fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+ ncopy := mkcall1(fn, nil, init, ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), copyptr, size)
+ init.Append(walkExpr(typecheck.Stmt(ncopy), init))
+
+ return s
+ }
+ // Replace make+copy with runtime.makeslicecopy.
+ // instantiate makeslicecopy(typ *byte, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer
+ fn := typecheck.LookupRuntime("makeslicecopy")
+ ptr := mkcall1(fn, types.Types[types.TUNSAFEPTR], init, reflectdata.TypePtr(t.Elem()), length, copylen, typecheck.Conv(copyptr, types.Types[types.TUNSAFEPTR]))
+ ptr.MarkNonNil()
+ sh := ir.NewSliceHeaderExpr(base.Pos, t, ptr, length, length)
+ return walkExpr(typecheck.Expr(sh), init)
+}
+
+// walkNew walks an ONEW node.
+func walkNew(n *ir.UnaryExpr, init *ir.Nodes) ir.Node {
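+	// A non-escaping new(T) becomes the address of a zeroed stack temporary;
+	// otherwise the ONEW node is left for the backend to lower to a heap allocation.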
+ t := n.Type().Elem()
+ if t.NotInHeap() {
+ base.Errorf("%v can't be allocated in Go; it is incomplete (or unallocatable)", n.Type().Elem())
+ }
+ if n.Esc() == ir.EscNone {
+ if t.Size() > ir.MaxImplicitStackVarSize {
+ base.Fatalf("large ONEW with EscNone: %v", n)
+ }
+ return stackTempAddr(init, t)
+ }
+ types.CalcSize(t)
+ n.MarkNonNil()
+ return n
+}
+
+// walkPrint generates code for the print and println builtins.
+func walkPrint(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ // Hoist all the argument evaluation up before the lock.
+ walkExprListCheap(nn.Args, init)
+
+ // For println, add " " between elements and "\n" at the end.
+ if nn.Op() == ir.OPRINTN {
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s)*2)
+ for i, n := range s {
+ if i != 0 {
+ t = append(t, ir.NewString(" "))
+ }
+ t = append(t, n)
+ }
+ t = append(t, ir.NewString("\n"))
+ nn.Args = t
+ }
+
+ // Collapse runs of constant strings.
+ s := nn.Args
+ t := make([]ir.Node, 0, len(s))
+ for i := 0; i < len(s); {
+ var strs []string
+ for i < len(s) && ir.IsConst(s[i], constant.String) {
+ strs = append(strs, ir.StringVal(s[i]))
+ i++
+ }
+ if len(strs) > 0 {
+ t = append(t, ir.NewString(strings.Join(strs, "")))
+ }
+ if i < len(s) {
+ t = append(t, s[i])
+ i++
+ }
+ }
+ nn.Args = t
+
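+	// Bracket the generated print calls with printlock/printunlock so output
+	// from concurrently printing goroutines is not interleaved.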
+ calls := []ir.Node{mkcall("printlock", nil, init)}
+ for i, n := range nn.Args {
+ if n.Op() == ir.OLITERAL {
+ if n.Type() == types.UntypedRune {
+ n = typecheck.DefaultLit(n, types.RuneType)
+ }
+
+ switch n.Val().Kind() {
+ case constant.Int:
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+
+ case constant.Float:
+ n = typecheck.DefaultLit(n, types.Types[types.TFLOAT64])
+ }
+ }
+
+ if n.Op() != ir.OLITERAL && n.Type() != nil && n.Type().Kind() == types.TIDEAL {
+ n = typecheck.DefaultLit(n, types.Types[types.TINT64])
+ }
+ n = typecheck.DefaultLit(n, nil)
+ nn.Args[i] = n
+ if n.Type() == nil || n.Type().Kind() == types.TFORW {
+ continue
+ }
+
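+		// Dispatch to the runtime print helper matching the argument's type.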
+ var on *ir.Name
+ switch n.Type().Kind() {
+ case types.TINTER:
+ if n.Type().IsEmptyInterface() {
+ on = typecheck.LookupRuntime("printeface")
+ } else {
+ on = typecheck.LookupRuntime("printiface")
+ }
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TPTR:
+ if n.Type().Elem().NotInHeap() {
+ on = typecheck.LookupRuntime("printuintptr")
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUNSAFEPTR])
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUINTPTR])
+ break
+ }
+ fallthrough
+ case types.TCHAN, types.TMAP, types.TFUNC, types.TUNSAFEPTR:
+ on = typecheck.LookupRuntime("printpointer")
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TSLICE:
+ on = typecheck.LookupRuntime("printslice")
+ on = typecheck.SubstArgTypes(on, n.Type()) // any-1
+ case types.TUINT, types.TUINT8, types.TUINT16, types.TUINT32, types.TUINT64, types.TUINTPTR:
+ if types.IsRuntimePkg(n.Type().Sym().Pkg) && n.Type().Sym().Name == "hex" {
+ on = typecheck.LookupRuntime("printhex")
+ } else {
+ on = typecheck.LookupRuntime("printuint")
+ }
+ case types.TINT, types.TINT8, types.TINT16, types.TINT32, types.TINT64:
+ on = typecheck.LookupRuntime("printint")
+ case types.TFLOAT32, types.TFLOAT64:
+ on = typecheck.LookupRuntime("printfloat")
+ case types.TCOMPLEX64, types.TCOMPLEX128:
+ on = typecheck.LookupRuntime("printcomplex")
+ case types.TBOOL:
+ on = typecheck.LookupRuntime("printbool")
+ case types.TSTRING:
+ cs := ""
+ if ir.IsConst(n, constant.String) {
+ cs = ir.StringVal(n)
+ }
+ switch cs {
+ case " ":
+ on = typecheck.LookupRuntime("printsp")
+ case "\n":
+ on = typecheck.LookupRuntime("printnl")
+ default:
+ on = typecheck.LookupRuntime("printstring")
+ }
+ default:
+ badtype(ir.OPRINT, n.Type(), nil)
+ continue
+ }
+
+ r := ir.NewCallExpr(base.Pos, ir.OCALL, on, nil)
+ if params := on.Type().Params().FieldSlice(); len(params) > 0 {
+ t := params[0].Type
+ n = typecheck.Conv(n, t)
+ r.Args.Append(n)
+ }
+ calls = append(calls, r)
+ }
+
+ calls = append(calls, mkcall("printunlock", nil, init))
+
+ typecheck.Stmts(calls)
+ walkExprList(calls, init)
+
+ r := ir.NewBlockStmt(base.Pos, nil)
+ r.List = calls
+ return walkStmt(typecheck.Stmt(r))
+}
+
+// walkRecoverFP walks an ORECOVERFP node.
+func walkRecoverFP(nn *ir.CallExpr, init *ir.Nodes) ir.Node {
+ return mkcall("gorecover", nn.Type(), init, walkExpr(nn.Args[0], init))
+}
+
+func walkUnsafeSlice(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ ptr := safeExpr(n.X, init)
+ len := safeExpr(n.Y, init)
+
+ fnname := "unsafeslice64"
+ lenType := types.Types[types.TINT64]
+
+	// Type checking guarantees that a TIDEAL len is positive and fits in an int.
+	// The case of len overflow when converting TUINT or TUINTPTR to TINT
+	// will be handled by the negative range checks in unsafeslice during runtime.
+ if ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ fnname = "unsafeslicecheckptr"
+ // for simplicity, unsafeslicecheckptr always uses int64
+ } else if len.Type().IsKind(types.TIDEAL) || len.Type().Size() <= types.Types[types.TUINT].Size() {
+ fnname = "unsafeslice"
+ lenType = types.Types[types.TINT]
+ }
+
+ t := n.Type()
+
+ // Call runtime.unsafeslice{,64,checkptr} to check ptr and len.
+ fn := typecheck.LookupRuntime(fnname)
+ init.Append(mkcall1(fn, nil, init, reflectdata.TypePtr(t.Elem()), typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]), typecheck.Conv(len, lenType)))
+
+ h := ir.NewSliceHeaderExpr(n.Pos(), t,
+ typecheck.Conv(ptr, types.Types[types.TUNSAFEPTR]),
+ typecheck.Conv(len, types.Types[types.TINT]),
+ typecheck.Conv(len, types.Types[types.TINT]))
+ return walkExpr(typecheck.Expr(h), init)
+}
+
+func badtype(op ir.Op, tl, tr *types.Type) {
+ var s string
+ if tl != nil {
+ s += fmt.Sprintf("\n\t%v", tl)
+ }
+ if tr != nil {
+ s += fmt.Sprintf("\n\t%v", tr)
+ }
+
+ // common mistake: *struct and *interface.
+ if tl != nil && tr != nil && tl.IsPtr() && tr.IsPtr() {
+ if tl.Elem().IsStruct() && tr.Elem().IsInterface() {
+ s += "\n\t(*struct vs *interface)"
+ } else if tl.Elem().IsInterface() && tr.Elem().IsStruct() {
+ s += "\n\t(*interface vs *struct)"
+ }
+ }
+
+ base.Errorf("illegal types for operand: %v%s", op, s)
+}
+
+func writebarrierfn(name string, l *types.Type, r *types.Type) ir.Node {
+ fn := typecheck.LookupRuntime(name)
+ fn = typecheck.SubstArgTypes(fn, l, r)
+ return fn
+}
+
+// isRuneCount reports whether n is of the form len([]rune(string)).
+// These are optimized into a call to runtime.countrunes.
+func isRuneCount(n ir.Node) bool {
+ return base.Flag.N == 0 && !base.Flag.Cfg.Instrumenting && n.Op() == ir.OLEN && n.(*ir.UnaryExpr).X.Op() == ir.OSTR2RUNES
+}
diff --git a/src/cmd/compile/internal/walk/closure.go b/src/cmd/compile/internal/walk/closure.go
new file mode 100644
index 0000000..cd922c9
--- /dev/null
+++ b/src/cmd/compile/internal/walk/closure.go
@@ -0,0 +1,276 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// directClosureCall rewrites a direct call of a function literal into
+// a normal function call with closure variables passed as arguments.
+// This avoids allocation of a closure object.
+//
+// For illustration, the following call:
+//
+// func(a int) {
+// println(byval)
+// byref++
+// }(42)
+//
+// becomes:
+//
+// func(byval int, &byref *int, a int) {
+// println(byval)
+// (*&byref)++
+// }(byval, &byref, 42)
+func directClosureCall(n *ir.CallExpr) {
+ clo := n.X.(*ir.ClosureExpr)
+ clofn := clo.Func
+
+ if ir.IsTrivialClosure(clo) {
+ return // leave for walkClosure to handle
+ }
+
+ // We are going to insert captured variables before input args.
+ var params []*types.Field
+ var decls []*ir.Name
+ for _, v := range clofn.ClosureVars {
+ if !v.Byval() {
+ // If v of type T is captured by reference,
+ // we introduce function param &v *T
+ // and v remains PAUTOHEAP with &v heapaddr
+ // (accesses will implicitly deref &v).
+
+ addr := ir.NewNameAt(clofn.Pos(), typecheck.Lookup("&"+v.Sym().Name))
+ addr.Curfn = clofn
+ addr.SetType(types.NewPtr(v.Type()))
+ v.Heapaddr = addr
+ v = addr
+ }
+
+ v.Class = ir.PPARAM
+ decls = append(decls, v)
+
+ fld := types.NewField(src.NoXPos, v.Sym(), v.Type())
+ fld.Nname = v
+ params = append(params, fld)
+ }
+
+ // f is ONAME of the actual function.
+ f := clofn.Nname
+ typ := f.Type()
+
+ // Create new function type with parameters prepended, and
+ // then update type and declarations.
+ typ = types.NewSignature(typ.Pkg(), nil, nil, append(params, typ.Params().FieldSlice()...), typ.Results().FieldSlice())
+ f.SetType(typ)
+ clofn.Dcl = append(decls, clofn.Dcl...)
+
+ // Rewrite call.
+ n.X = f
+ n.Args.Prepend(closureArgs(clo)...)
+
+ // Update the call expression's type. We need to do this
+ // because typecheck gave it the result type of the OCLOSURE
+ // node, but we only rewrote the ONAME node's type. Logically,
+ // they're the same, but the stack offsets probably changed.
+ if typ.NumResults() == 1 {
+ n.SetType(typ.Results().Field(0).Type)
+ } else {
+ n.SetType(typ.Results())
+ }
+
+ // Add to Closures for enqueueFunc. It's no longer a proper
+ // closure, but we may have already skipped over it in the
+ // functions list as a non-trivial closure, so this just
+ // ensures it's compiled.
+ ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+}
+
+func walkClosure(clo *ir.ClosureExpr, init *ir.Nodes) ir.Node {
+ clofn := clo.Func
+
+ // If no closure vars, don't bother wrapping.
+ if ir.IsTrivialClosure(clo) {
+ if base.Debug.Closure > 0 {
+ base.WarnfAt(clo.Pos(), "closure converted to global")
+ }
+ return clofn.Nname
+ }
+
+ // The closure is not trivial or directly called, so it's going to stay a closure.
+ ir.ClosureDebugRuntimeCheck(clo)
+ clofn.SetNeedctxt(true)
+
+ // The closure expression may be walked more than once if it appeared in composite
+	// literal initialization (e.g., see issue #49029).
+	//
+	// Don't add the closure function to the compilation queue more than once,
+	// since compiling a function twice would lead to an ICE.
+ if !clofn.Walked() {
+ clofn.SetWalked(true)
+ ir.CurFunc.Closures = append(ir.CurFunc.Closures, clofn)
+ }
+
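+	// Represent the closure as a composite literal of its implicit struct type:
+	// the function pointer followed by the captured variables.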
+ typ := typecheck.ClosureType(clo)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
+ clos.SetEsc(clo.Esc())
+ clos.List = append([]ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, clofn.Nname)}, closureArgs(clo)...)
+ for i, value := range clos.List {
+ clos.List[i] = ir.NewStructKeyExpr(base.Pos, typ.Field(i), value)
+ }
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(clo.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, clo.Type())
+
+ // non-escaping temp to use, if any.
+ if x := clo.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("closure type does not match order's assigned type")
+ }
+ addr.Prealloc = x
+ clo.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
+
+// closureArgs returns a slice of expressions that can be used to
+// initialize the given closure's free variables. These correspond
+// one-to-one with the variables in clo.Func.ClosureVars, and will be
+// either an ONAME node (if the variable is captured by value) or an
+// OADDR-of-ONAME node (if not).
+func closureArgs(clo *ir.ClosureExpr) []ir.Node {
+ fn := clo.Func
+
+ args := make([]ir.Node, len(fn.ClosureVars))
+ for i, v := range fn.ClosureVars {
+ var outer ir.Node
+ outer = v.Outer
+ if !v.Byval() {
+ outer = typecheck.NodAddrAt(fn.Pos(), outer)
+ }
+ args[i] = typecheck.Expr(outer)
+ }
+ return args
+}
+
+func walkMethodValue(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+ // Create closure in the form of a composite literal.
+ // For x.M with receiver (x) type T, the generated code looks like:
+ //
+ // clos = &struct{F uintptr; R T}{T.M·f, x}
+ //
+ // Like walkClosure above.
+
+ if n.X.Type().IsInterface() {
+ // Trigger panic for method on nil interface now.
+ // Otherwise it happens in the wrapper and is confusing.
+ n.X = cheapExpr(n.X, init)
+ n.X = walkExpr(n.X, nil)
+
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, n.X)
+ check := ir.NewUnaryExpr(base.Pos, ir.OCHECKNIL, tab)
+ init.Append(typecheck.Stmt(check))
+ }
+
+ typ := typecheck.MethodValueType(n)
+
+ clos := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(typ), nil)
+ clos.SetEsc(n.Esc())
+ clos.List = []ir.Node{ir.NewUnaryExpr(base.Pos, ir.OCFUNC, methodValueWrapper(n)), n.X}
+
+ addr := typecheck.NodAddr(clos)
+ addr.SetEsc(n.Esc())
+
+ // Force type conversion from *struct to the func type.
+ cfn := typecheck.ConvNop(addr, n.Type())
+
+ // non-escaping temp to use, if any.
+ if x := n.Prealloc; x != nil {
+ if !types.Identical(typ, x.Type()) {
+ panic("partial call type does not match order's assigned type")
+ }
+ addr.Prealloc = x
+ n.Prealloc = nil
+ }
+
+ return walkExpr(cfn, init)
+}
+
+// methodValueWrapper returns the ONAME node representing the
+// wrapper function (*-fm) needed for the given method value. If the
+// wrapper function hasn't been created yet, it's created and
+// added to typecheck.Target.Decls.
+func methodValueWrapper(dot *ir.SelectorExpr) *ir.Name {
+ if dot.Op() != ir.OMETHVALUE {
+ base.Fatalf("methodValueWrapper: unexpected %v (%v)", dot, dot.Op())
+ }
+
+ t0 := dot.Type()
+ meth := dot.Sel
+ rcvrtype := dot.X.Type()
+ sym := ir.MethodSymSuffix(rcvrtype, meth, "-fm")
+
+ if sym.Uniq() {
+ return sym.Def.(*ir.Name)
+ }
+ sym.SetUniq(true)
+
+ if base.Debug.Unified != 0 && base.Debug.UnifiedQuirks == 0 {
+ base.FatalfAt(dot.Pos(), "missing wrapper for %v", meth)
+ }
+
+ savecurfn := ir.CurFunc
+ saveLineNo := base.Pos
+ ir.CurFunc = nil
+
+ base.Pos = base.AutogeneratedPos
+
+ tfn := ir.NewFuncType(base.Pos, nil,
+ typecheck.NewFuncParams(t0.Params(), true),
+ typecheck.NewFuncParams(t0.Results(), false))
+
+ fn := typecheck.DeclFunc(sym, tfn)
+ fn.SetDupok(true)
+ fn.SetWrapper(true)
+
+ // Declare and initialize variable holding receiver.
+ ptr := ir.NewHiddenParam(base.Pos, fn, typecheck.Lookup(".this"), rcvrtype)
+
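+	// Build the forwarding call ptr.Meth(args...), passing the wrapper's own
+	// parameters straight through (including any variadic tail).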
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, ir.NewSelectorExpr(base.Pos, ir.OXDOT, ptr, meth), nil)
+ call.Args = ir.ParamNames(tfn.Type())
+ call.IsDDD = tfn.Type().IsVariadic()
+
+ var body ir.Node = call
+ if t0.NumResults() != 0 {
+ ret := ir.NewReturnStmt(base.Pos, nil)
+ ret.Results = []ir.Node{call}
+ body = ret
+ }
+
+ fn.Body = []ir.Node{body}
+ typecheck.FinishFuncBody()
+
+ typecheck.Func(fn)
+ // Need to typecheck the body of the just-generated wrapper.
+ // typecheckslice() requires that Curfn is set when processing an ORETURN.
+ ir.CurFunc = fn
+ typecheck.Stmts(fn.Body)
+ sym.Def = fn.Nname
+ typecheck.Target.Decls = append(typecheck.Target.Decls, fn)
+ ir.CurFunc = savecurfn
+ base.Pos = saveLineNo
+
+ return fn.Nname
+}
diff --git a/src/cmd/compile/internal/walk/compare.go b/src/cmd/compile/internal/walk/compare.go
new file mode 100644
index 0000000..625e216
--- /dev/null
+++ b/src/cmd/compile/internal/walk/compare.go
@@ -0,0 +1,491 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// The result of walkCompare MUST be assigned back to n, e.g.
+// n.Left = walkCompare(n.Left, init)
+func walkCompare(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ if n.X.Type().IsInterface() && n.Y.Type().IsInterface() && n.X.Op() != ir.ONIL && n.Y.Op() != ir.ONIL {
+ return walkCompareInterface(n, init)
+ }
+
+ if n.X.Type().IsString() && n.Y.Type().IsString() {
+ return walkCompareString(n, init)
+ }
+
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+
+ // Given mixed interface/concrete comparison,
+ // rewrite into types-equal && data-equal.
+ // This is efficient, avoids allocations, and avoids runtime calls.
+ if n.X.Type().IsInterface() != n.Y.Type().IsInterface() {
+ // Preserve side-effects in case of short-circuiting; see #32187.
+ l := cheapExpr(n.X, init)
+ r := cheapExpr(n.Y, init)
+ // Swap so that l is the interface value and r is the concrete value.
+ if n.Y.Type().IsInterface() {
+ l, r = r, l
+ }
+
+ // Handle both == and !=.
+ eq := n.Op()
+ andor := ir.OOROR
+ if eq == ir.OEQ {
+ andor = ir.OANDAND
+ }
+ // Check for types equal.
+ // For empty interface, this is:
+ // l.tab == type(r)
+ // For non-empty interface, this is:
+ // l.tab != nil && l.tab._type == type(r)
+ var eqtype ir.Node
+ tab := ir.NewUnaryExpr(base.Pos, ir.OITAB, l)
+ rtyp := reflectdata.TypePtr(r.Type())
+ if l.Type().IsEmptyInterface() {
+ tab.SetType(types.NewPtr(types.Types[types.TUINT8]))
+ tab.SetTypecheck(1)
+ eqtype = ir.NewBinaryExpr(base.Pos, eq, tab, rtyp)
+ } else {
+ nonnil := ir.NewBinaryExpr(base.Pos, brcom(eq), typecheck.NodNil(), tab)
+ match := ir.NewBinaryExpr(base.Pos, eq, itabType(tab), rtyp)
+ eqtype = ir.NewLogicalExpr(base.Pos, andor, nonnil, match)
+ }
+ // Check for data equal.
+ eqdata := ir.NewBinaryExpr(base.Pos, eq, ifaceData(n.Pos(), l, r.Type()), r)
+ // Put it all together.
+ expr := ir.NewLogicalExpr(base.Pos, andor, eqtype, eqdata)
+ return finishCompare(n, expr, init)
+ }
+
+ // Must be comparison of array or struct.
+ // Otherwise back end handles it.
+ // While we're here, decide whether to
+ // inline or call an eq alg.
+ t := n.X.Type()
+ var inline bool
+
+ maxcmpsize := int64(4)
+ unalignedLoad := ssagen.Arch.LinkArch.CanMergeLoads
+ if unalignedLoad {
+ // Keep this low enough to generate less code than a function call.
+ maxcmpsize = 2 * int64(ssagen.Arch.LinkArch.RegSize)
+ }
+
+ switch t.Kind() {
+ default:
+ if base.Debug.Libfuzzer != 0 && t.IsInteger() {
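+			// Under -d=libfuzzer, instrument integer comparisons: the
+			// libfuzzerTraceCmp* hooks let the fuzzer observe comparison
+			// operands and guide its mutations.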
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+
+ // If exactly one comparison operand is
+ // constant, invoke the constcmp functions
+ // instead, and arrange for the constant
+ // operand to be the first argument.
+ l, r := n.X, n.Y
+ if r.Op() == ir.OLITERAL {
+ l, r = r, l
+ }
+ constcmp := l.Op() == ir.OLITERAL && r.Op() != ir.OLITERAL
+
+ var fn string
+ var paramType *types.Type
+ switch t.Size() {
+ case 1:
+ fn = "libfuzzerTraceCmp1"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp1"
+ }
+ paramType = types.Types[types.TUINT8]
+ case 2:
+ fn = "libfuzzerTraceCmp2"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp2"
+ }
+ paramType = types.Types[types.TUINT16]
+ case 4:
+ fn = "libfuzzerTraceCmp4"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp4"
+ }
+ paramType = types.Types[types.TUINT32]
+ case 8:
+ fn = "libfuzzerTraceCmp8"
+ if constcmp {
+ fn = "libfuzzerTraceConstCmp8"
+ }
+ paramType = types.Types[types.TUINT64]
+ default:
+ base.Fatalf("unexpected integer size %d for %v", t.Size(), t)
+ }
+ init.Append(mkcall(fn, nil, init, tracecmpArg(l, paramType, init), tracecmpArg(r, paramType, init)))
+ }
+ return n
+ case types.TARRAY:
+ // We can compare several elements at once with 2/4/8 byte integer compares
+ inline = t.NumElem() <= 1 || (types.IsSimple[t.Elem().Kind()] && (t.NumElem() <= 4 || t.Elem().Size()*t.NumElem() <= maxcmpsize))
+ case types.TSTRUCT:
+ inline = t.NumComponents(types.IgnoreBlankFields) <= 4
+ }
+
+ cmpl := n.X
+ for cmpl != nil && cmpl.Op() == ir.OCONVNOP {
+ cmpl = cmpl.(*ir.ConvExpr).X
+ }
+ cmpr := n.Y
+ for cmpr != nil && cmpr.Op() == ir.OCONVNOP {
+ cmpr = cmpr.(*ir.ConvExpr).X
+ }
+
+ // Chose not to inline. Call equality function directly.
+ if !inline {
+ // eq algs take pointers; cmpl and cmpr must be addressable
+ if !ir.IsAddressable(cmpl) || !ir.IsAddressable(cmpr) {
+ base.Fatalf("arguments of comparison must be lvalues - %v %v", cmpl, cmpr)
+ }
+
+ fn, needsize := eqFor(t)
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args.Append(typecheck.NodAddr(cmpl))
+ call.Args.Append(typecheck.NodAddr(cmpr))
+ if needsize {
+ call.Args.Append(ir.NewInt(t.Size()))
+ }
+ res := ir.Node(call)
+ if n.Op() != ir.OEQ {
+ res = ir.NewUnaryExpr(base.Pos, ir.ONOT, res)
+ }
+ return finishCompare(n, res, init)
+ }
+
+ // inline: build boolean expression comparing element by element
+ andor := ir.OANDAND
+ if n.Op() == ir.ONE {
+ andor = ir.OOROR
+ }
+ var expr ir.Node
+ compare := func(el, er ir.Node) {
+ a := ir.NewBinaryExpr(base.Pos, n.Op(), el, er)
+ if expr == nil {
+ expr = a
+ } else {
+ expr = ir.NewLogicalExpr(base.Pos, andor, expr, a)
+ }
+ }
+ cmpl = safeExpr(cmpl, init)
+ cmpr = safeExpr(cmpr, init)
+ if t.IsStruct() {
+ for _, f := range t.Fields().Slice() {
+ sym := f.Sym
+ if sym.IsBlank() {
+ continue
+ }
+ compare(
+ ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpl, sym),
+ ir.NewSelectorExpr(base.Pos, ir.OXDOT, cmpr, sym),
+ )
+ }
+ } else {
+ step := int64(1)
+ remains := t.NumElem() * t.Elem().Size()
+ combine64bit := unalignedLoad && types.RegSize == 8 && t.Elem().Size() <= 4 && t.Elem().IsInteger()
+ combine32bit := unalignedLoad && t.Elem().Size() <= 2 && t.Elem().IsInteger()
+ combine16bit := unalignedLoad && t.Elem().Size() == 1 && t.Elem().IsInteger()
+ for i := int64(0); remains > 0; {
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8 / t.Elem().Size()
+ case remains >= 4 && combine32bit:
+ convType = types.Types[types.TUINT32]
+ step = 4 / t.Elem().Size()
+ case remains >= 2 && combine16bit:
+ convType = types.Types[types.TUINT16]
+ step = 2 / t.Elem().Size()
+ default:
+ step = 1
+ }
+ if step == 1 {
+ compare(
+ ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)),
+ ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)),
+ )
+ i++
+ remains -= t.Elem().Size()
+ } else {
+ elemType := t.Elem().ToUnsigned()
+ cmplw := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i)))
+ cmplw = typecheck.Conv(cmplw, elemType) // convert to unsigned
+ cmplw = typecheck.Conv(cmplw, convType) // widen
+ cmprw := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i)))
+ cmprw = typecheck.Conv(cmprw, elemType)
+ cmprw = typecheck.Conv(cmprw, convType)
+ // For code like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will generate a single large load.
+ for offset := int64(1); offset < step; offset++ {
+ lb := ir.Node(ir.NewIndexExpr(base.Pos, cmpl, ir.NewInt(i+offset)))
+ lb = typecheck.Conv(lb, elemType)
+ lb = typecheck.Conv(lb, convType)
+ lb = ir.NewBinaryExpr(base.Pos, ir.OLSH, lb, ir.NewInt(8*t.Elem().Size()*offset))
+ cmplw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmplw, lb)
+ rb := ir.Node(ir.NewIndexExpr(base.Pos, cmpr, ir.NewInt(i+offset)))
+ rb = typecheck.Conv(rb, elemType)
+ rb = typecheck.Conv(rb, convType)
+ rb = ir.NewBinaryExpr(base.Pos, ir.OLSH, rb, ir.NewInt(8*t.Elem().Size()*offset))
+ cmprw = ir.NewBinaryExpr(base.Pos, ir.OOR, cmprw, rb)
+ }
+ compare(cmplw, cmprw)
+ i += step
+ remains -= step * t.Elem().Size()
+ }
+ }
+ }
+ if expr == nil {
+ expr = ir.NewBool(n.Op() == ir.OEQ)
+ // We still need to use cmpl and cmpr, in case they contain
+ // an expression which might panic. See issue 23837.
+ t := typecheck.Temp(cmpl.Type())
+ a1 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpl))
+ a2 := typecheck.Stmt(ir.NewAssignStmt(base.Pos, t, cmpr))
+ init.Append(a1, a2)
+ }
+ return finishCompare(n, expr, init)
+}
+
+func walkCompareInterface(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
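+	// EqInterface yields a type/tab equality test and a data equality test;
+	// for == they are joined with &&, for != the tab test is flipped and
+	// joined with the negated data test using ||.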
+ n.Y = cheapExpr(n.Y, init)
+ n.X = cheapExpr(n.X, init)
+ eqtab, eqdata := reflectdata.EqInterface(n.X, n.Y)
+ var cmp ir.Node
+ if n.Op() == ir.OEQ {
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqtab, eqdata)
+ } else {
+ eqtab.SetOp(ir.ONE)
+ cmp = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqtab, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqdata))
+ }
+ return finishCompare(n, cmp, init)
+}
+
+func walkCompareString(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ // Rewrite comparisons to short constant strings as length+byte-wise comparisons.
+ var cs, ncs ir.Node // const string, non-const string
+ switch {
+ case ir.IsConst(n.X, constant.String) && ir.IsConst(n.Y, constant.String):
+ // ignore; will be constant evaluated
+ case ir.IsConst(n.X, constant.String):
+ cs = n.X
+ ncs = n.Y
+ case ir.IsConst(n.Y, constant.String):
+ cs = n.Y
+ ncs = n.X
+ }
+ if cs != nil {
+ cmp := n.Op()
+ // Our comparison below assumes that the non-constant string
+ // is on the left hand side, so rewrite "" cmp x to x cmp "".
+ // See issue 24817.
+ if ir.IsConst(n.X, constant.String) {
+ cmp = brrev(cmp)
+ }
+
+ // maxRewriteLen was chosen empirically.
+ // It is the value that minimizes cmd/go file size
+ // across most architectures.
+ // See the commit description for CL 26758 for details.
+ maxRewriteLen := 6
+ // Some architectures can load unaligned byte sequence as 1 word.
+ // So we can cover longer strings with the same amount of code.
+ canCombineLoads := ssagen.Arch.LinkArch.CanMergeLoads
+ combine64bit := false
+ if canCombineLoads {
+ // Keep this low enough to generate less code than a function call.
+ maxRewriteLen = 2 * ssagen.Arch.LinkArch.RegSize
+ combine64bit = ssagen.Arch.LinkArch.RegSize >= 8
+ }
+
+ var and ir.Op
+ switch cmp {
+ case ir.OEQ:
+ and = ir.OANDAND
+ case ir.ONE:
+ and = ir.OOROR
+ default:
+ // Don't do byte-wise comparisons for <, <=, etc.
+ // They're fairly complicated.
+ // Length-only checks are ok, though.
+ maxRewriteLen = 0
+ }
+ if s := ir.StringVal(cs); len(s) <= maxRewriteLen {
+ if len(s) > 0 {
+ ncs = safeExpr(ncs, init)
+ }
+ r := ir.Node(ir.NewBinaryExpr(base.Pos, cmp, ir.NewUnaryExpr(base.Pos, ir.OLEN, ncs), ir.NewInt(int64(len(s)))))
+ remains := len(s)
+ for i := 0; remains > 0; {
+ if remains == 1 || !canCombineLoads {
+ cb := ir.NewInt(int64(s[i]))
+ ncb := ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i)))
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, ncb, cb))
+ remains--
+ i++
+ continue
+ }
+ var step int
+ var convType *types.Type
+ switch {
+ case remains >= 8 && combine64bit:
+ convType = types.Types[types.TINT64]
+ step = 8
+ case remains >= 4:
+ convType = types.Types[types.TUINT32]
+ step = 4
+ case remains >= 2:
+ convType = types.Types[types.TUINT16]
+ step = 2
+ }
+ ncsubstr := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i))), convType)
+ csubstr := int64(s[i])
+ // Calculate large constant from bytes as sequence of shifts and ors.
+ // Like this: uint32(s[0]) | uint32(s[1])<<8 | uint32(s[2])<<16 ...
+ // ssa will combine this into a single large load.
+ for offset := 1; offset < step; offset++ {
+ b := typecheck.Conv(ir.NewIndexExpr(base.Pos, ncs, ir.NewInt(int64(i+offset))), convType)
+ b = ir.NewBinaryExpr(base.Pos, ir.OLSH, b, ir.NewInt(int64(8*offset)))
+ ncsubstr = ir.NewBinaryExpr(base.Pos, ir.OOR, ncsubstr, b)
+ csubstr |= int64(s[i+offset]) << uint8(8*offset)
+ }
+ csubstrPart := ir.NewInt(csubstr)
+				// Compare "step" bytes at once
+ r = ir.NewLogicalExpr(base.Pos, and, r, ir.NewBinaryExpr(base.Pos, cmp, csubstrPart, ncsubstr))
+ remains -= step
+ i += step
+ }
+ return finishCompare(n, r, init)
+ }
+ }
+
+ var r ir.Node
+ if n.Op() == ir.OEQ || n.Op() == ir.ONE {
+ // prepare for rewrite below
+ n.X = cheapExpr(n.X, init)
+ n.Y = cheapExpr(n.Y, init)
+ eqlen, eqmem := reflectdata.EqString(n.X, n.Y)
+ // quick check of len before full compare for == or !=.
+ // memequal then tests equality up to length len.
+ if n.Op() == ir.OEQ {
+ // len(left) == len(right) && memequal(left, right, len)
+ r = ir.NewLogicalExpr(base.Pos, ir.OANDAND, eqlen, eqmem)
+ } else {
+ // len(left) != len(right) || !memequal(left, right, len)
+ eqlen.SetOp(ir.ONE)
+ r = ir.NewLogicalExpr(base.Pos, ir.OOROR, eqlen, ir.NewUnaryExpr(base.Pos, ir.ONOT, eqmem))
+ }
+ } else {
+ // sys_cmpstring(s1, s2) :: 0
+ r = mkcall("cmpstring", types.Types[types.TINT], init, typecheck.Conv(n.X, types.Types[types.TSTRING]), typecheck.Conv(n.Y, types.Types[types.TSTRING]))
+ r = ir.NewBinaryExpr(base.Pos, n.Op(), r, ir.NewInt(0))
+ }
+
+ return finishCompare(n, r, init)
+}
+
+// The result of finishCompare MUST be assigned back to n, e.g.
+// n.Left = finishCompare(n.Left, x, r, init)
+func finishCompare(n *ir.BinaryExpr, r ir.Node, init *ir.Nodes) ir.Node {
+ r = typecheck.Expr(r)
+ r = typecheck.Conv(r, n.Type())
+ r = walkExpr(r, init)
+ return r
+}
+
+func eqFor(t *types.Type) (n ir.Node, needsize bool) {
+ // Should only arrive here with large memory or
+ // a struct/array containing a non-memory field/element.
+ // Small memory is handled inline, and single non-memory
+ // is handled by walkCompare.
+ switch a, _ := types.AlgType(t); a {
+ case types.AMEM:
+ n := typecheck.LookupRuntime("memequal")
+ n = typecheck.SubstArgTypes(n, t, t)
+ return n, true
+ case types.ASPECIAL:
+ sym := reflectdata.TypeSymPrefix(".eq", t)
+ // TODO(austin): This creates an ir.Name with a nil Func.
+ n := typecheck.NewName(sym)
+ ir.MarkFunc(n)
+ n.SetType(types.NewSignature(types.NoPkg, nil, nil, []*types.Field{
+ types.NewField(base.Pos, nil, types.NewPtr(t)),
+ types.NewField(base.Pos, nil, types.NewPtr(t)),
+ }, []*types.Field{
+ types.NewField(base.Pos, nil, types.Types[types.TBOOL]),
+ }))
+ return n, false
+ }
+ base.Fatalf("eqFor %v", t)
+ return nil, false
+}
+
+// brcom returns !(op).
+// For example, brcom(==) is !=.
+func brcom(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.ONE
+ case ir.ONE:
+ return ir.OEQ
+ case ir.OLT:
+ return ir.OGE
+ case ir.OGT:
+ return ir.OLE
+ case ir.OLE:
+ return ir.OGT
+ case ir.OGE:
+ return ir.OLT
+ }
+ base.Fatalf("brcom: no com for %v\n", op)
+ return op
+}
+
+// brrev returns reverse(op).
+// For example, brrev(<) is >.
+func brrev(op ir.Op) ir.Op {
+ switch op {
+ case ir.OEQ:
+ return ir.OEQ
+ case ir.ONE:
+ return ir.ONE
+ case ir.OLT:
+ return ir.OGT
+ case ir.OGT:
+ return ir.OLT
+ case ir.OLE:
+ return ir.OGE
+ case ir.OGE:
+ return ir.OLE
+ }
+ base.Fatalf("brrev: no rev for %v\n", op)
+ return op
+}
+
+func tracecmpArg(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+ // Ugly hack to avoid "constant -1 overflows uintptr" errors, etc.
+ if n.Op() == ir.OLITERAL && n.Type().IsSigned() && ir.Int64Val(n) < 0 {
+ n = copyExpr(n, n.Type(), init)
+ }
+
+ return typecheck.Conv(n, t)
+}
diff --git a/src/cmd/compile/internal/walk/complit.go b/src/cmd/compile/internal/walk/complit.go
new file mode 100644
index 0000000..e957580
--- /dev/null
+++ b/src/cmd/compile/internal/walk/complit.go
@@ -0,0 +1,676 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// walkCompLit walks a composite literal node:
+// OARRAYLIT, OSLICELIT, OMAPLIT, OSTRUCTLIT (all CompLitExpr), or OPTRLIT (AddrExpr).
+func walkCompLit(n ir.Node, init *ir.Nodes) ir.Node {
+ if isStaticCompositeLiteral(n) && !ssagen.TypeOK(n.Type()) {
+ n := n.(*ir.CompLitExpr) // not OPTRLIT
+ // n can be directly represented in the read-only data section.
+ // Make direct reference to the static data. See issue 12841.
+ vstat := readonlystaticname(n.Type())
+ fixedlit(inInitFunction, initKindStatic, n, vstat, init)
+ return typecheck.Expr(vstat)
+ }
+ var_ := typecheck.Temp(n.Type())
+ anylit(n, var_, init)
+ return var_
+}
+
+// initContext is the context in which static data is populated.
+// It is either in an init function or in any other function.
+// Static data populated in an init function will be written either
+// zero times (as a readonly, static data symbol) or
+// one time (during init function execution).
+// Either way, there is no opportunity for races or further modification,
+// so the data can be written to a (possibly readonly) data symbol.
+// Static data populated in any other function needs to be local to
+// that function to allow multiple instances of that function
+// to execute concurrently without clobbering each others' data.
+type initContext uint8
+
+const (
+ inInitFunction initContext = iota
+ inNonInitFunction
+)
+
+func (c initContext) String() string {
+ if c == inInitFunction {
+ return "inInitFunction"
+ }
+ return "inNonInitFunction"
+}
+
+// readonlystaticname returns a name backed by a read-only static data symbol.
+func readonlystaticname(t *types.Type) *ir.Name {
+ n := staticinit.StaticName(t)
+ n.MarkReadonly()
+ n.Linksym().Set(obj.AttrContentAddressable, true)
+ n.Linksym().Set(obj.AttrLocal, true)
+ return n
+}
+
+func isSimpleName(nn ir.Node) bool {
+ if nn.Op() != ir.ONAME || ir.IsBlank(nn) {
+ return false
+ }
+ n := nn.(*ir.Name)
+ return n.OnStack()
+}
+
+func litas(l ir.Node, r ir.Node, init *ir.Nodes) {
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, r))
+}
+
+// initGenType is a bitmap indicating the types of generation that will occur for a static value.
+type initGenType uint8
+
+const (
+ initDynamic initGenType = 1 << iota // contains some dynamic values, for which init code will be generated
+ initConst // contains some constant values, which may be written into data symbols
+)
+
+// getdyn calculates the initGenType for n.
+// If top is false, getdyn is recursing.
+func getdyn(n ir.Node, top bool) initGenType {
+ switch n.Op() {
+ default:
+ if ir.IsConstNode(n) {
+ return initConst
+ }
+ return initDynamic
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ if !top {
+ return initDynamic
+ }
+ if n.Len/4 > int64(len(n.List)) {
+ // <25% of entries have explicit values.
+ // Very rough estimation, it takes 4 bytes of instructions
+ // to initialize 1 byte of result. So don't use a static
+ // initializer if the dynamic initialization code would be
+ // smaller than the static value.
+ // See issue 23780.
+ return initDynamic
+ }
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ }
+ lit := n.(*ir.CompLitExpr)
+
+ var mode initGenType
+ for _, n1 := range lit.List {
+ switch n1.Op() {
+ case ir.OKEY:
+ n1 = n1.(*ir.KeyExpr).Value
+ case ir.OSTRUCTKEY:
+ n1 = n1.(*ir.StructKeyExpr).Value
+ }
+ mode |= getdyn(n1, false)
+ if mode == initDynamic|initConst {
+ break
+ }
+ }
+ return mode
+}
+
+// isStaticCompositeLiteral reports whether n is a compile-time constant.
+func isStaticCompositeLiteral(n ir.Node) bool {
+ switch n.Op() {
+ case ir.OSLICELIT:
+ return false
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, r := range n.List {
+ if r.Op() == ir.OKEY {
+ r = r.(*ir.KeyExpr).Value
+ }
+ if !isStaticCompositeLiteral(r) {
+ return false
+ }
+ }
+ return true
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, r := range n.List {
+ r := r.(*ir.StructKeyExpr)
+ if !isStaticCompositeLiteral(r.Value) {
+ return false
+ }
+ }
+ return true
+ case ir.OLITERAL, ir.ONIL:
+ return true
+ case ir.OCONVIFACE:
+ // See staticassign's OCONVIFACE case for comments.
+ n := n.(*ir.ConvExpr)
+ val := ir.Node(n)
+ for val.Op() == ir.OCONVIFACE {
+ val = val.(*ir.ConvExpr).X
+ }
+ if val.Type().IsInterface() {
+ return val.Op() == ir.ONIL
+ }
+ if types.IsDirectIface(val.Type()) && val.Op() == ir.ONIL {
+ return true
+ }
+ return isStaticCompositeLiteral(val)
+ }
+ return false
+}
+
+// initKind is a kind of static initialization: static, dynamic, or local.
+// Static initialization represents literals and
+// literal components of composite literals.
+// Dynamic initialization represents non-literals and
+// non-literal components of composite literals.
+// LocalCode initialization represents initialization
+// that occurs purely in generated code local to the function of use.
+// Initialization code is sometimes generated in passes,
+// first static then dynamic.
+type initKind uint8
+
+const (
+ initKindStatic initKind = iota + 1
+ initKindDynamic
+ initKindLocalCode
+)
+
+// fixedlit handles struct, array, and slice literals.
+// TODO: expand documentation.
+func fixedlit(ctxt initContext, kind initKind, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+ isBlank := var_ == ir.BlankNode
+ var splitnode func(ir.Node) (a ir.Node, value ir.Node)
+ switch n.Op() {
+ case ir.OARRAYLIT, ir.OSLICELIT:
+ var k int64
+ splitnode = func(r ir.Node) (ir.Node, ir.Node) {
+ if r.Op() == ir.OKEY {
+ kv := r.(*ir.KeyExpr)
+ k = typecheck.IndexConst(kv.Key)
+ if k < 0 {
+ base.Fatalf("fixedlit: invalid index %v", kv.Key)
+ }
+ r = kv.Value
+ }
+ a := ir.NewIndexExpr(base.Pos, var_, ir.NewInt(k))
+ k++
+ if isBlank {
+ return ir.BlankNode, r
+ }
+ return a, r
+ }
+ case ir.OSTRUCTLIT:
+ splitnode = func(rn ir.Node) (ir.Node, ir.Node) {
+ r := rn.(*ir.StructKeyExpr)
+ if r.Sym().IsBlank() || isBlank {
+ return ir.BlankNode, r.Value
+ }
+ ir.SetPos(r)
+ return ir.NewSelectorExpr(base.Pos, ir.ODOT, var_, r.Sym()), r.Value
+ }
+ default:
+ base.Fatalf("fixedlit bad op: %v", n.Op())
+ }
+
+ for _, r := range n.List {
+ a, value := splitnode(r)
+ if a == ir.BlankNode && !staticinit.AnySideEffects(value) {
+ // Discard.
+ continue
+ }
+
+ switch value.Op() {
+ case ir.OSLICELIT:
+ value := value.(*ir.CompLitExpr)
+ if (kind == initKindStatic && ctxt == inNonInitFunction) || (kind == initKindDynamic && ctxt == inInitFunction) {
+ slicelit(ctxt, value, a, init)
+ continue
+ }
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
+ fixedlit(ctxt, kind, value, a, init)
+ continue
+ }
+
+ islit := ir.IsConstNode(value)
+ if (kind == initKindStatic && !islit) || (kind == initKindDynamic && islit) {
+ continue
+ }
+
+ // build list of assignments: var[index] = expr
+ ir.SetPos(a)
+ as := ir.NewAssignStmt(base.Pos, a, value)
+ as = typecheck.Stmt(as).(*ir.AssignStmt)
+ switch kind {
+ case initKindStatic:
+ genAsStatic(as)
+ case initKindDynamic, initKindLocalCode:
+ a = orderStmtInPlace(as, map[string][]*ir.Name{})
+ a = walkStmt(a)
+ init.Append(a)
+ default:
+ base.Fatalf("fixedlit: bad kind %d", kind)
+ }
+
+ }
+}
+
+func isSmallSliceLit(n *ir.CompLitExpr) bool {
+ if n.Op() != ir.OSLICELIT {
+ return false
+ }
+
+ return n.Type().Elem().Size() == 0 || n.Len <= ir.MaxSmallArraySize/n.Type().Elem().Size()
+}
+
+func slicelit(ctxt initContext, n *ir.CompLitExpr, var_ ir.Node, init *ir.Nodes) {
+ // make an array type corresponding to the number of elements we have
+ t := types.NewArray(n.Type().Elem(), n.Len)
+ types.CalcSize(t)
+
+ if ctxt == inNonInitFunction {
+ // put everything into static array
+ vstat := staticinit.StaticName(t)
+
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ fixedlit(ctxt, initKindDynamic, n, vstat, init)
+
+ // copy static to slice
+ var_ = typecheck.AssignExpr(var_)
+ name, offset, ok := staticinit.StaticLoc(var_)
+ if !ok || name.Class != ir.PEXTERN {
+ base.Fatalf("slicelit: %v", var_)
+ }
+ staticdata.InitSlice(name, offset, vstat.Linksym(), t.NumElem())
+ return
+ }
+
+ // recipe for var = []t{...}
+ // 1. make a static array
+ // var vstat [...]t
+ // 2. assign (data statements) the constant part
+ // vstat = constpart{}
+ // 3. make an auto pointer to array and allocate heap to it
+ // var vauto *[...]t = new([...]t)
+ // 4. copy the static array to the auto array
+ // *vauto = vstat
+ // 5. for each dynamic part assign to the array
+ // vauto[i] = dynamic part
+ // 6. assign slice of allocated heap to var
+ // var = vauto[:]
+ //
+ // an optimization is done if there is no constant part
+ // 3. var vauto *[...]t = new([...]t)
+ // 5. vauto[i] = dynamic part
+ // 6. var = vauto[:]
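+ //
+ // As a rough, hypothetical illustration (variable names invented), for
+ //	s := []int{1, 2, 3, x}
+ // the recipe corresponds to
+ //	var vstat = [4]int{1, 2, 3} // (1),(2)
+ //	vauto := new([4]int)        // (3)
+ //	*vauto = vstat              // (4)
+ //	vauto[3] = x                // (5)
+ //	s = vauto[:]                // (6)
+ // (small literals may skip the static copy entirely; see isSmallSliceLit).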
+
+ // if the literal contains constants,
+ // make static initialized array (1),(2)
+ var vstat ir.Node
+
+ mode := getdyn(n, true)
+ if mode&initConst != 0 && !isSmallSliceLit(n) {
+ if ctxt == inInitFunction {
+ vstat = readonlystaticname(t)
+ } else {
+ vstat = staticinit.StaticName(t)
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+ }
+
+ // make new auto *array (3 declare)
+ vauto := typecheck.Temp(types.NewPtr(t))
+
+ // set auto to point at new temp or heap (3 assign)
+ var a ir.Node
+ if x := n.Prealloc; x != nil {
+ // temp allocated during order.go for dddarg
+ if !types.Identical(t, x.Type()) {
+ panic("dotdotdot base type does not match order's assigned type")
+ }
+ a = initStackTemp(init, x, vstat)
+ } else if n.Esc() == ir.EscNone {
+ a = initStackTemp(init, typecheck.Temp(t), vstat)
+ } else {
+ a = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(t))
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, vauto, a))
+
+ if vstat != nil && n.Prealloc == nil && n.Esc() != ir.EscNone {
+ // If we allocated on the heap with ONEW, copy the static to the
+ // heap (4). We skip this for stack temporaries, because
+ // initStackTemp already handled the copy.
+ a = ir.NewStarExpr(base.Pos, vauto)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, a, vstat))
+ }
+
+ // put dynamics into array (5)
+ var index int64
+ for _, value := range n.List {
+ if value.Op() == ir.OKEY {
+ kv := value.(*ir.KeyExpr)
+ index = typecheck.IndexConst(kv.Key)
+ if index < 0 {
+ base.Fatalf("slicelit: invalid index %v", kv.Key)
+ }
+ value = kv.Value
+ }
+ a := ir.NewIndexExpr(base.Pos, vauto, ir.NewInt(index))
+ a.SetBounded(true)
+ index++
+
+ // TODO need to check bounds?
+
+ switch value.Op() {
+ case ir.OSLICELIT:
+ break
+
+ case ir.OARRAYLIT, ir.OSTRUCTLIT:
+ value := value.(*ir.CompLitExpr)
+ k := initKindDynamic
+ if vstat == nil {
+ // Generate both static and dynamic initializations.
+ // See issue #31987.
+ k = initKindLocalCode
+ }
+ fixedlit(ctxt, k, value, a, init)
+ continue
+ }
+
+ if vstat != nil && ir.IsConstNode(value) { // already set by copy from static value
+ continue
+ }
+
+ // build list of vauto[c] = expr
+ ir.SetPos(value)
+ as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, a, value))
+ as = orderStmtInPlace(as, map[string][]*ir.Name{})
+ as = walkStmt(as)
+ init.Append(as)
+ }
+
+ // make slice out of heap (6)
+ a = ir.NewAssignStmt(base.Pos, var_, ir.NewSliceExpr(base.Pos, ir.OSLICE, vauto, nil, nil, nil))
+
+ a = typecheck.Stmt(a)
+ a = orderStmtInPlace(a, map[string][]*ir.Name{})
+ a = walkStmt(a)
+ init.Append(a)
+}
+
+func maplit(n *ir.CompLitExpr, m ir.Node, init *ir.Nodes) {
+ // make the map var
+ a := ir.NewCallExpr(base.Pos, ir.OMAKE, nil, nil)
+ a.SetEsc(n.Esc())
+ a.Args = []ir.Node{ir.TypeNode(n.Type()), ir.NewInt(int64(len(n.List)))}
+ litas(m, a, init)
+
+ entries := n.List
+
+ // The order pass already removed any dynamic (runtime-computed) entries.
+ // All remaining entries are static. Double-check that.
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ base.Fatalf("maplit: entry is not a literal: %v", r)
+ }
+ }
+
+ if len(entries) > 25 {
+ // For a large number of entries, put them in an array and loop.
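+ //
+ // As a hypothetical illustration, a literal such as
+ //	m := map[string]int{"k0": 0, "k1": 1 /* ... more than 25 constant entries ... */}
+ // takes this path: the keys and elements are laid out in two read-only
+ // static arrays and inserted into the map with the loop built below.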
+
+ // build types [count]Tindex and [count]Tvalue
+ tk := types.NewArray(n.Type().Key(), int64(len(entries)))
+ te := types.NewArray(n.Type().Elem(), int64(len(entries)))
+
+ // TODO(#47904): mark tk and te NoAlg here once the
+ // compiler/linker can handle NoAlg types correctly.
+
+ types.CalcSize(tk)
+ types.CalcSize(te)
+
+ // make and initialize static arrays
+ vstatk := readonlystaticname(tk)
+ vstate := readonlystaticname(te)
+
+ datak := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+ datae := ir.NewCompLitExpr(base.Pos, ir.OARRAYLIT, nil, nil)
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ datak.List.Append(r.Key)
+ datae.List.Append(r.Value)
+ }
+ fixedlit(inInitFunction, initKindStatic, datak, vstatk, init)
+ fixedlit(inInitFunction, initKindStatic, datae, vstate, init)
+
+ // loop adding structure elements to map
+ // for i = 0; i < len(vstatk); i++ {
+ // map[vstatk[i]] = vstate[i]
+ // }
+ i := typecheck.Temp(types.Types[types.TINT])
+ rhs := ir.NewIndexExpr(base.Pos, vstate, i)
+ rhs.SetBounded(true)
+
+ kidx := ir.NewIndexExpr(base.Pos, vstatk, i)
+ kidx.SetBounded(true)
+ lhs := ir.NewIndexExpr(base.Pos, m, kidx)
+
+ zero := ir.NewAssignStmt(base.Pos, i, ir.NewInt(0))
+ cond := ir.NewBinaryExpr(base.Pos, ir.OLT, i, ir.NewInt(tk.NumElem()))
+ incr := ir.NewAssignStmt(base.Pos, i, ir.NewBinaryExpr(base.Pos, ir.OADD, i, ir.NewInt(1)))
+
+ var body ir.Node = ir.NewAssignStmt(base.Pos, lhs, rhs)
+ body = typecheck.Stmt(body) // typechecker rewrites OINDEX to OINDEXMAP
+ body = orderStmtInPlace(body, map[string][]*ir.Name{})
+
+ loop := ir.NewForStmt(base.Pos, nil, cond, incr, nil)
+ loop.Body = []ir.Node{body}
+ loop.SetInit([]ir.Node{zero})
+
+ appendWalkStmt(init, loop)
+ return
+ }
+ // For a small number of entries, just add them directly.
+
+ // Build list of var[c] = expr.
+ // Use temporaries so that mapassign1 can have addressable key, elem.
+ // TODO(josharian): avoid map key temporaries for mapfast_* assignments with literal keys.
+ tmpkey := typecheck.Temp(m.Type().Key())
+ tmpelem := typecheck.Temp(m.Type().Elem())
+
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+ index, elem := r.Key, r.Value
+
+ ir.SetPos(index)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpkey, index))
+
+ ir.SetPos(elem)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmpelem, elem))
+
+ ir.SetPos(tmpelem)
+ var a ir.Node = ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, tmpkey), tmpelem)
+ a = typecheck.Stmt(a) // typechecker rewrites OINDEX to OINDEXMAP
+ a = orderStmtInPlace(a, map[string][]*ir.Name{})
+ appendWalkStmt(init, a)
+ }
+
+ appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpkey))
+ appendWalkStmt(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, tmpelem))
+}
+
+func anylit(n ir.Node, var_ ir.Node, init *ir.Nodes) {
+ t := n.Type()
+ switch n.Op() {
+ default:
+ base.Fatalf("anylit: not lit, op=%v node=%v", n.Op(), n)
+
+ case ir.ONAME:
+ n := n.(*ir.Name)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, n))
+
+ case ir.OMETHEXPR:
+ n := n.(*ir.SelectorExpr)
+ anylit(n.FuncName(), var_, init)
+
+ case ir.OPTRLIT:
+ n := n.(*ir.AddrExpr)
+ if !t.IsPtr() {
+ base.Fatalf("anylit: not ptr")
+ }
+
+ var r ir.Node
+ if n.Prealloc != nil {
+ // n.Prealloc is stack temporary used as backing store.
+ r = initStackTemp(init, n.Prealloc, nil)
+ } else {
+ r = ir.NewUnaryExpr(base.Pos, ir.ONEW, ir.TypeNode(n.X.Type()))
+ r.SetEsc(n.Esc())
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, r))
+
+ var_ = ir.NewStarExpr(base.Pos, var_)
+ var_ = typecheck.AssignExpr(var_)
+ anylit(n.X, var_, init)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ if !t.IsStruct() && !t.IsArray() {
+ base.Fatalf("anylit: not struct/array")
+ }
+
+ if isSimpleName(var_) && len(n.List) > 4 {
+ // lay out static data
+ vstat := readonlystaticname(t)
+
+ ctxt := inInitFunction
+ if n.Op() == ir.OARRAYLIT {
+ ctxt = inNonInitFunction
+ }
+ fixedlit(ctxt, initKindStatic, n, vstat, init)
+
+ // copy static to var
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, vstat))
+
+ // add expressions to automatic
+ fixedlit(inInitFunction, initKindDynamic, n, var_, init)
+ break
+ }
+
+ var components int64
+ if n.Op() == ir.OARRAYLIT {
+ components = t.NumElem()
+ } else {
+ components = int64(t.NumFields())
+ }
+ // initialization of an array or struct with unspecified components (missing fields or array elements)
+ if isSimpleName(var_) || int64(len(n.List)) < components {
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, var_, nil))
+ }
+
+ fixedlit(inInitFunction, initKindLocalCode, n, var_, init)
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ slicelit(inInitFunction, n, var_, init)
+
+ case ir.OMAPLIT:
+ n := n.(*ir.CompLitExpr)
+ if !t.IsMap() {
+ base.Fatalf("anylit: not map")
+ }
+ maplit(n, var_, init)
+ }
+}
+
+// oaslit handles special composite literal assignments.
+// It returns true if n's effects have been added to init,
+// in which case n should be dropped from the program by the caller.
+func oaslit(n *ir.AssignStmt, init *ir.Nodes) bool {
+ if n.X == nil || n.Y == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if n.X.Type() == nil || n.Y.Type() == nil {
+ // not a special composite literal assignment
+ return false
+ }
+ if !isSimpleName(n.X) {
+ // not a special composite literal assignment
+ return false
+ }
+ x := n.X.(*ir.Name)
+ if !types.Identical(n.X.Type(), n.Y.Type()) {
+ // not a special composite literal assignment
+ return false
+ }
+ if x.Addrtaken() {
+ // If x is address-taken, the RHS may (implicitly) use the LHS.
+ // Not safe to do a special composite literal assignment
+ // (which may expand to multiple assignments).
+ return false
+ }
+
+ switch n.Y.Op() {
+ default:
+ // not a special composite literal assignment
+ return false
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT:
+ if ir.Any(n.Y, func(y ir.Node) bool { return ir.Uses(y, x) }) {
+ // not safe to do a special composite literal assignment if RHS uses LHS.
+ return false
+ }
+ anylit(n.Y, n.X, init)
+ }
+
+ return true
+}
+
+func genAsStatic(as *ir.AssignStmt) {
+ if as.X.Type() == nil {
+ base.Fatalf("genAsStatic as.Left not typechecked")
+ }
+
+ name, offset, ok := staticinit.StaticLoc(as.X)
+ if !ok || (name.Class != ir.PEXTERN && as.X != ir.BlankNode) {
+ base.Fatalf("genAsStatic: lhs %v", as.X)
+ }
+
+ switch r := as.Y; r.Op() {
+ case ir.OLITERAL:
+ staticdata.InitConst(name, offset, r, int(r.Type().Size()))
+ return
+ case ir.OMETHEXPR:
+ r := r.(*ir.SelectorExpr)
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r.FuncName()))
+ return
+ case ir.ONAME:
+ r := r.(*ir.Name)
+ if r.Offset_ != 0 {
+ base.Fatalf("genAsStatic %+v", as)
+ }
+ if r.Class == ir.PFUNC {
+ staticdata.InitAddr(name, offset, staticdata.FuncLinksym(r))
+ return
+ }
+ }
+ base.Fatalf("genAsStatic: rhs %v", as.Y)
+}
diff --git a/src/cmd/compile/internal/walk/convert.go b/src/cmd/compile/internal/walk/convert.go
new file mode 100644
index 0000000..ffc5fd1
--- /dev/null
+++ b/src/cmd/compile/internal/walk/convert.go
@@ -0,0 +1,474 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "encoding/binary"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+ "cmd/internal/sys"
+)
+
+// walkConv walks an OCONV or OCONVNOP (but not OCONVIFACE) node.
+func walkConv(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ if n.Op() == ir.OCONVNOP && n.Type() == n.X.Type() {
+ return n.X
+ }
+ if n.Op() == ir.OCONVNOP && ir.ShouldCheckPtr(ir.CurFunc, 1) {
+ if n.Type().IsUnsafePtr() && n.X.Type().IsUintptr() { // uintptr to unsafe.Pointer
+ return walkCheckPtrArithmetic(n, init)
+ }
+ }
+ param, result := rtconvfn(n.X.Type(), n.Type())
+ if param == types.Txxx {
+ return n
+ }
+ fn := types.BasicTypeNames[param] + "to" + types.BasicTypeNames[result]
+ return typecheck.Conv(mkcall(fn, types.Types[result], init, typecheck.Conv(n.X, types.Types[param])), n.Type())
+}
+
+// walkConvInterface walks an OCONVIFACE node.
+func walkConvInterface(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+
+ n.X = walkExpr(n.X, init)
+
+ fromType := n.X.Type()
+ toType := n.Type()
+ if !fromType.IsInterface() && !ir.IsBlank(ir.CurFunc.Nname) {
+ // skip unnamed functions (func _())
+ reflectdata.MarkTypeUsedInInterface(fromType, ir.CurFunc.LSym)
+ }
+
+ if !fromType.IsInterface() {
+ var typeWord ir.Node
+ if toType.IsEmptyInterface() {
+ typeWord = reflectdata.TypePtr(fromType)
+ } else {
+ typeWord = reflectdata.ITabAddr(fromType, toType)
+ }
+ l := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone))
+ l.SetType(toType)
+ l.SetTypecheck(n.Typecheck())
+ return l
+ }
+ if fromType.IsEmptyInterface() {
+ base.Fatalf("OCONVIFACE can't operate on an empty interface")
+ }
+
+ // Evaluate the input interface.
+ c := typecheck.Temp(fromType)
+ init.Append(ir.NewAssignStmt(base.Pos, c, n.X))
+
+ // Grab its parts.
+ itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, c)
+ itab.SetType(types.Types[types.TUINTPTR].PtrTo())
+ itab.SetTypecheck(1)
+ data := ir.NewUnaryExpr(n.Pos(), ir.OIDATA, c)
+ data.SetType(types.Types[types.TUINT8].PtrTo()) // Type is generic pointer - we're just passing it through.
+ data.SetTypecheck(1)
+
+ var typeWord ir.Node
+ if toType.IsEmptyInterface() {
+ // Implement interface to empty interface conversion.
+ // res = itab
+ // if res != nil {
+ // res = res.type
+ // }
+ typeWord = typecheck.Temp(types.NewPtr(types.Types[types.TUINT8]))
+ init.Append(ir.NewAssignStmt(base.Pos, typeWord, itab))
+ nif := ir.NewIfStmt(base.Pos, typecheck.Expr(ir.NewBinaryExpr(base.Pos, ir.ONE, typeWord, typecheck.NodNil())), nil, nil)
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, typeWord, itabType(typeWord))}
+ init.Append(nif)
+ } else {
+ // Must be converting I2I (more specific to less specific interface).
+ // res = convI2I(toType, itab)
+ fn := typecheck.LookupRuntime("convI2I")
+ types.CalcSize(fn.Type())
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args = []ir.Node{reflectdata.TypePtr(toType), itab}
+ typeWord = walkExpr(typecheck.Expr(call), init)
+ }
+
+ // Build the result.
+ // e = iface{typeWord, data}
+ e := ir.NewBinaryExpr(base.Pos, ir.OEFACE, typeWord, data)
+ e.SetType(toType) // assign type manually, typecheck doesn't understand OEFACE.
+ e.SetTypecheck(1)
+ return e
+}
+
+// dataWord returns the data word (the second word) used to represent n in an interface.
+// n must not be of interface type.
+// escapes reports whether the value escapes; if it does not, a stack temporary may back the data word.
+func dataWord(pos src.XPos, n ir.Node, init *ir.Nodes, escapes bool) ir.Node {
+ fromType := n.Type()
+
+ // If it's a pointer, it is its own representation.
+ if types.IsDirectIface(fromType) {
+ return n
+ }
+
+ // Try a bunch of cases to avoid an allocation.
+ var value ir.Node
+ switch {
+ case fromType.Size() == 0:
+ // n is zero-sized. Use zerobase.
+ cheapExpr(n, init) // Evaluate n for side-effects. See issue 19246.
+ value = ir.NewLinksymExpr(base.Pos, ir.Syms.Zerobase, types.Types[types.TUINTPTR])
+ case fromType.IsBoolean() || (fromType.Size() == 1 && fromType.IsInteger()):
+ // n is a bool/byte. Use staticuint64s[n * 8] on little-endian
+ // and staticuint64s[n * 8 + 7] on big-endian.
+ n = cheapExpr(n, init)
+ // byteindex widens n so that the multiplication doesn't overflow.
+ index := ir.NewBinaryExpr(base.Pos, ir.OLSH, byteindex(n), ir.NewInt(3))
+ if ssagen.Arch.LinkArch.ByteOrder == binary.BigEndian {
+ index = ir.NewBinaryExpr(base.Pos, ir.OADD, index, ir.NewInt(7))
+ }
+ // The actual type is [256]uint64, but we use [256*8]uint8 so we can address
+ // individual bytes.
+ staticuint64s := ir.NewLinksymExpr(base.Pos, ir.Syms.Staticuint64s, types.NewArray(types.Types[types.TUINT8], 256*8))
+ xe := ir.NewIndexExpr(base.Pos, staticuint64s, index)
+ xe.SetBounded(true)
+ value = xe
+ case n.Op() == ir.ONAME && n.(*ir.Name).Class == ir.PEXTERN && n.(*ir.Name).Readonly():
+ // n is a readonly global; use it directly.
+ value = n
+ case !escapes && fromType.Size() <= 1024:
+ // n does not escape. Use a stack temporary initialized to n.
+ value = typecheck.Temp(fromType)
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, value, n)))
+ }
+ if value != nil {
+ // The interface data word is &value.
+ return typecheck.Expr(typecheck.NodAddr(value))
+ }
+
+ // Time to do an allocation. We'll call into the runtime for that.
+ fnname, argType, needsaddr := dataWordFuncName(fromType)
+ fn := typecheck.LookupRuntime(fnname)
+
+ var args []ir.Node
+ if needsaddr {
+ // Types of large or unknown size are passed by reference.
+ // Orderexpr arranged for n to be a temporary for all
+ // the conversions it could see. Comparison of an interface
+ // with a non-interface, especially in a switch on interface value
+ // with non-interface cases, is not visible to order.stmt, so we
+ // have to fall back on allocating a temp here.
+ if !ir.IsAddressable(n) {
+ n = copyExpr(n, fromType, init)
+ }
+ fn = typecheck.SubstArgTypes(fn, fromType)
+ args = []ir.Node{reflectdata.TypePtr(fromType), typecheck.NodAddr(n)}
+ } else {
+ // Use a specialized conversion routine that takes the type being
+ // converted by value, not by pointer.
+ var arg ir.Node
+ switch {
+ case fromType == argType:
+ // already in the right type, nothing to do
+ arg = n
+ case fromType.Kind() == argType.Kind(),
+ fromType.IsPtrShaped() && argType.IsPtrShaped():
+ // can directly convert (e.g. named type to underlying type, or one pointer to another)
+ // TODO: never happens because pointers are directIface?
+ arg = ir.NewConvExpr(pos, ir.OCONVNOP, argType, n)
+ case fromType.IsInteger() && argType.IsInteger():
+ // can directly convert (e.g. int32 to uint32)
+ arg = ir.NewConvExpr(pos, ir.OCONV, argType, n)
+ default:
+ // unsafe cast through memory
+ arg = copyExpr(n, fromType, init)
+ var addr ir.Node = typecheck.NodAddr(arg)
+ addr = ir.NewConvExpr(pos, ir.OCONVNOP, argType.PtrTo(), addr)
+ arg = ir.NewStarExpr(pos, addr)
+ arg.SetType(argType)
+ }
+ args = []ir.Node{arg}
+ }
+ call := ir.NewCallExpr(base.Pos, ir.OCALL, fn, nil)
+ call.Args = args
+ return safeExpr(walkExpr(typecheck.Expr(call), init), init)
+}
+
+// walkConvIData walks an OCONVIDATA node.
+func walkConvIData(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ return dataWord(n.Pos(), n.X, init, n.Esc() != ir.EscNone)
+}
+
+// walkBytesRunesToString walks an OBYTES2STR or ORUNES2STR node.
+func walkBytesRunesToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for string on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ if n.Op() == ir.ORUNES2STR {
+ // slicerunetostring(*[32]byte, []rune) string
+ return mkcall("slicerunetostring", n.Type(), init, a, n.X)
+ }
+ // slicebytetostring(*[32]byte, ptr *byte, n int) string
+ n.X = cheapExpr(n.X, init)
+ ptr, len := backingArrayPtrLen(n.X)
+ return mkcall("slicebytetostring", n.Type(), init, a, ptr, len)
+}
+
+// walkBytesToStringTemp walks an OBYTES2STRTMP node.
+func walkBytesToStringTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ if !base.Flag.Cfg.Instrumenting {
+ // Let the backend handle OBYTES2STRTMP directly
+ // to avoid a function call to slicebytetostringtmp.
+ return n
+ }
+ // slicebytetostringtmp(ptr *byte, n int) string
+ n.X = cheapExpr(n.X, init)
+ ptr, len := backingArrayPtrLen(n.X)
+ return mkcall("slicebytetostringtmp", n.Type(), init, ptr, len)
+}
+
+// walkRuneToString walks an ORUNESTR node.
+func walkRuneToString(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ a = stackBufAddr(4, types.Types[types.TUINT8])
+ }
+ // intstring(*[4]byte, rune)
+ return mkcall("intstring", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TINT64]))
+}
+
+// walkStringToBytes walks an OSTR2BYTES node.
+func walkStringToBytes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ s := n.X
+ if ir.IsConst(s, constant.String) {
+ sc := ir.StringVal(s)
+
+ // Allocate a [n]byte of the right size.
+ t := types.NewArray(types.Types[types.TUINT8], int64(len(sc)))
+ var a ir.Node
+ if n.Esc() == ir.EscNone && len(sc) <= int(ir.MaxImplicitStackVarSize) {
+ a = stackBufAddr(t.NumElem(), t.Elem())
+ } else {
+ types.CalcSize(t)
+ a = ir.NewUnaryExpr(base.Pos, ir.ONEW, nil)
+ a.SetType(types.NewPtr(t))
+ a.SetTypecheck(1)
+ a.MarkNonNil()
+ }
+ p := typecheck.Temp(t.PtrTo()) // *[n]byte
+ init.Append(typecheck.Stmt(ir.NewAssignStmt(base.Pos, p, a)))
+
+ // Copy from the static string data to the [n]byte.
+ if len(sc) > 0 {
+ as := ir.NewAssignStmt(base.Pos, ir.NewStarExpr(base.Pos, p), ir.NewStarExpr(base.Pos, typecheck.ConvNop(ir.NewUnaryExpr(base.Pos, ir.OSPTR, s), t.PtrTo())))
+ appendWalkStmt(init, as)
+ }
+
+ // Slice the [n]byte to a []byte.
+ slice := ir.NewSliceExpr(n.Pos(), ir.OSLICEARR, p, nil, nil, nil)
+ slice.SetType(n.Type())
+ slice.SetTypecheck(1)
+ return walkExpr(slice, init)
+ }
+
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for slice on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ // stringtoslicebyte(*[32]byte, string) []byte
+ return mkcall("stringtoslicebyte", n.Type(), init, a, typecheck.Conv(s, types.Types[types.TSTRING]))
+}
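+
+// Illustrative sketch (hypothetical source): for a constant string such as
+//	b := []byte("hello")
+// the fast path above sizes a [5]byte backing array (placed on the stack when
+// the slice does not escape), copies the string data into it, and slices it,
+// avoiding the generic stringtoslicebyte call.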
+
+// walkStringToBytesTemp walks an OSTR2BYTESTMP node.
+func walkStringToBytesTemp(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ // []byte(string) conversion that creates a slice
+ // referring to the actual string bytes.
+ // This conversion is handled later by the backend and
+ // is only for use by internal compiler optimizations
+ // that know that the slice won't be mutated.
+ // The only such case today is:
+ // for i, c := range []byte(string)
+ n.X = walkExpr(n.X, init)
+ return n
+}
+
+// walkStringToRunes walks an OSTR2RUNES node.
+func walkStringToRunes(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ a := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ // Create temporary buffer for slice on stack.
+ a = stackBufAddr(tmpstringbufsize, types.Types[types.TINT32])
+ }
+ // stringtoslicerune(*[32]rune, string) []rune
+ return mkcall("stringtoslicerune", n.Type(), init, a, typecheck.Conv(n.X, types.Types[types.TSTRING]))
+}
+
+// dataWordFuncName returns the name of the function used to convert a value of type "from"
+// to the data word of an interface.
+// argType is the type the argument needs to be coerced to.
+// needsaddr reports whether the value should be passed by value (needsaddr==false) or by its address (needsaddr==true).
+func dataWordFuncName(from *types.Type) (fnname string, argType *types.Type, needsaddr bool) {
+ if from.IsInterface() {
+ base.Fatalf("can only handle non-interfaces")
+ }
+ switch {
+ case from.Size() == 2 && uint8(from.Alignment()) == 2:
+ return "convT16", types.Types[types.TUINT16], false
+ case from.Size() == 4 && uint8(from.Alignment()) == 4 && !from.HasPointers():
+ return "convT32", types.Types[types.TUINT32], false
+ case from.Size() == 8 && uint8(from.Alignment()) == uint8(types.Types[types.TUINT64].Alignment()) && !from.HasPointers():
+ return "convT64", types.Types[types.TUINT64], false
+ }
+ if sc := from.SoleComponent(); sc != nil {
+ switch {
+ case sc.IsString():
+ return "convTstring", types.Types[types.TSTRING], false
+ case sc.IsSlice():
+ return "convTslice", types.NewSlice(types.Types[types.TUINT8]), false // the element type doesn't matter
+ }
+ }
+
+ if from.HasPointers() {
+ return "convT", types.Types[types.TUNSAFEPTR], true
+ }
+ return "convTnoptr", types.Types[types.TUNSAFEPTR], true
+}
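+
+// A rough picture of the mapping above (illustrative, not exhaustive):
+//	uint16, int16           -> convT16 (passed by value)
+//	float32                 -> convT32 (passed by value)
+//	uint64, float64         -> convT64 (passed by value)
+//	string                  -> convTstring (passed by value)
+//	[]byte                  -> convTslice (passed by value)
+//	struct{ p *int; x int } -> convT (passed by address)
+//	[4]int                  -> convTnoptr (passed by address)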
+
+// rtconvfn returns the parameter and result types that will be used by a
+// runtime function to convert from type src to type dst. The runtime function
+// name can be derived from the names of the returned types.
+//
+// If no such function is necessary, it returns (Txxx, Txxx).
+func rtconvfn(src, dst *types.Type) (param, result types.Kind) {
+ if ssagen.Arch.SoftFloat {
+ return types.Txxx, types.Txxx
+ }
+
+ switch ssagen.Arch.LinkArch.Family {
+ case sys.ARM, sys.MIPS:
+ if src.IsFloat() {
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), dst.Kind()
+ }
+ }
+
+ case sys.I386:
+ if src.IsFloat() {
+ switch dst.Kind() {
+ case types.TINT64, types.TUINT64:
+ return types.TFLOAT64, dst.Kind()
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TFLOAT64, types.TUINT32
+ }
+ }
+ if dst.IsFloat() {
+ switch src.Kind() {
+ case types.TINT64, types.TUINT64:
+ return src.Kind(), dst.Kind()
+ case types.TUINT32, types.TUINT, types.TUINTPTR:
+ return types.TUINT32, types.TFLOAT64
+ }
+ }
+ }
+ return types.Txxx, types.Txxx
+}
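+
+// Illustrative sketch: on a 32-bit target such as ARM, a conversion like
+//	var f float64
+//	_ = int64(f)
+// yields (TFLOAT64, TINT64) here; walkConv then derives the runtime helper
+// name from the two kinds (e.g. "float64toint64") and emits a call to it.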
+
+// byteindex converts n, which is byte-sized, to an int used to index into an array.
+// We cannot use conv, because we allow converting bool to int here,
+// which is forbidden in user code.
+func byteindex(n ir.Node) ir.Node {
+ // We cannot convert from bool to int directly.
+ // While converting from int8 to int is possible, it would yield
+ // the wrong result for negative values.
+ // Reinterpreting the value as an unsigned byte solves both cases.
+ if !types.Identical(n.Type(), types.Types[types.TUINT8]) {
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TUINT8])
+ n.SetTypecheck(1)
+ }
+ n = ir.NewConvExpr(base.Pos, ir.OCONV, nil, n)
+ n.SetType(types.Types[types.TINT])
+ n.SetTypecheck(1)
+ return n
+}
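+
+// Illustrative sketch: byteindex is what lets dataWord turn
+//	var b bool
+//	_ = interface{}(b)
+// into an index into the runtime staticuint64s table: the bool is
+// reinterpreted as a uint8 (0 or 1), widened to int, and then shifted
+// left by 3 by the caller.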
+
+func walkCheckPtrArithmetic(n *ir.ConvExpr, init *ir.Nodes) ir.Node {
+ // Calling cheapExpr(n, init) below leads to a recursive call to
+ // walkExpr, which leads us back here again. Use n.Checkptr to
+ // prevent infinite loops.
+ if n.CheckPtr() {
+ return n
+ }
+ n.SetCheckPtr(true)
+ defer n.SetCheckPtr(false)
+
+ // TODO(mdempsky): Make stricter. We only need to exempt
+ // reflect.Value.Pointer and reflect.Value.UnsafeAddr.
+ switch n.X.Op() {
+ case ir.OCALLMETH:
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ return n
+ }
+
+ if n.X.Op() == ir.ODOTPTR && ir.IsReflectHeaderDataField(n.X) {
+ return n
+ }
+
+ // Find original unsafe.Pointer operands involved in this
+ // arithmetic expression.
+ //
+ // "It is valid both to add and to subtract offsets from a
+ // pointer in this way. It is also valid to use &^ to round
+ // pointers, usually for alignment."
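+ //
+ // As a hypothetical illustration, with checkptr instrumentation enabled
+ // (e.g. via -d=checkptr), an expression such as
+ //	p := unsafe.Pointer(uintptr(q) + off)
+ // is rewritten so that runtime.checkptrArithmetic receives the computed
+ // pointer together with a slice of the original unsafe.Pointer operands
+ // (here just q).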
+ var originals []ir.Node
+ var walk func(n ir.Node)
+ walk = func(n ir.Node) {
+ switch n.Op() {
+ case ir.OADD:
+ n := n.(*ir.BinaryExpr)
+ walk(n.X)
+ walk(n.Y)
+ case ir.OSUB, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ walk(n.X)
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Type().IsUnsafePtr() {
+ n.X = cheapExpr(n.X, init)
+ originals = append(originals, typecheck.ConvNop(n.X, types.Types[types.TUNSAFEPTR]))
+ }
+ }
+ }
+ walk(n.X)
+
+ cheap := cheapExpr(n, init)
+
+ slice := typecheck.MakeDotArgs(base.Pos, types.NewSlice(types.Types[types.TUNSAFEPTR]), originals)
+ slice.SetEsc(ir.EscNone)
+
+ init.Append(mkcall("checkptrArithmetic", nil, init, typecheck.ConvNop(cheap, types.Types[types.TUNSAFEPTR]), slice))
+ // TODO(khr): Mark backing store of slice as dead. This will allow us to reuse
+ // the backing store for multiple calls to checkptrArithmetic.
+
+ return cheap
+}
diff --git a/src/cmd/compile/internal/walk/expr.go b/src/cmd/compile/internal/walk/expr.go
new file mode 100644
index 0000000..e5bf6cf
--- /dev/null
+++ b/src/cmd/compile/internal/walk/expr.go
@@ -0,0 +1,1024 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+ "internal/buildcfg"
+ "strings"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticdata"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+)
+
+// The result of walkExpr MUST be assigned back to n, e.g.
+// n.Left = walkExpr(n.Left, init)
+func walkExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ if n, ok := n.(ir.InitNode); ok && init == n.PtrInit() {
+ // not okay to use n->ninit when walking n,
+ // because we might replace n with some other node
+ // and would lose the init list.
+ base.Fatalf("walkExpr init == &n->ninit")
+ }
+
+ if len(n.Init()) != 0 {
+ walkStmtList(n.Init())
+ init.Append(ir.TakeInit(n)...)
+ }
+
+ lno := ir.SetPos(n)
+
+ if base.Flag.LowerW > 1 {
+ ir.Dump("before walk expr", n)
+ }
+
+ if n.Typecheck() != 1 {
+ base.Fatalf("missed typecheck: %+v", n)
+ }
+
+ if n.Type().IsUntyped() {
+ base.Fatalf("expression has untyped type: %+v", n)
+ }
+
+ n = walkExpr1(n, init)
+
+ // Eagerly compute sizes of all expressions for the back end.
+ if typ := n.Type(); typ != nil && typ.Kind() != types.TBLANK && !typ.IsFuncArgStruct() {
+ types.CheckSize(typ)
+ }
+ if n, ok := n.(*ir.Name); ok && n.Heapaddr != nil {
+ types.CheckSize(n.Heapaddr.Type())
+ }
+ if ir.IsConst(n, constant.String) {
+ // Emit string symbol now to avoid emitting
+ // any concurrently during the backend.
+ _ = staticdata.StringSym(n.Pos(), constant.StringVal(n.Val()))
+ }
+
+ if base.Flag.LowerW != 0 && n != nil {
+ ir.Dump("after walk expr", n)
+ }
+
+ base.Pos = lno
+ return n
+}
+
+func walkExpr1(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ default:
+ ir.Dump("walk", n)
+ base.Fatalf("walkExpr: switch 1 unknown op %+v", n.Op())
+ panic("unreachable")
+
+ case ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP:
+ return n
+
+ case ir.OTYPE, ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+ // TODO(mdempsky): Just return n; see discussion on CL 38655.
+ // Perhaps refactor to use Node.mayBeShared for these instead.
+ // If these return early, make sure to still call
+ // StringSym for constant strings.
+ return n
+
+ case ir.OMETHEXPR:
+ // TODO(mdempsky): Do this right after type checking.
+ n := n.(*ir.SelectorExpr)
+ return n.FuncName()
+
+ case ir.ONOT, ir.ONEG, ir.OPLUS, ir.OBITNOT, ir.OREAL, ir.OIMAG, ir.OSPTR, ir.OITAB, ir.OIDATA:
+ n := n.(*ir.UnaryExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODOTMETH, ir.ODOTINTER:
+ n := n.(*ir.SelectorExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.OADDR:
+ n := n.(*ir.AddrExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.OEFACE, ir.OAND, ir.OANDNOT, ir.OSUB, ir.OMUL, ir.OADD, ir.OOR, ir.OXOR, ir.OLSH, ir.ORSH,
+ ir.OUNSAFEADD:
+ n := n.(*ir.BinaryExpr)
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ return n
+
+ case ir.OUNSAFESLICE:
+ n := n.(*ir.BinaryExpr)
+ return walkUnsafeSlice(n, init)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ return walkDot(n, init)
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ return walkDotType(n, init)
+
+ case ir.ODYNAMICDOTTYPE, ir.ODYNAMICDOTTYPE2:
+ n := n.(*ir.DynamicTypeAssertExpr)
+ return walkDynamicDotType(n, init)
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ return walkLenCap(n, init)
+
+ case ir.OCOMPLEX:
+ n := n.(*ir.BinaryExpr)
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+ return n
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ return walkCompare(n, init)
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ return walkLogical(n, init)
+
+ case ir.OPRINT, ir.OPRINTN:
+ return walkPrint(n.(*ir.CallExpr), init)
+
+ case ir.OPANIC:
+ n := n.(*ir.UnaryExpr)
+ return mkcall("gopanic", nil, init, n.X)
+
+ case ir.ORECOVERFP:
+ return walkRecoverFP(n.(*ir.CallExpr), init)
+
+ case ir.OCFUNC:
+ return n
+
+ case ir.OCALLINTER, ir.OCALLFUNC:
+ n := n.(*ir.CallExpr)
+ return walkCall(n, init)
+
+ case ir.OAS, ir.OASOP:
+ return walkAssign(init, n)
+
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignList(init, n)
+
+ // a,b,... = fn()
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignFunc(init, n)
+
+ // x, y = <-c
+ // order.stmt made sure x is addressable or blank.
+ case ir.OAS2RECV:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignRecv(init, n)
+
+ // a,b = m[i]
+ case ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignMapRead(init, n)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ return walkDelete(init, n)
+
+ case ir.OAS2DOTTYPE:
+ n := n.(*ir.AssignListStmt)
+ return walkAssignDotType(n, init)
+
+ case ir.OCONVIFACE:
+ n := n.(*ir.ConvExpr)
+ return walkConvInterface(n, init)
+
+ case ir.OCONVIDATA:
+ n := n.(*ir.ConvExpr)
+ return walkConvIData(n, init)
+
+ case ir.OCONV, ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ return walkConv(n, init)
+
+ case ir.OSLICE2ARRPTR:
+ n := n.(*ir.ConvExpr)
+ n.X = walkExpr(n.X, init)
+ return n
+
+ case ir.ODIV, ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ return walkDivMod(n, init)
+
+ case ir.OINDEX:
+ n := n.(*ir.IndexExpr)
+ return walkIndex(n, init)
+
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ return walkIndexMap(n, init)
+
+ case ir.ORECV:
+ base.Fatalf("walkExpr ORECV") // should see inside OAS only
+ panic("unreachable")
+
+ case ir.OSLICEHEADER:
+ n := n.(*ir.SliceHeaderExpr)
+ return walkSliceHeader(n, init)
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ return walkSlice(n, init)
+
+ case ir.ONEW:
+ n := n.(*ir.UnaryExpr)
+ return walkNew(n, init)
+
+ case ir.OADDSTR:
+ return walkAddString(n.(*ir.AddStringExpr), init)
+
+ case ir.OAPPEND:
+ // order should make sure we only see OAS(node, OAPPEND), which we handle above.
+ base.Fatalf("append outside assignment")
+ panic("unreachable")
+
+ case ir.OCOPY:
+ return walkCopy(n.(*ir.BinaryExpr), init, base.Flag.Cfg.Instrumenting && !base.Flag.CompilingRuntime)
+
+ case ir.OCLOSE:
+ n := n.(*ir.UnaryExpr)
+ return walkClose(n, init)
+
+ case ir.OMAKECHAN:
+ n := n.(*ir.MakeExpr)
+ return walkMakeChan(n, init)
+
+ case ir.OMAKEMAP:
+ n := n.(*ir.MakeExpr)
+ return walkMakeMap(n, init)
+
+ case ir.OMAKESLICE:
+ n := n.(*ir.MakeExpr)
+ return walkMakeSlice(n, init)
+
+ case ir.OMAKESLICECOPY:
+ n := n.(*ir.MakeExpr)
+ return walkMakeSliceCopy(n, init)
+
+ case ir.ORUNESTR:
+ n := n.(*ir.ConvExpr)
+ return walkRuneToString(n, init)
+
+ case ir.OBYTES2STR, ir.ORUNES2STR:
+ n := n.(*ir.ConvExpr)
+ return walkBytesRunesToString(n, init)
+
+ case ir.OBYTES2STRTMP:
+ n := n.(*ir.ConvExpr)
+ return walkBytesToStringTemp(n, init)
+
+ case ir.OSTR2BYTES:
+ n := n.(*ir.ConvExpr)
+ return walkStringToBytes(n, init)
+
+ case ir.OSTR2BYTESTMP:
+ n := n.(*ir.ConvExpr)
+ return walkStringToBytesTemp(n, init)
+
+ case ir.OSTR2RUNES:
+ n := n.(*ir.ConvExpr)
+ return walkStringToRunes(n, init)
+
+ case ir.OARRAYLIT, ir.OSLICELIT, ir.OMAPLIT, ir.OSTRUCTLIT, ir.OPTRLIT:
+ return walkCompLit(n, init)
+
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ return walkSend(n, init)
+
+ case ir.OCLOSURE:
+ return walkClosure(n.(*ir.ClosureExpr), init)
+
+ case ir.OMETHVALUE:
+ return walkMethodValue(n.(*ir.SelectorExpr), init)
+ }
+
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
+}
+
+// walk the whole tree of the body of an
+// expression or simple statement.
+// the types of expressions are calculated.
+// compile-time constants are evaluated.
+// complex side effects like statements are appended to init
+func walkExprList(s []ir.Node, init *ir.Nodes) {
+ for i := range s {
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+func walkExprListCheap(s []ir.Node, init *ir.Nodes) {
+ for i, n := range s {
+ s[i] = cheapExpr(n, init)
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+func walkExprListSafe(s []ir.Node, init *ir.Nodes) {
+ for i, n := range s {
+ s[i] = safeExpr(n, init)
+ s[i] = walkExpr(s[i], init)
+ }
+}
+
+// return side-effect free and cheap n, appending side effects to init.
+// result may not be assignable.
+func cheapExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+ }
+
+ return copyExpr(n, n.Type(), init)
+}
+
+// return side effect-free n, appending side effects to init.
+// result is assignable if n is.
+func safeExpr(n ir.Node, init *ir.Nodes) ir.Node {
+ if n == nil {
+ return nil
+ }
+
+ if len(n.Init()) != 0 {
+ walkStmtList(n.Init())
+ init.Append(ir.TakeInit(n)...)
+ }
+
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL, ir.OLINKSYMOFFSET:
+ return n
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.UnaryExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.ODOT, ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.SelectorExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ l := safeExpr(n.X, init)
+ if l == n.X {
+ return n
+ }
+ a := ir.Copy(n).(*ir.StarExpr)
+ a.X = l
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ l := safeExpr(n.X, init)
+ r := safeExpr(n.Index, init)
+ if l == n.X && r == n.Index {
+ return n
+ }
+ a := ir.Copy(n).(*ir.IndexExpr)
+ a.X = l
+ a.Index = r
+ return walkExpr(typecheck.Expr(a), init)
+
+ case ir.OSTRUCTLIT, ir.OARRAYLIT, ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ if isStaticCompositeLiteral(n) {
+ return n
+ }
+ }
+
+ // make a copy; must not be used as an lvalue
+ if ir.IsAddressable(n) {
+ base.Fatalf("missing lvalue case in safeExpr: %v", n)
+ }
+ return cheapExpr(n, init)
+}
+
+func copyExpr(n ir.Node, t *types.Type, init *ir.Nodes) ir.Node {
+ l := typecheck.Temp(t)
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, l, n))
+ return l
+}
+
+func walkAddString(n *ir.AddStringExpr, init *ir.Nodes) ir.Node {
+ c := len(n.List)
+
+ if c < 2 {
+ base.Fatalf("walkAddString count %d too small", c)
+ }
+
+ buf := typecheck.NodNil()
+ if n.Esc() == ir.EscNone {
+ sz := int64(0)
+ for _, n1 := range n.List {
+ if n1.Op() == ir.OLITERAL {
+ sz += int64(len(ir.StringVal(n1)))
+ }
+ }
+
+ // Don't allocate the buffer if the result won't fit.
+ if sz < tmpstringbufsize {
+ // Create temporary buffer for result string on stack.
+ buf = stackBufAddr(tmpstringbufsize, types.Types[types.TUINT8])
+ }
+ }
+
+ // build list of string arguments
+ args := []ir.Node{buf}
+ for _, n2 := range n.List {
+ args = append(args, typecheck.Conv(n2, types.Types[types.TSTRING]))
+ }
+
+ var fn string
+ if c <= 5 {
+ // small numbers of strings use direct runtime helpers.
+ // note: order.expr knows this cutoff too.
+ fn = fmt.Sprintf("concatstring%d", c)
+ } else {
+ // large numbers of strings are passed to the runtime as a slice.
+ fn = "concatstrings"
+
+ t := types.NewSlice(types.Types[types.TSTRING])
+ // args[1:] to skip buf arg
+ slice := ir.NewCompLitExpr(base.Pos, ir.OCOMPLIT, ir.TypeNode(t), args[1:])
+ slice.Prealloc = n.Prealloc
+ args = []ir.Node{buf, slice}
+ slice.SetEsc(ir.EscNone)
+ }
+
+ cat := typecheck.LookupRuntime(fn)
+ r := ir.NewCallExpr(base.Pos, ir.OCALL, cat, nil)
+ r.Args = args
+ r1 := typecheck.Expr(r)
+ r1 = walkExpr(r1, init)
+ r1.SetType(n.Type())
+
+ return r1
+}
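+
+// Illustrative sketch: for a concatenation like
+//	s := a + b + c
+// walkAddString emits roughly concatstring3(buf, a, b, c); with more than
+// five operands the strings are instead collected into a slice and passed
+// to concatstrings.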
+
+// walkCall walks an OCALLFUNC or OCALLINTER node.
+func walkCall(n *ir.CallExpr, init *ir.Nodes) ir.Node {
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Op() == ir.OCALLINTER || n.X.Op() == ir.OMETHEXPR {
+ // We expect both interface call reflect.Type.Method and concrete
+ // call reflect.(*rtype).Method.
+ usemethod(n)
+ }
+ if n.Op() == ir.OCALLINTER {
+ reflectdata.MarkUsedIfaceMethod(n)
+ }
+
+ if n.Op() == ir.OCALLFUNC && n.X.Op() == ir.OCLOSURE {
+ directClosureCall(n)
+ }
+
+ if isFuncPCIntrinsic(n) {
+ // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function, rewrite
+ // it to the address of the function under the ABI fn is defined with.
+ name := n.X.(*ir.Name).Sym().Name
+ arg := n.Args[0]
+ var wantABI obj.ABI
+ switch name {
+ case "FuncPCABI0":
+ wantABI = obj.ABI0
+ case "FuncPCABIInternal":
+ wantABI = obj.ABIInternal
+ }
+ if isIfaceOfFunc(arg) {
+ fn := arg.(*ir.ConvExpr).X.(*ir.Name)
+ abi := fn.Func.ABI
+ if abi != wantABI {
+ base.ErrorfAt(n.Pos(), "internal/abi.%s expects an %v function, %s is defined as %v", name, wantABI, fn.Sym().Name, abi)
+ }
+ var e ir.Node = ir.NewLinksymExpr(n.Pos(), fn.Sym().LinksymABI(abi), types.Types[types.TUINTPTR])
+ e = ir.NewAddrExpr(n.Pos(), e)
+ e.SetType(types.Types[types.TUINTPTR].PtrTo())
+ e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, n.Type(), e)
+ return e
+ }
+ // fn is not a defined function. It must be ABIInternal.
+ // Read the address from func value, i.e. *(*uintptr)(idata(fn)).
+ if wantABI != obj.ABIInternal {
+ base.ErrorfAt(n.Pos(), "internal/abi.%s does not accept func expression, which is ABIInternal", name)
+ }
+ arg = walkExpr(arg, init)
+ var e ir.Node = ir.NewUnaryExpr(n.Pos(), ir.OIDATA, arg)
+ e.SetType(n.Type().PtrTo())
+ e = ir.NewStarExpr(n.Pos(), e)
+ e.SetType(n.Type())
+ return e
+ }
+
+ walkCall1(n, init)
+ return n
+}
+
+func walkCall1(n *ir.CallExpr, init *ir.Nodes) {
+ if n.Walked() {
+ return // already walked
+ }
+ n.SetWalked(true)
+
+ if n.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ args := n.Args
+ params := n.X.Type().Params()
+
+ n.X = walkExpr(n.X, init)
+ walkExprList(args, init)
+
+ for i, arg := range args {
+ // Validate argument and parameter types match.
+ param := params.Field(i)
+ if !types.Identical(arg.Type(), param.Type) {
+ base.FatalfAt(n.Pos(), "assigning %L to parameter %v (type %v)", arg, param.Sym, param.Type)
+ }
+
+ // For any argument whose evaluation might require a function call,
+ // store that argument into a temporary variable,
+ // to prevent that call from clobbering arguments already on the stack.
+ if mayCall(arg) {
+ // assignment of arg to Temp
+ tmp := typecheck.Temp(param.Type)
+ init.Append(convas(typecheck.Stmt(ir.NewAssignStmt(base.Pos, tmp, arg)).(*ir.AssignStmt), init))
+ // replace arg with temp
+ args[i] = tmp
+ }
+ }
+
+ n.Args = args
+}
+
+// walkDivMod walks an ODIV or OMOD node.
+func walkDivMod(n *ir.BinaryExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.Y = walkExpr(n.Y, init)
+
+ // rewrite complex div into function call.
+ et := n.X.Type().Kind()
+
+ if types.IsComplex[et] && n.Op() == ir.ODIV {
+ t := n.Type()
+ call := mkcall("complex128div", types.Types[types.TCOMPLEX128], init, typecheck.Conv(n.X, types.Types[types.TCOMPLEX128]), typecheck.Conv(n.Y, types.Types[types.TCOMPLEX128]))
+ return typecheck.Conv(call, t)
+ }
+
+ // Nothing to do for float divisions.
+ if types.IsFloat[et] {
+ return n
+ }
+
+ // rewrite 64-bit div and mod on 32-bit architectures.
+ // TODO: Remove this code once we can introduce
+ // runtime calls late in SSA processing.
+ if types.RegSize < 8 && (et == types.TINT64 || et == types.TUINT64) {
+ if n.Y.Op() == ir.OLITERAL {
+ // Leave div/mod by constant powers of 2 or small 16-bit constants.
+ // The SSA backend will handle those.
+ switch et {
+ case types.TINT64:
+ c := ir.Int64Val(n.Y)
+ if c < 0 {
+ c = -c
+ }
+ if c != 0 && c&(c-1) == 0 {
+ return n
+ }
+ case types.TUINT64:
+ c := ir.Uint64Val(n.Y)
+ if c < 1<<16 {
+ return n
+ }
+ if c != 0 && c&(c-1) == 0 {
+ return n
+ }
+ }
+ }
+ var fn string
+ if et == types.TINT64 {
+ fn = "int64"
+ } else {
+ fn = "uint64"
+ }
+ if n.Op() == ir.ODIV {
+ fn += "div"
+ } else {
+ fn += "mod"
+ }
+ return mkcall(fn, n.Type(), init, typecheck.Conv(n.X, types.Types[et]), typecheck.Conv(n.Y, types.Types[et]))
+ }
+ return n
+}
+
+// walkDot walks an ODOT or ODOTPTR node.
+func walkDot(n *ir.SelectorExpr, init *ir.Nodes) ir.Node {
+ usefield(n)
+ n.X = walkExpr(n.X, init)
+ return n
+}
+
+// walkDotType walks an ODOTTYPE or ODOTTYPE2 node.
+func walkDotType(n *ir.TypeAssertExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ // Set up interface type addresses for back end.
+ if !n.Type().IsInterface() && !n.X.Type().IsEmptyInterface() {
+ n.Itab = reflectdata.ITabAddr(n.Type(), n.X.Type())
+ }
+ return n
+}
+
+// walkDynamicDotType walks an ODYNAMICDOTTYPE or ODYNAMICDOTTYPE2 node.
+func walkDynamicDotType(n *ir.DynamicTypeAssertExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.T = walkExpr(n.T, init)
+ return n
+}
+
+// walkIndex walks an OINDEX node.
+func walkIndex(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+
+ // save the original node for bounds checking elision.
+ // If it was an ODIV/OMOD, walk might rewrite it.
+ r := n.Index
+
+ n.Index = walkExpr(n.Index, init)
+
+ // if range of type cannot exceed static array bound,
+ // disable bounds check.
+ if n.Bounded() {
+ return n
+ }
+ t := n.X.Type()
+ if t != nil && t.IsPtr() {
+ t = t.Elem()
+ }
+ if t.IsArray() {
+ n.SetBounded(bounded(r, t.NumElem()))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+ base.Warn("index bounds check elided")
+ }
+ if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+ base.Errorf("index out of bounds")
+ }
+ } else if ir.IsConst(n.X, constant.String) {
+ n.SetBounded(bounded(r, int64(len(ir.StringVal(n.X)))))
+ if base.Flag.LowerM != 0 && n.Bounded() && !ir.IsConst(n.Index, constant.Int) {
+ base.Warn("index bounds check elided")
+ }
+ if ir.IsSmallIntConst(n.Index) && !n.Bounded() {
+ base.Errorf("index out of bounds")
+ }
+ }
+
+ if ir.IsConst(n.Index, constant.Int) {
+ if v := n.Index.Val(); constant.Sign(v) < 0 || ir.ConstOverflow(v, types.Types[types.TINT]) {
+ base.Errorf("index out of bounds")
+ }
+ }
+ return n
+}
+
+// mapKeyArg returns an expression for key that is suitable to be passed
+// as the key argument for mapaccess and mapdelete functions.
+// n is the map indexing or delete Node (to provide Pos).
+// Note: this is not used for mapassign, which does distinguish pointer vs.
+// integer key.
+func mapKeyArg(fast int, n, key ir.Node) ir.Node {
+ switch fast {
+ case mapslow:
+ // standard version takes key by reference.
+ // order.expr made sure key is addressable.
+ return typecheck.NodAddr(key)
+ case mapfast32ptr:
+ // mapaccess and mapdelete don't distinguish pointer vs. integer key.
+ return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT32], key)
+ case mapfast64ptr:
+ // mapaccess and mapdelete don't distinguish pointer vs. integer key.
+ return ir.NewConvExpr(n.Pos(), ir.OCONVNOP, types.Types[types.TUINT64], key)
+ default:
+ // fast version takes key by value.
+ return key
+ }
+}
+
+// walkIndexMap walks an OINDEXMAP node.
+func walkIndexMap(n *ir.IndexExpr, init *ir.Nodes) ir.Node {
+ // Replace m[k] with *map{access1,assign}(maptype, m, &k)
+ n.X = walkExpr(n.X, init)
+ n.Index = walkExpr(n.Index, init)
+ map_ := n.X
+ key := n.Index
+ t := map_.Type()
+ var call *ir.CallExpr
+ if n.Assigned {
+ // This m[k] expression is on the left-hand side of an assignment.
+ fast := mapfast(t)
+ if fast == mapslow {
+ // standard version takes key by reference.
+ // order.expr made sure key is addressable.
+ key = typecheck.NodAddr(key)
+ }
+ call = mkcall1(mapfn(mapassign[fast], t, false), nil, init, reflectdata.TypePtr(t), map_, key)
+ } else {
+ // m[k] is not the target of an assignment.
+ fast := mapfast(t)
+ key = mapKeyArg(fast, n, key)
+ if w := t.Elem().Size(); w <= zeroValSize {
+ call = mkcall1(mapfn(mapaccess1[fast], t, false), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key)
+ } else {
+ z := reflectdata.ZeroAddr(w)
+ call = mkcall1(mapfn("mapaccess1_fat", t, true), types.NewPtr(t.Elem()), init, reflectdata.TypePtr(t), map_, key, z)
+ }
+ }
+ call.SetType(types.NewPtr(t.Elem()))
+ call.MarkNonNil() // mapaccess1* and mapassign always return non-nil pointers.
+ star := ir.NewStarExpr(base.Pos, call)
+ star.SetType(t.Elem())
+ star.SetTypecheck(1)
+ return star
+}
+
+// walkLogical walks an OANDAND or OOROR node.
+func walkLogical(n *ir.LogicalExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+
+ // cannot put side effects from n.Right on init,
+ // because they cannot run before n.Left is checked.
+ // save elsewhere and store on the eventual n.Right.
+ var ll ir.Nodes
+
+ n.Y = walkExpr(n.Y, &ll)
+ n.Y = ir.InitExpr(ll, n.Y)
+ return n
+}
+
+// walkSend walks an OSEND node.
+func walkSend(n *ir.SendStmt, init *ir.Nodes) ir.Node {
+ n1 := n.Value
+ n1 = typecheck.AssignConv(n1, n.Chan.Type().Elem(), "chan send")
+ n1 = walkExpr(n1, init)
+ n1 = typecheck.NodAddr(n1)
+ return mkcall1(chanfn("chansend1", 2, n.Chan.Type()), nil, init, n.Chan, n1)
+}
+
+// walkSlice walks an OSLICE, OSLICEARR, OSLICESTR, OSLICE3, or OSLICE3ARR node.
+func walkSlice(n *ir.SliceExpr, init *ir.Nodes) ir.Node {
+ n.X = walkExpr(n.X, init)
+ n.Low = walkExpr(n.Low, init)
+ if n.Low != nil && ir.IsZero(n.Low) {
+ // Reduce x[0:j] to x[:j] and x[0:j:k] to x[:j:k].
+ n.Low = nil
+ }
+ n.High = walkExpr(n.High, init)
+ n.Max = walkExpr(n.Max, init)
+
+ if n.Op().IsSlice3() {
+ if n.Max != nil && n.Max.Op() == ir.OCAP && ir.SameSafeExpr(n.X, n.Max.(*ir.UnaryExpr).X) {
+ // Reduce x[i:j:cap(x)] to x[i:j].
+ if n.Op() == ir.OSLICE3 {
+ n.SetOp(ir.OSLICE)
+ } else {
+ n.SetOp(ir.OSLICEARR)
+ }
+ return reduceSlice(n)
+ }
+ return n
+ }
+ return reduceSlice(n)
+}
+
+// walkSliceHeader walks an OSLICEHEADER node.
+func walkSliceHeader(n *ir.SliceHeaderExpr, init *ir.Nodes) ir.Node {
+ n.Ptr = walkExpr(n.Ptr, init)
+ n.Len = walkExpr(n.Len, init)
+ n.Cap = walkExpr(n.Cap, init)
+ return n
+}
+
+// TODO(josharian): combine this with its caller and simplify
+func reduceSlice(n *ir.SliceExpr) ir.Node {
+ if n.High != nil && n.High.Op() == ir.OLEN && ir.SameSafeExpr(n.X, n.High.(*ir.UnaryExpr).X) {
+ // Reduce x[i:len(x)] to x[i:].
+ n.High = nil
+ }
+ if (n.Op() == ir.OSLICE || n.Op() == ir.OSLICESTR) && n.Low == nil && n.High == nil {
+ // Reduce x[:] to x.
+ if base.Debug.Slice > 0 {
+ base.Warn("slice: omit slice operation")
+ }
+ return n.X
+ }
+ return n
+}
+
+// bounded reports whether the integer n is provably in the range [0, max).
+func bounded(n ir.Node, max int64) bool {
+ if n.Type() == nil || !n.Type().IsInteger() {
+ return false
+ }
+
+ sign := n.Type().IsSigned()
+ bits := int32(8 * n.Type().Size())
+
+ if ir.IsSmallIntConst(n) {
+ v := ir.Int64Val(n)
+ return 0 <= v && v < max
+ }
+
+ switch n.Op() {
+ case ir.OAND, ir.OANDNOT:
+ n := n.(*ir.BinaryExpr)
+ v := int64(-1)
+ switch {
+ case ir.IsSmallIntConst(n.X):
+ v = ir.Int64Val(n.X)
+ case ir.IsSmallIntConst(n.Y):
+ v = ir.Int64Val(n.Y)
+ if n.Op() == ir.OANDNOT {
+ v = ^v
+ if !sign {
+ v &= 1<<uint(bits) - 1
+ }
+ }
+ }
+ if 0 <= v && v < max {
+ return true
+ }
+
+ case ir.OMOD:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ if 0 <= v && v <= max {
+ return true
+ }
+ }
+
+ case ir.ODIV:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ for bits > 0 && v >= 2 {
+ bits--
+ v >>= 1
+ }
+ }
+
+ case ir.ORSH:
+ n := n.(*ir.BinaryExpr)
+ if !sign && ir.IsSmallIntConst(n.Y) {
+ v := ir.Int64Val(n.Y)
+ if v > int64(bits) {
+ return true
+ }
+ bits -= int32(v)
+ }
+ }
+
+ if !sign && bits <= 62 && 1<<uint(bits) <= max {
+ return true
+ }
+
+ return false
+}
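+
+// Illustrative sketch: bounded is what lets walkIndex elide the bounds check
+// in code such as
+//	var a [8]int
+//	_ = a[i&7]
+// because the OAND case above proves the index lies in [0, 8).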
+
+// usemethod checks calls for uses of reflect.Type.{Method,MethodByName}.
+func usemethod(n *ir.CallExpr) {
+ // Don't mark reflect.(*rtype).Method, etc. themselves in the reflect package.
+ // Those functions may be alive via the itab, which should not cause all
+ // methods to be kept alive. We only want to mark their callers.
+ if base.Ctxt.Pkgpath == "reflect" {
+ switch ir.CurFunc.Nname.Sym().Name { // TODO: is there a better way than hardcoding the names?
+ case "(*rtype).Method", "(*rtype).MethodByName", "(*interfaceType).Method", "(*interfaceType).MethodByName":
+ return
+ }
+ }
+
+ dot, ok := n.X.(*ir.SelectorExpr)
+ if !ok {
+ return
+ }
+
+ // Looking for either direct method calls or interface method calls of:
+ // reflect.Type.Method - func(int) reflect.Method
+ // reflect.Type.MethodByName - func(string) (reflect.Method, bool)
+ var pKind types.Kind
+
+ switch dot.Sel.Name {
+ case "Method":
+ pKind = types.TINT
+ case "MethodByName":
+ pKind = types.TSTRING
+ default:
+ return
+ }
+
+ t := dot.Selection.Type
+ if t.NumParams() != 1 || t.Params().Field(0).Type.Kind() != pKind {
+ return
+ }
+ switch t.NumResults() {
+ case 1:
+ // ok
+ case 2:
+ if t.Results().Field(1).Type.Kind() != types.TBOOL {
+ return
+ }
+ default:
+ return
+ }
+
+ // Check that first result type is "reflect.Method". Note that we have to check sym name and sym package
+ // separately, as we can't check for exact string "reflect.Method" reliably (e.g., see #19028 and #38515).
+ if s := t.Results().Field(0).Type.Sym(); s != nil && s.Name == "Method" && types.IsReflectPkg(s.Pkg) {
+ ir.CurFunc.SetReflectMethod(true)
+ // The LSym is initialized at this point. We need to set the attribute on the LSym.
+ ir.CurFunc.LSym.Set(obj.AttrReflectMethod, true)
+ }
+}
+
+func usefield(n *ir.SelectorExpr) {
+ if !buildcfg.Experiment.FieldTrack {
+ return
+ }
+
+ switch n.Op() {
+ default:
+ base.Fatalf("usefield %v", n.Op())
+
+ case ir.ODOT, ir.ODOTPTR:
+ break
+ }
+
+ field := n.Selection
+ if field == nil {
+ base.Fatalf("usefield %v %v without paramfld", n.X.Type(), n.Sel)
+ }
+ if field.Sym != n.Sel {
+ base.Fatalf("field inconsistency: %v != %v", field.Sym, n.Sel)
+ }
+ if !strings.Contains(field.Note, "go:\"track\"") {
+ return
+ }
+
+ outer := n.X.Type()
+ if outer.IsPtr() {
+ outer = outer.Elem()
+ }
+ if outer.Sym() == nil {
+ base.Errorf("tracked field must be in named struct type")
+ }
+ if !types.IsExported(field.Sym.Name) {
+ base.Errorf("tracked field must be exported (upper case)")
+ }
+
+ sym := reflectdata.TrackSym(outer, field)
+ if ir.CurFunc.FieldTrack == nil {
+ ir.CurFunc.FieldTrack = make(map[*obj.LSym]struct{})
+ }
+ ir.CurFunc.FieldTrack[sym] = struct{}{}
+}
diff --git a/src/cmd/compile/internal/walk/order.go b/src/cmd/compile/internal/walk/order.go
new file mode 100644
index 0000000..861c122
--- /dev/null
+++ b/src/cmd/compile/internal/walk/order.go
@@ -0,0 +1,1507 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "fmt"
+ "go/constant"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/staticinit"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/objabi"
+ "cmd/internal/src"
+)
+
+// Rewrite tree to use separate statements to enforce
+// order of evaluation. Makes walk easier, because it
+// can (after this runs) reorder at will within an expression.
+//
+// Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
+//
+// Introduce temporaries as needed by runtime routines.
+// For example, the map runtime routines take the map key
+// by reference, so make sure all map keys are addressable
+// by copying them to temporaries as needed.
+// The same is true for channel operations.
+//
+// Arrange that map index expressions only appear in direct
+// assignments x = m[k] or m[k] = x, never in larger expressions.
+//
+// Arrange that receive expressions only appear in direct assignments
+// x = <-c or as standalone statements <-c, never in larger expressions.
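+//
+// For example (an informal sketch, not actual compiler output), a statement
+// such as
+//	m[k] /= f()
+// behaves after ordering as if it had been written
+//	tmp := f()
+//	m[k] = m[k] / tmp
+// with the map index kept in a direct assignment, as described above.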
+
+// TODO(rsc): The temporary introduction during multiple assignments
+// should be moved into this file, so that the temporaries can be cleaned
+// and so that conversions implicit in the OAS2FUNC and OAS2RECV
+// nodes can be made explicit and then have their temporaries cleaned.
+
+// TODO(rsc): Goto and multilevel break/continue can jump over
+// inserted VARKILL annotations. Work out a way to handle these.
+// The current implementation is safe, in that it will execute correctly.
+// But it won't reuse temporaries as aggressively as it might, and
+// it can result in unnecessary zeroing of those variables in the function
+// prologue.
+
+// orderState holds state during the ordering process.
+type orderState struct {
+ out []ir.Node // list of generated statements
+ temp []*ir.Name // stack of temporary variables
+ free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
+ edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
+}
+
+// order rewrites fn.Body to apply the ordering constraints
+// described in the comment at the top of the file.
+func order(fn *ir.Func) {
+ if base.Flag.W > 1 {
+ s := fmt.Sprintf("\nbefore order %v", fn.Sym())
+ ir.DumpList(s, fn.Body)
+ }
+
+ orderBlock(&fn.Body, map[string][]*ir.Name{})
+}
+
+// append typechecks stmt and appends it to out.
+func (o *orderState) append(stmt ir.Node) {
+ o.out = append(o.out, typecheck.Stmt(stmt))
+}
+
+// newTemp allocates a new temporary with the given type,
+// pushes it onto the temp stack, and returns it.
+// If clear is true, newTemp emits code to zero the temporary.
+func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
+ var v *ir.Name
+ key := t.LinkString()
+ if a := o.free[key]; len(a) > 0 {
+ v = a[len(a)-1]
+ if !types.Identical(t, v.Type()) {
+ base.Fatalf("expected %L to have type %v", v, t)
+ }
+ o.free[key] = a[:len(a)-1]
+ } else {
+ v = typecheck.Temp(t)
+ }
+ if clear {
+ o.append(ir.NewAssignStmt(base.Pos, v, nil))
+ }
+
+ o.temp = append(o.temp, v)
+ return v
+}
+
+// copyExpr behaves like newTemp but also emits
+// code to initialize the temporary to the value n.
+func (o *orderState) copyExpr(n ir.Node) *ir.Name {
+ return o.copyExpr1(n, false)
+}
+
+// copyExprClear is like copyExpr but clears the temp before assignment.
+// It is provided for use when the evaluation of tmp = n turns into
+// a function call that is passed a pointer to the temporary as the output space.
+// If the call blocks before tmp has been written,
+// the garbage collector will still treat the temporary as live,
+// so we must zero it before entering that call.
+// Today, this only happens for channel receive operations.
+// (The other candidate would be map access, but map access
+// returns a pointer to the result data instead of taking a pointer
+// to be filled in.)
+func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
+ return o.copyExpr1(n, true)
+}
+
+func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
+ t := n.Type()
+ v := o.newTemp(t, clear)
+ o.append(ir.NewAssignStmt(base.Pos, v, n))
+ return v
+}
+
+// cheapExpr returns a cheap version of n.
+// The definition of cheap is that n is a variable or constant.
+// If not, cheapExpr allocates a new tmp, emits tmp = n,
+// and then returns tmp.
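+//
+// For example (informally), cheapExpr(f()) yields a fresh temporary
+// initialized to f(), while cheapExpr(len(p)) for a plain name p
+// returns the expression unchanged.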
+func (o *orderState) cheapExpr(n ir.Node) ir.Node {
+ if n == nil {
+ return nil
+ }
+
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a.X = l
+ return typecheck.Expr(a)
+ }
+
+ return o.copyExpr(n)
+}
+
+// safeExpr returns a safe version of n.
+// The definition of safe is that n can appear multiple times
+// without violating the semantics of the original program,
+// and that assigning to the safe version has the same effect
+// as assigning to the original n.
+//
+// The intended use is to apply to x when rewriting x += y into x = x + y.
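+//
+// For example (informally), when x is m[k()], safeExpr hoists k() into a
+// temporary once, so the rewritten x = x + y reads and writes the map entry
+// under the same key instead of evaluating k() twice.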
+func (o *orderState) safeExpr(n ir.Node) ir.Node {
+ switch n.Op() {
+ case ir.ONAME, ir.OLITERAL, ir.ONIL:
+ return n
+
+ case ir.OLEN, ir.OCAP:
+ n := n.(*ir.UnaryExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.UnaryExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODOT:
+ n := n.(*ir.SelectorExpr)
+ l := o.safeExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODOTPTR:
+ n := n.(*ir.SelectorExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.SelectorExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.ODEREF:
+ n := n.(*ir.StarExpr)
+ l := o.cheapExpr(n.X)
+ if l == n.X {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.StarExpr)
+ a.X = l
+ return typecheck.Expr(a)
+
+ case ir.OINDEX, ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ var l ir.Node
+ if n.X.Type().IsArray() {
+ l = o.safeExpr(n.X)
+ } else {
+ l = o.cheapExpr(n.X)
+ }
+ r := o.cheapExpr(n.Index)
+ if l == n.X && r == n.Index {
+ return n
+ }
+ a := ir.SepCopy(n).(*ir.IndexExpr)
+ a.X = l
+ a.Index = r
+ return typecheck.Expr(a)
+
+ default:
+ base.Fatalf("order.safeExpr %v", n.Op())
+ return nil // not reached
+ }
+}
+
+// isaddrokay reports whether it is okay to pass n's address to runtime routines.
+// Taking the address of a variable makes the liveness and optimization analyses
+// lose track of where the variable's lifetime ends. To avoid hurting the analyses
+// of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
+// because we emit explicit VARKILL instructions marking the end of those
+// temporaries' lifetimes.
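+//
+// For example, a package-level (PEXTERN) variable or a compiler-generated
+// autotmp is isaddrokay, while an ordinary user-declared local is not.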
+func isaddrokay(n ir.Node) bool {
+ return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
+}
+
+// addrTemp ensures that n is okay to pass by address to runtime routines.
+// If the original argument n is not okay, addrTemp creates a tmp, emits
+// tmp = n, and then returns tmp.
+// The result of addrTemp MUST be assigned back to n, e.g.
+// n.X = o.addrTemp(n.X)
+func (o *orderState) addrTemp(n ir.Node) ir.Node {
+ if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
+ // TODO: expand this to all static composite literal nodes?
+ n = typecheck.DefaultLit(n, nil)
+ types.CalcSize(n.Type())
+ vstat := readonlystaticname(n.Type())
+ var s staticinit.Schedule
+ s.StaticAssign(vstat, 0, n, n.Type())
+ if s.Out != nil {
+ base.Fatalf("staticassign of const generated code: %+v", n)
+ }
+ vstat = typecheck.Expr(vstat).(*ir.Name)
+ return vstat
+ }
+ if isaddrokay(n) {
+ return n
+ }
+ return o.copyExpr(n)
+}
+
+// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
+// It should only be used for map runtime calls which have *_fast* versions.
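+//
+// For example (a sketch), an int32 key destined for the *_fast32 helpers is
+// simply converted to uint32 here, whereas a key for the slow path is
+// spilled to an addressable temporary via addrTemp.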
+func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
+ // Most map calls need to take the address of the key.
+ // Exception: map*_fast* calls. See golang.org/issue/19015.
+ alg := mapfast(t)
+ if alg == mapslow {
+ return o.addrTemp(n)
+ }
+ var kt *types.Type
+ switch alg {
+ case mapfast32:
+ kt = types.Types[types.TUINT32]
+ case mapfast64:
+ kt = types.Types[types.TUINT64]
+ case mapfast32ptr, mapfast64ptr:
+ kt = types.Types[types.TUNSAFEPTR]
+ case mapfaststr:
+ kt = types.Types[types.TSTRING]
+ }
+ nt := n.Type()
+ switch {
+ case nt == kt:
+ return n
+ case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
+ // can directly convert (e.g. named type to underlying type, or one pointer to another)
+ return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, kt, n))
+ case nt.IsInteger() && kt.IsInteger():
+ // can directly convert (e.g. int32 to uint32)
+ if n.Op() == ir.OLITERAL && nt.IsSigned() {
+ // avoid constant overflow error
+ n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
+ n.SetType(kt)
+ return n
+ }
+ return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONV, kt, n))
+ default:
+ // Unsafe cast through memory.
+ // We'll need to do a load with type kt. Create a temporary of type kt to
+ // ensure sufficient alignment. nt may be under-aligned.
+ if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
+ base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
+ }
+ tmp := o.newTemp(kt, true)
+ // *(*nt)(&tmp) = n
+ var e ir.Node = typecheck.NodAddr(tmp)
+ e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, nt.PtrTo(), e)
+ e = ir.NewStarExpr(n.Pos(), e)
+ o.append(ir.NewAssignStmt(base.Pos, e, n))
+ return tmp
+ }
+}
+
+// mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
+// in n to avoid string allocations for keys in map lookups.
+// Returns a bool that signals if a modification was made.
+//
+// For:
+// x = m[string(k)]
+// x = m[T1{... Tn{..., string(k), ...}}]
+// where k is []byte and T1 to Tn are a nesting of struct and array literals,
+// the allocation of backing bytes for the string can be avoided
+// by reusing the []byte backing array. These are special cases
+// for avoiding allocations when converting byte slices to strings.
+// It would be nice to handle these generally, but because
+// []byte keys are not allowed in maps, the use of string(k)
+// comes up in important cases in practice. See issue 3512.
+func mapKeyReplaceStrConv(n ir.Node) bool {
+ var replaced bool
+ switch n.Op() {
+ case ir.OBYTES2STR:
+ n := n.(*ir.ConvExpr)
+ n.SetOp(ir.OBYTES2STRTMP)
+ replaced = true
+ case ir.OSTRUCTLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
+ elem := elem.(*ir.StructKeyExpr)
+ if mapKeyReplaceStrConv(elem.Value) {
+ replaced = true
+ }
+ }
+ case ir.OARRAYLIT:
+ n := n.(*ir.CompLitExpr)
+ for _, elem := range n.List {
+ if elem.Op() == ir.OKEY {
+ elem = elem.(*ir.KeyExpr).Value
+ }
+ if mapKeyReplaceStrConv(elem) {
+ replaced = true
+ }
+ }
+ }
+ return replaced
+}
+
+type ordermarker int
+
+// markTemp returns the top of the temporary variable stack.
+func (o *orderState) markTemp() ordermarker {
+ return ordermarker(len(o.temp))
+}
+
+// popTemp pops temporaries off the stack until reaching the mark,
+// which must have been returned by markTemp.
+func (o *orderState) popTemp(mark ordermarker) {
+ for _, n := range o.temp[mark:] {
+ key := n.Type().LinkString()
+ o.free[key] = append(o.free[key], n)
+ }
+ o.temp = o.temp[:mark]
+}
+
+// cleanTempNoPop returns a list of VARKILL instructions,
+// one for each temporary above the mark on the temporary stack.
+// It does not pop the temporaries from the stack.
+func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
+ var out []ir.Node
+ for i := len(o.temp) - 1; i >= int(mark); i-- {
+ n := o.temp[i]
+ out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
+ }
+ return out
+}
+
+// cleanTemp emits VARKILL instructions for each temporary above the
+// mark on the temporary stack and removes them from the stack.
+func (o *orderState) cleanTemp(top ordermarker) {
+ o.out = append(o.out, o.cleanTempNoPop(top)...)
+ o.popTemp(top)
+}
+
+// stmtList orders each of the statements in the list.
+func (o *orderState) stmtList(l ir.Nodes) {
+ s := l
+ for i := range s {
+ orderMakeSliceCopy(s[i:])
+ o.stmt(s[i])
+ }
+}
+
+// orderMakeSliceCopy matches the pattern:
+// m = OMAKESLICE([]T, x); OCOPY(m, s)
+// and rewrites it to:
+// m = OMAKESLICECOPY([]T, x, s); nil
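+// At the Go source level (informally), this corresponds to fusing
+//	m := make([]T, x)
+//	copy(m, s)
+// into a single allocation-plus-copy runtime call.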
+func orderMakeSliceCopy(s []ir.Node) {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return
+ }
+ if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
+ return
+ }
+
+ as := s[0].(*ir.AssignStmt)
+ cp := s[1].(*ir.BinaryExpr)
+ if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
+ as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
+ as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
+ // The line above this one is correct with the differing equality operators:
+ // we want as.X and cp.X to be the same name,
+ // but we want the initial data to be coming from a different name.
+ return
+ }
+
+ mk := as.Y.(*ir.MakeExpr)
+ if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
+ return
+ }
+ mk.SetOp(ir.OMAKESLICECOPY)
+ mk.Cap = cp.Y
+ // Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
+ mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
+ as.Y = typecheck.Expr(mk)
+ s[1] = nil // remove separate copy call
+}
+
+// edge inserts coverage instrumentation for libfuzzer.
+func (o *orderState) edge() {
+ if base.Debug.Libfuzzer == 0 {
+ return
+ }
+
+ // Create a new uint8 counter to be allocated in section
+ // __libfuzzer_extra_counters.
+ counter := staticinit.StaticName(types.Types[types.TUINT8])
+ counter.SetLibfuzzerExtraCounter(true)
+ // As well as setting SetLibfuzzerExtraCounter, we preemptively set the
+ // symbol type to SLIBFUZZER_EXTRA_COUNTER so that the race detector
+ // instrumentation pass (which does not have access to the flags set by
+ // SetLibfuzzerExtraCounter) knows to ignore them. This information is
+ // lost by the time it reaches the compile step, so SetLibfuzzerExtraCounter
+ // is still necessary.
+ counter.Linksym().Type = objabi.SLIBFUZZER_EXTRA_COUNTER
+
+ // counter += 1
+ incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
+ o.append(incr)
+}
+
+// orderBlock orders the block of statements in n into a new slice,
+// and then replaces the old slice in n with the new slice.
+// free is a map that can be used to obtain temporary variables by type.
+func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
+ var order orderState
+ order.free = free
+ mark := order.markTemp()
+ order.edge()
+ order.stmtList(*n)
+ order.cleanTemp(mark)
+ *n = order.out
+}
+
+// exprInPlace orders the side effects in n and
+// leaves them as the init list of the final expression.
+// The result of exprInPlace MUST be assigned back to n, e.g.
+// n.X = o.exprInPlace(n.X)
+func (o *orderState) exprInPlace(n ir.Node) ir.Node {
+ var order orderState
+ order.free = o.free
+ n = order.expr(n, nil)
+ n = ir.InitExpr(order.out, n)
+
+ // insert new temporaries from order
+ // at head of outer list.
+ o.temp = append(o.temp, order.temp...)
+ return n
+}
+
+// orderStmtInPlace orders the side effects of the single statement n
+// and replaces it with the resulting statement list.
+// The result of orderStmtInPlace MUST be assigned back to n, e.g.
+// n.X = orderStmtInPlace(n.X, free)
+// free is a map that can be used to obtain temporary variables by type.
+func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
+ var order orderState
+ order.free = free
+ mark := order.markTemp()
+ order.stmt(n)
+ order.cleanTemp(mark)
+ return ir.NewBlockStmt(src.NoXPos, order.out)
+}
+
+// init moves n's init list to o.out.
+func (o *orderState) init(n ir.Node) {
+ if ir.MayBeShared(n) {
+ // For concurrency safety, don't mutate potentially shared nodes.
+ // First, ensure that no work is required here.
+ if len(n.Init()) > 0 {
+ base.Fatalf("order.init shared node with ninit")
+ }
+ return
+ }
+ o.stmtList(ir.TakeInit(n))
+}
+
+// call orders the call expression n.
+// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
+func (o *orderState) call(nn ir.Node) {
+ if len(nn.Init()) > 0 {
+ // Caller should have already called o.init(nn).
+ base.Fatalf("%v with unexpected ninit", nn.Op())
+ }
+ if nn.Op() == ir.OCALLMETH {
+ base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
+ }
+
+ // Builtin functions.
+ if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
+ switch n := nn.(type) {
+ default:
+ base.Fatalf("unexpected call: %+v", n)
+ case *ir.UnaryExpr:
+ n.X = o.expr(n.X, nil)
+ case *ir.ConvExpr:
+ n.X = o.expr(n.X, nil)
+ case *ir.BinaryExpr:
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+ case *ir.MakeExpr:
+ n.Len = o.expr(n.Len, nil)
+ n.Cap = o.expr(n.Cap, nil)
+ case *ir.CallExpr:
+ o.exprList(n.Args)
+ }
+ return
+ }
+
+ n := nn.(*ir.CallExpr)
+ typecheck.FixVariadicCall(n)
+
+ if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
+ // For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
+ // do not introduce temporaries here, so it is easier to rewrite it
+ // to symbol address reference later in walk.
+ return
+ }
+
+ n.X = o.expr(n.X, nil)
+ o.exprList(n.Args)
+}
+
+// mapAssign appends n to o.out.
+func (o *orderState) mapAssign(n ir.Node) {
+ switch n.Op() {
+ default:
+ base.Fatalf("order.mapAssign %v", n.Op())
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
+ }
+ o.out = append(o.out, n)
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ if n.X.Op() == ir.OINDEXMAP {
+ n.Y = o.safeMapRHS(n.Y)
+ }
+ o.out = append(o.out, n)
+ }
+}
+
+func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
+ // Make sure we evaluate the RHS before starting the map insert.
+ // We need to make sure the RHS won't panic. See issue 22881.
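+ // For example (informally), in m[k] = append(m[k], f()), the argument
+ // f() is hoisted into a temporary here, so a panic while evaluating it
+ // happens before the map insert has begun.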
+ if r.Op() == ir.OAPPEND {
+ r := r.(*ir.CallExpr)
+ s := r.Args[1:]
+ for i, n := range s {
+ s[i] = o.cheapExpr(n)
+ }
+ return r
+ }
+ return o.cheapExpr(r)
+}
+
+// stmt orders the statement n, appending to o.out.
+// Temporaries created during the statement are cleaned
+// up using VARKILL instructions where possible.
+func (o *orderState) stmt(n ir.Node) {
+ if n == nil {
+ return
+ }
+
+ lno := ir.SetPos(n)
+ o.init(n)
+
+ switch n.Op() {
+ default:
+ base.Fatalf("order.stmt %v", n.Op())
+
+ case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
+ o.out = append(o.out, n)
+
+ case ir.OAS:
+ n := n.(*ir.AssignStmt)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, n.X)
+ o.mapAssign(n)
+ o.cleanTemp(t)
+
+ case ir.OASOP:
+ n := n.(*ir.AssignOpStmt)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+
+ if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
+ // Rewrite m[k] op= r into m[k] = m[k] op r so
+ // that we can ensure that if op panics
+ // because r is zero, the panic happens before
+ // the map assignment.
+ // DeepCopy is a big hammer here, but safeExpr
+ // makes sure there is nothing too deep being copied.
+ l1 := o.safeExpr(n.X)
+ l2 := ir.DeepCopy(src.NoXPos, l1)
+ if l2.Op() == ir.OINDEXMAP {
+ l2 := l2.(*ir.IndexExpr)
+ l2.Assigned = false
+ }
+ l2 = o.copyExpr(l2)
+ r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
+ as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
+ o.mapAssign(as)
+ o.cleanTemp(t)
+ return
+ }
+
+ o.mapAssign(n)
+ o.cleanTemp(t)
+
+ case ir.OAS2:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+ o.exprList(n.Rhs)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Special: avoid copy of the func call in n.Rhs[0].
+ case ir.OAS2FUNC:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+ call := n.Rhs[0]
+ o.init(call)
+ if ic, ok := call.(*ir.InlinedCallExpr); ok {
+ o.stmtList(ic.Body)
+
+ n.SetOp(ir.OAS2)
+ n.Rhs = ic.ReturnVars
+
+ o.exprList(n.Rhs)
+ o.out = append(o.out, n)
+ } else {
+ o.call(call)
+ o.as2func(n)
+ }
+ o.cleanTemp(t)
+
+ // Special: use temporary variables to hold result,
+ // so that runtime can take address of temporary.
+ // No temporary for blank assignment.
+ //
+ // OAS2MAPR: make sure key is addressable if needed,
+ // and make sure OINDEXMAP is not copied out.
+ case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
+ n := n.(*ir.AssignListStmt)
+ t := o.markTemp()
+ o.exprList(n.Lhs)
+
+ switch r := n.Rhs[0]; r.Op() {
+ case ir.ODOTTYPE2:
+ r := r.(*ir.TypeAssertExpr)
+ r.X = o.expr(r.X, nil)
+ case ir.ODYNAMICDOTTYPE2:
+ r := r.(*ir.DynamicTypeAssertExpr)
+ r.X = o.expr(r.X, nil)
+ r.T = o.expr(r.T, nil)
+ case ir.ORECV:
+ r := r.(*ir.UnaryExpr)
+ r.X = o.expr(r.X, nil)
+ case ir.OINDEXMAP:
+ r := r.(*ir.IndexExpr)
+ r.X = o.expr(r.X, nil)
+ r.Index = o.expr(r.Index, nil)
+ // See similar conversion for OINDEXMAP below.
+ _ = mapKeyReplaceStrConv(r.Index)
+ r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
+ default:
+ base.Fatalf("order.stmt: %v", r.Op())
+ }
+
+ o.as2ok(n)
+ o.cleanTemp(t)
+
+ // Special: does not save n onto out.
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ o.stmtList(n.List)
+
+ // Special: these statements have no expression operands; save as is.
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.ODCL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.OTAILCALL:
+ o.out = append(o.out, n)
+
+ // Special: handle call arguments.
+ case ir.OCALLFUNC, ir.OCALLINTER:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ o.call(n)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+
+ // discard results; double-check for no side effects
+ for _, result := range n.ReturnVars {
+ if staticinit.AnySideEffects(result) {
+ base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
+ }
+ }
+
+ case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.OCOPY:
+ n := n.(*ir.BinaryExpr)
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ o.call(n)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Special: order arguments to inner call but not call itself.
+ case ir.ODEFER, ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ t := o.markTemp()
+ o.init(n.Call)
+ o.call(n.Call)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.ODELETE:
+ n := n.(*ir.CallExpr)
+ t := o.markTemp()
+ n.Args[0] = o.expr(n.Args[0], nil)
+ n.Args[1] = o.expr(n.Args[1], nil)
+ n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Clean temporaries from condition evaluation at
+ // beginning of loop body and after for statement.
+ case ir.OFOR:
+ n := n.(*ir.ForStmt)
+ t := o.markTemp()
+ n.Cond = o.exprInPlace(n.Cond)
+ n.Body.Prepend(o.cleanTempNoPop(t)...)
+ orderBlock(&n.Body, o.free)
+ n.Post = orderStmtInPlace(n.Post, o.free)
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // Clean temporaries from condition at
+ // beginning of both branches.
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ t := o.markTemp()
+ n.Cond = o.exprInPlace(n.Cond)
+ n.Body.Prepend(o.cleanTempNoPop(t)...)
+ n.Else.Prepend(o.cleanTempNoPop(t)...)
+ o.popTemp(t)
+ orderBlock(&n.Body, o.free)
+ orderBlock(&n.Else, o.free)
+ o.out = append(o.out, n)
+
+ case ir.ORANGE:
+ // n.X is the expression being ranged over.
+ // Order it, and then make a copy if we need one.
+ // We almost always do, to ensure that we don't
+ // see any value changes made during the loop.
+ // Usually the copy is cheap (e.g., array pointer,
+ // chan, slice, string are all tiny).
+ // The exception is ranging over an array value
+ // (not a slice, not a pointer to array),
+ // which must make a copy to avoid seeing updates made during
+ // the range body. Ranging over an array value is uncommon though.
+
+ // Mark []byte(str) range expression to reuse string backing storage.
+ // It is safe because the storage cannot be mutated.
+ n := n.(*ir.RangeStmt)
+ if n.X.Op() == ir.OSTR2BYTES {
+ n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
+ }
+
+ t := o.markTemp()
+ n.X = o.expr(n.X, nil)
+
+ orderBody := true
+ xt := typecheck.RangeExprType(n.X.Type())
+ switch xt.Kind() {
+ default:
+ base.Fatalf("order.stmt range %v", n.Type())
+
+ case types.TARRAY, types.TSLICE:
+ if n.Value == nil || ir.IsBlank(n.Value) {
+ // for i := range x will only use x once, to compute len(x).
+ // No need to copy it.
+ break
+ }
+ fallthrough
+
+ case types.TCHAN, types.TSTRING:
+ // chan, string, slice, array ranges use value multiple times.
+ // make copy.
+ r := n.X
+
+ if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
+ r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
+ r.SetType(types.Types[types.TSTRING])
+ r = typecheck.Expr(r)
+ }
+
+ n.X = o.copyExpr(r)
+
+ case types.TMAP:
+ if isMapClear(n) {
+ // Preserve the body of the map clear pattern so it can
+ // be detected during walk. The loop body will not be used
+ // when optimizing away the range loop to a runtime call.
+ orderBody = false
+ break
+ }
+
+ // copy the map value in case it is a map literal.
+ // TODO(rsc): Make tmp = literal expressions reuse tmp.
+ // For maps tmp is just one word so it hardly matters.
+ r := n.X
+ n.X = o.copyExpr(r)
+
+ // n.Prealloc is the temp for the iterator.
+ // MapIterType contains pointers and needs to be zeroed.
+ n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
+ }
+ n.Key = o.exprInPlace(n.Key)
+ n.Value = o.exprInPlace(n.Value)
+ if orderBody {
+ orderBlock(&n.Body, o.free)
+ }
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ o.exprList(n.Results)
+ o.out = append(o.out, n)
+
+ // Special: clean case temporaries in each block entry.
+ // Select must enter one of its blocks, so there is no
+ // need for a cleaning at the end.
+ // Doubly special: evaluation order for select is stricter
+ // than ordinary expressions. Even something like p.c
+ // has to be hoisted into a temporary, so that it cannot be
+ // reordered after the channel evaluation for a different
+ // case (if p were nil, then the timing of the fault would
+ // give this away).
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ t := o.markTemp()
+ for _, ncas := range n.Cases {
+ r := ncas.Comm
+ ir.SetPos(ncas)
+
+ // Append any new body prologue to ninit.
+ // The next loop will insert ninit into nbody.
+ if len(ncas.Init()) != 0 {
+ base.Fatalf("order select ninit")
+ }
+ if r == nil {
+ continue
+ }
+ switch r.Op() {
+ default:
+ ir.Dump("select case", r)
+ base.Fatalf("unknown op in select %v", r.Op())
+
+ case ir.OSELRECV2:
+ // case x, ok = <-c
+ r := r.(*ir.AssignListStmt)
+ recv := r.Rhs[0].(*ir.UnaryExpr)
+ recv.X = o.expr(recv.X, nil)
+ if !ir.IsAutoTmp(recv.X) {
+ recv.X = o.copyExpr(recv.X)
+ }
+ init := ir.TakeInit(r)
+
+ colas := r.Def
+ do := func(i int, t *types.Type) {
+ n := r.Lhs[i]
+ if ir.IsBlank(n) {
+ return
+ }
+ // If this is case x := <-ch or case x, y := <-ch, the case has
+ // the ODCL nodes to declare x and y. We want to delay that
+ // declaration (and possible allocation) until inside the case body.
+ // Delete the ODCL nodes here and recreate them inside the body below.
+ if colas {
+ if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
+ init = init[1:]
+
+ // iimport may have added a default initialization assignment,
+ // due to how it handles ODCL statements.
+ if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
+ init = init[1:]
+ }
+ }
+ dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
+ ncas.PtrInit().Append(dcl)
+ }
+ tmp := o.newTemp(t, t.HasPointers())
+ as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
+ ncas.PtrInit().Append(as)
+ r.Lhs[i] = tmp
+ }
+ do(0, recv.X.Type().Elem())
+ do(1, types.Types[types.TBOOL])
+ if len(init) != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select recv")
+ }
+ orderBlock(ncas.PtrInit(), o.free)
+
+ case ir.OSEND:
+ r := r.(*ir.SendStmt)
+ if len(r.Init()) != 0 {
+ ir.DumpList("ninit", r.Init())
+ base.Fatalf("ninit on select send")
+ }
+
+ // case c <- x
+ // r.Chan is c and r.Value is x; both are always evaluated.
+ r.Chan = o.expr(r.Chan, nil)
+
+ if !ir.IsAutoTmp(r.Chan) {
+ r.Chan = o.copyExpr(r.Chan)
+ }
+ r.Value = o.expr(r.Value, nil)
+ if !ir.IsAutoTmp(r.Value) {
+ r.Value = o.copyExpr(r.Value)
+ }
+ }
+ }
+ // Now that we have accumulated all the temporaries, clean them.
+ // Also insert any ninit queued during the previous loop.
+ // (The temporary cleaning must follow that ninit work.)
+ for _, cas := range n.Cases {
+ orderBlock(&cas.Body, o.free)
+ cas.Body.Prepend(o.cleanTempNoPop(t)...)
+
+ // TODO(mdempsky): Is this actually necessary?
+ // walkSelect appears to walk Ninit.
+ cas.Body.Prepend(ir.TakeInit(cas)...)
+ }
+
+ o.out = append(o.out, n)
+ o.popTemp(t)
+
+ // Special: value being sent is passed as a pointer; make it addressable.
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ t := o.markTemp()
+ n.Chan = o.expr(n.Chan, nil)
+ n.Value = o.expr(n.Value, nil)
+ if base.Flag.Cfg.Instrumenting {
+ // Force copying to the stack so that (chan T)(nil) <- x
+ // is still instrumented as a read of x.
+ n.Value = o.copyExpr(n.Value)
+ } else {
+ n.Value = o.addrTemp(n.Value)
+ }
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+
+ // TODO(rsc): Clean temporaries more aggressively.
+ // Note that because walkSwitch will rewrite some of the
+ // switch into a binary search, this is not as easy as it looks.
+ // (If we ran that code here we could invoke order.stmt on
+ // the if-else chain instead.)
+ // For now just clean all the temporaries at the end.
+ // In practice that's fine.
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
+ // Add empty "default:" case for instrumentation.
+ n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
+ }
+
+ t := o.markTemp()
+ n.Tag = o.expr(n.Tag, nil)
+ for _, ncas := range n.Cases {
+ o.exprListInPlace(ncas.List)
+ orderBlock(&ncas.Body, o.free)
+ }
+
+ o.out = append(o.out, n)
+ o.cleanTemp(t)
+ }
+
+ base.Pos = lno
+}
+
+func hasDefaultCase(n *ir.SwitchStmt) bool {
+ for _, ncas := range n.Cases {
+ if len(ncas.List) == 0 {
+ return true
+ }
+ }
+ return false
+}
+
+// exprList orders the expression list l into o.
+func (o *orderState) exprList(l ir.Nodes) {
+ s := l
+ for i := range s {
+ s[i] = o.expr(s[i], nil)
+ }
+}
+
+// exprListInPlace orders the expression list l but saves
+// the side effects on the individual expression ninit lists.
+func (o *orderState) exprListInPlace(l ir.Nodes) {
+ s := l
+ for i := range s {
+ s[i] = o.exprInPlace(s[i])
+ }
+}
+
+func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
+ return o.expr(n, nil)
+}
+
+// expr orders a single expression, appending side
+// effects to o.out as needed.
+// If this is part of an assignment lhs = n, lhs is given.
+// Otherwise lhs == nil. (When lhs != nil it may be possible
+// to avoid copying the result of the expression to a temporary.)
+// The result of expr MUST be assigned back to n, e.g.
+// n.X = o.expr(n.X, lhs)
+func (o *orderState) expr(n, lhs ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+ lno := ir.SetPos(n)
+ n = o.expr1(n, lhs)
+ base.Pos = lno
+ return n
+}
+
+func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
+ o.init(n)
+
+ switch n.Op() {
+ default:
+ if o.edit == nil {
+ o.edit = o.exprNoLHS // create closure once
+ }
+ ir.EditChildren(n, o.edit)
+ return n
+
+ // Addition of strings turns into a function call.
+ // Allocate a temporary to hold the strings.
+ // Up to five strings use direct runtime helpers.
+ case ir.OADDSTR:
+ n := n.(*ir.AddStringExpr)
+ o.exprList(n.List)
+
+ if len(n.List) > 5 {
+ t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
+ n.Prealloc = o.newTemp(t, false)
+ }
+
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String concatenation does not
+ // retain the strings for later use, so it is safe.
+ // However, we can do it only if there is at least one non-empty string literal.
+ // Otherwise if all other arguments are empty strings,
+ // concatstrings will return the reference to the temp string
+ // to the caller.
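+ // For example (informally), in s := string(b) + "!" + string(c), both
+ // conversions can be marked OBYTES2STRTMP because the non-empty "!"
+ // literal guarantees concatstrings allocates a fresh result string.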
+ hasbyte := false
+
+ haslit := false
+ for _, n1 := range n.List {
+ hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
+ haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
+ }
+
+ if haslit && hasbyte {
+ for _, n2 := range n.List {
+ if n2.Op() == ir.OBYTES2STR {
+ n2 := n2.(*ir.ConvExpr)
+ n2.SetOp(ir.OBYTES2STRTMP)
+ }
+ }
+ }
+ return n
+
+ case ir.OINDEXMAP:
+ n := n.(*ir.IndexExpr)
+ n.X = o.expr(n.X, nil)
+ n.Index = o.expr(n.Index, nil)
+ needCopy := false
+
+ if !n.Assigned {
+ // Enforce that any []byte slices we are not copying
+ // can not be changed before the map index by forcing
+ // the map index to happen immediately following the
+ // conversions. See copyExpr a few lines below.
+ needCopy = mapKeyReplaceStrConv(n.Index)
+
+ if base.Flag.Cfg.Instrumenting {
+ // Race detector needs the copy.
+ needCopy = true
+ }
+ }
+
+ // key must be addressable
+ n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
+ if needCopy {
+ return o.copyExpr(n)
+ }
+ return n
+
+ // concrete type (not interface) argument might need an addressable
+ // temporary to pass to the runtime conversion routine.
+ case ir.OCONVIFACE, ir.OCONVIDATA:
+ n := n.(*ir.ConvExpr)
+ n.X = o.expr(n.X, nil)
+ if n.X.Type().IsInterface() {
+ return n
+ }
+ if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
+ // Need a temp if we need to pass the address to the conversion function.
+ // We also process static composite literal node here, making a named static global
+ // whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
+ n.X = o.addrTemp(n.X)
+ }
+ return n
+
+ case ir.OCONVNOP:
+ n := n.(*ir.ConvExpr)
+ if n.X.Op() == ir.OCALLMETH {
+ base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
+ }
+ if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
+ call := n.X.(*ir.CallExpr)
+ // When reordering unsafe.Pointer(f()) into a separate
+ // statement, the conversion and function call must stay
+ // together. See golang.org/issue/15329.
+ o.init(call)
+ o.call(call)
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+ return o.copyExpr(n)
+ }
+ } else {
+ n.X = o.expr(n.X, nil)
+ }
+ return n
+
+ case ir.OANDAND, ir.OOROR:
+ // ... = LHS && RHS
+ //
+ // var r bool
+ // r = LHS
+ // if r { // or !r, for OROR
+ // r = RHS
+ // }
+ // ... = r
+
+ n := n.(*ir.LogicalExpr)
+ r := o.newTemp(n.Type(), false)
+
+ // Evaluate left-hand side.
+ lhs := o.expr(n.X, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))
+
+ // Evaluate right-hand side, save generated code.
+ saveout := o.out
+ o.out = nil
+ t := o.markTemp()
+ o.edge()
+ rhs := o.expr(n.Y, nil)
+ o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
+ o.cleanTemp(t)
+ gen := o.out
+ o.out = saveout
+
+ // If left-hand side doesn't cause a short-circuit, issue right-hand side.
+ nif := ir.NewIfStmt(base.Pos, r, nil, nil)
+ if n.Op() == ir.OANDAND {
+ nif.Body = gen
+ } else {
+ nif.Else = gen
+ }
+ o.out = append(o.out, nif)
+ return r
+
+ case ir.OCALLMETH:
+ base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
+ panic("unreachable")
+
+ case ir.OCALLFUNC,
+ ir.OCALLINTER,
+ ir.OCAP,
+ ir.OCOMPLEX,
+ ir.OCOPY,
+ ir.OIMAG,
+ ir.OLEN,
+ ir.OMAKECHAN,
+ ir.OMAKEMAP,
+ ir.OMAKESLICE,
+ ir.OMAKESLICECOPY,
+ ir.ONEW,
+ ir.OREAL,
+ ir.ORECOVERFP,
+ ir.OSTR2BYTES,
+ ir.OSTR2BYTESTMP,
+ ir.OSTR2RUNES:
+
+ if isRuneCount(n) {
+ // len([]rune(s)) is rewritten to runtime.countrunes(s) later.
+ conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
+ conv.X = o.expr(conv.X, nil)
+ } else {
+ o.call(n)
+ }
+
+ if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OINLCALL:
+ n := n.(*ir.InlinedCallExpr)
+ o.stmtList(n.Body)
+ return n.SingleResult()
+
+ case ir.OAPPEND:
+ // Check for append(x, make([]T, y)...) .
+ n := n.(*ir.CallExpr)
+ if isAppendOfMake(n) {
+ n.Args[0] = o.expr(n.Args[0], nil) // order x
+ mk := n.Args[1].(*ir.MakeExpr)
+ mk.Len = o.expr(mk.Len, nil) // order y
+ } else {
+ o.exprList(n.Args)
+ }
+
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
+ n := n.(*ir.SliceExpr)
+ n.X = o.expr(n.X, nil)
+ n.Low = o.cheapExpr(o.expr(n.Low, nil))
+ n.High = o.cheapExpr(o.expr(n.High, nil))
+ n.Max = o.cheapExpr(o.expr(n.Max, nil))
+ if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
+ return o.copyExpr(n)
+ }
+ return n
+
+ case ir.OCLOSURE:
+ n := n.(*ir.ClosureExpr)
+ if n.Transient() && len(n.Func.ClosureVars) > 0 {
+ n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
+ }
+ return n
+
+ case ir.OMETHVALUE:
+ n := n.(*ir.SelectorExpr)
+ n.X = o.expr(n.X, nil)
+ if n.Transient() {
+ t := typecheck.MethodValueType(n)
+ n.Prealloc = o.newTemp(t, false)
+ }
+ return n
+
+ case ir.OSLICELIT:
+ n := n.(*ir.CompLitExpr)
+ o.exprList(n.List)
+ if n.Transient() {
+ t := types.NewArray(n.Type().Elem(), n.Len)
+ n.Prealloc = o.newTemp(t, false)
+ }
+ return n
+
+ case ir.ODOTTYPE, ir.ODOTTYPE2:
+ n := n.(*ir.TypeAssertExpr)
+ n.X = o.expr(n.X, nil)
+ if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
+ return o.copyExprClear(n)
+ }
+ return n
+
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ n.X = o.expr(n.X, nil)
+ return o.copyExprClear(n)
+
+ case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
+ n := n.(*ir.BinaryExpr)
+ n.X = o.expr(n.X, nil)
+ n.Y = o.expr(n.Y, nil)
+
+ t := n.X.Type()
+ switch {
+ case t.IsString():
+ // Mark string(byteSlice) arguments to reuse byteSlice backing
+ // buffer during conversion. String comparison does not
+ // retain the strings for later use, so it is safe.
+ if n.X.Op() == ir.OBYTES2STR {
+ n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ }
+ if n.Y.Op() == ir.OBYTES2STR {
+ n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
+ }
+
+ case t.IsStruct() || t.IsArray():
+ // for complex comparisons, we need both args to be
+ // addressable so we can pass them to the runtime.
+ n.X = o.addrTemp(n.X)
+ n.Y = o.addrTemp(n.Y)
+ }
+ return n
+
+ case ir.OMAPLIT:
+ // Order map by converting:
+ // map[int]int{
+ // a(): b(),
+ // c(): d(),
+ // e(): f(),
+ // }
+ // to
+ // m := map[int]int{}
+ // m[a()] = b()
+ // m[c()] = d()
+ // m[e()] = f()
+ // Then order the result.
+ // Without this special case, order would otherwise compute all
+ // the keys and values before storing any of them to the map.
+ // See issue 26552.
+ n := n.(*ir.CompLitExpr)
+ entries := n.List
+ statics := entries[:0]
+ var dynamics []*ir.KeyExpr
+ for _, r := range entries {
+ r := r.(*ir.KeyExpr)
+
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ // Recursively ordering some static entries can change them to dynamic;
+ // e.g., OCONVIFACE nodes. See #31777.
+ r = o.expr(r, nil).(*ir.KeyExpr)
+ if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
+ dynamics = append(dynamics, r)
+ continue
+ }
+
+ statics = append(statics, r)
+ }
+ n.List = statics
+
+ if len(dynamics) == 0 {
+ return n
+ }
+
+ // Emit the creation of the map (with all its static entries).
+ m := o.newTemp(n.Type(), false)
+ as := ir.NewAssignStmt(base.Pos, m, n)
+ typecheck.Stmt(as)
+ o.stmt(as)
+
+ // Emit eval+insert of dynamic entries, one at a time.
+ for _, r := range dynamics {
+ as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
+ typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
+ o.stmt(as)
+ }
+ return m
+ }
+
+ // No return - type-assertions above. Each case must return for itself.
+}
+
+// as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
+// The caller should order the right-hand side of the assignment before calling order.as2func.
+// It rewrites,
+// a, b, a = ...
+// as
+// tmp1, tmp2, tmp3 = ...
+// a, b, a = tmp1, tmp2, tmp3
+// This is necessary to ensure left-to-right assignment order.
+func (o *orderState) as2func(n *ir.AssignListStmt) {
+ results := n.Rhs[0].Type()
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+ for i, nl := range n.Lhs {
+ if !ir.IsBlank(nl) {
+ typ := results.Field(i).Type
+ tmp := o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ as.Rhs = append(as.Rhs, tmp)
+ }
+ }
+
+ o.out = append(o.out, n)
+ o.stmt(typecheck.Stmt(as))
+}
+
+// as2ok orders OAS2XXX with ok.
+// Just like as2func, this also adds temporaries to ensure left-to-right assignment.
+func (o *orderState) as2ok(n *ir.AssignListStmt) {
+ as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
+
+ do := func(i int, typ *types.Type) {
+ if nl := n.Lhs[i]; !ir.IsBlank(nl) {
+ var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
+ n.Lhs[i] = tmp
+ as.Lhs = append(as.Lhs, nl)
+ if i == 1 {
+ // The "ok" result is an untyped boolean according to the Go
+ // spec. We need to explicitly convert it to the LHS type in
+ // case the latter is a defined boolean type (#8475).
+ tmp = typecheck.Conv(tmp, nl.Type())
+ }
+ as.Rhs = append(as.Rhs, tmp)
+ }
+ }
+
+ do(0, n.Rhs[0].Type())
+ do(1, types.Types[types.TBOOL])
+
+ o.out = append(o.out, n)
+ o.stmt(typecheck.Stmt(as))
+}
+
+// isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
+func isFuncPCIntrinsic(n *ir.CallExpr) bool {
+ if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
+ return false
+ }
+ fn := n.X.(*ir.Name).Sym()
+ return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
+ (fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
+}
+
+// isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
+func isIfaceOfFunc(n ir.Node) bool {
+ return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
+}
diff --git a/src/cmd/compile/internal/walk/race.go b/src/cmd/compile/internal/walk/race.go
new file mode 100644
index 0000000..859e5c5
--- /dev/null
+++ b/src/cmd/compile/internal/walk/race.go
@@ -0,0 +1,34 @@
+// Copyright 2012 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+func instrument(fn *ir.Func) {
+ if fn.Pragma&ir.Norace != 0 || (fn.Linksym() != nil && fn.Linksym().ABIWrapper()) {
+ return
+ }
+
+ if !base.Flag.Race || !base.Compiling(base.NoRacePkgs) {
+ fn.SetInstrumentBody(true)
+ }
+
+ if base.Flag.Race {
+ lno := base.Pos
+ base.Pos = src.NoXPos
+ var init ir.Nodes
+ fn.Enter.Prepend(mkcallstmt("racefuncenter", mkcall("getcallerpc", types.Types[types.TUINTPTR], &init)))
+ if len(init) != 0 {
+ base.Fatalf("race walk: unexpected init for getcallerpc")
+ }
+ fn.Exit.Append(mkcallstmt("racefuncexit"))
+ base.Pos = lno
+ }
+}
diff --git a/src/cmd/compile/internal/walk/range.go b/src/cmd/compile/internal/walk/range.go
new file mode 100644
index 0000000..aa8c548
--- /dev/null
+++ b/src/cmd/compile/internal/walk/range.go
@@ -0,0 +1,475 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "unicode/utf8"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/sys"
+)
+
+func cheapComputableIndex(width int64) bool {
+ switch ssagen.Arch.LinkArch.Family {
+ // MIPS does not have R+R addressing
+ // Arm64 may lack ability to generate this code in our assembler,
+ // but the architecture supports it.
+ case sys.PPC64, sys.S390X:
+ return width == 1
+ case sys.AMD64, sys.I386, sys.ARM64, sys.ARM:
+ switch width {
+ case 1, 2, 4, 8:
+ return true
+ }
+ }
+ return false
+}
+
+// walkRange transforms various forms of ORANGE into
+// simpler forms. The result must be assigned back to n.
+// Node n may also be modified in place, and may also be
+// the returned node.
+func walkRange(nrange *ir.RangeStmt) ir.Node {
+ if isMapClear(nrange) {
+ m := nrange.X
+ lno := ir.SetPos(m)
+ n := mapClear(m)
+ base.Pos = lno
+ return n
+ }
+
+ nfor := ir.NewForStmt(nrange.Pos(), nil, nil, nil, nil)
+ nfor.SetInit(nrange.Init())
+ nfor.Label = nrange.Label
+
+ // variable name conventions:
+ // ohv1, hv1, hv2: hidden (old) val 1, 2
+ // ha, hit: hidden aggregate, iterator
+ // hn, hp: hidden len, pointer
+ // hb: hidden bool
+ // a, v1, v2: not hidden aggregate, val 1, 2
+
+ a := nrange.X
+ t := typecheck.RangeExprType(a.Type())
+ lno := ir.SetPos(a)
+
+ v1, v2 := nrange.Key, nrange.Value
+
+ if ir.IsBlank(v2) {
+ v2 = nil
+ }
+
+ if ir.IsBlank(v1) && v2 == nil {
+ v1 = nil
+ }
+
+ if v1 == nil && v2 != nil {
+ base.Fatalf("walkRange: v2 != nil while v1 == nil")
+ }
+
+ var ifGuard *ir.IfStmt
+
+ var body []ir.Node
+ var init []ir.Node
+ switch t.Kind() {
+ default:
+ base.Fatalf("walkRange")
+
+ case types.TARRAY, types.TSLICE:
+ if nn := arrayClear(nrange, v1, v2, a); nn != nil {
+ base.Pos = lno
+ return nn
+ }
+
+ // order.stmt arranged for a copy of the array/slice variable if needed.
+ ha := a
+
+ hv1 := typecheck.Temp(types.Types[types.TINT])
+ hn := typecheck.Temp(types.Types[types.TINT])
+
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ init = append(init, ir.NewAssignStmt(base.Pos, hn, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha)))
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+ nfor.Post = ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))
+
+ // for range ha { body }
+ if v1 == nil {
+ break
+ }
+
+ // for v1 := range ha { body }
+ if v2 == nil {
+ body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+ break
+ }
+
+ // for v1, v2 := range ha { body }
+ if cheapComputableIndex(t.Elem().Size()) {
+ // v1, v2 = hv1, ha[hv1]
+ tmp := ir.NewIndexExpr(base.Pos, ha, hv1)
+ tmp.SetBounded(true)
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, tmp})
+ body = []ir.Node{a}
+ break
+ }
+
+ // TODO(austin): OFORUNTIL is a strange beast, but is
+ // necessary for expressing the control flow we need
+ // while also making "break" and "continue" work. It
+ // would be nice to just lower ORANGE during SSA, but
+ // racewalk needs to see many of the operations
+ // involved in ORANGE's implementation. If racewalk
+ // moves into SSA, consider moving ORANGE into SSA and
+ // eliminating OFORUNTIL.
+
+ // TODO(austin): OFORUNTIL inhibits bounds-check
+ // elimination on the index variable (see #20711).
+ // Enhance the prove pass to understand this.
+ ifGuard = ir.NewIfStmt(base.Pos, nil, nil, nil)
+ ifGuard.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, hn)
+ nfor.SetOp(ir.OFORUNTIL)
+
+ hp := typecheck.Temp(types.NewPtr(t.Elem()))
+ tmp := ir.NewIndexExpr(base.Pos, ha, ir.NewInt(0))
+ tmp.SetBounded(true)
+ init = append(init, ir.NewAssignStmt(base.Pos, hp, typecheck.NodAddr(tmp)))
+
+ // Use OAS2 to correctly handle assignments
+ // of the form "v1, a[v1] := range".
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1, ir.NewStarExpr(base.Pos, hp)})
+ body = append(body, a)
+
+ // Advance pointer as part of the late increment.
+ //
+ // This runs *after* the condition check, so we know
+ // advancing the pointer is safe and won't go past the
+ // end of the allocation.
+ as := ir.NewAssignStmt(base.Pos, hp, addptr(hp, t.Elem().Size()))
+ nfor.Late = []ir.Node{typecheck.Stmt(as)}
+
+ case types.TMAP:
+ // order.stmt allocated the iterator for us.
+ // we only use a once, so no copy needed.
+ ha := a
+
+ hit := nrange.Prealloc
+ th := hit.Type()
+ // depends on layout of iterator struct.
+ // See cmd/compile/internal/reflectdata/reflect.go:MapIterType
+ keysym := th.Field(0).Sym
+ elemsym := th.Field(1).Sym // ditto
+
+ fn := typecheck.LookupRuntime("mapiterinit")
+
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), th)
+ init = append(init, mkcallstmt1(fn, reflectdata.TypePtr(t), ha, typecheck.NodAddr(hit)))
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym), typecheck.NodNil())
+
+ fn = typecheck.LookupRuntime("mapiternext")
+ fn = typecheck.SubstArgTypes(fn, th)
+ nfor.Post = mkcallstmt1(fn, typecheck.NodAddr(hit))
+
+ key := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, keysym))
+ if v1 == nil {
+ body = nil
+ } else if v2 == nil {
+ body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, key)}
+ } else {
+ elem := ir.NewStarExpr(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, hit, elemsym))
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{key, elem})
+ body = []ir.Node{a}
+ }
+
+ case types.TCHAN:
+ // order.stmt arranged for a copy of the channel variable.
+ ha := a
+
+ hv1 := typecheck.Temp(t.Elem())
+ hv1.SetTypecheck(1)
+ if t.Elem().HasPointers() {
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+ }
+ hb := typecheck.Temp(types.Types[types.TBOOL])
+
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, hb, ir.NewBool(false))
+ lhs := []ir.Node{hv1, hb}
+ rhs := []ir.Node{ir.NewUnaryExpr(base.Pos, ir.ORECV, ha)}
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2RECV, lhs, rhs)
+ a.SetTypecheck(1)
+ nfor.Cond = ir.InitExpr([]ir.Node{a}, nfor.Cond)
+ if v1 == nil {
+ body = nil
+ } else {
+ body = []ir.Node{ir.NewAssignStmt(base.Pos, v1, hv1)}
+ }
+ // Zero hv1. This prevents hv1 from being the sole, inaccessible
+ // reference to an otherwise GC-able value during the next channel receive.
+ // See issue 15281.
+ body = append(body, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+ case types.TSTRING:
+ // Transform string range statements like "for v1, v2 = range a" into
+ //
+ // ha := a
+ // for hv1 := 0; hv1 < len(ha); {
+ // hv1t := hv1
+ // hv2 := rune(ha[hv1])
+ // if hv2 < utf8.RuneSelf {
+ // hv1++
+ // } else {
+ // hv2, hv1 = decoderune(ha, hv1)
+ // }
+ // v1, v2 = hv1t, hv2
+ // // original body
+ // }
+
+ // order.stmt arranged for a copy of the string variable.
+ ha := a
+
+ hv1 := typecheck.Temp(types.Types[types.TINT])
+ hv1t := typecheck.Temp(types.Types[types.TINT])
+ hv2 := typecheck.Temp(types.RuneType)
+
+ // hv1 := 0
+ init = append(init, ir.NewAssignStmt(base.Pos, hv1, nil))
+
+ // hv1 < len(ha)
+ nfor.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv1, ir.NewUnaryExpr(base.Pos, ir.OLEN, ha))
+
+ if v1 != nil {
+ // hv1t = hv1
+ body = append(body, ir.NewAssignStmt(base.Pos, hv1t, hv1))
+ }
+
+ // hv2 := rune(ha[hv1])
+ nind := ir.NewIndexExpr(base.Pos, ha, hv1)
+ nind.SetBounded(true)
+ body = append(body, ir.NewAssignStmt(base.Pos, hv2, typecheck.Conv(nind, types.RuneType)))
+
+ // if hv2 < utf8.RuneSelf
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OLT, hv2, ir.NewInt(utf8.RuneSelf))
+
+ // hv1++
+ nif.Body = []ir.Node{ir.NewAssignStmt(base.Pos, hv1, ir.NewBinaryExpr(base.Pos, ir.OADD, hv1, ir.NewInt(1)))}
+
+ // } else {
+ // hv2, hv1 = decoderune(ha, hv1)
+ fn := typecheck.LookupRuntime("decoderune")
+ call := mkcall1(fn, fn.Type().Results(), &nif.Else, ha, hv1)
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{hv2, hv1}, []ir.Node{call})
+ nif.Else.Append(a)
+
+ body = append(body, nif)
+
+ if v1 != nil {
+ if v2 != nil {
+ // v1, v2 = hv1t, hv2
+ a := ir.NewAssignListStmt(base.Pos, ir.OAS2, []ir.Node{v1, v2}, []ir.Node{hv1t, hv2})
+ body = append(body, a)
+ } else {
+ // v1 = hv1t
+ body = append(body, ir.NewAssignStmt(base.Pos, v1, hv1t))
+ }
+ }
+ }
+
+ typecheck.Stmts(init)
+
+ if ifGuard != nil {
+ ifGuard.PtrInit().Append(init...)
+ ifGuard = typecheck.Stmt(ifGuard).(*ir.IfStmt)
+ } else {
+ nfor.PtrInit().Append(init...)
+ }
+
+ typecheck.Stmts(nfor.Cond.Init())
+
+ nfor.Cond = typecheck.Expr(nfor.Cond)
+ nfor.Cond = typecheck.DefaultLit(nfor.Cond, nil)
+ nfor.Post = typecheck.Stmt(nfor.Post)
+ typecheck.Stmts(body)
+ nfor.Body.Append(body...)
+ nfor.Body.Append(nrange.Body...)
+
+ var n ir.Node = nfor
+ if ifGuard != nil {
+ ifGuard.Body = []ir.Node{n}
+ n = ifGuard
+ }
+
+ n = walkStmt(n)
+
+ base.Pos = lno
+ return n
+}
+
+// isMapClear checks if n is of the form:
+//
+// for k := range m {
+// delete(m, k)
+// }
+//
+// where == for keys of map m is reflexive.
+func isMapClear(n *ir.RangeStmt) bool {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return false
+ }
+
+ t := n.X.Type()
+ if n.Op() != ir.ORANGE || t.Kind() != types.TMAP || n.Key == nil || n.Value != nil {
+ return false
+ }
+
+ k := n.Key
+ // Require k to be a new variable name.
+ if !ir.DeclaredBy(k, n) {
+ return false
+ }
+
+ if len(n.Body) != 1 {
+ return false
+ }
+
+ stmt := n.Body[0] // only stmt in body
+ if stmt == nil || stmt.Op() != ir.ODELETE {
+ return false
+ }
+
+ m := n.X
+ if delete := stmt.(*ir.CallExpr); !ir.SameSafeExpr(delete.Args[0], m) || !ir.SameSafeExpr(delete.Args[1], k) {
+ return false
+ }
+
+ // Keys where equality is not reflexive can not be deleted from maps.
+ if !types.IsReflexive(t.Key()) {
+ return false
+ }
+
+ return true
+}
+
+// mapClear constructs a call to runtime.mapclear for the map m.
+func mapClear(m ir.Node) ir.Node {
+ t := m.Type()
+
+ // instantiate mapclear(typ *type, hmap map[any]any)
+ fn := typecheck.LookupRuntime("mapclear")
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ n := mkcallstmt1(fn, reflectdata.TypePtr(t), m)
+ return walkStmt(typecheck.Stmt(n))
+}
+
+// arrayClear lowers a range loop over an array or slice into runtime·memclr
+// if possible, for fast zeroing of slices and arrays (issue 5373).
+// Look for instances of
+//
+// for i := range a {
+// a[i] = zero
+// }
+//
+// in which the evaluation of a is side-effect-free.
+//
+// Parameters are as in walkRange: "for v1, v2 = range a".
+func arrayClear(loop *ir.RangeStmt, v1, v2, a ir.Node) ir.Node {
+ if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
+ return nil
+ }
+
+ if v1 == nil || v2 != nil {
+ return nil
+ }
+
+ if len(loop.Body) != 1 || loop.Body[0] == nil {
+ return nil
+ }
+
+ stmt1 := loop.Body[0] // only stmt in body
+ if stmt1.Op() != ir.OAS {
+ return nil
+ }
+ stmt := stmt1.(*ir.AssignStmt)
+ if stmt.X.Op() != ir.OINDEX {
+ return nil
+ }
+ lhs := stmt.X.(*ir.IndexExpr)
+
+ if !ir.SameSafeExpr(lhs.X, a) || !ir.SameSafeExpr(lhs.Index, v1) {
+ return nil
+ }
+
+ elemsize := typecheck.RangeExprType(loop.X.Type()).Elem().Size()
+ if elemsize <= 0 || !ir.IsZero(stmt.Y) {
+ return nil
+ }
+
+ // Convert to
+ // if len(a) != 0 {
+ // hp = &a[0]
+ // hn = len(a)*sizeof(elem(a))
+ // memclr{NoHeap,Has}Pointers(hp, hn)
+ // i = len(a) - 1
+ // }
+ n := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ n.Cond = ir.NewBinaryExpr(base.Pos, ir.ONE, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(0))
+
+ // hp = &a[0]
+ hp := typecheck.Temp(types.Types[types.TUNSAFEPTR])
+
+ ix := ir.NewIndexExpr(base.Pos, a, ir.NewInt(0))
+ ix.SetBounded(true)
+ addr := typecheck.ConvNop(typecheck.NodAddr(ix), types.Types[types.TUNSAFEPTR])
+ n.Body.Append(ir.NewAssignStmt(base.Pos, hp, addr))
+
+ // hn = len(a) * sizeof(elem(a))
+ hn := typecheck.Temp(types.Types[types.TUINTPTR])
+ mul := typecheck.Conv(ir.NewBinaryExpr(base.Pos, ir.OMUL, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(elemsize)), types.Types[types.TUINTPTR])
+ n.Body.Append(ir.NewAssignStmt(base.Pos, hn, mul))
+
+ var fn ir.Node
+ if a.Type().Elem().HasPointers() {
+ // memclrHasPointers(hp, hn)
+ ir.CurFunc.SetWBPos(stmt.Pos())
+ fn = mkcallstmt("memclrHasPointers", hp, hn)
+ } else {
+ // memclrNoHeapPointers(hp, hn)
+ fn = mkcallstmt("memclrNoHeapPointers", hp, hn)
+ }
+
+ n.Body.Append(fn)
+
+ // i = len(a) - 1
+ v1 = ir.NewAssignStmt(base.Pos, v1, ir.NewBinaryExpr(base.Pos, ir.OSUB, ir.NewUnaryExpr(base.Pos, ir.OLEN, a), ir.NewInt(1)))
+
+ n.Body.Append(v1)
+
+ n.Cond = typecheck.Expr(n.Cond)
+ n.Cond = typecheck.DefaultLit(n.Cond, nil)
+ typecheck.Stmts(n.Body)
+ return walkStmt(n)
+}
+
+// addptr returns (*T)(uintptr(p) + n).
+func addptr(p ir.Node, n int64) ir.Node {
+ t := p.Type()
+
+ p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+ p.SetType(types.Types[types.TUINTPTR])
+
+ p = ir.NewBinaryExpr(base.Pos, ir.OADD, p, ir.NewInt(n))
+
+ p = ir.NewConvExpr(base.Pos, ir.OCONVNOP, nil, p)
+ p.SetType(t)
+
+ return p
+}
diff --git a/src/cmd/compile/internal/walk/select.go b/src/cmd/compile/internal/walk/select.go
new file mode 100644
index 0000000..fde8f50
--- /dev/null
+++ b/src/cmd/compile/internal/walk/select.go
@@ -0,0 +1,291 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
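+// walkSelect lowers an OSELECT statement: walkSelectCases compiles its
+// cases into plain statements and runtime calls, which are stored in
+// sel.Compiled.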
+func walkSelect(sel *ir.SelectStmt) {
+ lno := ir.SetPos(sel)
+ if sel.Walked() {
+ base.Fatalf("double walkSelect")
+ }
+ sel.SetWalked(true)
+
+ init := ir.TakeInit(sel)
+
+ init = append(init, walkSelectCases(sel.Cases)...)
+ sel.Cases = nil
+
+ sel.Compiled = init
+ walkStmtList(sel.Compiled)
+
+ base.Pos = lno
+}
+
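+// walkSelectCases returns the statements implementing the given select
+// cases. Zero-case selects block forever, one-case selects reduce to a
+// single blocking op, and a two-case select with a default becomes a
+// single non-blocking op; the general case fills an array of scases and
+// calls runtime.selectgo.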
+func walkSelectCases(cases []*ir.CommClause) []ir.Node {
+ ncas := len(cases)
+ sellineno := base.Pos
+
+ // optimization: zero-case select
+ if ncas == 0 {
+ return []ir.Node{mkcallstmt("block")}
+ }
+
+ // optimization: one-case select: single op.
+ if ncas == 1 {
+ cas := cases[0]
+ ir.SetPos(cas)
+ l := cas.Init()
+ if cas.Comm != nil { // not default:
+ n := cas.Comm
+ l = append(l, ir.TakeInit(n)...)
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+
+ case ir.OSEND:
+ // already ok
+
+ case ir.OSELRECV2:
+ r := n.(*ir.AssignListStmt)
+ if ir.IsBlank(r.Lhs[0]) && ir.IsBlank(r.Lhs[1]) {
+ n = r.Rhs[0]
+ break
+ }
+ r.SetOp(ir.OAS2RECV)
+ }
+
+ l = append(l, n)
+ }
+
+ l = append(l, cas.Body...)
+ l = append(l, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+ return l
+ }
+
+ // convert case value arguments to addresses.
+ // this rewrite is used by both the general code and the next optimization.
+ var dflt *ir.CommClause
+ for _, cas := range cases {
+ ir.SetPos(cas)
+ n := cas.Comm
+ if n == nil {
+ dflt = cas
+ continue
+ }
+ switch n.Op() {
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ n.Value = typecheck.NodAddr(n.Value)
+ n.Value = typecheck.Expr(n.Value)
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ if !ir.IsBlank(n.Lhs[0]) {
+ n.Lhs[0] = typecheck.NodAddr(n.Lhs[0])
+ n.Lhs[0] = typecheck.Expr(n.Lhs[0])
+ }
+ }
+ }
+
+ // optimization: two-case select but one is default: single non-blocking op.
+ if ncas == 2 && dflt != nil {
+ cas := cases[0]
+ if cas == dflt {
+ cas = cases[1]
+ }
+
+ n := cas.Comm
+ ir.SetPos(n)
+ r := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ r.SetInit(cas.Init())
+ var cond ir.Node
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+
+ case ir.OSEND:
+ // if selectnbsend(c, v) { body } else { default body }
+ n := n.(*ir.SendStmt)
+ ch := n.Chan
+ cond = mkcall1(chanfn("selectnbsend", 2, ch.Type()), types.Types[types.TBOOL], r.PtrInit(), ch, n.Value)
+
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ recv := n.Rhs[0].(*ir.UnaryExpr)
+ ch := recv.X
+ elem := n.Lhs[0]
+ if ir.IsBlank(elem) {
+ elem = typecheck.NodNil()
+ }
+ cond = typecheck.Temp(types.Types[types.TBOOL])
+ fn := chanfn("selectnbrecv", 2, ch.Type())
+ call := mkcall1(fn, fn.Type().Results(), r.PtrInit(), elem, ch)
+ as := ir.NewAssignListStmt(r.Pos(), ir.OAS2, []ir.Node{cond, n.Lhs[1]}, []ir.Node{call})
+ r.PtrInit().Append(typecheck.Stmt(as))
+ }
+
+ r.Cond = typecheck.Expr(cond)
+ r.Body = cas.Body
+ r.Else = append(dflt.Init(), dflt.Body...)
+ return []ir.Node{r, ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)}
+ }
+
+ if dflt != nil {
+ ncas--
+ }
+ casorder := make([]*ir.CommClause, ncas)
+ nsends, nrecvs := 0, 0
+
+ var init []ir.Node
+
+ // generate sel-struct
+ base.Pos = sellineno
+ selv := typecheck.Temp(types.NewArray(scasetype(), int64(ncas)))
+ init = append(init, typecheck.Stmt(ir.NewAssignStmt(base.Pos, selv, nil)))
+
+ // No initialization for order; runtime.selectgo is responsible for that.
+ order := typecheck.Temp(types.NewArray(types.Types[types.TUINT16], 2*int64(ncas)))
+
+ var pc0, pcs ir.Node
+ if base.Flag.Race {
+ pcs = typecheck.Temp(types.NewArray(types.Types[types.TUINTPTR], int64(ncas)))
+ pc0 = typecheck.Expr(typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(0))))
+ } else {
+ pc0 = typecheck.NodNil()
+ }
+
+ // register cases
+ for _, cas := range cases {
+ ir.SetPos(cas)
+
+ init = append(init, ir.TakeInit(cas)...)
+
+ n := cas.Comm
+ if n == nil { // default:
+ continue
+ }
+
+ var i int
+ var c, elem ir.Node
+ switch n.Op() {
+ default:
+ base.Fatalf("select %v", n.Op())
+ case ir.OSEND:
+ n := n.(*ir.SendStmt)
+ i = nsends
+ nsends++
+ c = n.Chan
+ elem = n.Value
+ case ir.OSELRECV2:
+ n := n.(*ir.AssignListStmt)
+ nrecvs++
+ i = ncas - nrecvs
+ recv := n.Rhs[0].(*ir.UnaryExpr)
+ c = recv.X
+ elem = n.Lhs[0]
+ }
+
+ casorder[i] = cas
+
+ setField := func(f string, val ir.Node) {
+ r := ir.NewAssignStmt(base.Pos, ir.NewSelectorExpr(base.Pos, ir.ODOT, ir.NewIndexExpr(base.Pos, selv, ir.NewInt(int64(i))), typecheck.Lookup(f)), val)
+ init = append(init, typecheck.Stmt(r))
+ }
+
+ c = typecheck.ConvNop(c, types.Types[types.TUNSAFEPTR])
+ setField("c", c)
+ if !ir.IsBlank(elem) {
+ elem = typecheck.ConvNop(elem, types.Types[types.TUNSAFEPTR])
+ setField("elem", elem)
+ }
+
+ // TODO(mdempsky): There should be a cleaner way to
+ // handle this.
+ if base.Flag.Race {
+ r := mkcallstmt("selectsetpc", typecheck.NodAddr(ir.NewIndexExpr(base.Pos, pcs, ir.NewInt(int64(i)))))
+ init = append(init, r)
+ }
+ }
+ if nsends+nrecvs != ncas {
+ base.Fatalf("walkSelectCases: miscount: %v + %v != %v", nsends, nrecvs, ncas)
+ }
+
+ // run the select
+ base.Pos = sellineno
+ chosen := typecheck.Temp(types.Types[types.TINT])
+ recvOK := typecheck.Temp(types.Types[types.TBOOL])
+ r := ir.NewAssignListStmt(base.Pos, ir.OAS2, nil, nil)
+ r.Lhs = []ir.Node{chosen, recvOK}
+ fn := typecheck.LookupRuntime("selectgo")
+ var fnInit ir.Nodes
+ r.Rhs = []ir.Node{mkcall1(fn, fn.Type().Results(), &fnInit, bytePtrToIndex(selv, 0), bytePtrToIndex(order, 0), pc0, ir.NewInt(int64(nsends)), ir.NewInt(int64(nrecvs)), ir.NewBool(dflt == nil))}
+ init = append(init, fnInit...)
+ init = append(init, typecheck.Stmt(r))
+
+ // selv and order are no longer alive after selectgo.
+ init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, selv))
+ init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, order))
+ if base.Flag.Race {
+ init = append(init, ir.NewUnaryExpr(base.Pos, ir.OVARKILL, pcs))
+ }
+
+ // dispatch cases
+ dispatch := func(cond ir.Node, cas *ir.CommClause) {
+ cond = typecheck.Expr(cond)
+ cond = typecheck.DefaultLit(cond, nil)
+
+ r := ir.NewIfStmt(base.Pos, cond, nil, nil)
+
+ if n := cas.Comm; n != nil && n.Op() == ir.OSELRECV2 {
+ n := n.(*ir.AssignListStmt)
+ if !ir.IsBlank(n.Lhs[1]) {
+ x := ir.NewAssignStmt(base.Pos, n.Lhs[1], recvOK)
+ r.Body.Append(typecheck.Stmt(x))
+ }
+ }
+
+ r.Body.Append(cas.Body.Take()...)
+ r.Body.Append(ir.NewBranchStmt(base.Pos, ir.OBREAK, nil))
+ init = append(init, r)
+ }
+
+ if dflt != nil {
+ ir.SetPos(dflt)
+ dispatch(ir.NewBinaryExpr(base.Pos, ir.OLT, chosen, ir.NewInt(0)), dflt)
+ }
+ for i, cas := range casorder {
+ ir.SetPos(cas)
+ dispatch(ir.NewBinaryExpr(base.Pos, ir.OEQ, chosen, ir.NewInt(int64(i))), cas)
+ }
+
+ return init
+}
+
+// bytePtrToIndex returns a Node representing "(*byte)(&n[i])".
+func bytePtrToIndex(n ir.Node, i int64) ir.Node {
+ s := typecheck.NodAddr(ir.NewIndexExpr(base.Pos, n, ir.NewInt(i)))
+ t := types.NewPtr(types.Types[types.TUINT8])
+ return typecheck.ConvNop(s, t)
+}
+
+var scase *types.Type
+
+// Keep in sync with src/runtime/select.go.
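+//
+// The runtime's scase is, roughly,
+//
+//	type scase struct {
+//		c    *hchan         // chan
+//		elem unsafe.Pointer // data element
+//	}
+//
+// so only the c and elem fields are mirrored here.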
+func scasetype() *types.Type {
+ if scase == nil {
+ scase = types.NewStruct(types.NoPkg, []*types.Field{
+ types.NewField(base.Pos, typecheck.Lookup("c"), types.Types[types.TUNSAFEPTR]),
+ types.NewField(base.Pos, typecheck.Lookup("elem"), types.Types[types.TUNSAFEPTR]),
+ })
+ scase.SetNoalg(true)
+ }
+ return scase
+}
diff --git a/src/cmd/compile/internal/walk/stmt.go b/src/cmd/compile/internal/walk/stmt.go
new file mode 100644
index 0000000..f09e916
--- /dev/null
+++ b/src/cmd/compile/internal/walk/stmt.go
@@ -0,0 +1,231 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+)
+
+// The result of walkStmt MUST be assigned back to n, e.g.
+// n.Left = walkStmt(n.Left)
+func walkStmt(n ir.Node) ir.Node {
+ if n == nil {
+ return n
+ }
+
+ ir.SetPos(n)
+
+ walkStmtList(n.Init())
+
+ switch n.Op() {
+ default:
+ if n.Op() == ir.ONAME {
+ n := n.(*ir.Name)
+ base.Errorf("%v is not a top level statement", n.Sym())
+ } else {
+ base.Errorf("%v is not a top level statement", n.Op())
+ }
+ ir.Dump("nottop", n)
+ return n
+
+ case ir.OAS,
+ ir.OASOP,
+ ir.OAS2,
+ ir.OAS2DOTTYPE,
+ ir.OAS2RECV,
+ ir.OAS2FUNC,
+ ir.OAS2MAPR,
+ ir.OCLOSE,
+ ir.OCOPY,
+ ir.OCALLINTER,
+ ir.OCALL,
+ ir.OCALLFUNC,
+ ir.ODELETE,
+ ir.OSEND,
+ ir.OPRINT,
+ ir.OPRINTN,
+ ir.OPANIC,
+ ir.ORECOVERFP,
+ ir.OGETG:
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+ init := ir.TakeInit(n)
+ n = walkExpr(n, &init)
+ if n.Op() == ir.ONAME {
+ // copy rewrote to a statement list and a temp for the length.
+ // Throw away the temp to avoid plain values as statements.
+ n = ir.NewBlockStmt(n.Pos(), init)
+ init = nil
+ }
+ if len(init) > 0 {
+ switch n.Op() {
+ case ir.OAS, ir.OAS2, ir.OBLOCK:
+ n.(ir.InitNode).PtrInit().Prepend(init...)
+
+ default:
+ init.Append(n)
+ n = ir.NewBlockStmt(n.Pos(), init)
+ }
+ }
+ return n
+
+ // special case for a receive where we throw away
+ // the value received.
+ case ir.ORECV:
+ n := n.(*ir.UnaryExpr)
+ return walkRecv(n)
+
+ case ir.OBREAK,
+ ir.OCONTINUE,
+ ir.OFALL,
+ ir.OGOTO,
+ ir.OLABEL,
+ ir.ODCL,
+ ir.ODCLCONST,
+ ir.ODCLTYPE,
+ ir.OCHECKNIL,
+ ir.OVARDEF,
+ ir.OVARKILL,
+ ir.OVARLIVE:
+ return n
+
+ case ir.OBLOCK:
+ n := n.(*ir.BlockStmt)
+ walkStmtList(n.List)
+ return n
+
+ case ir.OCASE:
+ base.Errorf("case statement out of place")
+ panic("unreachable")
+
+ case ir.ODEFER:
+ n := n.(*ir.GoDeferStmt)
+ ir.CurFunc.SetHasDefer(true)
+ ir.CurFunc.NumDefers++
+ if ir.CurFunc.NumDefers > maxOpenDefers {
+ // Don't allow open-coded defers if there are more than
+ // 8 defers in the function, since we use a single
+ // byte to record active defers.
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+ }
+ if n.Esc() != ir.EscNever {
+ // If n.Esc is not EscNever, then this defer occurs in a loop,
+ // so open-coded defers cannot be used in this function.
+ ir.CurFunc.SetOpenCodedDeferDisallowed(true)
+ }
+ fallthrough
+ case ir.OGO:
+ n := n.(*ir.GoDeferStmt)
+ return walkGoDefer(n)
+
+ case ir.OFOR, ir.OFORUNTIL:
+ n := n.(*ir.ForStmt)
+ return walkFor(n)
+
+ case ir.OIF:
+ n := n.(*ir.IfStmt)
+ return walkIf(n)
+
+ case ir.ORETURN:
+ n := n.(*ir.ReturnStmt)
+ return walkReturn(n)
+
+ case ir.OTAILCALL:
+ n := n.(*ir.TailCallStmt)
+
+ var init ir.Nodes
+ n.Call.X = walkExpr(n.Call.X, &init)
+
+ if len(init) > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+ }
+ return n
+
+ case ir.OINLMARK:
+ n := n.(*ir.InlineMarkStmt)
+ return n
+
+ case ir.OSELECT:
+ n := n.(*ir.SelectStmt)
+ walkSelect(n)
+ return n
+
+ case ir.OSWITCH:
+ n := n.(*ir.SwitchStmt)
+ walkSwitch(n)
+ return n
+
+ case ir.ORANGE:
+ n := n.(*ir.RangeStmt)
+ return walkRange(n)
+ }
+
+ // No return! Each case must return (or panic),
+ // to avoid confusion about what gets returned
+ // in the presence of type assertions.
+}
+
+func walkStmtList(s []ir.Node) {
+ for i := range s {
+ s[i] = walkStmt(s[i])
+ }
+}
+
+// walkFor walks an OFOR or OFORUNTIL node.
+func walkFor(n *ir.ForStmt) ir.Node {
+ if n.Cond != nil {
+ init := ir.TakeInit(n.Cond)
+ walkStmtList(init)
+ n.Cond = walkExpr(n.Cond, &init)
+ n.Cond = ir.InitExpr(init, n.Cond)
+ }
+
+ n.Post = walkStmt(n.Post)
+ if n.Op() == ir.OFORUNTIL {
+ walkStmtList(n.Late)
+ }
+ walkStmtList(n.Body)
+ return n
+}
+
+// validGoDeferCall reports whether call is a valid call to appear in
+// a go or defer statement; that is, whether it's a regular function
+// call without arguments or results.
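+//
+// By the time walk runs, the order phase is expected to have wrapped
+// any go or defer of a call with arguments or results in an argless
+// closure, so e.g. "defer f(x)" arrives here as a call to a generated
+// parameterless function.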
+func validGoDeferCall(call ir.Node) bool {
+ if call, ok := call.(*ir.CallExpr); ok && call.Op() == ir.OCALLFUNC && len(call.KeepAlive) == 0 {
+ sig := call.X.Type()
+ return sig.NumParams()+sig.NumResults() == 0
+ }
+ return false
+}
+
+// walkGoDefer walks an OGO or ODEFER node.
+func walkGoDefer(n *ir.GoDeferStmt) ir.Node {
+ if !validGoDeferCall(n.Call) {
+ base.FatalfAt(n.Pos(), "invalid %v call: %v", n.Op(), n.Call)
+ }
+
+ var init ir.Nodes
+
+ call := n.Call.(*ir.CallExpr)
+ call.X = walkExpr(call.X, &init)
+
+ if len(init) > 0 {
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+ }
+ return n
+}
+
+// walkIf walks an OIF node.
+func walkIf(n *ir.IfStmt) ir.Node {
+ n.Cond = walkExpr(n.Cond, n.PtrInit())
+ walkStmtList(n.Body)
+ walkStmtList(n.Else)
+ return n
+}
diff --git a/src/cmd/compile/internal/walk/switch.go b/src/cmd/compile/internal/walk/switch.go
new file mode 100644
index 0000000..3705c5b
--- /dev/null
+++ b/src/cmd/compile/internal/walk/switch.go
@@ -0,0 +1,597 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "go/constant"
+ "go/token"
+ "sort"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// walkSwitch walks a switch statement.
+func walkSwitch(sw *ir.SwitchStmt) {
+ // Guard against double walk, see #25776.
+ if sw.Walked() {
+ return // Was fatal, but eliminating every possible source of double-walking is hard
+ }
+ sw.SetWalked(true)
+
+ if sw.Tag != nil && sw.Tag.Op() == ir.OTYPESW {
+ walkSwitchType(sw)
+ } else {
+ walkSwitchExpr(sw)
+ }
+}
+
+// walkSwitchExpr generates an AST implementing sw. sw is an
+// expression switch.
+func walkSwitchExpr(sw *ir.SwitchStmt) {
+ lno := ir.SetPos(sw)
+
+ cond := sw.Tag
+ sw.Tag = nil
+
+ // convert switch {...} to switch true {...}
+ if cond == nil {
+ cond = ir.NewBool(true)
+ cond = typecheck.Expr(cond)
+ cond = typecheck.DefaultLit(cond, nil)
+ }
+
+ // Given "switch string(byteslice)",
+ // with all cases being side-effect free,
+ // use a zero-cost alias of the byte slice.
+ // Do this before calling walkExpr on cond,
+ // because walkExpr will lower the string
+ // conversion into a runtime call.
+ // See issue 24937 for more discussion.
+ if cond.Op() == ir.OBYTES2STR && allCaseExprsAreSideEffectFree(sw) {
+ cond := cond.(*ir.ConvExpr)
+ cond.SetOp(ir.OBYTES2STRTMP)
+ }
+
+ cond = walkExpr(cond, sw.PtrInit())
+ if cond.Op() != ir.OLITERAL && cond.Op() != ir.ONIL {
+ cond = copyExpr(cond, cond.Type(), &sw.Compiled)
+ }
+
+ base.Pos = lno
+
+ s := exprSwitch{
+ exprname: cond,
+ }
+
+ var defaultGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.Cases {
+ label := typecheck.AutoLabel(".s")
+ jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+ // Process case dispatch.
+ if len(ncase.List) == 0 {
+ if defaultGoto != nil {
+ base.Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+
+ for _, n1 := range ncase.List {
+ s.Add(ncase.Pos(), n1, jmp)
+ }
+
+ // Process body.
+ body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+ body.Append(ncase.Body...)
+ if fall, pos := endsInFallthrough(ncase.Body); !fall {
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+ br.SetPos(pos)
+ body.Append(br)
+ }
+ }
+ sw.Cases = nil
+
+ if defaultGoto == nil {
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+ br.SetPos(br.Pos().WithNotStmt())
+ defaultGoto = br
+ }
+
+ s.Emit(&sw.Compiled)
+ sw.Compiled.Append(defaultGoto)
+ sw.Compiled.Append(body.Take()...)
+ walkStmtList(sw.Compiled)
+}
+
+// An exprSwitch walks an expression switch.
+type exprSwitch struct {
+ exprname ir.Node // value being switched on
+
+ done ir.Nodes
+ clauses []exprClause
+}
+
+type exprClause struct {
+ pos src.XPos
+ lo, hi ir.Node
+ jmp ir.Node
+}
+
+func (s *exprSwitch) Add(pos src.XPos, expr, jmp ir.Node) {
+ c := exprClause{pos: pos, lo: expr, hi: expr, jmp: jmp}
+ if types.IsOrdered[s.exprname.Type().Kind()] && expr.Op() == ir.OLITERAL {
+ s.clauses = append(s.clauses, c)
+ return
+ }
+
+ s.flush()
+ s.clauses = append(s.clauses, c)
+ s.flush()
+}
+
+func (s *exprSwitch) Emit(out *ir.Nodes) {
+ s.flush()
+ out.Append(s.done.Take()...)
+}
+
+func (s *exprSwitch) flush() {
+ cc := s.clauses
+ s.clauses = nil
+ if len(cc) == 0 {
+ return
+ }
+
+ // Caution: If len(cc) == 1, then cc[0] might not be an OLITERAL.
+ // The code below is structured to implicitly handle this case
+ // (e.g., sort.Slice doesn't need to invoke the less function
+ // when there's only a single slice element).
+
+ if s.exprname.Type().IsString() && len(cc) >= 2 {
+ // Sort strings by length and then by value. It is
+ // much cheaper to compare lengths than values, and
+ // all we need here is consistency. We respect this
+ // sorting below.
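+ // For example, the cases "hi", "gopher", "go" sort to
+ // "go", "hi", "gopher": length first, then value among
+ // equal lengths, which the two-level search below relies on.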
+ sort.Slice(cc, func(i, j int) bool {
+ si := ir.StringVal(cc[i].lo)
+ sj := ir.StringVal(cc[j].lo)
+ if len(si) != len(sj) {
+ return len(si) < len(sj)
+ }
+ return si < sj
+ })
+
+ // runLen returns the string length associated with a
+ // particular run of exprClauses.
+ runLen := func(run []exprClause) int64 { return int64(len(ir.StringVal(run[0].lo))) }
+
+ // Collapse runs of consecutive strings with the same length.
+ var runs [][]exprClause
+ start := 0
+ for i := 1; i < len(cc); i++ {
+ if runLen(cc[start:]) != runLen(cc[i:]) {
+ runs = append(runs, cc[start:i])
+ start = i
+ }
+ }
+ runs = append(runs, cc[start:])
+
+ // Perform two-level binary search.
+ binarySearch(len(runs), &s.done,
+ func(i int) ir.Node {
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(runs[i-1])))
+ },
+ func(i int, nif *ir.IfStmt) {
+ run := runs[i]
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, ir.NewUnaryExpr(base.Pos, ir.OLEN, s.exprname), ir.NewInt(runLen(run)))
+ s.search(run, &nif.Body)
+ },
+ )
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool {
+ return constant.Compare(cc[i].lo.Val(), token.LSS, cc[j].lo.Val())
+ })
+
+ // Merge consecutive integer cases.
+ if s.exprname.Type().IsInteger() {
+ consecutive := func(last, next constant.Value) bool {
+ delta := constant.BinaryOp(next, token.SUB, last)
+ return constant.Compare(delta, token.EQL, constant.MakeInt64(1))
+ }
+
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.jmp == c.jmp && consecutive(last.hi.Val(), c.lo.Val()) {
+ last.hi = c.lo
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+ }
+
+ s.search(cc, &s.done)
+}
+
+func (s *exprSwitch) search(cc []exprClause, out *ir.Nodes) {
+ binarySearch(len(cc), out,
+ func(i int) ir.Node {
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, s.exprname, cc[i-1].hi)
+ },
+ func(i int, nif *ir.IfStmt) {
+ c := &cc[i]
+ nif.Cond = c.test(s.exprname)
+ nif.Body = []ir.Node{c.jmp}
+ },
+ )
+}
+
+func (c *exprClause) test(exprname ir.Node) ir.Node {
+ // Integer range.
+ if c.hi != c.lo {
+ low := ir.NewBinaryExpr(c.pos, ir.OGE, exprname, c.lo)
+ high := ir.NewBinaryExpr(c.pos, ir.OLE, exprname, c.hi)
+ return ir.NewLogicalExpr(c.pos, ir.OANDAND, low, high)
+ }
+
+ // Optimize "switch true { ...}" and "switch false { ... }".
+ if ir.IsConst(exprname, constant.Bool) && !c.lo.Type().IsInterface() {
+ if ir.BoolVal(exprname) {
+ return c.lo
+ } else {
+ return ir.NewUnaryExpr(c.pos, ir.ONOT, c.lo)
+ }
+ }
+
+ return ir.NewBinaryExpr(c.pos, ir.OEQ, exprname, c.lo)
+}
+
+func allCaseExprsAreSideEffectFree(sw *ir.SwitchStmt) bool {
+ // In theory, we could be more aggressive, allowing any
+ // side-effect-free expressions in cases, but it's a bit
+ // tricky because some of that information is unavailable due
+ // to the introduction of temporaries during order.
+ // Restricting to constants is simple and probably powerful
+ // enough.
+
+ for _, ncase := range sw.Cases {
+ for _, v := range ncase.List {
+ if v.Op() != ir.OLITERAL {
+ return false
+ }
+ }
+ }
+ return true
+}
+
+// endsInFallthrough reports whether stmts ends with a "fallthrough" statement.
+func endsInFallthrough(stmts []ir.Node) (bool, src.XPos) {
+ // Search backwards for the index of the fallthrough
+ // statement. Do not assume it'll be in the last
+ // position, since in some cases (e.g. when the statement
+ // list contains autotmp_ variables), one or more OVARKILL
+ // nodes will be at the end of the list.
+
+ i := len(stmts) - 1
+ for i >= 0 && stmts[i].Op() == ir.OVARKILL {
+ i--
+ }
+ if i < 0 {
+ return false, src.NoXPos
+ }
+ return stmts[i].Op() == ir.OFALL, stmts[i].Pos()
+}
+
+// walkSwitchType generates an AST that implements sw, where sw is a
+// type switch.
+func walkSwitchType(sw *ir.SwitchStmt) {
+ var s typeSwitch
+ s.facename = sw.Tag.(*ir.TypeSwitchGuard).X
+ sw.Tag = nil
+
+ s.facename = walkExpr(s.facename, sw.PtrInit())
+ s.facename = copyExpr(s.facename, s.facename.Type(), &sw.Compiled)
+ s.okname = typecheck.Temp(types.Types[types.TBOOL])
+
+ // Get interface descriptor word.
+ // For empty interfaces this will be the type.
+ // For non-empty interfaces this will be the itab.
+ itab := ir.NewUnaryExpr(base.Pos, ir.OITAB, s.facename)
+
+ // For empty interfaces, do:
+ // if e._type == nil {
+ // do nil case if it exists, otherwise default
+ // }
+ // h := e._type.hash
+ // Use a similar strategy for non-empty interfaces.
+ ifNil := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ ifNil.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, itab, typecheck.NodNil())
+ base.Pos = base.Pos.WithNotStmt() // disable statement marks after the first check.
+ ifNil.Cond = typecheck.Expr(ifNil.Cond)
+ ifNil.Cond = typecheck.DefaultLit(ifNil.Cond, nil)
+ // ifNil.Nbody assigned at end.
+ sw.Compiled.Append(ifNil)
+
+ // Load hash from type or itab.
+ dotHash := typeHashFieldOf(base.Pos, itab)
+ s.hashname = copyExpr(dotHash, dotHash.Type(), &sw.Compiled)
+
+ br := ir.NewBranchStmt(base.Pos, ir.OBREAK, nil)
+ var defaultGoto, nilGoto ir.Node
+ var body ir.Nodes
+ for _, ncase := range sw.Cases {
+ caseVar := ncase.Var
+
+ // For single-type cases with an interface type,
+ // we initialize the case variable as part of the type assertion.
+ // In other cases, we initialize it in the body.
+ var singleType *types.Type
+ if len(ncase.List) == 1 && ncase.List[0].Op() == ir.OTYPE {
+ singleType = ncase.List[0].Type()
+ }
+ caseVarInitialized := false
+
+ label := typecheck.AutoLabel(".s")
+ jmp := ir.NewBranchStmt(ncase.Pos(), ir.OGOTO, label)
+
+ if len(ncase.List) == 0 { // default:
+ if defaultGoto != nil {
+ base.Fatalf("duplicate default case not detected during typechecking")
+ }
+ defaultGoto = jmp
+ }
+
+ for _, n1 := range ncase.List {
+ if ir.IsNil(n1) { // case nil:
+ if nilGoto != nil {
+ base.Fatalf("duplicate nil case not detected during typechecking")
+ }
+ nilGoto = jmp
+ continue
+ }
+
+ if singleType != nil && singleType.IsInterface() {
+ s.Add(ncase.Pos(), n1, caseVar, jmp)
+ caseVarInitialized = true
+ } else {
+ s.Add(ncase.Pos(), n1, nil, jmp)
+ }
+ }
+
+ body.Append(ir.NewLabelStmt(ncase.Pos(), label))
+ if caseVar != nil && !caseVarInitialized {
+ val := s.facename
+ if singleType != nil {
+ // We have a single concrete type. Extract the data.
+ if singleType.IsInterface() {
+ base.Fatalf("singleType interface should have been handled in Add")
+ }
+ val = ifaceData(ncase.Pos(), s.facename, singleType)
+ }
+ if len(ncase.List) == 1 && ncase.List[0].Op() == ir.ODYNAMICTYPE {
+ dt := ncase.List[0].(*ir.DynamicType)
+ x := ir.NewDynamicTypeAssertExpr(ncase.Pos(), ir.ODYNAMICDOTTYPE, val, dt.X)
+ if dt.ITab != nil {
+ // TODO: make ITab a separate field in DynamicTypeAssertExpr?
+ x.T = dt.ITab
+ }
+ x.SetType(caseVar.Type())
+ x.SetTypecheck(1)
+ val = x
+ }
+ l := []ir.Node{
+ ir.NewDecl(ncase.Pos(), ir.ODCL, caseVar),
+ ir.NewAssignStmt(ncase.Pos(), caseVar, val),
+ }
+ typecheck.Stmts(l)
+ body.Append(l...)
+ }
+ body.Append(ncase.Body...)
+ body.Append(br)
+ }
+ sw.Cases = nil
+
+ if defaultGoto == nil {
+ defaultGoto = br
+ }
+ if nilGoto == nil {
+ nilGoto = defaultGoto
+ }
+ ifNil.Body = []ir.Node{nilGoto}
+
+ s.Emit(&sw.Compiled)
+ sw.Compiled.Append(defaultGoto)
+ sw.Compiled.Append(body.Take()...)
+
+ walkStmtList(sw.Compiled)
+}
+
+// typeHashFieldOf returns an expression to select the type hash field
+// from an interface's descriptor word (whether a *runtime._type or
+// *runtime.itab pointer).
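+//
+// Both layouts (see runtime/type.go and runtime/runtime2.go) keep their
+// uint32 hash at byte offset 2*PtrSize, so the two branches below can
+// use the same runtimeField offset.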
+func typeHashFieldOf(pos src.XPos, itab *ir.UnaryExpr) *ir.SelectorExpr {
+ if itab.Op() != ir.OITAB {
+ base.Fatalf("expected OITAB, got %v", itab.Op())
+ }
+ var hashField *types.Field
+ if itab.X.Type().IsEmptyInterface() {
+ // runtime._type's hash field
+ if rtypeHashField == nil {
+ rtypeHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+ }
+ hashField = rtypeHashField
+ } else {
+ // runtime.itab's hash field
+ if itabHashField == nil {
+ itabHashField = runtimeField("hash", int64(2*types.PtrSize), types.Types[types.TUINT32])
+ }
+ hashField = itabHashField
+ }
+ return boundedDotPtr(pos, itab, hashField)
+}
+
+var rtypeHashField, itabHashField *types.Field
+
+// A typeSwitch walks a type switch.
+type typeSwitch struct {
+ // Temporary variables (i.e., ONAMEs) used by type switch dispatch logic:
+ facename ir.Node // value being type-switched on
+ hashname ir.Node // type hash of the value being type-switched on
+ okname ir.Node // boolean used for comma-ok type assertions
+
+ done ir.Nodes
+ clauses []typeClause
+}
+
+type typeClause struct {
+ hash uint32
+ body ir.Nodes
+}
+
+func (s *typeSwitch) Add(pos src.XPos, n1 ir.Node, caseVar *ir.Name, jmp ir.Node) {
+ typ := n1.Type()
+ var body ir.Nodes
+ if caseVar != nil {
+ l := []ir.Node{
+ ir.NewDecl(pos, ir.ODCL, caseVar),
+ ir.NewAssignStmt(pos, caseVar, nil),
+ }
+ typecheck.Stmts(l)
+ body.Append(l...)
+ } else {
+ caseVar = ir.BlankNode.(*ir.Name)
+ }
+
+ // cv, ok = iface.(type)
+ as := ir.NewAssignListStmt(pos, ir.OAS2, nil, nil)
+ as.Lhs = []ir.Node{caseVar, s.okname} // cv, ok =
+ switch n1.Op() {
+ case ir.OTYPE:
+ // Static type assertion (non-generic)
+ dot := ir.NewTypeAssertExpr(pos, s.facename, nil)
+ dot.SetType(typ) // iface.(type)
+ as.Rhs = []ir.Node{dot}
+ case ir.ODYNAMICTYPE:
+ // Dynamic type assertion (generic)
+ dt := n1.(*ir.DynamicType)
+ dot := ir.NewDynamicTypeAssertExpr(pos, ir.ODYNAMICDOTTYPE, s.facename, dt.X)
+ if dt.ITab != nil {
+ dot.T = dt.ITab
+ }
+ dot.SetType(typ)
+ dot.SetTypecheck(1)
+ as.Rhs = []ir.Node{dot}
+ default:
+ base.Fatalf("unhandled type case %s", n1.Op())
+ }
+ appendWalkStmt(&body, as)
+
+ // if ok { goto label }
+ nif := ir.NewIfStmt(pos, nil, nil, nil)
+ nif.Cond = s.okname
+ nif.Body = []ir.Node{jmp}
+ body.Append(nif)
+
+ if n1.Op() == ir.OTYPE && !typ.IsInterface() {
+ // Defer static, noninterface cases so they can be binary searched by hash.
+ s.clauses = append(s.clauses, typeClause{
+ hash: types.TypeHash(n1.Type()),
+ body: body,
+ })
+ return
+ }
+
+ s.flush()
+ s.done.Append(body.Take()...)
+}
+
+func (s *typeSwitch) Emit(out *ir.Nodes) {
+ s.flush()
+ out.Append(s.done.Take()...)
+}
+
+func (s *typeSwitch) flush() {
+ cc := s.clauses
+ s.clauses = nil
+ if len(cc) == 0 {
+ return
+ }
+
+ sort.Slice(cc, func(i, j int) bool { return cc[i].hash < cc[j].hash })
+
+ // Combine adjacent cases with the same hash.
+ merged := cc[:1]
+ for _, c := range cc[1:] {
+ last := &merged[len(merged)-1]
+ if last.hash == c.hash {
+ last.body.Append(c.body.Take()...)
+ } else {
+ merged = append(merged, c)
+ }
+ }
+ cc = merged
+
+ binarySearch(len(cc), &s.done,
+ func(i int) ir.Node {
+ return ir.NewBinaryExpr(base.Pos, ir.OLE, s.hashname, ir.NewInt(int64(cc[i-1].hash)))
+ },
+ func(i int, nif *ir.IfStmt) {
+ // TODO(mdempsky): Omit hash equality check if
+ // there's only one type.
+ c := cc[i]
+ nif.Cond = ir.NewBinaryExpr(base.Pos, ir.OEQ, s.hashname, ir.NewInt(int64(c.hash)))
+ nif.Body.Append(c.body.Take()...)
+ },
+ )
+}
+
+// binarySearch constructs a binary search tree for handling n cases,
+// and appends it to out. It's used for efficiently implementing
+// switch statements.
+//
+// less(i) should return a boolean expression. If it evaluates true,
+// then cases before i will be tested; otherwise, cases i and later.
+//
+// leaf(i, nif) should set up nif (an OIF node) to test case i. In
+// particular, it should set nif.Cond and nif.Body.
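+//
+// For an integer expression switch over x with cases 1, 3, 5 and 7
+// (a sketch; fewer than binarySearchMin cases are emitted as a linear
+// if chain), the emitted tree is roughly:
+//
+//	if x <= 3 {
+//		if x == 1 { goto case1 } else if x == 3 { goto case3 }
+//	} else {
+//		if x == 5 { goto case5 } else if x == 7 { goto case7 }
+//	}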
+func binarySearch(n int, out *ir.Nodes, less func(i int) ir.Node, leaf func(i int, nif *ir.IfStmt)) {
+ const binarySearchMin = 4 // minimum number of cases for binary search
+
+ var do func(lo, hi int, out *ir.Nodes)
+ do = func(lo, hi int, out *ir.Nodes) {
+ n := hi - lo
+ if n < binarySearchMin {
+ for i := lo; i < hi; i++ {
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ leaf(i, nif)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.Cond = typecheck.Expr(nif.Cond)
+ nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+ out.Append(nif)
+ out = &nif.Else
+ }
+ return
+ }
+
+ half := lo + n/2
+ nif := ir.NewIfStmt(base.Pos, nil, nil, nil)
+ nif.Cond = less(half)
+ base.Pos = base.Pos.WithNotStmt()
+ nif.Cond = typecheck.Expr(nif.Cond)
+ nif.Cond = typecheck.DefaultLit(nif.Cond, nil)
+ do(lo, half, &nif.Body)
+ do(half, hi, &nif.Else)
+ out.Append(nif)
+ }
+
+ do(0, n, out)
+}
diff --git a/src/cmd/compile/internal/walk/temp.go b/src/cmd/compile/internal/walk/temp.go
new file mode 100644
index 0000000..9879a6c
--- /dev/null
+++ b/src/cmd/compile/internal/walk/temp.go
@@ -0,0 +1,40 @@
+// Copyright 2021 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+)
+
+// initStackTemp appends statements to init to initialize the given
+// temporary variable to val, and then returns the expression &tmp.
+func initStackTemp(init *ir.Nodes, tmp *ir.Name, val ir.Node) *ir.AddrExpr {
+ if val != nil && !types.Identical(tmp.Type(), val.Type()) {
+ base.Fatalf("bad initial value for %L: %L", tmp, val)
+ }
+ appendWalkStmt(init, ir.NewAssignStmt(base.Pos, tmp, val))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
+
+// stackTempAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of the given type. Statements to
+// zero-initialize tmp are appended to init.
+func stackTempAddr(init *ir.Nodes, typ *types.Type) *ir.AddrExpr {
+ return initStackTemp(init, typecheck.Temp(typ), nil)
+}
+
+// stackBufAddr returns the expression &tmp, where tmp is a newly
+// allocated temporary variable of type [len]elem. This variable is
+// not initialized, and elem must not contain pointers.
+func stackBufAddr(len int64, elem *types.Type) *ir.AddrExpr {
+ if elem.HasPointers() {
+ base.FatalfAt(base.Pos, "%v has pointers", elem)
+ }
+ tmp := typecheck.Temp(types.NewArray(elem, len))
+ return typecheck.Expr(typecheck.NodAddr(tmp)).(*ir.AddrExpr)
+}
diff --git a/src/cmd/compile/internal/walk/walk.go b/src/cmd/compile/internal/walk/walk.go
new file mode 100644
index 0000000..78cc2e6
--- /dev/null
+++ b/src/cmd/compile/internal/walk/walk.go
@@ -0,0 +1,402 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package walk
+
+import (
+ "errors"
+ "fmt"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/reflectdata"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/typecheck"
+ "cmd/compile/internal/types"
+ "cmd/internal/src"
+)
+
+// The constant is known to the runtime.
+const tmpstringbufsize = 32
+const zeroValSize = 1024 // must match value of runtime/map.go:maxZero
+
+func Walk(fn *ir.Func) {
+ ir.CurFunc = fn
+ errorsBefore := base.Errors()
+ order(fn)
+ if base.Errors() > errorsBefore {
+ return
+ }
+
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("\nbefore walk %v", ir.CurFunc.Sym())
+ ir.DumpList(s, ir.CurFunc.Body)
+ }
+
+ lno := base.Pos
+
+ base.Pos = lno
+ if base.Errors() > errorsBefore {
+ return
+ }
+ walkStmtList(ir.CurFunc.Body)
+ if base.Flag.W != 0 {
+ s := fmt.Sprintf("after walk %v", ir.CurFunc.Sym())
+ ir.DumpList(s, ir.CurFunc.Body)
+ }
+
+ if base.Flag.Cfg.Instrumenting {
+ instrument(fn)
+ }
+
+ // Eagerly compute sizes of all variables for SSA.
+ for _, n := range fn.Dcl {
+ types.CalcSize(n.Type())
+ }
+}
+
+// walkRecv walks an ORECV node.
+func walkRecv(n *ir.UnaryExpr) ir.Node {
+ if n.Typecheck() == 0 {
+ base.Fatalf("missing typecheck: %+v", n)
+ }
+ init := ir.TakeInit(n)
+
+ n.X = walkExpr(n.X, &init)
+ call := walkExpr(mkcall1(chanfn("chanrecv1", 2, n.X.Type()), nil, &init, n.X, typecheck.NodNil()), &init)
+ return ir.InitExpr(init, call)
+}
+
+func convas(n *ir.AssignStmt, init *ir.Nodes) *ir.AssignStmt {
+ if n.Op() != ir.OAS {
+ base.Fatalf("convas: not OAS %v", n.Op())
+ }
+ n.SetTypecheck(1)
+
+ if n.X == nil || n.Y == nil {
+ return n
+ }
+
+ lt := n.X.Type()
+ rt := n.Y.Type()
+ if lt == nil || rt == nil {
+ return n
+ }
+
+ if ir.IsBlank(n.X) {
+ n.Y = typecheck.DefaultLit(n.Y, nil)
+ return n
+ }
+
+ if !types.Identical(lt, rt) {
+ n.Y = typecheck.AssignConv(n.Y, lt, "assignment")
+ n.Y = walkExpr(n.Y, init)
+ }
+ types.CalcSize(n.Y.Type())
+
+ return n
+}
+
+var stop = errors.New("stop")
+
+func vmkcall(fn ir.Node, t *types.Type, init *ir.Nodes, va []ir.Node) *ir.CallExpr {
+ if init == nil {
+ base.Fatalf("mkcall with nil init: %v", fn)
+ }
+ if fn.Type() == nil || fn.Type().Kind() != types.TFUNC {
+ base.Fatalf("mkcall %v %v", fn, fn.Type())
+ }
+
+ n := fn.Type().NumParams()
+ if n != len(va) {
+ base.Fatalf("vmkcall %v needs %v args got %v", fn, n, len(va))
+ }
+
+ call := typecheck.Call(base.Pos, fn, va, false).(*ir.CallExpr)
+ call.SetType(t)
+ return walkExpr(call, init).(*ir.CallExpr)
+}
+
+func mkcall(name string, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+ return vmkcall(typecheck.LookupRuntime(name), t, init, args)
+}
+
+func mkcallstmt(name string, args ...ir.Node) ir.Node {
+ return mkcallstmt1(typecheck.LookupRuntime(name), args...)
+}
+
+func mkcall1(fn ir.Node, t *types.Type, init *ir.Nodes, args ...ir.Node) *ir.CallExpr {
+ return vmkcall(fn, t, init, args)
+}
+
+func mkcallstmt1(fn ir.Node, args ...ir.Node) ir.Node {
+ var init ir.Nodes
+ n := vmkcall(fn, nil, &init, args)
+ if len(init) == 0 {
+ return n
+ }
+ init.Append(n)
+ return ir.NewBlockStmt(n.Pos(), init)
+}
+
+func chanfn(name string, n int, t *types.Type) ir.Node {
+ if !t.IsChan() {
+ base.Fatalf("chanfn %v", t)
+ }
+ fn := typecheck.LookupRuntime(name)
+ switch n {
+ default:
+ base.Fatalf("chanfn %d", n)
+ case 1:
+ fn = typecheck.SubstArgTypes(fn, t.Elem())
+ case 2:
+ fn = typecheck.SubstArgTypes(fn, t.Elem(), t.Elem())
+ }
+ return fn
+}
+
+func mapfn(name string, t *types.Type, isfat bool) ir.Node {
+ if !t.IsMap() {
+ base.Fatalf("mapfn %v", t)
+ }
+ fn := typecheck.LookupRuntime(name)
+ if mapfast(t) == mapslow || isfat {
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key(), t.Elem())
+ } else {
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Elem())
+ }
+ return fn
+}
+
+func mapfndel(name string, t *types.Type) ir.Node {
+ if !t.IsMap() {
+ base.Fatalf("mapfn %v", t)
+ }
+ fn := typecheck.LookupRuntime(name)
+ if mapfast(t) == mapslow {
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem(), t.Key())
+ } else {
+ fn = typecheck.SubstArgTypes(fn, t.Key(), t.Elem())
+ }
+ return fn
+}
+
+const (
+ mapslow = iota
+ mapfast32
+ mapfast32ptr
+ mapfast64
+ mapfast64ptr
+ mapfaststr
+ nmapfast
+)
+
+type mapnames [nmapfast]string
+
+func mkmapnames(base string, ptr string) mapnames {
+ return mapnames{base, base + "_fast32", base + "_fast32" + ptr, base + "_fast64", base + "_fast64" + ptr, base + "_faststr"}
+}
+
+var mapaccess1 = mkmapnames("mapaccess1", "")
+var mapaccess2 = mkmapnames("mapaccess2", "")
+var mapassign = mkmapnames("mapassign", "ptr")
+var mapdelete = mkmapnames("mapdelete", "")
+
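+// mapfast classifies map type t by which specialized runtime entry
+// points apply: for example, a map[string]T with a small enough element
+// uses the _faststr variants, while maps with large elements fall back
+// to the generic (mapslow) forms.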
+func mapfast(t *types.Type) int {
+ // Check runtime/map.go:maxElemSize before changing.
+ if t.Elem().Size() > 128 {
+ return mapslow
+ }
+ switch reflectdata.AlgType(t.Key()) {
+ case types.AMEM32:
+ if !t.Key().HasPointers() {
+ return mapfast32
+ }
+ if types.PtrSize == 4 {
+ return mapfast32ptr
+ }
+ base.Fatalf("small pointer %v", t.Key())
+ case types.AMEM64:
+ if !t.Key().HasPointers() {
+ return mapfast64
+ }
+ if types.PtrSize == 8 {
+ return mapfast64ptr
+ }
+ // Two-word object, at least one of which is a pointer.
+ // Use the slow path.
+ case types.ASTRING:
+ return mapfaststr
+ }
+ return mapslow
+}
+
+func walkAppendArgs(n *ir.CallExpr, init *ir.Nodes) {
+ walkExprListSafe(n.Args, init)
+
+ // walkExprListSafe will leave OINDEX (s[n]) alone if both s
+ // and n are names or literals, but those may index the slice we're
+ // modifying here. Fix explicitly.
+ ls := n.Args
+ for i1, n1 := range ls {
+ ls[i1] = cheapExpr(n1, init)
+ }
+}
+
+// appendWalkStmt typechecks and walks stmt and then appends it to init.
+func appendWalkStmt(init *ir.Nodes, stmt ir.Node) {
+ op := stmt.Op()
+ n := typecheck.Stmt(stmt)
+ if op == ir.OAS || op == ir.OAS2 {
+ // If the assignment has side effects, walkExpr will append them
+ // directly to init for us, while walkStmt will wrap it in an OBLOCK.
+ // We need to append them directly.
+ // TODO(rsc): Clean this up.
+ n = walkExpr(n, init)
+ } else {
+ n = walkStmt(n)
+ }
+ init.Append(n)
+}
+
+// The max number of defers in a function using open-coded defers. We enforce this
+// limit because the deferBits bitmask is currently a single byte (to minimize code size)
+const maxOpenDefers = 8
+
+// backingArrayPtrLen extracts the pointer and length from a slice or string.
+// This constructs two nodes referring to n, so n must be a cheapExpr.
+func backingArrayPtrLen(n ir.Node) (ptr, length ir.Node) {
+ var init ir.Nodes
+ c := cheapExpr(n, &init)
+ if c != n || len(init) != 0 {
+ base.Fatalf("backingArrayPtrLen not cheap: %v", n)
+ }
+ ptr = ir.NewUnaryExpr(base.Pos, ir.OSPTR, n)
+ if n.Type().IsString() {
+ ptr.SetType(types.Types[types.TUINT8].PtrTo())
+ } else {
+ ptr.SetType(n.Type().Elem().PtrTo())
+ }
+ length = ir.NewUnaryExpr(base.Pos, ir.OLEN, n)
+ length.SetType(types.Types[types.TINT])
+ return ptr, length
+}
+
+// mayCall reports whether evaluating expression n may require
+// function calls, which could clobber function call arguments/results
+// currently on the stack.
+func mayCall(n ir.Node) bool {
+ // When instrumenting, any expression might require function calls.
+ if base.Flag.Cfg.Instrumenting {
+ return true
+ }
+
+ isSoftFloat := func(typ *types.Type) bool {
+ return types.IsFloat[typ.Kind()] || types.IsComplex[typ.Kind()]
+ }
+
+ return ir.Any(n, func(n ir.Node) bool {
+ // walk should have already moved any Init blocks off of
+ // expressions.
+ if len(n.Init()) != 0 {
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
+ }
+
+ switch n.Op() {
+ default:
+ base.FatalfAt(n.Pos(), "mayCall %+v", n)
+
+ case ir.OCALLFUNC, ir.OCALLINTER,
+ ir.OUNSAFEADD, ir.OUNSAFESLICE:
+ return true
+
+ case ir.OINDEX, ir.OSLICE, ir.OSLICEARR, ir.OSLICE3, ir.OSLICE3ARR, ir.OSLICESTR,
+ ir.ODEREF, ir.ODOTPTR, ir.ODOTTYPE, ir.ODYNAMICDOTTYPE, ir.ODIV, ir.OMOD, ir.OSLICE2ARRPTR:
+ // These ops might panic, make sure they are done
+ // before we start marshaling args for a call. See issue 16760.
+ return true
+
+ case ir.OANDAND, ir.OOROR:
+ n := n.(*ir.LogicalExpr)
+ // The RHS expression may have init statements that
+ // should only execute conditionally, and so cannot be
+ // pulled out to the top-level init list. We could try
+ // to be more precise here.
+ return len(n.Y.Init()) != 0
+
+ // When using soft-float, these ops might be rewritten to function calls
+ // so we ensure they are evaluated first.
+ case ir.OADD, ir.OSUB, ir.OMUL, ir.ONEG:
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.Type())
+ case ir.OLT, ir.OEQ, ir.ONE, ir.OLE, ir.OGE, ir.OGT:
+ n := n.(*ir.BinaryExpr)
+ return ssagen.Arch.SoftFloat && isSoftFloat(n.X.Type())
+ case ir.OCONV:
+ n := n.(*ir.ConvExpr)
+ return ssagen.Arch.SoftFloat && (isSoftFloat(n.Type()) || isSoftFloat(n.X.Type()))
+
+ case ir.OLITERAL, ir.ONIL, ir.ONAME, ir.OLINKSYMOFFSET, ir.OMETHEXPR,
+ ir.OAND, ir.OANDNOT, ir.OLSH, ir.OOR, ir.ORSH, ir.OXOR, ir.OCOMPLEX, ir.OEFACE,
+ ir.OADDR, ir.OBITNOT, ir.ONOT, ir.OPLUS,
+ ir.OCAP, ir.OIMAG, ir.OLEN, ir.OREAL,
+ ir.OCONVNOP, ir.ODOT,
+ ir.OCFUNC, ir.OIDATA, ir.OITAB, ir.OSPTR,
+ ir.OBYTES2STRTMP, ir.OGETG, ir.OGETCALLERPC, ir.OGETCALLERSP, ir.OSLICEHEADER:
+ // ok: operations that don't require function calls.
+ // Expand as needed.
+ }
+
+ return false
+ })
+}
+
+// itabType loads the _type field from a runtime.itab struct.
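+// In runtime.itab (assumed layout; see runtime/runtime2.go) _type is the
+// second word, immediately after inter, hence the PtrSize offset below.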
+func itabType(itab ir.Node) ir.Node {
+ if itabTypeField == nil {
+ // runtime.itab's _type field
+ itabTypeField = runtimeField("_type", int64(types.PtrSize), types.NewPtr(types.Types[types.TUINT8]))
+ }
+ return boundedDotPtr(base.Pos, itab, itabTypeField)
+}
+
+var itabTypeField *types.Field
+
+// boundedDotPtr returns a selector expression representing ptr.field
+// and omits nil-pointer checks for ptr.
+func boundedDotPtr(pos src.XPos, ptr ir.Node, field *types.Field) *ir.SelectorExpr {
+ sel := ir.NewSelectorExpr(pos, ir.ODOTPTR, ptr, field.Sym)
+ sel.Selection = field
+ sel.SetType(field.Type)
+ sel.SetTypecheck(1)
+ sel.SetBounded(true) // guaranteed not to fault
+ return sel
+}
+
+func runtimeField(name string, offset int64, typ *types.Type) *types.Field {
+ f := types.NewField(src.NoXPos, ir.Pkgs.Runtime.Lookup(name), typ)
+ f.Offset = offset
+ return f
+}
+
+// ifaceData loads the data field from an interface.
+// The concrete type must be known to have type t.
+// It follows the pointer if !IsDirectIface(t).
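+//
+// For example, for a pointer the interface data word is the value
+// itself, while for a non-direct type such as [4]int the data word
+// holds a pointer that is dereferenced to reach the value.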
+func ifaceData(pos src.XPos, n ir.Node, t *types.Type) ir.Node {
+ if t.IsInterface() {
+ base.Fatalf("ifaceData interface: %v", t)
+ }
+ ptr := ir.NewUnaryExpr(pos, ir.OIDATA, n)
+ if types.IsDirectIface(t) {
+ ptr.SetType(t)
+ ptr.SetTypecheck(1)
+ return ptr
+ }
+ ptr.SetType(types.NewPtr(t))
+ ptr.SetTypecheck(1)
+ ind := ir.NewStarExpr(pos, ptr)
+ ind.SetType(t)
+ ind.SetTypecheck(1)
+ ind.SetBounded(true)
+ return ind
+}
diff --git a/src/cmd/compile/internal/wasm/ssa.go b/src/cmd/compile/internal/wasm/ssa.go
new file mode 100644
index 0000000..765051c
--- /dev/null
+++ b/src/cmd/compile/internal/wasm/ssa.go
@@ -0,0 +1,511 @@
+// Copyright 2018 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package wasm
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/wasm"
+ "internal/buildcfg"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &wasm.Linkwasm
+ arch.REGSP = wasm.REG_SP
+ arch.MAXWIDTH = 1 << 50
+
+ arch.ZeroRange = zeroRange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+}
+
+func zeroRange(pp *objw.Progs, p *obj.Prog, off, cnt int64, state *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
+ if cnt%8 != 0 {
+ base.Fatalf("zerorange count not a multiple of widthptr %d", cnt)
+ }
+
+ for i := int64(0); i < cnt; i += 8 {
+ p = pp.Append(p, wasm.AGet, obj.TYPE_REG, wasm.REG_SP, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Const, obj.TYPE_CONST, 0, 0, 0, 0, 0)
+ p = pp.Append(p, wasm.AI64Store, 0, 0, 0, obj.TYPE_CONST, 0, off+i)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ return pp.Prog(wasm.ANop)
+}
+
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ case ssa.BlockIf:
+ switch next {
+ case b.Succs[0].Block():
+ // if false, jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ case b.Succs[1].Block():
+ // if true, jump to b.Succs[0]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ default:
+ // if true, jump to b.Succs[0], else jump to b.Succs[1]
+ getValue32(s, b.Controls[0])
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ s.Prog(wasm.AEnd)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ }
+
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.BlockExit, ssa.BlockRetJmp:
+
+ case ssa.BlockDefer:
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: wasm.REG_RET0}
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AI32Eqz)
+ s.Prog(wasm.AIf)
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ s.Prog(wasm.AEnd)
+ if next != b.Succs[0].Block() {
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+
+ default:
+ panic("unexpected block")
+ }
+
+ // Entry point for the next block. Used by the JMP in goToBlock.
+ s.Prog(wasm.ARESUMEPOINT)
+
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.OpWasmLoweredStaticCall, ssa.OpWasmLoweredClosureCall, ssa.OpWasmLoweredInterCall, ssa.OpWasmLoweredTailCall:
+ s.PrepareCall(v)
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn == ir.Syms.Deferreturn {
+ // The runtime needs to inject jumps to
+ // deferreturn calls using the address in
+ // _func.deferreturn. Hence, the call to
+ // deferreturn must itself be a resumption
+ // point so it gets a target PC.
+ s.Prog(wasm.ARESUMEPOINT)
+ }
+ if v.Op == ssa.OpWasmLoweredClosureCall {
+ getValue64(s, v.Args[1])
+ setReg(s, wasm.REG_CTXT)
+ }
+ if call, ok := v.Aux.(*ssa.AuxCall); ok && call.Fn != nil {
+ sym := call.Fn
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: sym}
+ p.Pos = v.Pos
+ if v.Op == ssa.OpWasmLoweredTailCall {
+ p.As = obj.ARET
+ }
+ } else {
+ getValue64(s, v.Args[0])
+ p := s.Prog(obj.ACALL)
+ p.To = obj.Addr{Type: obj.TYPE_NONE}
+ p.Pos = v.Pos
+ }
+
+ case ssa.OpWasmLoweredMove:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ i32Const(s, int32(v.AuxInt))
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmMove}
+
+ case ssa.OpWasmLoweredZero:
+ getValue32(s, v.Args[0])
+ i32Const(s, int32(v.AuxInt))
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmZero}
+
+ case ssa.OpWasmLoweredNilCheck:
+ getValue64(s, v.Args[0])
+ s.Prog(wasm.AI64Eqz)
+ s.Prog(wasm.AIf)
+ p := s.Prog(wasm.ACALLNORESUME)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.SigPanic}
+ s.Prog(wasm.AEnd)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+
+ case ssa.OpWasmLoweredWB:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ p := s.Prog(wasm.ACALLNORESUME) // TODO(neelance): If possible, turn this into a simple wasm.ACall.
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: v.Aux.(*obj.LSym)}
+
+ case ssa.OpWasmI64Store8, ssa.OpWasmI64Store16, ssa.OpWasmI64Store32, ssa.OpWasmI64Store, ssa.OpWasmF32Store, ssa.OpWasmF64Store:
+ getValue32(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ p := s.Prog(v.Op.Asm())
+ p.To = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpStoreReg:
+ getReg(s, wasm.REG_SP)
+ getValue64(s, v.Args[0])
+ p := s.Prog(storeOp(v.Type))
+ ssagen.AddrAuto(&p.To, v)
+
+ case ssa.OpClobber, ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+
+ default:
+ if v.Type.IsMemory() {
+ return
+ }
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped++
+ // If a Value is marked OnWasmStack, we don't generate the value and store it to a register now.
+ // Instead, we delay the generation to when the value is used and then directly generate it on the WebAssembly stack.
+ return
+ }
+ ssaGenValueOnStack(s, v, true)
+ if s.OnWasmStackSkipped != 0 {
+ panic("wasm: bad stack")
+ }
+ setReg(s, v.Reg())
+ }
+}
+
+func ssaGenValueOnStack(s *ssagen.State, v *ssa.Value, extend bool) {
+ switch v.Op {
+ case ssa.OpWasmLoweredGetClosurePtr:
+ getReg(s, wasm.REG_CTXT)
+
+ case ssa.OpWasmLoweredGetCallerPC:
+ p := s.Prog(wasm.AI64Load)
+ // Caller PC is stored 8 bytes below first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_MEM,
+ Name: obj.NAME_PARAM,
+ Offset: -8,
+ }
+
+ case ssa.OpWasmLoweredGetCallerSP:
+ p := s.Prog(wasm.AGet)
+ // Caller SP is the address of the first parameter.
+ p.From = obj.Addr{
+ Type: obj.TYPE_ADDR,
+ Name: obj.NAME_PARAM,
+ Reg: wasm.REG_SP,
+ Offset: 0,
+ }
+
+ case ssa.OpWasmLoweredAddr:
+ if v.Aux == nil { // address of off(SP), no symbol
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(wasm.AI64Add)
+ break
+ }
+ p := s.Prog(wasm.AGet)
+ p.From.Type = obj.TYPE_ADDR
+ switch v.Aux.(type) {
+ case *obj.LSym:
+ ssagen.AddAux(&p.From, v)
+ case *ir.Name:
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ default:
+ panic("wasm: bad LoweredAddr")
+ }
+
+ case ssa.OpWasmLoweredConvert:
+ getValue64(s, v.Args[0])
+
+ case ssa.OpWasmSelect:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ getValue32(s, v.Args[2])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64AddConst:
+ getValue64(s, v.Args[0])
+ i64Const(s, v.AuxInt)
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI64Const:
+ i64Const(s, v.AuxInt)
+
+ case ssa.OpWasmF32Const:
+ f32Const(s, v.AuxFloat())
+
+ case ssa.OpWasmF64Const:
+ f64Const(s, v.AuxFloat())
+
+ case ssa.OpWasmI64Load8U, ssa.OpWasmI64Load8S, ssa.OpWasmI64Load16U, ssa.OpWasmI64Load16S, ssa.OpWasmI64Load32U, ssa.OpWasmI64Load32S, ssa.OpWasmI64Load, ssa.OpWasmF32Load, ssa.OpWasmF64Load:
+ getValue32(s, v.Args[0])
+ p := s.Prog(v.Op.Asm())
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: v.AuxInt}
+
+ case ssa.OpWasmI64Eqz:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+ if extend {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+
+ case ssa.OpWasmI64Add, ssa.OpWasmI64Sub, ssa.OpWasmI64Mul, ssa.OpWasmI64DivU, ssa.OpWasmI64RemS, ssa.OpWasmI64RemU, ssa.OpWasmI64And, ssa.OpWasmI64Or, ssa.OpWasmI64Xor, ssa.OpWasmI64Shl, ssa.OpWasmI64ShrS, ssa.OpWasmI64ShrU, ssa.OpWasmI64Rotl,
+ ssa.OpWasmF32Add, ssa.OpWasmF32Sub, ssa.OpWasmF32Mul, ssa.OpWasmF32Div, ssa.OpWasmF32Copysign,
+ ssa.OpWasmF64Add, ssa.OpWasmF64Sub, ssa.OpWasmF64Mul, ssa.OpWasmF64Div, ssa.OpWasmF64Copysign:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmI32Rotl:
+ getValue32(s, v.Args[0])
+ getValue32(s, v.Args[1])
+ s.Prog(wasm.AI32Rotl)
+ s.Prog(wasm.AI64ExtendI32U)
+
+ case ssa.OpWasmI64DivS:
+ getValue64(s, v.Args[0])
+ getValue64(s, v.Args[1])
+ if v.Type.Size() == 8 {
+ // Division of int64 needs helper function wasmDiv to handle the MinInt64 / -1 case.
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmDiv}
+ break
+ }
+ s.Prog(wasm.AI64DivS)
+
+ case ssa.OpWasmI64TruncSatF32S, ssa.OpWasmI64TruncSatF64S:
+ getValue64(s, v.Args[0])
+ if buildcfg.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32S {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncS}
+ }
+
+ case ssa.OpWasmI64TruncSatF32U, ssa.OpWasmI64TruncSatF64U:
+ getValue64(s, v.Args[0])
+ if buildcfg.GOWASM.SatConv {
+ s.Prog(v.Op.Asm())
+ } else {
+ if v.Op == ssa.OpWasmI64TruncSatF32U {
+ s.Prog(wasm.AF64PromoteF32)
+ }
+ p := s.Prog(wasm.ACall)
+ p.To = obj.Addr{Type: obj.TYPE_MEM, Name: obj.NAME_EXTERN, Sym: ir.Syms.WasmTruncU}
+ }
+
+ case ssa.OpWasmF32DemoteF64:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF64PromoteF32:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpWasmF32ConvertI64S, ssa.OpWasmF32ConvertI64U,
+ ssa.OpWasmF64ConvertI64S, ssa.OpWasmF64ConvertI64U,
+ ssa.OpWasmI64Extend8S, ssa.OpWasmI64Extend16S, ssa.OpWasmI64Extend32S,
+ ssa.OpWasmF32Neg, ssa.OpWasmF32Sqrt, ssa.OpWasmF32Trunc, ssa.OpWasmF32Ceil, ssa.OpWasmF32Floor, ssa.OpWasmF32Nearest, ssa.OpWasmF32Abs,
+ ssa.OpWasmF64Neg, ssa.OpWasmF64Sqrt, ssa.OpWasmF64Trunc, ssa.OpWasmF64Ceil, ssa.OpWasmF64Floor, ssa.OpWasmF64Nearest, ssa.OpWasmF64Abs,
+ ssa.OpWasmI64Ctz, ssa.OpWasmI64Clz, ssa.OpWasmI64Popcnt:
+ getValue64(s, v.Args[0])
+ s.Prog(v.Op.Asm())
+
+ case ssa.OpLoadReg:
+ p := s.Prog(loadOp(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+
+ case ssa.OpCopy:
+ getValue64(s, v.Args[0])
+
+ default:
+ v.Fatalf("unexpected op: %s", v.Op)
+
+ }
+}
+
+func isCmp(v *ssa.Value) bool {
+ switch v.Op {
+ case ssa.OpWasmI64Eqz, ssa.OpWasmI64Eq, ssa.OpWasmI64Ne, ssa.OpWasmI64LtS, ssa.OpWasmI64LtU, ssa.OpWasmI64GtS, ssa.OpWasmI64GtU, ssa.OpWasmI64LeS, ssa.OpWasmI64LeU, ssa.OpWasmI64GeS, ssa.OpWasmI64GeU,
+ ssa.OpWasmF32Eq, ssa.OpWasmF32Ne, ssa.OpWasmF32Lt, ssa.OpWasmF32Gt, ssa.OpWasmF32Le, ssa.OpWasmF32Ge,
+ ssa.OpWasmF64Eq, ssa.OpWasmF64Ne, ssa.OpWasmF64Lt, ssa.OpWasmF64Gt, ssa.OpWasmF64Le, ssa.OpWasmF64Ge:
+ return true
+ default:
+ return false
+ }
+}
+
+func getValue32(s *ssagen.State, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, false)
+ if !isCmp(v) {
+ s.Prog(wasm.AI32WrapI64)
+ }
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg != wasm.REG_SP {
+ s.Prog(wasm.AI32WrapI64)
+ }
+}
+
+func getValue64(s *ssagen.State, v *ssa.Value) {
+ if v.OnWasmStack {
+ s.OnWasmStackSkipped--
+ ssaGenValueOnStack(s, v, true)
+ return
+ }
+
+ reg := v.Reg()
+ getReg(s, reg)
+ if reg == wasm.REG_SP {
+ s.Prog(wasm.AI64ExtendI32U)
+ }
+}
+
+func i32Const(s *ssagen.State, val int32) {
+ p := s.Prog(wasm.AI32Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: int64(val)}
+}
+
+func i64Const(s *ssagen.State, val int64) {
+ p := s.Prog(wasm.AI64Const)
+ p.From = obj.Addr{Type: obj.TYPE_CONST, Offset: val}
+}
+
+func f32Const(s *ssagen.State, val float64) {
+ p := s.Prog(wasm.AF32Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func f64Const(s *ssagen.State, val float64) {
+ p := s.Prog(wasm.AF64Const)
+ p.From = obj.Addr{Type: obj.TYPE_FCONST, Val: val}
+}
+
+func getReg(s *ssagen.State, reg int16) {
+ p := s.Prog(wasm.AGet)
+ p.From = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func setReg(s *ssagen.State, reg int16) {
+ p := s.Prog(wasm.ASet)
+ p.To = obj.Addr{Type: obj.TYPE_REG, Reg: reg}
+}
+
+func loadOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Load
+ case 8:
+ return wasm.AF64Load
+ default:
+ panic("bad load type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ if t.IsSigned() {
+ return wasm.AI64Load8S
+ }
+ return wasm.AI64Load8U
+ case 2:
+ if t.IsSigned() {
+ return wasm.AI64Load16S
+ }
+ return wasm.AI64Load16U
+ case 4:
+ if t.IsSigned() {
+ return wasm.AI64Load32S
+ }
+ return wasm.AI64Load32U
+ case 8:
+ return wasm.AI64Load
+ default:
+ panic("bad load type")
+ }
+}
+
+func storeOp(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return wasm.AF32Store
+ case 8:
+ return wasm.AF64Store
+ default:
+ panic("bad store type")
+ }
+ }
+
+ switch t.Size() {
+ case 1:
+ return wasm.AI64Store8
+ case 2:
+ return wasm.AI64Store16
+ case 4:
+ return wasm.AI64Store32
+ case 8:
+ return wasm.AI64Store
+ default:
+ panic("bad store type")
+ }
+}
diff --git a/src/cmd/compile/internal/x86/galign.go b/src/cmd/compile/internal/x86/galign.go
new file mode 100644
index 0000000..5565bd3
--- /dev/null
+++ b/src/cmd/compile/internal/x86/galign.go
@@ -0,0 +1,39 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ssagen"
+ "cmd/internal/obj/x86"
+ "fmt"
+ "internal/buildcfg"
+ "os"
+)
+
+func Init(arch *ssagen.ArchInfo) {
+ arch.LinkArch = &x86.Link386
+ arch.REGSP = x86.REGSP
+ arch.SSAGenValue = ssaGenValue
+ arch.SSAGenBlock = ssaGenBlock
+ arch.MAXWIDTH = (1 << 32) - 1
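+ // GO386 selects the floating-point strategy: sse2 (hardware FP, the default)
+ // or softfloat (software FP).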
+ switch v := buildcfg.GO386; v {
+ case "sse2":
+ case "softfloat":
+ arch.SoftFloat = true
+ case "387":
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=387. Consider using GO386=softfloat instead.\n")
+ base.Exit(1)
+ default:
+ fmt.Fprintf(os.Stderr, "unsupported setting GO386=%s\n", v)
+ base.Exit(1)
+
+ }
+
+ arch.ZeroRange = zerorange
+ arch.Ginsnop = ginsnop
+
+ arch.SSAMarkMoves = ssaMarkMoves
+}
diff --git a/src/cmd/compile/internal/x86/ggen.go b/src/cmd/compile/internal/x86/ggen.go
new file mode 100644
index 0000000..3ca4797
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ggen.go
@@ -0,0 +1,50 @@
+// Copyright 2009 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/objw"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+func zerorange(pp *objw.Progs, p *obj.Prog, off, cnt int64, ax *uint32) *obj.Prog {
+ if cnt == 0 {
+ return p
+ }
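+ // AX supplies the zero value for all of the stores below; *ax records
+ // whether it has already been cleared.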
+ if *ax == 0 {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, 0, obj.TYPE_REG, x86.REG_AX, 0)
+ *ax = 1
+ }
+
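+ // Pick a strategy by size: a few inline MOVL stores for small ranges,
+ // Duff's device for medium ranges, and REP STOSL for large ones.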
+ if cnt <= int64(4*types.RegSize) {
+ for i := int64(0); i < cnt; i += int64(types.RegSize) {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_REG, x86.REG_AX, 0, obj.TYPE_MEM, x86.REG_SP, off+i)
+ }
+ } else if cnt <= int64(128*types.RegSize) {
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
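+ // Jump partway into duffzero: the offset skips one byte per STOSL entry
+ // that isn't needed, leaving cnt/RegSize stores to execute.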
+ p = pp.Append(p, obj.ADUFFZERO, obj.TYPE_NONE, 0, 0, obj.TYPE_ADDR, 0, 1*(128-cnt/int64(types.RegSize)))
+ p.To.Sym = ir.Syms.Duffzero
+ } else {
+ p = pp.Append(p, x86.AMOVL, obj.TYPE_CONST, 0, cnt/int64(types.RegSize), obj.TYPE_REG, x86.REG_CX, 0)
+ p = pp.Append(p, x86.ALEAL, obj.TYPE_MEM, x86.REG_SP, off, obj.TYPE_REG, x86.REG_DI, 0)
+ p = pp.Append(p, x86.AREP, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ p = pp.Append(p, x86.ASTOSL, obj.TYPE_NONE, 0, 0, obj.TYPE_NONE, 0, 0)
+ }
+
+ return p
+}
+
+func ginsnop(pp *objw.Progs) *obj.Prog {
+ // See comment in ../amd64/ggen.go.
+ p := pp.Prog(x86.AXCHGL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ return p
+}
diff --git a/src/cmd/compile/internal/x86/ssa.go b/src/cmd/compile/internal/x86/ssa.go
new file mode 100644
index 0000000..32e29f3
--- /dev/null
+++ b/src/cmd/compile/internal/x86/ssa.go
@@ -0,0 +1,931 @@
+// Copyright 2016 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package x86
+
+import (
+ "fmt"
+ "math"
+
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/ir"
+ "cmd/compile/internal/logopt"
+ "cmd/compile/internal/ssa"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/types"
+ "cmd/internal/obj"
+ "cmd/internal/obj/x86"
+)
+
+// ssaMarkMoves marks any MOVXconst ops that need to avoid clobbering flags.
+func ssaMarkMoves(s *ssagen.State, b *ssa.Block) {
+ flive := b.FlagsLiveAtEnd
+ for _, c := range b.ControlValues() {
+ flive = c.Type.IsFlags() || flive
+ }
+ for i := len(b.Values) - 1; i >= 0; i-- {
+ v := b.Values[i]
+ if flive && v.Op == ssa.Op386MOVLconst {
+ // The "mark" is any non-nil Aux value.
+ v.Aux = v
+ }
+ if v.Type.IsFlags() {
+ flive = false
+ }
+ for _, a := range v.Args {
+ if a.Type.IsFlags() {
+ flive = true
+ }
+ }
+ }
+}
+
+// loadByType returns the load instruction of the given type.
+func loadByType(t *types.Type) obj.As {
+ // Avoid partial register write
+ if !t.IsFloat() {
+ switch t.Size() {
+ case 1:
+ return x86.AMOVBLZX
+ case 2:
+ return x86.AMOVWLZX
+ }
+ }
+ // Otherwise, there's no difference between load and store opcodes.
+ return storeByType(t)
+}
+
+// storeByType returns the store instruction of the given type.
+func storeByType(t *types.Type) obj.As {
+ width := t.Size()
+ if t.IsFloat() {
+ switch width {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ }
+ } else {
+ switch width {
+ case 1:
+ return x86.AMOVB
+ case 2:
+ return x86.AMOVW
+ case 4:
+ return x86.AMOVL
+ }
+ }
+ panic("bad store type")
+}
+
+// moveByType returns the reg->reg move instruction of the given type.
+func moveByType(t *types.Type) obj.As {
+ if t.IsFloat() {
+ switch t.Size() {
+ case 4:
+ return x86.AMOVSS
+ case 8:
+ return x86.AMOVSD
+ default:
+ panic(fmt.Sprintf("bad float register width %d:%s", t.Size(), t))
+ }
+ } else {
+ switch t.Size() {
+ case 1:
+ // Avoids partial register write
+ return x86.AMOVL
+ case 2:
+ return x86.AMOVL
+ case 4:
+ return x86.AMOVL
+ default:
+ panic(fmt.Sprintf("bad int register width %d:%s", t.Size(), t))
+ }
+ }
+}
+
+// opregreg emits instructions for
+// dest := dest(To) op src(From)
+// and also returns the created obj.Prog so it
+// may be further adjusted (offset, scale, etc).
+func opregreg(s *ssagen.State, op obj.As, dest, src int16) *obj.Prog {
+ p := s.Prog(op)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = dest
+ p.From.Reg = src
+ return p
+}
+
+func ssaGenValue(s *ssagen.State, v *ssa.Value) {
+ switch v.Op {
+ case ssa.Op386ADDL:
+ r := v.Reg()
+ r1 := v.Args[0].Reg()
+ r2 := v.Args[1].Reg()
+ switch {
+ case r == r1:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case r == r2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ default:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r1
+ p.From.Scale = 1
+ p.From.Index = r2
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ }
+
+ // 2-address opcode arithmetic
+ case ssa.Op386SUBL,
+ ssa.Op386MULL,
+ ssa.Op386ANDL,
+ ssa.Op386ORL,
+ ssa.Op386XORL,
+ ssa.Op386SHLL,
+ ssa.Op386SHRL, ssa.Op386SHRW, ssa.Op386SHRB,
+ ssa.Op386SARL, ssa.Op386SARW, ssa.Op386SARB,
+ ssa.Op386ADDSS, ssa.Op386ADDSD, ssa.Op386SUBSS, ssa.Op386SUBSD,
+ ssa.Op386MULSS, ssa.Op386MULSD, ssa.Op386DIVSS, ssa.Op386DIVSD,
+ ssa.Op386PXOR,
+ ssa.Op386ADCL,
+ ssa.Op386SBBL:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[1].Reg())
+
+ case ssa.Op386ADDLcarry, ssa.Op386SUBLcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ opregreg(s, v.Op.Asm(), v.Reg0(), v.Args[1].Reg())
+
+ case ssa.Op386ADDLconstcarry, ssa.Op386SUBLconstcarry:
+ // output 0 is carry/borrow, output 1 is the low 32 bits.
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg0()
+
+ case ssa.Op386DIVL, ssa.Op386DIVW,
+ ssa.Op386DIVLU, ssa.Op386DIVWU,
+ ssa.Op386MODL, ssa.Op386MODW,
+ ssa.Op386MODLU, ssa.Op386MODWU:
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and AX is the only output
+ x := v.Args[1].Reg()
+
+ // CPU faults upon signed overflow, which occurs when the most
+ // negative int is divided by -1.
+ var j *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW ||
+ v.Op == ssa.Op386MODL || v.Op == ssa.Op386MODW {
+
+ if ssa.DivisionNeedsFixUp(v) {
+ var c *obj.Prog
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ c = s.Prog(x86.ACMPL)
+ j = s.Prog(x86.AJEQ)
+
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ c = s.Prog(x86.ACMPW)
+ j = s.Prog(x86.AJEQ)
+ }
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x
+ c.To.Type = obj.TYPE_CONST
+ c.To.Offset = -1
+
+ j.To.Type = obj.TYPE_BRANCH
+ }
+ // sign extend the dividend
+ switch v.Op {
+ case ssa.Op386DIVL, ssa.Op386MODL:
+ s.Prog(x86.ACDQ)
+ case ssa.Op386DIVW, ssa.Op386MODW:
+ s.Prog(x86.ACWD)
+ }
+ }
+
+ // For unsigned ints the dividend is zero extended by setting DX = 0;
+ // signed ints were sign extended above.
+ if v.Op == ssa.Op386DIVLU || v.Op == ssa.Op386MODLU ||
+ v.Op == ssa.Op386DIVWU || v.Op == ssa.Op386MODWU {
+ c := s.Prog(x86.AXORL)
+ c.From.Type = obj.TYPE_REG
+ c.From.Reg = x86.REG_DX
+ c.To.Type = obj.TYPE_REG
+ c.To.Reg = x86.REG_DX
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+
+ // signed division, rest of the check for -1 case
+ if j != nil {
+ j2 := s.Prog(obj.AJMP)
+ j2.To.Type = obj.TYPE_BRANCH
+
+ var n *obj.Prog
+ if v.Op == ssa.Op386DIVL || v.Op == ssa.Op386DIVW {
+ // n * -1 = -n
+ n = s.Prog(x86.ANEGL)
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_AX
+ } else {
+ // n % -1 == 0
+ n = s.Prog(x86.AXORL)
+ n.From.Type = obj.TYPE_REG
+ n.From.Reg = x86.REG_DX
+ n.To.Type = obj.TYPE_REG
+ n.To.Reg = x86.REG_DX
+ }
+
+ j.To.SetTarget(n)
+ j2.To.SetTarget(s.Pc())
+ }
+
+ case ssa.Op386HMULL, ssa.Op386HMULLU:
+ // the frontend rewrites constant division by 8/16/32 bit integers into
+ // HMUL by a constant
+ // SSA rewrites generate the 64 bit versions
+
+ // Arg[0] is already in AX as it's the only register we allow
+ // and DX is the only output we care about (the high bits)
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ // IMULB puts the high portion in AH instead of DL,
+ // so move it to DL for consistency
+ if v.Type.Size() == 1 {
+ m := s.Prog(x86.AMOVB)
+ m.From.Type = obj.TYPE_REG
+ m.From.Reg = x86.REG_AH
+ m.To.Type = obj.TYPE_REG
+ m.To.Reg = x86.REG_DX
+ }
+
+ case ssa.Op386MULLU:
+ // Arg[0] is already in AX as it's the only register we allow
+ // results lo in AX
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386MULLQU:
+ // AX * args[1], high 32 bits in DX (result[0]), low 32 bits in AX (result[1]).
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+
+ case ssa.Op386AVGLU:
+ // compute (x+y)/2 unsigned.
+ // Do a 32-bit add, the overflow goes into the carry.
+ // Shift right once and pull the carry back into the 31st bit.
+ p := s.Prog(x86.AADDL)
+ p.From.Type = obj.TYPE_REG
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ p.From.Reg = v.Args[1].Reg()
+ p = s.Prog(x86.ARCRL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 1
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386ADDLconst:
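+ // Prefer INCL/DECL when adding +1 or -1 in place; otherwise use ADDL in
+ // place, or LEAL when the result goes to a different register.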
+ r := v.Reg()
+ a := v.Args[0].Reg()
+ if r == a {
+ if v.AuxInt == 1 {
+ p := s.Prog(x86.AINCL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ if v.AuxInt == -1 {
+ p := s.Prog(x86.ADECL)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ return
+ }
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = a
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+
+ case ssa.Op386MULLconst:
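+ // Three-operand immediate multiply: v.Reg() = v.Args[0].Reg() * AuxInt.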
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ p.SetFrom3Reg(v.Args[0].Reg())
+
+ case ssa.Op386SUBLconst,
+ ssa.Op386ADCLconst,
+ ssa.Op386SBBLconst,
+ ssa.Op386ANDLconst,
+ ssa.Op386ORLconst,
+ ssa.Op386XORLconst,
+ ssa.Op386SHLLconst,
+ ssa.Op386SHRLconst, ssa.Op386SHRWconst, ssa.Op386SHRBconst,
+ ssa.Op386SARLconst, ssa.Op386SARWconst, ssa.Op386SARBconst,
+ ssa.Op386ROLLconst, ssa.Op386ROLWconst, ssa.Op386ROLBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386SBBLcarrymask:
+ r := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = r
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ case ssa.Op386LEAL1, ssa.Op386LEAL2, ssa.Op386LEAL4, ssa.Op386LEAL8:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(x86.ALEAL)
+ switch v.Op {
+ case ssa.Op386LEAL1:
+ p.From.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386LEAL2:
+ p.From.Scale = 2
+ case ssa.Op386LEAL4:
+ p.From.Scale = 4
+ case ssa.Op386LEAL8:
+ p.From.Scale = 8
+ }
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386LEAL:
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386CMPL, ssa.Op386CMPW, ssa.Op386CMPB,
+ ssa.Op386TESTL, ssa.Op386TESTW, ssa.Op386TESTB:
+ opregreg(s, v.Op.Asm(), v.Args[1].Reg(), v.Args[0].Reg())
+ case ssa.Op386UCOMISS, ssa.Op386UCOMISD:
+ // Go assembler has swapped operands for UCOMISx relative to CMP,
+ // must account for that right here.
+ opregreg(s, v.Op.Asm(), v.Args[0].Reg(), v.Args[1].Reg())
+ case ssa.Op386CMPLconst, ssa.Op386CMPWconst, ssa.Op386CMPBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = v.AuxInt
+ case ssa.Op386TESTLconst, ssa.Op386TESTWconst, ssa.Op386TESTBconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[0].Reg()
+ case ssa.Op386CMPLload, ssa.Op386CMPWload, ssa.Op386CMPBload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Args[1].Reg()
+ case ssa.Op386CMPLconstload, ssa.Op386CMPWconstload, ssa.Op386CMPBconstload:
+ sc := v.AuxValAndOff()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.From, v, sc.Off64())
+ p.To.Type = obj.TYPE_CONST
+ p.To.Offset = sc.Val64()
+ case ssa.Op386MOVLconst:
+ x := v.Reg()
+
+ // If flags aren't live (indicated by v.Aux == nil),
+ // then we can rewrite MOV $0, AX into XOR AX, AX.
+ if v.AuxInt == 0 && v.Aux == nil {
+ p := s.Prog(x86.AXORL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ break
+ }
+
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = v.AuxInt
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst, ssa.Op386MOVSDconst:
+ x := v.Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_FCONST
+ p.From.Val = math.Float64frombits(uint64(v.AuxInt))
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x
+ case ssa.Op386MOVSSconst1, ssa.Op386MOVSDconst1:
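+ // const1 materializes the address of the constant-pool symbol with LEAL;
+ // a paired const2 op then loads the value through that address.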
+ p := s.Prog(x86.ALEAL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Name = obj.NAME_EXTERN
+ f := math.Float64frombits(uint64(v.AuxInt))
+ if v.Op == ssa.Op386MOVSDconst1 {
+ p.From.Sym = base.Ctxt.Float64Sym(f)
+ } else {
+ p.From.Sym = base.Ctxt.Float32Sym(float32(f))
+ }
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVSSconst2, ssa.Op386MOVSDconst2:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386MOVSSload, ssa.Op386MOVSDload, ssa.Op386MOVLload, ssa.Op386MOVWload, ssa.Op386MOVBload, ssa.Op386MOVBLSXload, ssa.Op386MOVWLSXload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1,
+ ssa.Op386MOVSDloadidx8, ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4, ssa.Op386MOVWloadidx2:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBloadidx1, ssa.Op386MOVWloadidx1, ssa.Op386MOVLloadidx1, ssa.Op386MOVSSloadidx1, ssa.Op386MOVSDloadidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.From.Scale = 1
+ case ssa.Op386MOVSDloadidx8:
+ p.From.Scale = 8
+ case ssa.Op386MOVLloadidx4, ssa.Op386MOVSSloadidx4:
+ p.From.Scale = 4
+ case ssa.Op386MOVWloadidx2:
+ p.From.Scale = 2
+ }
+ p.From.Reg = r
+ p.From.Index = i
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386ADDLloadidx4, ssa.Op386SUBLloadidx4, ssa.Op386MULLloadidx4,
+ ssa.Op386ANDLloadidx4, ssa.Op386ORLloadidx4, ssa.Op386XORLloadidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ p.From.Index = v.Args[2].Reg()
+ p.From.Scale = 4
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386ADDLload, ssa.Op386SUBLload, ssa.Op386MULLload,
+ ssa.Op386ANDLload, ssa.Op386ORLload, ssa.Op386XORLload,
+ ssa.Op386ADDSDload, ssa.Op386ADDSSload, ssa.Op386SUBSDload, ssa.Op386SUBSSload,
+ ssa.Op386MULSDload, ssa.Op386MULSSload, ssa.Op386DIVSSload, ssa.Op386DIVSDload:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = v.Args[1].Reg()
+ ssagen.AddAux(&p.From, v)
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386MOVSSstore, ssa.Op386MOVSDstore, ssa.Op386MOVLstore, ssa.Op386MOVWstore, ssa.Op386MOVBstore,
+ ssa.Op386ADDLmodify, ssa.Op386SUBLmodify, ssa.Op386ANDLmodify, ssa.Op386ORLmodify, ssa.Op386XORLmodify:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[1].Reg()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ case ssa.Op386ADDLconstmodify:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386ANDLconstmodify, ssa.Op386ORLconstmodify, ssa.Op386XORLconstmodify:
+ sc := v.AuxValAndOff()
+ off := sc.Off64()
+ val := sc.Val64()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = val
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1,
+ ssa.Op386MOVSDstoreidx8, ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4, ssa.Op386MOVWstoreidx2,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[2].Reg()
+ p.To.Type = obj.TYPE_MEM
+ switch v.Op {
+ case ssa.Op386MOVBstoreidx1, ssa.Op386MOVWstoreidx1, ssa.Op386MOVLstoreidx1, ssa.Op386MOVSSstoreidx1, ssa.Op386MOVSDstoreidx1:
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ p.To.Scale = 1
+ case ssa.Op386MOVSDstoreidx8:
+ p.To.Scale = 8
+ case ssa.Op386MOVSSstoreidx4, ssa.Op386MOVLstoreidx4,
+ ssa.Op386ADDLmodifyidx4, ssa.Op386SUBLmodifyidx4, ssa.Op386ANDLmodifyidx4, ssa.Op386ORLmodifyidx4, ssa.Op386XORLmodifyidx4:
+ p.To.Scale = 4
+ case ssa.Op386MOVWstoreidx2:
+ p.To.Scale = 2
+ }
+ p.To.Reg = r
+ p.To.Index = i
+ ssagen.AddAux(&p.To, v)
+ case ssa.Op386MOVLstoreconst, ssa.Op386MOVWstoreconst, ssa.Op386MOVBstoreconst:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.Op386ADDLconstmodifyidx4:
+ sc := v.AuxValAndOff()
+ val := sc.Val()
+ if val == 1 || val == -1 {
+ var p *obj.Prog
+ if val == 1 {
+ p = s.Prog(x86.AINCL)
+ } else {
+ p = s.Prog(x86.ADECL)
+ }
+ off := sc.Off64()
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ p.To.Scale = 4
+ p.To.Index = v.Args[1].Reg()
+ ssagen.AddAux2(&p.To, v, off)
+ break
+ }
+ fallthrough
+ case ssa.Op386MOVLstoreconstidx1, ssa.Op386MOVLstoreconstidx4, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVWstoreconstidx2, ssa.Op386MOVBstoreconstidx1,
+ ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_CONST
+ sc := v.AuxValAndOff()
+ p.From.Offset = sc.Val64()
+ r := v.Args[0].Reg()
+ i := v.Args[1].Reg()
+ switch v.Op {
+ case ssa.Op386MOVBstoreconstidx1, ssa.Op386MOVWstoreconstidx1, ssa.Op386MOVLstoreconstidx1:
+ p.To.Scale = 1
+ if i == x86.REG_SP {
+ r, i = i, r
+ }
+ case ssa.Op386MOVWstoreconstidx2:
+ p.To.Scale = 2
+ case ssa.Op386MOVLstoreconstidx4,
+ ssa.Op386ADDLconstmodifyidx4, ssa.Op386ANDLconstmodifyidx4, ssa.Op386ORLconstmodifyidx4, ssa.Op386XORLconstmodifyidx4:
+ p.To.Scale = 4
+ }
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = r
+ p.To.Index = i
+ ssagen.AddAux2(&p.To, v, sc.Off64())
+ case ssa.Op386MOVWLSX, ssa.Op386MOVBLSX, ssa.Op386MOVWLZX, ssa.Op386MOVBLZX,
+ ssa.Op386CVTSL2SS, ssa.Op386CVTSL2SD,
+ ssa.Op386CVTTSS2SL, ssa.Op386CVTTSD2SL,
+ ssa.Op386CVTSS2SD, ssa.Op386CVTSD2SS:
+ opregreg(s, v.Op.Asm(), v.Reg(), v.Args[0].Reg())
+ case ssa.Op386DUFFZERO:
+ p := s.Prog(obj.ADUFFZERO)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffzero
+ p.To.Offset = v.AuxInt
+ case ssa.Op386DUFFCOPY:
+ p := s.Prog(obj.ADUFFCOPY)
+ p.To.Type = obj.TYPE_ADDR
+ p.To.Sym = ir.Syms.Duffcopy
+ p.To.Offset = v.AuxInt
+
+ case ssa.OpCopy: // TODO: use MOVLreg for reg->reg copies instead of OpCopy?
+ if v.Type.IsMemory() {
+ return
+ }
+ x := v.Args[0].Reg()
+ y := v.Reg()
+ if x != y {
+ opregreg(s, moveByType(v.Type), y, x)
+ }
+ case ssa.OpLoadReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("load flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(loadByType(v.Type))
+ ssagen.AddrAuto(&p.From, v.Args[0])
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.OpStoreReg:
+ if v.Type.IsFlags() {
+ v.Fatalf("store flags not implemented: %v", v.LongString())
+ return
+ }
+ p := s.Prog(storeByType(v.Type))
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ ssagen.AddrAuto(&p.To, v)
+ case ssa.Op386LoweredGetClosurePtr:
+ // Closure pointer is DX.
+ ssagen.CheckLoweredGetClosurePtr(v)
+ case ssa.Op386LoweredGetG:
+ r := v.Reg()
+ // See the comments in cmd/internal/obj/x86/obj6.go
+ // near CanUse1InsnTLS for a detailed explanation of these instructions.
+ if x86.CanUse1InsnTLS(base.Ctxt) {
+ // MOVL (TLS), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ } else {
+ // MOVL TLS, r
+ // MOVL (r)(TLS*1), r
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_TLS
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = r
+ q := s.Prog(x86.AMOVL)
+ q.From.Type = obj.TYPE_MEM
+ q.From.Reg = r
+ q.From.Index = x86.REG_TLS
+ q.From.Scale = 1
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = r
+ }
+
+ case ssa.Op386LoweredGetCallerPC:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_MEM
+ p.From.Offset = -4 // PC is stored 4 bytes below first parameter.
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredGetCallerSP:
+ // caller's SP is the address of the first arg
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_ADDR
+ p.From.Offset = -base.Ctxt.FixedFrameSize() // 0 on 386, just to be consistent with other architectures
+ p.From.Name = obj.NAME_PARAM
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386LoweredWB:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = v.Aux.(*obj.LSym)
+
+ case ssa.Op386LoweredPanicBoundsA, ssa.Op386LoweredPanicBoundsB, ssa.Op386LoweredPanicBoundsC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.BoundsCheckFunc[v.AuxInt]
+ s.UseArgs(8) // space used in callee args area by assembly stubs
+
+ case ssa.Op386LoweredPanicExtendA, ssa.Op386LoweredPanicExtendB, ssa.Op386LoweredPanicExtendC:
+ p := s.Prog(obj.ACALL)
+ p.To.Type = obj.TYPE_MEM
+ p.To.Name = obj.NAME_EXTERN
+ p.To.Sym = ssagen.ExtendCheckFunc[v.AuxInt]
+ s.UseArgs(12) // space used in callee args area by assembly stubs
+
+ case ssa.Op386CALLstatic, ssa.Op386CALLclosure, ssa.Op386CALLinter:
+ s.Call(v)
+ case ssa.Op386CALLtail:
+ s.TailCall(v)
+ case ssa.Op386NEGL,
+ ssa.Op386BSWAPL,
+ ssa.Op386NOTL:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386BSFL, ssa.Op386BSFW,
+ ssa.Op386BSRL, ssa.Op386BSRW,
+ ssa.Op386SQRTSS, ssa.Op386SQRTSD:
+ p := s.Prog(v.Op.Asm())
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = v.Args[0].Reg()
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ case ssa.Op386SETEQ, ssa.Op386SETNE,
+ ssa.Op386SETL, ssa.Op386SETLE,
+ ssa.Op386SETG, ssa.Op386SETGE,
+ ssa.Op386SETGF, ssa.Op386SETGEF,
+ ssa.Op386SETB, ssa.Op386SETBE,
+ ssa.Op386SETORD, ssa.Op386SETNAN,
+ ssa.Op386SETA, ssa.Op386SETAE,
+ ssa.Op386SETO:
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+
+ case ssa.Op386SETNEF:
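+ // != must also be true for unordered (NaN) operands, so OR the result
+ // with the parity flag.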
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPS)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AORL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386SETEQF:
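+ // == must be false for unordered (NaN) operands, so AND the result
+ // with the inverted parity flag (SETPC).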
+ p := s.Prog(v.Op.Asm())
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = v.Reg()
+ q := s.Prog(x86.ASETPC)
+ q.To.Type = obj.TYPE_REG
+ q.To.Reg = x86.REG_AX
+ opregreg(s, x86.AANDL, v.Reg(), x86.REG_AX)
+
+ case ssa.Op386InvertFlags:
+ v.Fatalf("InvertFlags should never make it to codegen %v", v.LongString())
+ case ssa.Op386FlagEQ, ssa.Op386FlagLT_ULT, ssa.Op386FlagLT_UGT, ssa.Op386FlagGT_ULT, ssa.Op386FlagGT_UGT:
+ v.Fatalf("Flag* ops should never make it to codegen %v", v.LongString())
+ case ssa.Op386REPSTOSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.ASTOSL)
+ case ssa.Op386REPMOVSL:
+ s.Prog(x86.AREP)
+ s.Prog(x86.AMOVSL)
+ case ssa.Op386LoweredNilCheck:
+ // Issue a load which will fault if the input is nil.
+ // TODO: We currently use the 2-byte instruction TESTB AX, (reg).
+ // Should we use the 3-byte TESTB $0, (reg) instead? It is larger
+ // but it doesn't have a false dependency on AX.
+ // Or maybe allocate an output register and use MOVL (reg),reg2 ?
+ // That trades clobbering flags for clobbering a register.
+ p := s.Prog(x86.ATESTB)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = v.Args[0].Reg()
+ ssagen.AddAux(&p.To, v)
+ if logopt.Enabled() {
+ logopt.LogOpt(v.Pos, "nilcheck", "genssa", v.Block.Func.Name)
+ }
+ if base.Debug.Nil != 0 && v.Pos.Line() > 1 { // v.Pos.Line()==1 in generated wrappers
+ base.WarnfAt(v.Pos, "generated nil check")
+ }
+ case ssa.OpClobber:
+ p := s.Prog(x86.AMOVL)
+ p.From.Type = obj.TYPE_CONST
+ p.From.Offset = 0xdeaddead
+ p.To.Type = obj.TYPE_MEM
+ p.To.Reg = x86.REG_SP
+ ssagen.AddAux(&p.To, v)
+ case ssa.OpClobberReg:
+ // TODO: implement for clobberdead experiment. Nop is ok for now.
+ default:
+ v.Fatalf("genValue not implemented: %s", v.LongString())
+ }
+}
+
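+// blockJump maps each conditional block kind to its conditional jump and to
+// the inverted jump used when control falls through to the first successor.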
+var blockJump = [...]struct {
+ asm, invasm obj.As
+}{
+ ssa.Block386EQ: {x86.AJEQ, x86.AJNE},
+ ssa.Block386NE: {x86.AJNE, x86.AJEQ},
+ ssa.Block386LT: {x86.AJLT, x86.AJGE},
+ ssa.Block386GE: {x86.AJGE, x86.AJLT},
+ ssa.Block386LE: {x86.AJLE, x86.AJGT},
+ ssa.Block386GT: {x86.AJGT, x86.AJLE},
+ ssa.Block386OS: {x86.AJOS, x86.AJOC},
+ ssa.Block386OC: {x86.AJOC, x86.AJOS},
+ ssa.Block386ULT: {x86.AJCS, x86.AJCC},
+ ssa.Block386UGE: {x86.AJCC, x86.AJCS},
+ ssa.Block386UGT: {x86.AJHI, x86.AJLS},
+ ssa.Block386ULE: {x86.AJLS, x86.AJHI},
+ ssa.Block386ORD: {x86.AJPC, x86.AJPS},
+ ssa.Block386NAN: {x86.AJPS, x86.AJPC},
+}
+
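+// eqfJumps and nefJumps give the two-jump sequences used by CombJump for the
+// floating-point equality blocks, where the parity flag must also be tested so
+// that unordered (NaN) comparisons take the correct edge.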
+var eqfJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPS, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 1}, {Jump: x86.AJPC, Index: 0}}, // next == b.Succs[1]
+}
+var nefJumps = [2][2]ssagen.IndexJump{
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPC, Index: 1}}, // next == b.Succs[0]
+ {{Jump: x86.AJNE, Index: 0}, {Jump: x86.AJPS, Index: 0}}, // next == b.Succs[1]
+}
+
+func ssaGenBlock(s *ssagen.State, b, next *ssa.Block) {
+ switch b.Kind {
+ case ssa.BlockPlain:
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockDefer:
+ // defer returns in AX:
+ // 0 if we should continue executing
+ // 1 if we should jump to deferreturn call
+ p := s.Prog(x86.ATESTL)
+ p.From.Type = obj.TYPE_REG
+ p.From.Reg = x86.REG_AX
+ p.To.Type = obj.TYPE_REG
+ p.To.Reg = x86.REG_AX
+ p = s.Prog(x86.AJNE)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[1].Block()})
+ if b.Succs[0].Block() != next {
+ p := s.Prog(obj.AJMP)
+ p.To.Type = obj.TYPE_BRANCH
+ s.Branches = append(s.Branches, ssagen.Branch{P: p, B: b.Succs[0].Block()})
+ }
+ case ssa.BlockExit, ssa.BlockRetJmp:
+ case ssa.BlockRet:
+ s.Prog(obj.ARET)
+
+ case ssa.Block386EQF:
+ s.CombJump(b, next, &eqfJumps)
+
+ case ssa.Block386NEF:
+ s.CombJump(b, next, &nefJumps)
+
+ case ssa.Block386EQ, ssa.Block386NE,
+ ssa.Block386LT, ssa.Block386GE,
+ ssa.Block386LE, ssa.Block386GT,
+ ssa.Block386OS, ssa.Block386OC,
+ ssa.Block386ULT, ssa.Block386UGT,
+ ssa.Block386ULE, ssa.Block386UGE:
+ jmp := blockJump[b.Kind]
+ switch next {
+ case b.Succs[0].Block():
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ case b.Succs[1].Block():
+ s.Br(jmp.asm, b.Succs[0].Block())
+ default:
+ if b.Likely != ssa.BranchUnlikely {
+ s.Br(jmp.asm, b.Succs[0].Block())
+ s.Br(obj.AJMP, b.Succs[1].Block())
+ } else {
+ s.Br(jmp.invasm, b.Succs[1].Block())
+ s.Br(obj.AJMP, b.Succs[0].Block())
+ }
+ }
+ default:
+ b.Fatalf("branch not implemented: %s", b.LongString())
+ }
+}
diff --git a/src/cmd/compile/main.go b/src/cmd/compile/main.go
new file mode 100644
index 0000000..3af1e1f
--- /dev/null
+++ b/src/cmd/compile/main.go
@@ -0,0 +1,57 @@
+// Copyright 2015 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+package main
+
+import (
+ "cmd/compile/internal/amd64"
+ "cmd/compile/internal/arm"
+ "cmd/compile/internal/arm64"
+ "cmd/compile/internal/base"
+ "cmd/compile/internal/gc"
+ "cmd/compile/internal/mips"
+ "cmd/compile/internal/mips64"
+ "cmd/compile/internal/ppc64"
+ "cmd/compile/internal/riscv64"
+ "cmd/compile/internal/s390x"
+ "cmd/compile/internal/ssagen"
+ "cmd/compile/internal/wasm"
+ "cmd/compile/internal/x86"
+ "fmt"
+ "internal/buildcfg"
+ "log"
+ "os"
+)
+
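+// archInits maps each supported GOARCH to the Init function of its backend.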
+var archInits = map[string]func(*ssagen.ArchInfo){
+ "386": x86.Init,
+ "amd64": amd64.Init,
+ "arm": arm.Init,
+ "arm64": arm64.Init,
+ "mips": mips.Init,
+ "mipsle": mips.Init,
+ "mips64": mips64.Init,
+ "mips64le": mips64.Init,
+ "ppc64": ppc64.Init,
+ "ppc64le": ppc64.Init,
+ "riscv64": riscv64.Init,
+ "s390x": s390x.Init,
+ "wasm": wasm.Init,
+}
+
+func main() {
+ // disable timestamps for reproducible output
+ log.SetFlags(0)
+ log.SetPrefix("compile: ")
+
+ buildcfg.Check()
+ archInit, ok := archInits[buildcfg.GOARCH]
+ if !ok {
+ fmt.Fprintf(os.Stderr, "compile: unknown architecture %q\n", buildcfg.GOARCH)
+ os.Exit(2)
+ }
+
+ gc.Main(archInit)
+ base.Exit(0)
+}